/*
 * Broadcom Dongle Host Driver (DHD), Linux-specific network interface.
 * Basically selected code segments from usb-cdc.c and usb-rndis.c
 *
 * Copyright (C) 2020, Broadcom.
 *
 *      Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 *      As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module.  An independent module is a module which is not
 * derived from this software.  The special exception does not apply to any
 * modifications of the software.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id$
 */

#include <typedefs.h>
#include <linuxver.h>
#include <osl.h>
#ifdef SHOW_LOGTRACE
#include <linux/syscalls.h>
#include <event_log.h>
#endif /* SHOW_LOGTRACE */

#if defined(PCIE_FULL_DONGLE) || defined(SHOW_LOGTRACE)
#include <bcmmsgbuf.h>
#endif /* PCIE_FULL_DONGLE || SHOW_LOGTRACE */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/etherdevice.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/fcntl.h>
#include <linux/fs.h>
#include <linux/ip.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/irq.h>
#if defined(CONFIG_TIZEN)
#include <linux/net_stat_tizen.h>
#endif /* CONFIG_TIZEN */
#include <net/addrconf.h>
#ifdef ENABLE_ADAPTIVE_SCHED
#include <linux/cpufreq.h>
#endif /* ENABLE_ADAPTIVE_SCHED */
#include <linux/rtc.h>
#include <linux/namei.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <dhd_linux_priv.h>

#include <epivers.h>
#include <bcmutils.h>
#include <bcmendian.h>
#include <bcmdevs.h>
#include <bcmdevs_legacy.h>    /* need to still support chips no longer in trunk firmware */
#include <bcmiov.h>
#include <bcmstdlib_s.h>

#include <ethernet.h>
#include <bcmevent.h>
#include <vlan.h>
#include <802.3.h>

#ifdef WL_NANHO
#include <nanho.h>
#endif /* WL_NANHO */
#include <dhd_linux_wq.h>
#include <dhd.h>
#include <dhd_linux.h>
#include <dhd_linux_pktdump.h>
#ifdef DHD_WET
#include <dhd_wet.h>
#endif /* DHD_WET */
#ifdef PCIE_FULL_DONGLE
#include <dhd_flowring.h>
#endif
#include <dhd_bus.h>
#include <dhd_proto.h>
#include <dhd_config.h>
#ifdef WL_ESCAN
#include <wl_escan.h>
#endif
#include <dhd_dbg.h>
#include <dhd_dbg_ring.h>
#include <dhd_debug.h>
#if defined(WL_CFG80211)
#include <wl_cfg80211.h>
#ifdef WL_BAM
#include <wl_bam.h>
#endif	/* WL_BAM */
#endif	/* WL_CFG80211 */
#ifdef PNO_SUPPORT
#include <dhd_pno.h>
#endif
#ifdef RTT_SUPPORT
#include <dhd_rtt.h>
#endif
#ifdef DHD_TIMESYNC
#include <dhd_timesync.h>
#include <linux/ip.h>
#include <net/icmp.h>
#endif /* DHD_TIMESYNC */

#include <dhd_linux_sock_qos.h>

#ifdef CSI_SUPPORT
#include <dhd_csi.h>
#endif /* CSI_SUPPORT */

#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif

#ifdef CONFIG_ARCH_EXYNOS
#ifndef SUPPORT_EXYNOS7420
#include <linux/exynos-pci-ctrl.h>
#endif /* SUPPORT_EXYNOS7420 */
#endif /* CONFIG_ARCH_EXYNOS */

#ifdef DHD_WMF
#include <dhd_wmf_linux.h>
#endif /* DHD_WMF */

#ifdef DHD_L2_FILTER
#include <bcmicmp.h>
#include <bcm_l2_filter.h>
#include <dhd_l2_filter.h>
#endif /* DHD_L2_FILTER */

#ifdef DHD_PSTA
#include <dhd_psta.h>
#endif /* DHD_PSTA */

#ifdef AMPDU_VO_ENABLE
/* XXX: Enabling VO AMPDU to reduce FER */
#include <802.1d.h>
#endif /* AMPDU_VO_ENABLE */

#if defined(DHDTCPACK_SUPPRESS) || defined(DHDTCPSYNC_FLOOD_BLK)
#include <dhd_ip.h>
#endif /* DHDTCPACK_SUPPRESS || DHDTCPSYNC_FLOOD_BLK */
#include <dhd_daemon.h>
#ifdef DHD_PKT_LOGGING
#include <dhd_pktlog.h>
#endif /* DHD_PKT_LOGGING */
#ifdef DHD_4WAYM4_FAIL_DISCONNECT
#include <eapol.h>
#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
#ifdef DHD_DEBUG_PAGEALLOC
typedef void (*page_corrupt_cb_t)(void *handle, void *addr_corrupt, size_t len);
void dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len);
extern void register_page_corrupt_cb(page_corrupt_cb_t cb, void* handle);
#endif /* DHD_DEBUG_PAGEALLOC */

#if defined(DHD_TCP_WINSIZE_ADJUST)
#include <linux/tcp.h>
#include <net/tcp.h>
#endif /* DHD_TCP_WINSIZE_ADJUST */

#ifdef ENABLE_DHD_GRO
#include <net/sch_generic.h>
#endif /* ENABLE_DHD_GRO */

#define IP_PROT_RESERVED	0xFF

#ifdef DHD_MQ
#define MQ_MAX_QUEUES AC_COUNT
#define MQ_MAX_CPUS 16
int enable_mq = TRUE;
module_param(enable_mq, int, 0644);
int mq_select_disable = FALSE;
#endif

#ifdef BCMINTERNAL
#ifdef DHD_FWTRACE
#include <dhd_fwtrace.h>
#endif /* DHD_FWTRACE */
#endif /* BCMINTERNAL */

#if defined(DHD_LB)
#if !defined(PCIE_FULL_DONGLE)
#error "DHD Loadbalancing only supported on PCIE_FULL_DONGLE"
#endif /* !PCIE_FULL_DONGLE */
#endif /* DHD_LB */

#if defined(DHD_LB_RXP) || defined(DHD_LB_TXP) || defined(DHD_LB_STATS)
#if !defined(DHD_LB)
#error "DHD loadbalance derivatives are supported only if DHD_LB is defined"
#endif /* !DHD_LB */
#endif /* DHD_LB_RXP || DHD_LB_TXP || DHD_LB_STATS */

#ifdef DHD_4WAYM4_FAIL_DISCONNECT
static void dhd_m4_state_handler(struct work_struct * work);
#endif /* DHD_4WAYM4_FAIL_DISCONNECT */

#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT) && defined(DHD_FW_COREDUMP)
static int dhd_wait_for_file_dump(dhd_pub_t *dhdp);
#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT && DHD_FW_COREDUMP */

#ifdef FIX_CPU_MIN_CLOCK
#include <linux/pm_qos.h>
#endif /* FIX_CPU_MIN_CLOCK */

#ifdef ENABLE_ADAPTIVE_SCHED
#define DEFAULT_CPUFREQ_THRESH		1000000	/* threshold frequency : 1000000 = 1GHz */
#ifndef CUSTOM_CPUFREQ_THRESH
#define CUSTOM_CPUFREQ_THRESH	DEFAULT_CPUFREQ_THRESH
#endif /* CUSTOM_CPUFREQ_THRESH */
#endif /* ENABLE_ADAPTIVE_SCHED */
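
/*
 * Illustrative sketch (not part of this driver): ENABLE_ADAPTIVE_SCHED is
 * about picking a thread scheduling policy from the current CPU frequency,
 * e.g. falling back to SCHED_NORMAL below CUSTOM_CPUFREQ_THRESH (cpufreq
 * reports kHz, so 1000000 == 1 GHz). The helper below is a hypothetical
 * example of such a check, not dhd's actual policy switch:
 *
 *	static void example_adaptive_sched(struct task_struct *tsk)
 *	{
 *		struct sched_param param = { .sched_priority = 0 };
 *
 *		if (cpufreq_quick_get(0) >= CUSTOM_CPUFREQ_THRESH) {
 *			param.sched_priority = 1;
 *			sched_setscheduler(tsk, SCHED_FIFO, &param);
 *		} else {
 *			sched_setscheduler(tsk, SCHED_NORMAL, &param);
 *		}
 *	}
 */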

/* enable HOSTIP cache update from the host side when an eth0:N is up */
#define AOE_IP_ALIAS_SUPPORT 1

#ifdef PROP_TXSTATUS
#include <wlfc_proto.h>
#include <dhd_wlfc.h>
#endif

#if defined(OEM_ANDROID)
#include <wl_android.h>
#endif

/* Maximum STA per radio */
#if defined(BCM_ROUTER_DHD)
#define DHD_MAX_STA     128
#else
#define DHD_MAX_STA     32
#endif /* BCM_ROUTER_DHD */

#if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
#include <ctf/hndctf.h>

#ifdef CTFPOOL
#define RXBUFPOOLSZ		2048
#define	RXBUFSZ			DHD_FLOWRING_RX_BUFPOST_PKTSZ /* packet data buffer size */
#endif /* CTFPOOL */
#endif /* BCM_ROUTER_DHD && HNDCTF */

#ifdef BCMDBG
#include <dhd_macdbg.h>
#endif /* BCMDBG */

#ifdef DHD_EVENT_LOG_FILTER
#include <dhd_event_log_filter.h>
#endif /* DHD_EVENT_LOG_FILTER */

#ifdef DHDTCPSYNC_FLOOD_BLK
static void dhd_blk_tsfl_handler(struct work_struct * work);
#endif /* DHDTCPSYNC_FLOOD_BLK */

#ifdef WL_NATOE
#include <dhd_linux_nfct.h>
#endif /* WL_NATOE */

#ifdef DHD_TX_PROFILE
#include <bcmarp.h>
#include <bcmicmp.h>
#include <bcmudp.h>
#include <bcmproto.h>
#endif /* defined(DHD_TX_PROFILE) */

#if defined(DHD_TCP_WINSIZE_ADJUST)
static uint target_ports[MAX_TARGET_PORTS] = {20, 0, 0, 0, 0};
static uint dhd_use_tcp_window_size_adjust = FALSE;
static void dhd_adjust_tcp_winsize(int op_mode, struct sk_buff *skb);
#endif /* DHD_TCP_WINSIZE_ADJUST */

#ifdef SET_RANDOM_MAC_SOFTAP
#ifndef CONFIG_DHD_SET_RANDOM_MAC_VAL
#define CONFIG_DHD_SET_RANDOM_MAC_VAL	0x001A11
#endif
static u32 vendor_oui = CONFIG_DHD_SET_RANDOM_MAC_VAL;
#endif /* SET_RANDOM_MAC_SOFTAP */

#if defined(BCM_ROUTER_DHD)
/*
 * Queue budget: Minimum number of packets that a queue must be allowed to hold
 * to prevent starvation.
 */
#define DHD_QUEUE_BUDGET_DEFAULT    (256)
int dhd_queue_budget = DHD_QUEUE_BUDGET_DEFAULT;

module_param(dhd_queue_budget, int, 0);

/*
 * Per station pkt threshold: Sum total of all packets in the backup queues of
 * flowrings belonging to the station, not including packets already admitted
 * to flowrings.
 */
#define DHD_STA_THRESHOLD_DEFAULT   (2048)
int dhd_sta_threshold = DHD_STA_THRESHOLD_DEFAULT;
module_param(dhd_sta_threshold, int, 0);

/*
 * Per interface pkt threshold: Sum total of all packets in the backup queues of
 * flowrings belonging to the interface, not including packets already admitted
 * to flowrings.
 */
#define DHD_IF_THRESHOLD_DEFAULT   (2048 * 32)
int dhd_if_threshold = DHD_IF_THRESHOLD_DEFAULT;
module_param(dhd_if_threshold, int, 0);
#endif /* BCM_ROUTER_DHD */
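
/*
 * Worked reading of the defaults above (illustrative): one station may hold
 * at most dhd_sta_threshold (2048) not-yet-admitted packets across all of
 * its flowrings' backup queues, and one interface at most dhd_if_threshold
 * (2048 * 32 == 65536), while every individual queue is still allowed at
 * least dhd_queue_budget (256) packets, so a busy neighbor cannot starve a
 * queue completely.
 */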

/* XXX: where does this belong? */
/* XXX: this needs to be reviewed for the host OS. */
const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
#define WME_PRIO2AC(prio)  wme_fifo2ac[prio2fifo[(prio)]]
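
/*
 * Worked example: 802.1D priority 6 maps through prio2fifo[6] == 3 to
 * access category wme_fifo2ac[3] == 3, while priority 0 maps through
 * prio2fifo[0] == 1 to wme_fifo2ac[1] == 1:
 *
 *	WME_PRIO2AC(6);	// == 3
 *	WME_PRIO2AC(0);	// == 1
 */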

#ifdef ARP_OFFLOAD_SUPPORT
void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx);
static int dhd_inetaddr_notifier_call(struct notifier_block *this,
	unsigned long event, void *ptr);
static struct notifier_block dhd_inetaddr_notifier = {
	.notifier_call = dhd_inetaddr_notifier_call
};
/* Guard against registering the same notifier twice; doing so would likely
 * create a loop in the kernel notifier linked list (with 'next' pointing to
 * itself).
 */
static bool dhd_inetaddr_notifier_registered = FALSE;
#endif /* ARP_OFFLOAD_SUPPORT */
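
/*
 * Sketch of the guard pattern described above (the actual registration
 * happens later in this file); the flag keeps the notifier from ever being
 * put on the chain twice:
 *
 *	if (!dhd_inetaddr_notifier_registered) {
 *		dhd_inetaddr_notifier_registered = TRUE;
 *		register_inetaddr_notifier(&dhd_inetaddr_notifier);
 *	}
 */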

#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
int dhd_inet6addr_notifier_call(struct notifier_block *this,
	unsigned long event, void *ptr);
static struct notifier_block dhd_inet6addr_notifier = {
	.notifier_call = dhd_inet6addr_notifier_call
};
/* Guard against registering the same notifier twice; doing so would likely
 * create a loop in the kernel notifier linked list (with 'next' pointing to
 * itself).
 */
static bool dhd_inet6addr_notifier_registered = FALSE;
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */

#if defined(CONFIG_PM_SLEEP)
#include <linux/suspend.h>
volatile bool dhd_mmc_suspend = FALSE;
DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
#ifdef ENABLE_WAKEUP_PKT_DUMP
volatile bool dhd_mmc_wake = FALSE;
long long temp_raw;
#endif /* ENABLE_WAKEUP_PKT_DUMP */
#endif /* defined(CONFIG_PM_SLEEP) */

#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(FORCE_WOWLAN)
extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
#endif /* OOB_INTR_ONLY || BCMSPI_ANDROID || FORCE_WOWLAN */
#if defined(OEM_ANDROID)
static void dhd_hang_process(struct work_struct *work_data);
#endif /* OEM_ANDROID */
MODULE_LICENSE("GPL and additional rights");

#if defined(MULTIPLE_SUPPLICANT)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
DEFINE_MUTEX(_dhd_mutex_lock_);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
#endif
static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force);

#include <dhd_bus.h>

/* XXX Set up an MTU change notifier per linux/notifier.h? */
#ifndef PROP_TXSTATUS
#define DBUS_RX_BUFFER_SIZE_DHD(net)	(net->mtu + net->hard_header_len + dhd->pub.hdrlen)
#else
#define DBUS_RX_BUFFER_SIZE_DHD(net)	(net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
#endif
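
/*
 * Worked example (values illustrative): for a standard Ethernet netdev with
 * net->mtu == 1500 and net->hard_header_len == 14, and assuming a bus header
 * of dhd->pub.hdrlen == 12, DBUS_RX_BUFFER_SIZE_DHD(net) evaluates to
 * 1500 + 14 + 12 == 1526 bytes; with PROP_TXSTATUS the extra 128-byte pad
 * (1654 bytes total) presumably leaves room for wlfc signaling data.
 */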

#ifdef PROP_TXSTATUS
extern bool dhd_wlfc_skip_fc(void * dhdp, uint8 idx);
extern void dhd_wlfc_plat_init(void *dhd);
extern void dhd_wlfc_plat_deinit(void *dhd);
#endif /* PROP_TXSTATUS */
#ifdef USE_DYNAMIC_F2_BLKSIZE
extern uint sd_f2_blocksize;
extern int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size);
#endif /* USE_DYNAMIC_F2_BLKSIZE */

/* Linux wireless extension support */
#if defined(WL_WIRELESS_EXT)
#include <wl_iw.h>
#endif /* defined(WL_WIRELESS_EXT) */

#ifdef CONFIG_PARTIALSUSPEND_SLP
/* XXX SLP uses a different earlysuspend header file and some different
 * functions, but the meaning is mostly the same as on Android.
 */
#include <linux/partialsuspend_slp.h>
#define CONFIG_HAS_EARLYSUSPEND
#define DHD_USE_EARLYSUSPEND
#define register_early_suspend		register_pre_suspend
#define unregister_early_suspend	unregister_pre_suspend
#define early_suspend				pre_suspend
#define EARLY_SUSPEND_LEVEL_BLANK_SCREEN		50
#else
#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
#include <linux/earlysuspend.h>
#endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
#endif /* CONFIG_PARTIALSUSPEND_SLP */

#ifdef CONFIG_IRQ_HISTORY
#include <linux/power/irq_history.h>
#endif /* CONFIG_IRQ_HISTORY */

#if defined(OEM_ANDROID)
#include <linux/nl80211.h>
#endif /* OEM_ANDROID */

#if defined(PKT_FILTER_SUPPORT) && defined(APF)
static int __dhd_apf_add_filter(struct net_device *ndev, uint32 filter_id,
	u8* program, uint32 program_len);
static int __dhd_apf_config_filter(struct net_device *ndev, uint32 filter_id,
	uint32 mode, uint32 enable);
static int __dhd_apf_delete_filter(struct net_device *ndev, uint32 filter_id);
#endif /* PKT_FILTER_SUPPORT && APF */

#ifdef DHD_FW_COREDUMP
static void dhd_mem_dump(void *dhd_info, void *event_info, u8 event);
#endif /* DHD_FW_COREDUMP */

#ifdef DHD_LOG_DUMP

struct dhd_log_dump_buf g_dld_buf[DLD_BUFFER_NUM];

/* Only headers for the log dump ring buffers are stored in this array;
 * headers for sections like 'dhd dump' and 'ext trap' are not, because
 * those sections are not log ring buffers.
 */
dld_hdr_t dld_hdrs[DLD_BUFFER_NUM] = {
		{GENERAL_LOG_HDR, LOG_DUMP_SECTION_GENERAL},
		{PRESERVE_LOG_HDR, LOG_DUMP_SECTION_PRESERVE},
		{SPECIAL_LOG_HDR, LOG_DUMP_SECTION_SPECIAL}
};
static int dld_buf_size[DLD_BUFFER_NUM] = {
		LOG_DUMP_GENERAL_MAX_BUFSIZE,	/* DLD_BUF_TYPE_GENERAL */
		LOG_DUMP_PRESERVE_MAX_BUFSIZE,	/* DLD_BUF_TYPE_PRESERVE */
		LOG_DUMP_SPECIAL_MAX_BUFSIZE,	/* DLD_BUF_TYPE_SPECIAL */
};

static void dhd_log_dump_init(dhd_pub_t *dhd);
static void dhd_log_dump_deinit(dhd_pub_t *dhd);
static void dhd_log_dump(void *handle, void *event_info, u8 event);
static int do_dhd_log_dump(dhd_pub_t *dhdp, log_dump_type_t *type);
static void dhd_print_buf_addr(dhd_pub_t *dhdp, char *name, void *buf, unsigned int size);
static void dhd_log_dump_buf_addr(dhd_pub_t *dhdp, log_dump_type_t *type);
char *dhd_dbg_get_system_timestamp(void);
#endif /* DHD_LOG_DUMP */

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
#include <linux/workqueue.h>
#include <linux/pm_runtime.h>
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

#ifdef DHD_DEBUG_UART
#include <linux/kmod.h>
#define DHD_DEBUG_UART_EXEC_PATH	"/system/bin/wldu"
static void dhd_debug_uart_exec_rd(void *handle, void *event_info, u8 event);
static void dhd_debug_uart_exec(dhd_pub_t *dhdp, char *cmd);
#endif	/* DHD_DEBUG_UART */

static int dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused);
static struct notifier_block dhd_reboot_notifier = {
	.notifier_call = dhd_reboot_callback,
	.priority = 1,
};

#ifdef OEM_ANDROID
#ifdef BCMPCIE
static int is_reboot = 0;
#endif /* BCMPCIE */
#endif /* OEM_ANDROID */

dhd_pub_t	*g_dhd_pub = NULL;

#if defined(BT_OVER_SDIO)
#include "dhd_bt_interface.h"
#endif /* defined (BT_OVER_SDIO) */

#ifdef WL_NANHO
static int dhd_nho_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf);
static int dhd_nho_ioctl_cb(void *drv_ctx, int ifidx, wl_ioctl_t *ioc, bool drv_lock);
static int dhd_nho_evt_cb(void *drv_ctx, int ifidx, bcm_event_t *evt, uint16 evt_len);
#endif /* WL_NANHO */

#ifdef WL_STATIC_IF
bool dhd_is_static_ndev(dhd_pub_t *dhdp, struct net_device *ndev);
#endif /* WL_STATIC_IF */

atomic_t exit_in_progress = ATOMIC_INIT(0);

static void dhd_process_daemon_msg(struct sk_buff *skb);
static void dhd_destroy_to_notifier_skt(void);
static int dhd_create_to_notifier_skt(void);
static struct sock *nl_to_event_sk = NULL;
int sender_pid = 0;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
struct netlink_kernel_cfg dhd_netlink_cfg = {
	.groups = 1,
	.input = dhd_process_daemon_msg,
};
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) */

#ifdef DHD_PKTTS
static int dhd_create_to_notifier_ts(void);
static void dhd_destroy_to_notifier_ts(void);

static struct sock *nl_to_ts = NULL;
int sender_pid_ts = 0;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
static void dhd_recv_msg_from_ts(struct sk_buff *skb);

struct netlink_kernel_cfg dhd_netlink_ts = {
	.groups = 1,
	.input = dhd_recv_msg_from_ts,
};
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) */

#define GET_METADATA_VER(val)		((uint16)((val & 0xffff0000) >> 16))
#define GET_METADATA_BUFLEN(val)	((uint16)(val & 0x0000ffff))
#endif /* DHD_PKTTS */
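
/*
 * Worked example: these macros unpack a 32-bit word carrying the metadata
 * version in its upper 16 bits and the metadata buffer length in its lower
 * 16 bits. For val == 0x00030040:
 *
 *	GET_METADATA_VER(0x00030040);		// == 3
 *	GET_METADATA_BUFLEN(0x00030040);	// == 0x40 (64 bytes)
 */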

#if defined(BT_OVER_SDIO)
/* Flag to indicate if driver is initialized */
uint dhd_driver_init_done = TRUE;
#else
/* Flag to indicate if driver is initialized */
uint dhd_driver_init_done = FALSE;
#endif
/* Flag to indicate if we should download firmware on driver load */
uint dhd_download_fw_on_driverload = TRUE;

/* Definitions to provide path to the firmware and nvram
 * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
 */
char firmware_path[MOD_PARAM_PATHLEN];
char nvram_path[MOD_PARAM_PATHLEN];
char clm_path[MOD_PARAM_PATHLEN];
char config_path[MOD_PARAM_PATHLEN];
#ifdef DHD_UCODE_DOWNLOAD
char ucode_path[MOD_PARAM_PATHLEN];
#endif /* DHD_UCODE_DOWNLOAD */

module_param_string(clm_path, clm_path, MOD_PARAM_PATHLEN, 0660);

/* backup buffer for firmware and nvram path */
char fw_bak_path[MOD_PARAM_PATHLEN];
char nv_bak_path[MOD_PARAM_PATHLEN];

/* information string to keep firmware, chip, and chip revision info visible in the log */
char info_string[MOD_PARAM_INFOLEN];
module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444);
int op_mode = 0;
int disable_proptx = 0;
module_param(op_mode, int, 0644);
#if defined(OEM_ANDROID)
extern int wl_control_wl_start(struct net_device *dev);
#if defined(BCMLXSDMMC) || defined(BCMDBUS)
struct semaphore dhd_registration_sem;
#endif /* BCMLXSDMMC || BCMDBUS */
#endif /* defined(OEM_ANDROID) */
void dhd_generate_rand_mac_addr(struct ether_addr *ea_addr);

#ifdef DHD_LOG_DUMP
int logdump_max_filesize = LOG_DUMP_MAX_FILESIZE;
module_param(logdump_max_filesize, int, 0644);
int logdump_max_bufsize = LOG_DUMP_GENERAL_MAX_BUFSIZE;
module_param(logdump_max_bufsize, int, 0644);
int logdump_periodic_flush = FALSE;
module_param(logdump_periodic_flush, int, 0644);
#ifdef EWP_ECNTRS_LOGGING
int logdump_ecntr_enable = TRUE;
#else
int logdump_ecntr_enable = FALSE;
#endif /* EWP_ECNTRS_LOGGING */
module_param(logdump_ecntr_enable, int, 0644);
#ifdef EWP_RTT_LOGGING
int logdump_rtt_enable = TRUE;
#else
int logdump_rtt_enable = FALSE;
#endif /* EWP_RTT_LOGGING */
int logdump_prsrv_tailsize = DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE;
#endif /* DHD_LOG_DUMP */

#ifdef EWP_EDL
int host_edl_support = TRUE;
module_param(host_edl_support, int, 0644);
#endif

/* deferred handlers */
static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event);
static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event);
static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event);
static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event);
#ifdef BCM_ROUTER_DHD
static void dhd_inform_dhd_monitor_handler(void *handle, void *event_info, u8 event);
#endif
#ifdef WL_NATOE
static void dhd_natoe_ct_event_hanlder(void *handle, void *event_info, u8 event);
static void dhd_natoe_ct_ioctl_handler(void *handle, void *event_info, uint8 event);
#endif /* WL_NATOE */

#ifdef DHD_UPDATE_INTF_MAC
static void dhd_ifupdate_event_handler(void *handle, void *event_info, u8 event);
#endif /* DHD_UPDATE_INTF_MAC */
#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event);
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
#ifdef WL_CFG80211
extern void dhd_netdev_free(struct net_device *ndev);
#endif /* WL_CFG80211 */
static dhd_if_t * dhd_get_ifp_by_ndev(dhd_pub_t *dhdp, struct net_device *ndev);

#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG)
static void dhd_bridge_dev_set(dhd_info_t * dhd, int ifidx, struct net_device * dev);
#endif /* defined(WLDWDS) && defined(FOURADDR_AUTO_BRG) */

#if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER))
/* update rx_pkt_chainable state of dhd interface */
static void dhd_update_rx_pkt_chainable_state(dhd_pub_t* dhdp, uint32 idx);
#endif /* DHD_WET || DHD_MCAST_REGEN || DHD_L2_FILTER */

/* Error bits */
module_param(dhd_msg_level, int, 0);
#if defined(WL_WIRELESS_EXT)
module_param(iw_msg_level, int, 0);
#endif
#ifdef WL_CFG80211
module_param(wl_dbg_level, int, 0);
#endif
module_param(android_msg_level, int, 0);
module_param(config_msg_level, int, 0);

#ifdef ARP_OFFLOAD_SUPPORT
/* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
/* XXX ARP Host Auto-Reply can cause a dongle trap in VSDB situations */
/* XXX ARP OL SNOOP can be used for better quality */

#ifdef ENABLE_ARP_SNOOP_MODE
uint dhd_arp_mode = (ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_SNOOP | ARP_OL_HOST_AUTO_REPLY |
		ARP_OL_UPDATE_HOST_CACHE);
#else
uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_UPDATE_HOST_CACHE;
#endif /* ENABLE_ARP_SNOOP_MODE */

module_param(dhd_arp_mode, uint, 0);
#endif /* ARP_OFFLOAD_SUPPORT */

/* Disable Prop tx */
module_param(disable_proptx, int, 0644);
/* load firmware and/or nvram values from the filesystem */
module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660);
module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0660);
module_param_string(config_path, config_path, MOD_PARAM_PATHLEN, 0);
#ifdef DHD_UCODE_DOWNLOAD
module_param_string(ucode_path, ucode_path, MOD_PARAM_PATHLEN, 0660);
#endif /* DHD_UCODE_DOWNLOAD */

/* wl event forwarding */
#ifdef WL_EVENT_ENAB
uint wl_event_enable = true;
#else
uint wl_event_enable = false;
#endif /* WL_EVENT_ENAB */
module_param(wl_event_enable, uint, 0660);

/* logtrace packet sendup */
#ifdef LOGTRACE_PKT_SENDUP
uint logtrace_pkt_sendup = true;
#else
uint logtrace_pkt_sendup = false;
#endif /* LOGTRACE_PKT_SENDUP */
module_param(logtrace_pkt_sendup, uint, 0660);

/* Watchdog interval */
/* extend watchdog expiration to 2 seconds when DPC is running */
#define WATCHDOG_EXTEND_INTERVAL (2000)

uint dhd_watchdog_ms = CUSTOM_DHD_WATCHDOG_MS;
module_param(dhd_watchdog_ms, uint, 0);

#ifdef DHD_PCIE_RUNTIMEPM
uint dhd_runtimepm_ms = CUSTOM_DHD_RUNTIME_MS;
#endif /* DHD_PCIE_RUNTIMEPM */
#if defined(DHD_DEBUG)
/* Console poll interval */
#if defined(OEM_ANDROID)
uint dhd_console_ms = 0;  /* XXX andrey: by default no fw msg prints */
#else
uint dhd_console_ms = 250;
#endif /* OEM_ANDROID */
module_param(dhd_console_ms, uint, 0644);
#else
uint dhd_console_ms = 0;
#endif /* DHD_DEBUG */

uint dhd_slpauto = TRUE;
module_param(dhd_slpauto, uint, 0);

#ifdef PKT_FILTER_SUPPORT
/* Global Pkt filter enable control */
uint dhd_pkt_filter_enable = TRUE;
module_param(dhd_pkt_filter_enable, uint, 0);
#endif

/* Pkt filter init setup */
uint dhd_pkt_filter_init = 0;
module_param(dhd_pkt_filter_init, uint, 0);

/* Pkt filter mode control */
#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
uint dhd_master_mode = FALSE;
#else
uint dhd_master_mode = FALSE;
#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
module_param(dhd_master_mode, uint, 0);

int dhd_watchdog_prio = 0;
module_param(dhd_watchdog_prio, int, 0);

/* DPC thread priority */
int dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING;
module_param(dhd_dpc_prio, int, 0);

/* RX frame thread priority */
int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING;
module_param(dhd_rxf_prio, int, 0);

#if !defined(BCMDBUS)
extern int dhd_dongle_ramsize;
module_param(dhd_dongle_ramsize, int, 0);
#endif /* !BCMDBUS */

#ifdef WL_CFG80211
int passive_channel_skip = 0;
module_param(passive_channel_skip, int, (S_IRUSR|S_IWUSR));
#endif /* WL_CFG80211 */
static dhd_if_t * dhd_get_ifp_by_ndev(dhd_pub_t *dhdp, struct net_device *ndev);

#ifdef DHD_MSI_SUPPORT
uint enable_msi = TRUE;
module_param(enable_msi, uint, 0);
#endif /* DHD_MSI_SUPPORT */

#ifdef DHD_SSSR_DUMP
int dhdpcie_sssr_dump_get_before_after_len(dhd_pub_t *dhd, uint32 *arr_len);
module_param(sssr_enab, uint, 0);
module_param(fis_enab, uint, 0);
#endif /* DHD_SSSR_DUMP */

/* Keep track of number of instances */
static int dhd_found = 0;
static int instance_base = 0; /* Starting instance number */
module_param(instance_base, int, 0644);

#if defined(DHD_LB_RXP) && defined(PCIE_FULL_DONGLE)
/*
 * The Rx path processes a budget (dhd_napi_weight) of packets in one go and
 * then hands the packets over to the network stack.
 *
 * The dhd_dpc tasklet is the producer (packets received from the dongle) and
 * dhd_napi_poll() is the consumer. The maximum number of packets that can be
 * received from the dongle at any given point in time is
 * D2HRING_RXCMPLT_MAX_ITEM. DHD also always posts fresh rx buffers to the
 * dongle while processing rx completions.
 *
 * The consumer must consume packets at an equal or better rate than the
 * producer; i.e. if dhd_napi_poll() does not process at the same rate as the
 * producer (dhd_dpc), the rx_process_queue depth increases and can even
 * consume the entire system memory. Such a situation is taken care of by rx
 * flow control.
 *
 * Device drivers are strongly advised not to use a value bigger than
 * NAPI_POLL_WEIGHT.
 */
static int dhd_napi_weight = NAPI_POLL_WEIGHT;
module_param(dhd_napi_weight, int, 0644);
#endif /* DHD_LB_RXP && PCIE_FULL_DONGLE */
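
/*
 * Minimal sketch of the consumer side described above (illustrative only;
 * example_rx_one_packet() is hypothetical and dhd's real poll routine is
 * defined elsewhere). A NAPI poll callback drains at most `budget`
 * (== dhd_napi_weight) packets and only re-arms interrupts via
 * napi_complete_done() when it consumed less than the budget:
 *
 *	static int example_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		int done = 0;
 *
 *		while (done < budget && example_rx_one_packet(napi))
 *			done++;
 *		if (done < budget)
 *			napi_complete_done(napi, done);
 *		return done;
 *	}
 */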

#ifdef PCIE_FULL_DONGLE
extern int h2d_max_txpost;
module_param(h2d_max_txpost, int, 0644);

#if defined(DHD_HTPUT_TUNABLES)
extern int h2d_htput_max_txpost;
module_param(h2d_htput_max_txpost, int, 0644);
#endif /* DHD_HTPUT_TUNABLES */

#ifdef AGG_H2D_DB
extern bool agg_h2d_db_enab;
module_param(agg_h2d_db_enab, bool, 0644);
extern uint agg_h2d_db_timeout;
module_param(agg_h2d_db_timeout, uint, 0644);
extern uint agg_h2d_db_inflight_thresh;
module_param(agg_h2d_db_inflight_thresh, uint, 0644);
#endif /* AGG_H2D_DB */

extern uint dma_ring_indices;
module_param(dma_ring_indices, uint, 0644);

extern bool h2d_phase;
module_param(h2d_phase, bool, 0644);
extern bool force_trap_bad_h2d_phase;
module_param(force_trap_bad_h2d_phase, bool, 0644);
#endif /* PCIE_FULL_DONGLE */

#ifdef FORCE_TPOWERON
/*
 * On Fire's reference platform, coming out of L1.2 there is a constant delay
 * of 45us between CLKREQ# and a stable REFCLK. Due to this delay, with
 * tPowerOn < 50us there is a chance that the refclk sense triggers on noise.
 *
 * 0x29, when written to L1SSControl2, translates to 50us.
 */
#define FORCE_TPOWERON_50US 0x29
uint32 tpoweron_scale = FORCE_TPOWERON_50US; /* default 50us */
module_param(tpoweron_scale, uint, 0644);
#endif /* FORCE_TPOWERON */
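
/*
 * Worked decode of the 0x29 encoding, assuming the PCIe L1SS register
 * layout: in L1SSControl2, bits [1:0] select the T_PowerOn scale
 * (0 == 2us, 1 == 10us, 2 == 100us) and bits [7:3] hold the T_PowerOn
 * value. 0x29 == 0b00101001, i.e. scale == 1 (10us) and value == 5,
 * giving 5 * 10us == 50us.
 */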

#ifdef SHOW_LOGTRACE
#if defined(CUSTOMER_HW4_DEBUG)
#define WIFI_PATH "/etc/wifi/"
static char *logstrs_path = VENDOR_PATH WIFI_PATH"logstrs.bin";
char *st_str_file_path = VENDOR_PATH WIFI_PATH"rtecdc.bin";
static char *map_file_path = VENDOR_PATH WIFI_PATH"rtecdc.map";
static char *rom_st_str_file_path = VENDOR_PATH WIFI_PATH"roml.bin";
static char *rom_map_file_path = VENDOR_PATH WIFI_PATH"roml.map";
#else
static char *logstrs_path = PLATFORM_PATH"logstrs.bin";
char *st_str_file_path = PLATFORM_PATH"rtecdc.bin";
static char *map_file_path = PLATFORM_PATH"rtecdc.map";
static char *rom_st_str_file_path = PLATFORM_PATH"roml.bin";
static char *rom_map_file_path = PLATFORM_PATH"roml.map";
#endif /* CUSTOMER_HW4_DEBUG */

static char *ram_file_str = "rtecdc";
static char *rom_file_str = "roml";

module_param(logstrs_path, charp, S_IRUGO);
module_param(st_str_file_path, charp, S_IRUGO);
module_param(map_file_path, charp, S_IRUGO);
module_param(rom_st_str_file_path, charp, S_IRUGO);
module_param(rom_map_file_path, charp, S_IRUGO);

static int dhd_init_logstrs_array(osl_t *osh, dhd_event_log_t *temp);
static int dhd_read_map(osl_t *osh, char *fname, uint32 *ramstart, uint32 *rodata_start,
	uint32 *rodata_end);
static int dhd_init_static_strs_array(osl_t *osh, dhd_event_log_t *temp, char *str_file,
	char *map_file);
#endif /* SHOW_LOGTRACE */

#if defined(GDB_PROXY) && defined(PCIE_FULL_DONGLE) && defined(BCMINTERNAL)
static void gdb_proxy_fs_try_create(dhd_info_t *dhd, const char *dev_name);
static void gdb_proxy_fs_remove(dhd_info_t *dhd);
#endif /* defined(GDB_PROXY) && defined(PCIE_FULL_DONGLE) && defined(BCMINTERNAL) */

#ifdef D2H_MINIDUMP
void dhd_d2h_minidump(dhd_pub_t *dhdp);
#endif /* D2H_MINIDUMP */

#define DHD_MEMDUMP_TYPE_STR_LEN 32
#define DHD_MEMDUMP_PATH_STR_LEN 128

#ifdef DHD_TX_PROFILE
/* process layer 3 headers, to ultimately determine if a
 * dhd_tx_profile_protocol_t matches
 */
static int process_layer3_headers(uint8 **p, int plen, uint16 *type);

/* process layer 2 headers, to ultimately determine if a
 * dhd_tx_profile_protocol_t matches
 */
static int process_layer2_headers(uint8 **p, int *plen, uint16 *type, bool is_host_sfhllc);

/* whether or not a dhd_tx_profile_protocol_t matches with data in a packet */
bool dhd_protocol_matches_profile(uint8 *p, int plen, const
		dhd_tx_profile_protocol_t *proto, bool is_host_sfhllc);
#endif /* defined(DHD_TX_PROFILE) */

#define PATH_BANDLOCK_INFO PLATFORM_PATH".bandlock.info"

static void dhd_set_bandlock(dhd_pub_t * dhd);

static void
dhd_tx_stop_queues(struct net_device *net)
{
#ifdef DHD_MQ
	netif_tx_stop_all_queues(net);
#else
	netif_stop_queue(net);
#endif
}

static void
dhd_tx_start_queues(struct net_device *net)
{
#ifdef DHD_MQ
	netif_tx_wake_all_queues(net);
#else
	netif_wake_queue(net);
#endif
}

#ifdef USE_WFA_CERT_CONF
int g_frameburst = 1;
#endif /* USE_WFA_CERT_CONF */

static int dhd_get_pend_8021x_cnt(dhd_info_t *dhd);

#ifdef PCIE_FULL_DONGLE
#define DHD_IF_STA_LIST_LOCK_INIT(lock) spin_lock_init(lock)

#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
static struct list_head * dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp,
	struct list_head *snapshot_list);
static void dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list);
#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ dhd_sta_list_snapshot(dhd, ifp, slist); })
#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ dhd_sta_list_snapshot_free(dhd, slist); })
#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
#endif /* PCIE_FULL_DONGLE */

/* Control fw roaming */
#ifdef BCMCCX
uint dhd_roam_disable = 0;
#else
#ifdef OEM_ANDROID
uint dhd_roam_disable = 0;
#else
uint dhd_roam_disable = 1;
#endif
#endif /* BCMCCX */

#ifdef BCMDBGFS
extern void dhd_dbgfs_init(dhd_pub_t *dhdp);
extern void dhd_dbgfs_remove(void);
#endif
/* Enable TX status metadata report: 0=disable 1=enable 2=debug */
static uint pcie_txs_metadata_enable = 0;
module_param(pcie_txs_metadata_enable, uint, 0);

/* Control radio state */
uint dhd_radio_up = 1;

/* Network interface name */
char iface_name[IFNAMSIZ] = {'\0'};
module_param_string(iface_name, iface_name, IFNAMSIZ, 0);

/* The following are specific to the SDIO dongle */

/* IOCTL response timeout */
int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;

/* DS Exit response timeout */
int ds_exit_timeout_msec = DS_EXIT_TIMEOUT;

/* Idle timeout for backplane clock */
int dhd_idletime = DHD_IDLETIME_TICKS;
module_param(dhd_idletime, int, 0);

/* Use polling */
uint dhd_poll = FALSE;
module_param(dhd_poll, uint, 0);

/* Use interrupts */
uint dhd_intr = TRUE;
module_param(dhd_intr, uint, 0);

/* SDIO Drive Strength (in milliamps) */
uint dhd_sdiod_drive_strength = 6;
module_param(dhd_sdiod_drive_strength, uint, 0);

#ifdef BCMSDIO
/* Tx/Rx bounds */
extern uint dhd_txbound;
extern uint dhd_rxbound;
module_param(dhd_txbound, uint, 0);
module_param(dhd_rxbound, uint, 0);

/* Deferred transmits */
extern uint dhd_deferred_tx;
module_param(dhd_deferred_tx, uint, 0);

#ifdef BCMINTERNAL
extern uint dhd_anychip;
module_param(dhd_anychip, uint, 0);
#endif /* BCMINTERNAL */
#endif /* BCMSDIO */

#ifdef BCMSLTGT
#ifdef BCMFPGA_HW
/* For FPGA use a fixed htclkratio of 30 */
uint htclkratio = 30;
#else
uint htclkratio = 1;
#endif /* BCMFPGA_HW */
module_param(htclkratio, uint, 0);

int dngl_xtalfreq = 0;
module_param(dngl_xtalfreq, int, 0);
#endif /* BCMSLTGT */

#ifdef SDTEST
/* Echo packet generator (pkts/s) */
uint dhd_pktgen = 0;
module_param(dhd_pktgen, uint, 0);

/* Echo packet len (0 => sawtooth, max 2040) */
uint dhd_pktgen_len = 0;
module_param(dhd_pktgen_len, uint, 0);
#endif /* SDTEST */

#ifdef CUSTOM_DSCP_TO_PRIO_MAPPING
uint dhd_dscpmap_enable = 1;
module_param(dhd_dscpmap_enable, uint, 0644);
#endif /* CUSTOM_DSCP_TO_PRIO_MAPPING */

#if defined(BCMSUP_4WAY_HANDSHAKE)
/* Use in dongle supplicant for 4-way handshake */
#if defined(WLFBT) || defined(WL_ENABLE_IDSUP)
/* Enable idsup by default (if supported in fw) */
uint dhd_use_idsup = 1;
#else
uint dhd_use_idsup = 0;
#endif /* WLFBT || WL_ENABLE_IDSUP */
module_param(dhd_use_idsup, uint, 0);
#endif /* BCMSUP_4WAY_HANDSHAKE */

#ifndef BCMDBUS
#if defined(OEM_ANDROID)
/* Allow delayed firmware download for debug purpose */
int allow_delay_fwdl = FALSE;
#elif defined(BCM_ROUTER_DHD)
/* Allow delayed firmware download for debug purpose */
int allow_delay_fwdl = FALSE;
#else
int allow_delay_fwdl = TRUE;
#endif /* OEM_ANDROID */
module_param(allow_delay_fwdl, int, 0);
#endif /* !BCMDBUS */

#ifdef GDB_PROXY
/* Adds/replaces deadman_to= in NVRAM file with deadman_to=0 */
static uint nodeadman = 0;
module_param(nodeadman, uint, 0);
#endif /* GDB_PROXY */

#ifdef ECOUNTER_PERIODIC_DISABLE
uint enable_ecounter = FALSE;
#else
uint enable_ecounter = TRUE;
#endif
module_param(enable_ecounter, uint, 0);

#ifdef BCMQT_HW
int qt_flr_reset = FALSE;
module_param(qt_flr_reset, int, 0);

int qt_dngl_timeout = 0; /* dongle attach timeout in ms */
module_param(qt_dngl_timeout, int, 0);
#endif /* BCMQT_HW */

/* TCM verification flag */
uint dhd_tcm_test_enable = FALSE;
module_param(dhd_tcm_test_enable, uint, 0644);

extern char dhd_version[];
extern char fw_version[];
extern char clm_version[];

int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
static void dhd_net_if_lock_local(dhd_info_t *dhd);
static void dhd_net_if_unlock_local(dhd_info_t *dhd);
static void dhd_suspend_lock(dhd_pub_t *dhdp);
static void dhd_suspend_unlock(dhd_pub_t *dhdp);

/* Monitor interface */
int dhd_monitor_init(void *dhd_pub);
int dhd_monitor_uninit(void);

#ifdef DHD_PM_CONTROL_FROM_FILE
bool g_pm_control;
#ifdef DHD_EXPORT_CNTL_FILE
uint32 pmmode_val = 0xFF;
#endif /* DHD_EXPORT_CNTL_FILE */
#ifdef CUSTOMER_HW10
void dhd_control_pm(dhd_pub_t *dhd, uint *);
#else
void sec_control_pm(dhd_pub_t *dhd, uint *);
#endif /* CUSTOMER_HW10 */
#endif /* DHD_PM_CONTROL_FROM_FILE */

#if defined(WL_WIRELESS_EXT)
struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
#endif /* defined(WL_WIRELESS_EXT) */

#ifdef DHD_PM_OVERRIDE
bool g_pm_override;
#endif /* DHD_PM_OVERRIDE */

#ifndef BCMDBUS
static void dhd_dpc(ulong data);
#endif /* !BCMDBUS */
/* forward decl */
extern int dhd_wait_pend8021x(struct net_device *dev);
void dhd_os_wd_timer_extend(void *bus, bool extend);

#ifdef TOE
#ifndef BDC
#error TOE requires BDC
#endif /* !BDC */
static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
#endif /* TOE */

static int dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen,
		wl_event_msg_t *event_ptr, void **data_ptr);

#if defined(CONFIG_PM_SLEEP)
static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
{
	int ret = NOTIFY_DONE;
	bool suspend = FALSE;
	dhd_info_t *dhdinfo = container_of(nfb, dhd_info_t, pm_notifier);

	BCM_REFERENCE(dhdinfo);
	BCM_REFERENCE(suspend);

	switch (action) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		suspend = TRUE;
		break;

	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		suspend = FALSE;
		break;
	}

	printf("%s: action=%ld, suspend=%d, suspend_mode=%d\n",
		__FUNCTION__, action, suspend, dhdinfo->pub.conf->suspend_mode);
	if (suspend) {
		DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub);
		if (dhdinfo->pub.conf->suspend_mode == PM_NOTIFIER)
			dhd_suspend_resume_helper(dhdinfo, suspend, 0);
#if defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS)
		dhd_wlfc_suspend(&dhdinfo->pub);
#endif /* defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS) */
		if (dhdinfo->pub.conf->suspend_mode == PM_NOTIFIER)
			dhd_conf_set_suspend_resume(&dhdinfo->pub, suspend);
		DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub);
	} else {
		if (dhdinfo->pub.conf->suspend_mode == PM_NOTIFIER)
			dhd_conf_set_suspend_resume(&dhdinfo->pub, suspend);
#if defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS)
		dhd_wlfc_resume(&dhdinfo->pub);
#endif /* defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS) */
		if (dhdinfo->pub.conf->suspend_mode == PM_NOTIFIER)
			dhd_suspend_resume_helper(dhdinfo, suspend, 0);
	}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
	KERNEL_VERSION(2, 6, 39))
	dhd_mmc_suspend = suspend;
	smp_mb();
#endif

	return ret;
}

/* Guard against registering the same notifier twice; doing so would likely
 * create a loop in the kernel notifier linked list (with 'next' pointing to
 * itself).
 */
static bool dhd_pm_notifier_registered = FALSE;

extern int register_pm_notifier(struct notifier_block *nb);
extern int unregister_pm_notifier(struct notifier_block *nb);
#endif /* CONFIG_PM_SLEEP */

/* Request scheduling of the bus rx frame */
static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb);
static void dhd_os_rxflock(dhd_pub_t *pub);
static void dhd_os_rxfunlock(dhd_pub_t *pub);

#if defined(DHD_H2D_LOG_TIME_SYNC)
static void
dhd_deferred_work_rte_log_time_sync(void *handle, void *event_info, u8 event);
#endif /* DHD_H2D_LOG_TIME_SYNC */

/** priv_link is the link between netdev and the dhdif and dhd_info structs. */
typedef struct dhd_dev_priv {
	dhd_info_t * dhd; /* cached pointer to dhd_info in netdevice priv */
	dhd_if_t   * ifp; /* cached pointer to dhd_if in netdevice priv */
	int          ifidx; /* interface index */
	void       * lkup;
} dhd_dev_priv_t;

#define DHD_DEV_PRIV_SIZE       (sizeof(dhd_dev_priv_t))
#define DHD_DEV_PRIV(dev)       ((dhd_dev_priv_t *)DEV_PRIV(dev))
#define DHD_DEV_INFO(dev)       (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
#define DHD_DEV_IFP(dev)        (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
#define DHD_DEV_IFIDX(dev)      (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)
#define DHD_DEV_LKUP(dev)		(((dhd_dev_priv_t *)DEV_PRIV(dev))->lkup)
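
/*
 * Usage sketch for the accessors above (illustrative; example_ndo_open() is
 * hypothetical): any netdev callback can recover the state cached by
 * dhd_dev_priv_save() from the net_device private area:
 *
 *	static int example_ndo_open(struct net_device *net)
 *	{
 *		dhd_info_t *dhd = DHD_DEV_INFO(net);
 *		dhd_if_t *ifp = DHD_DEV_IFP(net);
 *
 *		return (dhd && ifp && DHD_DEV_IFIDX(net) != DHD_BAD_IF) ?
 *			0 : -ENODEV;
 *	}
 */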

/** Clear the dhd net_device's private structure. */
static inline void
dhd_dev_priv_clear(struct net_device *dev)
{
	dhd_dev_priv_t * dev_priv;
	ASSERT(dev != (struct net_device *)NULL);
	dev_priv = DHD_DEV_PRIV(dev);
	dev_priv->dhd = (dhd_info_t *)NULL;
	dev_priv->ifp = (dhd_if_t *)NULL;
	dev_priv->ifidx = DHD_BAD_IF;
	dev_priv->lkup = (void *)NULL;
}

/** Setup the dhd net_device's private structure. */
static inline void
dhd_dev_priv_save(struct net_device *dev, dhd_info_t *dhd, dhd_if_t *ifp,
                  int ifidx)
{
	dhd_dev_priv_t * dev_priv;
	ASSERT(dev != (struct net_device *)NULL);
	dev_priv = DHD_DEV_PRIV(dev);
	dev_priv->dhd = dhd;
	dev_priv->ifp = ifp;
	dev_priv->ifidx = ifidx;
}

/* Return interface pointer */
struct dhd_if * dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx)
{
	ASSERT(ifidx < DHD_MAX_IFS);

	if (!dhdp || !dhdp->info || ifidx >= DHD_MAX_IFS)
		return NULL;

	return dhdp->info->iflist[ifidx];
}

#ifdef WLEASYMESH
int
dhd_set_1905_almac(dhd_pub_t *dhdp, uint8 ifidx, uint8* ea, bool mcast)
{
	dhd_if_t *ifp;

	ASSERT(ea != NULL);
	ifp = dhd_get_ifp(dhdp, ifidx);
	if (ifp == NULL) {
		return BCME_ERROR;
	}
	if (mcast) {
		memcpy(ifp->_1905_al_mcast, ea, ETHER_ADDR_LEN);
	} else {
		memcpy(ifp->_1905_al_ucast, ea, ETHER_ADDR_LEN);
	}
	return BCME_OK;
}
int
dhd_get_1905_almac(dhd_pub_t *dhdp, uint8 ifidx, uint8* ea, bool mcast)
{
	dhd_if_t *ifp;

	ASSERT(ea != NULL);
	ifp = dhd_get_ifp(dhdp, ifidx);
	if (ifp == NULL) {
		return BCME_ERROR;
	}
	if (mcast) {
		memcpy(ea, ifp->_1905_al_mcast, ETHER_ADDR_LEN);
	} else {
		memcpy(ea, ifp->_1905_al_ucast, ETHER_ADDR_LEN);
	}
	return BCME_OK;
}
#endif /* WLEASYMESH */

#ifdef PCIE_FULL_DONGLE

/** Dummy objects are defined with state representing bad|down.
 * Performance gains come from reducing branch conditionals and load shadows,
 * and from better instruction parallelism, dual issue, and use of larger
 * pipelines. Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL whenever an
 * object pointer is accessed via the dhd_sta_t.
 */
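
/*
 * Example of the pattern (illustrative; example_forward_pkt() is
 * hypothetical): because released dhd_sta_t objects point at these dummies
 * rather than NULL, hot-path code can dereference unconditionally and test
 * state once at the end; the dummy pub naturally fails the check since it
 * reports up == FALSE and busstate == DHD_BUS_DOWN:
 *
 *	// without dummies: if (sta && sta->ifp && sta->ifp->info && ...)
 *	if (sta->ifp->info->pub.up)
 *		example_forward_pkt(sta);
 */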

/* Dummy dhd_info object */
dhd_info_t dhd_info_null = {
	.pub = {
	         .info = &dhd_info_null,
#ifdef DHDTCPACK_SUPPRESS
	         .tcpack_sup_mode = TCPACK_SUP_REPLACE,
#endif /* DHDTCPACK_SUPPRESS */
#if defined(BCM_ROUTER_DHD)
	         .dhd_tm_dwm_tbl = { .dhd_dwm_enabled = TRUE },
#endif
	         .up = FALSE,
	         .busstate = DHD_BUS_DOWN
	}
};
#define DHD_INFO_NULL (&dhd_info_null)
#define DHD_PUB_NULL  (&dhd_info_null.pub)

/* Dummy netdevice object */
struct net_device dhd_net_dev_null = {
	.reg_state = NETREG_UNREGISTERED
};
#define DHD_NET_DEV_NULL (&dhd_net_dev_null)

/* Dummy dhd_if object */
dhd_if_t dhd_if_null = {
#ifdef WMF
	.wmf = { .wmf_enable = TRUE },
#endif
	.info = DHD_INFO_NULL,
	.net = DHD_NET_DEV_NULL,
	.idx = DHD_BAD_IF
};
#define DHD_IF_NULL  (&dhd_if_null)

/* XXX should we use the sta_pool[0] object as DHD_STA_NULL? */
#define DHD_STA_NULL ((dhd_sta_t *)NULL)

/** Interface STA list management. */

/** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */
static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta);
static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp);

/* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */
static void dhd_if_del_sta_list(dhd_if_t * ifp);

/* Construct/Destruct a sta pool. */
static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta);
static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta);
/* Clear the pool of dhd_sta_t objects for built-in type driver */
static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta);

/** Reset a dhd_sta object and free into the dhd pool. */
static void
dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta)
{
	int prio;

	ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID));

	ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));

	/*
	 * Flush and free all packets in all flowring's queues belonging to sta.
	 * Packets in flow ring will be flushed later.
	 */
	for (prio = 0; prio < (int)NUMPRIO; prio++) {
		uint16 flowid = sta->flowid[prio];

		if (flowid != FLOWID_INVALID) {
			unsigned long flags;
			flow_ring_node_t * flow_ring_node;

#ifdef DHDTCPACK_SUPPRESS
			/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
			 * when there is a newly coming packet from network stack.
			 */
			dhd_tcpack_info_tbl_clean(dhdp);
#endif /* DHDTCPACK_SUPPRESS */

			flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
			if (flow_ring_node) {
				flow_queue_t *queue = &flow_ring_node->queue;

				DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
				flow_ring_node->status = FLOW_RING_STATUS_STA_FREEING;

				if (!DHD_FLOW_QUEUE_EMPTY(queue)) {
					void * pkt;
					while ((pkt = dhd_flow_queue_dequeue(dhdp, queue)) !=
						NULL) {
						PKTFREE(dhdp->osh, pkt, TRUE);
					}
				}

				DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
				ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
			}
		}

		sta->flowid[prio] = FLOWID_INVALID;
	}

	id16_map_free(dhdp->staid_allocator, sta->idx);
	DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
	sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */
	sta->ifidx = DHD_BAD_IF;
	bzero(sta->ea.octet, ETHER_ADDR_LEN);
	INIT_LIST_HEAD(&sta->list);
	sta->idx = ID16_INVALID; /* implying free */
}

/** Allocate a dhd_sta object from the dhd pool. */
static dhd_sta_t *
dhd_sta_alloc(dhd_pub_t * dhdp)
{
	uint16 idx;
	dhd_sta_t * sta;
	dhd_sta_pool_t * sta_pool;

	ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));

	idx = id16_map_alloc(dhdp->staid_allocator);
	if (idx == ID16_INVALID) {
		DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__));
		return DHD_STA_NULL;
	}

	sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool);
	sta = &sta_pool[idx];

	ASSERT((sta->idx == ID16_INVALID) &&
	       (sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF));

	DHD_CUMM_CTR_INIT(&sta->cumm_ctr);

	sta->idx = idx; /* implying allocated */

	return sta;
}

/** Delete all STAs in an interface's STA list. */
static void
dhd_if_del_sta_list(dhd_if_t *ifp)
{
	dhd_sta_t *sta, *next;
	unsigned long flags;

	DHD_IF_STA_LIST_LOCK(&ifp->sta_list_lock, flags);
	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
	list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
		GCC_DIAGNOSTIC_POP();
		list_del(&sta->list);
		dhd_sta_free(&ifp->info->pub, sta);
	}

	DHD_IF_STA_LIST_UNLOCK(&ifp->sta_list_lock, flags);

	return;
}

/** Construct a pool of dhd_sta_t objects to be used by interfaces. */
static int
dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta)
{
	int idx, prio, sta_pool_memsz;
	dhd_sta_t * sta;
	dhd_sta_pool_t * sta_pool;
	void * staid_allocator;

	ASSERT(dhdp != (dhd_pub_t *)NULL);
	ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL));

	/* dhd_sta objects per radio are managed in a table. id#0 reserved. */
	staid_allocator = id16_map_init(dhdp->osh, max_sta, 1);
	if (staid_allocator == NULL) {
		DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__));
		return BCME_ERROR;
	}

	/* Pre allocate a pool of dhd_sta objects (one extra). */
	sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */
	sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz);
	if (sta_pool == NULL) {
		DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__));
		id16_map_fini(dhdp->osh, staid_allocator);
		return BCME_ERROR;
	}

	dhdp->sta_pool = sta_pool;
	dhdp->staid_allocator = staid_allocator;

	/* Initialize all sta(s) for the pre-allocated free pool. */
	bzero((uchar *)sta_pool, sta_pool_memsz);
	for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
		sta = &sta_pool[idx];
		sta->idx = id16_map_alloc(staid_allocator);
		ASSERT(sta->idx <= max_sta);
	}

	/* Now place them into the pre-allocated free pool. */
	for (idx = 1; idx <= max_sta; idx++) {
		sta = &sta_pool[idx];
		for (prio = 0; prio < (int)NUMPRIO; prio++) {
			sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
		}
		dhd_sta_free(dhdp, sta);
	}

	return BCME_OK;
}
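
/*
 * Lifecycle sketch for the pool above (illustrative, error handling
 * omitted): attach-time init, per-association alloc/free, and detach-time
 * fini pair up as follows:
 *
 *	dhd_sta_pool_init(dhdp, DHD_MAX_STA);	// at attach
 *	sta = dhd_sta_alloc(dhdp);		// station associates
 *	...
 *	dhd_sta_free(dhdp, sta);		// station leaves
 *	dhd_sta_pool_fini(dhdp, DHD_MAX_STA);	// at detach
 */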
1518 
1519 /** Destruct the pool of dhd_sta_t objects.
1520  * Caller must ensure that no STA objects are currently associated with an if.
1521  */
1522 static void
dhd_sta_pool_fini(dhd_pub_t * dhdp,int max_sta)1523 dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta)
1524 {
1525 	dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
1526 
1527 	if (sta_pool) {
1528 		int idx;
1529 		int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
1530 		for (idx = 1; idx <= max_sta; idx++) {
1531 			ASSERT(sta_pool[idx].ifp == DHD_IF_NULL);
1532 			ASSERT(sta_pool[idx].idx == ID16_INVALID);
1533 		}
1534 		MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz);
1535 	}
1536 
1537 	id16_map_fini(dhdp->osh, dhdp->staid_allocator);
1538 	dhdp->staid_allocator = NULL;
1539 }
1540 
1541 /* Clear the pool of dhd_sta_t objects for built-in type driver */
1542 static void
dhd_sta_pool_clear(dhd_pub_t * dhdp,int max_sta)1543 dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta)
1544 {
1545 	int idx, prio, sta_pool_memsz;
1546 	dhd_sta_t * sta;
1547 	dhd_sta_pool_t * sta_pool;
1548 	void *staid_allocator;
1549 
1550 	if (!dhdp) {
1551 		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
1552 		return;
1553 	}
1554 
1555 	sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
1556 	staid_allocator = dhdp->staid_allocator;
1557 
1558 	if (!sta_pool) {
1559 		DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__));
1560 		return;
1561 	}
1562 
1563 	if (!staid_allocator) {
1564 		DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__));
1565 		return;
1566 	}
1567 
1568 	/* clear free pool */
1569 	sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
1570 	bzero((uchar *)sta_pool, sta_pool_memsz);
1571 
1572 	/* dhd_sta objects per radio are managed in a table. id#0 reserved. */
1573 	id16_map_clear(staid_allocator, max_sta, 1);
1574 
1575 	/* Initialize all sta(s) for the pre-allocated free pool. */
1576 	for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
1577 		sta = &sta_pool[idx];
1578 		sta->idx = id16_map_alloc(staid_allocator);
1579 		ASSERT(sta->idx <= max_sta);
1580 	}
1581 	/* Now place them into the pre-allocated free pool. */
1582 	for (idx = 1; idx <= max_sta; idx++) {
1583 		sta = &sta_pool[idx];
1584 		for (prio = 0; prio < (int)NUMPRIO; prio++) {
1585 			sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
1586 		}
1587 		dhd_sta_free(dhdp, sta);
1588 	}
1589 }
1590 
1591 /** Find STA with MAC address ea in an interface's STA list. */
1592 dhd_sta_t *
dhd_find_sta(void * pub,int ifidx,void * ea)1593 dhd_find_sta(void *pub, int ifidx, void *ea)
1594 {
1595 	dhd_sta_t *sta;
1596 	dhd_if_t *ifp;
1597 	unsigned long flags;
1598 
1599 	ASSERT(ea != NULL);
1600 	ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1601 	if (ifp == NULL)
1602 		return DHD_STA_NULL;
1603 
1604 	DHD_IF_STA_LIST_LOCK(&ifp->sta_list_lock, flags);
1605 	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
1606 	list_for_each_entry(sta, &ifp->sta_list, list) {
1607 		GCC_DIAGNOSTIC_POP();
1608 		if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
1609 			DHD_INFO(("%s: Found STA " MACDBG "\n",
1610 				__FUNCTION__, MAC2STRDBG((char *)ea)));
1611 			DHD_IF_STA_LIST_UNLOCK(&ifp->sta_list_lock, flags);
1612 			return sta;
1613 		}
1614 	}
1615 
1616 	DHD_IF_STA_LIST_UNLOCK(&ifp->sta_list_lock, flags);
1617 
1618 	return DHD_STA_NULL;
1619 }
1620 
1621 /** Add STA into the interface's STA list. */
1622 dhd_sta_t *
1623 dhd_add_sta(void *pub, int ifidx, void *ea)
1624 {
1625 	dhd_sta_t *sta;
1626 	dhd_if_t *ifp;
1627 	unsigned long flags;
1628 
1629 	ASSERT(ea != NULL);
1630 	ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1631 	if (ifp == NULL)
1632 		return DHD_STA_NULL;
1633 
1634 	if (!memcmp(ifp->net->dev_addr, ea, ETHER_ADDR_LEN)) {
1635 		DHD_ERROR(("%s: Serious FAILURE, receive own MAC %pM !!\n", __FUNCTION__, ea));
1636 		return DHD_STA_NULL;
1637 	}
1638 
1639 	sta = dhd_sta_alloc((dhd_pub_t *)pub);
1640 	if (sta == DHD_STA_NULL) {
1641 		DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__));
1642 		return DHD_STA_NULL;
1643 	}
1644 
1645 	memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN);
1646 
1647 	/* link the sta and the dhd interface */
1648 	sta->ifp = ifp;
1649 	sta->ifidx = ifidx;
1650 #ifdef DHD_WMF
1651 	sta->psta_prim = NULL;
1652 #endif
1653 	INIT_LIST_HEAD(&sta->list);
1654 
1655 	DHD_IF_STA_LIST_LOCK(&ifp->sta_list_lock, flags);
1656 
1657 	list_add_tail(&sta->list, &ifp->sta_list);
1658 
1659 	DHD_ERROR(("%s: Adding STA " MACDBG "\n",
1660 		__FUNCTION__, MAC2STRDBG((char *)ea)));
1661 
1662 	DHD_IF_STA_LIST_UNLOCK(&ifp->sta_list_lock, flags);
1663 
1664 	return sta;
1665 }
1666 
1667 /** Delete all STAs from the interface's STA list. */
1668 void
1669 dhd_del_all_sta(void *pub, int ifidx)
1670 {
1671 	dhd_sta_t *sta, *next;
1672 	dhd_if_t *ifp;
1673 	unsigned long flags;
1674 
1675 	ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1676 	if (ifp == NULL)
1677 		return;
1678 
1679 	DHD_IF_STA_LIST_LOCK(&ifp->sta_list_lock, flags);
1680 	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
1681 	list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
1682 		GCC_DIAGNOSTIC_POP();
1683 		list_del(&sta->list);
1684 		dhd_sta_free(&ifp->info->pub, sta);
1685 #ifdef DHD_L2_FILTER
1686 		if (ifp->parp_enable) {
1687 			/* clear Proxy ARP cache of specific Ethernet Address */
1688 			bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh,
1689 					ifp->phnd_arp_table, FALSE,
1690 					sta->ea.octet, FALSE, ((dhd_pub_t*)pub)->tickcnt);
1691 		}
1692 #endif /* DHD_L2_FILTER */
1693 	}
1694 
1695 	DHD_IF_STA_LIST_UNLOCK(&ifp->sta_list_lock, flags);
1696 
1697 	return;
1698 }
1699 
1700 /** Delete STA from the interface's STA list. */
1701 void
1702 dhd_del_sta(void *pub, int ifidx, void *ea)
1703 {
1704 	dhd_sta_t *sta, *next;
1705 	dhd_if_t *ifp;
1706 	unsigned long flags;
1707 
1708 	ASSERT(ea != NULL);
1709 	ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1710 	if (ifp == NULL)
1711 		return;
1712 
1713 	DHD_IF_STA_LIST_LOCK(&ifp->sta_list_lock, flags);
1714 	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
1715 	list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
1716 		GCC_DIAGNOSTIC_POP();
1717 		if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
1718 			DHD_ERROR(("%s: Deleting STA " MACDBG "\n",
1719 				__FUNCTION__, MAC2STRDBG(sta->ea.octet)));
1720 			list_del(&sta->list);
1721 			dhd_sta_free(&ifp->info->pub, sta);
1722 		}
1723 	}
1724 
1725 	DHD_IF_STA_LIST_UNLOCK(&ifp->sta_list_lock, flags);
1726 #ifdef DHD_L2_FILTER
1727 	if (ifp->parp_enable) {
1728 		/* clear Proxy ARP cache of specific Ethernet Address */
1729 		bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, ifp->phnd_arp_table, FALSE,
1730 			ea, FALSE, ((dhd_pub_t*)pub)->tickcnt);
1731 	}
1732 #endif /* DHD_L2_FILTER */
1733 	return;
1734 }
1735 
1736 /** Add STA if it doesn't exist. Not reentrant. */
1737 dhd_sta_t*
1738 dhd_findadd_sta(void *pub, int ifidx, void *ea)
1739 {
1740 	dhd_sta_t *sta;
1741 
1742 	sta = dhd_find_sta(pub, ifidx, ea);
1743 
1744 	if (!sta) {
1745 		/* Add entry */
1746 		sta = dhd_add_sta(pub, ifidx, ea);
1747 	}
1748 
1749 	return sta;
1750 }
1751 
1752 #if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
1753 static struct list_head *
1754 dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, struct list_head *snapshot_list)
1755 {
1756 	unsigned long flags;
1757 	dhd_sta_t *sta, *snapshot;
1758 
1759 	INIT_LIST_HEAD(snapshot_list);
1760 
1761 	DHD_IF_STA_LIST_LOCK(&ifp->sta_list_lock, flags);
1762 
1763 	list_for_each_entry(sta, &ifp->sta_list, list) {
1764 		/* allocate one and add to snapshot */
1765 		snapshot = (dhd_sta_t *)MALLOC(dhd->pub.osh, sizeof(dhd_sta_t));
1766 		if (snapshot == NULL) {
1767 			DHD_ERROR(("%s: Cannot allocate memory\n", __FUNCTION__));
1768 			continue;
1769 		}
1770 
1771 		memcpy(snapshot->ea.octet, sta->ea.octet, ETHER_ADDR_LEN);
1772 
1773 		INIT_LIST_HEAD(&snapshot->list);
1774 		list_add_tail(&snapshot->list, snapshot_list);
1775 	}
1776 
1777 	DHD_IF_STA_LIST_UNLOCK(&ifp->sta_list_lock, flags);
1778 
1779 	return snapshot_list;
1780 }
1781 
1782 static void
1783 dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list)
1784 {
1785 	dhd_sta_t *sta, *next;
1786 
1787 	list_for_each_entry_safe(sta, next, snapshot_list, list) {
1788 		list_del(&sta->list);
1789 		MFREE(dhd->pub.osh, sta, sizeof(dhd_sta_t));
1790 	}
1791 }
1792 #endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
1793 
1794 #else
1795 static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {}
1796 static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; }
1797 static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {}
1798 static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {}
1799 dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; }
1800 dhd_sta_t *dhd_find_sta(void *pub, int ifidx, void *ea) { return NULL; }
1801 void dhd_del_sta(void *pub, int ifidx, void *ea) {}
1802 #endif /* PCIE_FULL_DONGLE */
1803 
1804 #ifdef BCM_ROUTER_DHD
1805 /** Bind a flowid to the dhd_sta's flowid table. */
1806 void
1807 dhd_add_flowid(dhd_pub_t * dhdp, int ifidx, uint8 ac_prio, void * ea,
1808                 uint16 flowid)
1809 {
1810 	int prio;
1811 	dhd_if_t * ifp;
1812 	dhd_sta_t * sta;
1813 	flow_queue_t * queue;
1814 
1815 	ASSERT((dhdp != (dhd_pub_t *)NULL) && (ea != NULL));
1816 
1817 	/* Fetch the dhd_if object given the if index */
1818 	ifp = dhd_get_ifp(dhdp, ifidx);
1819 	if (ifp == (dhd_if_t *)NULL) /* ifp fetched from dhdp iflist[] */
1820 		return;
1821 
1822 	/* Initializing the backup queue parameters */
1823 	if (DHD_IF_ROLE_WDS(dhdp, ifidx) ||
1824 #ifdef DHD_WET
1825 		WET_ENABLED(dhdp) ||
1826 #endif /* DHD_WET */
1827 		0) {
1828 		queue = dhd_flow_queue(dhdp, flowid);
1829 		dhd_flow_ring_config_thresholds(dhdp, flowid,
1830 			dhd_queue_budget, queue->max, DHD_FLOW_QUEUE_CLEN_PTR(queue),
1831 			dhd_if_threshold, (void *)&ifp->cumm_ctr);
1832 		return;
1833 	} else if ((sta = dhd_find_sta(dhdp, ifidx, ea)) == DHD_STA_NULL) {
1834 		/* Fetch the station with a matching MAC address. */
1835 		/* Update queue's grandparent cumulative length threshold */
1836 		if (ETHER_ISMULTI((char *)ea)) {
1837 			queue = dhd_flow_queue(dhdp, flowid);
1838 			if (ifidx != 0 && DHD_IF_ROLE_STA(dhdp, ifidx)) {
1839 				/* Use default dhdp->cumm_ctr and dhdp->l2cumm_ctr,
1840 				 * in PSTA mode the ifp will be deleted but we don't delete
1841 				 * the PSTA flowring.
1842 				 */
1843 				dhd_flow_ring_config_thresholds(dhdp, flowid,
1844 					queue->max, queue->max, DHD_FLOW_QUEUE_CLEN_PTR(queue),
1845 					dhd_if_threshold, DHD_FLOW_QUEUE_L2CLEN_PTR(queue));
1846 			}
1847 			else if (DHD_FLOW_QUEUE_L2CLEN_PTR(queue) != (void *)&ifp->cumm_ctr) {
1848 				dhd_flow_ring_config_thresholds(dhdp, flowid,
1849 					queue->max, queue->max, DHD_FLOW_QUEUE_CLEN_PTR(queue),
1850 					dhd_if_threshold, (void *)&ifp->cumm_ctr);
1851 			}
1852 		}
1853 		return;
1854 	}
1855 
1856 	/* Set queue's min budget and queue's parent cumulative length threshold */
1857 	dhd_flow_ring_config_thresholds(dhdp, flowid, dhd_queue_budget,
1858 	                                dhd_sta_threshold, (void *)&sta->cumm_ctr,
1859 	                                dhd_if_threshold, (void *)&ifp->cumm_ctr);
1860 
1861 	/* Populate the flowid into the station's flowid table, for all packet
1862 	 * priorities that would match the given flow's ac priority.
1863 	 */
1864 	for (prio = 0; prio < (int)NUMPRIO; prio++) {
1865 		if (dhdp->flow_prio_map[prio] == ac_prio) {
1866 			/* flowring shared for all these pkt prio */
1867 			sta->flowid[prio] = flowid;
1868 		}
1869 	}
1870 }
1871 
1872 /** Unbind a flowid from the sta's flowid table. */
1873 void
1874 dhd_del_flowid(dhd_pub_t * dhdp, int ifidx, uint16 flowid)
1875 {
1876 	int prio;
1877 	dhd_if_t * ifp;
1878 	dhd_sta_t * sta;
1879 	unsigned long flags;
1880 
1881 	/* Fetch the dhd_if object given the if index */
1882 	ifp = dhd_get_ifp(dhdp, ifidx);
1883 	if (ifp == (dhd_if_t *)NULL) /* ifp fetched from dhdp iflist[] */
1884 		return;
1885 
1886 	/* Walk all stations and clear any station's reference to the flowid */
1887 	DHD_IF_STA_LIST_LOCK(&ifp->sta_list_lock, flags);
1888 
1889 	list_for_each_entry(sta, &ifp->sta_list, list) {
1890 		for (prio = 0; prio < (int)NUMPRIO; prio++) {
1891 			if (sta->flowid[prio] == flowid) {
1892 				sta->flowid[prio] = FLOWID_INVALID;
1893 			}
1894 		}
1895 	}
1896 
1897 	DHD_IF_STA_LIST_UNLOCK(&ifp->sta_list_lock, flags);
1898 }
1899 #endif /* BCM_ROUTER_DHD */
1900 
1901 #if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
1902 void
1903 dhd_axi_error_dispatch(dhd_pub_t *dhdp)
1904 {
1905 	dhd_info_t *dhd = dhdp->info;
1906 	schedule_work(&dhd->axi_error_dispatcher_work);
1907 }
1908 
1909 static void dhd_axi_error_dispatcher_fn(struct work_struct * work)
1910 {
1911 	struct dhd_info *dhd =
1912 		container_of(work, struct dhd_info, axi_error_dispatcher_work);
1913 	dhd_axi_error(&dhd->pub);
1914 }
1915 #endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
1916 
1917 /** Returns the dhd iflist index corresponding to the bssidx provided by apps */
1918 int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx)
1919 {
1920 	dhd_if_t *ifp;
1921 	dhd_info_t *dhd = dhdp->info;
1922 	int i;
1923 
1924 	ASSERT(bssidx < DHD_MAX_IFS);
1925 	ASSERT(dhdp);
1926 
1927 	for (i = 0; i < DHD_MAX_IFS; i++) {
1928 		ifp = dhd->iflist[i];
1929 		if (ifp && (ifp->bssidx == bssidx)) {
1930 			DHD_TRACE(("Index manipulated for %s from %d to %d\n",
1931 				ifp->name, bssidx, i));
1932 			break;
1933 		}
1934 	}
1935 	return i;
1936 }
1937 
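/** Producer side of the rxf ring buffer (MAXSKBPEND slots): store the skb at
 * store_idx under the rxf lock. If that slot is still occupied, the enqueue
 * fails (or returns BCME_BUSY when RXF_DEQUEUE_ON_BUSY is defined).
 */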
1938 static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb)
1939 {
1940 	uint32 store_idx;
1941 	uint32 sent_idx;
1942 
1943 	if (!skb) {
1944 		DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
1945 		return BCME_ERROR;
1946 	}
1947 
1948 	dhd_os_rxflock(dhdp);
1949 	store_idx = dhdp->store_idx;
1950 	sent_idx = dhdp->sent_idx;
1951 	if (dhdp->skbbuf[store_idx] != NULL) {
1952 		/* Make sure the previous packets are processed */
1953 		dhd_os_rxfunlock(dhdp);
1954 #ifdef RXF_DEQUEUE_ON_BUSY
1955 		DHD_TRACE(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
1956 			skb, store_idx, sent_idx));
1957 		return BCME_BUSY;
1958 #else /* RXF_DEQUEUE_ON_BUSY */
1959 		DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
1960 			skb, store_idx, sent_idx));
1961 		/* removed msleep here, should use wait_event_timeout if we
1962 		 * want to give rx frame thread a chance to run
1963 		 */
1964 #if defined(WAIT_DEQUEUE)
1965 		OSL_SLEEP(1);
1966 #endif
1967 		return BCME_ERROR;
1968 #endif /* RXF_DEQUEUE_ON_BUSY */
1969 	}
1970 	DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
1971 		skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1)));
1972 	dhdp->skbbuf[store_idx] = skb;
1973 	dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1);
1974 	dhd_os_rxfunlock(dhdp);
1975 
1976 	return BCME_OK;
1977 }
1978 
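/** Consumer side of the rxf ring buffer: remove and return the skb at
 * sent_idx under the rxf lock, or NULL if the ring is empty.
 */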
1979 static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp)
1980 {
1981 	uint32 store_idx;
1982 	uint32 sent_idx;
1983 	void *skb;
1984 
1985 	dhd_os_rxflock(dhdp);
1986 
1987 	store_idx = dhdp->store_idx;
1988 	sent_idx = dhdp->sent_idx;
1989 	skb = dhdp->skbbuf[sent_idx];
1990 
1991 	if (skb == NULL) {
1992 		dhd_os_rxfunlock(dhdp);
1993 		DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
1994 			store_idx, sent_idx));
1995 		return NULL;
1996 	}
1997 
1998 	dhdp->skbbuf[sent_idx] = NULL;
1999 	dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1);
2000 
2001 	DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
2002 		skb, sent_idx));
2003 
2004 	dhd_os_rxfunlock(dhdp);
2005 
2006 	return skb;
2007 }
2008 
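/** Pre-process (prepost=TRUE): read the CIS/OTP and derive the module CID and
 * MAC address. Post-process (prepost=FALSE): write the MAC address back and
 * release the CIS buffer.
 */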
2009 int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
2010 {
2011 #if defined(BCMSDIO) || defined(BCMPCIE)
2012 	uint chipid = dhd_bus_chip_id(dhdp);
2013 	int ret = BCME_OK;
2014 	if (prepost) { /* pre process */
2015 		ret = dhd_alloc_cis(dhdp);
2016 		if (ret != BCME_OK) {
2017 			return ret;
2018 		}
2019 		switch (chipid) {
2020 #ifndef DHD_READ_CIS_FROM_BP
2021 			case BCM4389_CHIP_GRPID:
2022 				/* BCM4389B0 or higher revs use the new OTP iovar */
2023 				dhd_read_otp_sw_rgn(dhdp);
2024 				break;
2025 #endif /* !DHD_READ_CIS_FROM_BP */
2026 			default:
2027 				dhd_read_cis(dhdp);
2028 				break;
2029 		}
2030 		dhd_check_module_cid(dhdp);
2031 		dhd_check_module_mac(dhdp);
2032 		dhd_set_macaddr_from_file(dhdp);
2033 	} else { /* post process */
2034 		dhd_write_macaddr(&dhdp->mac);
2035 		dhd_clear_cis(dhdp);
2036 	}
2037 #endif /* BCMSDIO || BCMPCIE */
2038 
2039 	return BCME_OK;
2040 }
2041 
2042 // terence 20160615: fix build error if ARP_OFFLOAD_SUPPORT is removed
2043 #if defined(PKT_FILTER_SUPPORT)
2044 #if defined(ARP_OFFLOAD_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
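/** Decide whether the ARP packet filter should be applied: TRUE for IBSS and
 * for P2P GO/GC operating modes, FALSE otherwise.
 */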
2045 static bool
2046 _turn_on_arp_filter(dhd_pub_t *dhd, int op_mode_param)
2047 {
2048 	bool _apply = FALSE;
2049 	/* In case of IBSS mode, apply arp pkt filter */
2050 	if (op_mode_param & DHD_FLAG_IBSS_MODE) {
2051 		_apply = TRUE;
2052 		goto exit;
2053 	}
2054 	/* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
2055 	if (op_mode_param & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE)) {
2056 		_apply = TRUE;
2057 		goto exit;
2058 	}
2059 
2060 exit:
2061 	return _apply;
2062 }
2063 #endif /* ARP_OFFLOAD_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */
2064 
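/** Push all currently configured packet filters down to the firmware. */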
2065 void
2066 dhd_set_packet_filter(dhd_pub_t *dhd)
2067 {
2068 	int i;
2069 
2070 	DHD_TRACE(("%s: enter\n", __FUNCTION__));
2071 	if (dhd_pkt_filter_enable) {
2072 		for (i = 0; i < dhd->pktfilter_count; i++) {
2073 			dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
2074 		}
2075 	}
2076 }
2077 
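/** Enable (value=1) or disable (value=0) the configured packet filters; when
 * enabled, only unicast packets are passed up to the host.
 */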
2078 void
2079 dhd_enable_packet_filter(int value, dhd_pub_t *dhd)
2080 {
2081 	int i;
2082 
2083 	DHD_ERROR(("%s: enter, value = %d\n", __FUNCTION__, value));
2084 	if ((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) && value &&
2085 			!dhd_conf_get_insuspend(dhd, AP_FILTER_IN_SUSPEND)) {
2086 		DHD_ERROR(("%s: DHD_FLAG_HOSTAP_MODE\n", __FUNCTION__));
2087 		return;
2088 	}
2089 	/* 1 - Enable packet filter, only allow unicast packet to send up */
2090 	/* 0 - Disable packet filter */
2091 	if (dhd_pkt_filter_enable && (!value ||
2092 	    (dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress) ||
2093 	    dhd_conf_get_insuspend(dhd, AP_FILTER_IN_SUSPEND)))
2094 	{
2095 		for (i = 0; i < dhd->pktfilter_count; i++) {
2096 // terence 20160615: fix build error if ARP_OFFLOAD_SUPPORT is removed
2097 #if defined(ARP_OFFLOAD_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
2098 			if (value && (i == DHD_ARP_FILTER_NUM) &&
2099 				!_turn_on_arp_filter(dhd, dhd->op_mode)) {
2100 				DHD_TRACE(("Do not turn on ARP white list pkt filter: "
2101 					"val %d, cnt %d, op_mode 0x%x\n",
2102 					value, i, dhd->op_mode));
2103 				continue;
2104 			}
2105 #endif /* ARP_OFFLOAD_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */
2106 #ifdef APSTA_BLOCK_ARP_DURING_DHCP
2107 			if (value && (i == DHD_BROADCAST_ARP_FILTER_NUM) &&
2108 				dhd->pktfilter[DHD_BROADCAST_ARP_FILTER_NUM]) {
2109 				/* XXX: BROADCAST_ARP_FILTER is only for the
2110 				 * STA/SoftAP concurrent mode (Please refer to RB:90348)
2111 				 * Remove the filter for other cases explicitly
2112 				 */
2113 				DHD_ERROR(("%s: Remove the DHD_BROADCAST_ARP_FILTER\n",
2114 					__FUNCTION__));
2115 				dhd_packet_filter_add_remove(dhd, FALSE,
2116 					DHD_BROADCAST_ARP_FILTER_NUM);
2117 			}
2118 #endif /* APSTA_BLOCK_ARP_DURING_DHCP */
2119 			dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
2120 				value, dhd_master_mode);
2121 		}
2122 	}
2123 }
2124 
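/** Install (add_remove=1) or remove (add_remove=0) the predefined packet
 * filter selected by num.
 */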
2125 int
2126 dhd_packet_filter_add_remove(dhd_pub_t *dhdp, int add_remove, int num)
2127 {
2128 	char *filterp = NULL;
2129 	int filter_id = 0;
2130 
2131 	switch (num) {
2132 		case DHD_BROADCAST_FILTER_NUM:
2133 			filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
2134 			filter_id = 101;
2135 			break;
2136 		case DHD_MULTICAST4_FILTER_NUM:
2137 			filter_id = 102;
2138 			if (FW_SUPPORTED((dhdp), pf6)) {
2139 				if (dhdp->pktfilter[num] != NULL) {
2140 					dhd_pktfilter_offload_delete(dhdp, filter_id);
2141 					dhdp->pktfilter[num] = NULL;
2142 				}
2143 				if (!add_remove) {
2144 					filterp = DISCARD_IPV4_MCAST;
2145 					add_remove = 1;
2146 					break;
2147 				}
2148 			} /* XXX: intentionally omitting the else case */
2149 			filterp = "102 0 0 0 0xFFFFFF 0x01005E";
2150 			break;
2151 		case DHD_MULTICAST6_FILTER_NUM:
2152 			filter_id = 103;
2153 			if (FW_SUPPORTED((dhdp), pf6)) {
2154 				if (dhdp->pktfilter[num] != NULL) {
2155 					dhd_pktfilter_offload_delete(dhdp, filter_id);
2156 					dhdp->pktfilter[num] = NULL;
2157 				}
2158 				if (!add_remove) {
2159 					filterp = DISCARD_IPV6_MCAST;
2160 					add_remove = 1;
2161 					break;
2162 				}
2163 			} /* XXX: intentionally omitting the else case */
2164 			filterp = "103 0 0 0 0xFFFF 0x3333";
2165 			break;
2166 		case DHD_MDNS_FILTER_NUM:
2167 			filterp = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB";
2168 			filter_id = 104;
2169 			break;
2170 		case DHD_ARP_FILTER_NUM:
2171 			filterp = "105 0 0 12 0xFFFF 0x0806";
2172 			filter_id = 105;
2173 			break;
2174 		case DHD_BROADCAST_ARP_FILTER_NUM:
2175 			filterp = "106 0 0 0 0xFFFFFFFFFFFF0000000000000806"
2176 				" 0xFFFFFFFFFFFF0000000000000806";
2177 			filter_id = 106;
2178 			break;
2179 		default:
2180 			return -EINVAL;
2181 	}
2182 
2183 	/* Add filter */
2184 	if (add_remove) {
2185 		dhdp->pktfilter[num] = filterp;
2186 		dhd_pktfilter_offload_set(dhdp, dhdp->pktfilter[num]);
2187 	} else { /* Delete filter */
2188 		if (dhdp->pktfilter[num] != NULL) {
2189 			dhd_pktfilter_offload_delete(dhdp, filter_id);
2190 			dhdp->pktfilter[num] = NULL;
2191 		}
2192 	}
2193 
2194 	return 0;
2195 }
2196 #endif /* PKT_FILTER_SUPPORT */
2197 
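/** Apply (value=1) or revert (value=0) the suspend-time power settings:
 * PM mode, packet filters, DTIM skipping, roam tuning and related iovars.
 */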
2198 static int dhd_set_suspend(int value, dhd_pub_t *dhd)
2199 {
2200 #ifndef SUPPORT_PM2_ONLY
2201 	int power_mode = PM_MAX;
2202 #endif /* SUPPORT_PM2_ONLY */
2203 	/* wl_pkt_filter_enable_t	enable_parm; */
2204 	int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */
2205 	int ret = 0;
2206 #ifdef DHD_USE_EARLYSUSPEND
2207 #ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
2208 	int roam_time_thresh = 0;   /* (ms) */
2209 #endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
2210 #ifndef ENABLE_FW_ROAM_SUSPEND
2211 	uint roamvar = 1;
2212 #endif /* ENABLE_FW_ROAM_SUSPEND */
2213 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
2214 	int bcn_li_bcn = 1;
2215 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
2216 	uint nd_ra_filter = 0;
2217 #ifdef ENABLE_IPMCAST_FILTER
2218 	int ipmcast_l2filter;
2219 #endif /* ENABLE_IPMCAST_FILTER */
2220 #ifdef CUSTOM_EVENT_PM_WAKE
2221 	uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE;
2222 #endif /* CUSTOM_EVENT_PM_WAKE */
2223 #endif /* DHD_USE_EARLYSUSPEND */
2224 #ifdef PASS_ALL_MCAST_PKTS
2225 	struct dhd_info *dhdinfo;
2226 	uint32 allmulti;
2227 	uint i;
2228 #endif /* PASS_ALL_MCAST_PKTS */
2229 #ifdef DYNAMIC_SWOOB_DURATION
2230 #ifndef CUSTOM_INTR_WIDTH
2231 #define CUSTOM_INTR_WIDTH 100
2232 	int intr_width = 0;
2233 #endif /* CUSTOM_INTR_WIDTH */
2234 #endif /* DYNAMIC_SWOOB_DURATION */
2235 
2236 #if defined(DHD_BCN_TIMEOUT_IN_SUSPEND) && defined(DHD_USE_EARLYSUSPEND)
2237 	/* CUSTOM_BCN_TIMEOUT_IN_SUSPEND in suspend, otherwise CUSTOM_BCN_TIMEOUT */
2238 	int bcn_timeout = CUSTOM_BCN_TIMEOUT;
2239 #endif /* DHD_BCN_TIMEOUT_IN_SUSPEND && DHD_USE_EARLYSUSPEND */
2240 #if defined(OEM_ANDROID) && defined(BCMPCIE)
2241 	int lpas = 0;
2242 	int dtim_period = 0;
2243 	int bcn_interval = 0;
2244 	int bcn_to_dly = 0;
2245 #endif /* OEM_ANDROID && BCMPCIE */
2246 
2247 	if (!dhd)
2248 		return -ENODEV;
2249 
2250 #ifdef PASS_ALL_MCAST_PKTS
2251 	dhdinfo = dhd->info;
2252 #endif /* PASS_ALL_MCAST_PKTS */
2253 
2254 	DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
2255 		__FUNCTION__, value, dhd->in_suspend));
2256 
2257 	dhd_suspend_lock(dhd);
2258 
2259 #ifdef CUSTOM_SET_CPUCORE
2260 	DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__, value));
2261 	/* set specific cpucore */
2262 	dhd_set_cpucore(dhd, TRUE);
2263 #endif /* CUSTOM_SET_CPUCORE */
2264 	if (dhd->up) {
2265 		if (value && dhd->in_suspend) {
2266 #ifdef PKT_FILTER_SUPPORT
2267 				dhd->early_suspended = 1;
2268 #endif
2269 				/* Kernel suspended */
2270 				DHD_ERROR(("%s: force extra Suspend setting \n", __FUNCTION__));
2271 
2272 #ifndef SUPPORT_PM2_ONLY
2273 				dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
2274 				                 sizeof(power_mode), TRUE, 0);
2275 #endif /* SUPPORT_PM2_ONLY */
2276 
2277 #ifdef PKT_FILTER_SUPPORT
2278 				/* Enable packet filter,
2279 				 * only allow unicast packet to send up
2280 				 */
2281 				dhd_enable_packet_filter(1, dhd);
2282 #ifdef APF
2283 				dhd_dev_apf_enable_filter(dhd_linux_get_primary_netdev(dhd));
2284 #endif /* APF */
2285 #endif /* PKT_FILTER_SUPPORT */
2286 #ifdef ARP_OFFLOAD_SUPPORT
2287 				if (dhd->arpoe_enable) {
2288 					dhd_arp_offload_enable(dhd, TRUE);
2289 				}
2290 #endif /* ARP_OFFLOAD_SUPPORT */
2291 
2292 #ifdef PASS_ALL_MCAST_PKTS
2293 				allmulti = 0;
2294 				for (i = 0; i < DHD_MAX_IFS; i++) {
2295 					if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net) {
2296 						ret = dhd_iovar(dhd, i, "allmulti",
2297 								(char *)&allmulti,
2298 								sizeof(allmulti),
2299 								NULL, 0, TRUE);
2300 						if (ret < 0) {
2301 							DHD_ERROR(("%s allmulti failed %d\n",
2302 								__FUNCTION__, ret));
2303 						}
2304 					}
2305 				}
2306 #endif /* PASS_ALL_MCAST_PKTS */
2307 
2308 				/* If DTIM skip is set up as default, force it to wake
2309 				 * each third DTIM for better power savings.  Note that
2310 				 * one side effect is a chance to miss BC/MC packets.
2311 				 */
2312 #ifdef WLTDLS
2313 				/* Do not set bcn_li_dtim in WFD mode */
2314 				if (dhd->tdls_mode) {
2315 					bcn_li_dtim = 0;
2316 				} else
2317 #endif /* WLTDLS */
2318 #if defined(OEM_ANDROID) && defined(BCMPCIE)
2319 				bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd, &dtim_period,
2320 						&bcn_interval);
2321 				ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
2322 						sizeof(bcn_li_dtim), NULL, 0, TRUE);
2323 				if (ret < 0) {
2324 					DHD_ERROR(("%s bcn_li_dtim failed %d\n",
2325 							__FUNCTION__, ret));
2326 				}
2327 				if ((bcn_li_dtim * dtim_period * bcn_interval) >=
2328 					MIN_DTIM_FOR_ROAM_THRES_EXTEND) {
2329 					/*
2330 					 * Increase max roaming threshold from 2 secs to 8 secs;
2331 					 * the real roam threshold is MIN(max_roam_threshold,
2332 					 * bcn_timeout/2)
2333 					 */
2334 					lpas = 1;
2335 					ret = dhd_iovar(dhd, 0, "lpas", (char *)&lpas, sizeof(lpas),
2336 							NULL, 0, TRUE);
2337 					if (ret < 0) {
2338 						if (ret == BCME_UNSUPPORTED) {
2339 							DHD_ERROR(("%s lpas, UNSUPPORTED\n",
2340 								__FUNCTION__));
2341 						} else {
2342 							DHD_ERROR(("%s set lpas failed %d\n",
2343 								__FUNCTION__, ret));
2344 						}
2345 					}
2346 					bcn_to_dly = 1;
2347 					/*
2348 					 * if bcn_to_dly is 1, the real roam threshold is
2349 					 * MIN(max_roam_threshold, bcn_timeout -1);
2350 					 * notify a link down event after the roaming procedure
2351 					 * completes if bcn_timeout is hit while roaming is in progress.
2352 					 */
2353 					ret = dhd_iovar(dhd, 0, "bcn_to_dly", (char *)&bcn_to_dly,
2354 							sizeof(bcn_to_dly), NULL, 0, TRUE);
2355 					if (ret < 0) {
2356 						if (ret == BCME_UNSUPPORTED) {
2357 							DHD_ERROR(("%s bcn_to_dly, UNSUPPORTED\n",
2358 								__FUNCTION__));
2359 						} else {
2360 							DHD_ERROR(("%s set bcn_to_dly failed %d\n",
2361 								__FUNCTION__, ret));
2362 						}
2363 					}
2364 				}
2365 #else
2366 				bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd);
2367 				if (dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
2368 						sizeof(bcn_li_dtim), NULL, 0, TRUE) < 0)
2369 					DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__));
2370 #endif /* OEM_ANDROID && BCMPCIE */
2371 
2372 #ifdef DHD_USE_EARLYSUSPEND
2373 #ifdef DHD_BCN_TIMEOUT_IN_SUSPEND
2374 				bcn_timeout = CUSTOM_BCN_TIMEOUT_IN_SUSPEND;
2375 				ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
2376 						sizeof(bcn_timeout), NULL, 0, TRUE);
2377 				if (ret < 0) {
2378 					DHD_ERROR(("%s bcn_timeout failed %d\n", __FUNCTION__,
2379 						ret));
2380 				}
2381 #endif /* DHD_BCN_TIMEOUT_IN_SUSPEND */
2382 #ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
2383 				roam_time_thresh = CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND;
2384 				ret = dhd_iovar(dhd, 0, "roam_time_thresh",
2385 						(char *)&roam_time_thresh,
2386 						sizeof(roam_time_thresh), NULL, 0, TRUE);
2387 				if (ret < 0) {
2388 					DHD_ERROR(("%s roam_time_thresh failed %d\n",
2389 						__FUNCTION__, ret));
2390 				}
2391 #endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
2392 #ifndef ENABLE_FW_ROAM_SUSPEND
2393 				/* Disable firmware roaming during suspend */
2394 				ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar,
2395 						sizeof(roamvar), NULL, 0, TRUE);
2396 				if (ret < 0) {
2397 					DHD_ERROR(("%s roam_off failed %d\n",
2398 						__FUNCTION__, ret));
2399 				}
2400 #endif /* ENABLE_FW_ROAM_SUSPEND */
2401 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
2402 				if (bcn_li_dtim) {
2403 					bcn_li_bcn = 0;
2404 				}
2405 				ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn,
2406 						sizeof(bcn_li_bcn), NULL, 0, TRUE);
2407 				if (ret < 0) {
2408 					DHD_ERROR(("%s bcn_li_bcn failed %d\n", __FUNCTION__, ret));
2409 				}
2410 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
2411 #if defined(WL_CFG80211) && defined(WL_BCNRECV)
2412 				ret = wl_android_bcnrecv_suspend(dhd_linux_get_primary_netdev(dhd));
2413 				if (ret != BCME_OK) {
2414 					DHD_ERROR(("failed to stop beacon recv event on"
2415 						" suspend state (%d)\n", ret));
2416 				}
2417 #endif /* WL_CFG80211 && WL_BCNRECV */
2418 #ifdef NDO_CONFIG_SUPPORT
2419 				if (dhd->ndo_enable) {
2420 					if (!dhd->ndo_host_ip_overflow) {
2421 						/* enable ND offload on suspend */
2422 						ret = dhd_ndo_enable(dhd, TRUE);
2423 						if (ret < 0) {
2424 							DHD_ERROR(("%s: failed to enable NDO\n",
2425 								__FUNCTION__));
2426 						}
2427 					} else {
2428 						DHD_INFO(("%s: NDO disabled on suspend due to "
2429 								"HW capacity\n", __FUNCTION__));
2430 					}
2431 				}
2432 #endif /* NDO_CONFIG_SUPPORT */
2433 #ifndef APF
2434 				if (FW_SUPPORTED(dhd, ndoe))
2435 #else
2436 				if (FW_SUPPORTED(dhd, ndoe) && !FW_SUPPORTED(dhd, apf))
2437 #endif /* APF */
2438 				{
2439 					/* enable IPv6 RA filter in firmware during suspend */
2440 					nd_ra_filter = 1;
2441 					ret = dhd_iovar(dhd, 0, "nd_ra_filter_enable",
2442 							(char *)&nd_ra_filter, sizeof(nd_ra_filter),
2443 							NULL, 0, TRUE);
2444 					if (ret < 0)
2445 						DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
2446 							ret));
2447 				}
2448 				dhd_os_suppress_logging(dhd, TRUE);
2449 #ifdef ENABLE_IPMCAST_FILTER
2450 				ipmcast_l2filter = 1;
2451 				ret = dhd_iovar(dhd, 0, "ipmcast_l2filter",
2452 						(char *)&ipmcast_l2filter, sizeof(ipmcast_l2filter),
2453 						NULL, 0, TRUE);
2454 				if (ret < 0) {
2455 					DHD_ERROR(("failed to set ipmcast_l2filter (%d)\n", ret));
2456 				}
2457 #endif /* ENABLE_IPMCAST_FILTER */
2458 #ifdef DYNAMIC_SWOOB_DURATION
2459 				intr_width = CUSTOM_INTR_WIDTH;
2460 				ret = dhd_iovar(dhd, 0, "bus:intr_width", (char *)&intr_width,
2461 						sizeof(intr_width), NULL, 0, TRUE);
2462 				if (ret < 0) {
2463 					DHD_ERROR(("failed to set intr_width (%d)\n", ret));
2464 				}
2465 #endif /* DYNAMIC_SWOOB_DURATION */
2466 #ifdef CUSTOM_EVENT_PM_WAKE
2467 				pm_awake_thresh = CUSTOM_EVENT_PM_WAKE * 4;
2468 				ret = dhd_iovar(dhd, 0, "const_awake_thresh",
2469 					(char *)&pm_awake_thresh,
2470 					sizeof(pm_awake_thresh), NULL, 0, TRUE);
2471 				if (ret < 0) {
2472 					DHD_ERROR(("%s set const_awake_thresh failed %d\n",
2473 						__FUNCTION__, ret));
2474 				}
2475 #endif /* CUSTOM_EVENT_PM_WAKE */
2476 #ifdef CONFIG_SILENT_ROAM
2477 				if (!dhd->sroamed) {
2478 					ret = dhd_sroam_set_mon(dhd, TRUE);
2479 					if (ret < 0) {
2480 						DHD_ERROR(("%s set sroam failed %d\n",
2481 							__FUNCTION__, ret));
2482 					}
2483 				}
2484 				dhd->sroamed = FALSE;
2485 #endif /* CONFIG_SILENT_ROAM */
2486 #endif /* DHD_USE_EARLYSUSPEND */
2487 			} else {
2488 #ifdef PKT_FILTER_SUPPORT
2489 				dhd->early_suspended = 0;
2490 #endif
2491 				/* Kernel resumed  */
2492 				DHD_ERROR(("%s: Remove extra suspend setting \n", __FUNCTION__));
2493 #ifdef DYNAMIC_SWOOB_DURATION
2494 				intr_width = 0;
2495 				ret = dhd_iovar(dhd, 0, "bus:intr_width", (char *)&intr_width,
2496 						sizeof(intr_width), NULL, 0, TRUE);
2497 				if (ret < 0) {
2498 					DHD_ERROR(("failed to set intr_width (%d)\n", ret));
2499 				}
2500 #endif /* DYNAMIC_SWOOB_DURATION */
2501 #ifndef SUPPORT_PM2_ONLY
2502 				power_mode = PM_FAST;
2503 				dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
2504 				                 sizeof(power_mode), TRUE, 0);
2505 #endif /* SUPPORT_PM2_ONLY */
2506 #if defined(WL_CFG80211) && defined(WL_BCNRECV)
2507 				ret = wl_android_bcnrecv_resume(dhd_linux_get_primary_netdev(dhd));
2508 				if (ret != BCME_OK) {
2509 					DHD_ERROR(("failed to resume beacon recv state (%d)\n",
2510 							ret));
2511 				}
2512 #endif /* WL_CFG80211 && WL_BCNRECV */
2513 #ifdef ARP_OFFLOAD_SUPPORT
2514 				if (dhd->arpoe_enable) {
2515 					dhd_arp_offload_enable(dhd, FALSE);
2516 				}
2517 #endif /* ARP_OFFLOAD_SUPPORT */
2518 #ifdef PKT_FILTER_SUPPORT
2519 				/* disable pkt filter */
2520 				dhd_enable_packet_filter(0, dhd);
2521 #ifdef APF
2522 				dhd_dev_apf_disable_filter(dhd_linux_get_primary_netdev(dhd));
2523 #endif /* APF */
2524 #endif /* PKT_FILTER_SUPPORT */
2525 #ifdef PASS_ALL_MCAST_PKTS
2526 				allmulti = 1;
2527 				for (i = 0; i < DHD_MAX_IFS; i++) {
2528 					if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net)
2529 						ret = dhd_iovar(dhd, i, "allmulti",
2530 								(char *)&allmulti,
2531 								sizeof(allmulti), NULL,
2532 								0, TRUE);
2533 					if (ret < 0) {
2534 						DHD_ERROR(("%s: allmulti failed:%d\n",
2535 								__FUNCTION__, ret));
2536 					}
2537 				}
2538 #endif /* PASS_ALL_MCAST_PKTS */
2539 #if defined(OEM_ANDROID) && defined(BCMPCIE)
2540 				/* restore pre-suspend setting */
2541 				ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
2542 						sizeof(bcn_li_dtim), NULL, 0, TRUE);
2543 				if (ret < 0) {
2544 					DHD_ERROR(("%s:bcn_li_dtim failed:%d\n",
2545 							__FUNCTION__, ret));
2546 				}
2547 				ret = dhd_iovar(dhd, 0, "lpas", (char *)&lpas, sizeof(lpas), NULL,
2548 						0, TRUE);
2549 				if (ret < 0) {
2550 					if (ret == BCME_UNSUPPORTED) {
2551 						DHD_ERROR(("%s lpas, UNSUPPORTED\n", __FUNCTION__));
2552 					} else {
2553 						DHD_ERROR(("%s set lpas failed %d\n",
2554 							__FUNCTION__, ret));
2555 					}
2556 				}
2557 				ret = dhd_iovar(dhd, 0, "bcn_to_dly", (char *)&bcn_to_dly,
2558 						sizeof(bcn_to_dly), NULL, 0, TRUE);
2559 				if (ret < 0) {
2560 					if (ret == BCME_UNSUPPORTED) {
2561 						DHD_ERROR(("%s bcn_to_dly UNSUPPORTED\n",
2562 							__FUNCTION__));
2563 					} else {
2564 						DHD_ERROR(("%s set bcn_to_dly failed %d\n",
2565 							__FUNCTION__, ret));
2566 					}
2567 				}
2568 #else
2569 				/* restore pre-suspend setting for dtim_skip */
2570 				ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
2571 						sizeof(bcn_li_dtim), NULL, 0, TRUE);
2572 				if (ret < 0) {
2573 					DHD_ERROR(("%s:bcn_li_dtim fail:%d\n", __FUNCTION__, ret));
2574 				}
2575 #endif /* OEM_ANDROID && BCMPCIE */
2576 #ifdef DHD_USE_EARLYSUSPEND
2577 #ifdef DHD_BCN_TIMEOUT_IN_SUSPEND
2578 				bcn_timeout = CUSTOM_BCN_TIMEOUT;
2579 				ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
2580 						sizeof(bcn_timeout), NULL, 0, TRUE);
2581 				if (ret < 0) {
2582 					DHD_ERROR(("%s:bcn_timeout failed:%d\n",
2583 						__FUNCTION__, ret));
2584 				}
2585 #endif /* DHD_BCN_TIMEOUT_IN_SUSPEND */
2586 #ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
2587 				roam_time_thresh = 2000;
2588 				ret = dhd_iovar(dhd, 0, "roam_time_thresh",
2589 						(char *)&roam_time_thresh,
2590 						sizeof(roam_time_thresh), NULL, 0, TRUE);
2591 				if (ret < 0) {
2592 					DHD_ERROR(("%s:roam_time_thresh failed:%d\n",
2593 							__FUNCTION__, ret));
2594 				}
2595 
2596 #endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
2597 #ifndef ENABLE_FW_ROAM_SUSPEND
2598 				roamvar = dhd_roam_disable;
2599 				ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar,
2600 						sizeof(roamvar), NULL, 0, TRUE);
2601 				if (ret < 0) {
2602 					DHD_ERROR(("%s: roam_off fail:%d\n", __FUNCTION__, ret));
2603 				}
2604 #endif /* ENABLE_FW_ROAM_SUSPEND */
2605 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
2606 				ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn,
2607 						sizeof(bcn_li_bcn), NULL, 0, TRUE);
2608 				if (ret < 0) {
2609 					DHD_ERROR(("%s: bcn_li_bcn failed:%d\n",
2610 						__FUNCTION__, ret));
2611 				}
2612 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
2613 #ifdef NDO_CONFIG_SUPPORT
2614 				if (dhd->ndo_enable) {
2615 					/* Disable ND offload on resume */
2616 					ret = dhd_ndo_enable(dhd, FALSE);
2617 					if (ret < 0) {
2618 						DHD_ERROR(("%s: failed to disable NDO\n",
2619 							__FUNCTION__));
2620 					}
2621 				}
2622 #endif /* NDO_CONFIG_SUPPORT */
2623 #ifndef APF
2624 				if (FW_SUPPORTED(dhd, ndoe))
2625 #else
2626 				if (FW_SUPPORTED(dhd, ndoe) && !FW_SUPPORTED(dhd, apf))
2627 #endif /* APF */
2628 				{
2629 					/* disable IPv6 RA filter in firmware on resume */
2630 					nd_ra_filter = 0;
2631 					ret = dhd_iovar(dhd, 0, "nd_ra_filter_enable",
2632 							(char *)&nd_ra_filter, sizeof(nd_ra_filter),
2633 							NULL, 0, TRUE);
2634 					if (ret < 0) {
2635 						DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
2636 							ret));
2637 					}
2638 				}
2639 				dhd_os_suppress_logging(dhd, FALSE);
2640 #ifdef ENABLE_IPMCAST_FILTER
2641 				ipmcast_l2filter = 0;
2642 				ret = dhd_iovar(dhd, 0, "ipmcast_l2filter",
2643 						(char *)&ipmcast_l2filter, sizeof(ipmcast_l2filter),
2644 						NULL, 0, TRUE);
2645 				if (ret < 0) {
2646 					DHD_ERROR(("failed to clear ipmcast_l2filter ret:%d\n", ret));
2647 				}
2648 #endif /* ENABLE_IPMCAST_FILTER */
2649 #ifdef CUSTOM_EVENT_PM_WAKE
2650 				ret = dhd_iovar(dhd, 0, "const_awake_thresh",
2651 					(char *)&pm_awake_thresh,
2652 					sizeof(pm_awake_thresh), NULL, 0, TRUE);
2653 				if (ret < 0) {
2654 					DHD_ERROR(("%s set const_awake_thresh failed %d\n",
2655 						__FUNCTION__, ret));
2656 				}
2657 #endif /* CUSTOM_EVENT_PM_WAKE */
2658 #ifdef CONFIG_SILENT_ROAM
2659 				ret = dhd_sroam_set_mon(dhd, FALSE);
2660 				if (ret < 0) {
2661 					DHD_ERROR(("%s set sroam failed %d\n", __FUNCTION__, ret));
2662 				}
2663 #endif /* CONFIG_SILENT_ROAM */
2664 #endif /* DHD_USE_EARLYSUSPEND */
2665 			}
2666 	}
2667 	dhd_suspend_unlock(dhd);
2668 
2669 	return 0;
2670 }
2671 
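/** Record the early-suspend state and, unless suspend handling is disabled
 * (and not forced), apply or revert the suspend settings under a wake lock.
 */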
2672 static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force)
2673 {
2674 	dhd_pub_t *dhdp = &dhd->pub;
2675 	int ret = 0;
2676 
2677 	DHD_OS_WAKE_LOCK(dhdp);
2678 
2679 	/* Set flag when early suspend was called */
2680 	dhdp->in_suspend = val;
2681 	if ((force || !dhdp->suspend_disable_flag) &&
2682 		(dhd_support_sta_mode(dhdp) || dhd_conf_get_insuspend(dhdp, ALL_IN_SUSPEND)))
2683 	{
2684 		ret = dhd_set_suspend(val, dhdp);
2685 	}
2686 
2687 	DHD_OS_WAKE_UNLOCK(dhdp);
2688 	return ret;
2689 }
2690 
2691 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
2692 static void dhd_early_suspend(struct early_suspend *h)
2693 {
2694 	struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
2695 	DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
2696 
2697 	if (dhd && dhd->pub.conf->suspend_mode == EARLY_SUSPEND) {
2698 		dhd_suspend_resume_helper(dhd, 1, 0);
2699 		dhd_conf_set_suspend_resume(&dhd->pub, 1);
2700 	}
2701 }
2702 
2703 static void dhd_late_resume(struct early_suspend *h)
2704 {
2705 	struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
2706 	DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
2707 
2708 	if (dhd && dhd->pub.conf->suspend_mode == EARLY_SUSPEND) {
2709 		dhd_conf_set_suspend_resume(&dhd->pub, 0);
2710 		dhd_suspend_resume_helper(dhd, 0, 0);
2711 	}
2712 }
2713 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
2714 
2715 /*
2716  * Generalized timeout mechanism.  Uses spin sleep with exponential back-off until
2717  * the sleep time reaches one jiffy, then switches over to task delay.  Usage:
2718  *
2719  *      dhd_timeout_start(&tmo, usec);
2720  *      while (!dhd_timeout_expired(&tmo))
2721  *              if (poll_something())
2722  *                      break;
2723  *      if (dhd_timeout_expired(&tmo))
2724  *              fatal();
2725  */
2726 
2727 void
2728 dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
2729 {
2730 #ifdef BCMQT
2731 	tmo->limit = usec * htclkratio;
2732 #else
2733 	tmo->limit = usec;
2734 #endif
2735 	tmo->increment = 0;
2736 	tmo->elapsed = 0;
2737 	tmo->tick = 10 * USEC_PER_MSEC;	/* 10 msec */
2738 }
2739 
2740 int
2741 dhd_timeout_expired(dhd_timeout_t *tmo)
2742 {
2743 	/* Do nothing on the first call */
2744 	if (tmo->increment == 0) {
2745 		tmo->increment = USEC_PER_MSEC;	/* Start with 1 msec */
2746 		return 0;
2747 	}
2748 
2749 	if (tmo->elapsed >= tmo->limit)
2750 		return 1;
2751 
2752 	DHD_INFO(("%s: CAN_SLEEP():%d tmo->increment=%ld msec\n",
2753 		__FUNCTION__, CAN_SLEEP(), tmo->increment / USEC_PER_MSEC));
2754 
2755 	CAN_SLEEP() ? OSL_SLEEP(tmo->increment / USEC_PER_MSEC) : OSL_DELAY(tmo->increment);
2756 
2757 	/* The delay doubles until it reaches tmo->tick (10 msec), then stays
2758 	 * constant at tmo->tick until the timeout elapses.
2759 	 */
2760 	tmo->increment = (tmo->increment >= tmo->tick) ? tmo->tick : (tmo->increment * 2);
2761 
2762 	/* Add the delay that's about to take place */
2763 #ifdef BCMQT
2764 	tmo->elapsed += tmo->increment * htclkratio;
2765 #else
2766 	tmo->elapsed += tmo->increment;
2767 #endif
2768 
2769 	return 0;
2770 }
2771 
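/** Map a net_device to its dhd iflist index; DHD_BAD_IF if not found. */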
2772 int
2773 dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
2774 {
2775 	int i = 0;
2776 
2777 	if (!dhd) {
2778 		DHD_ERROR(("%s : DHD_BAD_IF return\n", __FUNCTION__));
2779 		return DHD_BAD_IF;
2780 	}
2781 
2782 	while (i < DHD_MAX_IFS) {
2783 		if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net))
2784 			return i;
2785 		i++;
2786 	}
2787 
2788 	return DHD_BAD_IF;
2789 }
2790 
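/** Map a dhd iflist index back to its net_device; NULL for an invalid index. */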
2791 struct net_device * dhd_idx2net(void *pub, int ifidx)
2792 {
2793 	struct dhd_pub *dhd_pub = (struct dhd_pub *)pub;
2794 	struct dhd_info *dhd_info;
2795 
2796 	if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS)
2797 		return NULL;
2798 	dhd_info = dhd_pub->info;
2799 	if (dhd_info && dhd_info->iflist[ifidx])
2800 		return dhd_info->iflist[ifidx]->net;
2801 	return NULL;
2802 }
2803 
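/** Map a dongle interface name to its dhd iflist index; falls back to 0,
 * the primary interface, when no match is found.
 */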
2804 int
2805 dhd_ifname2idx(dhd_info_t *dhd, char *name)
2806 {
2807 	int i = DHD_MAX_IFS;
2808 
2809 	ASSERT(dhd);
2810 
2811 	if (name == NULL || *name == '\0')
2812 		return 0;
2813 
2814 	while (--i > 0)
2815 		if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->dngl_name, name, IFNAMSIZ))
2816 				break;
2817 
2818 	DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));
2819 
2820 	return i;	/* default - the primary interface */
2821 }
2822 
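/** Return the OS name of interface ifidx, or a placeholder string when the
 * index or interface is invalid.
 */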
2823 char *
2824 dhd_ifname(dhd_pub_t *dhdp, int ifidx)
2825 {
2826 	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
2827 
2828 	ASSERT(dhd);
2829 
2830 	if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
2831 		DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
2832 		return "<if_bad>";
2833 	}
2834 
2835 	if (dhd->iflist[ifidx] == NULL) {
2836 		DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
2837 		return "<if_null>";
2838 	}
2839 
2840 	if (dhd->iflist[ifidx]->net)
2841 		return dhd->iflist[ifidx]->net->name;
2842 
2843 	return "<if_none>";
2844 }
2845 
2846 uint8 *
2847 dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx)
2848 {
2849 	int i;
2850 	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
2851 
2852 	ASSERT(dhd);
2853 	for (i = 0; i < DHD_MAX_IFS; i++)
2854 		if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx)
2855 			return dhd->iflist[i]->mac_addr;
2856 
2857 	return NULL;
2858 }
2859 
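/** Program the dongle with the interface's multicast address list, then the
 * allmulti flag, then the promiscuous flag (accumulated over all interfaces
 * when MCAST_LIST_ACCUMULATION is defined).
 */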
2860 static void
2861 _dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
2862 {
2863 	struct net_device *dev;
2864 	struct netdev_hw_addr *ha;
2865 	uint32 allmulti, cnt;
2866 
2867 	wl_ioctl_t ioc;
2868 	char *buf, *bufp;
2869 	uint buflen;
2870 	int ret;
2871 
2872 #ifdef MCAST_LIST_ACCUMULATION
2873 	int i;
2874 	uint32 cnt_iface[DHD_MAX_IFS];
2875 	cnt = 0;
2876 	allmulti = 0;
2877 
2878 	for (i = 0; i < DHD_MAX_IFS; i++) {
2879 		if (dhd->iflist[i]) {
2880 			dev = dhd->iflist[i]->net;
2881 			if (!dev)
2882 				continue;
2883 			netif_addr_lock_bh(dev);
2884 			cnt_iface[i] = netdev_mc_count(dev);
2885 			cnt += cnt_iface[i];
2886 			netif_addr_unlock_bh(dev);
2887 
2888 			/* Determine initial value of allmulti flag */
2889 			allmulti |= (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
2890 		}
2891 	}
2892 #else /* !MCAST_LIST_ACCUMULATION */
2893 	if (!dhd->iflist[ifidx]) {
2894 		DHD_ERROR(("%s : dhd->iflist[%d] was NULL\n", __FUNCTION__, ifidx));
2895 		return;
2896 	}
2897 	dev = dhd->iflist[ifidx]->net;
2898 	if (!dev)
2899 		return;
2900 	netif_addr_lock_bh(dev);
2901 	cnt = netdev_mc_count(dev);
2902 	netif_addr_unlock_bh(dev);
2903 
2904 	/* Determine initial value of allmulti flag */
2905 	allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
2906 #endif /* MCAST_LIST_ACCUMULATION */
2907 
2908 #ifdef PASS_ALL_MCAST_PKTS
2909 #ifdef PKT_FILTER_SUPPORT
2910 	if (!dhd->pub.early_suspended)
2911 #endif /* PKT_FILTER_SUPPORT */
2912 		allmulti = TRUE;
2913 #endif /* PASS_ALL_MCAST_PKTS */
2914 
2915 	/* Send down the multicast list first. */
2916 
2917 	/* XXX Not using MAXMULTILIST to avoid including wlc_pub.h; but
2918 	 * maybe we should?  (Or should that be in wlioctl.h instead?)
2919 	 */
2920 
2921 	buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
2922 	if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
2923 		DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
2924 		           dhd_ifname(&dhd->pub, ifidx), cnt));
2925 		return;
2926 	}
2927 
2928 	strlcpy(bufp, "mcast_list", buflen);
2929 	bufp += strlen("mcast_list") + 1;
2930 
2931 	cnt = htol32(cnt);
2932 	memcpy(bufp, &cnt, sizeof(cnt));
2933 	bufp += sizeof(cnt);
2934 
2935 #ifdef MCAST_LIST_ACCUMULATION
2936 	for (i = 0; i < DHD_MAX_IFS; i++) {
2937 		if (dhd->iflist[i]) {
2938 			DHD_TRACE(("_dhd_set_multicast_list: ifidx %d\n", i));
2939 			dev = dhd->iflist[i]->net;
2940 
2941 			netif_addr_lock_bh(dev);
2942 			GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
2943 			netdev_for_each_mc_addr(ha, dev) {
2944 				GCC_DIAGNOSTIC_POP();
2945 				if (!cnt_iface[i])
2946 					break;
2947 				memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
2948 				bufp += ETHER_ADDR_LEN;
2949 				DHD_TRACE(("_dhd_set_multicast_list: cnt "
2950 					"%d " MACDBG "\n",
2951 					cnt_iface[i], MAC2STRDBG(ha->addr)));
2952 				cnt_iface[i]--;
2953 			}
2954 			netif_addr_unlock_bh(dev);
2955 		}
2956 	}
2957 #else /* !MCAST_LIST_ACCUMULATION */
2958 	netif_addr_lock_bh(dev);
2959 	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
2960 	netdev_for_each_mc_addr(ha, dev) {
2961 		GCC_DIAGNOSTIC_POP();
2962 		if (!cnt)
2963 			break;
2964 		memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
2965 		bufp += ETHER_ADDR_LEN;
2966 		cnt--;
2967 	}
2968 	netif_addr_unlock_bh(dev);
2969 #endif /* MCAST_LIST_ACCUMULATION */
2970 
2971 	memset(&ioc, 0, sizeof(ioc));
2972 	ioc.cmd = WLC_SET_VAR;
2973 	ioc.buf = buf;
2974 	ioc.len = buflen;
2975 	ioc.set = TRUE;
2976 
2977 	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
2978 	if (ret < 0) {
2979 		DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
2980 			dhd_ifname(&dhd->pub, ifidx), cnt));
2981 		allmulti = cnt ? TRUE : allmulti;
2982 	}
2983 
2984 	MFREE(dhd->pub.osh, buf, buflen);
2985 
2986 	/* Now send the allmulti setting.  This is based on the setting in the
2987 	 * net_device flags, but might be modified above to be turned on if we
2988 	 * were trying to set some addresses and dongle rejected it...
2989 	 */
2990 
2991 	allmulti = htol32(allmulti);
2992 	ret = dhd_iovar(&dhd->pub, ifidx, "allmulti", (char *)&allmulti,
2993 			sizeof(allmulti), NULL, 0, TRUE);
2994 	if (ret < 0) {
2995 		DHD_ERROR(("%s: set allmulti %d failed\n",
2996 		           dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
2997 	}
2998 
2999 	/* Finally, pick up the PROMISC flag as well, like the NIC driver does */
3000 
3001 #ifdef MCAST_LIST_ACCUMULATION
3002 	allmulti = 0;
3003 	for (i = 0; i < DHD_MAX_IFS; i++) {
3004 		if (dhd->iflist[i]) {
3005 			dev = dhd->iflist[i]->net;
3006 			allmulti |= (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
3007 		}
3008 	}
3009 #else
3010 	allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
3011 #endif /* MCAST_LIST_ACCUMULATION */
3012 
3013 	allmulti = htol32(allmulti);
3014 
3015 	memset(&ioc, 0, sizeof(ioc));
3016 	ioc.cmd = WLC_SET_PROMISC;
3017 	ioc.buf = &allmulti;
3018 	ioc.len = sizeof(allmulti);
3019 	ioc.set = TRUE;
3020 
3021 	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
3022 	if (ret < 0) {
3023 		DHD_ERROR(("%s: set promisc %d failed\n",
3024 		           dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
3025 	}
3026 }
3027 
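/** Write a new MAC address to the dongle via the cur_etheraddr iovar and
 * mirror it into the net_device (and, for ifidx 0, the primary MAC).
 */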
3028 int
3029 _dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr, bool skip_stop)
3030 {
3031 	int ret;
3032 
3033 #ifdef DHD_NOTIFY_MAC_CHANGED
3034 	if (skip_stop) {
3035 		WL_MSG(dhd_ifname(&dhd->pub, ifidx), "close dev for mac changing\n");
3036 		dhd->pub.skip_dhd_stop = TRUE;
3037 		dev_close(dhd->iflist[ifidx]->net);
3038 	}
3039 #endif /* DHD_NOTIFY_MAC_CHANGED */
3040 
3041 	ret = dhd_iovar(&dhd->pub, ifidx, "cur_etheraddr", (char *)addr,
3042 			ETHER_ADDR_LEN, NULL, 0, TRUE);
3043 	if (ret < 0) {
3044 		DHD_ERROR(("%s: set cur_etheraddr %pM failed ret=%d\n",
3045 			dhd_ifname(&dhd->pub, ifidx), addr, ret));
3046 		goto exit;
3047 	} else {
3048 		memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
3049 		if (ifidx == 0)
3050 			memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
3051 		WL_MSG(dhd_ifname(&dhd->pub, ifidx), "MACID %pM is overwritten\n", addr);
3052 	}
3053 
3054 exit:
3055 #ifdef DHD_NOTIFY_MAC_CHANGED
3056 	if (skip_stop) {
3057 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
3058 		dev_open(dhd->iflist[ifidx]->net, NULL);
3059 #else
3060 		dev_open(dhd->iflist[ifidx]->net);
3061 #endif
3062 		dhd->pub.skip_dhd_stop = FALSE;
3063 		WL_MSG(dhd_ifname(&dhd->pub, ifidx), "notify mac changed done\n");
3064 	}
3065 #endif /* DHD_NOTIFY_MAC_CHANGED */
3066 
3067 	return ret;
3068 }
3069 
3070 int dhd_update_rand_mac_addr(dhd_pub_t *dhd)
3071 {
3072 	struct ether_addr mac_addr;
3073 	dhd_generate_rand_mac_addr(&mac_addr);
3074 	if (_dhd_set_mac_address(dhd->info, 0, mac_addr.octet, TRUE) != 0) {
3075 		DHD_ERROR(("randmac setting failed\n"));
3076 #ifdef STA_RANDMAC_ENFORCED
3077 		return BCME_BADADDR;
3078 #endif /* STA_RANDMAC_ENFORCED */
3079 	}
3080 	return BCME_OK;
3081 }
3082 
3083 #ifdef BCM_ROUTER_DHD
3084 void dhd_update_dpsta_interface_for_sta(dhd_pub_t* dhdp, int ifidx, void* event_data)
3085 {
3086 	struct wl_dpsta_intf_event *dpsta_prim_event = (struct wl_dpsta_intf_event *)event_data;
3087 	dhd_if_t *ifp = dhdp->info->iflist[ifidx];
3088 
3089 	if (dpsta_prim_event->intf_type == WL_INTF_DWDS) {
3090 		ifp->primsta_dwds = TRUE;
3091 	} else {
3092 		ifp->primsta_dwds = FALSE;
3093 	}
3094 }
3095 #endif /* BCM_ROUTER_DHD */
3096 
3097 #ifdef DHD_WMF
3098 void dhd_update_psta_interface_for_sta(dhd_pub_t* dhdp, char* ifname, void* ea,
3099 		void* event_data)
3100 {
3101 	struct wl_psta_primary_intf_event *psta_prim_event =
3102 			(struct wl_psta_primary_intf_event*)event_data;
3103 	dhd_sta_t *psta_interface =  NULL;
3104 	dhd_sta_t *sta = NULL;
3105 	uint8 ifindex;
3106 	ASSERT(ifname);
3107 	ASSERT(psta_prim_event);
3108 	ASSERT(ea);
3109 
3110 	ifindex = (uint8)dhd_ifname2idx(dhdp->info, ifname);
3111 	sta = dhd_find_sta(dhdp, ifindex, ea);
3112 	if (sta != NULL) {
3113 		psta_interface = dhd_find_sta(dhdp, ifindex,
3114 				(void *)(psta_prim_event->prim_ea.octet));
3115 		if (psta_interface != NULL) {
3116 			sta->psta_prim = psta_interface;
3117 		}
3118 	}
3119 }
3120 
3121 /* Get wmf_psta_disable configuration */
3122 int dhd_get_wmf_psta_disable(dhd_pub_t *dhdp, uint32 idx)
3123 {
3124 	dhd_info_t *dhd = dhdp->info;
3125 	dhd_if_t *ifp;
3126 	ASSERT(idx < DHD_MAX_IFS);
3127 	ifp = dhd->iflist[idx];
3128 	return ifp->wmf_psta_disable;
3129 }
3130 
3131 /* Set wmf_psta_disable configuration */
3132 int dhd_set_wmf_psta_disable(dhd_pub_t *dhdp, uint32 idx, int val)
3133 {
3134 	dhd_info_t *dhd = dhdp->info;
3135 	dhd_if_t *ifp;
3136 	ASSERT(idx < DHD_MAX_IFS);
3137 	ifp = dhd->iflist[idx];
3138 	ifp->wmf_psta_disable = val;
3139 	return 0;
3140 }
3141 #endif /* DHD_WMF */
3142 
3143 #ifdef DHD_PSTA
3144 /* Get psta/psr configuration */
3145 int dhd_get_psta_mode(dhd_pub_t *dhdp)
3146 {
3147 	dhd_info_t *dhd = dhdp->info;
3148 	return (int)dhd->psta_mode;
3149 }
3150 /* Set psta/psr configuration */
3151 int dhd_set_psta_mode(dhd_pub_t *dhdp, uint32 val)
3152 {
3153 	dhd_info_t *dhd = dhdp->info;
3154 	dhd->psta_mode = val;
3155 	return 0;
3156 }
3157 #endif /* DHD_PSTA */
3158 
3159 #if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER))
3160 static void
3161 dhd_update_rx_pkt_chainable_state(dhd_pub_t* dhdp, uint32 idx)
3162 {
3163 	dhd_info_t *dhd = dhdp->info;
3164 	dhd_if_t *ifp;
3165 
3166 	ASSERT(idx < DHD_MAX_IFS);
3167 
3168 	ifp = dhd->iflist[idx];
3169 
3170 	if (
3171 #ifdef DHD_L2_FILTER
3172 		(ifp->block_ping) ||
3173 #endif
3174 #ifdef DHD_WET
3175 		(dhd->wet_mode) ||
3176 #endif
3177 #ifdef DHD_MCAST_REGEN
3178 		(ifp->mcast_regen_bss_enable) ||
3179 #endif
3180 		FALSE) {
3181 		ifp->rx_pkt_chainable = FALSE;
3182 	}
3183 }
3184 #endif /* DHD_WET || DHD_MCAST_REGEN || DHD_L2_FILTER */
3185 
3186 #ifdef DHD_WET
3187 /* Get wet configuration */
3188 int dhd_get_wet_mode(dhd_pub_t *dhdp)
3189 {
3190 	dhd_info_t *dhd = dhdp->info;
3191 	return (int)dhd->wet_mode;
3192 }
3193 
3194 /* Set wet configuration */
3195 int dhd_set_wet_mode(dhd_pub_t *dhdp, uint32 val)
3196 {
3197 	dhd_info_t *dhd = dhdp->info;
3198 	dhd->wet_mode = val;
3199 	dhd_update_rx_pkt_chainable_state(dhdp, 0);
3200 	return 0;
3201 }
3202 #endif /* DHD_WET */
3203 
3204 #if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
3205 int32 dhd_role_to_nl80211_iftype(int32 role)
3206 {
3207 	switch (role) {
3208 	case WLC_E_IF_ROLE_STA:
3209 		return NL80211_IFTYPE_STATION;
3210 	case WLC_E_IF_ROLE_AP:
3211 		return NL80211_IFTYPE_AP;
3212 	case WLC_E_IF_ROLE_WDS:
3213 		return NL80211_IFTYPE_WDS;
3214 	case WLC_E_IF_ROLE_P2P_GO:
3215 		return NL80211_IFTYPE_P2P_GO;
3216 	case WLC_E_IF_ROLE_P2P_CLIENT:
3217 		return NL80211_IFTYPE_P2P_CLIENT;
3218 	case WLC_E_IF_ROLE_IBSS:
3219 	case WLC_E_IF_ROLE_NAN:
3220 		return NL80211_IFTYPE_ADHOC;
3221 	default:
3222 		return NL80211_IFTYPE_UNSPECIFIED;
3223 	}
3224 }
3225 #endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
3226 
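/** Deferred-work handler for interface-add events: creates and registers the
 * host net_device for the ifidx/bssidx announced by the dongle.
 */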
3227 static void
3228 dhd_ifadd_event_handler(void *handle, void *event_info, u8 event)
3229 {
3230 	dhd_info_t *dhd = handle;
3231 	dhd_if_event_t *if_event = event_info;
3232 	int ifidx, bssidx;
3233 	int ret = 0;
3234 #if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
3235 	struct wl_if_event_info info;
3236 #if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG)
3237 	struct net_device *ndev = NULL;
3238 #endif
3239 #else
3240 	struct net_device *ndev;
3241 #endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
3242 #ifdef DHD_AWDL
3243 	bool is_awdl_iface = FALSE;
3244 #endif /* DHD_AWDL */
3245 
3246 	BCM_REFERENCE(ret);
3247 	if (event != DHD_WQ_WORK_IF_ADD) {
3248 		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
3249 		return;
3250 	}
3251 
3252 	if (!dhd) {
3253 		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
3254 		return;
3255 	}
3256 
3257 	if (!if_event) {
3258 		DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
3259 		return;
3260 	}
3261 
3262 	dhd_net_if_lock_local(dhd);
3263 	DHD_OS_WAKE_LOCK(&dhd->pub);
3264 
3265 	ifidx = if_event->event.ifidx;
3266 	bssidx = if_event->event.bssidx;
3267 	DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx));
3268 
3269 #ifdef DHD_AWDL
3270 	if (if_event->event.opcode == WLC_E_IF_ADD &&
3271 		if_event->event.role == WLC_E_IF_ROLE_AWDL) {
3272 		dhd->pub.awdl_ifidx = ifidx;
3273 		is_awdl_iface = TRUE;
3274 	}
3275 #endif /* DHD_AWDL */
3276 
3277 #if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
3278 	if (if_event->event.ifidx > 0) {
3279 		u8 *mac_addr;
3280 		bzero(&info, sizeof(info));
3281 		info.ifidx = ifidx;
3282 		info.bssidx = bssidx;
3283 		info.role = if_event->event.role;
3284 		strlcpy(info.name, if_event->name, sizeof(info.name));
3285 		if (is_valid_ether_addr(if_event->mac)) {
3286 			mac_addr = if_event->mac;
3287 		} else {
3288 			mac_addr = NULL;
3289 		}
3290 
3291 #ifdef WLEASYMESH
3292 		if ((ndev = wl_cfg80211_post_ifcreate(dhd->pub.info->iflist[0]->net,
3293 			&info, mac_addr, if_event->name, true)) == NULL)
3294 #else
3295 		if (wl_cfg80211_post_ifcreate(dhd->pub.info->iflist[0]->net,
3296 			&info, mac_addr, NULL, true) == NULL)
3297 #endif
3298 		{
3299 			/* Do the post interface create ops */
3300 			DHD_ERROR(("Post ifcreate ops failed. Returning \n"));
3301 			ret = BCME_ERROR;
3302 			goto done;
3303 		}
3304 	}
3305 #else
3306 	/* This path is for non-android case */
3307 	/* The interface name in host and in event msg are same */
3308 	/* if name in event msg is used to create dongle if list on host */
3309 	ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name,
3310 		if_event->mac, bssidx, TRUE, if_event->name);
3311 	if (!ndev) {
3312 		DHD_ERROR(("%s: net device alloc failed  \n", __FUNCTION__));
3313 		ret = BCME_NOMEM;
3314 		goto done;
3315 	}
3316 
3317 	ret = dhd_register_if(&dhd->pub, ifidx, TRUE);
3318 	if (ret != BCME_OK) {
3319 		DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__));
3320 		dhd_remove_if(&dhd->pub, ifidx, TRUE);
3321 		goto done;
3322 	}
3323 
3324 #endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
3325 
3326 #ifndef PCIE_FULL_DONGLE
3327 	/* Turn on AP isolation in the firmware for interfaces operating in AP mode */
3328 	if (FW_SUPPORTED((&dhd->pub), ap) && (if_event->event.role != WLC_E_IF_ROLE_STA)) {
3329 		uint32 var_int =  1;
3330 		ret = dhd_iovar(&dhd->pub, ifidx, "ap_isolate", (char *)&var_int, sizeof(var_int),
3331 				NULL, 0, TRUE);
3332 		if (ret != BCME_OK) {
3333 			DHD_ERROR(("%s: Failed to set ap_isolate to dongle\n", __FUNCTION__));
3334 			dhd_remove_if(&dhd->pub, ifidx, TRUE);
3335 		}
3336 	}
3337 #endif /* PCIE_FULL_DONGLE */
3338 
3339 done:
3340 #ifdef DHD_AWDL
3341 	if (ret != BCME_OK && is_awdl_iface) {
3342 		dhd->pub.awdl_ifidx = 0;
3343 	}
3344 #endif /* DHD_AWDL */
3345 
3346 	MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
3347 #if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG)
3348 	if (dhd->pub.info->iflist[ifidx]) {
3349 		dhd_bridge_dev_set(dhd, ifidx, ndev);
3350     }
3351 #endif /* defiend(WLDWDS) && defined(FOURADDR_AUTO_BRG) */
3352 
3353 	DHD_OS_WAKE_UNLOCK(&dhd->pub);
3354 	dhd_net_if_unlock_local(dhd);
3355 }
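
/*
 * Note on the pattern above: interface add/del events arrive in event-rx
 * context, where registering a net_device is not allowed (it can sleep),
 * so the driver defers the work to a workqueue. A minimal, hypothetical
 * sketch of how the event side would hand off to dhd_ifadd_event_handler()
 * (mirroring the dhd_deferred_schedule_work() calls used elsewhere in this
 * file; the real call site lives in the event-processing path):
 *
 *	dhd_if_event_t *if_event = MALLOC(dhdp->osh, sizeof(*if_event));
 *	if (if_event) {
 *		memcpy(&if_event->event, ifevent, sizeof(if_event->event));
 *		dhd_deferred_schedule_work(dhd->dhd_deferred_wq, if_event,
 *			DHD_WQ_WORK_IF_ADD, dhd_ifadd_event_handler,
 *			DHD_WQ_WORK_PRIORITY_LOW);
 *	}
 *
 * The handler owns and frees the event payload (see the MFREE above).
 */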

static void
dhd_ifdel_event_handler(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	int ifidx;
	dhd_if_event_t *if_event = event_info;

	if (event != DHD_WQ_WORK_IF_DEL) {
		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
		return;
	}

	if (!dhd) {
		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
		return;
	}

	if (!if_event) {
		DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
		return;
	}

	dhd_net_if_lock_local(dhd);
	DHD_OS_WAKE_LOCK(&dhd->pub);

	ifidx = if_event->event.ifidx;
	DHD_TRACE(("Removing interface with idx %d\n", ifidx));
#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG)
	if (dhd->pub.info->iflist[ifidx]) {
		dhd_bridge_dev_set(dhd, ifidx, NULL);
	}
#endif /* defined(WLDWDS) && defined(FOURADDR_AUTO_BRG) */

	if (!dhd->pub.info->iflist[ifidx]) {
		/* No matching netdev found */
		DHD_ERROR(("Netdev not found! Do nothing.\n"));
		goto done;
	}
#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
	if (if_event->event.ifidx > 0) {
		/* Do the post interface del ops */
		if (wl_cfg80211_post_ifdel(dhd->pub.info->iflist[ifidx]->net,
				true, if_event->event.ifidx) != 0) {
			DHD_TRACE(("Post ifdel ops failed. Returning \n"));
			goto done;
		}
	}
#else
	/* For non-cfg80211 drivers */
	dhd_remove_if(&dhd->pub, ifidx, TRUE);
#ifdef DHD_AWDL
	if (if_event->event.opcode == WLC_E_IF_DEL &&
		if_event->event.role == WLC_E_IF_ROLE_AWDL) {
		dhd->pub.awdl_ifidx = 0;
	}
#endif /* DHD_AWDL */

#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */

done:
	MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	dhd_net_if_unlock_local(dhd);
}

#ifdef DHD_UPDATE_INTF_MAC
static void
dhd_ifupdate_event_handler(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	int ifidx;
	dhd_if_event_t *if_event = event_info;

	if (event != DHD_WQ_WORK_IF_UPDATE) {
		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
		return;
	}

	if (!dhd) {
		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
		return;
	}

	if (!if_event) {
		DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
		return;
	}

	dhd_net_if_lock_local(dhd);
	DHD_OS_WAKE_LOCK(&dhd->pub);

	ifidx = if_event->event.ifidx;
	DHD_TRACE(("%s: Update interface with idx %d\n", __FUNCTION__, ifidx));

	dhd_op_if_update(&dhd->pub, ifidx);

	MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));

	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	dhd_net_if_unlock_local(dhd);
}

int dhd_op_if_update(dhd_pub_t *dhdpub, int ifidx)
{
	dhd_info_t *dhdinfo = NULL;
	dhd_if_t *ifp = NULL;
	int ret = 0;
	char buf[128];

	if ((dhdpub == NULL) || (dhdpub->info == NULL)) {
		DHD_ERROR(("%s: *** DHD handler is NULL!\n", __FUNCTION__));
		return -1;
	} else {
		dhdinfo = (dhd_info_t *)dhdpub->info;
		ifp = dhdinfo->iflist[ifidx];
		if (ifp == NULL) {
			DHD_ERROR(("%s: *** ifp handler is NULL!\n", __FUNCTION__));
			return -2;
		}
	}

	DHD_TRACE(("%s: idx %d\n", __FUNCTION__, ifidx));
	// Get the MAC address from the firmware
	strcpy(buf, "cur_etheraddr");
	ret = dhd_wl_ioctl_cmd(&dhdinfo->pub, WLC_GET_VAR, buf, sizeof(buf), FALSE, ifp->idx);
	if (ret < 0) {
		DHD_ERROR(("Failed to update the MAC address for itf=%s, ret=%d\n", ifp->name, ret));
		// bump the last octet to avoid a collision
		dhdinfo->iflist[ifp->idx]->mac_addr[5] += 1;
		// force a locally administered address
		ETHER_SET_LOCALADDR(&dhdinfo->iflist[ifp->idx]->mac_addr);
	} else {
		DHD_EVENT(("Got mac for itf %s, idx %d, MAC=%02X:%02X:%02X:%02X:%02X:%02X\n",
		           ifp->name, ifp->idx,
		           (unsigned char)buf[0], (unsigned char)buf[1], (unsigned char)buf[2],
		           (unsigned char)buf[3], (unsigned char)buf[4], (unsigned char)buf[5]));
		memcpy(dhdinfo->iflist[ifp->idx]->mac_addr, buf, ETHER_ADDR_LEN);
		if (dhdinfo->iflist[ifp->idx]->net) {
			memcpy(dhdinfo->iflist[ifp->idx]->net->dev_addr, buf, ETHER_ADDR_LEN);
		}
	}

	return ret;
}
#endif /* DHD_UPDATE_INTF_MAC */
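
/*
 * Worked example for the fallback path above (illustrative only): if the
 * firmware query fails while the stale address is 00:90:4C:11:22:33, the
 * driver bumps the last octet to ...:34 and then sets the locally
 * administered bit (0x02 in the first octet), yielding 02:90:4C:11:22:34.
 * That keeps the address unique on the host without claiming a vendor OUI.
 */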

static void
dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	dhd_if_t *ifp = event_info;

	if (event != DHD_WQ_WORK_SET_MAC) {
		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
		return;
	}

	if (!dhd) {
		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
		return;
	}

	dhd_net_if_lock_local(dhd);
	DHD_OS_WAKE_LOCK(&dhd->pub);

	// terence 20160907: fix for not being able to set the MAC when wlan0 is down
	if (ifp == NULL || !ifp->set_macaddress) {
		goto done;
	}
	if (!dhd->pub.up) {
		DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
		goto done;
	}

	ifp->set_macaddress = FALSE;

#ifdef DHD_NOTIFY_MAC_CHANGED
	rtnl_lock();
#endif /* DHD_NOTIFY_MAC_CHANGED */

	if (_dhd_set_mac_address(dhd, ifp->idx, ifp->mac_addr, TRUE) == 0)
		DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
	else
		DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));

#ifdef DHD_NOTIFY_MAC_CHANGED
	rtnl_unlock();
#endif /* DHD_NOTIFY_MAC_CHANGED */

done:
	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	dhd_net_if_unlock_local(dhd);
}

static void
dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	int ifidx = (int)((long int)event_info);
	dhd_if_t *ifp = NULL;

	if (event != DHD_WQ_WORK_SET_MCAST_LIST) {
		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
		return;
	}

	if (!dhd) {
		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
		return;
	}

	dhd_net_if_lock_local(dhd);
	DHD_OS_WAKE_LOCK(&dhd->pub);

	ifp = dhd->iflist[ifidx];

	if (ifp == NULL || !dhd->pub.up) {
		DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
		goto done;
	}

	ifidx = ifp->idx;

#ifdef MCAST_LIST_ACCUMULATION
	ifidx = 0;
#endif /* MCAST_LIST_ACCUMULATION */

	_dhd_set_multicast_list(dhd, ifidx);
	DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__, ifidx));

done:
	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	dhd_net_if_unlock_local(dhd);
}

static int
dhd_set_mac_address(struct net_device *dev, void *addr)
{
	int ret = 0;

	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	struct sockaddr *sa = (struct sockaddr *)addr;
	int ifidx;
	dhd_if_t *dhdif;
#ifdef WL_STATIC_IF
	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
#endif /* WL_STATIC_IF */
	dhd_pub_t *dhdp = &dhd->pub;

	BCM_REFERENCE(ifidx);

	DHD_TRACE(("%s \n", __func__));

	dhdif = dhd_get_ifp_by_ndev(dhdp, dev);
	if (!dhdif) {
		return -ENODEV;
	}
	ifidx = dhdif->idx;
	dhd_net_if_lock_local(dhd);
	memcpy(dhdif->mac_addr, sa->sa_data, ETHER_ADDR_LEN);
	dhdif->set_macaddress = TRUE;
	dhd_net_if_unlock_local(dhd);

	WL_MSG(dev->name, "iftype = %d macaddr = %pM\n",
		dev->ieee80211_ptr->iftype, dhdif->mac_addr);
#ifdef WL_CFG80211
	/* Check wdev->iftype for the role */
	if (wl_cfg80211_macaddr_sync_reqd(dev)) {
		/* The supplicant and certain user-space applications expect the
		 * MAC address to be set once this context returns, so set it
		 * from the same context.
		 */
#ifdef WL_STATIC_IF
		if (IS_CFG80211_STATIC_IF(cfg, dev) && !(dev->flags & IFF_UP)) {
			/* In the softap case, the MAC address is applied before the
			 * interface is up, so cur_etheraddr can't be set at this stage
			 * (no fw iface available). Store the address and return; the
			 * macaddr will be applied from the interface create context.
			 */
			(void)memcpy_s(dev->dev_addr, ETH_ALEN, dhdif->mac_addr, ETH_ALEN);
#ifdef DHD_NOTIFY_MAC_CHANGED
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
			dev_open(dev, NULL);
#else
			dev_open(dev);
#endif
#endif /* DHD_NOTIFY_MAC_CHANGED */
			return ret;
		}
#endif /* WL_STATIC_IF */
		wl_cfg80211_handle_macaddr_change(dev, dhdif->mac_addr);
		return _dhd_set_mac_address(dhd, ifidx, dhdif->mac_addr, TRUE);
	}
#endif /* WL_CFG80211 */

	dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhdif, DHD_WQ_WORK_SET_MAC,
		dhd_set_mac_addr_handler, DHD_WQ_WORK_PRIORITY_LOW);
	return ret;
}
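
/*
 * Usage note (illustrative): this function is registered as the netdev's
 * ndo_set_mac_address callback elsewhere in this driver, so it runs when
 * user space changes the address, e.g.
 *
 *	ip link set dev wlan0 address 02:11:22:33:44:55
 *
 * Roles that must stay in sync with cfg80211 apply the address inline;
 * everything else is deferred to dhd_set_mac_addr_handler() on the shared
 * workqueue.
 */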

static void
dhd_set_multicast_list(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	int ifidx;

	ifidx = dhd_net2idx(dhd, dev);
	if (ifidx == DHD_BAD_IF)
		return;

	dhd->iflist[ifidx]->set_multicast = TRUE;
	dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)((long int)ifidx),
		DHD_WQ_WORK_SET_MCAST_LIST, dhd_set_mcast_list_handler, DHD_WQ_WORK_PRIORITY_LOW);

	// terence 20160907: fix for not being able to set the MAC when wlan0 is down
	dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhd->iflist[ifidx],
		DHD_WQ_WORK_SET_MAC, dhd_set_mac_addr_handler, DHD_WQ_WORK_PRIORITY_LOW);
}

#ifdef DHD_UCODE_DOWNLOAD
/* Get ucode path */
char *
dhd_get_ucode_path(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = dhdp->info;
	return dhd->uc_path;
}
#endif /* DHD_UCODE_DOWNLOAD */

#ifdef PROP_TXSTATUS
int
dhd_os_wlfc_block(dhd_pub_t *pub)
{
	dhd_info_t *di = (dhd_info_t *)(pub->info);
	ASSERT(di != NULL);
	/* terence 20161229: don't take the spin lock if proptx is not enabled */
	if (disable_proptx)
		return 1;
#ifdef BCMDBUS
	spin_lock_irqsave(&di->wlfc_spinlock, di->wlfc_lock_flags);
#else
	spin_lock_bh(&di->wlfc_spinlock);
#endif /* BCMDBUS */
	return 1;
}

int
dhd_os_wlfc_unblock(dhd_pub_t *pub)
{
	dhd_info_t *di = (dhd_info_t *)(pub->info);

	ASSERT(di != NULL);
	/* terence 20161229: don't take the spin lock if proptx is not enabled */
	if (disable_proptx)
		return 1;
#ifdef BCMDBUS
	spin_unlock_irqrestore(&di->wlfc_spinlock, di->wlfc_lock_flags);
#else
	spin_unlock_bh(&di->wlfc_spinlock);
#endif /* BCMDBUS */
	return 1;
}

#endif /* PROP_TXSTATUS */
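
/*
 * Locking sketch (illustrative): callers bracket access to shared
 * proptxstatus state with the block/unblock pair, e.g.
 *
 *	dhd_os_wlfc_block(dhdp);
 *	... read or modify wlfc state ...
 *	dhd_os_wlfc_unblock(dhdp);
 *
 * The BCMDBUS build uses the irq-safe spin_lock_irqsave() variant, which
 * suggests the lock can be taken from hard-irq context on that bus; other
 * builds get by with the cheaper _bh variant, which only masks softirqs.
 */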

#if defined(WL_MONITOR) && defined(BCMSDIO)
static void
dhd_rx_mon_pkt_sdio(dhd_pub_t *dhdp, void *pkt, int ifidx);
bool
dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx);
#endif /* WL_MONITOR && BCMSDIO */

/* This routine does not support the packet chain feature. It is currently
 * tested with the proxy ARP feature.
 */
int dhd_sendup(dhd_pub_t *dhdp, int ifidx, void *p)
{
	struct sk_buff *skb;
	void *skbhead = NULL;
	void *skbprev = NULL;
	dhd_if_t *ifp;
	ASSERT(!PKTISCHAINED(p));
	skb = PKTTONATIVE(dhdp->osh, p);

	ifp = dhdp->info->iflist[ifidx];
	skb->dev = ifp->net;
	skb->protocol = eth_type_trans(skb, skb->dev);

	if (in_interrupt()) {
		bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
			__FUNCTION__, __LINE__);
		netif_rx(skb);
	} else {
		if (dhdp->info->rxthread_enabled) {
			if (!skbhead) {
				skbhead = skb;
			} else {
				PKTSETNEXT(dhdp->osh, skbprev, skb);
			}
			skbprev = skb;
		} else {
			/* If the receive is not processed inside an ISR,
			 * the softirqd must be woken explicitly to service
			 * the NET_RX_SOFTIRQ.  In 2.6 kernels, this is handled
			 * by netif_rx_ni(), but in earlier kernels, we need
			 * to do it manually.
			 */
			bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
				__FUNCTION__, __LINE__);
#if defined(WL_MONITOR) && defined(BCMSDIO)
			if (dhd_monitor_enabled(dhdp, ifidx))
				dhd_rx_mon_pkt_sdio(dhdp, skb, ifidx);
			else
#endif /* WL_MONITOR && BCMSDIO */
			netif_rx_ni(skb);
		}
	}

	if (dhdp->info->rxthread_enabled && skbhead)
		dhd_sched_rxf(dhdp, skbhead);

	return BCME_OK;
}
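
/*
 * Delivery-path note (illustrative): dhd_sendup() picks the cheapest safe
 * handoff into the network stack. From hard-irq context it must use
 * netif_rx(), which only queues the skb and raises NET_RX_SOFTIRQ; from
 * process context it uses netif_rx_ni() (or defers to the rx thread when
 * one is enabled) so the softirq actually gets serviced. A hypothetical
 * proxy-ARP responder built on this helper would simply do:
 *
 *	dhd_sendup(dhdp, ifidx, reply_pkt);	(reply_pkt: an osl packet)
 */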

void
dhd_handle_pktdata(dhd_pub_t *dhdp, int ifidx, void *pkt, uint8 *pktdata, uint32 pktid,
	uint32 pktlen, uint16 *pktfate, uint8 *dhd_udr, bool tx, int pkt_wake, bool pkt_log)
{
	struct ether_header *eh;
	uint16 ether_type;
	uint32 pkthash = 0;
	uint8 pkt_type = PKT_TYPE_DATA;

	if (!pktdata || pktlen < ETHER_HDR_LEN) {
		return;
	}

	eh = (struct ether_header *)pktdata;
	ether_type = ntoh16(eh->ether_type);

	/* Check packet type */
	if (dhd_check_ip_prot(pktdata, ether_type)) {
		if (dhd_check_dhcp(pktdata)) {
			pkt_type = PKT_TYPE_DHCP;
		} else if (dhd_check_icmp(pktdata)) {
			pkt_type = PKT_TYPE_ICMP;
		} else if (dhd_check_dns(pktdata)) {
			pkt_type = PKT_TYPE_DNS;
		}
	} else if (dhd_check_arp(pktdata, ether_type)) {
		pkt_type = PKT_TYPE_ARP;
	} else if (ether_type == ETHER_TYPE_802_1X) {
		pkt_type = PKT_TYPE_EAP;
	}

#ifdef DHD_SBN
	/* Set UDR based on packet type */
	if (dhd_udr && (pkt_type == PKT_TYPE_DHCP ||
		pkt_type == PKT_TYPE_DNS ||
		pkt_type == PKT_TYPE_ARP)) {
		*dhd_udr = TRUE;
	}
#endif /* DHD_SBN */

#ifdef DHD_PKT_LOGGING
#ifdef DHD_SKIP_PKTLOGGING_FOR_DATA_PKTS
	if (pkt_type != PKT_TYPE_DATA)
#endif
	{
		if (pkt_log) {
			if (tx) {
				if (pktfate) {
					/* Tx status */
					DHD_PKTLOG_TXS(dhdp, pkt, pktdata, pktid, *pktfate);
				} else {
					/* Tx packet */
					DHD_PKTLOG_TX(dhdp, pkt, pktdata, pktid);
				}
				pkthash = __dhd_dbg_pkt_hash((uintptr_t)pkt, pktid);
			} else {
				struct sk_buff *skb = (struct sk_buff *)pkt;
				if (pkt_wake) {
					DHD_PKTLOG_WAKERX(dhdp, skb, pktdata);
				} else {
					DHD_PKTLOG_RX(dhdp, skb, pktdata);
				}
			}
		}
	}
#endif /* DHD_PKT_LOGGING */

	/* Dump packet data */
	switch (pkt_type) {
		case PKT_TYPE_DHCP:
			dhd_dhcp_dump(dhdp, ifidx, pktdata, tx, &pkthash, pktfate);
			break;
		case PKT_TYPE_ICMP:
			dhd_icmp_dump(dhdp, ifidx, pktdata, tx, &pkthash, pktfate);
			break;
		case PKT_TYPE_DNS:
			dhd_dns_dump(dhdp, ifidx, pktdata, tx, &pkthash, pktfate);
			break;
		case PKT_TYPE_ARP:
			dhd_arp_dump(dhdp, ifidx, pktdata, tx, &pkthash, pktfate);
			break;
		case PKT_TYPE_EAP:
			dhd_dump_eapol_message(dhdp, ifidx, pktdata, pktlen, tx, &pkthash, pktfate);
			break;
		default:
			break;
	}
}
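
/*
 * Classification example (illustrative): an outbound DHCP DISCOVER is an
 * IPv4/UDP frame, so dhd_check_ip_prot() matches first and dhd_check_dhcp()
 * tags it PKT_TYPE_DHCP (the helpers' exact matching rules live in the
 * pktdump support code; DHCP is typically recognized by UDP ports 67/68).
 * It is then logged via DHD_PKTLOG_TX() and pretty-printed by
 * dhd_dhcp_dump(). A plain TCP data frame stays PKT_TYPE_DATA and, with
 * DHD_SKIP_PKTLOGGING_FOR_DATA_PKTS defined, skips logging entirely.
 */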

int
BCMFASTPATH(__dhd_sendpkt)(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
{
	int ret = BCME_OK;
	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
	struct ether_header *eh = NULL;
	uint8 pkt_flow_prio;

#if (defined(DHD_L2_FILTER) || (defined(BCM_ROUTER_DHD) && defined(QOS_MAP_SET)))
	dhd_if_t *ifp = dhd_get_ifp(dhdp, ifidx);
#endif /* DHD_L2_FILTER || (BCM_ROUTER_DHD && QOS_MAP_SET) */

	/* Reject if down */
	if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
		/* free the packet here since the caller won't */
		PKTCFREE(dhdp->osh, pktbuf, TRUE);
		return -ENODEV;
	}

#ifdef PCIE_FULL_DONGLE
	if (dhdp->busstate == DHD_BUS_SUSPEND) {
		DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
		PKTCFREE(dhdp->osh, pktbuf, TRUE);
		return NETDEV_TX_BUSY;
	}
#endif /* PCIE_FULL_DONGLE */

	/* Reject if pktlen > MAX_MTU_SZ */
	if (PKTLEN(dhdp->osh, pktbuf) > MAX_MTU_SZ) {
		/* free the packet here since the caller won't */
		dhdp->tx_big_packets++;
		PKTCFREE(dhdp->osh, pktbuf, TRUE);
		return BCME_ERROR;
	}

#ifdef DHD_L2_FILTER
	/* If dhcp_unicast is enabled, convert broadcast DHCP ACK/REPLY
	 * packets to unicast.
	 */
	if (ifp->dhcp_unicast) {
		uint8 *mac_addr;
		uint8 *ehptr = NULL;
		int ret;
		ret = bcm_l2_filter_get_mac_addr_dhcp_pkt(dhdp->osh, pktbuf, ifidx, &mac_addr);
		if (ret == BCME_OK) {
			/* if the given mac address has a valid entry in the sta list,
			 * copy the given mac address and return with BCME_OK
			 */
			if (dhd_find_sta(dhdp, ifidx, mac_addr)) {
				ehptr = PKTDATA(dhdp->osh, pktbuf);
				bcopy(mac_addr, ehptr + ETHER_DEST_OFFSET, ETHER_ADDR_LEN);
			}
		}
	}

	if (ifp->grat_arp && DHD_IF_ROLE_AP(dhdp, ifidx)) {
		if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
			PKTCFREE(dhdp->osh, pktbuf, TRUE);
			return BCME_ERROR;
		}
	}

	if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
		ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, TRUE);

		/* Drop the packet if the l2 filter has already processed it;
		 * otherwise continue with the normal path
		 */
		if (ret == BCME_OK) {
			PKTCFREE(dhdp->osh, pktbuf, TRUE);
			return BCME_ERROR;
		}
	}
#endif /* DHD_L2_FILTER */
	/* Update multicast statistic */
	if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) {
		uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
		eh = (struct ether_header *)pktdata;

		if (ETHER_ISMULTI(eh->ether_dhost))
			dhdp->tx_multicast++;
		if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X) {
#ifdef DHD_LOSSLESS_ROAMING
			uint8 prio = (uint8)PKTPRIO(pktbuf);

			/* back up 802.1x's priority */
			dhdp->prio_8021x = prio;
#endif /* DHD_LOSSLESS_ROAMING */
			DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED);
			atomic_inc(&dhd->pend_8021x_cnt);
#if defined(WL_CFG80211) && defined(WL_WPS_SYNC)
			wl_handle_wps_states(dhd_idx2net(dhdp, ifidx),
				pktdata, PKTLEN(dhdp->osh, pktbuf), TRUE);
#endif /* WL_CFG80211 && WL_WPS_SYNC */
		}
		dhd_dump_pkt(dhdp, ifidx, pktdata,
			(uint32)PKTLEN(dhdp->osh, pktbuf), TRUE, NULL, NULL);
	} else {
		PKTCFREE(dhdp->osh, pktbuf, TRUE);
		return BCME_ERROR;
	}

#if (defined(BCM_ROUTER_DHD) && defined(QOS_MAP_SET))
	if (ifp->qosmap_up_table_enable) {
		pktsetprio_qms(pktbuf, ifp->qosmap_up_table, FALSE);
	}
	else
#endif
	{
		/* Look into the packet and update the packet priority */
#ifndef PKTPRIO_OVERRIDE
		/* XXX RB:6270 Ignore skb->priority from TCP/IP stack */
		if (PKTPRIO(pktbuf) == 0)
#endif /* !PKTPRIO_OVERRIDE */
		{
#if (!defined(BCM_ROUTER_DHD) && defined(QOS_MAP_SET))
			pktsetprio_qms(pktbuf, wl_get_up_table(dhdp, ifidx), FALSE);
#else
			/* For LLR, pkt prio will be changed to 7(NC) here */
			pktsetprio(pktbuf, FALSE);
#endif /* QOS_MAP_SET */
		}
#ifndef PKTPRIO_OVERRIDE
		else {
			/* Some protocols like OZMO use priority values from 256..263.
			 * These are magic values that indicate a specific 802.1d
			 * priority; make sure the priority field stays in range 0..7
			 * (e.g. 261 & 0x7 == 5, i.e. 802.1d priority 5).
			 */
			PKTSETPRIO(pktbuf, PKTPRIO(pktbuf) & 0x7);
		}
#endif /* !PKTPRIO_OVERRIDE */
	}

#if defined(BCM_ROUTER_DHD)
	traffic_mgmt_pkt_set_prio(dhdp, pktbuf);

#endif /* BCM_ROUTER_DHD */

	BCM_REFERENCE(pkt_flow_prio);
	/* Intercept and create socket-level statistics */
	/*
	 * TODO: Somehow moving this code block above the pktsetprio code
	 * resets the priority back to 0, but this does not happen for
	 * packets generated from iperf using the -S option. Can't understand why.
	 */
	dhd_update_sock_flows(dhd, pktbuf);

#ifdef SUPPORT_SET_TID
	dhd_set_tid_based_on_uid(dhdp, pktbuf);
#endif	/* SUPPORT_SET_TID */

#ifdef PCIE_FULL_DONGLE
	/*
	 * Look up the per-interface hash table for a matching flowring. If one
	 * is not available, allocate a unique flowid and add a flowring entry.
	 * The found or newly created flowid is placed into the pktbuf's tag.
	 */

#ifdef DHD_TX_PROFILE
	if (dhdp->tx_profile_enab && dhdp->num_profiles > 0 &&
		dhd_protocol_matches_profile(PKTDATA(dhdp->osh, pktbuf),
		PKTLEN(dhdp->osh, pktbuf), dhdp->protocol_filters,
		dhdp->host_sfhllc_supported)) {
		/* we only have support for one tx_profile at the moment */

		/* tagged packets must be put into TID 6 */
		pkt_flow_prio = PRIO_8021D_VO;
	} else
#endif /* defined(DHD_TX_PROFILE) */
	{
		pkt_flow_prio = dhdp->flow_prio_map[(PKTPRIO(pktbuf))];
	}

	ret = dhd_flowid_update(dhdp, ifidx, pkt_flow_prio, pktbuf);
	if (ret != BCME_OK) {
		PKTCFREE(dhd->pub.osh, pktbuf, TRUE);
		return ret;
	}
#endif /* PCIE_FULL_DONGLE */
	/* terence 20150901: Micky's addition to adjust the 802.1X priority */
	/* Set the 802.1X packet with the highest priority 7 */
	if (dhdp->conf->pktprio8021x >= 0)
		pktset8021xprio(pktbuf, dhdp->conf->pktprio8021x);

#ifdef PROP_TXSTATUS
	if (dhd_wlfc_is_supported(dhdp)) {
		/* store the interface ID */
		DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx);

		/* store destination MAC in the tag as well */
		DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost);

		/* decide which FIFO this packet belongs to */
		if (ETHER_ISMULTI(eh->ether_dhost))
			/* one additional queue index (highest AC + 1) is used for bc/mc queue */
			DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT);
		else
			DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf)));
	} else
#endif /* PROP_TXSTATUS */
	{
		/* If the protocol uses a data header, apply it */
		dhd_prot_hdrpush(dhdp, ifidx, pktbuf);
	}

	/* Use bus module to send data frame */
#ifdef PROP_TXSTATUS
	{
		if (dhd_wlfc_commit_packets(dhdp, (f_commitpkt_t)dhd_bus_txdata,
			dhdp->bus, pktbuf, TRUE) == WLFC_UNSUPPORTED) {
			/* non-proptxstatus way */
#ifdef BCMPCIE
			ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
#else
			ret = dhd_bus_txdata(dhdp->bus, pktbuf);
#endif /* BCMPCIE */
		}
	}
#else
#ifdef BCMPCIE
	ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
#else
	ret = dhd_bus_txdata(dhdp->bus, pktbuf);
#endif /* BCMPCIE */
#endif /* PROP_TXSTATUS */
#ifdef BCMDBUS
	if (ret)
		PKTCFREE(dhdp->osh, pktbuf, TRUE);
#endif /* BCMDBUS */

	return ret;
}
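
/*
 * Priority plumbing example (illustrative): a packet that leaves the stack
 * with skb->priority 5 keeps 802.1d priority 5, which WME_PRIO2AC() maps to
 * AC_VI; on PCIe full-dongle the same priority indexes flow_prio_map[] to
 * pick (or create) the flowring the packet is queued on. Broadcast and
 * multicast frames bypass the per-AC FIFOs and use the extra bc/mc queue
 * (index AC_COUNT) when proptxstatus is active.
 */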

int
BCMFASTPATH(dhd_sendpkt)(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
{
	int ret = 0;
	unsigned long flags;
	dhd_if_t *ifp;

	DHD_GENERAL_LOCK(dhdp, flags);
	ifp = dhd_get_ifp(dhdp, ifidx);
	if (!ifp || ifp->del_in_progress) {
		DHD_ERROR(("%s: ifp:%p del_in_progress:%d\n",
			__FUNCTION__, ifp, ifp ? ifp->del_in_progress : 0));
		DHD_GENERAL_UNLOCK(dhdp, flags);
		PKTCFREE(dhdp->osh, pktbuf, TRUE);
		return -ENODEV;
	}
	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
		DHD_ERROR(("%s: returning as busstate=%d\n",
			__FUNCTION__, dhdp->busstate));
		DHD_GENERAL_UNLOCK(dhdp, flags);
		PKTCFREE(dhdp->osh, pktbuf, TRUE);
		return -ENODEV;
	}
	DHD_IF_SET_TX_ACTIVE(ifp, DHD_TX_SEND_PKT);
	DHD_BUS_BUSY_SET_IN_SEND_PKT(dhdp);
	DHD_GENERAL_UNLOCK(dhdp, flags);

#ifdef DHD_PCIE_RUNTIMEPM
	if (dhdpcie_runtime_bus_wake(dhdp, FALSE, __builtin_return_address(0))) {
		DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
		PKTCFREE(dhdp->osh, pktbuf, TRUE);
		ret = -EBUSY;
		goto exit;
	}
#endif /* DHD_PCIE_RUNTIMEPM */

	DHD_GENERAL_LOCK(dhdp, flags);
	if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) {
		DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
			__FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
		DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp);
		DHD_IF_CLR_TX_ACTIVE(ifp, DHD_TX_SEND_PKT);
		dhd_os_tx_completion_wake(dhdp);
		dhd_os_busbusy_wake(dhdp);
		DHD_GENERAL_UNLOCK(dhdp, flags);
		PKTCFREE(dhdp->osh, pktbuf, TRUE);
		return -ENODEV;
	}
	DHD_GENERAL_UNLOCK(dhdp, flags);

	ret = __dhd_sendpkt(dhdp, ifidx, pktbuf);

#ifdef DHD_PCIE_RUNTIMEPM
exit:
#endif
	DHD_GENERAL_LOCK(dhdp, flags);
	DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp);
	DHD_IF_CLR_TX_ACTIVE(ifp, DHD_TX_SEND_PKT);
	dhd_os_tx_completion_wake(dhdp);
	dhd_os_busbusy_wake(dhdp);
	DHD_GENERAL_UNLOCK(dhdp, flags);
	return ret;
}

#ifdef DHD_MQ
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
static uint16
BCMFASTPATH(dhd_select_queue)(struct net_device *net, struct sk_buff *skb,
	void *accel_priv, select_queue_fallback_t fallback)
#else
static uint16
BCMFASTPATH(dhd_select_queue)(struct net_device *net, struct sk_buff *skb)
#endif /* LINUX_VERSION_CODE */
{
	dhd_info_t *dhd_info = DHD_DEV_INFO(net);
	dhd_pub_t *dhdp = &dhd_info->pub;
	uint16 prio = 0;

	BCM_REFERENCE(dhd_info);
	BCM_REFERENCE(dhdp);
	BCM_REFERENCE(prio);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
	if (mq_select_disable) {
		/* If driver-side queue selection is disabled via sysfs, call the
		 * kernel-supplied fallback function to select the queue, which is
		 * usually '__netdev_pick_tx()' in net/core/dev.c
		 */
		return fallback(net, skb);
	}
#endif /* LINUX_VERSION */

	prio = dhdp->flow_prio_map[skb->priority];
	if (prio < AC_COUNT)
		return prio;
	else
		return AC_BK;
}
#endif /* DHD_MQ */

netdev_tx_t
BCMFASTPATH(dhd_start_xmit)(struct sk_buff *skb, struct net_device *net)
{
	int ret;
	uint datalen;
	void *pktbuf;
	dhd_info_t *dhd = DHD_DEV_INFO(net);
	dhd_if_t *ifp = NULL;
	int ifidx;
	unsigned long flags;
#if !defined(BCM_ROUTER_DHD)
	uint8 htsfdlystat_sz = 0;
#endif /* ! BCM_ROUTER_DHD */
#ifdef DHD_WMF
	struct ether_header *eh;
	uint8 *iph;
#endif /* DHD_WMF */
#if defined(DHD_MQ) && defined(DHD_MQ_STATS)
	int qidx = 0;
	int cpuid = 0;
	int prio = 0;
#endif /* DHD_MQ && DHD_MQ_STATS */

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

#if defined(DHD_MQ) && defined(DHD_MQ_STATS)
	qidx = skb_get_queue_mapping(skb);
	/* In a non-preemptible context smp_processor_id() can be used directly;
	 * otherwise get_cpu()/put_cpu() must be used.
	 */
	if (!CAN_SLEEP()) {
		cpuid = smp_processor_id();
	} else {
		cpuid = get_cpu();
		put_cpu();
	}
	prio = dhd->pub.flow_prio_map[skb->priority];
	DHD_TRACE(("%s: Q idx = %d, CPU = %d, prio = %d \n", __FUNCTION__,
		qidx, cpuid, prio));
	dhd->pktcnt_qac_histo[qidx][prio]++;
	dhd->pktcnt_per_ac[prio]++;
	dhd->cpu_qstats[qidx][cpuid]++;
#endif /* DHD_MQ && DHD_MQ_STATS */

	if (dhd_query_bus_erros(&dhd->pub)) {
		return -ENODEV;
	}

	DHD_GENERAL_LOCK(&dhd->pub, flags);
	DHD_BUS_BUSY_SET_IN_TX(&dhd->pub);
	DHD_GENERAL_UNLOCK(&dhd->pub, flags);

#ifdef DHD_PCIE_RUNTIMEPM
	if (dhdpcie_runtime_bus_wake(&dhd->pub, FALSE, dhd_start_xmit)) {
		/* In order to avoid pkt loss, return NETDEV_TX_BUSY until run-time resumed. */
		/* stop the network queue temporarily until resume done */
		DHD_GENERAL_LOCK(&dhd->pub, flags);
		if (!dhdpcie_is_resume_done(&dhd->pub)) {
			dhd_bus_stop_queue(dhd->pub.bus);
		}
		DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
		dhd_os_busbusy_wake(&dhd->pub);
		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
		return NETDEV_TX_BUSY;
	}
#endif /* DHD_PCIE_RUNTIMEPM */

	DHD_GENERAL_LOCK(&dhd->pub, flags);
#ifdef BCMPCIE
	if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(&dhd->pub)) {
		DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
			__FUNCTION__, dhd->pub.busstate, dhd->pub.dhd_bus_busy_state));
		DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
#ifdef PCIE_FULL_DONGLE
		/* Stop tx queues if suspend is in progress */
		if (DHD_BUS_CHECK_ANY_SUSPEND_IN_PROGRESS(&dhd->pub)) {
			dhd_bus_stop_queue(dhd->pub.bus);
		}
#endif /* PCIE_FULL_DONGLE */
		dhd_os_busbusy_wake(&dhd->pub);
		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
		return NETDEV_TX_BUSY;
	}
#else
	if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(&dhd->pub)) {
		DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
			__FUNCTION__, dhd->pub.busstate, dhd->pub.dhd_bus_busy_state));
	}
#endif

	DHD_OS_WAKE_LOCK(&dhd->pub);

#if defined(DHD_HANG_SEND_UP_TEST)
	if (dhd->pub.req_hang_type == HANG_REASON_BUS_DOWN) {
		DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
		dhd->pub.busstate = DHD_BUS_DOWN;
	}
#endif /* DHD_HANG_SEND_UP_TEST */

	/* Reject if down */
	/* XXX kernel panic issue at first boot: rmmod without bringing the
	 * interface down generates an unnecessary hang event.
	 */
	if (dhd->pub.hang_was_sent || DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(&dhd->pub)) {
		DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
			__FUNCTION__, dhd->pub.up, dhd->pub.busstate));
		dhd_tx_stop_queues(net);
#if defined(OEM_ANDROID)
		/* Send Event when bus down detected during data session */
		if (dhd->pub.up && !dhd->pub.hang_was_sent && !DHD_BUS_CHECK_REMOVE(&dhd->pub)) {
			DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__));
			dhd->pub.hang_reason = HANG_REASON_BUS_DOWN;
			net_os_send_hang_message(net);
		}
#endif /* OEM_ANDROID */
		DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
		dhd_os_busbusy_wake(&dhd->pub);
		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
		DHD_OS_WAKE_UNLOCK(&dhd->pub);
		return NETDEV_TX_BUSY;
	}

	ifp = DHD_DEV_IFP(net);
	ifidx = DHD_DEV_IFIDX(net);
#ifdef DHD_BUZZZ_LOG_ENABLED
	BUZZZ_LOG(START_XMIT_BGN, 2, (uint32)ifidx, (uintptr)skb);
#endif /* DHD_BUZZZ_LOG_ENABLED */
	if (ifidx == DHD_BAD_IF) {
		DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx));
		dhd_tx_stop_queues(net);
		DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
		dhd_os_busbusy_wake(&dhd->pub);
		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
		DHD_OS_WAKE_UNLOCK(&dhd->pub);
		return NETDEV_TX_BUSY;
	}

	DHD_GENERAL_UNLOCK(&dhd->pub, flags);

	/* If tput test is in progress */
	if (dhd->pub.tput_data.tput_test_running) {
		return NETDEV_TX_BUSY;
	}

	ASSERT(ifidx == dhd_net2idx(dhd, net));
	ASSERT((ifp != NULL) && ((ifidx < DHD_MAX_IFS) && (ifp == dhd->iflist[ifidx])));

	bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);

	/* re-align the socket buffer if "skb->data" is at an odd address */
	if (((unsigned long)(skb->data)) & 0x1) {
		unsigned char *data = skb->data;
		uint32 length = skb->len;
		PKTPUSH(dhd->pub.osh, skb, 1);
		memmove(skb->data, data, length);
		PKTSETLEN(dhd->pub.osh, skb, length);
	}
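
	/* Worked example for the realignment above (illustrative): if skb->data
	 * sits at 0x...8861 (odd), PKTPUSH() grows the headroom by one byte so
	 * data starts at 0x...8860, memmove() slides the payload down by one
	 * byte, and PKTSETLEN() restores the length. This guarantees at least
	 * 2-byte alignment, typically wanted by bus DMA and header processing,
	 * at the cost of one copy.
	 */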

	datalen = PKTLEN(dhd->pub.osh, skb);

#ifdef HOST_TPUT_TEST
	dhd_os_sdlock_txq(&dhd->pub);
	dhd->pub.conf->net_len += datalen;
	dhd_os_sdunlock_txq(&dhd->pub);
	if ((dhd->pub.conf->data_drop_mode == XMIT_DROP) &&
			(PKTLEN(dhd->pub.osh, skb) > 500)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
#endif
	/* Make sure there's enough room for any header */
#if !defined(BCM_ROUTER_DHD)
	if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) {
		struct sk_buff *skb2;

		DHD_INFO(("%s: insufficient headroom\n",
		          dhd_ifname(&dhd->pub, ifidx)));
		dhd->pub.tx_realloc++;

		bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
		skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz);

		dev_kfree_skb(skb);
		if ((skb = skb2) == NULL) {
			DHD_ERROR(("%s: skb_realloc_headroom failed\n",
			           dhd_ifname(&dhd->pub, ifidx)));
			ret = -ENOMEM;
			goto done;
		}
		bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
	}
#endif /* !BCM_ROUTER_DHD */

	/* moved from dhdsdio_sendfromq(): try to orphan the skb early */
	if (dhd->pub.conf->orphan_move == 2)
		PKTORPHAN(skb, dhd->pub.conf->tsq);
	else if (dhd->pub.conf->orphan_move == 3)
		skb_orphan(skb);

	/* Convert to packet */
	if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
		DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
		           dhd_ifname(&dhd->pub, ifidx)));
		bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
		dev_kfree_skb_any(skb);
		ret = -ENOMEM;
		goto done;
	}

#ifdef DHD_WET
	/* WET-related packet proto manipulation should be done in DHD
	 * since the dongle doesn't have the complete payload
	 */
	if (WET_ENABLED(&dhd->pub) &&
			(dhd_wet_send_proc(dhd->pub.wet_info, pktbuf, &pktbuf) < 0)) {
		DHD_INFO(("%s:%s: wet send proc failed\n",
				__FUNCTION__, dhd_ifname(&dhd->pub, ifidx)));
		PKTFREE(dhd->pub.osh, pktbuf, FALSE);
		ret = -EFAULT;
		goto done;
	}
#endif /* DHD_WET */

#ifdef DHD_WMF
	eh = (struct ether_header *)PKTDATA(dhd->pub.osh, pktbuf);
	iph = (uint8 *)eh + ETHER_HDR_LEN;

	/* WMF processing for multicast packets
	 * Only IPv4 packets are handled
	 */
	if (ifp->wmf.wmf_enable && (ntoh16(eh->ether_type) == ETHER_TYPE_IP) &&
		(IP_VER(iph) == IP_VER_4) && (ETHER_ISMULTI(eh->ether_dhost) ||
		((IPV4_PROT(iph) == IP_PROT_IGMP) && dhd->pub.wmf_ucast_igmp))) {
#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
		void *sdu_clone;
		bool ucast_convert = FALSE;
#ifdef DHD_UCAST_UPNP
		uint32 dest_ip;

		dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET)));
		ucast_convert = dhd->pub.wmf_ucast_upnp && MCAST_ADDR_UPNP_SSDP(dest_ip);
#endif /* DHD_UCAST_UPNP */
#ifdef DHD_IGMP_UCQUERY
		ucast_convert |= dhd->pub.wmf_ucast_igmp_query &&
			(IPV4_PROT(iph) == IP_PROT_IGMP) &&
			(*(iph + IPV4_HLEN(iph)) == IGMPV2_HOST_MEMBERSHIP_QUERY);
#endif /* DHD_IGMP_UCQUERY */
		if (ucast_convert) {
			dhd_sta_t *sta;
			unsigned long flags;
			struct list_head snapshot_list;
			struct list_head *wmf_ucforward_list;

			ret = NETDEV_TX_OK;

			/* For non-BCM_GMAC3 platforms we need a snapshot sta_list to
			 * resolve the double DHD_IF_STA_LIST_LOCK call deadlock issue.
			 */
			wmf_ucforward_list = DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, &snapshot_list);

			GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
			/* Convert upnp/igmp query to unicast for each assoc STA */
			list_for_each_entry(sta, wmf_ucforward_list, list) {
				GCC_DIAGNOSTIC_POP();
				/* Skip sending to proxy interfaces of proxySTA */
				if (sta->psta_prim != NULL && !ifp->wmf_psta_disable) {
					continue;
				}
				if ((sdu_clone = PKTDUP(dhd->pub.osh, pktbuf)) == NULL) {
					ret = WMF_NOP;
					break;
				}
				dhd_wmf_forward(ifp->wmf.wmfh, sdu_clone, 0, sta, 1);
			}
			DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, wmf_ucforward_list);

			DHD_GENERAL_LOCK(&dhd->pub, flags);
			DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
			dhd_os_busbusy_wake(&dhd->pub);
			DHD_GENERAL_UNLOCK(&dhd->pub, flags);
			DHD_OS_WAKE_UNLOCK(&dhd->pub);

			if (ret == NETDEV_TX_OK)
				PKTFREE(dhd->pub.osh, pktbuf, TRUE);

			return ret;
		} else
#endif /* defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) */
		{
			/* There will be no STA info if the packet is coming from a LAN host,
			 * so pass NULL
			 */
			ret = dhd_wmf_packets_handle(&dhd->pub, pktbuf, NULL, ifidx, 0);
			switch (ret) {
			case WMF_TAKEN:
			case WMF_DROP:
				/* Either taken by WMF or we should drop it.
				 * Exiting send path
				 */

				DHD_GENERAL_LOCK(&dhd->pub, flags);
				DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
				dhd_os_busbusy_wake(&dhd->pub);
				DHD_GENERAL_UNLOCK(&dhd->pub, flags);
				DHD_OS_WAKE_UNLOCK(&dhd->pub);
				return NETDEV_TX_OK;
			default:
				/* Continue the transmit path */
				break;
			}
		}
	}
#endif /* DHD_WMF */
#ifdef DHD_PSTA
	/* PSR-related packet proto manipulation should be done in DHD
	 * since the dongle doesn't have the complete payload
	 */
	if (PSR_ENABLED(&dhd->pub) &&
#ifdef BCM_ROUTER_DHD
		!(ifp->primsta_dwds) &&
#endif /* BCM_ROUTER_DHD */
		(dhd_psta_proc(&dhd->pub, ifidx, &pktbuf, TRUE) < 0)) {

			DHD_ERROR(("%s:%s: psta send proc failed\n", __FUNCTION__,
				dhd_ifname(&dhd->pub, ifidx)));
	}
#endif /* DHD_PSTA */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) && defined(DHD_TCP_PACING_SHIFT)
#ifndef DHD_DEFAULT_TCP_PACING_SHIFT
#define DHD_DEFAULT_TCP_PACING_SHIFT 7
#endif /* DHD_DEFAULT_TCP_PACING_SHIFT */
	if (skb->sk) {
		sk_pacing_shift_update(skb->sk, DHD_DEFAULT_TCP_PACING_SHIFT);
	}
#endif /* LINUX_VERSION_CODE >= 4.19.0 && DHD_TCP_PACING_SHIFT */

#ifdef DHDTCPSYNC_FLOOD_BLK
	if (dhd_tcpdata_get_flag(&dhd->pub, pktbuf) == FLAG_SYNCACK) {
		ifp->tsyncack_txed++;
	}
#endif /* DHDTCPSYNC_FLOOD_BLK */

#ifdef DHDTCPACK_SUPPRESS
	if (dhd->pub.tcpack_sup_mode == TCPACK_SUP_HOLD) {
		/* If this packet has been held or got freed, just return */
		if (dhd_tcpack_hold(&dhd->pub, pktbuf, ifidx)) {
			ret = 0;
			goto done;
		}
	} else {
		/* If this packet has replaced another packet and got freed, just return */
		if (dhd_tcpack_suppress(&dhd->pub, pktbuf)) {
			ret = 0;
			goto done;
		}
	}
#endif /* DHDTCPACK_SUPPRESS */

	/*
	 * If load balancing is enabled, queue the packet;
	 * otherwise send it directly from here.
	 */
#if defined(DHD_LB_TXP)
	ret = dhd_lb_sendpkt(dhd, net, ifidx, pktbuf);
#else
	ret = __dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
#endif

done:
	/* XXX Bus modules may have different "native" error spaces? */
	/* XXX USB is native linux and it'd be nice to retain errno  */
	/* XXX meaning, but SDIO is not so we'd need an OSL_ERROR.   */
	if (ret) {
		ifp->stats.tx_dropped++;
		dhd->pub.tx_dropped++;
	} else {
#ifdef PROP_TXSTATUS
		/* the tx_packets counter can only be counted when wlfc is disabled */
		if (!dhd_wlfc_is_supported(&dhd->pub))
#endif
		{
			dhd->pub.tx_packets++;
			ifp->stats.tx_packets++;
			ifp->stats.tx_bytes += datalen;
		}
		dhd->pub.actual_tx_pkts++;
	}

	DHD_GENERAL_LOCK(&dhd->pub, flags);
	DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
	DHD_IF_CLR_TX_ACTIVE(ifp, DHD_TX_START_XMIT);
	dhd_os_tx_completion_wake(&dhd->pub);
	dhd_os_busbusy_wake(&dhd->pub);
	DHD_GENERAL_UNLOCK(&dhd->pub, flags);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);
#ifdef DHD_BUZZZ_LOG_ENABLED
	BUZZZ_LOG(START_XMIT_END, 0);
#endif /* DHD_BUZZZ_LOG_ENABLED */
	/* Return ok: we always eat the packet */
	return NETDEV_TX_OK;
}

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
void dhd_rx_wq_wakeup(struct work_struct *ptr)
{
	struct dhd_rx_tx_work *work;
	struct dhd_pub *pub;

	work = container_of(ptr, struct dhd_rx_tx_work, work);

	pub = work->pub;

	DHD_RPM(("%s: ENTER. \n", __FUNCTION__));

	if (atomic_read(&pub->block_bus) || pub->busstate == DHD_BUS_DOWN) {
		/* don't leak the work item on the early-exit path */
		kfree(work);
		return;
	}

	DHD_OS_WAKE_LOCK(pub);
	if (pm_runtime_get_sync(dhd_bus_to_dev(pub->bus)) >= 0) {

		// do nothing but wake up the bus.
		pm_runtime_mark_last_busy(dhd_bus_to_dev(pub->bus));
		pm_runtime_put_autosuspend(dhd_bus_to_dev(pub->bus));
	}
	DHD_OS_WAKE_UNLOCK(pub);
	kfree(work);
}

void dhd_start_xmit_wq_adapter(struct work_struct *ptr)
{
	struct dhd_rx_tx_work *work;
	/* default to success so 'ret' is defined even if the bus wakeup fails */
	netdev_tx_t ret = NETDEV_TX_OK;
	dhd_info_t *dhd;
	struct dhd_bus *bus;

	work = container_of(ptr, struct dhd_rx_tx_work, work);

	dhd = DHD_DEV_INFO(work->net);

	bus = dhd->pub.bus;

	if (atomic_read(&dhd->pub.block_bus)) {
		kfree_skb(work->skb);
		kfree(work);
		dhd_netif_start_queue(bus);
		return;
	}

	if (pm_runtime_get_sync(dhd_bus_to_dev(bus)) >= 0) {
		ret = dhd_start_xmit(work->skb, work->net);
		pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
		pm_runtime_put_autosuspend(dhd_bus_to_dev(bus));
	}

	/* log before freeing 'work': the net pointer lives inside it */
	if (ret)
		netdev_err(work->net,
			   "error: dhd_start_xmit():%d\n", ret);

	kfree(work);
	dhd_netif_start_queue(bus);
}

netdev_tx_t
BCMFASTPATH(dhd_start_xmit_wrapper)(struct sk_buff *skb, struct net_device *net)
{
	struct dhd_rx_tx_work *start_xmit_work;
	netdev_tx_t ret;
	dhd_info_t *dhd = DHD_DEV_INFO(net);

	if (dhd->pub.busstate == DHD_BUS_SUSPEND) {
		DHD_RPM(("%s: wake up the bus using the workqueue.\n", __FUNCTION__));

		dhd_netif_stop_queue(dhd->pub.bus);

		start_xmit_work = (struct dhd_rx_tx_work *)
			kmalloc(sizeof(*start_xmit_work), GFP_ATOMIC);

		if (!start_xmit_work) {
			netdev_err(net,
				   "error: failed to alloc start_xmit_work\n");
			ret = -ENOMEM;
			goto exit;
		}

		INIT_WORK(&start_xmit_work->work, dhd_start_xmit_wq_adapter);
		start_xmit_work->skb = skb;
		start_xmit_work->net = net;
		queue_work(dhd->tx_wq, &start_xmit_work->work);
		ret = NET_XMIT_SUCCESS;

	} else if (dhd->pub.busstate == DHD_BUS_DATA) {
		ret = dhd_start_xmit(skb, net);
	} else {
		/* when bus is down */
		ret = -ENODEV;
	}

exit:
	return ret;
}

void
dhd_bus_wakeup_work(dhd_pub_t *dhdp)
{
	struct dhd_rx_tx_work *rx_work;
	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;

	rx_work = kmalloc(sizeof(*rx_work), GFP_ATOMIC);
	if (!rx_work) {
		DHD_ERROR(("%s: start_rx_work alloc error. \n", __FUNCTION__));
		return;
	}

	INIT_WORK(&rx_work->work, dhd_rx_wq_wakeup);
	rx_work->pub = dhdp;
	queue_work(dhd->rx_wq, &rx_work->work);

}
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
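
/*
 * Runtime-PM pattern note (illustrative): each worker above follows the
 * standard kernel sequence
 *
 *	pm_runtime_get_sync(dev);        resume the device, take a reference
 *	... touch the hardware ...
 *	pm_runtime_mark_last_busy(dev);  restart the autosuspend timer
 *	pm_runtime_put_autosuspend(dev); drop the ref; suspend after timeout
 *
 * so a transmit that arrives while the bus is autosuspended only pays the
 * wakeup cost once, and the link is allowed to doze again after the
 * autosuspend delay expires.
 */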

static void
__dhd_txflowcontrol(dhd_pub_t *dhdp, struct net_device *net, bool state)
{
	if (state == ON) {
		if (!netif_queue_stopped(net)) {
			DHD_ERROR(("%s: Stop Netif Queue\n", __FUNCTION__));
			netif_stop_queue(net);
		} else {
			DHD_LOG_MEM(("%s: Netif Queue already stopped\n", __FUNCTION__));
		}
	}

	if (state == OFF) {
		if (netif_queue_stopped(net)) {
			DHD_ERROR(("%s: Start Netif Queue\n", __FUNCTION__));
			netif_wake_queue(net);
		} else {
			DHD_LOG_MEM(("%s: Netif Queue already started\n", __FUNCTION__));
		}
	}
}

void
dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
{
	struct net_device *net;
	dhd_info_t *dhd = dhdp->info;
	int i;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	ASSERT(dhd);

#ifdef DHD_LOSSLESS_ROAMING
	/* block flowcontrol during roaming */
	if ((dhdp->dequeue_prec_map == (1 << dhdp->flow_prio_map[PRIO_8021D_NC])) && (state == ON))
	{
		DHD_ERROR_RLMT(("%s: Roaming in progress, cannot stop network queue (0x%x:%d)\n",
			__FUNCTION__, dhdp->dequeue_prec_map, dhdp->flow_prio_map[PRIO_8021D_NC]));
		return;
	}
#endif

	if (ifidx == ALL_INTERFACES) {
		for (i = 0; i < DHD_MAX_IFS; i++) {
			if (dhd->iflist[i]) {
				net = dhd->iflist[i]->net;
				__dhd_txflowcontrol(dhdp, net, state);
			}
		}
	} else {
		if (dhd->iflist[ifidx]) {
			net = dhd->iflist[ifidx]->net;
			__dhd_txflowcontrol(dhdp, net, state);
		}
	}
	dhdp->txoff = state;
}
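
/*
 * Usage sketch (illustrative): the bus layer calls this when its tx path
 * backs up or drains, e.g.
 *
 *	dhd_txflowcontrol(dhdp, ALL_INTERFACES, ON);    ring full: stop queues
 *	...
 *	dhd_txflowcontrol(dhdp, ALL_INTERFACES, OFF);   drained: wake queues
 *
 * Passing a specific ifidx throttles just that interface's netif queue.
 */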

#if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))

/* Dump CTF stats */
void
dhd_ctf_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
{
	dhd_info_t *dhd = dhdp->info;

	bcm_bprintf(strbuf, "CTF stats:\n");
	ctf_dump(dhd->cih, strbuf);
}

bool
BCMFASTPATH(dhd_rx_pkt_chainable)(dhd_pub_t *dhdp, int ifidx)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp = dhd->iflist[ifidx];

	return ifp->rx_pkt_chainable;
}

/* Returns FALSE if block ping is enabled */
bool
BCMFASTPATH(dhd_l2_filter_chainable)(dhd_pub_t *dhdp, uint8 *eh, int ifidx)
{
#ifdef DHD_L2_FILTER
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp = dhd->iflist[ifidx];
	ASSERT(ifp != NULL);
	return ifp->block_ping ? FALSE : TRUE;
#else
	return TRUE;
#endif /* DHD_L2_FILTER */
}

/* Returns FALSE if WET is enabled */
bool
BCMFASTPATH(dhd_wet_chainable)(dhd_pub_t *dhdp)
{
#ifdef DHD_WET
	return (!WET_ENABLED(dhdp));
#else
	return TRUE;
#endif
}

/* Returns TRUE if hot bridge entry for this da is present */
bool
BCMFASTPATH(dhd_ctf_hotbrc_check)(dhd_pub_t *dhdp, uint8 *eh, int ifidx)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp = dhd->iflist[ifidx];

	ASSERT(ifp != NULL);

	if (!dhd->brc_hot)
		return FALSE;

	return CTF_HOTBRC_CMP(dhd->brc_hot, (eh), (void *)(ifp->net));
}

/*
 * Try to forward the complete packet chain through CTF.
 * If unsuccessful,
 *   - link the chain by skb->next
 *   - change the pnext to the 2nd packet of the chain
 *   - the chained packets will be sent up to the n/w stack
 */
static inline int32
BCMFASTPATH(dhd_ctf_forward)(dhd_info_t *dhd, struct sk_buff *skb, void **pnext)
{
	dhd_pub_t *dhdp = &dhd->pub;
	void *p, *n;
	void *old_pnext;

	/* try cut-thru first */
	if (!CTF_ENAB(dhd->cih) || (ctf_forward(dhd->cih, skb, skb->dev) == BCME_ERROR)) {
		/* Fall back to the slow path if ctf is disabled or if ctf_forward fails */

		/* clear skipct flag before sending up */
		PKTCLRSKIPCT(dhdp->osh, skb);

#ifdef CTFPOOL
		/* allocate and add a new skb to the pkt pool */
		if (PKTISFAST(dhdp->osh, skb))
			osl_ctfpool_add(dhdp->osh);

		/* clear fast buf flag before sending up */
		PKTCLRFAST(dhdp->osh, skb);

		/* re-init the hijacked field */
		CTFPOOLPTR(dhdp->osh, skb) = NULL;
#endif /* CTFPOOL */

		/* link the chained packets by skb->next */
		if (PKTISCHAINED(skb)) {
			old_pnext = *pnext;
			PKTFRMNATIVE(dhdp->osh, skb);
			p = (void *)skb;
			FOREACH_CHAINED_PKT(p, n) {
				PKTCLRCHAINED(dhdp->osh, p);
				PKTCCLRFLAGS(p);
				if (p == (void *)skb)
					PKTTONATIVE(dhdp->osh, p);
				if (n)
					PKTSETNEXT(dhdp->osh, p, n);
				else
					PKTSETNEXT(dhdp->osh, p, old_pnext);
			}
			*pnext = PKTNEXT(dhdp->osh, skb);
			PKTSETNEXT(dhdp->osh, skb, NULL);
		}
		return (BCME_ERROR);
	}

	return (BCME_OK);
}
#endif /* BCM_ROUTER_DHD && HNDCTF */

#ifdef DHD_WMF
bool
dhd_is_rxthread_enabled(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = dhdp->info;

	return dhd->rxthread_enabled;
}
#endif /* DHD_WMF */

#ifdef DHD_MCAST_REGEN
/*
 * Description: This function is called to do the reverse translation
 *
 * Input    eh - pointer to the ethernet header
 */
int32
dhd_mcast_reverse_translation(struct ether_header *eh)
{
	uint8 *iph;
	uint32 dest_ip;

	iph = (uint8 *)eh + ETHER_HDR_LEN;
	dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET)));

	/* Only IP packets are handled */
	if (eh->ether_type != hton16(ETHER_TYPE_IP))
		return BCME_ERROR;

	/* Non-IPv4 multicast packets are not handled */
	if (IP_VER(iph) != IP_VER_4)
		return BCME_ERROR;

	/*
	 * The packet has a multicast IP and a unicast MAC. That means
	 * we have to do the reverse translation
	 */
	if (IPV4_ISMULTI(dest_ip) && !ETHER_ISMULTI(&eh->ether_dhost)) {
		ETHER_FILL_MCAST_ADDR_FROM_IP(eh->ether_dhost, dest_ip);
		return BCME_OK;
	}

	return BCME_ERROR;
}
#endif /* DHD_MCAST_REGEN */
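
/*
 * Worked example (illustrative): a frame addressed to IPv4 group 239.1.1.1
 * but carrying a unicast MAC is rewritten to the standard IP-multicast MAC
 * 01:00:5E:01:01:01 -- the fixed 01:00:5E prefix followed by the low 23
 * bits of the group address. This regenerates the multicast MAC that was
 * lost when an upstream proxy converted the frame to unicast.
 */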

void
dhd_dpc_tasklet_dispatcher_work(struct work_struct *work)
{
	struct delayed_work *dw = to_delayed_work(work);
	struct dhd_info *dhd;

	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
	dhd = container_of(dw, struct dhd_info, dhd_dpc_dispatcher_work);
	GCC_DIAGNOSTIC_POP();

	DHD_INFO(("%s:\n", __FUNCTION__));

	tasklet_schedule(&dhd->tasklet);
}

void
dhd_schedule_delayed_dpc_on_dpc_cpu(dhd_pub_t *dhdp, ulong delay)
{
	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
	int dpc_cpu = atomic_read(&dhd->dpc_cpu);
	DHD_INFO(("%s:\n", __FUNCTION__));

	/* the scheduler will take care of moving the work to an appropriate
	 * cpu if dpc_cpu is not online
	 */
	schedule_delayed_work_on(dpc_cpu, &dhd->dhd_dpc_dispatcher_work, delay);

	return;
}

#ifdef SHOW_LOGTRACE
static void
dhd_netif_rx_ni(struct sk_buff *skb)
{
	/* Do not call netif_receive_skb(), as this workqueue scheduler is
	 * not from NAPI. Also, as we are not in interrupt context, do not
	 * call netif_rx(); instead call netif_rx_ni() (for kernel >= 2.6),
	 * which does netif_rx(), disables irqs, raises NET_RX_SOFTIRQ and
	 * re-enables interrupts
	 */
	netif_rx_ni(skb);
}

static int
dhd_event_logtrace_pkt_process(dhd_pub_t *dhdp, struct sk_buff *skb)
{
	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
	int ret = BCME_OK;
	uint datalen;
	bcm_event_msg_u_t evu;
	void *data = NULL;
	void *pktdata = NULL;
	bcm_event_t *pvt_data;
	uint pktlen;

	DHD_TRACE(("%s:Enter\n", __FUNCTION__));

	/* In dhd_rx_frame, the header is stripped using skb_pull
	 * with size ETH_HLEN, so adjust pktlen accordingly
	 */
	pktlen = skb->len + ETH_HLEN;

	pktdata = (void *)skb_mac_header(skb);
	ret = wl_host_event_get_data(pktdata, pktlen, &evu);

	if (ret != BCME_OK) {
		DHD_ERROR(("%s: wl_host_event_get_data err = %d\n",
			__FUNCTION__, ret));
		goto exit;
	}

	datalen = ntoh32(evu.event.datalen);

	pvt_data = (bcm_event_t *)pktdata;
	data = &pvt_data[1];	/* event payload starts right after the bcm_event_t header */

	dhd_dbg_trace_evnt_handler(dhdp, data, &dhd->event_data, datalen);

exit:
	return ret;
}
5030 
5031 /*
5032  * dhd_event_logtrace_process_items processes
5033  * each skb from evt_trace_queue.
5034  * Returns TRUE if more packets remain to be
5035  * processed, else returns FALSE.
5036  */
5037 
5038 static int
5039 dhd_event_logtrace_process_items(dhd_info_t *dhd)
5040 {
5041 	dhd_pub_t *dhdp;
5042 	struct sk_buff *skb;
5043 	uint32 qlen;
5044 	uint32 process_len;
5045 
5046 	if (!dhd) {
5047 		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
5048 		return 0;
5049 	}
5050 
5051 	dhdp = &dhd->pub;
5052 
5053 	if (!dhdp) {
5054 		DHD_ERROR(("%s: dhd pub is null \n", __FUNCTION__));
5055 		return 0;
5056 	}
5057 
5058 #ifdef BCMINTERNAL
5059 #ifdef DHD_FWTRACE
5060 	/* Check if there is any update in the firmware trace buffer */
5061 	process_fw_trace_data(dhdp);
5062 #endif /* DHD_FWTRACE */
5063 #endif /* BCMINTERNAL */
5064 	qlen = skb_queue_len(&dhd->evt_trace_queue);
5065 	process_len = MIN(qlen, DHD_EVENT_LOGTRACE_BOUND);
5066 
5067 	/* Run while loop till bound is reached or skb queue is empty */
5068 	while (process_len--) {
5069 		int ifid = 0;
5070 		skb = skb_dequeue(&dhd->evt_trace_queue);
5071 		if (skb == NULL) {
5072 			DHD_ERROR(("%s: skb is NULL, which is not valid case\n",
5073 				__FUNCTION__));
5074 			break;
5075 		}
5076 		BCM_REFERENCE(ifid);
5077 #ifdef PCIE_FULL_DONGLE
5078 		/* Check if pkt is from INFO ring or WLC_E_TRACE */
5079 		ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb));
5080 		if (ifid == DHD_DUMMY_INFO_IF) {
5081 			/* Process logtrace from info rings */
5082 			dhd_event_logtrace_infobuf_pkt_process(dhdp, skb, &dhd->event_data);
5083 		} else
5084 #endif /* PCIE_FULL_DONGLE */
5085 		{
5086 			/* Processing WLC_E_TRACE case OR non-PCIE_FULL_DONGLE case */
5087 			dhd_event_logtrace_pkt_process(dhdp, skb);
5088 		}
5089 
5090 		/* Dummy sleep so that scheduler kicks in after processing any logprints */
5091 		OSL_SLEEP(0);
5092 
5093 		/* Send packet up if logtrace_pkt_sendup is TRUE */
5094 		if (dhdp->logtrace_pkt_sendup) {
5095 #ifdef DHD_USE_STATIC_CTRLBUF
5096 			/* If bufs are allocated via static buf pool
5097 			 * and logtrace_pkt_sendup enabled, make a copy,
5098 			 * free the local one and send the copy up.
5099 			 */
5100 			void *npkt = PKTDUP(dhdp->osh, skb);
5101 			/* Clone event and send it up */
5102 			PKTFREE_STATIC(dhdp->osh, skb, FALSE);
5103 			if (npkt) {
5104 				skb = npkt;
5105 			} else {
5106 				DHD_ERROR(("skb clone failed. dropping logtrace pkt.\n"));
5107 				/* Packet is already freed, go to next packet */
5108 				continue;
5109 			}
5110 #endif /* DHD_USE_STATIC_CTRLBUF */
5111 #ifdef PCIE_FULL_DONGLE
5112 			/* For infobuf packets, i.e. when ifid is DHD_DUMMY_INFO_IF,
5113 			 * assign skb->dev to the primary interface's net device
5114 			 * before sending the skb to the network layer
5115 			 */
5116 			if (ifid == DHD_DUMMY_INFO_IF) {
5117 				skb = PKTTONATIVE(dhdp->osh, skb);
5118 				skb->dev = dhd->iflist[0]->net;
5119 			}
5120 #endif /* PCIE_FULL_DONGLE */
5121 			/* Send pkt UP */
5122 			dhd_netif_rx_ni(skb);
5123 		} else	{
5124 			/* Don't send up. Free up the packet. */
5125 #ifdef DHD_USE_STATIC_CTRLBUF
5126 			PKTFREE_STATIC(dhdp->osh, skb, FALSE);
5127 #else
5128 			PKTFREE(dhdp->osh, skb, FALSE);
5129 #endif /* DHD_USE_STATIC_CTRLBUF */
5130 		}
5131 	}
5132 
5133 	/* Reschedule if more packets to be processed */
5134 	return (qlen >= DHD_EVENT_LOGTRACE_BOUND);
5135 }
5136 
5137 #ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
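/* Kernel-thread variant of the logtrace worker: it blocks on a binary
 * semaphore and, when woken, drains EDL completions or queued logtrace skbs
 * until no more work is pending, yielding between bounded rounds.
 */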
5138 static int
5139 dhd_logtrace_thread(void *data)
5140 {
5141 	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
5142 	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
5143 	dhd_pub_t *dhdp = (dhd_pub_t *)&dhd->pub;
5144 	int ret;
5145 
5146 	while (1) {
5147 		dhdp->logtrace_thr_ts.entry_time = OSL_LOCALTIME_NS();
5148 		if (!binary_sema_down(tsk)) {
5149 			dhdp->logtrace_thr_ts.sem_down_time = OSL_LOCALTIME_NS();
5150 			SMP_RD_BARRIER_DEPENDS();
5151 			if (dhd->pub.dongle_reset == FALSE) {
5152 				do {
5153 					/* Check terminated before processing the items */
5154 					if (tsk->terminated) {
5155 						DHD_ERROR(("%s: task terminated\n", __FUNCTION__));
5156 						goto exit;
5157 					}
5158 #ifdef EWP_EDL
5159 					/* check if EDL is being used */
5160 					if (dhd->pub.dongle_edl_support) {
5161 						ret = dhd_prot_process_edl_complete(&dhd->pub,
5162 								&dhd->event_data);
5163 					} else {
5164 						ret = dhd_event_logtrace_process_items(dhd);
5165 					}
5166 #else
5167 					ret = dhd_event_logtrace_process_items(dhd);
5168 #endif /* EWP_EDL */
5169 					/* if ret > 0, bound has reached so to be fair to other
5170 					 * processes need to yield the scheduler.
5171 					 * The comment above yield()'s definition says:
5172 					 * If you want to use yield() to wait for something,
5173 					 * use wait_event().
5174 					 * If you want to use yield() to be 'nice' for others,
5175 					 * use cond_resched().
5176 					 * If you still want to use yield(), do not!
5177 					 */
5178 					if (ret > 0) {
5179 						cond_resched();
5180 						OSL_SLEEP(DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS);
5181 					} else if (ret < 0) {
5182 						DHD_ERROR(("%s: ERROR should not reach here\n",
5183 							__FUNCTION__));
5184 					}
5185 				} while (ret > 0);
5186 			}
5187 			if (tsk->flush_ind) {
5188 				DHD_ERROR(("%s: flushed\n", __FUNCTION__));
5189 				dhdp->logtrace_thr_ts.flush_time = OSL_LOCALTIME_NS();
5190 				tsk->flush_ind = 0;
5191 				complete(&tsk->flushed);
5192 			}
5193 		} else {
5194 			DHD_ERROR(("%s: unexpected break\n", __FUNCTION__));
5195 			dhdp->logtrace_thr_ts.unexpected_break_time = OSL_LOCALTIME_NS();
5196 			break;
5197 		}
5198 	}
5199 exit:
5200 	dhdp->logtrace_thr_ts.complete_time = OSL_LOCALTIME_NS();
5201 	complete_and_exit(&tsk->completed, 0); /* never returns */
5202 }
5203 #else
5204 static void
5205 dhd_event_logtrace_process(struct work_struct * work)
5206 {
5207 	struct delayed_work *dw = to_delayed_work(work);
5208 	struct dhd_info *dhd;
5209 	int ret;
5210 
5211 	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); /* ignore -Werror=cast-qual */
5212 	dhd = container_of(dw, struct dhd_info, event_log_dispatcher_work);
5213 	GCC_DIAGNOSTIC_POP();
5214 
5215 #ifdef EWP_EDL
5216 	if (dhd->pub.dongle_edl_support) {
5217 		ret = dhd_prot_process_edl_complete(&dhd->pub, &dhd->event_data);
5218 	} else {
5219 		ret = dhd_event_logtrace_process_items(dhd);
5220 	}
5221 #else
5222 	ret = dhd_event_logtrace_process_items(dhd);
5223 #endif /* EWP_EDL */
5224 
5225 	if (ret > 0) {
5226 		schedule_delayed_work(&(dhd)->event_log_dispatcher_work,
5227 			msecs_to_jiffies(DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS));
5228 	}
5229 	return;
5230 }
5231 #endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
5232 
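/* Wake the logtrace worker: signal the kthread's binary semaphore when
 * DHD_USE_KTHREAD_FOR_LOGTRACE is defined, otherwise schedule the
 * event_log_dispatcher_work delayed work immediately.
 */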
5233 void
5234 dhd_schedule_logtrace(void *dhd_info)
5235 {
5236 	dhd_info_t *dhd = (dhd_info_t *)dhd_info;
5237 
5238 #ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
5239 	if (dhd->thr_logtrace_ctl.thr_pid >= 0) {
5240 		binary_sema_up(&dhd->thr_logtrace_ctl);
5241 	} else {
5242 		DHD_ERROR(("%s: thr_logtrace_ctl(%ld) not inited\n", __FUNCTION__,
5243 			dhd->thr_logtrace_ctl.thr_pid));
5244 	}
5245 #else
5246 	schedule_delayed_work(&dhd->event_log_dispatcher_work, 0);
5247 #endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
5248 	return;
5249 }
5250 
5251 void
5252 dhd_cancel_logtrace_process_sync(dhd_info_t *dhd)
5253 {
5254 #ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
5255 	if (dhd->thr_logtrace_ctl.thr_pid >= 0) {
5256 		PROC_STOP_USING_BINARY_SEMA(&dhd->thr_logtrace_ctl);
5257 	} else {
5258 		DHD_ERROR(("%s: thr_logtrace_ctl(%ld) not inited\n", __FUNCTION__,
5259 			dhd->thr_logtrace_ctl.thr_pid));
5260 	}
5261 #else
5262 	cancel_delayed_work_sync(&dhd->event_log_dispatcher_work);
5263 #endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
5264 }
5265 
5266 void
5267 dhd_flush_logtrace_process(dhd_info_t *dhd)
5268 {
5269 #ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
5270 	if (dhd->thr_logtrace_ctl.thr_pid >= 0) {
5271 		PROC_FLUSH_USING_BINARY_SEMA(&dhd->thr_logtrace_ctl);
5272 	} else {
5273 		DHD_ERROR(("%s: thr_logtrace_ctl(%ld) not inited\n", __FUNCTION__,
5274 			dhd->thr_logtrace_ctl.thr_pid));
5275 	}
5276 #else
5277 	flush_delayed_work(&dhd->event_log_dispatcher_work);
5278 #endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
5279 }
5280 
5281 int
5282 dhd_init_logtrace_process(dhd_info_t *dhd)
5283 {
5284 #ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
5285 	dhd->thr_logtrace_ctl.thr_pid = DHD_PID_KT_INVALID;
5286 	PROC_START(dhd_logtrace_thread, dhd, &dhd->thr_logtrace_ctl, 0, "dhd_logtrace_thread");
5287 	if (dhd->thr_logtrace_ctl.thr_pid < 0) {
5288 		DHD_ERROR(("%s: init logtrace process failed\n", __FUNCTION__));
5289 		return BCME_ERROR;
5290 	} else {
5291 		DHD_ERROR(("%s: thr_logtrace_ctl(%ld) inited\n", __FUNCTION__,
5292 			dhd->thr_logtrace_ctl.thr_pid));
5293 	}
5294 #else
5295 	INIT_DELAYED_WORK(&dhd->event_log_dispatcher_work, dhd_event_logtrace_process);
5296 #endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
5297 	return BCME_OK;
5298 }
5299 
5300 int
5301 dhd_reinit_logtrace_process(dhd_info_t *dhd)
5302 {
5303 #ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
5304 	/* Re-init only if PROC_STOP from dhd_stop was called
5305 	 * which can be checked via thr_pid
5306 	 */
5307 	if (dhd->thr_logtrace_ctl.thr_pid < 0) {
5308 		PROC_START(dhd_logtrace_thread, dhd, &dhd->thr_logtrace_ctl,
5309 			0, "dhd_logtrace_thread");
5310 		if (dhd->thr_logtrace_ctl.thr_pid < 0) {
5311 			DHD_ERROR(("%s: reinit logtrace process failed\n", __FUNCTION__));
5312 			return BCME_ERROR;
5313 		} else {
5314 			DHD_ERROR(("%s: thr_logtrace_ctl(%ld) inited\n", __FUNCTION__,
5315 				dhd->thr_logtrace_ctl.thr_pid));
5316 		}
5317 	}
5318 #else
5319 	/* No need to re-init for WQ as cancel_delayed_work_sync
5320 	 * will not delete the WQ
5321 	 */
5322 #endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
5323 	return BCME_OK;
5324 }
5325 
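/* Queue an event/logtrace skb for deferred processing and kick the worker.
 * On PCIE_FULL_DONGLE the source ifidx is stored in the packet tag so the
 * worker can distinguish info-ring buffers from WLC_E_TRACE events.
 */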
5326 void
5327 dhd_event_logtrace_enqueue(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
5328 {
5329 	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5330 
5331 #ifdef PCIE_FULL_DONGLE
5332 	/* Add ifidx in the PKTTAG */
5333 	DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pktbuf), ifidx);
5334 #endif /* PCIE_FULL_DONGLE */
5335 	skb_queue_tail(&dhd->evt_trace_queue, pktbuf);
5336 
5337 	dhd_schedule_logtrace(dhd);
5338 }
5339 
5340 #ifdef BCMINTERNAL
5341 #ifdef DHD_FWTRACE
5342 void
5343 dhd_event_logtrace_enqueue_fwtrace(dhd_pub_t *dhdp)
5344 {
5345 	dhd_info_t *dhd = (dhd_info_t *) dhdp->info;
5346 
5347 	/* Schedule a kernel thread */
5348 	dhd_schedule_logtrace(dhd);
5349 
5350 	return;
5351 }
5352 #endif	/* DHD_FWTRACE */
5353 #endif	/* BCMINTERNAL */
5354 
5355 void
5356 dhd_event_logtrace_flush_queue(dhd_pub_t *dhdp)
5357 {
5358 	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5359 	struct sk_buff *skb;
5360 
5361 	while ((skb = skb_dequeue(&dhd->evt_trace_queue)) != NULL) {
5362 #ifdef DHD_USE_STATIC_CTRLBUF
5363 		PKTFREE_STATIC(dhdp->osh, skb, FALSE);
5364 #else
5365 		PKTFREE(dhdp->osh, skb, FALSE);
5366 #endif /* DHD_USE_STATIC_CTRLBUF */
5367 	}
5368 }
5369 
5370 #ifdef EWP_EDL
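/* Copy an EDL info-buffer message into a freshly allocated packet, attach
 * the resulting skb to the primary interface's net device and send it up
 * the network stack.
 */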
5371 void
5372 dhd_sendup_info_buf(dhd_pub_t *dhdp, uint8 *msg)
5373 {
5374 	struct sk_buff *skb = NULL;
5375 	uint32 pktsize = 0;
5376 	void *pkt = NULL;
5377 	info_buf_payload_hdr_t *infobuf = NULL;
5378 	dhd_info_t *dhd = dhdp->info;
5379 	uint8 *pktdata = NULL;
5380 
5381 	if (!msg)
5382 		return;
5383 
5384 	/* msg = |infobuf_ver(u32)|info_buf_payload_hdr_t|msgtrace_hdr_t|<var len data>| */
5385 	infobuf = (info_buf_payload_hdr_t *)(msg + sizeof(uint32));
5386 	pktsize = (uint32)(ltoh16(infobuf->length) + sizeof(info_buf_payload_hdr_t) +
5387 			sizeof(uint32));
5388 	pkt = PKTGET(dhdp->osh, pktsize, FALSE);
5389 	if (!pkt) {
5390 		DHD_ERROR(("%s: skb alloc failed ! not sending event log up.\n", __FUNCTION__));
5391 	} else {
5392 		PKTSETLEN(dhdp->osh, pkt, pktsize);
5393 		pktdata = PKTDATA(dhdp->osh, pkt);
5394 		memcpy(pktdata, msg, pktsize);
5395 		/* For infobuf packets assign skb->dev with
5396 		 * Primary interface n/w device
5397 		 */
5398 		skb = PKTTONATIVE(dhdp->osh, pkt);
5399 		skb->dev = dhd->iflist[0]->net;
5400 		/* Send pkt UP */
5401 		dhd_netif_rx_ni(skb);
5402 	}
5403 }
5404 #endif /* EWP_EDL */
5405 #endif /* SHOW_LOGTRACE */
5406 
5407 #ifdef BTLOG
5408 static void
5409 dhd_bt_log_process(struct work_struct *work)
5410 {
5411 	struct dhd_info *dhd;
5412 	dhd_pub_t *dhdp;
5413 	struct sk_buff *skb;
5414 
5415 	/* Ignore compiler warnings due to -Werror=cast-qual */
5416 	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
5417 	dhd = container_of(work, struct dhd_info, bt_log_dispatcher_work);
5418 	GCC_DIAGNOSTIC_POP();
5419 
5420 	if (!dhd) {
5421 		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
5422 		return;
5423 	}
5424 
5425 	dhdp = &dhd->pub;
5426 
5427 	if (!dhdp) {
5428 		DHD_ERROR(("%s: dhd pub is null \n", __FUNCTION__));
5429 		return;
5430 	}
5431 
5432 	DHD_TRACE(("%s:Enter\n", __FUNCTION__));
5433 
5434 	/* Run while(1) loop till all skbs are dequeued */
5435 	while ((skb = skb_dequeue(&dhd->bt_log_queue)) != NULL) {
5436 		dhd_bt_log_pkt_process(dhdp, skb);
5437 #ifdef DHD_USE_STATIC_CTRLBUF
5438 		PKTFREE_STATIC(dhdp->osh, skb, FALSE);
5439 #else
5440 		PKTFREE(dhdp->osh, skb, FALSE);
5441 #endif /* DHD_USE_STATIC_CTRLBUF */
5442 	}
5443 }
5444 
5445 void
5446 dhd_rx_bt_log(dhd_pub_t *dhdp, void *pkt)
5447 {
5448 	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5449 
5450 	skb_queue_tail(&dhd->bt_log_queue, pkt);
5451 
5452 	/* schedule workqueue to process bt logs */
5453 	schedule_work(&dhd->bt_log_dispatcher_work);
5454 }
5455 #endif	/* BTLOG */
5456 
5457 #ifdef EWP_EDL
5458 static void
5459 dhd_edl_process_work(struct work_struct *work)
5460 {
5461 	struct delayed_work *dw = to_delayed_work(work);
5462 	struct dhd_info *dhd_info;
5463 	/* Ignore compiler warnings due to -Werror=cast-qual */
5464 	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
5465 	dhd_info = container_of(dw, struct dhd_info, edl_dispatcher_work);
5466 	GCC_DIAGNOSTIC_POP();
5467 
5468 	if (dhd_info)
5469 		dhd_prot_process_edl_complete(&dhd_info->pub, &dhd_info->event_data);
5470 }
5471 
5472 void
5473 dhd_schedule_edl_work(dhd_pub_t *dhdp, uint delay_ms)
5474 {
5475 	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5476 	schedule_delayed_work(&dhd->edl_dispatcher_work, msecs_to_jiffies(delay_ms));
5477 }
5478 #endif /* EWP_EDL */
5479 
5480 #ifdef WL_NANHO
5481 /* forward NAN event to NANHO host module. API returns TRUE if event is consumed by NANHO */
5482 static bool
5483 dhd_nho_evt_process(dhd_pub_t *pub, int ifidx, wl_event_msg_t *evt_msg,
5484 	void *pktdata, uint16 pktlen)
5485 {
5486 	uint32 evt_type = ntoh32_ua(&evt_msg->event_type);
5487 	bool consumed = FALSE;
5488 
5489 	if ((evt_type == WLC_E_NAN_CRITICAL) || (evt_type == WLC_E_NAN_NON_CRITICAL)) {
5490 		bcm_event_t *pvt_data = (bcm_event_t *)pktdata;
5491 		uint32 event_len = sizeof(wl_event_msg_t) + ntoh32_ua(&evt_msg->datalen);
5492 
5493 		bcm_nanho_evt(pub->nanhoi, &pvt_data->event, event_len, &consumed);
5494 	}
5495 	return consumed;
5496 }
5497 
5498 static int
5499 dhd_nho_evt_cb(void *drv_ctx, int ifidx, bcm_event_t *evt, uint16 evt_len)
5500 {
5501 	struct sk_buff *p, *skb;
5502 	dhd_if_t *ifp;
5503 	dhd_pub_t *dhdp = (dhd_pub_t *)drv_ctx;
5504 
5505 	if ((p = PKTGET(dhdp->osh, evt_len, FALSE))) {
5506 		memcpy(PKTDATA(dhdp->osh, p), (uint8 *)evt, evt_len);
5507 		skb = PKTTONATIVE(dhdp->osh, p);
5508 
5509 		ifp = dhdp->info->iflist[ifidx];
5510 		if (ifp == NULL) {
5511 			/* default to main interface */
5512 			ifp = dhdp->info->iflist[0];
5513 		}
5514 		ASSERT(ifp);
5515 
5516 		skb->dev = ifp->net;
5517 		skb->protocol = eth_type_trans(skb, skb->dev);
5518 
5519 		/* strip header, count, deliver upward */
5520 		skb_pull(skb, ETH_HLEN);
5521 
5522 		/* send the packet */
5523 		if (in_interrupt()) {
5524 			netif_rx(skb);
5525 		} else {
5526 			netif_rx_ni(skb);
5527 		}
5528 	} else {
5529 		DHD_ERROR(("NHO: dhd_nho_evt_cb: unable to alloc sk_buff\n"));
5530 		return BCME_NOMEM;
5531 	}
5532 
5533 	return BCME_OK;
5534 }
5535 #endif /* WL_NANHO */
5536 
5537 #ifdef ENABLE_WAKEUP_PKT_DUMP
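/* Pack metadata of the packet that woke the host into the temp_raw bit
 * field. Layout as inferred from the code below: bits 48+ carry the
 * ethertype, bits 40-47 the event type or IP protocol, bit 39 a multicast
 * MAC flag, bit 38 a multicast IP flag, and bits 16-31 the destination port.
 */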
5538 static void
5539 update_wake_pkt_info(struct sk_buff *skb)
5540 {
5541 	struct iphdr *ip_header;
5542 	struct ipv6hdr *ipv6_header;
5543 	struct udphdr *udp_header;
5544 	struct tcphdr *tcp_header;
5545 	uint16 dport = 0;
5546 
5547 	ip_header = (struct iphdr *)(skb->data);
5548 
5549 	temp_raw |= ((long long)ntoh16(skb->protocol)) << 48;
5550 
5551 	DHD_INFO(("eth_hdr(skb)->h_dest : %pM\n", eth_hdr(skb)->h_dest));
5552 	if (eth_hdr(skb)->h_dest[0] & 0x01) {
5553 		temp_raw |= (long long)1 << 39;
5554 	}
5555 
5556 	if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
5557 		wl_event_msg_t event;
5558 		bcm_event_msg_u_t evu;
5559 		int ret;
5560 		uint event_type;
5561 
5562 		ret = wl_host_event_get_data(
5563 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
5564 			skb_mac_header(skb),
5565 #else
5566 			skb->mac.raw,
5567 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
5568 			skb->len, &evu);
5569 		if (ret != BCME_OK) {
5570 			DHD_ERROR(("%s: wl_host_event_get_data err = %d\n",
5571 				__FUNCTION__, ret));
5572 		}
5573 
5574 		memcpy(&event, &evu.event, sizeof(wl_event_msg_t));
5575 		event_type = ntoh32_ua((void *)&event.event_type);
5576 
5577 		temp_raw |= (long long)event_type << 40;
5578 	} else if (ntoh16(skb->protocol) == ETHER_TYPE_IP ||
5579 			ntoh16(skb->protocol) == ETHER_TYPE_IPV6) {
5580 		if (ip_header->version == 6) {
5581 			ipv6_header = (struct ipv6hdr *)ip_header;
5582 			temp_raw |= ((long long)ipv6_header->nexthdr) << 40;
5583 			dport = 0;
5584 
5585 			if (ipv6_header->daddr.s6_addr[0] == 0xff) { /* IPv6 mcast */
5586 				temp_raw |= (long long)1 << 38;
5587 			}
5588 
5589 			DHD_INFO(("IPv6 [%x]%pI6c > %pI6c:%d\n",
5590 				ipv6_header->nexthdr, &(ipv6_header->saddr.s6_addr),
5591 				&(ipv6_header->daddr.s6_addr), dport));
5592 		} else if (ip_header->version == 4) {
5593 			temp_raw |= ((long long)ip_header->protocol) << 40;
5594 
5595 #define IP_HDR_OFFSET	((char *)ip_header + IPV4_HLEN(ip_header))
5596 			if (ip_header->protocol == IPPROTO_TCP) {
5597 				tcp_header = (struct tcphdr *)IP_HDR_OFFSET;
5598 				dport = ntohs(tcp_header->dest);
5599 			}
5600 			else if (ip_header->protocol == IPPROTO_UDP) {
5601 				udp_header = (struct udphdr *)IP_HDR_OFFSET;
5602 				dport = ntohs(udp_header->dest);
5603 			}
5604 
5605 			if (ipv4_is_multicast(ip_header->daddr)) {
5606 				temp_raw |= (long long)1 << 38;
5607 			}
5608 
5609 			DHD_INFO(("IP [%x] %pI4 > %pI4:%d\n",
5610 				ip_header->protocol, &(ip_header->saddr),
5611 				&(ip_header->daddr), dport));
5612 		}
5613 
5614 		temp_raw |= (long long)dport << 16;
5615 	}
5616 }
5617 #endif /* ENABLE_WAKEUP_PKT_DUMP */
5618 
5619 #if defined(BCMPCIE)
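/* Sanity check for receive buffers: DHD expects linear skbs from the bus,
 * so a nonzero shinfo->nr_frags indicates corruption. Log the skb state and
 * either collect a socram dump or free the packet.
 */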
5620 int
5621 dhd_check_shinfo_nrfrags(dhd_pub_t *dhdp, void *pktbuf,
5622 	dmaaddr_t *pa, uint32 pktid)
5623 {
5624 	struct sk_buff *skb;
5625 	struct skb_shared_info *shinfo;
5626 
5627 	if (!pktbuf)
5628 		return BCME_ERROR;
5629 
5630 	skb = PKTTONATIVE(dhdp->osh, pktbuf);
5631 	shinfo = skb_shinfo(skb);
5632 
5633 	if (shinfo->nr_frags) {
5634 #ifdef CONFIG_64BIT
5635 		DHD_ERROR(("!!Invalid nr_frags: %u pa.loaddr: 0x%llx pa.hiaddr: 0x%llx "
5636 			"skb: 0x%llx skb_data: 0x%llx skb_head: 0x%llx skb_tail: 0x%llx "
5637 			"skb_end: 0x%llx skb_len: %u shinfo: 0x%llx pktid: %u\n",
5638 			shinfo->nr_frags, (uint64)(pa->loaddr), (uint64)(pa->hiaddr),
5639 			(uint64)skb, (uint64)(skb->data), (uint64)(skb->head), (uint64)(skb->tail),
5640 			(uint64)(skb->end), skb->len, (uint64)shinfo, pktid));
5641 #else
5642 		DHD_ERROR(("!!Invalid nr_frags: %u "
5643 			"skb: 0x%x skb_data: 0x%x skb_head: 0x%x skb_tail: 0x%x "
5644 			"skb_end: 0x%x skb_len: %u shinfo: 0x%x pktid: %u\n",
5645 			shinfo->nr_frags,
5646 			(uint)skb, (uint)(skb->data), (uint)(skb->head), (uint)(skb->tail),
5647 			(uint)(skb->end), skb->len, (uint)shinfo, pktid));
5648 #endif
5649 		prhex("shinfo", (char*)shinfo, sizeof(struct skb_shared_info));
5650 		if (!dhd_query_bus_erros(dhdp)) {
5651 #ifdef DHD_FW_COREDUMP
5652 			/* Collect socram dump */
5653 			if (dhdp->memdump_enabled) {
5654 				/* collect core dump */
5655 				dhdp->memdump_type = DUMP_TYPE_INVALID_SHINFO_NRFRAGS;
5656 				dhd_bus_mem_dump(dhdp);
5657 			} else
5658 #endif /* DHD_FW_COREDUMP */
5659 			{
5660 				shinfo->nr_frags = 0;
5661 				/* In the production case, free the packet and
5662 				 * continue if nr_frags is corrupted, whereas in the
5663 				 * non-production case collect memdump and call BUG_ON().
5664 				 */
5665 				PKTCFREE(dhdp->osh, pktbuf, FALSE);
5666 			}
5667 		}
5668 		return BCME_ERROR;
5669 	}
5670 	return BCME_OK;
5671 }
5672 #endif /* BCMPCIE */
5673 
5674 /** Called when a frame is received by the dongle on interface 'ifidx' */
5675 void
5676 dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan)
5677 {
5678 	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5679 	struct sk_buff *skb;
5680 	uchar *eth;
5681 	uint len;
5682 	void *data, *pnext = NULL;
5683 	int i;
5684 	dhd_if_t *ifp;
5685 	wl_event_msg_t event;
5686 #if defined(OEM_ANDROID)
5687 	int tout_rx = 0;
5688 	int tout_ctrl = 0;
5689 #endif /* OEM_ANDROID */
5690 	void *skbhead = NULL;
5691 	void *skbprev = NULL;
5692 	uint16 protocol;
5693 	unsigned char *dump_data;
5694 #ifdef DHD_MCAST_REGEN
5695 	uint8 interface_role;
5696 	if_flow_lkup_t *if_flow_lkup;
5697 	unsigned long flags;
5698 #endif
5699 #ifdef DHD_WAKE_STATUS
5700 	wake_counts_t *wcp = NULL;
5701 #endif /* DHD_WAKE_STATUS */
5702 	int pkt_wake = 0;
5703 #ifdef ENABLE_DHD_GRO
5704 	bool dhd_gro_enable = TRUE;
5705 	struct Qdisc *qdisc = NULL;
5706 #endif /* ENABLE_DHD_GRO */
5707 
5708 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
5709 	BCM_REFERENCE(dump_data);
5710 	BCM_REFERENCE(pkt_wake);
5711 
5712 #ifdef DHD_TPUT_PATCH
5713 	if (dhdp->conf->pktsetsum)
5714 		PKTSETSUMGOOD(pktbuf, TRUE);
5715 #endif
5716 
5717 #ifdef ENABLE_DHD_GRO
5718 	if (ifidx < DHD_MAX_IFS) {
5719 		ifp = dhd->iflist[ifidx];
5720 		if (ifp && ifp->net->qdisc) {
5721 			if (ifp->net->qdisc->ops->cl_ops) {
5722 				dhd_gro_enable = FALSE;
5723 				DHD_TRACE(("%s: disable sw gro because of"
5724 						" qdisc tx traffic control\n", __FUNCTION__));
5725 			}
5726 
5727 			if (dev_ingress_queue(ifp->net)) {
5728 				qdisc = dev_ingress_queue(ifp->net)->qdisc_sleeping;
5729 				if (qdisc != NULL && (qdisc->flags & TCQ_F_INGRESS)) {
5730 					dhd_gro_enable = FALSE;
5731 					DHD_TRACE(("%s: disable sw gro because of"
5732 						" qdisc rx traffic control\n", __FUNCTION__));
5733 				}
5734 			}
5735 		}
5736 	}
5737 #ifdef DHD_GRO_ENABLE_HOST_CTRL
5738 	if (!dhdp->permitted_gro && dhd_gro_enable) {
5739 		dhd_gro_enable = FALSE;
5740 	}
5741 #endif /* DHD_GRO_ENABLE_HOST_CTRL */
5742 #endif /* ENABLE_DHD_GRO */
5743 
5744 	for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
5745 		struct ether_header *eh;
5746 
5747 		pnext = PKTNEXT(dhdp->osh, pktbuf);
5748 		PKTSETNEXT(dhdp->osh, pktbuf, NULL);
5749 
5750 		/* info ring "debug" data, which is not a 802.3 frame, is sent/hacked with a
5751 		 * special ifidx of DHD_DUMMY_INFO_IF.  This is just internal to dhd to get the data
5752 		 * from dhd_msgbuf.c:dhd_prot_infobuf_cmplt_process() to here (dhd_rx_frame).
5753 		 */
5754 		if (ifidx == DHD_DUMMY_INFO_IF) {
5755 			/* Event msg printing is called from dhd_rx_frame which is in Tasklet
5756 			 * context in case of PCIe FD, in case of other bus this will be from
5757 			 * DPC context. If we get bunch of events from Dongle then printing all
5758 			 * of them from Tasklet/DPC context that too in data path is costly.
5759 			 * Also in the new Dongle SW(4359, 4355 onwards) console prints too come as
5760 			 * events with type WLC_E_TRACE.
5761 			 * We'll print these console logs from the WorkQueue context by enqueuing the SKB
5762 			 * here and Dequeuing will be done in WorkQueue and will be freed only if
5763 			 * logtrace_pkt_sendup is TRUE
5764 			 */
5765 #ifdef SHOW_LOGTRACE
5766 			dhd_event_logtrace_enqueue(dhdp, ifidx, pktbuf);
5767 #else /* !SHOW_LOGTRACE */
5768 		/* If SHOW_LOGTRACE not defined and ifidx is DHD_DUMMY_INFO_IF,
5769 		 * free the PKT here itself
5770 		 */
5771 #ifdef DHD_USE_STATIC_CTRLBUF
5772 		PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
5773 #else
5774 		PKTFREE(dhdp->osh, pktbuf, FALSE);
5775 #endif /* DHD_USE_STATIC_CTRLBUF */
5776 #endif /* SHOW_LOGTRACE */
5777 			continue;
5778 		}
5779 #ifdef DHD_WAKE_STATUS
5780 #ifdef BCMDBUS
5781 		wcp = NULL;
5782 #else
5783 		pkt_wake = dhd_bus_get_bus_wake(dhdp);
5784 		wcp = dhd_bus_get_wakecount(dhdp);
5785 #endif /* BCMDBUS */
5786 		if (wcp == NULL) {
5787 			/* If wakeinfo count buffer is null do not  update wake count values */
5788 			pkt_wake = 0;
5789 		}
5790 #endif /* DHD_WAKE_STATUS */
5791 
5792 		eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
5793 #ifdef DHD_AWDL
5794 		if (dhdp->awdl_llc_enabled &&
5795 			dhdp->awdl_ifidx && ifidx == dhdp->awdl_ifidx) {
5796 			if (ntoh16(eh->ether_type) < ETHER_TYPE_MIN) {
5797 				dhd_awdl_llc_to_eth_hdr(dhdp, eh, pktbuf);
5798 			}
5799 		}
5800 #endif /* DHD_AWDL */
5801 
5802 		if (dhd->pub.tput_data.tput_test_running &&
5803 			dhd->pub.tput_data.direction == TPUT_DIR_RX &&
5804 			ntoh16(eh->ether_type) == ETHER_TYPE_IP) {
5805 			dhd_tput_test_rx(dhdp, pktbuf);
5806 			PKTFREE(dhd->pub.osh, pktbuf, FALSE);
5807 			continue;
5808 		}
5809 
5810 		if (ifidx >= DHD_MAX_IFS) {
5811 			DHD_ERROR(("%s: ifidx(%d) Out of bound. drop packet\n",
5812 				__FUNCTION__, ifidx));
5813 			if (ntoh16(eh->ether_type) == ETHER_TYPE_BRCM) {
5814 #ifdef DHD_USE_STATIC_CTRLBUF
5815 				PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
5816 #else
5817 				PKTFREE(dhdp->osh, pktbuf, FALSE);
5818 #endif /* DHD_USE_STATIC_CTRLBUF */
5819 			} else {
5820 				PKTCFREE(dhdp->osh, pktbuf, FALSE);
5821 			}
5822 			continue;
5823 		}
5824 
5825 		ifp = dhd->iflist[ifidx];
5826 		if (ifp == NULL) {
5827 			DHD_ERROR_RLMT(("%s: ifp is NULL. drop packet\n",
5828 				__FUNCTION__));
5829 			if (ntoh16(eh->ether_type) == ETHER_TYPE_BRCM) {
5830 #ifdef DHD_USE_STATIC_CTRLBUF
5831 				PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
5832 #else
5833 				PKTFREE(dhdp->osh, pktbuf, FALSE);
5834 #endif /* DHD_USE_STATIC_CTRLBUF */
5835 			} else {
5836 				PKTCFREE(dhdp->osh, pktbuf, FALSE);
5837 			}
5838 			continue;
5839 		}
5840 
5841 		/* Dropping only data packets before registering net device to avoid kernel panic */
5842 #ifndef PROP_TXSTATUS_VSDB
5843 		if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED) &&
5844 			(ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
5845 #else
5846 		if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || !dhd->pub.up) &&
5847 			(ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
5848 #endif /* PROP_TXSTATUS_VSDB */
5849 		{
5850 			DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
5851 			__FUNCTION__));
5852 			PKTCFREE(dhdp->osh, pktbuf, FALSE);
5853 			continue;
5854 		}
5855 
5856 #ifdef PROP_TXSTATUS
5857 		if (dhd_wlfc_is_header_only_pkt(dhdp, pktbuf)) {
5858 			/* WLFC may send a header-only packet when
5859 			 * there is an urgent message but no packet
5860 			 * to piggy-back on
5861 			 */
5862 			PKTCFREE(dhdp->osh, pktbuf, FALSE);
5863 			continue;
5864 		}
5865 #endif
5866 #ifdef DHD_L2_FILTER
5867 		/* If block_ping is enabled drop the ping packet */
5868 		if (ifp->block_ping) {
5869 			if (bcm_l2_filter_block_ping(dhdp->osh, pktbuf) == BCME_OK) {
5870 				PKTCFREE(dhdp->osh, pktbuf, FALSE);
5871 				continue;
5872 			}
5873 		}
5874 		if (ifp->grat_arp && DHD_IF_ROLE_STA(dhdp, ifidx)) {
5875 			if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
5876 				PKTCFREE(dhdp->osh, pktbuf, FALSE);
5877 				continue;
5878 			}
5879 		}
5880 		if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
5881 			int ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, FALSE);
5882 
5883 			/* Drop the packets if l2 filter has processed it already
5884 			 * otherwise continue with the normal path
5885 			 */
5886 			if (ret == BCME_OK) {
5887 				PKTCFREE(dhdp->osh, pktbuf, TRUE);
5888 				continue;
5889 			}
5890 		}
5891 		if (ifp->block_tdls) {
5892 			if (bcm_l2_filter_block_tdls(dhdp->osh, pktbuf) == BCME_OK) {
5893 				PKTCFREE(dhdp->osh, pktbuf, FALSE);
5894 				continue;
5895 			}
5896 		}
5897 #endif /* DHD_L2_FILTER */
5898 
5899 #ifdef DHD_MCAST_REGEN
5900 		DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
5901 		if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
5902 		ASSERT(if_flow_lkup);
5903 
5904 		interface_role = if_flow_lkup[ifidx].role;
5905 		DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
5906 
5907 		if (ifp->mcast_regen_bss_enable && (interface_role != WLC_E_IF_ROLE_WDS) &&
5908 				!DHD_IF_ROLE_AP(dhdp, ifidx) &&
5909 				ETHER_ISUCAST(eh->ether_dhost)) {
5910 			if (dhd_mcast_reverse_translation(eh) ==  BCME_OK) {
5911 #ifdef DHD_PSTA
5912 				/* Change bsscfg to primary bsscfg for unicast-multicast packets */
5913 				if ((dhd_get_psta_mode(dhdp) == DHD_MODE_PSTA) ||
5914 						(dhd_get_psta_mode(dhdp) == DHD_MODE_PSR)) {
5915 					if (ifidx != 0) {
5916 						/* Let the primary in PSTA interface handle this
5917 						 * frame after unicast to Multicast conversion
5918 						 */
5919 						ifp = dhd_get_ifp(dhdp, 0);
5920 						ASSERT(ifp);
5921 					}
5922 				}
5923 #endif /* DHD_PSTA */
5924 			}
5925 		}
5926 #endif /* DHD_MCAST_REGEN */
5927 
5928 #ifdef DHD_WMF
5929 		/* WMF processing for multicast packets */
5930 		if (ifp->wmf.wmf_enable && (ETHER_ISMULTI(eh->ether_dhost))) {
5931 			dhd_sta_t *sta;
5932 			int ret;
5933 
5934 			sta = dhd_find_sta(dhdp, ifidx, (void *)eh->ether_shost);
5935 			ret = dhd_wmf_packets_handle(dhdp, pktbuf, sta, ifidx, 1);
5936 			switch (ret) {
5937 				case WMF_TAKEN:
5938 					/* The packet is taken by WMF. Continue to next iteration */
5939 					continue;
5940 				case WMF_DROP:
5941 					/* Packet DROP decision by WMF. Toss it */
5942 					DHD_ERROR(("%s: WMF decides to drop packet\n",
5943 						__FUNCTION__));
5944 					PKTCFREE(dhdp->osh, pktbuf, FALSE);
5945 					continue;
5946 				default:
5947 					/* Continue the transmit path */
5948 					break;
5949 			}
5950 		}
5951 #endif /* DHD_WMF */
5952 
5953 #ifdef DHDTCPSYNC_FLOOD_BLK
5954 		if (dhd_tcpdata_get_flag(dhdp, pktbuf) == FLAG_SYNC) {
5955 			int delta_sec;
5956 			int delta_sync;
5957 			int sync_per_sec;
5958 			u64 curr_time = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
5959 			ifp->tsync_rcvd++;
5960 			delta_sync = ifp->tsync_rcvd - ifp->tsyncack_txed;
5961 			delta_sec = curr_time - ifp->last_sync;
5962 			if (delta_sec > 1) {
5963 				sync_per_sec = delta_sync/delta_sec;
5964 				if (sync_per_sec > TCP_SYNC_FLOOD_LIMIT) {
5965 					schedule_work(&ifp->blk_tsfl_work);
5966 					DHD_ERROR(("ifidx %d TCP SYNC flood attack suspected! "
5967 						"sync received %d pkt/sec\n",
5968 						ifidx, sync_per_sec));
5969 					ifp->tsync_per_sec = sync_per_sec;
5970 				}
5971 				dhd_reset_tcpsync_info_by_ifp(ifp);
5972 			}
5973 
5974 		}
5975 #endif /* DHDTCPSYNC_FLOOD_BLK */
5976 
5977 #ifdef DHDTCPACK_SUPPRESS
5978 		dhd_tcpdata_info_get(dhdp, pktbuf);
5979 #endif
5980 		skb = PKTTONATIVE(dhdp->osh, pktbuf);
5981 
5982 		ASSERT(ifp);
5983 		skb->dev = ifp->net;
5984 #ifdef DHD_WET
5985 		/* wet related packet proto manipulation should be done in DHD
5986 		 * since dongle doesn't have complete payload
5987 		 */
5988 		if (WET_ENABLED(&dhd->pub) && (dhd_wet_recv_proc(dhd->pub.wet_info,
5989 				pktbuf) < 0)) {
5990 			DHD_INFO(("%s:%s: wet recv proc failed\n",
5991 				__FUNCTION__, dhd_ifname(dhdp, ifidx)));
5992 		}
5993 #endif /* DHD_WET */
5994 
5995 #ifdef DHD_PSTA
5996 		if (PSR_ENABLED(dhdp) &&
5997 #ifdef BCM_ROUTER_DHD
5998 				!(ifp->primsta_dwds) &&
5999 #endif /* BCM_ROUTER_DHD */
6000 				(dhd_psta_proc(dhdp, ifidx, &pktbuf, FALSE) < 0)) {
6001 			DHD_ERROR(("%s:%s: psta recv proc failed\n", __FUNCTION__,
6002 				dhd_ifname(dhdp, ifidx)));
6003 		}
6004 #endif /* DHD_PSTA */
6005 
6006 #if defined(BCM_ROUTER_DHD)
6007 		/* XXX Use WOFA for both dhdap and dhdap-atlas router. */
6008 		/* XXX dhd_sendpkt verify pkt accounting (TO/FRM NATIVE) and PKTCFREE */
6009 
6010 		if (DHD_IF_ROLE_AP(dhdp, ifidx) && (!ifp->ap_isolate)) {
6011 			eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
6012 			if (ETHER_ISUCAST(eh->ether_dhost)) {
6013 				if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) {
6014 					dhd_sendpkt(dhdp, ifidx, pktbuf);
6015 					continue;
6016 				}
6017 			} else {
6018 				void *npkt;
6019 #if defined(HNDCTF)
6020 				if (PKTISCHAINED(pktbuf)) { /* XXX WAR */
6021 					DHD_ERROR(("Error: %s():%d Chained non unicast pkt<%p>\n",
6022 						__FUNCTION__, __LINE__, pktbuf));
6023 					PKTFRMNATIVE(dhdp->osh, pktbuf);
6024 					PKTCFREE(dhdp->osh, pktbuf, FALSE);
6025 					continue;
6026 				}
6027 #endif /* HNDCTF */
6028 				if ((ntoh16(eh->ether_type) != ETHER_TYPE_IAPP_L2_UPDATE) &&
6029 					((npkt = PKTDUP(dhdp->osh, pktbuf)) != NULL))
6030 					dhd_sendpkt(dhdp, ifidx, npkt);
6031 			}
6032 		}
6033 
6034 #if defined(HNDCTF)
6035 		/* try cut thru' before sending up */
6036 		if (dhd_ctf_forward(dhd, skb, &pnext) == BCME_OK) {
6037 			continue;
6038 		}
6039 #endif /* HNDCTF */
6040 
6041 #else /* !BCM_ROUTER_DHD */
6042 #ifdef PCIE_FULL_DONGLE
6043 		if ((DHD_IF_ROLE_AP(dhdp, ifidx) || DHD_IF_ROLE_P2PGO(dhdp, ifidx)) &&
6044 			(!ifp->ap_isolate)) {
6045 			eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
6046 			if (ETHER_ISUCAST(eh->ether_dhost)) {
6047 				if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) {
6048 					dhd_sendpkt(dhdp, ifidx, pktbuf);
6049 					continue;
6050 				}
6051 			} else {
6052 				if ((ntoh16(eh->ether_type) != ETHER_TYPE_IAPP_L2_UPDATE)) {
6053 					void *npktbuf = NULL;
6054 					/*
6055 					* If host_sfhllc_supported enabled, do skb_copy as SFHLLC
6056 					* header will be inserted during Tx, due to which network
6057 					* stack will not decode the Rx packet.
6058 					* Else PKTDUP(skb_clone) is enough.
6059 					*/
6060 					if (dhdp->host_sfhllc_supported) {
6061 						npktbuf = skb_copy(skb, GFP_ATOMIC);
6062 					} else {
6063 						npktbuf = PKTDUP(dhdp->osh, pktbuf);
6064 					}
6065 					if (npktbuf != NULL) {
6066 						dhd_sendpkt(dhdp, ifidx, npktbuf);
6067 					}
6068 				}
6069 			}
6070 		}
6071 #endif /* PCIE_FULL_DONGLE */
6072 #endif /* BCM_ROUTER_DHD */
6073 #ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
6074 		if (IS_STA_IFACE(ndev_to_wdev(ifp->net)) &&
6075 			(ifp->recv_reassoc_evt == TRUE) && (ifp->post_roam_evt == FALSE) &&
6076 			(dhd_is_4way_msg((char *)(skb->data)) == EAPOL_4WAY_M1)) {
6077 				DHD_ERROR(("%s: Reassoc is in progress. "
6078 					"Drop EAPOL M1 frame\n", __FUNCTION__));
6079 				PKTFREE(dhdp->osh, pktbuf, FALSE);
6080 				continue;
6081 		}
6082 #endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
6083 #ifdef WLEASYMESH
6084 		if ((dhdp->conf->fw_type == FW_TYPE_EZMESH) &&
6085 				(ntoh16(eh->ether_type) != ETHER_TYPE_BRCM)) {
6086 			uint16 * da = (uint16 *)(eh->ether_dhost);
6087 			ASSERT(ISALIGNED(da, 2));
6088 
6089 			/* XXX: Special handling for 1905 messages:
6090 			 * if the DA matches a configured 1905 AL MAC address,
6091 			 * bypass the forwarder and forward it to the linux stack
6092 			 */
6093 			if (ntoh16(eh->ether_type) == ETHER_TYPE_1905_1) {
6094 				if (!eacmp(da, ifp->_1905_al_ucast) || !eacmp(da, ifp->_1905_al_mcast)) {
6095 					//skb->fwr_flood = 0;
6096 				} else {
6097 					//skb->fwr_flood = 1;
6098 				}
6099 			}
6100 		}
6101 #endif /* WLEASYMESH */
6102 		/* Get the protocol, maintain skb state around eth_type_trans().
6103 		 * The main reason for this hack is a limitation of Linux 2.4,
6104 		 * where 'eth_type_trans' uses 'net->hard_header_len' instead of
6105 		 * ETH_HLEN for its internal skb_pull. To avoid copying packets
6106 		 * coming from the network stack when adding the BDC and hardware
6107 		 * headers, we set 'net->hard_header_len' during network interface
6108 		 * registration to ETH_HLEN plus the extra space required for the
6109 		 * BDC, hardware header etc., not just ETH_HLEN.
6110 		 */
6111 		eth = skb->data;
6112 		len = skb->len;
6113 		dump_data = skb->data;
6114 		protocol = (skb->data[12] << 8) | skb->data[13];
6115 
6116 		if (protocol == ETHER_TYPE_802_1X) {
6117 			DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED);
6118 #if defined(WL_CFG80211) && defined(WL_WPS_SYNC)
6119 			wl_handle_wps_states(ifp->net, dump_data, len, FALSE);
6120 #endif /* WL_CFG80211 && WL_WPS_SYNC */
6121 #ifdef DHD_4WAYM4_FAIL_DISCONNECT
6122 			if (dhd_is_4way_msg((uint8 *)(skb->data)) == EAPOL_4WAY_M3) {
6123 				OSL_ATOMIC_SET(dhdp->osh, &ifp->m4state, M3_RXED);
6124 			}
6125 #endif /* DHD_4WAYM4_FAIL_DISCONNECT */
6126 		}
6127 		dhd_dump_pkt(dhdp, ifidx, dump_data, len, FALSE, NULL, NULL);
6128 
6129 #if defined(DHD_WAKE_STATUS) && defined(DHD_WAKEPKT_DUMP)
6130 		if (pkt_wake) {
6131 			dhd_prhex("[wakepkt_dump]", (char*)dump_data, MIN(len, 64), DHD_ERROR_VAL);
6132 			DHD_ERROR(("config check in_suspend: %d ", dhdp->in_suspend));
6133 #ifdef ARP_OFFLOAD_SUPPORT
6134 			DHD_ERROR(("arp hmac_update:%d \n", dhdp->hmac_updated));
6135 #endif /* ARP_OFFLOAD_SUPPORT */
6136 		}
6137 #endif /* DHD_WAKE_STATUS && DHD_WAKEPKT_DUMP */
6138 
6139 #ifdef BCMINTERNAL
6140 		if (dhd->pub.loopback) {
6141 			struct ether_header *local_eh = (struct ether_header *)eth;
6142 			if (ntoh16(local_eh->ether_type) == ETHER_TYPE_IP) {
6143 				uint8 *myp = (uint8 *)local_eh;
6144 				struct ipv4_hdr *iph = (struct ipv4_hdr *)(myp + ETHER_HDR_LEN);
6145 				uint16 iplen = (iph->version_ihl & 0xf) * sizeof(uint32);
6146 				if (iph->prot == 1) {
6147 					uint8 *icmph = (uint8 *)iph + iplen;
6148 					if (icmph[0] == 8) {
6149 						uint8 temp_addr[ETHER_ADDR_LEN];
6150 						uint8 temp_ip[IPV4_ADDR_LEN];
6151 						/* Ether header flip */
6152 						memcpy(temp_addr, local_eh->ether_dhost,
6153 							ETHER_ADDR_LEN);
6154 						memcpy(local_eh->ether_dhost,
6155 							local_eh->ether_shost, ETHER_ADDR_LEN);
6156 						memcpy(local_eh->ether_shost, temp_addr,
6157 							ETHER_ADDR_LEN);
6158 
6159 						/* IP header flip */
6160 						memcpy(temp_ip, iph->src_ip, IPV4_ADDR_LEN);
6161 						memcpy(iph->src_ip, iph->dst_ip, IPV4_ADDR_LEN);
6162 						memcpy(iph->dst_ip, temp_ip, IPV4_ADDR_LEN);
6163 
6164 						/* ICMP type: echo request -> echo reply */
6165 						icmph[0] = 0;
6166 					}
6167 				} else if (iph->prot == 17) {
6168 					uint8 *udph = (uint8 *)iph + iplen;
6169 					uint16 destport = ntoh16(*((uint16 *)udph + 1));
6170 					if (destport == 8888) {
6171 						uint8 temp_addr[ETHER_ADDR_LEN];
6172 						uint8 temp_ip[IPV4_ADDR_LEN];
6173 						/* Ether header flip */
6174 						memcpy(temp_addr, local_eh->ether_dhost,
6175 							ETHER_ADDR_LEN);
6176 						memcpy(local_eh->ether_dhost,
6177 							local_eh->ether_shost, ETHER_ADDR_LEN);
6178 						memcpy(local_eh->ether_shost, temp_addr,
6179 							ETHER_ADDR_LEN);
6180 
6181 						/* IP header flip */
6182 						memcpy(temp_ip, iph->src_ip, IPV4_ADDR_LEN);
6183 						memcpy(iph->src_ip, iph->dst_ip, IPV4_ADDR_LEN);
6184 						memcpy(iph->dst_ip, temp_ip, IPV4_ADDR_LEN);
6185 
6186 						/* Reset UDP checksum to 0 (no checksum) */
6187 						*((uint16 *)udph + 3) = 0;
6188 					}
6189 				}
6190 			}
6191 		}
6192 #endif /* BCMINTERNAL */
6193 		skb->protocol = eth_type_trans(skb, skb->dev);
6194 
6195 		if (skb->pkt_type == PACKET_MULTICAST) {
6196 			dhd->pub.rx_multicast++;
6197 			ifp->stats.multicast++;
6198 		}
6199 
6200 		skb->data = eth;
6201 		skb->len = len;
6202 
6203 		/* TODO: XXX: re-look into dropped packets. */
6204 		DHD_DBG_PKT_MON_RX(dhdp, skb);
6205 		/* Strip header, count, deliver upward */
6206 		skb_pull(skb, ETH_HLEN);
6207 
6208 #ifdef ENABLE_WAKEUP_PKT_DUMP
6209 		if (dhd_mmc_wake) {
6210 			DHD_INFO(("wake_pkt %s(%d)\n", __FUNCTION__, __LINE__));
6211 			if (DHD_INFO_ON()) {
6212 				prhex("wake_pkt", (char*) eth, MIN(len, 48));
6213 			}
6214 			update_wake_pkt_info(skb);
6215 #ifdef CONFIG_IRQ_HISTORY
6216 			add_irq_history(0, "WIFI");
6217 #endif
6218 			dhd_mmc_wake = FALSE;
6219 		}
6220 #endif /* ENABLE_WAKEUP_PKT_DUMP */
6221 
6222 		/* Process special event packets and then discard them */
6223 		/* XXX Decide on a better way to fit this in */
6224 		memset(&event, 0, sizeof(event));
6225 
6226 		if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
6227 			bcm_event_msg_u_t evu;
6228 			int ret_event, event_type;
6229 			void *pkt_data = skb_mac_header(skb);
6230 
6231 			ret_event = wl_host_event_get_data(pkt_data, len, &evu);
6232 
6233 			if (ret_event != BCME_OK) {
6234 				DHD_ERROR(("%s: wl_host_event_get_data err = %d\n",
6235 					__FUNCTION__, ret_event));
6236 #ifdef DHD_USE_STATIC_CTRLBUF
6237 				PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
6238 #else
6239 				PKTFREE(dhdp->osh, pktbuf, FALSE);
6240 #endif
6241 				continue;
6242 			}
6243 
6244 			memcpy(&event, &evu.event, sizeof(wl_event_msg_t));
6245 			event_type = ntoh32_ua((void *)&event.event_type);
6246 #ifdef SHOW_LOGTRACE
6247 			/* Event msg printing is called from dhd_rx_frame which is in Tasklet
6248 			 * context in case of PCIe FD, in case of other bus this will be from
6249 			 * DPC context. If we get bunch of events from Dongle then printing all
6250 			 * of them from Tasklet/DPC context that too in data path is costly.
6251 			 * Also in the new Dongle SW(4359, 4355 onwards) console prints too come as
6252 			 * events with type WLC_E_TRACE.
6253 			 * We'll print these console logs from the WorkQueue context by enqueuing the SKB
6254 			 * here and Dequeuing will be done in WorkQueue and will be freed only if
6255 			 * logtrace_pkt_sendup is true
6256 			 */
6257 			if (event_type == WLC_E_TRACE) {
6258 				DHD_TRACE(("%s: WLC_E_TRACE\n", __FUNCTION__));
6259 				dhd_event_logtrace_enqueue(dhdp, ifidx, pktbuf);
6260 				continue;
6261 			}
6262 #endif /* SHOW_LOGTRACE */
6263 
6264 #ifdef WL_NANHO
6265 			/* Process firmware NAN event by NANHO host module */
6266 			if (dhd_nho_evt_process(dhdp, ifidx, &event, pkt_data, len)) {
6267 				/* NANHO host module consumed NAN event. free pkt here. */
6268 #ifdef DHD_USE_STATIC_CTRLBUF
6269 				PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
6270 #else
6271 				PKTFREE(dhdp->osh, pktbuf, FALSE);
6272 #endif
6273 				continue;
6274 			}
6275 #endif /* WL_NANHO */
6276 
6277 			ret_event = dhd_wl_host_event(dhd, ifidx, pkt_data, len, &event, &data);
6278 
6279 			wl_event_to_host_order(&event);
6280 #if defined(OEM_ANDROID)
6281 			if (!tout_ctrl)
6282 				tout_ctrl = DHD_PACKET_TIMEOUT_MS;
6283 #endif /* OEM_ANDROID */
6284 
6285 #if (defined(OEM_ANDROID) && defined(PNO_SUPPORT))
6286 			if (event_type == WLC_E_PFN_NET_FOUND) {
6287 				/* enforce custom wake lock to guarantee that the kernel is not suspended */
6288 				tout_ctrl = CUSTOM_PNO_EVENT_LOCK_xTIME * DHD_PACKET_TIMEOUT_MS;
6289 			}
6290 #endif /* PNO_SUPPORT */
6291 			if (numpkt != 1) {
6292 				DHD_TRACE(("%s: Got BRCM event packet in a chained packet.\n",
6293 				__FUNCTION__));
6294 			}
6295 
6296 #ifdef DHD_WAKE_STATUS
6297 			if (unlikely(pkt_wake)) {
6298 #ifdef DHD_WAKE_EVENT_STATUS
6299 				if (event.event_type < WLC_E_LAST) {
6300 					wcp->rc_event[event.event_type]++;
6301 					wcp->rcwake++;
6302 					pkt_wake = 0;
6303 				}
6304 #endif /* DHD_WAKE_EVENT_STATUS */
6305 			}
6306 #endif /* DHD_WAKE_STATUS */
6307 
6308 			/* For delete virtual interface event, wl_host_event returns positive
6309 			 * i/f index, do not proceed. just free the pkt.
6310 			 */
6311 			if ((event_type == WLC_E_IF) && (ret_event > 0)) {
6312 				DHD_ERROR(("%s: interface is deleted. Free event packet\n",
6313 				__FUNCTION__));
6314 #ifdef DHD_USE_STATIC_CTRLBUF
6315 				PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
6316 #else
6317 				PKTFREE(dhdp->osh, pktbuf, FALSE);
6318 #endif
6319 				continue;
6320 			}
6321 
6322 			/*
6323 			 * For the event packets, there is a possibility
6324 			 * of ifidx getting modified. Thus update the ifp
6325 			 * once again.
6326 			 */
6327 			ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
6328 			ifp = dhd->iflist[ifidx];
6329 #ifndef PROP_TXSTATUS_VSDB
6330 			if (!(ifp && ifp->net && (ifp->net->reg_state == NETREG_REGISTERED)))
6331 #else
6332 			if (!(ifp && ifp->net && (ifp->net->reg_state == NETREG_REGISTERED) &&
6333 				dhd->pub.up))
6334 #endif /* PROP_TXSTATUS_VSDB */
6335 			{
6336 				DHD_ERROR(("%s: net device is NOT registered. drop event packet\n",
6337 				__FUNCTION__));
6338 #ifdef DHD_USE_STATIC_CTRLBUF
6339 				PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
6340 #else
6341 				PKTFREE(dhdp->osh, pktbuf, FALSE);
6342 #endif
6343 				continue;
6344 			}
6345 
6346 #ifdef SENDPROB
6347 			if (dhdp->wl_event_enabled ||
6348 				(dhdp->recv_probereq && (event.event_type == WLC_E_PROBREQ_MSG)))
6349 #else
6350 			if (dhdp->wl_event_enabled)
6351 #endif
6352 			{
6353 #ifdef DHD_USE_STATIC_CTRLBUF
6354 				/* If event bufs are allocated via static buf pool
6355 				 * and wl events are enabled, make a copy, free the
6356 				 * local one and send the copy up.
6357 				 */
6358 				struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
6359 				/* Copy event and send it up */
6360 				PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
6361 				if (nskb) {
6362 					skb = nskb;
6363 				} else {
6364 					DHD_ERROR(("skb clone failed. dropping event.\n"));
6365 					continue;
6366 				}
6367 #endif /* DHD_USE_STATIC_CTRLBUF */
6368 			} else {
6369 				/* If event forwarding is not explicitly enabled, drop events */
6370 #ifdef DHD_USE_STATIC_CTRLBUF
6371 				PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
6372 #else
6373 				PKTFREE(dhdp->osh, pktbuf, FALSE);
6374 #endif /* DHD_USE_STATIC_CTRLBUF */
6375 				continue;
6376 			}
6377 		} else {
6378 #if defined(OEM_ANDROID)
6379 			tout_rx = DHD_PACKET_TIMEOUT_MS;
6380 #endif /* OEM_ANDROID */
6381 
6382 #ifdef PROP_TXSTATUS
6383 			dhd_wlfc_save_rxpath_ac_time(dhdp, (uint8)PKTPRIO(skb));
6384 #endif /* PROP_TXSTATUS */
6385 
6386 #ifdef DHD_WAKE_STATUS
6387 			if (unlikely(pkt_wake)) {
6388 				wcp->rxwake++;
6389 #ifdef DHD_WAKE_RX_STATUS
6390 #define ETHER_ICMP6_HEADER	20
6391 #define ETHER_IPV6_SADDR (ETHER_ICMP6_HEADER + 2)
6392 #define ETHER_IPV6_DADDR (ETHER_IPV6_SADDR + IPV6_ADDR_LEN)
6393 #define ETHER_ICMPV6_TYPE (ETHER_IPV6_DADDR + IPV6_ADDR_LEN)
6394 
6395 				if (ntoh16(skb->protocol) == ETHER_TYPE_ARP) /* ARP */
6396 					wcp->rx_arp++;
6397 				if (dump_data[0] == 0xFF) { /* Broadcast */
6398 					wcp->rx_bcast++;
6399 				} else if (dump_data[0] & 0x01) { /* Multicast */
6400 					wcp->rx_mcast++;
6401 					if (ntoh16(skb->protocol) == ETHER_TYPE_IPV6) {
6402 					    wcp->rx_multi_ipv6++;
6403 					    if ((skb->len > ETHER_ICMP6_HEADER) &&
6404 					        (dump_data[ETHER_ICMP6_HEADER] == IPPROTO_ICMPV6)) {
6405 					        wcp->rx_icmpv6++;
6406 					        if (skb->len > ETHER_ICMPV6_TYPE) {
6407 					            switch (dump_data[ETHER_ICMPV6_TYPE]) {
6408 					            case NDISC_ROUTER_ADVERTISEMENT:
6409 					                wcp->rx_icmpv6_ra++;
6410 					                break;
6411 					            case NDISC_NEIGHBOUR_ADVERTISEMENT:
6412 					                wcp->rx_icmpv6_na++;
6413 					                break;
6414 					            case NDISC_NEIGHBOUR_SOLICITATION:
6415 					                wcp->rx_icmpv6_ns++;
6416 					                break;
6417 					            }
6418 					        }
6419 					    }
6420 					} else if (dump_data[2] == 0x5E) {
6421 						wcp->rx_multi_ipv4++;
6422 					} else {
6423 						wcp->rx_multi_other++;
6424 					}
6425 				} else { /* Unicast */
6426 					wcp->rx_ucast++;
6427 				}
6428 #undef ETHER_ICMP6_HEADER
6429 #undef ETHER_IPV6_SADDR
6430 #undef ETHER_IPV6_DADDR
6431 #undef ETHER_ICMPV6_TYPE
6432 #endif /* DHD_WAKE_RX_STATUS */
6433 				pkt_wake = 0;
6434 			}
6435 #endif /* DHD_WAKE_STATUS */
6436 		}
6437 
6438 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
6439 		ifp->net->last_rx = jiffies;
6440 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) */
6441 
6442 		if (ntoh16(skb->protocol) != ETHER_TYPE_BRCM) {
6443 			dhdp->dstats.rx_bytes += skb->len;
6444 			dhdp->rx_packets++; /* Local count */
6445 			ifp->stats.rx_bytes += skb->len;
6446 			ifp->stats.rx_packets++;
6447 		}
6448 #if defined(DHD_TCP_WINSIZE_ADJUST)
6449 		if (dhd_use_tcp_window_size_adjust) {
6450 			if (ifidx == 0 && ntoh16(skb->protocol) == ETHER_TYPE_IP) {
6451 				dhd_adjust_tcp_winsize(dhdp->op_mode, skb);
6452 			}
6453 		}
6454 #endif /* DHD_TCP_WINSIZE_ADJUST */
6455 
6456 		/* XXX WL here makes sure data is 4-byte aligned? */
6457 		if (in_interrupt()) {
6458 			bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
6459 				__FUNCTION__, __LINE__);
6460 #if defined(DHD_LB_RXP)
6461 #ifdef ENABLE_DHD_GRO
6462 			/* The pktlog module clones an skb using skb_clone and
6463 			 * stores the skb pointer in its ring buffer. Once the
6464 			 * buffer is full, PKTFREE is called to remove the oldest
6465 			 * skb. A kernel panic occurred when the pktlog module
6466 			 * freed an rx frame that had been handled by
6467 			 * napi_gro_receive(). As a fix, DHD does not use
6468 			 * napi_gro_receive() for packets used by the pktlog
6469 			 * module.
6470 			 */
6471 			if (dhd_gro_enable && !skb_cloned(skb) &&
6472 				ntoh16(skb->protocol) != ETHER_TYPE_BRCM) {
6473 				napi_gro_receive(&dhd->rx_napi_struct, skb);
6474 			} else {
6475 				netif_receive_skb(skb);
6476 			}
6477 #else
6478 #if defined(WL_MONITOR) && defined(BCMSDIO)
6479 			if (dhd_monitor_enabled(dhdp, ifidx))
6480 				dhd_rx_mon_pkt_sdio(dhdp, skb, ifidx);
6481 			else
6482 #endif /* WL_MONITOR && BCMSDIO */
6483 			netif_receive_skb(skb);
6484 #endif /* ENABLE_DHD_GRO */
6485 #else /* !defined(DHD_LB_RXP) */
6486 			netif_rx(skb);
6487 #endif /* !defined(DHD_LB_RXP) */
6488 		} else {
6489 			if (dhd->rxthread_enabled) {
6490 				if (!skbhead)
6491 					skbhead = skb;
6492 				else
6493 					PKTSETNEXT(dhdp->osh, skbprev, skb);
6494 				skbprev = skb;
6495 			} else {
6496 
6497 				/* If the receive is not processed inside an ISR,
6498 				 * the softirqd must be woken explicitly to service
6499 				 * the NET_RX_SOFTIRQ.	In 2.6 kernels, this is handled
6500 				 * by netif_rx_ni(), but in earlier kernels, we need
6501 				 * to do it manually.
6502 				 */
6503 				bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
6504 					__FUNCTION__, __LINE__);
6505 
6506 #if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
6507 				dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
6508 #endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
6509 #if defined(DHD_LB_RXP)
6510 #ifdef ENABLE_DHD_GRO
6511 				if (dhd_gro_enable && !skb_cloned(skb) &&
6512 					ntoh16(skb->protocol) != ETHER_TYPE_BRCM) {
6513 					napi_gro_receive(&dhd->rx_napi_struct, skb);
6514 				} else {
6515 					netif_receive_skb(skb);
6516 				}
6517 #else
6518 				netif_receive_skb(skb);
6519 #endif /* ENABLE_DHD_GRO */
6520 #else /* !defined(DHD_LB_RXP) */
6521 				netif_rx_ni(skb);
6522 #endif /* !defined(DHD_LB_RXP) */
6523 			}
6524 		}
6525 	}
6526 
6527 	if (dhd->rxthread_enabled && skbhead)
6528 		dhd_sched_rxf(dhdp, skbhead);
6529 
6530 #if defined(OEM_ANDROID)
6531 	DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx);
6532 	DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl);
6533 #endif /* OEM_ANDROID */
6534 }
6535 
6536 void
6537 dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
6538 {
6539 	/* Linux version has nothing to do */
6540 	return;
6541 }
6542 
6543 void
6544 dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
6545 {
6546 	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
6547 	struct ether_header *eh;
6548 	uint16 type;
6549 
6550 	if (dhdp->tput_data.tput_test_running) {
6551 
6552 		dhdp->batch_tx_pkts_cmpl++;
6553 
6554 		/* don't count the stop pkt */
6555 		if (success &&
6556 			dhdp->batch_tx_pkts_cmpl <= dhdp->batch_tx_num_pkts)
6557 			dhdp->tput_data.pkts_good++;
6558 		else if (!success)
6559 			dhdp->tput_data.pkts_bad++;
6560 
6561 		/* we don't care about the stop packet in the tput test */
6562 		if (dhdp->batch_tx_pkts_cmpl == dhdp->batch_tx_num_pkts) {
6563 			dhdp->tput_stop_ts = OSL_SYSUPTIME_US();
6564 			dhdp->tput_data.pkts_cmpl += dhdp->batch_tx_pkts_cmpl;
6565 			dhdp->tput_data.num_pkts += dhdp->batch_tx_num_pkts;
6566 			dhd_os_tput_test_wake(dhdp);
6567 		}
6568 	}
6569 	/* XXX where does this stuff belong to? */
6570 	dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);
6571 
6572 	/* XXX Use packet tag when it is available to identify its type */
6573 
6574 	eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
6575 	type  = ntoh16(eh->ether_type);
6576 
6577 	if (type == ETHER_TYPE_802_1X) {
6578 		atomic_dec(&dhd->pend_8021x_cnt);
6579 	}
6580 
6581 #ifdef PROP_TXSTATUS
6582 	if (dhdp->wlfc_state && (dhdp->proptxstatus_mode != WLFC_FCMODE_NONE)) {
6583 		dhd_if_t *ifp = dhd->iflist[DHD_PKTTAG_IF(PKTTAG(txp))];
6584 		uint datalen  = PKTLEN(dhd->pub.osh, txp);
6585 		if (ifp != NULL) {
6586 			if (success) {
6587 				dhd->pub.tx_packets++;
6588 				ifp->stats.tx_packets++;
6589 				ifp->stats.tx_bytes += datalen;
6590 			} else {
6591 				ifp->stats.tx_dropped++;
6592 			}
6593 		}
6594 	}
6595 #endif
6596 	if (success) {
6597 		dhd->pub.tot_txcpl++;
6598 	}
6599 }
6600 
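/* Wait/wake pair for the throughput test: the waiter sleeps on
 * tx_tput_test_wait until dhd_txcomplete() above signals completion via
 * dhd_os_tput_test_wake().
 */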
6601 int dhd_os_tput_test_wait(dhd_pub_t *pub, uint *condition,
6602 		uint timeout_ms)
6603 {
6604 	int timeout;
6605 
6606 	/* Convert timeout in milliseconds to jiffies */
6607 	timeout = msecs_to_jiffies(timeout_ms);
6608 	pub->tput_test_done = FALSE;
6609 	condition = (uint *)&pub->tput_test_done; /* always wait on tput_test_done */
6610 	timeout = wait_event_timeout(pub->tx_tput_test_wait,
6611 		(*condition), timeout);
6612 
6613 	return timeout;
6614 }
6615 
6616 int dhd_os_tput_test_wake(dhd_pub_t * pub)
6617 {
6618 	OSL_SMP_WMB();
6619 	pub->tput_test_done = TRUE;
6620 	OSL_SMP_WMB();
6621 	wake_up(&(pub->tx_tput_test_wait));
6622 	return 0;
6623 }
6624 
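/* Per-interface statistics accessor (presumably wired up as the net_device
 * get_stats hook): refreshes dongle counters over the bus when the device
 * is up and returns the interface's stats structure.
 */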
6625 static struct net_device_stats *
6626 dhd_get_stats(struct net_device *net)
6627 {
6628 	dhd_info_t *dhd = DHD_DEV_INFO(net);
6629 	dhd_if_t *ifp;
6630 
6631 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
6632 
6633 	if (!dhd) {
6634 		DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
6635 		goto error;
6636 	}
6637 
6638 	ifp = dhd_get_ifp_by_ndev(&dhd->pub, net);
6639 	if (!ifp) {
6640 		/* return empty stats */
6641 		DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__));
6642 		goto error;
6643 	}
6644 
6645 	if (dhd->pub.up) {
6646 		/* Use the protocol to get dongle stats */
6647 		dhd_prot_dstats(&dhd->pub);
6648 	}
6649 	return &ifp->stats;
6650 
6651 error:
6652 	memset(&net->stats, 0, sizeof(net->stats));
6653 	return &net->stats;
6654 }
6655 
6656 #ifndef BCMDBUS
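/* Watchdog kernel thread: optionally boosted to SCHED_FIFO, it calls
 * dhd_bus_watchdog() on each semaphore tick and re-arms the watchdog timer,
 * compensating for the time consumed by the tick itself.
 */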
6657 static int
6658 dhd_watchdog_thread(void *data)
6659 {
6660 	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
6661 	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
6662 	/* This thread doesn't need any user-level access,
6663 	 * so get rid of all our resources
6664 	 */
6665 	if (dhd_watchdog_prio > 0) {
6666 		struct sched_param param;
6667 		param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)?
6668 			dhd_watchdog_prio:(MAX_RT_PRIO-1);
6669 		setScheduler(current, SCHED_FIFO, &param);
6670 	}
6671 
6672 	while (1) {
6673 		if (down_interruptible (&tsk->sema) == 0) {
6674 			unsigned long flags;
6675 			unsigned long jiffies_at_start = jiffies;
6676 			unsigned long time_lapse;
6677 #ifdef BCMPCIE
6678 			DHD_OS_WD_WAKE_LOCK(&dhd->pub);
6679 #endif /* BCMPCIE */
6680 
6681 			SMP_RD_BARRIER_DEPENDS();
6682 			if (tsk->terminated) {
6683 #ifdef BCMPCIE
6684 				DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
6685 #endif /* BCMPCIE */
6686 				break;
6687 			}
6688 
6689 			if (dhd->pub.dongle_reset == FALSE) {
6690 				DHD_TIMER(("%s:\n", __FUNCTION__));
6691 				dhd_analyze_sock_flows(dhd, dhd_watchdog_ms);
6692 				dhd_bus_watchdog(&dhd->pub);
6693 
6694 #ifdef DHD_TIMESYNC
6695 				/* Call the timesync module watchdog */
6696 				dhd_timesync_watchdog(&dhd->pub);
6697 #endif /* DHD_TIMESYNC */
6698 #if defined(BCM_ROUTER_DHD) && defined(CTFPOOL)
6699 				/* allocate and add a new skb to the pkt pool */
6700 				if (CTF_ENAB(dhd->cih))
6701 					osl_ctfpool_replenish(dhd->pub.osh, CTFPOOL_REFILL_THRESH);
6702 #endif /* BCM_ROUTER_DHD && CTFPOOL */
6703 
6704 				DHD_GENERAL_LOCK(&dhd->pub, flags);
6705 				/* Count the tick for reference */
6706 				dhd->pub.tickcnt++;
6707 #ifdef DHD_L2_FILTER
6708 				dhd_l2_filter_watchdog(&dhd->pub);
6709 #endif /* DHD_L2_FILTER */
6710 				time_lapse = jiffies - jiffies_at_start;
6711 
6712 				/* Reschedule the watchdog */
6713 				if (dhd->wd_timer_valid) {
6714 					mod_timer(&dhd->timer,
6715 					    jiffies +
6716 					    msecs_to_jiffies(dhd_watchdog_ms) -
6717 					    min(msecs_to_jiffies(dhd_watchdog_ms), time_lapse));
6718 				}
6719 				DHD_GENERAL_UNLOCK(&dhd->pub, flags);
6720 			}
6721 #ifdef BCMPCIE
6722 			DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
6723 #endif /* BCMPCIE */
6724 		} else {
6725 			break;
6726 		}
6727 	}
6728 
6729 	complete_and_exit(&tsk->completed, 0);
6730 }
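/*
 * The rescheduling math above, spelled out (derived from the code, not a
 * separate spec): the next expiry is one watchdog period minus the time
 * already spent in this pass, clamped at zero so an overrun fires again
 * immediately:
 *
 *	period = msecs_to_jiffies(dhd_watchdog_ms);
 *	delay  = period - min(period, time_lapse);	(0 if we overran)
 *	mod_timer(&dhd->timer, jiffies + delay);
 */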
6731 
6732 static void dhd_watchdog(ulong data)
6733 {
6734 	dhd_info_t *dhd = (dhd_info_t *)data;
6735 	unsigned long flags;
6736 
6737 	if (dhd->pub.dongle_reset) {
6738 		return;
6739 	}
6740 
6741 	if (dhd->thr_wdt_ctl.thr_pid >= 0) {
6742 		up(&dhd->thr_wdt_ctl.sema);
6743 		return;
6744 	}
6745 
6746 #ifdef BCMPCIE
6747 	DHD_OS_WD_WAKE_LOCK(&dhd->pub);
6748 #endif /* BCMPCIE */
6749 	/* Call the bus module watchdog */
6750 	dhd_bus_watchdog(&dhd->pub);
6751 
6752 #ifdef DHD_TIMESYNC
6753 	/* Call the timesync module watchdog */
6754 	dhd_timesync_watchdog(&dhd->pub);
6755 #endif /* DHD_TIMESYNC */
6756 
6757 	DHD_GENERAL_LOCK(&dhd->pub, flags);
6758 	/* Count the tick for reference */
6759 	dhd->pub.tickcnt++;
6760 
6761 #ifdef DHD_L2_FILTER
6762 	dhd_l2_filter_watchdog(&dhd->pub);
6763 #endif /* DHD_L2_FILTER */
6764 	/* Reschedule the watchdog */
6765 	if (dhd->wd_timer_valid)
6766 		mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
6767 	DHD_GENERAL_UNLOCK(&dhd->pub, flags);
6768 #ifdef BCMPCIE
6769 	DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
6770 #endif /* BCMPCIE */
6771 #if defined(BCM_ROUTER_DHD) && defined(CTFPOOL)
6772 	    /* allocate and add a new skb to the pkt pool */
6773 	    if (CTF_ENAB(dhd->cih))
6774 			osl_ctfpool_replenish(dhd->pub.osh, CTFPOOL_REFILL_THRESH);
6775 #endif /* BCM_ROUTER_DHD && CTFPOOL */
6776 }
6777 
6778 #ifdef DHD_PCIE_RUNTIMEPM
6779 static int
6780 dhd_rpm_state_thread(void *data)
6781 {
6782 	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
6783 	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
6784 
6785 	while (1) {
6786 		if (down_interruptible (&tsk->sema) == 0) {
6787 			unsigned long flags;
6788 			unsigned long jiffies_at_start = jiffies;
6789 			unsigned long time_lapse;
6790 
6791 			SMP_RD_BARRIER_DEPENDS();
6792 			if (tsk->terminated) {
6793 				break;
6794 			}
6795 
6796 			if (dhd->pub.dongle_reset == FALSE) {
6797 				DHD_TIMER(("%s:\n", __FUNCTION__));
6798 				if (dhd->pub.up) {
6799 #if defined(PCIE_OOB) || defined(PCIE_INB_DW)
6800 					dhd_bus_dw_deassert(&dhd->pub);
6801 #endif /* PCIE_OOB || PCIE_INB_DW */
6802 					if (dhd_get_rpm_state(&dhd->pub)) {
6803 						dhd_runtimepm_state(&dhd->pub);
6804 					}
6805 				}
6806 				DHD_GENERAL_LOCK(&dhd->pub, flags);
6807 				time_lapse = jiffies - jiffies_at_start;
6808 
6809 				/* Reschedule the watchdog */
6810 				if (dhd->rpm_timer_valid) {
6811 					mod_timer(&dhd->rpm_timer,
6812 						jiffies +
6813 						msecs_to_jiffies(dhd_runtimepm_ms) -
6814 						min(msecs_to_jiffies(dhd_runtimepm_ms),
6815 							time_lapse));
6816 				}
6817 				DHD_GENERAL_UNLOCK(&dhd->pub, flags);
6818 			}
6819 		} else {
6820 			break;
6821 		}
6822 	}
6823 
6824 	complete_and_exit(&tsk->completed, 0);
6825 }
6826 
6827 static void dhd_runtimepm(ulong data)
6828 {
6829 	dhd_info_t *dhd = (dhd_info_t *)data;
6830 
6831 	if (dhd->pub.dongle_reset) {
6832 		return;
6833 	}
6834 
6835 	if (dhd->thr_rpm_ctl.thr_pid >= 0) {
6836 		up(&dhd->thr_rpm_ctl.sema);
6837 		return;
6838 	}
6839 }
6840 
6841 void dhd_runtime_pm_disable(dhd_pub_t *dhdp)
6842 {
6843 	dhd_set_rpm_state(dhdp, FALSE);
6844 	dhdpcie_runtime_bus_wake(dhdp, CAN_SLEEP(), __builtin_return_address(0));
6845 }
6846 
6847 void dhd_runtime_pm_enable(dhd_pub_t *dhdp)
6848 {
6849 	/* Enable Runtime PM except for MFG Mode */
6850 	if (!(dhdp->op_mode & DHD_FLAG_MFG_MODE)) {
6851 		if (dhd_get_idletime(dhdp)) {
6852 			dhd_set_rpm_state(dhdp, TRUE);
6853 		}
6854 	}
6855 }
6856 
6857 #endif /* DHD_PCIE_RUNTIMEPM */
6858 
6859 #ifdef ENABLE_ADAPTIVE_SCHED
6860 static void
6861 dhd_sched_policy(int prio)
6862 {
6863 	struct sched_param param;
6864 	if (cpufreq_quick_get(0) <= CUSTOM_CPUFREQ_THRESH) {
6865 		param.sched_priority = 0;
6866 		setScheduler(current, SCHED_NORMAL, &param);
6867 	} else {
6868 		if (get_scheduler_policy(current) != SCHED_FIFO) {
6869 			param.sched_priority = (prio < MAX_RT_PRIO)? prio : (MAX_RT_PRIO-1);
6870 			setScheduler(current, SCHED_FIFO, &param);
6871 		}
6872 	}
6873 }
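/*
 * Behavior sketch for dhd_sched_policy() (illustrative): when the current
 * CPU frequency is at or below CUSTOM_CPUFREQ_THRESH the caller is demoted
 * to SCHED_NORMAL; otherwise the requested priority is clamped into the
 * valid realtime range. E.g., with MAX_RT_PRIO == 100:
 *
 *	dhd_sched_policy(120);	(sets SCHED_FIFO at priority 99, not 120)
 */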
6874 #endif /* ENABLE_ADAPTIVE_SCHED */
6875 #ifdef DEBUG_CPU_FREQ
6876 static int dhd_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
6877 {
6878 	dhd_info_t *dhd = container_of(nb, struct dhd_info, freq_trans);
6879 	struct cpufreq_freqs *freq = data;
6880 	if (dhd) {
6881 		if (!dhd->new_freq)
6882 			goto exit;
6883 		if (val == CPUFREQ_POSTCHANGE) {
6884 			DHD_ERROR(("cpu freq is changed to %u kHZ on CPU %d\n",
6885 				freq->new, freq->cpu));
6886 			*per_cpu_ptr(dhd->new_freq, freq->cpu) = freq->new;
6887 		}
6888 	}
6889 exit:
6890 	return 0;
6891 }
6892 #endif /* DEBUG_CPU_FREQ */
6893 
6894 static int
6895 dhd_dpc_thread(void *data)
6896 {
6897 	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
6898 	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
6899 
6900 	/* This thread doesn't need any user-level access,
6901 	 * so get rid of all our resources
6902 	 */
6903 	if (dhd_dpc_prio > 0)
6904 	{
6905 		struct sched_param param;
6906 		param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
6907 		setScheduler(current, SCHED_FIFO, &param);
6908 	}
6909 
6910 #ifdef CUSTOM_DPC_CPUCORE
6911 	set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_DPC_CPUCORE));
6912 #endif
6913 #ifdef CUSTOM_SET_CPUCORE
6914 	dhd->pub.current_dpc = current;
6915 #endif /* CUSTOM_SET_CPUCORE */
6916 	/* Run until signal received */
6917 	while (1) {
6918 		if (dhd->pub.conf->dpc_cpucore >= 0) {
6919 			printf("%s: set dpc_cpucore %d\n", __FUNCTION__, dhd->pub.conf->dpc_cpucore);
6920 			set_cpus_allowed_ptr(current, cpumask_of(dhd->pub.conf->dpc_cpucore));
6921 			dhd->pub.conf->dpc_cpucore = -1;
6922 		}
6923 		if (dhd->pub.conf->dhd_dpc_prio >= 0) {
6924 			struct sched_param param;
6925 			printf("%s: set dhd_dpc_prio %d\n", __FUNCTION__, dhd->pub.conf->dhd_dpc_prio);
6926 			param.sched_priority = (dhd->pub.conf->dhd_dpc_prio < MAX_RT_PRIO)?
6927 				dhd->pub.conf->dhd_dpc_prio:(MAX_RT_PRIO-1);
6928 			setScheduler(current, SCHED_FIFO, &param);
6929 			dhd->pub.conf->dhd_dpc_prio = -1;
6930 		}
6931 		if (!binary_sema_down(tsk)) {
6932 #ifdef ENABLE_ADAPTIVE_SCHED
6933 			dhd_sched_policy(dhd_dpc_prio);
6934 #endif /* ENABLE_ADAPTIVE_SCHED */
6935 			SMP_RD_BARRIER_DEPENDS();
6936 			if (tsk->terminated) {
6937 				DHD_OS_WAKE_UNLOCK(&dhd->pub);
6938 				break;
6939 			}
6940 
6941 			/* Call bus dpc unless it indicated down (then clean stop) */
6942 			if (dhd->pub.busstate != DHD_BUS_DOWN) {
6943 #ifdef DEBUG_DPC_THREAD_WATCHDOG
6944 				int resched_cnt = 0;
6945 #endif /* DEBUG_DPC_THREAD_WATCHDOG */
6946 				dhd_os_wd_timer_extend(&dhd->pub, TRUE);
6947 				while (dhd_bus_dpc(dhd->pub.bus)) {
6948 					/* process all data */
6949 #ifdef DEBUG_DPC_THREAD_WATCHDOG
6950 					resched_cnt++;
6951 					if (resched_cnt > MAX_RESCHED_CNT) {
6952 					DHD_INFO(("%s Calling msleep to "
6953 						"let other processes run.\n",
6954 							__FUNCTION__));
6955 						dhd->pub.dhd_bug_on = true;
6956 						resched_cnt = 0;
6957 						OSL_SLEEP(1);
6958 					}
6959 #endif /* DEBUG_DPC_THREAD_WATCHDOG */
6960 				}
6961 				dhd_os_wd_timer_extend(&dhd->pub, FALSE);
6962 				DHD_OS_WAKE_UNLOCK(&dhd->pub);
6963 			} else {
6964 				if (dhd->pub.up)
6965 					dhd_bus_stop(dhd->pub.bus, TRUE);
6966 				DHD_OS_WAKE_UNLOCK(&dhd->pub);
6967 			}
6968 		} else {
6969 			break;
6970 		}
6971 	}
6972 	complete_and_exit(&tsk->completed, 0);
6973 }
6974 
6975 static int
6976 dhd_rxf_thread(void *data)
6977 {
6978 	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
6979 	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
6980 #if defined(WAIT_DEQUEUE)
6981 #define RXF_WATCHDOG_TIME 250 /* BARK_TIME(1000) /  */
6982 	ulong watchdogTime = OSL_SYSUPTIME(); /* msec */
6983 #endif
6984 	dhd_pub_t *pub = &dhd->pub;
6985 
6986 	/* This thread doesn't need any user-level access,
6987 	 * so get rid of all our resources
6988 	 */
6989 	if (dhd_rxf_prio > 0)
6990 	{
6991 		struct sched_param param;
6992 		param.sched_priority = (dhd_rxf_prio < MAX_RT_PRIO)?dhd_rxf_prio:(MAX_RT_PRIO-1);
6993 		setScheduler(current, SCHED_FIFO, &param);
6994 	}
6995 
6996 #ifdef CUSTOM_SET_CPUCORE
6997 	dhd->pub.current_rxf = current;
6998 #endif /* CUSTOM_SET_CPUCORE */
6999 	/* Run until signal received */
7000 	while (1) {
7001 		if (dhd->pub.conf->rxf_cpucore >= 0) {
7002 			printf("%s: set rxf_cpucore %d\n", __FUNCTION__, dhd->pub.conf->rxf_cpucore);
7003 			set_cpus_allowed_ptr(current, cpumask_of(dhd->pub.conf->rxf_cpucore));
7004 			dhd->pub.conf->rxf_cpucore = -1;
7005 		}
7006 		if (down_interruptible(&tsk->sema) == 0) {
7007 			void *skb;
7008 #ifdef ENABLE_ADAPTIVE_SCHED
7009 			dhd_sched_policy(dhd_rxf_prio);
7010 #endif /* ENABLE_ADAPTIVE_SCHED */
7011 
7012 			SMP_RD_BARRIER_DEPENDS();
7013 
7014 			if (tsk->terminated) {
7015 				DHD_OS_WAKE_UNLOCK(pub);
7016 				break;
7017 			}
7018 			skb = dhd_rxf_dequeue(pub);
7019 
7020 			if (skb == NULL) {
7021 				continue;
7022 			}
7023 			while (skb) {
7024 				void *skbnext = PKTNEXT(pub->osh, skb);
7025 				PKTSETNEXT(pub->osh, skb, NULL);
7026 				bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
7027 					__FUNCTION__, __LINE__);
7028 #if defined(WL_MONITOR) && defined(BCMSDIO)
7029 				if (dhd_monitor_enabled(pub, 0))
7030 					dhd_rx_mon_pkt_sdio(pub, skb, 0);
7031 				else
7032 #endif /* WL_MONITOR && BCMSDIO */
7033 				netif_rx_ni(skb);
7034 				skb = skbnext;
7035 			}
7036 #if defined(WAIT_DEQUEUE)
7037 			if (OSL_SYSUPTIME() - watchdogTime > RXF_WATCHDOG_TIME) {
7038 				OSL_SLEEP(1);
7039 				watchdogTime = OSL_SYSUPTIME();
7040 			}
7041 #endif
7042 
7043 			DHD_OS_WAKE_UNLOCK(pub);
7044 		} else {
7045 			break;
7046 		}
7047 	}
7048 	complete_and_exit(&tsk->completed, 0);
7049 }
7050 
7051 #ifdef BCMPCIE
7052 void dhd_dpc_enable(dhd_pub_t *dhdp)
7053 {
7054 #if defined(DHD_LB_RXP) || defined(DHD_LB_TXP)
7055 	dhd_info_t *dhd;
7056 
7057 	if (!dhdp || !dhdp->info)
7058 		return;
7059 	dhd = dhdp->info;
7060 #endif /* DHD_LB_RXP || DHD_LB_TXP */
7061 
7062 #ifdef DHD_LB_RXP
7063 	__skb_queue_head_init(&dhd->rx_pend_queue);
7064 #endif /* DHD_LB_RXP */
7065 
7066 #ifdef DHD_LB_TXP
7067 	skb_queue_head_init(&dhd->tx_pend_queue);
7068 #endif /* DHD_LB_TXP */
7069 }
7070 #endif /* BCMPCIE */
7071 
7072 #ifdef BCMPCIE
7073 void
7074 dhd_dpc_kill(dhd_pub_t *dhdp)
7075 {
7076 	dhd_info_t *dhd;
7077 
7078 	if (!dhdp) {
7079 		return;
7080 	}
7081 
7082 	dhd = dhdp->info;
7083 
7084 	if (!dhd) {
7085 		return;
7086 	}
7087 
7088 	if (dhd->thr_dpc_ctl.thr_pid < 0) {
7089 		tasklet_kill(&dhd->tasklet);
7090 		DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__));
7091 	}
7092 
7093 	cancel_delayed_work_sync(&dhd->dhd_dpc_dispatcher_work);
7094 #ifdef DHD_LB
7095 #ifdef DHD_LB_RXP
7096 	cancel_work_sync(&dhd->rx_napi_dispatcher_work);
7097 	__skb_queue_purge(&dhd->rx_pend_queue);
7098 #endif /* DHD_LB_RXP */
7099 #ifdef DHD_LB_TXP
7100 	cancel_work_sync(&dhd->tx_dispatcher_work);
7101 	skb_queue_purge(&dhd->tx_pend_queue);
7102 	tasklet_kill(&dhd->tx_tasklet);
7103 #endif /* DHD_LB_TXP */
7104 #endif /* DHD_LB */
7105 }
7106 
7107 void
7108 dhd_dpc_tasklet_kill(dhd_pub_t *dhdp)
7109 {
7110 	dhd_info_t *dhd;
7111 
7112 	if (!dhdp) {
7113 		return;
7114 	}
7115 
7116 	dhd = dhdp->info;
7117 
7118 	if (!dhd) {
7119 		return;
7120 	}
7121 
7122 	if (dhd->thr_dpc_ctl.thr_pid < 0) {
7123 		tasklet_kill(&dhd->tasklet);
7124 	}
7125 }
7126 #endif /* BCMPCIE */
7127 
7128 static void
7129 dhd_dpc(ulong data)
7130 {
7131 	dhd_info_t *dhd = (dhd_info_t *)data;
7132 
7133 	int curr_cpu = get_cpu();
7134 	put_cpu();
7135 
7136 	/* Store current cpu as dpc_cpu */
7137 	atomic_set(&dhd->dpc_cpu, curr_cpu);
7138 
7139 	/* This tasklet can be scheduled from dhd_sched_dpc() [dhd_linux.c]
7140 	 * below; the wake lock is held when it runs.
7141 	 * The tasklet is initialized in dhd_attach().
7142 	 */
7143 	/* Call bus dpc unless it indicated down (then clean stop) */
7144 	if (dhd->pub.busstate != DHD_BUS_DOWN) {
7145 #if defined(DHD_LB_STATS) && defined(PCIE_FULL_DONGLE)
7146 		DHD_LB_STATS_INCR(dhd->dhd_dpc_cnt);
7147 #endif /* DHD_LB_STATS && PCIE_FULL_DONGLE */
7148 		if (dhd_bus_dpc(dhd->pub.bus)) {
7149 			tasklet_schedule(&dhd->tasklet);
7150 		}
7151 	} else {
7152 		dhd_bus_stop(dhd->pub.bus, TRUE);
7153 	}
7154 
7155 	/* Store as prev_dpc_cpu, which will be used in Rx load balancing for deciding candidacy */
7156 	atomic_set(&dhd->prev_dpc_cpu, curr_cpu);
7157 
7158 }
7159 
7160 void
7161 dhd_sched_dpc(dhd_pub_t *dhdp)
7162 {
7163 	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
7164 
7165 	if (dhd->thr_dpc_ctl.thr_pid >= 0) {
7166 		DHD_OS_WAKE_LOCK(dhdp);
7167 		/* If the semaphore could not be raised,
7168 		* release the wake lock here
7169 		*/
7170 		if (!binary_sema_up(&dhd->thr_dpc_ctl)) {
7171 			DHD_OS_WAKE_UNLOCK(dhdp);
7172 		}
7173 		return;
7174 	} else {
7175 		tasklet_schedule(&dhd->tasklet);
7176 	}
7177 }
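/*
 * Wake-lock balance in dhd_sched_dpc() above: each successful
 * binary_sema_up() hands one wake-lock reference to dhd_dpc_thread(),
 * which releases it when the pass completes; only a failed "up"
 * (semaphore already raised) is unlocked on the spot, keeping
 * lock/unlock counts matched.
 */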
7178 #endif /* BCMDBUS */
7179 
7180 static void
7181 dhd_sched_rxf(dhd_pub_t *dhdp, void *skb)
7182 {
7183 	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
7184 #ifdef RXF_DEQUEUE_ON_BUSY
7185 	int ret = BCME_OK;
7186 	int retry = 2;
7187 #endif /* RXF_DEQUEUE_ON_BUSY */
7188 
7189 	DHD_OS_WAKE_LOCK(dhdp);
7190 
7191 	DHD_TRACE(("dhd_sched_rxf: Enter\n"));
7192 #ifdef RXF_DEQUEUE_ON_BUSY
7193 	do {
7194 		ret = dhd_rxf_enqueue(dhdp, skb);
7195 		if (ret == BCME_OK || ret == BCME_ERROR)
7196 			break;
7197 		else
7198 			OSL_SLEEP(50); /* waiting for dequeueing */
7199 	} while (retry-- > 0);
7200 
7201 	if (retry <= 0 && ret == BCME_BUSY) {
7202 		void *skbp = skb;
7203 
7204 		while (skbp) {
7205 			void *skbnext = PKTNEXT(dhdp->osh, skbp);
7206 			PKTSETNEXT(dhdp->osh, skbp, NULL);
7207 			bcm_object_trace_opr(skbp, BCM_OBJDBG_REMOVE,
7208 				__FUNCTION__, __LINE__);
7209 			netif_rx_ni(skbp);
7210 			skbp = skbnext;
7211 		}
7212 		DHD_ERROR(("send skb to kernel backlog without rxf_thread\n"));
7213 	} else {
7214 		if (dhd->thr_rxf_ctl.thr_pid >= 0) {
7215 			up(&dhd->thr_rxf_ctl.sema);
7216 		}
7217 	}
7218 #else /* RXF_DEQUEUE_ON_BUSY */
7219 	do {
7220 		if (dhd_rxf_enqueue(dhdp, skb) == BCME_OK)
7221 			break;
7222 	} while (1);
7223 	if (dhd->thr_rxf_ctl.thr_pid >= 0) {
7224 		up(&dhd->thr_rxf_ctl.sema);
7225 	} else {
7226 		DHD_OS_WAKE_UNLOCK(dhdp);
7227 	}
7228 	return;
7229 #endif /* RXF_DEQUEUE_ON_BUSY */
7230 }
7231 
7232 #if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
7233 #endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
7234 
7235 #ifdef TOE
7236 /* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
7237 static int
7238 dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol)
7239 {
7240 	char buf[32];
7241 	int ret;
7242 
7243 	ret = dhd_iovar(&dhd->pub, ifidx, "toe_ol", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
7244 
7245 	if (ret < 0) {
7246 		if (ret == -EIO) {
7247 			DHD_ERROR(("%s: toe not supported by device\n", dhd_ifname(&dhd->pub,
7248 				ifidx)));
7249 			return -EOPNOTSUPP;
7250 		}
7251 
7252 		DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
7253 		return ret;
7254 	}
7255 
7256 	memcpy(toe_ol, buf, sizeof(uint32));
7257 	return 0;
7258 }
7259 
7260 /* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
7261 static int
7262 dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol)
7263 {
7264 	int toe, ret;
7265 
7266 	/* Set toe_ol as requested */
7267 	ret = dhd_iovar(&dhd->pub, ifidx, "toe_ol", (char *)&toe_ol, sizeof(toe_ol), NULL, 0, TRUE);
7268 	if (ret < 0) {
7269 		DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
7270 			dhd_ifname(&dhd->pub, ifidx), ret));
7271 		return ret;
7272 	}
7273 
7274 	/* Enable toe globally only if any components are enabled. */
7275 	toe = (toe_ol != 0);
7276 	ret = dhd_iovar(&dhd->pub, ifidx, "toe", (char *)&toe, sizeof(toe), NULL, 0, TRUE);
7277 	if (ret < 0) {
7278 		DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
7279 		return ret;
7280 	}
7281 
7282 	return 0;
7283 }
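/*
 * Usage sketch for the TOE helpers above (illustrative, not a driver entry
 * point): enable TX checksum offload on interface 0 by read-modify-write
 * of the component bitmap:
 *
 *	uint32 toe_ol;
 *	if (dhd_toe_get(dhd, 0, &toe_ol) == 0) {
 *		toe_ol |= TOE_TX_CSUM_OL;
 *		(void)dhd_toe_set(dhd, 0, toe_ol);
 *	}
 */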
7284 #endif /* TOE */
7285 
7286 #if defined(WL_CFG80211) && defined(NUM_SCB_MAX_PROBE)
7287 void dhd_set_scb_probe(dhd_pub_t *dhd)
7288 {
7289 	wl_scb_probe_t scb_probe;
7290 	char iovbuf[WL_EVENTING_MASK_LEN + sizeof(wl_scb_probe_t)];
7291 	int ret;
7292 
7293 	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
7294 		return;
7295 	}
7296 
7297 	ret = dhd_iovar(dhd, 0, "scb_probe", NULL, 0, iovbuf, sizeof(iovbuf), FALSE);
7298 	if (ret < 0) {
7299 		DHD_ERROR(("%s: GET max_scb_probe failed\n", __FUNCTION__));
7300 	}
7301 
7302 	memcpy(&scb_probe, iovbuf, sizeof(wl_scb_probe_t));
7303 
7304 	scb_probe.scb_max_probe = NUM_SCB_MAX_PROBE;
7305 
7306 	ret = dhd_iovar(dhd, 0, "scb_probe", (char *)&scb_probe, sizeof(wl_scb_probe_t), NULL, 0,
7307 			TRUE);
7308 	if (ret < 0) {
7309 		DHD_ERROR(("%s: max_scb_probe setting failed\n", __FUNCTION__));
7310 		return;
7311 	}
7312 }
7313 #endif /* WL_CFG80211 && NUM_SCB_MAX_PROBE */
7314 
7315 static void
7316 dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
7317 {
7318 	dhd_info_t *dhd = DHD_DEV_INFO(net);
7319 
7320 	snprintf(info->driver, sizeof(info->driver), "wl");
7321 	snprintf(info->version, sizeof(info->version), "%lu", dhd->pub.drv_version);
7322 }
7323 
7324 struct ethtool_ops dhd_ethtool_ops = {
7325 	.get_drvinfo = dhd_ethtool_get_drvinfo
7326 };
7327 
7328 static int
7329 dhd_ethtool(dhd_info_t *dhd, void *uaddr)
7330 {
7331 	struct ethtool_drvinfo info;
7332 	char drvname[sizeof(info.driver)];
7333 	uint32 cmd;
7334 #ifdef TOE
7335 	struct ethtool_value edata;
7336 	uint32 toe_cmpnt, csum_dir;
7337 	int ret;
7338 #endif
7339 
7340 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7341 
7342 	/* all ethtool calls start with a cmd word */
7343 	if (copy_from_user(&cmd, uaddr, sizeof (uint32)))
7344 		return -EFAULT;
7345 
7346 	switch (cmd) {
7347 	case ETHTOOL_GDRVINFO:
7348 		/* Copy out any request driver name */
7349 		bzero(&info.driver, sizeof(info.driver));
7350 		if (copy_from_user(&info, uaddr, sizeof(info)))
7351 			return -EFAULT;
7352 		if (info.driver[sizeof(info.driver) - 1] != '\0') {
7353 			DHD_ERROR(("%s: Exceeds the size of info.driver, "
7354 				"truncating last byte with null\n", __FUNCTION__));
7355 			info.driver[sizeof(info.driver) - 1] = '\0';
7356 		}
7357 		strlcpy(drvname, info.driver, sizeof(drvname));
7358 
7359 		/* clear struct for return */
7360 		memset(&info, 0, sizeof(info));
7361 		info.cmd = cmd;
7362 
7363 		/* if dhd requested, identify ourselves */
7364 		if (strcmp(drvname, "?dhd") == 0) {
7365 			snprintf(info.driver, sizeof(info.driver), "dhd");
7366 			strlcpy(info.version, EPI_VERSION_STR, sizeof(info.version));
7367 		}
7368 
7369 		/* otherwise, require dongle to be up */
7370 		else if (!dhd->pub.up) {
7371 			DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__));
7372 			return -ENODEV;
7373 		}
7374 
7375 		/* finally, report dongle driver type */
7376 		else if (dhd->pub.iswl)
7377 			snprintf(info.driver, sizeof(info.driver), "wl");
7378 		else
7379 			snprintf(info.driver, sizeof(info.driver), "xx");
7380 
7381 		snprintf(info.version, sizeof(info.version), "%lu", dhd->pub.drv_version);
7382 		if (copy_to_user(uaddr, &info, sizeof(info)))
7383 			return -EFAULT;
7384 		DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__,
7385 		         (int)sizeof(drvname), drvname, info.driver));
7386 		break;
7387 
7388 #ifdef TOE
7389 	/* Get toe offload components from dongle */
7390 	case ETHTOOL_GRXCSUM:
7391 	case ETHTOOL_GTXCSUM:
7392 		if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
7393 			return ret;
7394 
7395 		csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
7396 
7397 		edata.cmd = cmd;
7398 		edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;
7399 
7400 		if (copy_to_user(uaddr, &edata, sizeof(edata)))
7401 			return -EFAULT;
7402 		break;
7403 
7404 	/* Set toe offload components in dongle */
7405 	case ETHTOOL_SRXCSUM:
7406 	case ETHTOOL_STXCSUM:
7407 		if (copy_from_user(&edata, uaddr, sizeof(edata)))
7408 			return -EFAULT;
7409 
7410 		/* Read the current settings, update and write back */
7411 		if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
7412 			return ret;
7413 
7414 		csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
7415 
7416 		if (edata.data != 0)
7417 			toe_cmpnt |= csum_dir;
7418 		else
7419 			toe_cmpnt &= ~csum_dir;
7420 
7421 		if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0)
7422 			return ret;
7423 
7424 		/* If setting TX checksum mode, tell Linux the new mode */
7425 		if (cmd == ETHTOOL_STXCSUM) {
7426 			if (edata.data)
7427 				dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
7428 			else
7429 				dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM;
7430 		}
7431 
7432 		break;
7433 #endif /* TOE */
7434 
7435 	default:
7436 		return -EOPNOTSUPP;
7437 	}
7438 
7439 	return 0;
7440 }
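/*
 * Userspace view of the ETHTOOL_GDRVINFO path above (a hedged sketch; the
 * socket and the "wlan0" name are assumptions, not fixed by this driver):
 *
 *	struct ethtool_drvinfo info = { .cmd = ETHTOOL_GDRVINFO };
 *	struct ifreq ifr = {0};
 *	strncpy(info.driver, "?dhd", sizeof(info.driver) - 1);
 *	strncpy(ifr.ifr_name, "wlan0", sizeof(ifr.ifr_name) - 1);
 *	ifr.ifr_data = (void *)&info;
 *	ioctl(sock, SIOCETHTOOL, &ifr);
 *
 * On return info.driver reads "dhd" and info.version carries the EPI
 * version string.
 */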
7441 
7442 /* XXX function to detect that FW is dead and send Event up */
7443 static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error)
7444 {
7445 #if defined(OEM_ANDROID)
7446 	if (!dhdp) {
7447 		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
7448 		return FALSE;
7449 	}
7450 
7451 	if (!dhdp->up)
7452 		return FALSE;
7453 
7454 #if (!defined(BCMDBUS) && !defined(BCMPCIE))
7455 	if (dhdp->info->thr_dpc_ctl.thr_pid < 0) {
7456 		DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__));
7457 		return FALSE;
7458 	}
7459 #endif /* !BCMDBUS && !BCMPCIE */
7460 
7461 	if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) ||
7462 		((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) {
7463 #ifdef BCMPCIE
7464 		DHD_ERROR(("%s: Event HANG send up due to  re=%d te=%d d3acke=%d e=%d s=%d\n",
7465 			__FUNCTION__, dhdp->rxcnt_timeout, dhdp->txcnt_timeout,
7466 			dhdp->d3ackcnt_timeout, error, dhdp->busstate));
7467 #else
7468 		DHD_ERROR(("%s: Event HANG send up due to  re=%d te=%d e=%d s=%d\n", __FUNCTION__,
7469 			dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate));
7470 #endif /* BCMPCIE */
7471 		if (dhdp->hang_reason == 0) {
7472 			if (dhdp->dongle_trap_occured) {
7473 				dhdp->hang_reason = HANG_REASON_DONGLE_TRAP;
7474 #ifdef BCMPCIE
7475 			} else if (dhdp->d3ackcnt_timeout) {
7476 				dhdp->hang_reason = dhdp->is_sched_error ?
7477 					HANG_REASON_D3_ACK_TIMEOUT_SCHED_ERROR :
7478 					HANG_REASON_D3_ACK_TIMEOUT;
7479 #endif /* BCMPCIE */
7480 			} else {
7481 				dhdp->hang_reason = dhdp->is_sched_error ?
7482 					HANG_REASON_IOCTL_RESP_TIMEOUT_SCHED_ERROR :
7483 					HANG_REASON_IOCTL_RESP_TIMEOUT;
7484 			}
7485 		}
7486 		printf("%s\n", info_string);
7487 		printf("MAC %pM\n", &dhdp->mac);
7488 		net_os_send_hang_message(net);
7489 		return TRUE;
7490 	}
7491 #endif /* OEM_ANDROID */
7492 	return FALSE;
7493 }
7494 
7495 #ifdef WL_MONITOR
7496 bool
7497 dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx)
7498 {
7499 	return (dhd->info->monitor_type != 0);
7500 }
7501 
7502 #ifdef BCMSDIO
7503 static void
7504 dhd_rx_mon_pkt_sdio(dhd_pub_t *dhdp, void *pkt, int ifidx)
7505 {
7506 	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
7507 
7508 	if (!dhd->monitor_skb) {
7509 		if ((dhd->monitor_skb = PKTTONATIVE(dhdp->osh, pkt)) == NULL)
7510 			return;
7511 	}
7512 
7513 	if (dhd->monitor_type && dhd->monitor_dev)
7514 		dhd->monitor_skb->dev = dhd->monitor_dev;
7515 	else {
7516 		PKTFREE(dhdp->osh, pkt, FALSE);
7517 		dhd->monitor_skb = NULL;
7518 		return;
7519 	}
7520 
7521 	dhd->monitor_skb->protocol =
7522 		eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev);
7523 	dhd->monitor_len = 0;
7524 
7525 	netif_rx_ni(dhd->monitor_skb);
7526 
7527 	dhd->monitor_skb = NULL;
7528 }
7529 #elif defined(BCMPCIE)
7530 
7531 void
7532 dhd_rx_mon_pkt(dhd_pub_t *dhdp, host_rxbuf_cmpl_t* msg, void *pkt, int ifidx)
7533 {
7534 	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
7535 #ifdef HOST_RADIOTAP_CONV
7536 	if (dhd->host_radiotap_conv) {
7537 		uint16 len = 0, offset = 0;
7538 		monitor_pkt_info_t pkt_info;
7539 
7540 		memcpy(&pkt_info.marker, &msg->marker, sizeof(msg->marker));
7541 		memcpy(&pkt_info.ts, &msg->ts, sizeof(monitor_pkt_ts_t));
7542 
7543 		if (!dhd->monitor_skb) {
7544 			if ((dhd->monitor_skb = dev_alloc_skb(MAX_MON_PKT_SIZE)) == NULL)
7545 				return;
7546 		}
7547 
7548 		len = bcmwifi_monitor(dhd->monitor_info, &pkt_info, PKTDATA(dhdp->osh, pkt),
7549 			PKTLEN(dhdp->osh, pkt), PKTDATA(dhdp->osh, dhd->monitor_skb), &offset);
7550 
7551 		if (dhd->monitor_type && dhd->monitor_dev)
7552 			dhd->monitor_skb->dev = dhd->monitor_dev;
7553 		else {
7554 			PKTFREE(dhdp->osh, pkt, FALSE);
7555 			dev_kfree_skb(dhd->monitor_skb);
7556 			return;
7557 		}
7558 
7559 		PKTFREE(dhdp->osh, pkt, FALSE);
7560 
7561 		if (!len) {
7562 			return;
7563 		}
7564 
7565 		skb_put(dhd->monitor_skb, len);
7566 		skb_pull(dhd->monitor_skb, offset);
7567 
7568 		dhd->monitor_skb->protocol = eth_type_trans(dhd->monitor_skb,
7569 			dhd->monitor_skb->dev);
7570 	}
7571 	else
7572 #endif /* HOST_RADIOTAP_CONV */
7573 	{
7574 		uint8 amsdu_flag = (msg->flags & BCMPCIE_PKT_FLAGS_MONITOR_MASK) >>
7575 			BCMPCIE_PKT_FLAGS_MONITOR_SHIFT;
7576 		switch (amsdu_flag) {
7577 			case BCMPCIE_PKT_FLAGS_MONITOR_NO_AMSDU:
7578 			default:
7579 				if (!dhd->monitor_skb) {
7580 					if ((dhd->monitor_skb = PKTTONATIVE(dhdp->osh, pkt))
7581 						== NULL)
7582 						return;
7583 				}
7584 				if (dhd->monitor_type && dhd->monitor_dev)
7585 					dhd->monitor_skb->dev = dhd->monitor_dev;
7586 				else {
7587 					PKTFREE(dhdp->osh, pkt, FALSE);
7588 					dhd->monitor_skb = NULL;
7589 					return;
7590 				}
7591 				dhd->monitor_skb->protocol =
7592 					eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev);
7593 				dhd->monitor_len = 0;
7594 				break;
7595 
7596 			case BCMPCIE_PKT_FLAGS_MONITOR_FIRST_PKT:
7597 				if (!dhd->monitor_skb) {
7598 					if ((dhd->monitor_skb = dev_alloc_skb(MAX_MON_PKT_SIZE))
7599 						== NULL)
7600 						return;
7601 					dhd->monitor_len = 0;
7602 				}
7603 				if (dhd->monitor_type && dhd->monitor_dev)
7604 					dhd->monitor_skb->dev = dhd->monitor_dev;
7605 				else {
7606 					PKTFREE(dhdp->osh, pkt, FALSE);
7607 					dev_kfree_skb(dhd->monitor_skb);
7608 					return;
7609 				}
7610 				memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb),
7611 				PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
7612 				dhd->monitor_len = PKTLEN(dhdp->osh, pkt);
7613 				PKTFREE(dhdp->osh, pkt, FALSE);
7614 				return;
7615 
7616 			case BCMPCIE_PKT_FLAGS_MONITOR_INTER_PKT:
7617 				memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb) + dhd->monitor_len,
7618 				PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
7619 				dhd->monitor_len += PKTLEN(dhdp->osh, pkt);
7620 				PKTFREE(dhdp->osh, pkt, FALSE);
7621 				return;
7622 
7623 			case BCMPCIE_PKT_FLAGS_MONITOR_LAST_PKT:
7624 				memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb) + dhd->monitor_len,
7625 				PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
7626 				dhd->monitor_len += PKTLEN(dhdp->osh, pkt);
7627 				PKTFREE(dhdp->osh, pkt, FALSE);
7628 				skb_put(dhd->monitor_skb, dhd->monitor_len);
7629 				dhd->monitor_skb->protocol =
7630 					eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev);
7631 				dhd->monitor_len = 0;
7632 				break;
7633 		}
7634 	}
7635 
7636 	if (skb_headroom(dhd->monitor_skb) < ETHER_HDR_LEN) {
7637 		struct sk_buff *skb2;
7638 
7639 		DHD_INFO(("%s: insufficient headroom\n",
7640 		          dhd_ifname(&dhd->pub, ifidx)));
7641 
7642 		skb2 = skb_realloc_headroom(dhd->monitor_skb, ETHER_HDR_LEN);
7643 
7644 		dev_kfree_skb(dhd->monitor_skb);
7645 		if ((dhd->monitor_skb = skb2) == NULL) {
7646 			DHD_ERROR(("%s: skb_realloc_headroom failed\n",
7647 			           dhd_ifname(&dhd->pub, ifidx)));
7648 			return;
7649 		}
7650 	}
7651 	PKTPUSH(dhd->pub.osh, dhd->monitor_skb, ETHER_HDR_LEN);
7652 
7653 	/* XXX WL here makes sure data is 4-byte aligned? */
7654 	if (in_interrupt()) {
7655 		bcm_object_trace_opr(dhd->monitor_skb, BCM_OBJDBG_REMOVE,
7656 			__FUNCTION__, __LINE__);
7657 		netif_rx(dhd->monitor_skb);
7658 	} else {
7659 		/* If the receive is not processed inside an ISR,
7660 		 * the softirqd must be woken explicitly to service
7661 		 * the NET_RX_SOFTIRQ.	In 2.6 kernels, this is handled
7662 		 * by netif_rx_ni(), but in earlier kernels, we need
7663 		 * to do it manually.
7664 		 */
7665 		bcm_object_trace_opr(dhd->monitor_skb, BCM_OBJDBG_REMOVE,
7666 			__FUNCTION__, __LINE__);
7667 
7668 		netif_rx_ni(dhd->monitor_skb);
7669 	}
7670 
7671 	dhd->monitor_skb = NULL;
7672 }
7673 #endif
7674 
7675 typedef struct dhd_mon_dev_priv {
7676 	struct net_device_stats stats;
7677 } dhd_mon_dev_priv_t;
7678 
7679 #define DHD_MON_DEV_PRIV_SIZE		(sizeof(dhd_mon_dev_priv_t))
7680 #define DHD_MON_DEV_PRIV(dev)		((dhd_mon_dev_priv_t *)DEV_PRIV(dev))
7681 #define DHD_MON_DEV_STATS(dev)		(((dhd_mon_dev_priv_t *)DEV_PRIV(dev))->stats)
7682 
7683 static int
7684 dhd_monitor_start(struct sk_buff *skb, struct net_device *dev)
7685 {
7686 	PKTFREE(NULL, skb, FALSE);
7687 	return 0;
7688 }
7689 
7690 #if defined(BT_OVER_SDIO)
7691 
7692 void
7693 dhdsdio_bus_usr_cnt_inc(dhd_pub_t *dhdp)
7694 {
7695 	dhdp->info->bus_user_count++;
7696 }
7697 
7698 void
7699 dhdsdio_bus_usr_cnt_dec(dhd_pub_t *dhdp)
7700 {
7701 	dhdp->info->bus_user_count--;
7702 }
7703 
7704 /* Return values:
7705  * Success: Returns 0
7706  * Failure: Returns -1 or an errno code
7707  */
7708 int
7709 dhd_bus_get(wlan_bt_handle_t handle, bus_owner_t owner)
7710 {
7711 	dhd_pub_t *dhdp = (dhd_pub_t *)handle;
7712 	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
7713 	int ret = 0;
7714 
7715 	mutex_lock(&dhd->bus_user_lock);
7716 	++dhd->bus_user_count;
7717 	if (dhd->bus_user_count < 0) {
7718 		DHD_ERROR(("%s(): bus_user_count is negative, which is invalid\n", __FUNCTION__));
7719 		ret = -1;
7720 		goto exit;
7721 	}
7722 
7723 	if (dhd->bus_user_count == 1) {
7724 
7725 		dhd->pub.hang_was_sent = 0;
7726 
7727 		/* First user, turn on WL_REG, start the bus */
7728 		DHD_ERROR(("%s(): First user, turn on WL_REG & start the bus\n", __FUNCTION__));
7729 
7730 		if (!wifi_platform_set_power(dhd->adapter, TRUE, WIFI_TURNON_DELAY)) {
7731 			/* Enable F1 */
7732 			ret = dhd_bus_resume(dhdp, 0);
7733 			if (ret) {
7734 				DHD_ERROR(("%s(): Failed to enable F1, err=%d\n",
7735 					__FUNCTION__, ret));
7736 				goto exit;
7737 			}
7738 		}
7739 
7740 		/* XXX Some DHD modules (e.g. cfg80211) configures operation mode based on firmware
7741 		 * name. This is indeed a hack but we have to make it work properly before we have
7742 		 * a better solution
7743 		 */
7744 		dhd_update_fw_nv_path(dhd);
7745 		/* update firmware and nvram path to sdio bus */
7746 		dhd_bus_update_fw_nv_path(dhd->pub.bus,
7747 			dhd->fw_path, dhd->nv_path);
7748 		/* download the firmware, Enable F2 */
7749 		/* TODO: Should be done only in case of FW switch */
7750 		ret = dhd_bus_devreset(dhdp, FALSE);
7751 		dhd_bus_resume(dhdp, 1);
7752 		if (!ret) {
7753 			if (dhd_sync_with_dongle(&dhd->pub) < 0) {
7754 				DHD_ERROR(("%s(): Sync with dongle failed!!\n", __FUNCTION__));
7755 				ret = -EFAULT;
7756 			}
7757 		} else {
7758 			DHD_ERROR(("%s(): Failed to download, err=%d\n", __FUNCTION__, ret));
7759 		}
7760 	} else {
7761 		DHD_ERROR(("%s(): BUS is already acquired, just increase the count %d \r\n",
7762 			__FUNCTION__, dhd->bus_user_count));
7763 	}
7764 exit:
7765 	mutex_unlock(&dhd->bus_user_lock);
7766 	return ret;
7767 }
7768 EXPORT_SYMBOL(dhd_bus_get);
7769 
7770 /* Return values:
7771  * Success: Returns 0
7772  * Failure: Returns -1 or an errno code
7773  */
7774 int
7775 dhd_bus_put(wlan_bt_handle_t handle, bus_owner_t owner)
7776 {
7777 	dhd_pub_t *dhdp = (dhd_pub_t *)handle;
7778 	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
7779 	int ret = 0;
7780 	BCM_REFERENCE(owner);
7781 
7782 	mutex_lock(&dhd->bus_user_lock);
7783 	--dhd->bus_user_count;
7784 	if (dhd->bus_user_count < 0) {
7785 		DHD_ERROR(("%s(): bus_user_count is negative, which is invalid\n", __FUNCTION__));
7786 		dhd->bus_user_count = 0;
7787 		ret = -1;
7788 		goto exit;
7789 	}
7790 
7791 	if (dhd->bus_user_count == 0) {
7792 		/* Last user, stop the bus and turn Off WL_REG */
7793 		DHD_ERROR(("%s(): There are no owners left, Turn Off WL_REG & stop the bus \r\n",
7794 			__FUNCTION__));
7795 #ifdef PROP_TXSTATUS
7796 		if (dhd->pub.wlfc_enabled) {
7797 			dhd_wlfc_deinit(&dhd->pub);
7798 		}
7799 #endif /* PROP_TXSTATUS */
7800 #ifdef PNO_SUPPORT
7801 		if (dhd->pub.pno_state) {
7802 			dhd_pno_deinit(&dhd->pub);
7803 		}
7804 #endif /* PNO_SUPPORT */
7805 #ifdef RTT_SUPPORT
7806 		if (dhd->pub.rtt_state) {
7807 			dhd_rtt_deinit(&dhd->pub);
7808 		}
7809 #endif /* RTT_SUPPORT */
7810 		ret = dhd_bus_devreset(dhdp, TRUE);
7811 		if (!ret) {
7812 			dhd_bus_suspend(dhdp);
7813 			wifi_platform_set_power(dhd->adapter, FALSE, WIFI_TURNOFF_DELAY);
7814 		}
7815 	} else {
7816 		DHD_ERROR(("%s(): Other owners using bus, decrease the count %d \r\n",
7817 			__FUNCTION__, dhd->bus_user_count));
7818 	}
7819 exit:
7820 	mutex_unlock(&dhd->bus_user_lock);
7821 	return ret;
7822 }
7823 EXPORT_SYMBOL(dhd_bus_put);
7824 
7825 int
7826 dhd_net_bus_get(struct net_device *dev)
7827 {
7828 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
7829 	return dhd_bus_get(&dhd->pub, WLAN_MODULE);
7830 }
7831 
7832 int
7833 dhd_net_bus_put(struct net_device *dev)
7834 {
7835 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
7836 	return dhd_bus_put(&dhd->pub, WLAN_MODULE);
7837 }
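/*
 * Balanced usage sketch for the reference-counted bus above (illustrative):
 *
 *	if (dhd_bus_get(handle, WLAN_MODULE) == 0) {
 *		(use the SDIO bus)
 *		dhd_bus_put(handle, WLAN_MODULE);
 *	}
 *
 * The first get powers WL_REG and downloads firmware; the last put resets
 * the dongle and powers it off again.
 */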
7838 
7839 /*
7840  * Function to enable the Bus Clock
7841  * Returns BCME_OK on success and BCME_xxx on failure
7842  *
7843  * This function is not callable from non-sleepable context
7844  */
7845 int dhd_bus_clk_enable(wlan_bt_handle_t handle, bus_owner_t owner)
7846 {
7847 	dhd_pub_t *dhdp = (dhd_pub_t *)handle;
7848 
7849 	int ret;
7850 
7851 	dhd_os_sdlock(dhdp);
7852 	/*
7853 	 * The second argument is TRUE, which means we expect
7854 	 * the function to "wait" until the clocks are really
7855 	 * available
7856 	 */
7857 	ret = __dhdsdio_clk_enable(dhdp->bus, owner, TRUE);
7858 	dhd_os_sdunlock(dhdp);
7859 
7860 	return ret;
7861 }
7862 EXPORT_SYMBOL(dhd_bus_clk_enable);
7863 
7864 /*
7865  * Function to disable the Bus Clock
7866  * Returns BCME_OK on success and BCME_xxx on failure
7867  *
7868  * This function is not callable from non-sleepable context
7869  */
7870 int dhd_bus_clk_disable(wlan_bt_handle_t handle, bus_owner_t owner)
7871 {
7872 	dhd_pub_t *dhdp = (dhd_pub_t *)handle;
7873 
7874 	int ret;
7875 
7876 	dhd_os_sdlock(dhdp);
7877 	/*
7878 	 * The second argument is TRUE, which means we expect
7879 	 * the function to "wait" until the clocks are really
7880 	 * disabled
7881 	 */
7882 	ret = __dhdsdio_clk_disable(dhdp->bus, owner, TRUE);
7883 	dhd_os_sdunlock(dhdp);
7884 
7885 	return ret;
7886 }
7887 EXPORT_SYMBOL(dhd_bus_clk_disable);
7888 
7889 /*
7890  * Function to reset bt_use_count counter to zero.
7891  *
7892  * This function is not callable from non-sleepable context
7893  */
7894 void dhd_bus_reset_bt_use_count(wlan_bt_handle_t handle)
7895 {
7896 	dhd_pub_t *dhdp = (dhd_pub_t *)handle;
7897 
7898 	/* take the lock and reset bt use count */
7899 	dhd_os_sdlock(dhdp);
7900 	dhdsdio_reset_bt_use_count(dhdp->bus);
7901 	dhd_os_sdunlock(dhdp);
7902 }
7903 EXPORT_SYMBOL(dhd_bus_reset_bt_use_count);
7904 
7905 void dhd_bus_retry_hang_recovery(wlan_bt_handle_t handle)
7906 {
7907 	dhd_pub_t *dhdp = (dhd_pub_t *)handle;
7908 	dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
7909 
7910 	dhdp->hang_was_sent = 0;
7911 
7912 	dhd_os_send_hang_message(&dhd->pub);
7913 }
7914 EXPORT_SYMBOL(dhd_bus_retry_hang_recovery);
7915 
7916 #endif /* BT_OVER_SDIO */
7917 
7918 static int
7919 dhd_monitor_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7920 {
7921 	return 0;
7922 }
7923 
7924 static struct net_device_stats*
7925 dhd_monitor_get_stats(struct net_device *dev)
7926 {
7927 	return &DHD_MON_DEV_STATS(dev);
7928 }
7929 
7930 static const struct net_device_ops netdev_monitor_ops =
7931 {
7932 	.ndo_start_xmit = dhd_monitor_start,
7933 	.ndo_get_stats = dhd_monitor_get_stats,
7934 	.ndo_do_ioctl = dhd_monitor_ioctl
7935 };
7936 
7937 static void
7938 dhd_add_monitor_if(dhd_info_t *dhd)
7939 {
7940 	struct net_device *dev;
7941 	char *devname;
7942 #ifdef HOST_RADIOTAP_CONV
7943 	dhd_pub_t *dhdp = (dhd_pub_t *)&dhd->pub;
7944 #endif /* HOST_RADIOTAP_CONV */
7945 	uint32 scan_suppress = FALSE;
7946 	int ret = BCME_OK;
7947 
7948 	if (!dhd) {
7949 		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
7950 		return;
7951 	}
7952 
7953 	if (dhd->monitor_dev) {
7954 		DHD_ERROR(("%s: monitor i/f already exists", __FUNCTION__));
7955 		return;
7956 	}
7957 
7958 	dev = alloc_etherdev(DHD_MON_DEV_PRIV_SIZE);
7959 	if (!dev) {
7960 		DHD_ERROR(("%s: alloc wlif failed\n", __FUNCTION__));
7961 		return;
7962 	}
7963 
7964 	devname = "radiotap";
7965 
7966 	snprintf(dev->name, sizeof(dev->name), "%s%u", devname, dhd->unit);
7967 
7968 #ifndef ARPHRD_IEEE80211_PRISM  /* From Linux 2.4.18 */
7969 #define ARPHRD_IEEE80211_PRISM 802
7970 #endif
7971 
7972 #ifndef ARPHRD_IEEE80211_RADIOTAP
7973 #define ARPHRD_IEEE80211_RADIOTAP	803 /* IEEE 802.11 + radiotap header */
7974 #endif /* ARPHRD_IEEE80211_RADIOTAP */
7975 
7976 	dev->type = ARPHRD_IEEE80211_RADIOTAP;
7977 
7978 	dev->netdev_ops = &netdev_monitor_ops;
7979 
7980 	/* XXX: This is called from IOCTL path, in this case, rtnl_lock is already taken.
7981 	 * So, register_netdev() shouldn't be called. It leads to deadlock.
7982 	 * To avoid deadlock due to rtnl_lock(), register_netdevice() should be used.
7983 	 */
7984 	if (register_netdevice(dev)) {
7985 		DHD_ERROR(("%s, register_netdevice failed for %s\n",
7986 			__FUNCTION__, dev->name));
7987 		free_netdev(dev);
7988 		return;
7989 	}
7990 
7991 	if (FW_SUPPORTED((&dhd->pub), monitor)) {
7992 #ifdef DHD_PCIE_RUNTIMEPM
7993 		/* Disable RuntimePM in monitor mode */
7994 		DHD_DISABLE_RUNTIME_PM(&dhd->pub);
7995 		DHD_ERROR(("%s : disable runtime PM in monitor mode\n", __FUNCTION__));
7996 #endif /* DHD_PCIE_RUNTIME_PM */
7997 		scan_suppress = TRUE;
7998 		/* Set the SCAN SUPPRESS Flag in the firmware to disable scan in Monitor mode */
7999 		ret = dhd_iovar(&dhd->pub, 0, "scansuppress", (char *)&scan_suppress,
8000 			sizeof(scan_suppress), NULL, 0, TRUE);
8001 		if (ret < 0) {
8002 			DHD_ERROR(("%s: scansuppress set failed, ret=%d\n", __FUNCTION__, ret));
8003 		}
8004 	}
8005 
8006 #ifdef HOST_RADIOTAP_CONV
8007 	bcmwifi_monitor_create(&dhd->monitor_info);
8008 	bcmwifi_set_corerev_major(dhd->monitor_info, dhdpcie_get_corerev_major(dhdp));
8009 	bcmwifi_set_corerev_minor(dhd->monitor_info, dhdpcie_get_corerev_minor(dhdp));
8010 #endif /* HOST_RADIOTAP_CONV */
8011 	dhd->monitor_dev = dev;
8012 }
8013 
8014 static void
8015 dhd_del_monitor_if(dhd_info_t *dhd)
8016 {
8017 	int ret = BCME_OK;
8018 	uint32 scan_suppress = FALSE;
8019 
8020 	if (!dhd) {
8021 		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
8022 		return;
8023 	}
8024 
8025 	if (!dhd->monitor_dev) {
8026 		DHD_ERROR(("%s: monitor i/f doesn't exist\n", __FUNCTION__));
8027 		return;
8028 	}
8029 
8030 	if (FW_SUPPORTED((&dhd->pub), monitor)) {
8031 #ifdef DHD_PCIE_RUNTIMEPM
8032 		/* Enable RuntimePM */
8033 		DHD_ENABLE_RUNTIME_PM(&dhd->pub);
8034 		DHD_ERROR(("%s : enabled runtime PM\n", __FUNCTION__));
8035 #endif /* DHD_PCIE_RUNTIME_PM */
8036 		scan_suppress = FALSE;
8037 		/* Unset the SCAN SUPPRESS Flag in the firmware to enable scan */
8038 		ret = dhd_iovar(&dhd->pub, 0, "scansuppress", (char *)&scan_suppress,
8039 				sizeof(scan_suppress), NULL, 0, TRUE);
8040 		if (ret < 0) {
8041 			DHD_ERROR(("%s: scansuppress set failed, ret=%d\n", __FUNCTION__, ret));
8042 		}
8043 	}
8044 
8045 	if (dhd->monitor_dev) {
8046 		if (dhd->monitor_dev->reg_state == NETREG_UNINITIALIZED) {
8047 			free_netdev(dhd->monitor_dev);
8048 		} else {
8049 			if (rtnl_is_locked()) {
8050 				unregister_netdevice(dhd->monitor_dev);
8051 			} else {
8052 				unregister_netdev(dhd->monitor_dev);
8053 			}
8054 		}
8055 		dhd->monitor_dev = NULL;
8056 	}
8057 #ifdef HOST_RADIOTAP_CONV
8058 	if (dhd->monitor_info) {
8059 		bcmwifi_monitor_delete(dhd->monitor_info);
8060 		dhd->monitor_info = NULL;
8061 	}
8062 #endif /* HOST_RADIOTAP_CONV */
8063 }
8064 
8065 void
8066 dhd_set_monitor(dhd_pub_t *pub, int ifidx, int val)
8067 {
8068 	dhd_info_t *dhd = pub->info;
8069 
8070 	DHD_TRACE(("%s: val %d\n", __FUNCTION__, val));
8071 
8072 	dhd_net_if_lock_local(dhd);
8073 	if (!val) {
8074 			/* Delete monitor */
8075 			dhd_del_monitor_if(dhd);
8076 	} else {
8077 			/* Add monitor */
8078 			dhd_add_monitor_if(dhd);
8079 	}
8080 	dhd->monitor_type = val;
8081 	dhd_net_if_unlock_local(dhd);
8082 }
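/*
 * Usage sketch (illustrative): toggling monitor mode from the IOCTL path
 * creates or tears down the "radiotap<unit>" netdev registered above:
 *
 *	dhd_set_monitor(pub, 0, 1);	(creates e.g. radiotap0)
 *	dhd_set_monitor(pub, 0, 0);	(deletes it again)
 */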
8083 #endif /* WL_MONITOR */
8084 
8085 #if defined(DHD_H2D_LOG_TIME_SYNC)
8086 /*
8087  * Helper function:
8088  * Used for RTE console message time syncing with Host printk
8089  */
8090 void dhd_h2d_log_time_sync_deferred_wq_schedule(dhd_pub_t *dhdp)
8091 {
8092 	dhd_info_t *info = dhdp->info;
8093 
8094 	/* Ideally the "state" should always be TRUE */
8095 	dhd_deferred_schedule_work(info->dhd_deferred_wq, NULL,
8096 			DHD_WQ_WORK_H2D_CONSOLE_TIME_STAMP_MATCH,
8097 			dhd_deferred_work_rte_log_time_sync,
8098 			DHD_WQ_WORK_PRIORITY_LOW);
8099 }
8100 
8101 void
8102 dhd_deferred_work_rte_log_time_sync(void *handle, void *event_info, u8 event)
8103 {
8104 	dhd_info_t *dhd_info = handle;
8105 	dhd_pub_t *dhd;
8106 
8107 	if (event != DHD_WQ_WORK_H2D_CONSOLE_TIME_STAMP_MATCH) {
8108 		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
8109 		return;
8110 	}
8111 
8112 	if (!dhd_info) {
8113 		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
8114 		return;
8115 	}
8116 
8117 	dhd = &dhd_info->pub;
8118 
8119 	/*
8120 	 * Function to send IOVAR for console timesyncing
8121 	 * between Host and Dongle.
8122 	 * If the IOVAR fails,
8123 	 * 1. dhd_rte_time_sync_ms is set to 0 and
8124 	 * 2. HOST Dongle console time sync will *not* happen.
8125 	 */
8126 	dhd_h2d_log_time_sync(dhd);
8127 }
8128 #endif /* DHD_H2D_LOG_TIME_SYNC */
8129 
8130 int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf)
8131 {
8132 	int bcmerror = BCME_OK;
8133 	int buflen = 0;
8134 	struct net_device *net;
8135 
8136 	net = dhd_idx2net(pub, ifidx);
8137 	if (!net) {
8138 		bcmerror = BCME_BADARG;
8139 		/*
8140 		 * A bad netdev pointer means the DHD can't communicate
8141 		 * with higher layers, so just return from here
8142 		 */
8143 		return bcmerror;
8144 	}
8145 
8146 	/* check for local dhd ioctl and handle it */
8147 	if (ioc->driver == DHD_IOCTL_MAGIC) {
8148 		if (data_buf) {
8149 			/* Return error if nvram size is too big */
8150 			if (!bcmstricmp((char *)data_buf, "vars")) {
8151 				DHD_ERROR(("%s: nvram len(%d) MAX_NVRAMBUF_SIZE(%d)\n",
8152 					__FUNCTION__, ioc->len, MAX_NVRAMBUF_SIZE));
8153 				if (ioc->len > MAX_NVRAMBUF_SIZE) {
8154 					DHD_ERROR(("%s: nvram len(%d) > MAX_NVRAMBUF_SIZE(%d)\n",
8155 						__FUNCTION__, ioc->len, MAX_NVRAMBUF_SIZE));
8156 					bcmerror = BCME_BUFTOOLONG;
8157 					goto done;
8158 				}
8159 				buflen = ioc->len;
8160 			} else if (!bcmstricmp((char *)data_buf, "dump")) {
8161 				buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN_32K);
8162 			} else {
8163 				/* This is a DHD IOVAR, truncate buflen to DHD_IOCTL_MAXLEN */
8164 				buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN);
8165 			}
8166 		}
8167 		bcmerror = dhd_ioctl((void *)pub, ioc, data_buf, buflen);
8168 		if (bcmerror)
8169 			pub->bcmerror = bcmerror;
8170 		goto done;
8171 	}
8172 
8173 	/* This is a WL IOVAR, truncate buflen to WLC_IOCTL_MAXLEN */
8174 	if (data_buf)
8175 		buflen = MIN(ioc->len, WLC_IOCTL_MAXLEN);
8176 
8177 #ifndef BCMDBUS
8178 	/* send to dongle (must be up, and wl). */
8179 	if (pub->busstate == DHD_BUS_DOWN || pub->busstate == DHD_BUS_LOAD) {
8180 		if ((!pub->dongle_trap_occured) && allow_delay_fwdl) {
8181 			int ret;
8182 			if (atomic_read(&exit_in_progress)) {
8183 				DHD_ERROR(("%s module exit in progress\n", __func__));
8184 				bcmerror = BCME_DONGLE_DOWN;
8185 				goto done;
8186 			}
8187 			ret = dhd_bus_start(pub);
8188 			if (ret != 0) {
8189 				DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
8190 				bcmerror = BCME_DONGLE_DOWN;
8191 				goto done;
8192 			}
8193 		} else {
8194 			bcmerror = BCME_DONGLE_DOWN;
8195 			goto done;
8196 		}
8197 	}
8198 
8199 	if (!pub->iswl) {
8200 		bcmerror = BCME_DONGLE_DOWN;
8201 		goto done;
8202 	}
8203 #endif /* !BCMDBUS */
8204 
8205 	/*
8206 	 * Flush the TX queue if required for proper message serialization:
8207 	 * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
8208 	 * prevent M4 encryption and
8209 	 * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to
8210 	 * prevent disassoc frame being sent before WPS-DONE frame.
8211 	 */
8212 	if (ioc->cmd == WLC_SET_KEY ||
8213 	    (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
8214 	     strncmp("wsec_key", data_buf, 9) == 0) ||
8215 	    (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
8216 	     strncmp("bsscfg:wsec_key", data_buf, 15) == 0) ||
8217 	    ioc->cmd == WLC_DISASSOC)
8218 		dhd_wait_pend8021x(net);
8219 
8220 	if ((ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) &&
8221 		data_buf != NULL && strncmp("rpc_", data_buf, 4) == 0) {
8222 		bcmerror = BCME_UNSUPPORTED;
8223 		goto done;
8224 	}
8225 
8226 	/* XXX this typecast is BAD !!! */
8227 	bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
8228 
8229 #ifdef REPORT_FATAL_TIMEOUTS
8230 	/* ensure that the timeouts/flags are started/set after the ioctl returns success */
8231 	if (bcmerror == BCME_OK) {
8232 		if (ioc->cmd == WLC_SET_WPA_AUTH) {
8233 			int wpa_auth;
8234 
8235 			wpa_auth = *((int *)ioc->buf);
8236 			DHD_INFO(("wpa_auth:%d\n", wpa_auth));
8237 			if (wpa_auth != WPA_AUTH_DISABLED) {
8238 				/* If AP is with security then enable
8239 				* WLC_E_PSK_SUP event checking
8240 				*/
8241 				pub->secure_join = TRUE;
8242 			} else {
8243 				/* If AP is with open then disable
8244 				* WLC_E_PSK_SUP event checking
8245 				*/
8246 				pub->secure_join = FALSE;
8247 			}
8248 		}
8249 
8250 		if (ioc->cmd == WLC_SET_AUTH) {
8251 			int auth;
8252 			auth = *((int *)ioc->buf);
8253 			DHD_INFO(("Auth:%d\n", auth));
8254 
8255 			if (auth != WL_AUTH_OPEN_SYSTEM) {
8256 				/* If AP is with security then enable
8257 				* WLC_E_PSK_SUP event checking
8258 				*/
8259 				pub->secure_join = TRUE;
8260 			} else {
8261 				/* If AP is with open then disable WLC_E_PSK_SUP event checking */
8262 				pub->secure_join = FALSE;
8263 			}
8264 		}
8265 
8266 		if (ioc->cmd == WLC_SET_SSID) {
8267 			bool set_ssid_rcvd = OSL_ATOMIC_READ(pub->osh, &pub->set_ssid_rcvd);
8268 			if ((!set_ssid_rcvd) && (!pub->secure_join)) {
8269 				dhd_start_join_timer(pub);
8270 			} else {
8271 				DHD_ERROR(("%s: did not start join timer: "
8272 					"open join, set_ssid_rcvd: %d secure_join: %d\n",
8273 					__FUNCTION__, set_ssid_rcvd, pub->secure_join));
8274 				OSL_ATOMIC_SET(pub->osh, &pub->set_ssid_rcvd, FALSE);
8275 			}
8276 		}
8277 
8278 		if (ioc->cmd == WLC_SCAN) {
8279 			dhd_start_scan_timer(pub, 0);
8280 		}
8281 	}
8282 #endif  /* REPORT_FATAL_TIMEOUTS */
8283 
8284 done:
8285 #if defined(OEM_ANDROID)
8286 	dhd_check_hang(net, pub, bcmerror);
8287 #endif /* OEM_ANDROID */
8288 
8289 	return bcmerror;
8290 }
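/*
 * Buffer-length policy applied by dhd_ioctl_process() above, summarized
 * (derived from the code, not a separate spec):
 *
 *	DHD "vars"        up to MAX_NVRAMBUF_SIZE (else BCME_BUFTOOLONG)
 *	DHD "dump"        clamped to DHD_IOCTL_MAXLEN_32K
 *	other DHD iovars  clamped to DHD_IOCTL_MAXLEN
 *	WL ioctls/iovars  clamped to WLC_IOCTL_MAXLEN
 */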
8291 
8292 #ifdef WL_NANHO
8293 static bool
8294 dhd_nho_iovar_filter(dhd_ioctl_t *ioc)
8295 {
8296 	bool forward_to_nanho = FALSE;
8297 
8298 	if ((ioc->cmd == WLC_SET_VAR) || (ioc->cmd == WLC_GET_VAR)) {
8299 		if ((ioc->len >= sizeof("nan")) && !strcmp(ioc->buf, "nan")) {
8300 			/* forward nan iovar to nanho module */
8301 			forward_to_nanho = TRUE;
8302 		} else if ((ioc->len >= sizeof("slot_bss")) && !strcmp(ioc->buf, "slot_bss")) {
8303 			/* forward slot_bss iovar to nanho module */
8304 			forward_to_nanho = TRUE;
8305 		}
8306 	}
8307 	return forward_to_nanho;
8308 }
8309 
8310 static int
8311 dhd_nho_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf)
8312 {
8313 	int err;
8314 
8315 	if (dhd_nho_iovar_filter(ioc)) {
8316 		/* forward iovar to nanho module */
8317 		err = bcm_nanho_iov(pub->nanhoi, ifidx, (wl_ioctl_t *)ioc);
8318 	} else {
8319 		/* all other iovars bypass nanho and issued through normal path */
8320 		err = dhd_ioctl_process(pub, ifidx, ioc, data_buf);
8321 	}
8322 	return err;
8323 }
8324 
8325 static int
8326 dhd_nho_ioctl_cb(void *drv_ctx, int ifidx, wl_ioctl_t *ioc, bool drv_lock)
8327 {
8328 	int err;
8329 
8330 	if (drv_lock) {
8331 		DHD_OS_WAKE_LOCK((dhd_pub_t *)drv_ctx);
8332 	}
8333 
8334 	err = dhd_ioctl_process((dhd_pub_t *)drv_ctx, ifidx, (dhd_ioctl_t *)ioc, ioc->buf);
8335 
8336 	if (drv_lock) {
8337 		DHD_OS_WAKE_UNLOCK((dhd_pub_t *)drv_ctx);
8338 	}
8339 
8340 	return err;
8341 }
8342 #endif /* WL_NANHO */
8343 
8344 /* XXX For the moment, local ioctls will return BCM errors */
8345 /* XXX Others return linux codes, need to be changed... */
8346 /**
8347  * Called by the OS (optionally via a wrapper function).
8348  * @param net  Linux per dongle instance
8349  * @param ifr  Linux request structure
8350  * @param cmd  e.g. SIOCETHTOOL
8351  */
8352 static int
8353 dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
8354 {
8355 	dhd_info_t *dhd = DHD_DEV_INFO(net);
8356 	dhd_ioctl_t ioc;
8357 	int bcmerror = 0;
8358 	int ifidx;
8359 	int ret;
8360 	void *local_buf = NULL;           /**< buffer in kernel space */
8361 	void __user *ioc_buf_user = NULL; /**< buffer in user space */
8362 	u16 buflen = 0;
8363 
8364 	if (atomic_read(&exit_in_progress)) {
8365 		DHD_ERROR(("%s module exit in progress\n", __func__));
8366 		bcmerror = BCME_DONGLE_DOWN;
8367 		return OSL_ERROR(bcmerror);
8368 	}
8369 
8370 	DHD_OS_WAKE_LOCK(&dhd->pub);
8371 
8372 #if defined(OEM_ANDROID)
8373 	/* Interface up check for built-in type */
8374 	if (!dhd_download_fw_on_driverload && dhd->pub.up == FALSE) {
8375 		DHD_ERROR(("%s: Interface is down \n", __FUNCTION__));
8376 		DHD_OS_WAKE_UNLOCK(&dhd->pub);
8377 		return OSL_ERROR(BCME_NOTUP);
8378 	}
8379 #endif /* (OEM_ANDROID) */
8380 
8381 	ifidx = dhd_net2idx(dhd, net);
8382 	DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd));
8383 
8384 #if defined(WL_STATIC_IF)
8385 	/* skip for static ndev when it is down */
8386 	if (dhd_is_static_ndev(&dhd->pub, net) && !(net->flags & IFF_UP)) {
8387 		DHD_OS_WAKE_UNLOCK(&dhd->pub);
8388 		return -1;
8389 	}
8390 #endif /* WL_STATIC_iF */
8391 
8392 	if (ifidx == DHD_BAD_IF) {
8393 		DHD_ERROR(("%s: BAD IF\n", __FUNCTION__));
8394 		DHD_OS_WAKE_UNLOCK(&dhd->pub);
8395 		return -1;
8396 	}
8397 
8398 #if defined(WL_WIRELESS_EXT)
8399 	/* linux wireless extensions */
8400 	if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
8401 		/* may recurse, do NOT lock */
8402 		ret = wl_iw_ioctl(net, ifr, cmd);
8403 		DHD_OS_WAKE_UNLOCK(&dhd->pub);
8404 		return ret;
8405 	}
8406 #endif /* defined(WL_WIRELESS_EXT) */
8407 
8408 	if (cmd == SIOCETHTOOL) {
8409 		ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
8410 		DHD_OS_WAKE_UNLOCK(&dhd->pub);
8411 		return ret;
8412 	}
8413 
8414 #if defined(OEM_ANDROID)
8415 	if (cmd == SIOCDEVPRIVATE+1) {
8416 		ret = wl_android_priv_cmd(net, ifr);
8417 		dhd_check_hang(net, &dhd->pub, ret);
8418 		DHD_OS_WAKE_UNLOCK(&dhd->pub);
8419 		return ret;
8420 	}
8421 
8422 #endif /* OEM_ANDROID */
8423 
8424 	if (cmd != SIOCDEVPRIVATE) {
8425 		DHD_OS_WAKE_UNLOCK(&dhd->pub);
8426 		return -EOPNOTSUPP;
8427 	}
8428 
8429 	memset(&ioc, 0, sizeof(ioc));
8430 
8431 #ifdef CONFIG_COMPAT
8432 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
8433 	if (in_compat_syscall())
8434 #else
8435 	if (is_compat_task())
8436 #endif /* LINUX_VER >= 4.6 */
8437 	{
8438 		compat_wl_ioctl_t compat_ioc;
8439 		if (copy_from_user(&compat_ioc, ifr->ifr_data, sizeof(compat_wl_ioctl_t))) {
8440 			bcmerror = BCME_BADADDR;
8441 			goto done;
8442 		}
8443 		ioc.cmd = compat_ioc.cmd;
8444 		if (ioc.cmd & WLC_SPEC_FLAG) {
8445 			memset(&ioc, 0, sizeof(ioc));
8446 			/* Copy the ioc control structure part of ioctl request */
8447 			if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
8448 				bcmerror = BCME_BADADDR;
8449 				goto done;
8450 			}
8451 			ioc.cmd &= ~WLC_SPEC_FLAG; /* Clear the FLAG */
8452 
8453 			/* To differentiate between wl and dhd read 4 more bytes */
8454 			if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
8455 				sizeof(uint)) != 0)) {
8456 				bcmerror = BCME_BADADDR;
8457 				goto done;
8458 			}
8459 
8460 		} else { /* ioc.cmd & WLC_SPEC_FLAG */
8461 			ioc.buf = compat_ptr(compat_ioc.buf);
8462 			ioc.len = compat_ioc.len;
8463 			ioc.set = compat_ioc.set;
8464 			ioc.used = compat_ioc.used;
8465 			ioc.needed = compat_ioc.needed;
8466 			/* To differentiate between wl and dhd read 4 more byes */
8467 			if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(compat_wl_ioctl_t),
8468 				sizeof(uint)) != 0)) {
8469 				bcmerror = BCME_BADADDR;
8470 				goto done;
8471 			}
8472 		} /* ioc.cmd & WLC_SPEC_FLAG */
8473 	} else
8474 #endif /* CONFIG_COMPAT */
8475 	{
8476 		/* Copy the ioc control structure part of ioctl request */
8477 		if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
8478 			bcmerror = BCME_BADADDR;
8479 			goto done;
8480 		}
8481 #ifdef CONFIG_COMPAT
8482 		ioc.cmd &= ~WLC_SPEC_FLAG; /* make sure it was clear when it isn't a compat task*/
8483 #endif
8484 
8485 		/* To differentiate between wl and dhd read 4 more byes */
8486 		if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
8487 			sizeof(uint)) != 0)) {
8488 			bcmerror = BCME_BADADDR;
8489 			goto done;
8490 		}
8491 	}

	if (!capable(CAP_NET_ADMIN)) {
		bcmerror = BCME_EPERM;
		goto done;
	}

	/* Take a backup of ioc.buf and restore it later */
	ioc_buf_user = ioc.buf;

	if (ioc.len > 0) {
		/*
		* some IOVARs in DHD require 32K user memory. So allocate the
		* maximum local buffer.
		*
		* For IOVARs which do not require 32K user memory, dhd_ioctl_process()
		* takes care of trimming the length to DHD_IOCTL_MAXLEN (16K), so that DHD
		* will not overflow the buffer size while updating the buffer.
		*/
		buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN_32K);
		if (!(local_buf = MALLOC(dhd->pub.osh, buflen+1))) {
			bcmerror = BCME_NOMEM;
			goto done;
		}

		if (copy_from_user(local_buf, ioc.buf, buflen)) {
			bcmerror = BCME_BADADDR;
			goto done;
		}

		*((char *)local_buf + buflen) = '\0';

		/* For some platforms accessing userspace memory
		 * of ioc.buf is causing kernel panic, so to avoid that
		 * make ioc.buf point to kernel space memory local_buf
		 */
		ioc.buf = local_buf;
	}
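	/*
	 * The block above is a standard bounce-buffer pattern: the user
	 * pointer is saved in ioc_buf_user, the payload is copied into a
	 * NUL-terminated kernel buffer, and ioc.buf is re-pointed at it so
	 * lower layers never dereference userspace memory; the result is
	 * copied back out after dhd_ioctl_process() returns.
	 */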

#if defined(OEM_ANDROID)
	/* Skip all the non DHD iovars (wl iovars) after f/w hang */
	if (ioc.driver != DHD_IOCTL_MAGIC && dhd->pub.hang_was_sent) {
		DHD_TRACE(("%s: HANG was sent up earlier\n", __FUNCTION__));
		DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS);
		bcmerror = BCME_DONGLE_DOWN;
		goto done;
	}
#endif /* OEM_ANDROID */

#ifdef WL_NANHO
	bcmerror = dhd_nho_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf);
#else
	bcmerror = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf);
#endif  /* WL_NANHO */

	/* Restore the userspace pointer to ioc.buf */
	ioc.buf = ioc_buf_user;
	if (!bcmerror && buflen && local_buf && ioc.buf) {
		if (copy_to_user(ioc.buf, local_buf, buflen))
			bcmerror = -EFAULT;
	}

done:
	if (local_buf)
		MFREE(dhd->pub.osh, local_buf, buflen+1);

	DHD_OS_WAKE_UNLOCK(&dhd->pub);

	return OSL_ERROR(bcmerror);
}

#if defined(WL_CFG80211) && defined(SUPPORT_DEEP_SLEEP)
/* Flag to indicate whether we apply a distinct power-off policy when
 * the user sets the menu "Keep Wi-Fi on during sleep" to "Never"
 */
int trigger_deep_sleep = 0;
#endif /* WL_CFG80211 && SUPPORT_DEEP_SLEEP */

#ifdef FIX_CPU_MIN_CLOCK
static int dhd_init_cpufreq_fix(dhd_info_t *dhd)
{
	if (dhd) {
		mutex_init(&dhd->cpufreq_fix);
		dhd->cpufreq_fix_status = FALSE;
	}
	return 0;
}

static void dhd_fix_cpu_freq(dhd_info_t *dhd)
{
	if (!dhd)
		return;

	mutex_lock(&dhd->cpufreq_fix);
	if (!dhd->cpufreq_fix_status) {
		pm_qos_add_request(&dhd->dhd_cpu_qos, PM_QOS_CPU_FREQ_MIN, 300000);
#ifdef FIX_BUS_MIN_CLOCK
		pm_qos_add_request(&dhd->dhd_bus_qos, PM_QOS_BUS_THROUGHPUT, 400000);
#endif /* FIX_BUS_MIN_CLOCK */
		DHD_ERROR(("pm_qos_add_requests called\n"));

		dhd->cpufreq_fix_status = TRUE;
	}
	mutex_unlock(&dhd->cpufreq_fix);
}

static void dhd_rollback_cpu_freq(dhd_info_t *dhd)
{
	if (!dhd)
		return;

	mutex_lock(&dhd->cpufreq_fix);
	if (dhd->cpufreq_fix_status != TRUE) {
		mutex_unlock(&dhd->cpufreq_fix);
		return;
	}

	pm_qos_remove_request(&dhd->dhd_cpu_qos);
#ifdef FIX_BUS_MIN_CLOCK
	pm_qos_remove_request(&dhd->dhd_bus_qos);
#endif /* FIX_BUS_MIN_CLOCK */
	DHD_ERROR(("pm_qos_remove_requests called\n"));

	dhd->cpufreq_fix_status = FALSE;
	mutex_unlock(&dhd->cpufreq_fix);
}
#endif /* FIX_CPU_MIN_CLOCK */

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
static int
dhd_ioctl_entry_wrapper(struct net_device *net, struct ifreq *ifr, int cmd)
{
	int error;
	dhd_info_t *dhd = DHD_DEV_INFO(net);

	if (atomic_read(&dhd->pub.block_bus))
		return -EHOSTDOWN;

	if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) < 0)
		return BCME_ERROR;

	error = dhd_ioctl_entry(net, ifr, cmd);

	pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus));
	pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus));

	return error;
}
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

#ifdef CONFIG_HAS_WAKELOCK
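/* Release the wake lock if it is still held, then destroy it. Wrapped in
 * do { } while (0) so the macro expands as a single statement at call sites.
 */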
#define dhd_wake_lock_unlock_destroy(wlock) \
do { \
	if (dhd_wake_lock_active(wlock)) { \
		dhd_wake_unlock(wlock); \
	} \
	dhd_wake_lock_destroy(wlock); \
} while (0)
#endif /* CONFIG_HAS_WAKELOCK */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) && defined(DHD_TCP_LIMIT_OUTPUT)
#define DHD_TCP_LIMIT_OUTPUT_BYTES (4 * 1024 * 1024)
#ifndef TCP_DEFAULT_LIMIT_OUTPUT
#define TCP_DEFAULT_LIMIT_OUTPUT (256 * 1024)
#endif /* TCP_DEFAULT_LIMIT_OUTPUT */
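/*
 * Raise or restore the global TCP small-queues budget
 * (net.ipv4.tcp_limit_output_bytes). Level 1 widens the per-socket
 * in-flight limit to 4 MB for high-throughput runs; level 0 restores
 * the 256 KB default. This pokes init_net directly, so it is
 * system-wide, not per-interface.
 */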
void
dhd_ctrl_tcp_limit_output_bytes(int level)
{
	if (level == 0) {
		init_net.ipv4.sysctl_tcp_limit_output_bytes = TCP_DEFAULT_LIMIT_OUTPUT;
	} else if (level == 1) {
		init_net.ipv4.sysctl_tcp_limit_output_bytes = DHD_TCP_LIMIT_OUTPUT_BYTES;
	}
}
#endif /* LINUX_VERSION_CODE >= 4.19.0 && DHD_TCP_LIMIT_OUTPUT */

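/*
 * Teardown path for the primary interface, the mirror of dhd_open():
 * unless skip_reset applies (active static IF or skip_dhd_stop), it
 * brings cfg80211 down, removes virtual interfaces, flushes
 * logtrace/NAPI/wlfc state, stops the protocol layer and finally
 * powers the dongle off.
 */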
static int
dhd_stop(struct net_device *net)
{
	int ifidx = 0;
	bool skip_reset = false;
#ifdef WL_CFG80211
	unsigned long flags = 0;
#ifdef WL_STATIC_IF
	struct bcm_cfg80211 *cfg = wl_get_cfg(net);
#endif /* WL_STATIC_IF */
#endif /* WL_CFG80211 */
	dhd_info_t *dhd = DHD_DEV_INFO(net);

	DHD_OS_WAKE_LOCK(&dhd->pub);
	WL_MSG(net->name, "Enter\n");
	dhd->pub.rxcnt_timeout = 0;
	dhd->pub.txcnt_timeout = 0;

#ifdef BCMPCIE
	dhd->pub.d3ackcnt_timeout = 0;
#endif /* BCMPCIE */

	mutex_lock(&dhd->pub.ndev_op_sync);
	if (dhd->pub.up == 0) {
		goto exit;
	}
#if defined(DHD_HANG_SEND_UP_TEST)
	if (dhd->pub.req_hang_type) {
		DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
			__FUNCTION__, dhd->pub.req_hang_type));
		dhd->pub.req_hang_type = 0;
	}
#endif /* DHD_HANG_SEND_UP_TEST */

#if defined(WLAN_ACCEL_BOOT)
	if (!dhd->wl_accel_force_reg_on && dhd_query_bus_erros(&dhd->pub)) {
		DHD_ERROR(("%s: set force reg on\n", __FUNCTION__));
		dhd->wl_accel_force_reg_on = TRUE;
	}
#endif /* WLAN_ACCEL_BOOT */

#ifdef FIX_CPU_MIN_CLOCK
	if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE)
		dhd_rollback_cpu_freq(dhd);
#endif /* FIX_CPU_MIN_CLOCK */

	ifidx = dhd_net2idx(dhd, net);
	BCM_REFERENCE(ifidx);

	DHD_ERROR(("%s: ######### called for ifidx=%d #########\n", __FUNCTION__, ifidx));

#if defined(WL_STATIC_IF) && defined(WL_CFG80211)
	/* If static if is operational, don't reset the chip */
	if (IS_CFG80211_STATIC_IF_ACTIVE(cfg)) {
		WL_MSG(net->name, "static if operational. skip chip reset.\n");
		skip_reset = true;
		wl_cfg80211_sta_ifdown(net);
		goto exit;
	}
#endif /* WL_STATIC_IF && WL_CFG80211 */
#ifdef DHD_NOTIFY_MAC_CHANGED
	if (dhd->pub.skip_dhd_stop) {
		WL_MSG(net->name, "skip chip reset.\n");
		skip_reset = true;
#if defined(WL_CFG80211)
		wl_cfg80211_sta_ifdown(net);
#endif /* WL_CFG80211 */
		goto exit;
	}
#endif /* DHD_NOTIFY_MAC_CHANGED */

#ifdef WL_CFG80211
	if (ifidx == 0) {
		dhd_if_t *ifp;
		wl_cfg80211_down(net);

		DHD_ERROR(("%s: making dhdpub up FALSE\n", __FUNCTION__));
#ifdef WL_CFG80211
		/* Disable Runtime PM before interface down */
		DHD_STOP_RPM_TIMER(&dhd->pub);

		DHD_UP_LOCK(&dhd->pub.up_lock, flags);
		dhd->pub.up = 0;
		DHD_UP_UNLOCK(&dhd->pub.up_lock, flags);
#else
		dhd->pub.up = 0;
#endif /* WL_CFG80211 */
#if defined(BCMPCIE) && defined(CONFIG_ARCH_MSM)
		dhd_bus_inform_ep_loaded_to_rc(&dhd->pub, dhd->pub.up);
#endif /* BCMPCIE && CONFIG_ARCH_MSM */

		ifp = dhd->iflist[0];
		/*
		 * For CFG80211: Clean up all the left over virtual interfaces
		 * when the primary Interface is brought down. [ifconfig wlan0 down]
		 */
		if (!dhd_download_fw_on_driverload) {
			DHD_STATLOG_CTRL(&dhd->pub, ST(WLAN_POWER_OFF), ifidx, 0);
			if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) &&
				(dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
				int i;
#ifdef DHD_4WAYM4_FAIL_DISCONNECT
				dhd_cleanup_m4_state_work(&dhd->pub, ifidx);
#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
#ifdef DHD_PKTDUMP_ROAM
				dhd_dump_pkt_clear(&dhd->pub);
#endif /* DHD_PKTDUMP_ROAM */

				dhd_net_if_lock_local(dhd);
				for (i = 1; i < DHD_MAX_IFS; i++)
					dhd_remove_if(&dhd->pub, i, FALSE);

				if (ifp && ifp->net) {
					dhd_if_del_sta_list(ifp);
				}
#ifdef ARP_OFFLOAD_SUPPORT
				if (dhd_inetaddr_notifier_registered) {
					dhd_inetaddr_notifier_registered = FALSE;
					unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
				}
#endif /* ARP_OFFLOAD_SUPPORT */
#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
				if (dhd_inet6addr_notifier_registered) {
					dhd_inet6addr_notifier_registered = FALSE;
					unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
				}
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
				dhd_net_if_unlock_local(dhd);
			}
#if 0
			// terence 20161024: remove this to prevent dev_close() get stuck in dhd_hang_process
			cancel_work_sync(dhd->dhd_deferred_wq);
#endif

#ifdef SHOW_LOGTRACE
			/* Wait till event logs work/kthread finishes */
			dhd_cancel_logtrace_process_sync(dhd);
#endif /* SHOW_LOGTRACE */

#ifdef BTLOG
			/* Wait till bt_log_dispatcher_work finishes */
			cancel_work_sync(&dhd->bt_log_dispatcher_work);
#endif /* BTLOG */

#ifdef EWP_EDL
			cancel_delayed_work_sync(&dhd->edl_dispatcher_work);
#endif

#if defined(DHD_LB_RXP)
			__skb_queue_purge(&dhd->rx_pend_queue);
#endif /* DHD_LB_RXP */

#if defined(DHD_LB_TXP)
			skb_queue_purge(&dhd->tx_pend_queue);
#endif /* DHD_LB_TXP */
		}
#ifdef DHDTCPACK_SUPPRESS
		dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
#endif /* DHDTCPACK_SUPPRESS */
#if defined(DHD_LB_RXP)
		if (ifp && ifp->net == dhd->rx_napi_netdev) {
			DHD_INFO(("%s napi<%p> disabled ifp->net<%p,%s>\n",
				__FUNCTION__, &dhd->rx_napi_struct, net, net->name));
			skb_queue_purge(&dhd->rx_napi_queue);
			napi_disable(&dhd->rx_napi_struct);
			netif_napi_del(&dhd->rx_napi_struct);
			dhd->rx_napi_netdev = NULL;
		}
#endif /* DHD_LB_RXP */
	}
#endif /* WL_CFG80211 */

#ifdef PROP_TXSTATUS
	dhd_wlfc_cleanup(&dhd->pub, NULL, 0);
#endif
#ifdef SHOW_LOGTRACE
	if (!dhd_download_fw_on_driverload) {
		/* Release the skbs from queue for WLC_E_TRACE event */
		dhd_event_logtrace_flush_queue(&dhd->pub);
		if (dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT) {
			if (dhd->event_data.fmts) {
				MFREE(dhd->pub.osh, dhd->event_data.fmts,
					dhd->event_data.fmts_size);
			}
			if (dhd->event_data.raw_fmts) {
				MFREE(dhd->pub.osh, dhd->event_data.raw_fmts,
					dhd->event_data.raw_fmts_size);
			}
			if (dhd->event_data.raw_sstr) {
				MFREE(dhd->pub.osh, dhd->event_data.raw_sstr,
					dhd->event_data.raw_sstr_size);
			}
			if (dhd->event_data.rom_raw_sstr) {
				MFREE(dhd->pub.osh, dhd->event_data.rom_raw_sstr,
					dhd->event_data.rom_raw_sstr_size);
			}
			dhd->dhd_state &= ~DHD_ATTACH_LOGTRACE_INIT;
		}
	}
#endif /* SHOW_LOGTRACE */
#ifdef DHD_DEBUGABILITY_LOG_DUMP_RING
	/* Stop all ring buffers */
	dhd_os_reset_logging(&dhd->pub);
#endif
#ifdef APF
	dhd_dev_apf_delete_filter(net);
#endif /* APF */

	/* Stop the protocol module */
	dhd_prot_stop(&dhd->pub);

	OLD_MOD_DEC_USE_COUNT;
exit:
	if (skip_reset == false) {
#if defined(WL_WIRELESS_EXT)
		if (ifidx == 0) {
			wl_iw_down(net, &dhd->pub);
		}
#endif /* defined(WL_WIRELESS_EXT) */
#ifdef WL_ESCAN
		if (ifidx == 0) {
			wl_escan_down(net, &dhd->pub);
		}
#endif /* WL_ESCAN */
		if (ifidx == 0 && !dhd_download_fw_on_driverload) {
#if defined(WLAN_ACCEL_BOOT)
			wl_android_wifi_accel_off(net, dhd->wl_accel_force_reg_on);
#else
#if defined(BT_OVER_SDIO)
			dhd_bus_put(&dhd->pub, WLAN_MODULE);
			wl_android_set_wifi_on_flag(FALSE);
#else
			wl_android_wifi_off(net, TRUE);
#ifdef WL_EXT_IAPSTA
			wl_ext_iapsta_dettach_netdev(net, ifidx);
#endif /* WL_EXT_IAPSTA */
#ifdef WL_ESCAN
			wl_escan_event_dettach(net, &dhd->pub);
#endif /* WL_ESCAN */
#ifdef WL_EVENT
			wl_ext_event_dettach_netdev(net, ifidx);
#endif /* WL_EVENT */
#endif /* BT_OVER_SDIO */
#endif /* WLAN_ACCEL_BOOT */
		}
#ifdef SUPPORT_DEEP_SLEEP
		else {
			/* CSP#505233: Flag to indicate whether we apply a
			 * distinct power-off policy when the user sets the
			 * menu "Keep Wi-Fi on during sleep" to "Never"
			 */
			if (trigger_deep_sleep) {
				dhd_deepsleep(net, 1);
				trigger_deep_sleep = 0;
			}
		}
#endif /* SUPPORT_DEEP_SLEEP */
		dhd->pub.hang_was_sent = 0;
		dhd->pub.hang_was_pending = 0;

		/* Clear country spec for built-in type driver */
		if (!dhd_download_fw_on_driverload) {
			dhd->pub.dhd_cspec.country_abbrev[0] = 0x00;
			dhd->pub.dhd_cspec.rev = 0;
			dhd->pub.dhd_cspec.ccode[0] = 0x00;
		}

#ifdef BCMDBGFS
		dhd_dbgfs_remove();
#endif
	}

	DHD_OS_WAKE_UNLOCK(&dhd->pub);

	/* Destroy wakelock */
	if (!dhd_download_fw_on_driverload &&
		(dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) &&
		(skip_reset == false)) {
		DHD_OS_WAKE_LOCK_DESTROY(dhd);
		dhd->dhd_state &= ~DHD_ATTACH_STATE_WAKELOCKS_INIT;
	}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) && defined(DHD_TCP_LIMIT_OUTPUT)
	dhd_ctrl_tcp_limit_output_bytes(0);
#endif /* LINUX_VERSION_CODE >= 4.19.0 && DHD_TCP_LIMIT_OUTPUT */
	WL_MSG(net->name, "Exit\n");

	mutex_unlock(&dhd->pub.ndev_op_sync);
	return 0;
}

#if defined(OEM_ANDROID) && defined(WL_CFG80211) && (defined(USE_INITIAL_2G_SCAN) || \
	defined(USE_INITIAL_SHORT_DWELL_TIME))
extern bool g_first_broadcast_scan;
#endif /* OEM_ANDROID && WL_CFG80211 && (USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME) */

#ifdef WL11U
static int dhd_interworking_enable(dhd_pub_t *dhd)
{
	uint32 enable = true;
	int ret = BCME_OK;

	ret = dhd_iovar(dhd, 0, "interworking", (char *)&enable, sizeof(enable), NULL, 0, TRUE);
	if (ret < 0) {
		DHD_ERROR(("%s: enabling interworking failed, ret=%d\n", __FUNCTION__, ret));
	}

	return ret;
}
#endif /* WL11U */

#if defined(WLAN_ACCEL_BOOT)
void
dhd_verify_firmware_mode_change(dhd_info_t *dhd)
{
	int current_mode = 0;

	/*
	 * Check for an FW change:
	 * previous FW mode - dhd->pub.op_mode remembers the previous mode
	 * current mode - update fw/nv path, get the current FW mode from dhd->fw_path
	 */
	dhd_update_fw_nv_path(dhd);
#ifdef WL_MONITOR
	DHD_INFO(("%s : check monitor mode with fw_path : %s\n", __FUNCTION__, dhd->fw_path));

	if (strstr(dhd->fw_path, "_mon") != NULL) {
		DHD_ERROR(("%s : monitor mode is enabled, set force reg on", __FUNCTION__));
		dhd->wl_accel_force_reg_on = TRUE;
		return;
	} else if (dhd->pub.monitor_enable == TRUE) {
		DHD_ERROR(("%s : monitor was enabled, changed to other fw_mode", __FUNCTION__));
		dhd->wl_accel_force_reg_on = TRUE;
		return;
	}
#endif /* WL_MONITOR */
	current_mode = dhd_get_fw_mode(dhd);

	DHD_ERROR(("%s: current_mode 0x%x, prev_opmode 0x%x", __FUNCTION__,
		current_mode, dhd->pub.op_mode));

	if (!(dhd->pub.op_mode & current_mode)) {
		DHD_ERROR(("%s: firmware path has changed, set force reg on", __FUNCTION__));
		dhd->wl_accel_force_reg_on = TRUE;
	}
}

#ifndef DHD_FS_CHECK_RETRY_DELAY_MS
#define DHD_FS_CHECK_RETRY_DELAY_MS 3000
#endif

#ifndef DHD_FS_CHECK_RETRIES
#define DHD_FS_CHECK_RETRIES    3
#endif

static bool
dhd_check_filesystem_is_up(void)
{
	struct file *fp;
	const char *clm = VENDOR_PATH CONFIG_BCMDHD_CLM_PATH;
	fp = filp_open(clm, O_RDONLY, 0);

	if (IS_ERR(fp)) {
		DHD_ERROR(("%s: filp_open(%s) failed(%d) schedule wl_accel_work\n",
				__FUNCTION__, clm, (int)PTR_ERR(fp)));
		return FALSE;
	}
	filp_close(fp, NULL);

	return TRUE;
}

static void
dhd_wifi_accel_on_work_cb(struct work_struct *work)
{
	int ret = 0;
	struct delayed_work *dw = to_delayed_work(work);
	struct dhd_info *dhd;
	struct net_device *net;

	/* Ignore compiler warnings due to -Werror=cast-qual */
	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
	dhd = container_of(dw, struct dhd_info, wl_accel_work);
	GCC_DIAGNOSTIC_POP();

	DHD_ERROR(("%s\n", __FUNCTION__));

	/* Initialise force reg_on to TRUE; it will be made FALSE at the end */
	dhd->wl_accel_force_reg_on = TRUE;

	if (!dhd_check_filesystem_is_up()) {
		if (!dhd->fs_check_retry--) {
			DHD_ERROR(("%s: max retry reached, BACKOFF\n", __FUNCTION__));
			return;
		}
		schedule_delayed_work(&dhd->wl_accel_work,
				msecs_to_jiffies(DHD_FS_CHECK_RETRY_DELAY_MS));
		return;
	}

	net = dhd->iflist[0]->net;

	/*
	 * Keep WLAN turned on and download the firmware during bootup
	 * by making g_wifi_on = FALSE
	 */
	ret = wl_android_wifi_on(net);
	if (ret) {
		DHD_ERROR(("%s: wl_android_wifi_on failed(%d)\n", __FUNCTION__, ret));
		goto fail;
	}

	/* Disable host access from dongle */
	ret = dhd_wl_ioctl_set_intiovar(&dhd->pub, "bus:host_access", 0, WLC_SET_VAR, TRUE, 0);
	if (ret) {
		/* Proceed even if the iovar fails, for backward compatibility */
		DHD_ERROR(("%s: bus:host_access(0) failed(%d)\n", __FUNCTION__, ret));
	}

	/* After bootup, keep the bus in suspend state */
	ret = dhd_net_bus_suspend(net);
	if (ret) {
		DHD_ERROR(("%s: dhd_net_bus_suspend failed(%d)\n", __FUNCTION__, ret));
		goto fail;
	}

	/* Set force reg_on to FALSE; it will be set again for the Big Hammer case */
	dhd->wl_accel_force_reg_on = FALSE;

fail:
	/* mark wl_accel_boot_on_done for dhd_open to proceed */
	dhd->wl_accel_boot_on_done = TRUE;
	return;

}
#endif /* WLAN_ACCEL_BOOT */

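/*
 * Bring-up path for the primary interface: power the dongle on
 * (wl_android_wifi_on or the accel variant), start the bus and sync
 * with the dongle, then bring up the cfg80211/WEXT/escan layers and
 * register the inet/inet6 notifiers. Any failure rolls back through
 * dhd_stop() at the exit label.
 */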
int
dhd_open(struct net_device *net)
{
	dhd_info_t *dhd = DHD_DEV_INFO(net);
#ifdef TOE
	uint32 toe_ol;
#endif
	int ifidx;
	int32 ret = 0;
#if defined(OOB_INTR_ONLY)
	uint32 bus_type = -1;
	uint32 bus_num = -1;
	uint32 slot_num = -1;
	wifi_adapter_info_t *adapter = NULL;
#endif
#if defined(WL_EXT_IAPSTA) && defined(ISAM_PREINIT)
	int bytes_written = 0;
#endif

#if defined(PREVENT_REOPEN_DURING_HANG)
	/* WAR: prevent dhd_open from being called abnormally in quick succession after a hang event */
	if (dhd->pub.hang_was_sent == 1) {
		DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
		/* Force to bring down WLAN interface in case dhd_stop() is not called
		 * from the upper layer when HANG event is triggered.
		 */
		if (!dhd_download_fw_on_driverload && dhd->pub.up == 1) {
			DHD_ERROR(("%s: WLAN interface is not brought down\n", __FUNCTION__));
			dhd_stop(net);
		} else {
			return -1;
		}
	}
#endif /* PREVENT_REOPEN_DURING_HANG */

	mutex_lock(&dhd->pub.ndev_op_sync);

	if (dhd->pub.up == 1) {
		/* already up */
		WL_MSG(net->name, "Primary net_device is already up\n");
		mutex_unlock(&dhd->pub.ndev_op_sync);
		return BCME_OK;
	}

	if (!dhd_download_fw_on_driverload) {
#if defined(WLAN_ACCEL_BOOT)
		if (dhd->wl_accel_boot_on_done == FALSE) {
#if defined(WLAN_ACCEL_SKIP_WQ_IN_ATTACH)
			dhd_wifi_accel_on_work_cb(&dhd->wl_accel_work.work);
#else
			DHD_ERROR(("%s: WLAN accel boot not done yet\n", __FUNCTION__));
			mutex_unlock(&dhd->pub.ndev_op_sync);
			return -1;
#endif /* WLAN_ACCEL_SKIP_WQ_IN_ATTACH */
		}
		if (!dhd->wl_accel_force_reg_on && dhd_query_bus_erros(&dhd->pub)) {
			DHD_ERROR(("%s: set force reg on\n", __FUNCTION__));
			dhd->wl_accel_force_reg_on = TRUE;
		}
#endif /* WLAN_ACCEL_BOOT */
		if (!dhd_driver_init_done) {
			DHD_ERROR(("%s: WLAN driver is not initialized\n", __FUNCTION__));
			mutex_unlock(&dhd->pub.ndev_op_sync);
			return -1;
		}
	}

	WL_MSG(net->name, "Enter\n");
	DHD_ERROR(("%s\n", dhd_version));
	DHD_MUTEX_LOCK();
	/* Init wakelock */
	if (!dhd_download_fw_on_driverload) {
		if (!(dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
			DHD_OS_WAKE_LOCK_INIT(dhd);
			dhd->dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
		}

#ifdef SHOW_LOGTRACE
		skb_queue_head_init(&dhd->evt_trace_queue);

		if (!(dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT)) {
			ret = dhd_init_logstrs_array(dhd->pub.osh, &dhd->event_data);
			if (ret == BCME_OK) {
				dhd_init_static_strs_array(dhd->pub.osh, &dhd->event_data,
					st_str_file_path, map_file_path);
				dhd_init_static_strs_array(dhd->pub.osh, &dhd->event_data,
					rom_st_str_file_path, rom_map_file_path);
				dhd->dhd_state |= DHD_ATTACH_LOGTRACE_INIT;
			}
		}
#endif /* SHOW_LOGTRACE */
	}

	DHD_OS_WAKE_LOCK(&dhd->pub);
	dhd->pub.dongle_trap_occured = 0;
#ifdef BT_OVER_PCIE
	dhd->pub.dongle_trap_due_to_bt = 0;
#endif /* BT_OVER_PCIE */
	dhd->pub.hang_was_sent = 0;
	dhd->pub.hang_was_pending = 0;
	dhd->pub.hang_reason = 0;
	dhd->pub.iovar_timeout_occured = 0;
#ifdef PCIE_FULL_DONGLE
	dhd->pub.d3ack_timeout_occured = 0;
	dhd->pub.livelock_occured = 0;
	dhd->pub.pktid_audit_failed = 0;
#endif /* PCIE_FULL_DONGLE */
	dhd->pub.iface_op_failed = 0;
	dhd->pub.scan_timeout_occurred = 0;
	dhd->pub.scan_busy_occurred = 0;
	dhd->pub.smmu_fault_occurred = 0;
#ifdef DHD_LOSSLESS_ROAMING
	dhd->pub.dequeue_prec_map = ALLPRIO;
#endif
#ifdef DHD_GRO_ENABLE_HOST_CTRL
	dhd->pub.permitted_gro = TRUE;
#endif /* DHD_GRO_ENABLE_HOST_CTRL */
#if 0
	/*
	 * Force start if ifconfig_up gets called before START command
	 *  We keep WEXT's wl_control_wl_start to provide backward compatibility
	 *  This should be removed in the future
	 */
	ret = wl_control_wl_start(net);
	if (ret != 0) {
		DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
		ret = -1;
		goto exit;
	}

#endif /* defined(OEM_ANDROID) && !defined(WL_CFG80211) */

	ifidx = dhd_net2idx(dhd, net);
	DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));

	if (ifidx < 0) {
		DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__));
		ret = -1;
		goto exit;
	}

	if (!dhd->iflist[ifidx]) {
		DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__));
		ret = -1;
		goto exit;
	}

	DHD_ERROR(("%s: ######### called for ifidx=%d #########\n", __FUNCTION__, ifidx));

#if defined(WLAN_ACCEL_BOOT)
	dhd_verify_firmware_mode_change(dhd);
#endif /* WLAN_ACCEL_BOOT */

	if (ifidx == 0) {
		atomic_set(&dhd->pend_8021x_cnt, 0);
		if (!dhd_download_fw_on_driverload) {
			DHD_STATLOG_CTRL(&dhd->pub, ST(WLAN_POWER_ON), ifidx, 0);
#ifdef WL_EVENT
			wl_ext_event_attach_netdev(net, ifidx, dhd->iflist[ifidx]->bssidx);
#endif /* WL_EVENT */
#ifdef WL_ESCAN
			wl_escan_event_attach(net, &dhd->pub);
#endif /* WL_ESCAN */
#ifdef WL_EXT_IAPSTA
			wl_ext_iapsta_attach_netdev(net, ifidx, dhd->iflist[ifidx]->bssidx);
#endif /* WL_EXT_IAPSTA */
#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
			g_first_broadcast_scan = TRUE;
#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
#ifdef SHOW_LOGTRACE
			/* dhd_cancel_logtrace_process_sync is called in dhd_stop
			 * for built-in models. Need to start the logtrace kthread before
			 * calling wifi on, because once wifi is on, EDL will be in action
			 * any moment, and if the kthread is not active, FW event logs will
			 * not be available
			 */
			if (dhd_reinit_logtrace_process(dhd) != BCME_OK) {
				goto exit;
			}
#endif /* SHOW_LOGTRACE */
#if defined(WLAN_ACCEL_BOOT)
			ret = wl_android_wifi_accel_on(net, dhd->wl_accel_force_reg_on);
			/* Enable wl_accel_force_reg_on if ON fails, else disable it */
			if (ret) {
				dhd->wl_accel_force_reg_on = TRUE;
			} else {
				dhd->wl_accel_force_reg_on = FALSE;
			}
#else
#if defined(BT_OVER_SDIO)
			ret = dhd_bus_get(&dhd->pub, WLAN_MODULE);
			wl_android_set_wifi_on_flag(TRUE);
#else
			ret = wl_android_wifi_on(net);
#endif /* BT_OVER_SDIO */
#endif /* WLAN_ACCEL_BOOT */
			if (ret != 0) {
				DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n",
					__FUNCTION__, ret));
				ret = -1;
				goto exit;
			}
		}
#ifdef SUPPORT_DEEP_SLEEP
		else {
			/* Flag to indicate whether we apply a distinct
			 * power-off policy when the user sets the menu
			 * "Keep Wi-Fi on during sleep" to "Never"
			 */
			if (trigger_deep_sleep) {
#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
				g_first_broadcast_scan = TRUE;
#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
				dhd_deepsleep(net, 0);
				trigger_deep_sleep = 0;
			}
		}
#endif /* SUPPORT_DEEP_SLEEP */
#ifdef FIX_CPU_MIN_CLOCK
		if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE) {
			dhd_init_cpufreq_fix(dhd);
			dhd_fix_cpu_freq(dhd);
		}
#endif /* FIX_CPU_MIN_CLOCK */
#if defined(OOB_INTR_ONLY)
		if (dhd->pub.conf->dpc_cpucore >= 0) {
			dhd_bus_get_ids(dhd->pub.bus, &bus_type, &bus_num, &slot_num);
			adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
			if (adapter) {
				printf("%s: set irq affinity hit %d\n", __FUNCTION__, dhd->pub.conf->dpc_cpucore);
				irq_set_affinity_hint(adapter->irq_num, cpumask_of(dhd->pub.conf->dpc_cpucore));
			}
		}
#endif

		if (dhd->pub.busstate != DHD_BUS_DATA) {
#ifdef BCMDBUS
			dhd_set_path(&dhd->pub);
			DHD_MUTEX_UNLOCK();
			wait_event_interruptible_timeout(dhd->adapter->status_event,
				wifi_get_adapter_status(dhd->adapter, WIFI_STATUS_FW_READY),
				msecs_to_jiffies(DHD_FW_READY_TIMEOUT));
			DHD_MUTEX_LOCK();
			if ((ret = dbus_up(dhd->pub.bus)) != 0) {
				DHD_ERROR(("%s: failed to dbus_up with code %d\n", __FUNCTION__, ret));
				goto exit;
			} else {
				dhd->pub.busstate = DHD_BUS_DATA;
			}
			if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
				DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
				goto exit;
			}
#else
			/* try to bring up bus */

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
			if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) >= 0) {
				ret = dhd_bus_start(&dhd->pub);
				pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus));
				pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus));
			}
#else
			ret = dhd_bus_start(&dhd->pub);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

			if (ret) {
				DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
				ret = -1;
				goto exit;
			}
#endif /* !BCMDBUS */

		}
#ifdef WL_EXT_IAPSTA
		wl_ext_iapsta_attach_name(net, ifidx);
#endif

#ifdef BT_OVER_SDIO
		if (dhd->pub.is_bt_recovery_required) {
			DHD_ERROR(("%s: Send Hang Notification 2 to BT\n", __FUNCTION__));
			bcmsdh_btsdio_process_dhd_hang_notification(TRUE);
		}
		dhd->pub.is_bt_recovery_required = FALSE;
#endif

		/* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */
		memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);

#ifdef TOE
		/* Get current TOE mode from dongle */
		if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0) {
			dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
		} else {
			dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
		}
#endif /* TOE */

#ifdef DHD_LB
#ifdef ENABLE_DHD_GRO
		dhd->iflist[ifidx]->net->features |= NETIF_F_GRO;
#endif /* ENABLE_DHD_GRO */

#ifdef HOST_SFH_LLC
		dhd->iflist[ifidx]->net->needed_headroom = DOT11_LLC_SNAP_HDR_LEN;
#endif

#if defined(DHD_LB_RXP)
		__skb_queue_head_init(&dhd->rx_pend_queue);
		if (dhd->rx_napi_netdev == NULL) {
			dhd->rx_napi_netdev = dhd->iflist[ifidx]->net;
			memset(&dhd->rx_napi_struct, 0, sizeof(struct napi_struct));
			netif_napi_add(dhd->rx_napi_netdev, &dhd->rx_napi_struct,
				dhd_napi_poll, dhd_napi_weight);
			DHD_INFO(("%s napi<%p> enabled ifp->net<%p,%s> dhd_napi_weight: %d\n",
				__FUNCTION__, &dhd->rx_napi_struct, net,
				net->name, dhd_napi_weight));
			napi_enable(&dhd->rx_napi_struct);
			DHD_INFO(("%s load balance init rx_napi_struct\n", __FUNCTION__));
			skb_queue_head_init(&dhd->rx_napi_queue);
			__skb_queue_head_init(&dhd->rx_process_queue);
		} /* rx_napi_netdev == NULL */
#endif /* DHD_LB_RXP */

#if defined(DHD_LB_TXP)
		/* Use the variant that uses locks */
		skb_queue_head_init(&dhd->tx_pend_queue);
#endif /* DHD_LB_TXP */
		dhd->dhd_lb_candidacy_override = FALSE;
#endif /* DHD_LB */
		netdev_update_features(net);
#ifdef DHD_PM_OVERRIDE
		g_pm_override = FALSE;
#endif /* DHD_PM_OVERRIDE */
#if defined(WL_CFG80211)
		if (unlikely(wl_cfg80211_up(net))) {
			DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__));
			ret = -1;
			goto exit;
		}
		if (!dhd_download_fw_on_driverload) {
#ifdef ARP_OFFLOAD_SUPPORT
			dhd->pend_ipaddr = 0;
			if (!dhd_inetaddr_notifier_registered) {
				dhd_inetaddr_notifier_registered = TRUE;
				register_inetaddr_notifier(&dhd_inetaddr_notifier);
			}
#endif /* ARP_OFFLOAD_SUPPORT */
#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
			if (!dhd_inet6addr_notifier_registered) {
				dhd_inet6addr_notifier_registered = TRUE;
				register_inet6addr_notifier(&dhd_inet6addr_notifier);
			}
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
		}

#if defined(DHD_CONTROL_PCIE_ASPM_WIFI_TURNON)
		dhd_bus_aspm_enable_rc_ep(dhd->pub.bus, TRUE);
#endif /* DHD_CONTROL_PCIE_ASPM_WIFI_TURNON */
#if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
		dhd_irq_set_affinity(&dhd->pub, cpumask_of(0));
#endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
#if defined(NUM_SCB_MAX_PROBE)
		dhd_set_scb_probe(&dhd->pub);
#endif /* NUM_SCB_MAX_PROBE */
#endif /* WL_CFG80211 */
#if defined(WL_WIRELESS_EXT)
		if (unlikely(wl_iw_up(net, &dhd->pub))) {
			DHD_ERROR(("%s: failed to bring up wext\n", __FUNCTION__));
			ret = -1;
			goto exit;
		}
#endif
#ifdef WL_ESCAN
		if (unlikely(wl_escan_up(net, &dhd->pub))) {
			DHD_ERROR(("%s: failed to bring up escan\n", __FUNCTION__));
			ret = -1;
			goto exit;
		}
#endif /* WL_ESCAN */
#if defined(ISAM_PREINIT)
		if (!dhd_download_fw_on_driverload) {
			if (dhd->pub.conf) {
				wl_android_ext_priv_cmd(net, dhd->pub.conf->isam_init, 0, &bytes_written);
				wl_android_ext_priv_cmd(net, dhd->pub.conf->isam_config, 0, &bytes_written);
				wl_android_ext_priv_cmd(net, dhd->pub.conf->isam_enable, 0, &bytes_written);
			}
		}
#endif
	}

	dhd->pub.up = 1;
#if defined(BCMPCIE) && defined(CONFIG_ARCH_MSM)
	dhd_bus_inform_ep_loaded_to_rc(&dhd->pub, dhd->pub.up);
#endif /* BCMPCIE && CONFIG_ARCH_MSM */
	DHD_START_RPM_TIMER(&dhd->pub);

	if (wl_event_enable) {
		/* For wl utility to receive events */
		dhd->pub.wl_event_enabled = true;
	} else {
		dhd->pub.wl_event_enabled = false;
	}

	if (logtrace_pkt_sendup) {
		/* For any daemon to receive logtrace */
		dhd->pub.logtrace_pkt_sendup = true;
	} else {
		dhd->pub.logtrace_pkt_sendup = false;
	}

	OLD_MOD_INC_USE_COUNT;

#ifdef BCMDBGFS
	dhd_dbgfs_init(&dhd->pub);
#endif

exit:
	mutex_unlock(&dhd->pub.ndev_op_sync);
#if defined(ENABLE_INSMOD_NO_POWER_OFF) && defined(ENABLE_INSMOD_NO_FW_LOAD)
	dhd_download_fw_on_driverload = FALSE;
	dhd_driver_init_done = TRUE;
#endif
	if (ret) {
		dhd_stop(net);
	}

	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	DHD_MUTEX_UNLOCK();

	WL_MSG(net->name, "Exit ret=%d\n", ret);
	return ret;
}

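/*
 * The primary-ndev ndo handlers below pair queue control with
 * open/stop ordering: transmit queues are started only after
 * dhd_open() succeeds, and are stopped before dhd_stop() runs, so no
 * transmit can race a half-initialized or half-torn-down interface.
 */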
/*
 * ndo_open handler for primary ndev
 */
static int
dhd_pri_open(struct net_device *net)
{
	s32 ret;

	ret = dhd_open(net);
	if (unlikely(ret)) {
		DHD_ERROR(("Failed to open primary dev ret %d\n", ret));
		return ret;
	}

	/* Allow transmit calls */
	dhd_tx_start_queues(net);
	WL_MSG(net->name, "tx queue started\n");

#if defined(SET_RPS_CPUS)
	dhd_rps_cpus_enable(net, TRUE);
#endif

#if defined(SET_XPS_CPUS)
	dhd_xps_cpus_enable(net, TRUE);
#endif

	return ret;
}

/*
 * ndo_stop handler for primary ndev
 */
static int
dhd_pri_stop(struct net_device *net)
{
	s32 ret;

	/* Set state and stop OS transmissions */
	dhd_tx_stop_queues(net);
	WL_MSG(net->name, "tx queue stopped\n");

	ret = dhd_stop(net);
	if (unlikely(ret)) {
		DHD_ERROR(("dhd_stop failed: %d\n", ret));
		return ret;
	}

	return ret;
}

#ifdef PCIE_INB_DW
bool
dhd_check_cfg_in_progress(dhd_pub_t *dhdp)
{
#if defined(WL_CFG80211)
	return wl_cfg80211_check_in_progress(dhd_linux_get_primary_netdev(dhdp));
#else
	return FALSE;
#endif /* WL_CFG80211 */
}
#endif /* PCIE_INB_DW */

#if defined(WL_STATIC_IF) && defined(WL_CFG80211)
/*
 * For static I/Fs, the firmware interface init
 * is done from the IFF_UP context.
 */
static int
dhd_static_if_open(struct net_device *net)
{
	s32 ret = 0;
	struct bcm_cfg80211 *cfg;
	struct net_device *primary_netdev = NULL;
#ifdef WLEASYMESH
	dhd_info_t *dhd = DHD_DEV_INFO(net);
#endif /* WLEASYMESH */

	cfg = wl_get_cfg(net);
	primary_netdev = bcmcfg_to_prmry_ndev(cfg);

	if (!IS_CFG80211_STATIC_IF(cfg, net)) {
		WL_MSG(net->name, "non-static interface ..do nothing\n");
		ret = BCME_OK;
		goto done;
	}

	WL_MSG(net->name, "Enter\n");
	/* Ensure fw is initialized. If it is already initialized,
	 * dhd_open will return success.
	 */
#ifdef WLEASYMESH
	WL_MSG(net->name, "switch to EasyMesh fw\n");
	dhd->pub.conf->fw_type = FW_TYPE_EZMESH;
	ret = dhd_stop(primary_netdev);
	if (unlikely(ret)) {
		printf("===>%s, Failed to close primary dev ret %d\n", __FUNCTION__, ret);
		goto done;
	}
	OSL_SLEEP(1);
#endif /* WLEASYMESH */
	ret = dhd_open(primary_netdev);
	if (unlikely(ret)) {
		DHD_ERROR(("Failed to open primary dev ret %d\n", ret));
		goto done;
	}

	ret = wl_cfg80211_static_if_open(net);
	if (ret == BCME_OK) {
		/* Allow transmit calls */
		netif_start_queue(net);
	}
done:
	WL_MSG(net->name, "Exit ret=%d\n", ret);
	return ret;
}

static int
dhd_static_if_stop(struct net_device *net)
{
	struct bcm_cfg80211 *cfg;
	struct net_device *primary_netdev = NULL;
	int ret = BCME_OK;
	dhd_info_t *dhd = DHD_DEV_INFO(net);

	WL_MSG(net->name, "Enter\n");

	cfg = wl_get_cfg(net);
	if (!IS_CFG80211_STATIC_IF(cfg, net)) {
		DHD_TRACE(("non-static interface (%s)..do nothing \n", net->name));
		return BCME_OK;
	}
#ifdef DHD_NOTIFY_MAC_CHANGED
	if (dhd->pub.skip_dhd_stop) {
		WL_MSG(net->name, "Exit skip stop\n");
		return BCME_OK;
	}
#endif /* DHD_NOTIFY_MAC_CHANGED */

	/* Ensure queue is disabled */
	netif_tx_disable(net);

	ret = wl_cfg80211_static_if_close(net);

	if (dhd->pub.up == 0) {
		/* If fw is down, return */
		DHD_ERROR(("fw down\n"));
		return BCME_OK;
	}
	/* If the STA iface is not operational, invoke dhd_stop from this
	 * context.
	 */
	primary_netdev = bcmcfg_to_prmry_ndev(cfg);
#ifdef WLEASYMESH
	if (dhd->pub.conf->fw_type == FW_TYPE_EZMESH) {
		WL_MSG(net->name, "switch to STA fw\n");
		dhd->pub.conf->fw_type = FW_TYPE_STA;
	} else
#endif /* WLEASYMESH */
	if (!(primary_netdev->flags & IFF_UP)) {
		ret = dhd_stop(primary_netdev);
	} else {
		DHD_ERROR(("Skipped dhd_stop, as sta is operational\n"));
	}
	WL_MSG(net->name, "Exit ret=%d\n", ret);

	return ret;
}
#endif /* WL_STATIC_IF && WL_CFG80211 */

int dhd_do_driver_init(struct net_device *net)
{
	dhd_info_t *dhd = NULL;

	if (!net) {
		DHD_ERROR(("Primary Interface not initialized \n"));
		return -EINVAL;
	}

	DHD_MUTEX_IS_LOCK_RETURN();

	dhd = DHD_DEV_INFO(net);

	/* If the driver is already initialized, do nothing */
	if (dhd->pub.busstate == DHD_BUS_DATA) {
		DHD_TRACE(("Driver already initialized. Nothing to do"));
		return 0;
	}

	if (dhd_open(net) < 0) {
		DHD_ERROR(("Driver Init Failed \n"));
		return -1;
	}

	return 0;
}

int
dhd_event_ifadd(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
{

#ifdef WL_CFG80211
	if (wl_cfg80211_notify_ifadd(dhd_linux_get_primary_netdev(&dhdinfo->pub),
		ifevent->ifidx, name, mac, ifevent->bssidx, ifevent->role) == BCME_OK)
		return BCME_OK;
#endif /* WL_CFG80211 */

	/* handle IF event caused by wl commands, SoftAP, WEXT and
	 * anything else. This has to be done asynchronously otherwise
	 * DPC will be blocked (and iovars will timeout as DPC has no chance
	 * to read the response back)
	 */
	if (ifevent->ifidx > 0) {
		dhd_if_event_t *if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
		if (if_event == NULL) {
			DHD_ERROR(("dhd_event_ifadd: Failed MALLOC, malloced %d bytes",
				MALLOCED(dhdinfo->pub.osh)));
			return BCME_NOMEM;
		}

		memcpy(&if_event->event, ifevent, sizeof(if_event->event));
		memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
		strlcpy(if_event->name, name, sizeof(if_event->name));
		dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event,
			DHD_WQ_WORK_IF_ADD, dhd_ifadd_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
	}

	return BCME_OK;
}
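
/*
 * dhd_event_ifdel() below follows the same split as dhd_event_ifadd():
 * cfg80211 gets first refusal, and anything it does not own is cloned
 * into a dhd_if_event_t and handed to the deferred workqueue so the
 * DPC context never blocks on interface teardown.
 * dhd_event_ifchange() defers work only when DHD_UPDATE_INTF_MAC is
 * defined.
 */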

int
dhd_event_ifdel(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
{
	dhd_if_event_t *if_event;

#ifdef WL_CFG80211
	if (wl_cfg80211_notify_ifdel(dhd_linux_get_primary_netdev(&dhdinfo->pub),
		ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
		return BCME_OK;
#endif /* WL_CFG80211 */

	/* handle IF event caused by wl commands, SoftAP, WEXT and
	 * anything else
	 */
	if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
	if (if_event == NULL) {
		DHD_ERROR(("dhd_event_ifdel: malloc failed for if_event, malloced %d bytes",
			MALLOCED(dhdinfo->pub.osh)));
		return BCME_NOMEM;
	}
	memcpy(&if_event->event, ifevent, sizeof(if_event->event));
	memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
	strlcpy(if_event->name, name, sizeof(if_event->name));
	dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_DEL,
		dhd_ifdel_event_handler, DHD_WQ_WORK_PRIORITY_LOW);

	return BCME_OK;
}

int
dhd_event_ifchange(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
{
#ifdef DHD_UPDATE_INTF_MAC
	dhd_if_event_t *if_event;
#endif /* DHD_UPDATE_INTF_MAC */

#ifdef WL_CFG80211
	wl_cfg80211_notify_ifchange(dhd_linux_get_primary_netdev(&dhdinfo->pub),
		ifevent->ifidx, name, mac, ifevent->bssidx);
#endif /* WL_CFG80211 */

#ifdef DHD_UPDATE_INTF_MAC
	/* handle IF event caused by wl commands, SoftAP, WEXT, MBSS and
	 * anything else
	 */
	if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
	if (if_event == NULL) {
		DHD_ERROR(("dhd_event_ifchange: malloc failed for if_event, malloced %d bytes",
			MALLOCED(dhdinfo->pub.osh)));
		return BCME_NOMEM;
	}
	memcpy(&if_event->event, ifevent, sizeof(if_event->event));
	/* construct a change event */
	if_event->event.ifidx = dhd_ifname2idx(dhdinfo, name);
	if_event->event.opcode = WLC_E_IF_CHANGE;
	memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
	strncpy(if_event->name, name, IFNAMSIZ);
	if_event->name[IFNAMSIZ - 1] = '\0';
	dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_UPDATE,
		dhd_ifupdate_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
#endif /* DHD_UPDATE_INTF_MAC */

	return BCME_OK;
}

#ifdef WL_NATOE
/* Handler to update natoe info and bind with new subscriptions if there is a change in config */
static void
dhd_natoe_ct_event_hanlder(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	wl_event_data_natoe_t *natoe = event_info;
	dhd_nfct_info_t *nfct;

	if (event != DHD_WQ_WORK_NATOE_EVENT) {
		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
		return;
	}

	if (!dhd) {
		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
		return;
	}
	nfct = dhd->pub.nfct;
	if (natoe->natoe_active && natoe->sta_ip && natoe->start_port && natoe->end_port &&
			(natoe->start_port < natoe->end_port)) {
		/* Rebind subscriptions to start receiving notifications from groups */
		if (dhd_ct_nl_bind(nfct, nfct->subscriptions) < 0) {
			dhd_ct_close(nfct);
		}
		dhd_ct_send_dump_req(nfct);
	} else if (!natoe->natoe_active) {
		/* Rebind subscriptions to stop receiving notifications from groups */
		if (dhd_ct_nl_bind(nfct, CT_NULL_SUBSCRIPTION) < 0) {
			dhd_ct_close(nfct);
		}
	}
}

/* As the NATOE enable/disable event is received, we have to bind with new NL subscriptions.
 * Schedule a workq to switch out of tasklet context, as the bind call may sleep in the handler
 */
int
dhd_natoe_ct_event(dhd_pub_t *dhd, char *data)
{
	wl_event_data_natoe_t *event_data = (wl_event_data_natoe_t *)data;

	if (dhd->nfct) {
		wl_event_data_natoe_t *natoe = dhd->nfct->natoe_info;
		uint8 prev_enable = natoe->natoe_active;

		spin_lock_bh(&dhd->nfct_lock);
		memcpy(natoe, event_data, sizeof(*event_data));
		spin_unlock_bh(&dhd->nfct_lock);

		if (prev_enable != event_data->natoe_active) {
			dhd_deferred_schedule_work(dhd->info->dhd_deferred_wq,
					(void *)natoe, DHD_WQ_WORK_NATOE_EVENT,
					dhd_natoe_ct_event_hanlder, DHD_WQ_WORK_PRIORITY_LOW);
		}
		return BCME_OK;
	}
	DHD_ERROR(("%s ERROR NFCT is not enabled \n", __FUNCTION__));
	return BCME_ERROR;
}

/* Handler to send the natoe ioctl to the dongle */
static void
dhd_natoe_ct_ioctl_handler(void *handle, void *event_info, uint8 event)
{
	dhd_info_t *dhd = handle;
	dhd_ct_ioc_t *ct_ioc = event_info;

	if (event != DHD_WQ_WORK_NATOE_IOCTL) {
		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
		return;
	}

	if (!dhd) {
		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
		return;
	}

	if (dhd_natoe_prep_send_exception_port_ioctl(&dhd->pub, ct_ioc) < 0) {
		DHD_ERROR(("%s: Error in sending NATOE IOCTL \n", __FUNCTION__));
	}
}

/* When a Netlink message contains port collision info, the info must be sent to the dongle FW.
 * For that we switch context out of softirq/tasklet by scheduling a workq for the natoe_ct ioctl
 */
void
dhd_natoe_ct_ioctl_schedule_work(dhd_pub_t *dhd, dhd_ct_ioc_t *ioc)
{

	dhd_deferred_schedule_work(dhd->info->dhd_deferred_wq, (void *)ioc,
			DHD_WQ_WORK_NATOE_IOCTL, dhd_natoe_ct_ioctl_handler,
			DHD_WQ_WORK_PRIORITY_HIGH);
}
#endif /* WL_NATOE */

/* This API maps ndev to ifp inclusive of static IFs */
static dhd_if_t *
dhd_get_ifp_by_ndev(dhd_pub_t *dhdp, struct net_device *ndev)
{
	dhd_if_t *ifp = NULL;
#ifdef WL_STATIC_IF
	u32 ifidx = (DHD_MAX_IFS + DHD_MAX_STATIC_IFS - 1);
#else
	u32 ifidx = (DHD_MAX_IFS - 1);
#endif /* WL_STATIC_IF */

	dhd_info_t *dhdinfo = (dhd_info_t *)dhdp->info;
	do {
		ifp = dhdinfo->iflist[ifidx];
		if (ifp && (ifp->net == ndev)) {
			DHD_TRACE(("match found for %s. ifidx:%d\n",
				ndev->name, ifidx));
			return ifp;
		}
	} while (ifidx--);

	DHD_ERROR(("no entry found for %s\n", ndev->name));
	return NULL;
}

bool
dhd_is_static_ndev(dhd_pub_t *dhdp, struct net_device *ndev)
{
	dhd_if_t *ifp = NULL;

	if (!dhdp || !ndev) {
		DHD_ERROR(("wrong input\n"));
		ASSERT(0);
		return false;
	}

	ifp = dhd_get_ifp_by_ndev(dhdp, ndev);
	return (ifp && (ifp->static_if == true));
}

#ifdef WL_STATIC_IF
/* In some cases, while registering an I/F, the actual ifidx, bssidx and dngl_name
 * are not known. For e.g. the static i/f case. This function lets us update them
 * once they are known.
 */
s32
dhd_update_iflist_info(dhd_pub_t *dhdp, struct net_device *ndev, int ifidx,
	uint8 *mac, uint8 bssidx, const char *dngl_name, int if_state)
{
	dhd_info_t *dhdinfo = (dhd_info_t *)dhdp->info;
	dhd_if_t *ifp, *ifp_new;
	s32 cur_idx;
	dhd_dev_priv_t *dev_priv;

	DHD_TRACE(("[STATIC_IF] update ifinfo for state:%d ifidx:%d\n",
			if_state, ifidx));

	ASSERT(dhdinfo && (ifidx < (DHD_MAX_IFS + DHD_MAX_STATIC_IFS)));

	if ((ifp = dhd_get_ifp_by_ndev(dhdp, ndev)) == NULL) {
		return -ENODEV;
	}
	cur_idx = ifp->idx;

	if (if_state == NDEV_STATE_OS_IF_CREATED) {
		/* mark static if */
		ifp->static_if = TRUE;
		return BCME_OK;
	}

	ifp_new = dhdinfo->iflist[ifidx];
	if (ifp_new && (ifp_new != ifp)) {
		/* There should be only one entry for a given ifidx. */
		DHD_ERROR(("ifp ptr already present for ifidx:%d\n", ifidx));
		ASSERT(0);
		dhdp->hang_reason = HANG_REASON_IFACE_ADD_FAILURE;
		net_os_send_hang_message(ifp->net);
		return -EINVAL;
	}

	/* For the static if delete case, clean up the if before the ifidx update */
	if ((if_state == NDEV_STATE_FW_IF_DELETED) ||
		(if_state == NDEV_STATE_FW_IF_FAILED)) {
		dhd_cleanup_if(ifp->net);
		dev_priv = DHD_DEV_PRIV(ndev);
		dev_priv->ifidx = ifidx;
	}

	/* update the iflist ifidx slot with cached info */
	dhdinfo->iflist[ifidx] = ifp;
	dhdinfo->iflist[cur_idx] = NULL;

	/* update the values */
	ifp->idx = ifidx;
	ifp->bssidx = bssidx;

	if (if_state == NDEV_STATE_FW_IF_CREATED) {
		dhd_dev_priv_save(ndev, dhdinfo, ifp, ifidx);
		/* initialize the dongle provided if name */
		if (dngl_name) {
			strncpy(ifp->dngl_name, dngl_name, IFNAMSIZ);
		} else if (ndev->name[0] != '\0') {
			strncpy(ifp->dngl_name, ndev->name, IFNAMSIZ);
		}
		if (mac != NULL && ifp->set_macaddress == FALSE) {
			/* To and fro locations have same size - ETHER_ADDR_LEN */
			(void)memcpy_s(&ifp->mac_addr, ETHER_ADDR_LEN, mac, ETHER_ADDR_LEN);
		}
#ifdef WL_EVENT
		wl_ext_event_attach_netdev(ndev, ifidx, bssidx);
#endif /* WL_EVENT */
#ifdef WL_ESCAN
		wl_escan_event_attach(ndev, dhdp);
#endif /* WL_ESCAN */
#ifdef WL_EXT_IAPSTA
		wl_ext_iapsta_ifadding(ndev, ifidx);
		wl_ext_iapsta_attach_netdev(ndev, ifidx, bssidx);
		wl_ext_iapsta_attach_name(ndev, ifidx);
#endif /* WL_EXT_IAPSTA */
	}
	else if (if_state == NDEV_STATE_FW_IF_DELETED) {
#ifdef WL_EXT_IAPSTA
		wl_ext_iapsta_dettach_netdev(ndev, cur_idx);
#endif /* WL_EXT_IAPSTA */
#ifdef WL_ESCAN
		wl_escan_event_dettach(ndev, dhdp);
#endif /* WL_ESCAN */
#ifdef WL_EVENT
		wl_ext_event_dettach_netdev(ndev, cur_idx);
#endif /* WL_EVENT */
	}
	DHD_INFO(("[STATIC_IF] ifp ptr updated for ifidx:%d curidx:%d if_state:%d\n",
		ifidx, cur_idx, if_state));
	return BCME_OK;
}
#endif /* WL_STATIC_IF */

/* Unregister and free the existing net_device interface (if any) in iflist and
 * allocate a new one; the slot is reused. This function does NOT register the
 * new interface with the Linux kernel; dhd_register_if does that job.
 */
10063 struct net_device*
dhd_allocate_if(dhd_pub_t * dhdpub,int ifidx,const char * name,uint8 * mac,uint8 bssidx,bool need_rtnl_lock,const char * dngl_name)10064 dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, const char *name,
10065 	uint8 *mac, uint8 bssidx, bool need_rtnl_lock, const char *dngl_name)
10066 {
10067 	dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
10068 	dhd_if_t *ifp;
10069 
10070 	ASSERT(dhdinfo && (ifidx < (DHD_MAX_IFS + DHD_MAX_STATIC_IFS)));
10071 	if (!dhdinfo || ifidx < 0 || ifidx >= (DHD_MAX_IFS + DHD_MAX_STATIC_IFS)) {
10072 		return NULL;
10073 	}
10074 
10075 	ifp = dhdinfo->iflist[ifidx];
10076 
10077 	if (ifp != NULL) {
10078 		if (ifp->net != NULL) {
10079 			DHD_ERROR(("%s: free existing IF %s ifidx:%d \n",
10080 				__FUNCTION__, ifp->net->name, ifidx));
10081 
10082 			if (ifidx == 0) {
10083 				/* For primary ifidx (0), there shouldn't be
10084 				 * any netdev present already.
10085 				 */
10086 				DHD_ERROR(("Primary ifidx populated already\n"));
10087 				ASSERT(0);
10088 				return NULL;
10089 			}
10090 
10091 			dhd_dev_priv_clear(ifp->net); /* clear net_device private */
10092 
10093 			/* in unregister_netdev case, the interface gets freed by net->destructor
10094 			 * (which is set to free_netdev)
10095 			 */
10096 #if defined(CONFIG_TIZEN)
10097 			net_stat_tizen_unregister(ifp->net);
10098 #endif /* CONFIG_TIZEN */
10099 			if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
10100 				free_netdev(ifp->net);
10101 			} else {
10102 				dhd_tx_stop_queues(ifp->net);
10103 				if (need_rtnl_lock)
10104 					unregister_netdev(ifp->net);
10105 				else
10106 					unregister_netdevice(ifp->net);
10107 			}
10108 			ifp->net = NULL;
10109 		}
10110 	} else {
10111 		ifp = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_t));
10112 		if (ifp == NULL) {
10113 			DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__, sizeof(dhd_if_t)));
10114 			return NULL;
10115 		}
10116 	}
10117 
10118 	memset(ifp, 0, sizeof(dhd_if_t));
10119 	ifp->info = dhdinfo;
10120 	ifp->idx = ifidx;
10121 	ifp->bssidx = bssidx;
10122 #ifdef DHD_MCAST_REGEN
10123 	ifp->mcast_regen_bss_enable = FALSE;
10124 #endif
10125 	/* set to TRUE rx_pkt_chainable at alloc time */
10126 	ifp->rx_pkt_chainable = TRUE;
10127 
10128 	if (mac != NULL)
10129 		memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN);
10130 
10131 	/* Allocate etherdev, including space for private structure */
10132 #ifdef DHD_MQ
10133 	if (enable_mq) {
10134 		ifp->net = alloc_etherdev_mq(DHD_DEV_PRIV_SIZE, MQ_MAX_QUEUES);
10135 	} else {
10136 		ifp->net = alloc_etherdev(DHD_DEV_PRIV_SIZE);
10137 	}
10138 #else
10139 	ifp->net = alloc_etherdev(DHD_DEV_PRIV_SIZE);
10140 #endif /* DHD_MQ */
10141 
10142 	if (ifp->net == NULL) {
10143 		DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__, sizeof(dhdinfo)));
10144 		goto fail;
10145 	}
10146 
10147 	/* Setup the dhd interface's netdevice private structure. */
10148 	dhd_dev_priv_save(ifp->net, dhdinfo, ifp, ifidx);
10149 
10150 	if (name && name[0]) {
10151 		strlcpy(ifp->net->name, name, IFNAMSIZ);
10152 	}
10153 
10154 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 9))
10155 	/* as priv_destructor calls free_netdev, no need to set needs_free_netdev */
10156 	ifp->net->needs_free_netdev = 0;
10157 #ifdef WL_CFG80211
10158 	if (ifidx == 0)
10159 		ifp->net->priv_destructor = free_netdev;
10160 	else
10161 		ifp->net->priv_destructor = dhd_netdev_free;
10162 #else
10163 	ifp->net->priv_destructor = free_netdev;
10164 #endif /* WL_CFG80211 */
10165 #else /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 9) */
10166 #ifdef WL_CFG80211
10167 	if (ifidx == 0)
10168 		ifp->net->destructor = free_netdev;
10169 	else
10170 		ifp->net->destructor = dhd_netdev_free;
10171 #else
10172 	ifp->net->destructor = free_netdev;
10173 #endif /* WL_CFG80211 */
10174 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 9) */
10175 	strlcpy(ifp->name, ifp->net->name, sizeof(ifp->name));
10176 	dhdinfo->iflist[ifidx] = ifp;
10177 
10178 	/* initialize the dongle-provided interface name */
10179 	if (dngl_name) {
10180 		strlcpy(ifp->dngl_name, dngl_name, sizeof(ifp->dngl_name));
10181 	} else if (name) {
10182 		strlcpy(ifp->dngl_name, name, sizeof(ifp->dngl_name));
10183 	}
10184 
10185 #ifdef PCIE_FULL_DONGLE
10186 	/* Initialize STA info list */
10187 	INIT_LIST_HEAD(&ifp->sta_list);
10188 	DHD_IF_STA_LIST_LOCK_INIT(&ifp->sta_list_lock);
10189 #endif /* PCIE_FULL_DONGLE */
10190 
10191 #ifdef DHD_L2_FILTER
10192 	ifp->phnd_arp_table = init_l2_filter_arp_table(dhdpub->osh);
10193 	ifp->parp_allnode = TRUE;
10194 #endif /* DHD_L2_FILTER */
10195 
10196 #if (defined(BCM_ROUTER_DHD) && defined(QOS_MAP_SET))
10197 	ifp->qosmap_up_table = ((uint8*)MALLOCZ(dhdpub->osh, UP_TABLE_MAX));
10198 	ifp->qosmap_up_table_enable = FALSE;
10199 #endif /* BCM_ROUTER_DHD && QOS_MAP_SET */
10200 
10201 	DHD_CUMM_CTR_INIT(&ifp->cumm_ctr);
10202 
10203 #ifdef DHD_4WAYM4_FAIL_DISCONNECT
10204 	INIT_DELAYED_WORK(&ifp->m4state_work, dhd_m4_state_handler);
10205 #endif /* DHD_4WAYM4_FAIL_DISCONNECT */
10206 
10207 #ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
10208 	ifp->recv_reassoc_evt = FALSE;
10209 	ifp->post_roam_evt = FALSE;
10210 #endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
10211 
10212 #ifdef DHDTCPSYNC_FLOOD_BLK
10213 	INIT_WORK(&ifp->blk_tsfl_work, dhd_blk_tsfl_handler);
10214 	dhd_reset_tcpsync_info_by_ifp(ifp);
10215 #endif /* DHDTCPSYNC_FLOOD_BLK */
10216 
10217 	return ifp->net;
10218 
10219 fail:
10220 	if (ifp != NULL) {
10221 		if (ifp->net != NULL) {
10222 #if defined(DHD_LB_RXP) && defined(PCIE_FULL_DONGLE)
10223 			if (ifp->net == dhdinfo->rx_napi_netdev) {
10224 				napi_disable(&dhdinfo->rx_napi_struct);
10225 				netif_napi_del(&dhdinfo->rx_napi_struct);
10226 				skb_queue_purge(&dhdinfo->rx_napi_queue);
10227 				dhdinfo->rx_napi_netdev = NULL;
10228 			}
10229 #endif /* DHD_LB_RXP && PCIE_FULL_DONGLE */
10230 			dhd_dev_priv_clear(ifp->net);
10231 			free_netdev(ifp->net);
10232 			ifp->net = NULL;
10233 		}
10234 		MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
10235 	}
10236 	dhdinfo->iflist[ifidx] = NULL;
10237 	return NULL;
10238 }
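
/*
 * Usage sketch (illustrative only; not invoked from here): bringing up an
 * interface is a two-step sequence -- dhd_allocate_if() reserves the iflist
 * slot and allocates the net_device, dhd_register_if() then registers it
 * with the kernel. A hypothetical caller creating a virtual interface:
 *
 *	struct net_device *ndev;
 *
 *	ndev = dhd_allocate_if(dhdp, ifidx, "wl%d", mac, bssidx, TRUE, NULL);
 *	if (ndev == NULL)
 *		return BCME_NOMEM;
 *	if (dhd_register_if(dhdp, ifidx, TRUE) != 0) {
 *		dhd_remove_if(dhdp, ifidx, TRUE);
 *		return BCME_ERROR;
 *	}
 */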
10239 
10240 static void
10241 dhd_cleanup_ifp(dhd_pub_t *dhdp, dhd_if_t *ifp)
10242 {
10243 #ifdef PCIE_FULL_DONGLE
10244 	s32 ifidx = 0;
10245 	if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
10246 #endif /* PCIE_FULL_DONGLE */
10247 
10248 	if (ifp != NULL) {
10249 		if ((ifp->idx < 0) || (ifp->idx >= DHD_MAX_IFS)) {
10250 			DHD_ERROR(("Wrong idx:%d \n", ifp->idx));
10251 			ASSERT(0);
10252 			return;
10253 		}
10254 #ifdef DHD_L2_FILTER
10255 		bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE,
10256 			NULL, FALSE, dhdp->tickcnt);
10257 		deinit_l2_filter_arp_table(dhdp->osh, ifp->phnd_arp_table);
10258 		ifp->phnd_arp_table = NULL;
10259 #endif /* DHD_L2_FILTER */
10260 
10261 #if (defined(BCM_ROUTER_DHD) && defined(QOS_MAP_SET))
10262 		MFREE(dhdp->osh, ifp->qosmap_up_table, UP_TABLE_MAX);
10263 		ifp->qosmap_up_table = NULL;
10264 		ifp->qosmap_up_table_enable = FALSE;
10265 #endif /* BCM_ROUTER_DHD && QOS_MAP_SET */
10266 
10267 		dhd_if_del_sta_list(ifp);
10268 #ifdef PCIE_FULL_DONGLE
10269 		/* Delete flowrings of virtual interface */
10270 		ifidx = ifp->idx;
10271 		if ((ifidx != 0) &&
10272 		    ((if_flow_lkup != NULL) && (if_flow_lkup[ifidx].role != WLC_E_IF_ROLE_AP))) {
10273 			dhd_flow_rings_delete(dhdp, ifidx);
10274 		}
10275 #endif /* PCIE_FULL_DONGLE */
10276 	}
10277 }
10278 
10279 void
10280 dhd_cleanup_if(struct net_device *net)
10281 {
10282 	dhd_info_t *dhdinfo = DHD_DEV_INFO(net);
10283 	dhd_pub_t *dhdp = &dhdinfo->pub;
10284 	dhd_if_t *ifp;
10285 
10286 	ifp = dhd_get_ifp_by_ndev(dhdp, net);
10287 	if (ifp) {
10288 		if (ifp->idx >= DHD_MAX_IFS) {
10289 			DHD_ERROR(("Wrong ifidx: %p, %d\n", ifp, ifp->idx));
10290 			ASSERT(0);
10291 			return;
10292 		}
10293 		dhd_cleanup_ifp(dhdp, ifp);
10294 	}
10295 }
10296 
10297 /* unregister and free the net_device interface associated with the indexed
10298  * slot, also free the slot memory and set the slot pointer to NULL
10299  */
10300 #define DHD_TX_COMPLETION_TIMEOUT 5000
10301 int
10302 dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock)
10303 {
10304 	dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
10305 	dhd_if_t *ifp;
10306 	unsigned long flags;
10307 	long timeout;
10308 
10309 	ifp = dhdinfo->iflist[ifidx];
10310 
10311 	if (ifp != NULL) {
10312 #ifdef DHD_4WAYM4_FAIL_DISCONNECT
10313 		cancel_delayed_work_sync(&ifp->m4state_work);
10314 #endif /* DHD_4WAYM4_FAIL_DISCONNECT */
10315 
10316 #ifdef DHDTCPSYNC_FLOOD_BLK
10317 		cancel_work_sync(&ifp->blk_tsfl_work);
10318 #endif /* DHDTCPSYNC_FLOOD_BLK */
10319 
10320 		dhd_cleanup_ifp(dhdpub, ifp);
10321 #ifdef WL_STATIC_IF
10322 		if (ifp->static_if) {
10323 			/* static IF will be handled in detach */
10324 			DHD_TRACE(("Skip del iface for static interface\n"));
10325 			return BCME_OK;
10326 		}
10327 #endif /* WL_STATIC_IF */
10328 		if (ifp->net != NULL) {
10329 			DHD_ERROR(("deleting interface '%s' idx %d\n", ifp->net->name, ifp->idx));
10330 
10331 			DHD_GENERAL_LOCK(dhdpub, flags);
10332 			ifp->del_in_progress = true;
10333 			DHD_GENERAL_UNLOCK(dhdpub, flags);
10334 
10335 			/* If TX is in progress, hold the if del */
10336 			if (DHD_IF_IS_TX_ACTIVE(ifp)) {
10337 				DHD_INFO(("TX in progress. Wait for it to be complete."));
10338 				timeout = wait_event_timeout(dhdpub->tx_completion_wait,
10339 					((ifp->tx_paths_active & DHD_TX_CONTEXT_MASK) == 0),
10340 					msecs_to_jiffies(DHD_TX_COMPLETION_TIMEOUT));
10341 				if (!timeout) {
10342 					/* Tx completion timed out; attempt to proceed anyway */
10343 					DHD_ERROR(("Tx completion timed out!\n"));
10344 					ASSERT(0);
10345 				}
10346 			} else {
10347 				DHD_TRACE(("No outstanding TX!\n"));
10348 			}
10349 			dhdinfo->iflist[ifidx] = NULL;
10350 			/* in unregister_netdev case, the interface gets freed by net->destructor
10351 			 * (which is set to free_netdev)
10352 			 */
10353 			if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
10354 				free_netdev(ifp->net);
10355 			} else {
10356 				netif_tx_disable(ifp->net);
10357 
10358 #if defined(SET_RPS_CPUS)
10359 				custom_rps_map_clear(ifp->net->_rx);
10360 #endif /* SET_RPS_CPUS */
10361 #if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
10362 				if (dhdinfo->cih)
10363 					ctf_dev_unregister(dhdinfo->cih, ifp->net);
10364 #endif /* BCM_ROUTER_DHD && HNDCTF */
10365 
10366 #if (defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE))
10367 				dhd_tcpack_suppress_set(dhdpub, TCPACK_SUP_OFF);
10368 #endif /* DHDTCPACK_SUPPRESS && BCMPCIE */
10369 				if (need_rtnl_lock)
10370 					unregister_netdev(ifp->net);
10371 				else
10372 					unregister_netdevice(ifp->net);
10373 #ifdef WL_EXT_IAPSTA
10374 				wl_ext_iapsta_dettach_netdev(ifp->net, ifidx);
10375 #endif /* WL_EXT_IAPSTA */
10376 #ifdef WL_ESCAN
10377 				wl_escan_event_dettach(ifp->net, dhdpub);
10378 #endif /* WL_ESCAN */
10379 #ifdef WL_EVENT
10380 				wl_ext_event_dettach_netdev(ifp->net, ifidx);
10381 #endif /* WL_EVENT */
10382 			}
10383 			ifp->net = NULL;
10384 			DHD_GENERAL_LOCK(dhdpub, flags);
10385 			ifp->del_in_progress = false;
10386 			DHD_GENERAL_UNLOCK(dhdpub, flags);
10387 		}
10388 #ifdef DHD_WMF
10389 		dhd_wmf_cleanup(dhdpub, ifidx);
10390 #endif /* DHD_WMF */
10391 		DHD_CUMM_CTR_INIT(&ifp->cumm_ctr);
10392 
10393 		MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
10394 		ifp = NULL;
10395 	}
10396 
10397 	return BCME_OK;
10398 }
10399 
10400 #if (defined(BCM_ROUTER_DHD) && defined(QOS_MAP_SET))
10401 int
10402 dhd_set_qosmap_up_table(dhd_pub_t *dhdp, uint32 idx, bcm_tlv_t *qos_map_ie)
10403 {
10404 	dhd_info_t *dhd = dhdp->info;
10405 	dhd_if_t *ifp;
10406 
10407 	ASSERT(idx < DHD_MAX_IFS);
10408 	ifp = dhd->iflist[idx];
10409 
10410 	if (!ifp)
10411 		return BCME_ERROR;
10412 
10413 	wl_set_up_table(ifp->qosmap_up_table, qos_map_ie);
10414 	ifp->qosmap_up_table_enable = TRUE;
10415 
10416 	return BCME_OK;
10417 }
10418 #endif /* BCM_ROUTER_DHD && QOS_MAP_SET */
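
/*
 * Illustrative call site (hypothetical): when an association response carries
 * a QoS Map Set IE, the caller passes the bcm_tlv_t so this ifp's
 * DSCP-to-user-priority table is (re)populated:
 *
 *	bcm_tlv_t *ie = bcm_parse_tlvs(body, body_len, DOT11_MNG_QOS_MAP_ID);
 *	if (ie != NULL)
 *		(void)dhd_set_qosmap_up_table(dhdp, ifidx, ie);
 */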
10419 
10420 static struct net_device_ops dhd_ops_pri = {
10421 	.ndo_open = dhd_pri_open,
10422 	.ndo_stop = dhd_pri_stop,
10423 	.ndo_get_stats = dhd_get_stats,
10424 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
10425 	.ndo_do_ioctl = dhd_ioctl_entry_wrapper,
10426 	.ndo_start_xmit = dhd_start_xmit_wrapper,
10427 #else
10428 	.ndo_do_ioctl = dhd_ioctl_entry,
10429 	.ndo_start_xmit = dhd_start_xmit,
10430 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
10431 	.ndo_set_mac_address = dhd_set_mac_address,
10432 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
10433 	.ndo_set_rx_mode = dhd_set_multicast_list,
10434 #else
10435 	.ndo_set_multicast_list = dhd_set_multicast_list,
10436 #endif
10437 #ifdef DHD_MQ
10438 	.ndo_select_queue = dhd_select_queue
10439 #endif
10440 };
10441 
10442 static struct net_device_ops dhd_ops_virt = {
10443 #if defined(WL_CFG80211) && defined(WL_STATIC_IF)
10444 	.ndo_open = dhd_static_if_open,
10445 	.ndo_stop = dhd_static_if_stop,
10446 #endif
10447 	.ndo_get_stats = dhd_get_stats,
10448 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
10449 	.ndo_do_ioctl = dhd_ioctl_entry_wrapper,
10450 	.ndo_start_xmit = dhd_start_xmit_wrapper,
10451 #else
10452 	.ndo_do_ioctl = dhd_ioctl_entry,
10453 	.ndo_start_xmit = dhd_start_xmit,
10454 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
10455 	.ndo_set_mac_address = dhd_set_mac_address,
10456 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
10457 	.ndo_set_rx_mode = dhd_set_multicast_list,
10458 #else
10459 	.ndo_set_multicast_list = dhd_set_multicast_list,
10460 #endif
10461 };
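
/*
 * Note (illustrative): these ops tables are not hooked up here. When the
 * interface is registered (dhd_register_if), each net_device is pointed at
 * the appropriate table, roughly:
 *
 *	net->netdev_ops = (ifidx == 0) ? &dhd_ops_pri : &dhd_ops_virt;
 */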
10462 
10463 #if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
10464 static void
10465 dhd_ctf_detach(ctf_t *ci, void *arg)
10466 {
10467 	dhd_info_t *dhd = (dhd_info_t *)arg;
10468 	dhd->cih = NULL;
10469 
10470 #ifdef CTFPOOL
10471 	/* free the buffers in fast pool */
10472 	osl_ctfpool_cleanup(dhd->pub.osh);
10473 #endif /* CTFPOOL */
10474 
10475 	return;
10476 }
10477 #endif /* BCM_ROUTER_DHD && HNDCTF */
10478 
10479 int
10480 dhd_os_write_file_posn(void *fp, unsigned long *posn, void *buf,
10481 		unsigned long buflen)
10482 {
10483 	loff_t wr_posn = *posn;
10484 
10485 	if (!fp || !buf || buflen == 0)
10486 		return -1;
10487 
10488 	if (vfs_write((struct file *)fp, buf, buflen, &wr_posn) < 0)
10489 		return -1;
10490 
10491 	*posn = wr_posn;
10492 	return 0;
10493 }
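
/*
 * Calling-pattern sketch (hypothetical names): the caller owns the position
 * cursor, so successive writes append without reopening the file.
 *
 *	unsigned long pos = 0;
 *
 *	if (dhd_os_write_file_posn(fp, &pos, hdr, hdr_len) == 0)
 *		(void)dhd_os_write_file_posn(fp, &pos, body, body_len);
 *	// on success, pos now equals hdr_len + body_len
 */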
10494 
10495 #ifdef SHOW_LOGTRACE
10496 int
10497 dhd_os_read_file(void *file, char *buf, uint32 size)
10498 {
10499 	struct file *filep = (struct file *)file;
10500 
10501 	if (!file || !buf)
10502 		return -1;
10503 
10504 	return vfs_read(filep, buf, size, &filep->f_pos);
10505 }
10506 
10507 int
10508 dhd_os_seek_file(void *file, int64 offset)
10509 {
10510 	struct file *filep = (struct file *)file;
10511 	if (!file)
10512 		return -1;
10513 
10514 	/* offset can be -ve */
10515 	filep->f_pos = filep->f_pos + offset;
10516 
10517 	return 0;
10518 }
10519 
10520 static int
10521 dhd_init_logstrs_array(osl_t *osh, dhd_event_log_t *temp)
10522 {
10523 	struct file *filep = NULL;
10524 	struct kstat stat;
10525 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
10526 	mm_segment_t fs;
10527 #endif
10528 	char *raw_fmts =  NULL;
10529 	int logstrs_size = 0;
10530 	int error = 0;
10531 
10532 	if (control_logtrace != LOGTRACE_PARSED_FMT) {
10533 		DHD_ERROR_NO_HW4(("%s : turned off logstr parsing\n", __FUNCTION__));
10534 		return BCME_ERROR;
10535 	}
10536 
10537 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
10538 	fs = get_fs();
10539 	set_fs(KERNEL_DS);
10540 #endif
10541 
10542 	filep = filp_open(logstrs_path, O_RDONLY, 0);
10543 
10544 	if (IS_ERR(filep)) {
10545 		DHD_ERROR_NO_HW4(("%s: Failed to open the file %s \n", __FUNCTION__, logstrs_path));
10546 		goto fail;
10547 	}
10548 	error = vfs_stat(logstrs_path, &stat);
10549 	if (error) {
10550 		DHD_ERROR_NO_HW4(("%s: Failed to stat file %s \n", __FUNCTION__, logstrs_path));
10551 		goto fail;
10552 	}
10553 	logstrs_size = (int) stat.size;
10554 
10555 	if (logstrs_size == 0) {
10556 		DHD_ERROR(("%s: return as logstrs_size is 0\n", __FUNCTION__));
10557 		goto fail1;
10558 	}
10559 
10560 	if (temp->raw_fmts != NULL) {
10561 		raw_fmts = temp->raw_fmts;	/* reuse already malloced raw_fmts */
10562 	} else {
10563 		raw_fmts = MALLOC(osh, logstrs_size);
10564 		if (raw_fmts == NULL) {
10565 			DHD_ERROR(("%s: Failed to allocate memory \n", __FUNCTION__));
10566 			goto fail;
10567 		}
10568 	}
10569 
10570 	if (vfs_read(filep, raw_fmts, logstrs_size, &filep->f_pos) != logstrs_size) {
10571 		DHD_ERROR_NO_HW4(("%s: Failed to read file %s\n", __FUNCTION__, logstrs_path));
10572 		goto fail;
10573 	}
10574 
10575 	if (dhd_parse_logstrs_file(osh, raw_fmts, logstrs_size, temp)
10576 				== BCME_OK) {
10577 		filp_close(filep, NULL);
10578 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
10579 		set_fs(fs);
10580 #endif
10581 		return BCME_OK;
10582 	}
10583 
10584 fail:
10585 	if (raw_fmts) {
10586 		MFREE(osh, raw_fmts, logstrs_size);
10587 	}
10588 	if (temp->fmts != NULL) {
10589 		MFREE(osh, temp->fmts, temp->num_fmts * sizeof(char *));
10590 	}
10591 
10592 fail1:
10593 	if (!IS_ERR(filep))
10594 		filp_close(filep, NULL);
10595 
10596 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
10597 	set_fs(fs);
10598 #endif
10599 	temp->fmts = NULL;
10600 	temp->raw_fmts = NULL;
10601 
10602 	return BCME_ERROR;
10603 }
10604 
10605 static int
10606 dhd_read_map(osl_t *osh, char *fname, uint32 *ramstart, uint32 *rodata_start,
10607 		uint32 *rodata_end)
10608 {
10609 	struct file *filep = NULL;
10610 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
10611 	mm_segment_t fs;
10612 #endif
10613 	int err = BCME_ERROR;
10614 
10615 	if (fname == NULL) {
10616 		DHD_ERROR(("%s: ERROR fname is NULL \n", __FUNCTION__));
10617 		return BCME_ERROR;
10618 	}
10619 
10620 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
10621 	fs = get_fs();
10622 	set_fs(KERNEL_DS);
10623 #endif
10624 
10625 	filep = filp_open(fname, O_RDONLY, 0);
10626 	if (IS_ERR(filep)) {
10627 		DHD_ERROR_NO_HW4(("%s: Failed to open %s \n",  __FUNCTION__, fname));
10628 		goto fail;
10629 	}
10630 
10631 	if ((err = dhd_parse_map_file(osh, filep, ramstart,
10632 			rodata_start, rodata_end)) < 0)
10633 		goto fail;
10634 
10635 fail:
10636 	if (!IS_ERR(filep))
10637 		filp_close(filep, NULL);
10638 
10639 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
10640 	set_fs(fs);
10641 #endif
10642 
10643 	return err;
10644 }
10645 #ifdef DHD_COREDUMP
10646 #define PC_FOUND_BIT 0x01
10647 #define LR_FOUND_BIT 0x02
10648 #define ALL_ADDR_VAL (PC_FOUND_BIT | LR_FOUND_BIT)
10649 #define READ_NUM_BYTES 1000
10650 #define DHD_FUNC_STR_LEN 80
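/*
 * dhd_lookup_map() below scans an nm(1)-style map file whose lines have the
 * form "<addr> <type> <symbol>", e.g. (hypothetical excerpt):
 *
 *	001e9f00 T wlc_recv
 *	001ea0c4 T wlc_sendpkt
 *
 * A pc of 0x1e9f40 falls in [wlc_recv, wlc_sendpkt) and is reported as
 * "wlc_recv+0x40"; an address that resolves to no symbol is printed raw
 * as "0x%08x".
 */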
10651 static int
10652 dhd_lookup_map(osl_t *osh, char *fname, uint32 pc, char *pc_fn,
10653 		uint32 lr, char *lr_fn)
10654 {
10655 	struct file *filep = NULL;
10656 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
10657 	mm_segment_t fs;
10658 #endif
10659 	char *raw_fmts = NULL, *raw_fmts_loc = NULL, *cptr = NULL;
10660 	uint32 read_size = READ_NUM_BYTES;
10661 	int err = BCME_ERROR;
10662 	uint32 addr = 0, addr1 = 0, addr2 = 0;
10663 	char type = '?', type1 = '?', type2 = '?';
10664 	char func[DHD_FUNC_STR_LEN] = "\0";
10665 	char func1[DHD_FUNC_STR_LEN] = "\0";
10666 	char func2[DHD_FUNC_STR_LEN] = "\0";
10667 	uint8 count = 0;
10668 	int num, len = 0, offset;
10669 
10670 	DHD_TRACE(("%s: fname %s pc 0x%x lr 0x%x \n",
10671 		__FUNCTION__, fname, pc, lr));
10672 	if (fname == NULL) {
10673 		DHD_ERROR(("%s: ERROR fname is NULL \n", __FUNCTION__));
10674 		return BCME_ERROR;
10675 	}
10676 
10677 	/* Allocate 1 byte more than read_size to terminate it with NULL */
10678 	raw_fmts = MALLOCZ(osh, read_size + 1);
10679 	if (raw_fmts == NULL) {
10680 		DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n",
10681 			__FUNCTION__));
10682 		return BCME_ERROR;
10683 	}
10684 
10685 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
10686 	fs = get_fs();
10687 	set_fs(KERNEL_DS);
10688 #endif
10689 
10690 	filep = filp_open(fname, O_RDONLY, 0);
10691 	if (IS_ERR(filep)) {
10692 		DHD_ERROR(("%s: Failed to open %s \n",  __FUNCTION__, fname));
10693 		goto fail;
10694 	}
10695 
10696 	if (pc_fn == NULL) {
10697 		count |= PC_FOUND_BIT;
10698 	}
10699 	if (lr_fn == NULL) {
10700 		count |= LR_FOUND_BIT;
10701 	}
10702 	while (count != ALL_ADDR_VAL)
10703 	{
10704 		err = dhd_os_read_file(filep, raw_fmts, read_size);
10705 		if (err < 0) {
10706 			DHD_ERROR(("%s: map file read failed err:%d \n",
10707 				__FUNCTION__, err));
10708 			goto fail;
10709 		}
10710 
10711 		/* NUL-terminate raw_fmts since the string routines below
10712 		* (bcmstrtok/strstr) expect NUL-terminated input
10713 		*/
10714 		raw_fmts[read_size] = '\0';
10715 		raw_fmts_loc = raw_fmts;
10716 		offset = 0;
10717 
10718 		while ((count != ALL_ADDR_VAL) && (offset < read_size))
10719 		{
10720 			cptr = bcmstrtok(&raw_fmts_loc, "\n", 0);
10721 			if (cptr == NULL) {
10722 				DHD_TRACE(("%s: cptr is NULL, offset %d"
10723 					" raw_fmts_loc %s \n",
10724 					__FUNCTION__, offset, raw_fmts_loc));
10725 				break;
10726 			}
10727 			DHD_TRACE(("%s: %s \n", __FUNCTION__, cptr));
10728 			if ((type2 == 'A') ||
10729 				(type2 == 'T') ||
10730 				(type2 == 'W')) {
10731 				addr1 = addr2;
10732 				type1 = type2;
10733 				(void)memcpy_s(func1, DHD_FUNC_STR_LEN,
10734 					func2, DHD_FUNC_STR_LEN);
10735 				DHD_TRACE(("%s: %x %c %s \n",
10736 					__FUNCTION__, addr1, type1, func1));
10737 			}
10738 			len = strlen(cptr);
10739 			num = sscanf(cptr, "%x %c %79s", &addr, &type, func);
10740 			DHD_TRACE(("%s: num %d addr %x type %c func %s \n",
10741 				__FUNCTION__, num, addr, type, func));
10742 			if (num == 3) {
10743 				addr2 = addr;
10744 				type2 = type;
10745 				(void)memcpy_s(func2, DHD_FUNC_STR_LEN,
10746 					func, DHD_FUNC_STR_LEN);
10747 			}
10748 
10749 			if (!(count & PC_FOUND_BIT) &&
10750 				(pc >= addr1 && pc < addr2)) {
10751 				if ((cptr = strchr(func1, '$')) != NULL) {
10752 					(void)strncpy(func, cptr + 1,
10753 						DHD_FUNC_STR_LEN - 1);
10754 				} else {
10755 					(void)memcpy_s(func, DHD_FUNC_STR_LEN,
10756 						func1, DHD_FUNC_STR_LEN);
10757 				}
10758 				if ((cptr = strstr(func, "__bcmromfn"))
10759 					!= NULL) {
10760 					*cptr = 0;
10761 				}
10762 				if (pc > addr1) {
10763 					sprintf(pc_fn, "%.68s+0x%x",
10764 						func, pc - addr1);
10765 				} else {
10766 					(void)memcpy_s(pc_fn, DHD_FUNC_STR_LEN,
10767 						func, DHD_FUNC_STR_LEN);
10768 				}
10769 				count |= PC_FOUND_BIT;
10770 				DHD_INFO(("%s: found addr1 %x pc %x"
10771 					" addr2 %x \n",
10772 					__FUNCTION__, addr1, pc, addr2));
10773 			}
10774 			if (!(count & LR_FOUND_BIT) &&
10775 				(lr >= addr1 && lr < addr2)) {
10776 				if ((cptr = strchr(func1, '$')) != NULL) {
10777 					(void)strncpy(func, cptr + 1,
10778 						DHD_FUNC_STR_LEN - 1);
10779 				} else {
10780 					(void)memcpy_s(func, DHD_FUNC_STR_LEN,
10781 						func1, DHD_FUNC_STR_LEN);
10782 				}
10783 				if ((cptr = strstr(func, "__bcmromfn"))
10784 					!= NULL) {
10785 					*cptr = 0;
10786 				}
10787 				if (lr > addr1) {
10788 					sprintf(lr_fn, "%.68s+0x%x",
10789 						func, lr - addr1);
10790 				} else {
10791 					(void)memcpy_s(lr_fn, DHD_FUNC_STR_LEN,
10792 						func, DHD_FUNC_STR_LEN);
10793 				}
10794 				count |= LR_FOUND_BIT;
10795 				DHD_INFO(("%s: found addr1 %x lr %x"
10796 					" addr2 %x \n",
10797 					__FUNCTION__, addr1, lr, addr2));
10798 			}
10799 			offset += (len + 1);
10800 		}
10801 
10802 		if (err < (int)read_size) {
10803 			/*
10804 			* Since the file position is rewound by one line's worth
10805 			* of bytes after each chunk, a full read never stops
10806 			* short of EOF on its own; a symbol split across chunks
10807 			* is re-read in the next pass. A return value smaller
10808 			* than read_size therefore means EOF was reached, so
10809 			* don't read further.
10810 			*/
10811 			break;
10812 		}
10813 		memset(raw_fmts, 0, read_size);
10814 		/*
10815 		* go back by one line's worth of bytes so that we won't miss
10816 		* a symbol and its address even if they are split across reads.
10817 		*/
10818 		dhd_os_seek_file(filep, -(len + 1));
10819 		DHD_TRACE(("%s: seek %d \n", __FUNCTION__, -(len + 1)));
10820 	}
10821 
10822 fail:
10823 	if (!IS_ERR(filep))
10824 		filp_close(filep, NULL);
10825 
10826 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
10827 	set_fs(fs);
10828 #endif
10829 
10830 	if (!(count & PC_FOUND_BIT)) {
10831 		sprintf(pc_fn, "0x%08x", pc);
10832 	}
10833 	if (!(count & LR_FOUND_BIT)) {
10834 		sprintf(lr_fn, "0x%08x", lr);
10835 	}
10836 	return err;
10837 }
10838 #endif /* DHD_COREDUMP */
10839 
10840 static int
10841 dhd_init_static_strs_array(osl_t *osh, dhd_event_log_t *temp, char *str_file, char *map_file)
10842 {
10843 	struct file *filep = NULL;
10844 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
10845 	mm_segment_t fs;
10846 #endif
10847 	char *raw_fmts =  NULL;
10848 	uint32 logstrs_size = 0;
10849 	int error = 0;
10850 	uint32 ramstart = 0;
10851 	uint32 rodata_start = 0;
10852 	uint32 rodata_end = 0;
10853 	uint32 logfilebase = 0;
10854 
10855 	error = dhd_read_map(osh, map_file, &ramstart, &rodata_start, &rodata_end);
10856 	if (error != BCME_OK) {
10857 		DHD_ERROR(("readmap Error!! \n"));
10858 		/* don't do event log parsing in actual case */
10859 		if (strstr(str_file, ram_file_str) != NULL) {
10860 			temp->raw_sstr = NULL;
10861 		} else if (strstr(str_file, rom_file_str) != NULL) {
10862 			temp->rom_raw_sstr = NULL;
10863 		}
10864 		return error;
10865 	}
10866 	DHD_ERROR(("ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n",
10867 		ramstart, rodata_start, rodata_end));
10868 
10869 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
10870 	fs = get_fs();
10871 	set_fs(KERNEL_DS);
10872 #endif
10873 
10874 	filep = filp_open(str_file, O_RDONLY, 0);
10875 	if (IS_ERR(filep)) {
10876 		DHD_ERROR(("%s: Failed to open the file %s \n",  __FUNCTION__, str_file));
10877 		goto fail;
10878 	}
10879 
10880 	if (TRUE) {
10881 		/* Full file size is huge. Just read required part */
10882 		logstrs_size = rodata_end - rodata_start;
10883 		logfilebase = rodata_start - ramstart;
10884 	}
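	/*
	 * Worked example (hypothetical values): with ramstart 0x170000,
	 * rodata_start 0x1b4c30 and rodata_end 0x1c0a60, only
	 * logstrs_size = 0xbe30 bytes are read, starting at file offset
	 * logfilebase = 0x44c30, rather than the whole firmware image.
	 */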
10885 
10886 	if (logstrs_size == 0) {
10887 		DHD_ERROR(("%s: return as logstrs_size is 0\n", __FUNCTION__));
10888 		goto fail1;
10889 	}
10890 
10891 	if (strstr(str_file, ram_file_str) != NULL && temp->raw_sstr != NULL) {
10892 		raw_fmts = temp->raw_sstr;	/* reuse already malloced raw_fmts */
10893 	} else if (strstr(str_file, rom_file_str) != NULL && temp->rom_raw_sstr != NULL) {
10894 		raw_fmts = temp->rom_raw_sstr;	/* reuse already malloced raw_fmts */
10895 	} else {
10896 		raw_fmts = MALLOC(osh, logstrs_size);
10897 
10898 		if (raw_fmts == NULL) {
10899 			DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
10900 			goto fail;
10901 		}
10902 	}
10903 
10904 	if (TRUE) {
10905 		error = generic_file_llseek(filep, logfilebase, SEEK_SET);
10906 		if (error < 0) {
10907 			DHD_ERROR(("%s: %s llseek failed %d \n", __FUNCTION__, str_file, error));
10908 			goto fail;
10909 		}
10910 	}
10911 
10912 	error = vfs_read(filep, raw_fmts, logstrs_size, (&filep->f_pos));
10913 	if (error != logstrs_size) {
10914 		DHD_ERROR(("%s: %s read failed %d \n", __FUNCTION__, str_file, error));
10915 		goto fail;
10916 	}
10917 
10918 	if (strstr(str_file, ram_file_str) != NULL) {
10919 		temp->raw_sstr = raw_fmts;
10920 		temp->raw_sstr_size = logstrs_size;
10921 		temp->rodata_start = rodata_start;
10922 		temp->rodata_end = rodata_end;
10923 	} else if (strstr(str_file, rom_file_str) != NULL) {
10924 		temp->rom_raw_sstr = raw_fmts;
10925 		temp->rom_raw_sstr_size = logstrs_size;
10926 		temp->rom_rodata_start = rodata_start;
10927 		temp->rom_rodata_end = rodata_end;
10928 	}
10929 
10930 	filp_close(filep, NULL);
10931 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
10932 	set_fs(fs);
10933 #endif
10934 
10935 	return BCME_OK;
10936 
10937 fail:
10938 	if (raw_fmts) {
10939 		MFREE(osh, raw_fmts, logstrs_size);
10940 	}
10941 
10942 fail1:
10943 	if (!IS_ERR(filep))
10944 		filp_close(filep, NULL);
10945 
10946 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
10947 	set_fs(fs);
10948 #endif
10949 
10950 	if (strstr(str_file, ram_file_str) != NULL) {
10951 		temp->raw_sstr = NULL;
10952 	} else if (strstr(str_file, rom_file_str) != NULL) {
10953 		temp->rom_raw_sstr = NULL;
10954 	}
10955 
10956 	return error;
10957 } /* dhd_init_static_strs_array */
10958 
10959 #endif /* SHOW_LOGTRACE */
10960 
10961 #ifdef BT_OVER_PCIE
10962 void request_bt_quiesce(bool quiesce) __attribute__ ((weak));
10963 void response_bt_quiesce(bool quiesce);
10964 
10965 static void (*request_bt_quiesce_ptr)(bool);
10966 typedef void (*response_bt_quiesce_ptr)(bool);
10967 
10968 response_bt_quiesce_ptr
10969 register_request_bt_quiesce(void (*fnc)(bool))
10970 {
10971 	request_bt_quiesce_ptr = fnc;
10972 	return response_bt_quiesce;
10973 }
10974 EXPORT_SYMBOL(register_request_bt_quiesce);
10975 
10976 void
10977 unregister_request_bt_quiesce(void)
10978 {
10979 	request_bt_quiesce_ptr = NULL;
10980 	return;
10981 }
10982 EXPORT_SYMBOL(unregister_request_bt_quiesce);
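
/*
 * Pairing sketch from the BT driver's side (hypothetical code): the BT driver
 * hands DHD its quiesce-request callback and receives the response hook in
 * return, then unhooks on unload.
 *
 *	static void bt_handle_quiesce_req(bool quiesce) { ... }
 *	static response_bt_quiesce_ptr wl_response;
 *
 *	wl_response = register_request_bt_quiesce(bt_handle_quiesce_req);
 *	...
 *	wl_response(TRUE);		// acknowledge completion of a quiesce
 *	unregister_request_bt_quiesce();
 */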
10983 #endif /* BT_OVER_PCIE */
10984 
10985 #ifdef DHD_ERPOM
10986 uint enable_erpom = 0;
10987 module_param(enable_erpom, int, 0);
10988 
10989 int
10990 dhd_wlan_power_off_handler(void *handler, unsigned char reason)
10991 {
10992 	dhd_pub_t *dhdp = (dhd_pub_t *)handler;
10993 	bool dongle_isolation = dhdp->dongle_isolation;
10994 
10995 	DHD_ERROR(("%s: WLAN DHD cleanup reason: %d\n", __FUNCTION__, reason));
10996 
10997 	if ((reason == BY_BT_DUE_TO_BT) || (reason == BY_BT_DUE_TO_WLAN)) {
10998 #if defined(DHD_FW_COREDUMP)
10999 		/* save core dump to a file */
11000 		if (dhdp->memdump_enabled) {
11001 #ifdef DHD_SSSR_DUMP
11002 			DHD_ERROR(("%s : Set collect_sssr as TRUE\n", __FUNCTION__));
11003 			dhdp->collect_sssr = TRUE;
11004 #endif /* DHD_SSSR_DUMP */
11005 			dhdp->memdump_type = DUMP_TYPE_DUE_TO_BT;
11006 			dhd_bus_mem_dump(dhdp);
11007 		}
11008 #endif /* DHD_FW_COREDUMP */
11009 	}
11010 
11011 	/* pause data on all the interfaces */
11012 	dhd_bus_stop_queue(dhdp->bus);
11013 
11014 	/* dhd_bus_devreset() would perform FLR again; set dongle_isolation to skip it */
11015 	dhdp->dongle_isolation = TRUE;
11016 	dhd_bus_devreset(dhdp, 1); /* DHD structure cleanup */
11017 	dhdp->dongle_isolation = dongle_isolation; /* Restore the old value */
11018 	return 0;
11019 }
11020 
11021 int
11022 dhd_wlan_power_on_handler(void *handler, unsigned char reason)
11023 {
11024 	dhd_pub_t *dhdp = (dhd_pub_t *)handler;
11025 	bool dongle_isolation = dhdp->dongle_isolation;
11026 
11027 	DHD_ERROR(("%s: WLAN DHD re-init reason: %d\n", __FUNCTION__, reason));
11028 	/* dhd_bus_devreset() would perform FLR again; set dongle_isolation to skip it */
11029 	dhdp->dongle_isolation = TRUE;
11030 	dhd_bus_devreset(dhdp, 0); /* DHD structure re-init */
11031 	dhdp->dongle_isolation = dongle_isolation; /* Restore the old value */
11032 	/* resume data on all the interfaces */
11033 	dhd_bus_start_queue(dhdp->bus);
11034 	return 0;
11035 
11036 }
11037 
11038 #endif /* DHD_ERPOM */
11039 
11040 #ifdef BCMDBUS
11041 uint
11042 dhd_get_rxsz(dhd_pub_t *pub)
11043 {
11044 	struct net_device *net = NULL;
11045 	dhd_info_t *dhd = NULL;
11046 	uint rxsz;
11047 
11048 	/* Assign rxsz for dbus_attach */
11049 	dhd = pub->info;
11050 	net = dhd->iflist[0]->net;
11051 	net->hard_header_len = ETH_HLEN + pub->hdrlen;
11052 	rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);
11053 
11054 	return rxsz;
11055 }
11056 
11057 void
11058 dhd_set_path(dhd_pub_t *pub)
11059 {
11060 	dhd_info_t *dhd = NULL;
11061 
11062 	dhd = pub->info;
11063 
11064 	/* try to download image and nvram to the dongle */
11065 	if (dhd_update_fw_nv_path(dhd) && dhd->pub.bus) {
11066 		DHD_INFO(("%s: fw %s, nv %s, conf %s\n",
11067 			__FUNCTION__, dhd->fw_path, dhd->nv_path, dhd->conf_path));
11068 		dhd_bus_update_fw_nv_path(dhd->pub.bus,
11069 				dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path);
11070 	}
11071 }
11072 #endif
11073 
11074 /** Called once for each hardware (dongle) instance that this DHD manages */
11075 dhd_pub_t *
11076 dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen
11077 #ifdef BCMDBUS
11078 	, void *data
11079 #endif
11080 )
11081 {
11082 	dhd_info_t *dhd = NULL;
11083 	struct net_device *net = NULL;
11084 	char if_name[IFNAMSIZ] = {'\0'};
11085 #ifdef SHOW_LOGTRACE
11086 	int ret;
11087 #endif /* SHOW_LOGTRACE */
11088 #ifdef DHD_ERPOM
11089 	pom_func_handler_t *pom_handler;
11090 #endif /* DHD_ERPOM */
11091 #if defined(BCMSDIO) || defined(BCMPCIE)
11092 	uint32 bus_type = -1;
11093 	uint32 bus_num = -1;
11094 	uint32 slot_num = -1;
11095 	wifi_adapter_info_t *adapter = NULL;
11096 #elif defined(BCMDBUS)
11097 	wifi_adapter_info_t *adapter = data;
11098 #endif
11099 
11100 	dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT;
11101 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
11102 
11103 #ifdef PCIE_FULL_DONGLE
11104 	ASSERT(sizeof(dhd_pkttag_fd_t) <= OSL_PKTTAG_SZ);
11105 	ASSERT(sizeof(dhd_pkttag_fr_t) <= OSL_PKTTAG_SZ);
11106 #endif /* PCIE_FULL_DONGLE */
11107 
11108 	/* will implement get_ids for DBUS later */
11109 #if defined(BCMSDIO) || defined(BCMPCIE)
11110 	dhd_bus_get_ids(bus, &bus_type, &bus_num, &slot_num);
11111 	adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
11112 #endif
11113 
11114 	/* Allocate primary dhd_info */
11115 	dhd = wifi_platform_prealloc(adapter, DHD_PREALLOC_DHD_INFO, sizeof(dhd_info_t));
11116 	if (dhd == NULL) {
11117 		dhd = MALLOC(osh, sizeof(dhd_info_t));
11118 		if (dhd == NULL) {
11119 			DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
11120 			goto dhd_null_flag;
11121 		}
11122 	}
11123 	memset(dhd, 0, sizeof(dhd_info_t));
11124 	dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC;
11125 
11126 	dhd->unit = dhd_found + instance_base; /* do not increment dhd_found, yet */
11127 
11128 	dhd->pub.osh = osh;
11129 #ifdef DUMP_IOCTL_IOV_LIST
11130 	dll_init(&(dhd->pub.dump_iovlist_head));
11131 #endif /* DUMP_IOCTL_IOV_LIST */
11132 
11133 	dhd->pub.dhd_console_ms = dhd_console_ms; /* assigns default value */
11134 
11135 	dhd->adapter = adapter;
11136 	dhd->pub.adapter = (void *)adapter;
11137 #ifdef BT_OVER_SDIO
11138 	dhd->pub.is_bt_recovery_required = FALSE;
11139 	mutex_init(&dhd->bus_user_lock);
11140 #endif /* BT_OVER_SDIO */
11141 
11142 	g_dhd_pub = &dhd->pub;
11143 
11144 #ifdef DHD_DEBUG
11145 	dll_init(&(dhd->pub.mw_list_head));
11146 #endif /* DHD_DEBUG */
11147 
11148 #ifdef CUSTOM_FORCE_NODFS_FLAG
11149 	dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
11150 	dhd->pub.force_country_change = TRUE;
11151 #endif /* CUSTOM_FORCE_NODFS_FLAG */
11152 #ifdef CUSTOM_COUNTRY_CODE
11153 	get_customized_country_code(dhd->adapter,
11154 		dhd->pub.dhd_cspec.country_abbrev, &dhd->pub.dhd_cspec,
11155 		dhd->pub.dhd_cflags);
11156 #endif /* CUSTOM_COUNTRY_CODE */
11157 #ifndef BCMDBUS
11158 	dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID;
11159 	dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID;
11160 #ifdef DHD_WET
11161 	dhd->pub.wet_info = dhd_get_wet_info(&dhd->pub);
11162 #endif /* DHD_WET */
11163 #ifdef WL_NANHO
11164 	/* initialize NANHO host module */
11165 	if (bcm_nanho_init(&dhd->pub.nanhoi, &dhd->pub,
11166 			dhd_nho_ioctl_cb, dhd_nho_evt_cb, NULL) != BCME_OK) {
11167 		goto fail;
11168 	}
11169 #endif /* WL_NANHO */
11170 	/* Initialize thread based operation and lock */
11171 	sema_init(&dhd->sdsem, 1);
11172 #endif /* BCMDBUS */
11173 #if defined(WL_MONITOR) && defined(HOST_RADIOTAP_CONV)
11174 	dhd->host_radiotap_conv = FALSE;
11175 #endif /* WL_MONITOR */
11176 	dhd->pub.pcie_txs_metadata_enable = pcie_txs_metadata_enable;
11177 
11178 	/* Link to info module */
11179 	dhd->pub.info = dhd;
11180 
11181 	/* Link to bus module */
11182 	dhd->pub.bus = bus;
11183 	dhd->pub.hdrlen = bus_hdrlen;
11184 	dhd->pub.txoff = FALSE;
11185 #ifdef CHECK_TRAP_ROT
11186 	dhd->pub.check_trap_rot = TRUE;
11187 #else
11188 	dhd->pub.check_trap_rot = FALSE;
11189 #endif /* CHECK_TRAP_ROT */
11190 
11191 	/* dhd_conf must be attached after linking dhd to dhd->pub.info,
11192 	 * because dhd_detach checks whether .info is NULL.
11193 	 */
11194 	if (dhd_conf_attach(&dhd->pub) != 0) {
11195 		DHD_ERROR(("dhd_conf_attach failed\n"));
11196 		goto fail;
11197 	}
11198 #ifndef BCMDBUS
11199 	dhd_conf_reset(&dhd->pub);
11200 	dhd_conf_set_chiprev(&dhd->pub, dhd_bus_chip(bus), dhd_bus_chiprev(bus));
11201 	dhd_conf_preinit(&dhd->pub);
11202 #endif /* !BCMDBUS */
11203 
11204 	/* Some DHD modules (e.g. cfg80211) configure the operation mode based on the
11205 	 * firmware name. This is admittedly a hack, but it has to work until a better
11206 	 * solution is available.
11207 	 */
11208 	dhd_update_fw_nv_path(dhd);
11209 
11210 	/* Set network interface name if it was provided as module parameter */
11211 	if (iface_name[0]) {
11212 		int len;
11213 		char ch;
11214 		strlcpy(if_name, iface_name, sizeof(if_name));
11215 		len = strlen(if_name);
11216 		ch = if_name[len - 1];
11217 		if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2)) {
11218 			strncat(if_name, "%d", sizeof(if_name) - len - 1);
11219 		}
11220 	}
11221 
11222 	/* Passing NULL to dngl_name to ensure host gets if_name in dngl_name member */
11223 	net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE, NULL);
11224 	if (net == NULL) {
11225 		goto fail;
11226 	}
11227 	mutex_init(&dhd->pub.ndev_op_sync);
11228 
11229 	dhd_state |= DHD_ATTACH_STATE_ADD_IF;
11230 #ifdef DHD_L2_FILTER
11231 	/* initialize the l2_filter_cnt */
11232 	dhd->pub.l2_filter_cnt = 0;
11233 #endif
11234 	net->netdev_ops = NULL;
11235 
11236 	mutex_init(&dhd->dhd_iovar_mutex);
11237 	sema_init(&dhd->proto_sem, 1);
11238 
11239 #if defined(DHD_HANG_SEND_UP_TEST)
11240 	dhd->pub.req_hang_type = 0;
11241 #endif /* DHD_HANG_SEND_UP_TEST */
11242 
11243 #ifdef PROP_TXSTATUS
11244 	spin_lock_init(&dhd->wlfc_spinlock);
11245 
11246 	dhd->pub.skip_fc = dhd_wlfc_skip_fc;
11247 	dhd->pub.plat_init = dhd_wlfc_plat_init;
11248 	dhd->pub.plat_deinit = dhd_wlfc_plat_deinit;
11249 
11250 #ifdef DHD_WLFC_THREAD
11251 	init_waitqueue_head(&dhd->pub.wlfc_wqhead);
11252 	dhd->pub.wlfc_thread = kthread_create(dhd_wlfc_transfer_packets, &dhd->pub, "wlfc-thread");
11253 	if (IS_ERR(dhd->pub.wlfc_thread)) {
11254 		DHD_ERROR(("create wlfc thread failed\n"));
11255 		goto fail;
11256 	} else {
11257 		wake_up_process(dhd->pub.wlfc_thread);
11258 	}
11259 #endif /* DHD_WLFC_THREAD */
11260 #endif /* PROP_TXSTATUS */
11261 
11262 	/* Initialize other structure content */
11263 	/* XXX Some of this goes away, leftover from USB */
11264 	/* XXX Some could also move to bus_init()? */
11265 	init_waitqueue_head(&dhd->ioctl_resp_wait);
11266 	init_waitqueue_head(&dhd->pub.tx_tput_test_wait);
11267 	init_waitqueue_head(&dhd->d3ack_wait);
11268 #ifdef PCIE_INB_DW
11269 	init_waitqueue_head(&dhd->ds_exit_wait);
11270 #endif /* PCIE_INB_DW */
11271 	init_waitqueue_head(&dhd->ctrl_wait);
11272 	init_waitqueue_head(&dhd->dhd_bus_busy_state_wait);
11273 	init_waitqueue_head(&dhd->dmaxfer_wait);
11274 #ifdef BT_OVER_PCIE
11275 	init_waitqueue_head(&dhd->quiesce_wait);
11276 #endif /* BT_OVER_PCIE */
11277 	init_waitqueue_head(&dhd->pub.tx_completion_wait);
11278 	dhd->pub.dhd_bus_busy_state = 0;
11279 	/* Initialize the spinlocks */
11280 	spin_lock_init(&dhd->sdlock);
11281 	spin_lock_init(&dhd->txqlock);
11282 	spin_lock_init(&dhd->dhd_lock);
11283 	spin_lock_init(&dhd->rxf_lock);
11284 #ifdef WLTDLS
11285 	spin_lock_init(&dhd->pub.tdls_lock);
11286 #endif /* WLTDLS */
11287 #if defined(RXFRAME_THREAD)
11288 	dhd->rxthread_enabled = TRUE;
11289 #endif /* defined(RXFRAME_THREAD) */
11290 
11291 #ifdef DHDTCPACK_SUPPRESS
11292 	spin_lock_init(&dhd->tcpack_lock);
11293 #endif /* DHDTCPACK_SUPPRESS */
11294 
11295 #ifdef DHD_HP2P
11296 	spin_lock_init(&dhd->hp2p_lock);
11297 #endif
11298 	/* Initialize Wakelock stuff */
11299 	spin_lock_init(&dhd->wakelock_spinlock);
11300 	spin_lock_init(&dhd->wakelock_evt_spinlock);
11301 	DHD_OS_WAKE_LOCK_INIT(dhd);
11302 	dhd->wakelock_counter = 0;
11303 	/* wakelocks prevent a system from going into a low power state */
11304 #ifdef CONFIG_HAS_WAKELOCK
11305 	// terence 20161023: do not destroy wl_wifi on wlan down; doing so leads to a NULL pointer dereference in dhd_ioctl_entry
11306 	dhd_wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
11307 	dhd_wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake");
11308 #endif /* CONFIG_HAS_WAKELOCK */
11309 
11310 #if defined(OEM_ANDROID)
11311 	mutex_init(&dhd->dhd_net_if_mutex);
11312 	mutex_init(&dhd->dhd_suspend_mutex);
11313 #if defined(PKT_FILTER_SUPPORT) && defined(APF)
11314 	mutex_init(&dhd->dhd_apf_mutex);
11315 #endif /* PKT_FILTER_SUPPORT && APF */
11316 #endif /* defined(OEM_ANDROID) */
11317 	dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
11318 
11319 	/* Attach and link in the protocol */
11320 	if (dhd_prot_attach(&dhd->pub) != 0) {
11321 		DHD_ERROR(("dhd_prot_attach failed\n"));
11322 		goto fail;
11323 	}
11324 	dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH;
11325 
11326 #ifdef DHD_TIMESYNC
11327 	/* attach the timesync module */
11328 	if (dhd_timesync_attach(&dhd->pub) != 0) {
11329 		DHD_ERROR(("dhd_timesync_attach failed\n"));
11330 		goto fail;
11331 	}
11332 	dhd_state |= DHD_ATTACH_TIMESYNC_ATTACH_DONE;
11333 #endif /* DHD_TIMESYNC */
11334 
11335 #ifdef WL_CFG80211
11336 	spin_lock_init(&dhd->pub.up_lock);
11337 	/* Attach and link in the cfg80211 */
11338 	if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) {
11339 		DHD_ERROR(("wl_cfg80211_attach failed\n"));
11340 		goto fail;
11341 	}
11342 
11343 	dhd_monitor_init(&dhd->pub);
11344 	dhd_state |= DHD_ATTACH_STATE_CFG80211;
11345 #endif
11346 
11347 #ifdef WL_EVENT
11348 	if (wl_ext_event_attach(net, &dhd->pub) != 0) {
11349 		DHD_ERROR(("wl_ext_event_attach failed\n"));
11350 		goto fail;
11351 	}
11352 #endif /* WL_EVENT */
11353 #ifdef WL_ESCAN
11354 	/* Attach and link in the escan */
11355 	if (wl_escan_attach(net, &dhd->pub) != 0) {
11356 		DHD_ERROR(("wl_escan_attach failed\n"));
11357 		goto fail;
11358 	}
11359 #endif /* WL_ESCAN */
11360 #ifdef WL_EXT_IAPSTA
11361 	if (wl_ext_iapsta_attach(&dhd->pub) != 0) {
11362 		DHD_ERROR(("wl_ext_iapsta_attach failed\n"));
11363 		goto fail;
11364 	}
11365 #endif /* WL_EXT_IAPSTA */
11366 #ifdef WL_EXT_GENL
11367 	if (wl_ext_genl_init(net)) {
11368 		DHD_ERROR(("wl_ext_genl_init failed\n"));
11369 		goto fail;
11370 	}
11371 #endif
11372 #if defined(WL_WIRELESS_EXT)
11373 	/* Attach and link in the iw */
11374 	if (wl_iw_attach(net, &dhd->pub) != 0) {
11375 		DHD_ERROR(("wl_iw_attach failed\n"));
11376 		goto fail;
11377 	}
11378 	dhd_state |= DHD_ATTACH_STATE_WL_ATTACH;
11379 #endif /* defined(WL_WIRELESS_EXT) */
11380 
11381 #ifdef SHOW_LOGTRACE
11382 	ret = dhd_init_logstrs_array(osh, &dhd->event_data);
11383 	if (ret == BCME_OK) {
11384 		dhd_init_static_strs_array(osh, &dhd->event_data, st_str_file_path, map_file_path);
11385 		dhd_init_static_strs_array(osh, &dhd->event_data, rom_st_str_file_path,
11386 			rom_map_file_path);
11387 		dhd_state |= DHD_ATTACH_LOGTRACE_INIT;
11388 	}
11389 #endif /* SHOW_LOGTRACE */
11390 
11391 	/* attach debug if support */
11392 	if (dhd_os_dbg_attach(&dhd->pub)) {
11393 		DHD_ERROR(("%s debug module attach failed\n", __FUNCTION__));
11394 		goto fail;
11395 	}
11396 #ifdef DEBUGABILITY
11397 #if !defined(OEM_ANDROID) && defined(SHOW_LOGTRACE)
11398 	/* enable verbose ring to support dump_trace_buf */
11399 	dhd_os_start_logging(&dhd->pub, FW_VERBOSE_RING_NAME, 3, 0, 0, 0);
11400 #endif /* !OEM_ANDROID && SHOW_LOGTRACE */
11401 
11402 #if !defined(OEM_ANDROID) && defined(BTLOG)
11403 	/* enable bt log ring to support dump_bt_log */
11404 	dhd_os_start_logging(&dhd->pub, BT_LOG_RING_NAME, 3, 0, 0, 0);
11405 #endif /* !OEM_ANDROID && BTLOG */
11406 #ifdef DBG_PKT_MON
11407 	dhd->pub.dbg->pkt_mon_lock = osl_spin_lock_init(dhd->pub.osh);
11408 #ifdef DBG_PKT_MON_INIT_DEFAULT
11409 	dhd_os_dbg_attach_pkt_monitor(&dhd->pub);
11410 #endif /* DBG_PKT_MON_INIT_DEFAULT */
11411 #endif /* DBG_PKT_MON */
11412 
11413 #endif /* DEBUGABILITY */
11414 
11415 #ifdef DHD_MEM_STATS
11416 	dhd->pub.mem_stats_lock = osl_spin_lock_init(dhd->pub.osh);
11417 	dhd->pub.txpath_mem = 0;
11418 	dhd->pub.rxpath_mem = 0;
11419 #endif /* DHD_MEM_STATS */
11420 
11421 #if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
11422 	dhd->pub.awdl_stats_lock = osl_spin_lock_init(dhd->pub.osh);
11423 #endif /* DHD_AWDL && AWDL_SLOT_STATS */
11424 
11425 #ifdef DHD_STATUS_LOGGING
11426 	dhd->pub.statlog = dhd_attach_statlog(&dhd->pub, MAX_STATLOG_ITEM,
11427 		MAX_STATLOG_REQ_ITEM, STATLOG_LOGBUF_LEN);
11428 	if (dhd->pub.statlog == NULL) {
11429 		DHD_ERROR(("%s: alloc statlog failed\n", __FUNCTION__));
11430 	}
11431 #endif /* DHD_STATUS_LOGGING */
11432 
11433 #ifdef DHD_LOG_DUMP
11434 	dhd_log_dump_init(&dhd->pub);
11435 #endif /* DHD_LOG_DUMP */
11436 #ifdef DHD_PKTDUMP_ROAM
11437 	dhd_dump_pkt_init(&dhd->pub);
11438 #endif /* DHD_PKTDUMP_ROAM */
11439 #ifdef DHD_PKT_LOGGING
11440 	dhd_os_attach_pktlog(&dhd->pub);
11441 #endif /* DHD_PKT_LOGGING */
11442 
11443 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
11444 	dhd->pub.hang_info = MALLOCZ(osh, VENDOR_SEND_HANG_EXT_INFO_LEN);
11445 	if (dhd->pub.hang_info == NULL) {
11446 		DHD_ERROR(("%s: alloc hang_info failed\n", __FUNCTION__));
11447 	}
11448 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
11449 	if (dhd_sta_pool_init(&dhd->pub, DHD_MAX_STA) != BCME_OK) {
11450 		DHD_ERROR(("%s: Initializing %u sta\n", __FUNCTION__, DHD_MAX_STA));
11451 		goto fail;
11452 	}
11453 
11454 #ifdef BCM_ROUTER_DHD
11455 #if defined(HNDCTF)
11456 	dhd->cih = ctf_attach(dhd->pub.osh, "dhd", &dhd_msg_level, dhd_ctf_detach, dhd);
11457 	if (!dhd->cih) {
11458 		DHD_ERROR(("%s: ctf_attach() failed\n", __FUNCTION__));
11459 	}
11460 #ifdef CTFPOOL
11461 	{
11462 		int poolsz = RXBUFPOOLSZ;
11463 		if (CTF_ENAB(dhd->cih) && (osl_ctfpool_init(dhd->pub.osh,
11464 			poolsz, RXBUFSZ + BCMEXTRAHDROOM) < 0)) {
11465 			DHD_ERROR(("%s: osl_ctfpool_init() failed\n", __FUNCTION__));
11466 		}
11467 	}
11468 #endif /* CTFPOOL */
11469 #endif /* HNDCTF */
11470 #endif /* BCM_ROUTER_DHD */
11471 
11472 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
11473 	dhd->tx_wq = alloc_workqueue("bcmdhd-tx-wq", WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
11474 	if (!dhd->tx_wq) {
11475 		DHD_ERROR(("%s: alloc_workqueue(bcmdhd-tx-wq) failed\n", __FUNCTION__));
11476 		goto fail;
11477 	}
11478 	dhd->rx_wq = alloc_workqueue("bcmdhd-rx-wq", WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
11479 	if (!dhd->rx_wq) {
11480 		DHD_ERROR(("%s: alloc_workqueue(bcmdhd-rx-wq) failed\n", __FUNCTION__));
11481 		destroy_workqueue(dhd->tx_wq);
11482 		dhd->tx_wq = NULL;
11483 		goto fail;
11484 	}
11485 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
11486 
11487 #ifndef BCMDBUS
11488 	/* Set up the watchdog timer */
11489 	init_timer_compat(&dhd->timer, dhd_watchdog, dhd);
11490 	dhd->default_wd_interval = dhd_watchdog_ms;
11491 
11492 	if (dhd_watchdog_prio >= 0) {
11493 		/* Initialize watchdog thread */
11494 		PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0, "dhd_watchdog_thread");
11495 		if (dhd->thr_wdt_ctl.thr_pid < 0) {
11496 			goto fail;
11497 		}
11498 
11499 	} else {
11500 		dhd->thr_wdt_ctl.thr_pid = -1;
11501 	}
11502 
11503 #ifdef DHD_PCIE_RUNTIMEPM
11504 	/* Set up the runtime PM idle-count timer */
11505 	init_timer_compat(&dhd->rpm_timer, dhd_runtimepm, dhd);
11506 	dhd->rpm_timer_valid = FALSE;
11507 
11508 	dhd->thr_rpm_ctl.thr_pid = DHD_PID_KT_INVALID;
11509 	PROC_START(dhd_rpm_state_thread, dhd, &dhd->thr_rpm_ctl, 0, "dhd_rpm_state_thread");
11510 	if (dhd->thr_rpm_ctl.thr_pid < 0) {
11511 		goto fail;
11512 	}
11513 #endif /* DHD_PCIE_RUNTIMEPM */
11514 
11515 #ifdef SHOW_LOGTRACE
11516 	skb_queue_head_init(&dhd->evt_trace_queue);
11517 
11518 	/* Create ring proc entries */
11519 	dhd_dbg_ring_proc_create(&dhd->pub);
11520 #endif /* SHOW_LOGTRACE */
11521 
11522 #ifdef BTLOG
11523 	skb_queue_head_init(&dhd->bt_log_queue);
11524 #endif /* BTLOG */
11525 
11526 #ifdef BT_OVER_PCIE
11527 	mutex_init(&dhd->quiesce_flr_lock);
11528 	mutex_init(&dhd->quiesce_lock);
11529 #endif
11530 
11531 	/* Set up the bottom half handler */
11532 	if (dhd_dpc_prio >= 0) {
11533 		/* Initialize DPC thread */
11534 		PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0, "dhd_dpc");
11535 		if (dhd->thr_dpc_ctl.thr_pid < 0) {
11536 			goto fail;
11537 		}
11538 	} else {
11539 		/*  use tasklet for dpc */
11540 		tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
11541 		dhd->thr_dpc_ctl.thr_pid = -1;
11542 	}
11543 
11544 	if (dhd->rxthread_enabled) {
11545 		bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND);
11546 		/* Initialize RXF thread */
11547 		PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf");
11548 		if (dhd->thr_rxf_ctl.thr_pid < 0) {
11549 			goto fail;
11550 		}
11551 	}
11552 #endif /* !BCMDBUS */
11553 
11554 	dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED;
11555 
11556 #if defined(CONFIG_PM_SLEEP)
11557 	if (!dhd_pm_notifier_registered) {
11558 		dhd_pm_notifier_registered = TRUE;
11559 		dhd->pm_notifier.notifier_call = dhd_pm_callback;
11560 		dhd->pm_notifier.priority = 10;
11561 		register_pm_notifier(&dhd->pm_notifier);
11562 	}
11563 
11564 #endif /* CONFIG_PM_SLEEP */
11565 
11566 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
11567 	dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
11568 	dhd->early_suspend.suspend = dhd_early_suspend;
11569 	dhd->early_suspend.resume = dhd_late_resume;
11570 	register_early_suspend(&dhd->early_suspend);
11571 	dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE;
11572 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
11573 
11574 #ifdef ARP_OFFLOAD_SUPPORT
11575 	dhd->pend_ipaddr = 0;
11576 	if (!dhd_inetaddr_notifier_registered) {
11577 		dhd_inetaddr_notifier_registered = TRUE;
11578 		register_inetaddr_notifier(&dhd_inetaddr_notifier);
11579 	}
11580 #endif /* ARP_OFFLOAD_SUPPORT */
11581 
11582 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
11583 	if (!dhd_inet6addr_notifier_registered) {
11584 		dhd_inet6addr_notifier_registered = TRUE;
11585 		register_inet6addr_notifier(&dhd_inet6addr_notifier);
11586 	}
11587 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
11588 	dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd);
11589 #if defined (OEM_ANDROID)
11590 	INIT_WORK(&dhd->dhd_hang_process_work, dhd_hang_process);
11591 #endif /* OEM_ANDROID */
11592 #ifdef DEBUG_CPU_FREQ
11593 	dhd->new_freq = alloc_percpu(int);
11594 	dhd->freq_trans.notifier_call = dhd_cpufreq_notifier;
11595 	cpufreq_register_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
11596 #endif
11597 #ifdef DHDTCPACK_SUPPRESS
11598 #ifdef BCMSDIO
11599 	dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_DELAYTX);
11600 #elif defined(BCMPCIE)
11601 	/* xxx : In case of PCIe based Samsung Android project, enable TCP ACK Suppress
11602 	 *       when throughput is higher than threshold, following rps_cpus setting.
11603 	 */
11604 	dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_HOLD);
11605 #else
11606 	dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
11607 #endif /* BCMSDIO */
11608 #endif /* DHDTCPACK_SUPPRESS */
11609 
11610 #if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
11611 #endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
11612 
11613 #ifdef DHD_DEBUG_PAGEALLOC
11614 	register_page_corrupt_cb(dhd_page_corrupt_cb, &dhd->pub);
11615 #endif /* DHD_DEBUG_PAGEALLOC */
11616 
11617 	INIT_DELAYED_WORK(&dhd->dhd_dpc_dispatcher_work, dhd_dpc_tasklet_dispatcher_work);
11618 
11619 #if defined(DHD_LB)
11620 #if defined(DHD_LB_HOST_CTRL)
11621 	dhd->permitted_primary_cpu = FALSE;
11622 #endif /* DHD_LB_HOST_CTRL */
11623 	dhd_lb_set_default_cpus(dhd);
11624 	DHD_LB_STATS_INIT(&dhd->pub);
11625 
11626 	/* Initialize the CPU Masks */
11627 	if (dhd_cpumasks_init(dhd) == 0) {
11628 		/* Now we have the current CPU maps, run through candidacy */
11629 		dhd_select_cpu_candidacy(dhd);
11630 
11631 		/* Register the call backs to CPU Hotplug sub-system */
11632 		dhd_register_cpuhp_callback(dhd);
11633 
11634 	} else {
11635 		/*
11636 		* CPU masks could not be initialized, so the candidacy algorithm
11637 		* will not run; load balancing is still honoured, but only with
11638 		* the CPUs statically allocated to each job at init time.
11639 		*/
11640 		dhd->cpu_notifier.notifier_call = NULL;
11641 		DHD_ERROR(("%s():dhd_cpumasks_init failed CPUs for JOB would be static\n",
11642 			__FUNCTION__));
11643 	}
11644 
11645 #ifdef DHD_LB_TXP
11646 #ifdef DHD_LB_TXP_DEFAULT_ENAB
11647 	/* Turn ON the feature by default */
11648 	atomic_set(&dhd->lb_txp_active, 1);
11649 #else
11650 	/* Turn OFF the feature by default */
11651 	atomic_set(&dhd->lb_txp_active, 0);
11652 #endif /* DHD_LB_TXP_DEFAULT_ENAB */
11653 #endif /* DHD_LB_TXP */
11654 
11655 #ifdef DHD_LB_RXP
11656 	/* Turn ON the feature by default */
11657 	atomic_set(&dhd->lb_rxp_active, 1);
11658 #endif /* DHD_LB_RXP */
11659 
11660 	/* Initialize the Load Balancing Tasklets and Napi object */
11661 #if defined(DHD_LB_RXP)
11662 	__skb_queue_head_init(&dhd->rx_pend_queue);
11663 	skb_queue_head_init(&dhd->rx_napi_queue);
11664 	__skb_queue_head_init(&dhd->rx_process_queue);
11665 	/* Initialize the work that dispatches NAPI job to a given core */
11666 	INIT_WORK(&dhd->rx_napi_dispatcher_work, dhd_rx_napi_dispatcher_work);
11667 	DHD_INFO(("%s load balance init rx_napi_queue\n", __FUNCTION__));
11668 	/* Initialize the work that dispatches DPC tasklet to a given core */
11669 #endif /* DHD_LB_RXP */
11670 
11671 #if defined(DHD_LB_TXP)
11672 	INIT_WORK(&dhd->tx_dispatcher_work, dhd_tx_dispatcher_work);
11673 	skb_queue_head_init(&dhd->tx_pend_queue);
11674 	/* Initialize the work that dispatches TX job to a given core */
11675 	tasklet_init(&dhd->tx_tasklet,
11676 		dhd_lb_tx_handler, (ulong)(dhd));
11677 	DHD_INFO(("%s load balance init tx_pend_queue\n", __FUNCTION__));
11678 #endif /* DHD_LB_TXP */
11679 
11680 	dhd_state |= DHD_ATTACH_STATE_LB_ATTACH_DONE;
11681 #endif /* DHD_LB */
11682 
11683 #if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
11684 	INIT_WORK(&dhd->axi_error_dispatcher_work, dhd_axi_error_dispatcher_fn);
11685 #endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
11686 
11687 #ifdef BCMDBG
11688 	if (dhd_macdbg_attach(&dhd->pub) != BCME_OK) {
11689 		DHD_ERROR(("%s: dhd_macdbg_attach fail\n", __FUNCTION__));
11690 		goto fail;
11691 	}
11692 #endif /* BCMDBG */
11693 
11694 #ifdef REPORT_FATAL_TIMEOUTS
11695 	init_dhd_timeouts(&dhd->pub);
11696 #endif /* REPORT_FATAL_TIMEOUTS */
11697 #if defined(BCMPCIE)
11698 	dhd->pub.extended_trap_data = MALLOCZ(osh, BCMPCIE_EXT_TRAP_DATA_MAXLEN);
11699 	if (dhd->pub.extended_trap_data == NULL) {
11700 		DHD_ERROR(("%s: Failed to alloc extended_trap_data\n", __FUNCTION__));
11701 	}
11702 #ifdef DNGL_AXI_ERROR_LOGGING
11703 	dhd->pub.axi_err_dump = MALLOCZ(osh, sizeof(dhd_axi_error_dump_t));
11704 	if (dhd->pub.axi_err_dump == NULL) {
11705 		DHD_ERROR(("%s: Failed to alloc axi_err_dump\n", __FUNCTION__));
11706 	}
11707 #endif /* DNGL_AXI_ERROR_LOGGING */
11708 #endif /* BCMPCIE */
11709 
11710 #ifdef SHOW_LOGTRACE
11711 	if (dhd_init_logtrace_process(dhd) != BCME_OK) {
11712 		goto fail;
11713 	}
11714 #endif /* SHOW_LOGTRACE */
11715 
11716 #ifdef BTLOG
11717 	INIT_WORK(&dhd->bt_log_dispatcher_work, dhd_bt_log_process);
11718 #endif /* BTLOG */
11719 
11720 #ifdef EWP_EDL
11721 	INIT_DELAYED_WORK(&dhd->edl_dispatcher_work, dhd_edl_process_work);
11722 #endif
11723 
11724 	DHD_SSSR_MEMPOOL_INIT(&dhd->pub);
11725 	DHD_SSSR_REG_INFO_INIT(&dhd->pub);
11726 
11727 #ifdef DHD_SDTC_ETB_DUMP
11728 	dhd_sdtc_etb_mempool_init(&dhd->pub);
11729 #endif /* DHD_SDTC_ETB_DUMP */
11730 
11731 #ifdef EWP_EDL
11732 	if (host_edl_support) {
11733 		if (DHD_EDL_MEM_INIT(&dhd->pub) != BCME_OK) {
11734 			host_edl_support = FALSE;
11735 		}
11736 	}
11737 #endif /* EWP_EDL */
11738 
11739 	dhd_init_sock_flows_buf(dhd, dhd_watchdog_ms);
11740 
11741 	(void)dhd_sysfs_init(dhd);
11742 
11743 #ifdef WL_NATOE
11744 	/* Open Netlink socket for NF_CONNTRACK notifications */
11745 	dhd->pub.nfct = dhd_ct_open(&dhd->pub, NFNL_SUBSYS_CTNETLINK | NFNL_SUBSYS_CTNETLINK_EXP,
11746 			CT_ALL);
11747 #endif /* WL_NATOE */
11748 #ifdef GDB_PROXY
11749 	dhd->pub.gdb_proxy_nodeadman = nodeadman != 0;
11750 #endif /* GDB_PROXY */
11751 	dhd_state |= DHD_ATTACH_STATE_DONE;
11752 	dhd->dhd_state = dhd_state;
11753 
11754 	dhd_found++;
11755 
11756 #ifdef CSI_SUPPORT
11757 	dhd_csi_init(&dhd->pub);
11758 #endif /* CSI_SUPPORT */
11759 
11760 #ifdef DHD_FW_COREDUMP
11761 	/* Set memdump default values */
11762 #ifdef CUSTOMER_HW4_DEBUG
11763 	dhd->pub.memdump_enabled = DUMP_DISABLED;
11764 #elif defined(OEM_ANDROID)
11765 #ifdef DHD_COREDUMP
11766 	dhd->pub.memdump_enabled = DUMP_MEMFILE;
11767 #else
11768 	dhd->pub.memdump_enabled = DUMP_MEMFILE_BUGON;
11769 #endif /* DHD_COREDUMP */
11770 #else
11771 	dhd->pub.memdump_enabled = DUMP_MEMFILE;
11772 #endif /* CUSTOMER_HW4_DEBUG */
11773 	/* Check the memdump capability */
11774 	dhd_get_memdump_info(&dhd->pub);
11775 #endif /* DHD_FW_COREDUMP */
11776 
11777 #ifdef DHD_ERPOM
11778 	if (enable_erpom) {
11779 		pom_handler = &dhd->pub.pom_wlan_handler;
11780 		pom_handler->func_id = WLAN_FUNC_ID;
11781 		pom_handler->handler = (void *)g_dhd_pub;
11782 		pom_handler->power_off = dhd_wlan_power_off_handler;
11783 		pom_handler->power_on = dhd_wlan_power_on_handler;
11784 
11785 		dhd->pub.pom_func_register = NULL;
11786 		dhd->pub.pom_func_deregister = NULL;
11787 		dhd->pub.pom_toggle_reg_on = NULL;
11788 
11789 		dhd->pub.pom_func_register = symbol_get(pom_func_register);
11790 		dhd->pub.pom_func_deregister = symbol_get(pom_func_deregister);
11791 		dhd->pub.pom_toggle_reg_on = symbol_get(pom_toggle_reg_on);
11792 
11793 		symbol_put(pom_func_register);
11794 		symbol_put(pom_func_deregister);
11795 		symbol_put(pom_toggle_reg_on);
11796 
11797 		if (!dhd->pub.pom_func_register ||
11798 			!dhd->pub.pom_func_deregister ||
11799 			!dhd->pub.pom_toggle_reg_on) {
11800 			DHD_ERROR(("%s, enable_erpom enabled through module parameter but "
11801 				"POM is not loaded\n", __FUNCTION__));
11802 			ASSERT(0);
11803 			goto fail;
11804 		}
11805 		dhd->pub.pom_func_register(pom_handler);
11806 		dhd->pub.enable_erpom = TRUE;
11807 
11808 	}
11809 #endif /* DHD_ERPOM */
11810 
11811 #ifdef DHD_DUMP_MNGR
11812 	dhd->pub.dump_file_manage =
11813 		(dhd_dump_file_manage_t *)MALLOCZ(dhd->pub.osh, sizeof(dhd_dump_file_manage_t));
11814 	if (unlikely(!dhd->pub.dump_file_manage)) {
11815 		DHD_ERROR(("%s(): could not allocate memory for - "
11816 					"dhd_dump_file_manage_t\n", __FUNCTION__));
11817 	}
11818 #endif /* DHD_DUMP_MNGR */
11819 
11820 #ifdef BCMINTERNAL
11821 #ifdef DHD_FWTRACE
11822 	/* Attach the fwtrace */
11823 	if (dhd_fwtrace_attach(&dhd->pub) != 0) {
11824 		DHD_ERROR(("dhd_fwtrace_attach has failed\n"));
11825 		goto fail;
11826 	}
11827 #endif /* DHD_FWTRACE */
11828 #endif /* BCMINTERNAL */
11829 
11830 #ifdef RTT_SUPPORT
11831 	if (dhd_rtt_attach(&dhd->pub)) {
11832 		DHD_ERROR(("dhd_rtt_attach has failed\n"));
11833 		goto fail;
11834 	}
11835 #endif /* RTT_SUPPORT */
11836 
11837 #ifdef DHD_TX_PROFILE
11838 	if (dhd_tx_profile_attach(&dhd->pub) != BCME_OK) {
11839 		DHD_ERROR(("%s:\tdhd_tx_profile_attach has failed\n", __FUNCTION__));
11840 		goto fail;
11841 	}
11842 #endif /* defined(DHD_TX_PROFILE) */
11843 
11844 	return &dhd->pub;
11845 
11846 fail:
11847 	if (dhd_state >= DHD_ATTACH_STATE_DHD_ALLOC) {
11848 		DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
11849 			__FUNCTION__, dhd_state, &dhd->pub));
11850 		dhd->dhd_state = dhd_state;
11851 		dhd_detach(&dhd->pub);
11852 		dhd_free(&dhd->pub);
11853 	}
11854 
11855 dhd_null_flag:
11856 	return NULL;
11857 }
11858 
11859 int dhd_get_fw_mode(dhd_info_t *dhdinfo)
11860 {
11861 	if (strstr(dhdinfo->fw_path, "_apsta") != NULL)
11862 		return DHD_FLAG_HOSTAP_MODE;
11863 	if (strstr(dhdinfo->fw_path, "_p2p") != NULL)
11864 		return DHD_FLAG_P2P_MODE;
11865 	if (strstr(dhdinfo->fw_path, "_ibss") != NULL)
11866 		return DHD_FLAG_IBSS_MODE;
11867 	if (strstr(dhdinfo->fw_path, "_mfg") != NULL)
11868 		return DHD_FLAG_MFG_MODE;
11869 
11870 	return DHD_FLAG_STA_MODE;
11871 }
11872 
11873 int dhd_bus_get_fw_mode(dhd_pub_t *dhdp)
11874 {
11875 	return dhd_get_fw_mode(dhdp->info);
11876 }
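/*
 * Editor's sketch (not part of the original driver): the operating mode is
 * inferred purely from substrings of the firmware file name ("_apsta",
 * "_p2p", "_ibss", "_mfg"), defaulting to STA. The DHD_DOC_EXAMPLES guard
 * below is hypothetical and never defined, so this sketch is not compiled.
 */
#ifdef DHD_DOC_EXAMPLES
static void dhd_fw_mode_example(dhd_info_t *dhdinfo)
{
	/* e.g. fw_path = ".../fw_bcmdhd_apsta.bin" -> DHD_FLAG_HOSTAP_MODE */
	if (dhd_get_fw_mode(dhdinfo) == DHD_FLAG_HOSTAP_MODE)
		DHD_INFO(("example: firmware image selects SoftAP operation\n"));
}
#endif /* DHD_DOC_EXAMPLES */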
11877 
11878 extern char * nvram_get(const char *name);
11879 bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo)
11880 {
11881 	int fw_len;
11882 	int nv_len;
11883 	int clm_len;
11884 	int conf_len;
11885 	const char *fw = NULL;
11886 	const char *nv = NULL;
11887 	const char *clm = NULL;
11888 	const char *conf = NULL;
11889 #ifdef DHD_UCODE_DOWNLOAD
11890 	int uc_len;
11891 	const char *uc = NULL;
11892 #endif /* DHD_UCODE_DOWNLOAD */
11893 	wifi_adapter_info_t *adapter = dhdinfo->adapter;
11894 	int fw_path_len = sizeof(dhdinfo->fw_path);
11895 	int nv_path_len = sizeof(dhdinfo->nv_path);
11896 
11897 
11898 	/* Update the firmware and nvram paths. A path may come from adapter info or a module
11899 	 * parameter; the path from adapter info is used for initialization only (as it won't change).
11900 	 *
11901 	 * The firmware_path/nvram_path module parameter may be changed by the system at run
11902 	 * time. When it changes we need to copy it to dhdinfo->fw_path. An Android private
11903 	 * command may also change dhdinfo->fw_path. As such we need to clear the path info in
11904 	 * the module parameter after it is copied. We won't update the path until the module
11905 	 * parameter is changed again (first character is not '\0').
11906 	 */
11907 
11908 	/* set default firmware and nvram path for built-in type driver */
11909 //	if (!dhd_download_fw_on_driverload) {
11910 #ifdef CONFIG_BCMDHD_FW_PATH
11911 		fw = VENDOR_PATH CONFIG_BCMDHD_FW_PATH;
11912 #endif /* CONFIG_BCMDHD_FW_PATH */
11913 #ifdef CONFIG_BCMDHD_NVRAM_PATH
11914 		nv = VENDOR_PATH CONFIG_BCMDHD_NVRAM_PATH;
11915 #endif /* CONFIG_BCMDHD_NVRAM_PATH */
11916 //	}
11917 
11918 	/* check if we need to initialize the path */
11919 	if (dhdinfo->fw_path[0] == '\0') {
11920 		if (adapter && adapter->fw_path && adapter->fw_path[0] != '\0')
11921 			fw = adapter->fw_path;
11922 
11923 	}
11924 	if (dhdinfo->nv_path[0] == '\0') {
11925 		if (adapter && adapter->nv_path && adapter->nv_path[0] != '\0')
11926 			nv = adapter->nv_path;
11927 	}
11928 	if (dhdinfo->clm_path[0] == '\0') {
11929 		if (adapter && adapter->clm_path && adapter->clm_path[0] != '\0')
11930 			clm = adapter->clm_path;
11931 	}
11932 	if (dhdinfo->conf_path[0] == '\0') {
11933 		if (adapter && adapter->conf_path && adapter->conf_path[0] != '\0')
11934 			conf = adapter->conf_path;
11935 	}
11936 
11937 	/* Use module parameter if it is valid, EVEN IF the path has not been initialized
11938 	 *
11939 	 * TODO: need a solution for multi-chip, can't use the same firmware for all chips
11940 	 */
11941 	if (firmware_path[0] != '\0')
11942 		fw = firmware_path;
11943 
11944 	if (nvram_path[0] != '\0')
11945 		nv = nvram_path;
11946 	if (clm_path[0] != '\0')
11947 		clm = clm_path;
11948 	if (config_path[0] != '\0')
11949 		conf = config_path;
11950 
11951 #ifdef DHD_UCODE_DOWNLOAD
11952 	if (ucode_path[0] != '\0')
11953 		uc = ucode_path;
11954 #endif /* DHD_UCODE_DOWNLOAD */
11955 
11956 #ifdef BCM_ROUTER_DHD
11957 	if (!fw) {
11958 		char var[32];
11959 
11960 		snprintf(var, sizeof(var), "firmware_path%d", dhdinfo->unit);
11961 		fw = nvram_get(var);
11962 	}
11963 	if (!nv) {
11964 		char var[32];
11965 
11966 		snprintf(var, sizeof(var), "nvram_path%d", dhdinfo->unit);
11967 		nv = nvram_get(var);
11968 	}
11969 	DHD_ERROR(("dhd:%d: fw path:%s nv path:%s\n", dhdinfo->unit, fw ? fw : "", nv ? nv : ""));
11970 #endif
11971 
11972 	if (fw && fw[0] != '\0') {
11973 		fw_len = strlen(fw);
11974 		if (fw_len >= fw_path_len) {
11975 			DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n"));
11976 			return FALSE;
11977 		}
11978 		strlcpy(dhdinfo->fw_path, fw, fw_path_len);
11979 	}
11980 	if (nv && nv[0] != '\0') {
11981 		nv_len = strlen(nv);
11982 		if (nv_len >= nv_path_len) {
11983 			DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n"));
11984 			return FALSE;
11985 		}
11986 		memset(dhdinfo->nv_path, 0, nv_path_len);
11987 		strlcpy(dhdinfo->nv_path, nv, nv_path_len);
11988 #ifdef DHD_USE_SINGLE_NVRAM_FILE
11989 		/* Remove "_net" or "_mfg" tag from current nvram path */
11990 		{
11991 			char *nvram_tag = "nvram_";
11992 			char *ext_tag = ".txt";
11993 			char *sp_nvram = strnstr(dhdinfo->nv_path, nvram_tag, nv_path_len);
11994 			bool valid_buf = sp_nvram && ((uint32)(sp_nvram + strlen(nvram_tag) +
11995 				strlen(ext_tag) - dhdinfo->nv_path) <= nv_path_len);
11996 			if (valid_buf) {
11997 				char *sp = sp_nvram + strlen(nvram_tag) - 1;
11998 				uint32 padding_size = (uint32)(dhdinfo->nv_path +
11999 					nv_path_len - sp);
12000 				memset(sp, 0, padding_size);
12001 				strncat(dhdinfo->nv_path, ext_tag, strlen(ext_tag));
12002 				nv_len = strlen(dhdinfo->nv_path);
12003 				DHD_INFO(("%s: new nvram path = %s\n",
12004 					__FUNCTION__, dhdinfo->nv_path));
12005 			} else if (sp_nvram) {
12006 				DHD_ERROR(("%s: buffer space for nvram path is not enough\n",
12007 					__FUNCTION__));
12008 				return FALSE;
12009 			} else {
12010 				DHD_ERROR(("%s: Couldn't find the nvram tag. current"
12011 					" nvram path = %s\n", __FUNCTION__, dhdinfo->nv_path));
12012 			}
12013 		}
12014 #endif /* DHD_USE_SINGLE_NVRAM_FILE */
12015 	}
12016 	if (clm && clm[0] != '\0') {
12017 		clm_len = strlen(clm);
12018 		if (clm_len >= sizeof(dhdinfo->clm_path)) {
12019 			DHD_ERROR(("clm path len exceeds max len of dhdinfo->clm_path\n"));
12020 			return FALSE;
12021 		}
12022 		strncpy(dhdinfo->clm_path, clm, sizeof(dhdinfo->clm_path));
12023 		if (dhdinfo->clm_path[clm_len-1] == '\n')
12024 		       dhdinfo->clm_path[clm_len-1] = '\0';
12025 	}
12026 	if (conf && conf[0] != '\0') {
12027 		conf_len = strlen(conf);
12028 		if (conf_len >= sizeof(dhdinfo->conf_path)) {
12029 			DHD_ERROR(("config path len exceeds max len of dhdinfo->conf_path\n"));
12030 			return FALSE;
12031 		}
12032 		strncpy(dhdinfo->conf_path, conf, sizeof(dhdinfo->conf_path));
12033 		if (dhdinfo->conf_path[conf_len-1] == '\n')
12034 		       dhdinfo->conf_path[conf_len-1] = '\0';
12035 	}
12036 #ifdef DHD_UCODE_DOWNLOAD
12037 	if (uc && uc[0] != '\0') {
12038 		uc_len = strlen(uc);
12039 		if (uc_len >= sizeof(dhdinfo->uc_path)) {
12040 			DHD_ERROR(("uc path len exceeds max len of dhdinfo->uc_path\n"));
12041 			return FALSE;
12042 		}
12043 		strlcpy(dhdinfo->uc_path, uc, sizeof(dhdinfo->uc_path));
12044 	}
12045 #endif /* DHD_UCODE_DOWNLOAD */
12046 
12047 #if 0
12048 	/* clear the path in module parameter */
12049 	if (dhd_download_fw_on_driverload) {
12050 		firmware_path[0] = '\0';
12051 		nvram_path[0] = '\0';
12052 		clm_path[0] = '\0';
12053 		config_path[0] = '\0';
12054 	}
12055 #endif
12056 #ifdef DHD_UCODE_DOWNLOAD
12057 	ucode_path[0] = '\0';
12058 	DHD_ERROR(("ucode path: %s\n", dhdinfo->uc_path));
12059 #endif /* DHD_UCODE_DOWNLOAD */
12060 
12061 #ifndef BCMEMBEDIMAGE
12062 	/* fw_path and nv_path are not mandatory for BCMEMBEDIMAGE */
12063 	if (dhdinfo->fw_path[0] == '\0') {
12064 		DHD_ERROR(("firmware path not found\n"));
12065 		return FALSE;
12066 	}
12067 	if (dhdinfo->nv_path[0] == '\0') {
12068 		DHD_ERROR(("nvram path not found\n"));
12069 		return FALSE;
12070 	}
12071 #endif /* BCMEMBEDIMAGE */
12072 
12073 	return TRUE;
12074 }
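/*
 * Editor's sketch: dhd_update_fw_nv_path() resolves each path with the
 * precedence Kconfig default -> adapter info (first call only) -> module
 * parameter. A hedged restatement for one path slot; the helper name and
 * the DHD_DOC_EXAMPLES guard are hypothetical.
 */
#ifdef DHD_DOC_EXAMPLES
static const char *dhd_pick_path_example(const char *kcfg_default,
	const char *adapter_path, const char *module_param, const char *current)
{
	const char *sel = kcfg_default;		/* build-time default (may be NULL) */

	if (current[0] == '\0' && adapter_path && adapter_path[0] != '\0')
		sel = adapter_path;		/* initialization only */
	if (module_param && module_param[0] != '\0')
		sel = module_param;		/* runtime override always wins */
	return sel;
}
#endif /* DHD_DOC_EXAMPLES */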
12075 
12076 #if defined(BT_OVER_SDIO)
12077 extern bool dhd_update_btfw_path(dhd_info_t *dhdinfo, char* btfw_path)
12078 {
12079 	int fw_len;
12080 	const char *fw = NULL;
12081 	wifi_adapter_info_t *adapter = dhdinfo->adapter;
12082 
12083 	/* Update the BT firmware path. The path may come from adapter info or a module
12084 	 * parameter; the path from adapter info is used for initialization only (as it won't change).
12085 	 *
12086 	 * The btfw_path module parameter may be changed by the system at run
12087 	 * time. When it changes we need to copy it to dhdinfo->btfw_path. An Android private
12088 	 * command may also change dhdinfo->btfw_path. As such we need to clear the path info in
12089 	 * the module parameter after it is copied. We won't update the path until the module
12090 	 * parameter is changed again (first character is not '\0').
12091 	 */
12092 
12093 	/* set default firmware and nvram path for built-in type driver */
12094 	if (!dhd_download_fw_on_driverload) {
12095 #ifdef CONFIG_BCMDHD_BTFW_PATH
12096 		fw = CONFIG_BCMDHD_BTFW_PATH;
12097 #endif /* CONFIG_BCMDHD_BTFW_PATH */
12098 	}
12099 
12100 	/* check if we need to initialize the path */
12101 	if (dhdinfo->btfw_path[0] == '\0') {
12102 		if (adapter && adapter->btfw_path && adapter->btfw_path[0] != '\0')
12103 			fw = adapter->btfw_path;
12104 	}
12105 
12106 	/* Use module parameter if it is valid, EVEN IF the path has not been initialized
12107 	 */
12108 	if (btfw_path[0] != '\0')
12109 		fw = btfw_path;
12110 
12111 	if (fw && fw[0] != '\0') {
12112 		fw_len = strlen(fw);
12113 		if (fw_len >= sizeof(dhdinfo->btfw_path)) {
12114 			DHD_ERROR(("fw path len exceeds max len of dhdinfo->btfw_path\n"));
12115 			return FALSE;
12116 		}
12117 		strlcpy(dhdinfo->btfw_path, fw, sizeof(dhdinfo->btfw_path));
12118 	}
12119 
12120 	/* clear the path in module parameter */
12121 	btfw_path[0] = '\0';
12122 
12123 	if (dhdinfo->btfw_path[0] == '\0') {
12124 		DHD_ERROR(("bt firmware path not found\n"));
12125 		return FALSE;
12126 	}
12127 
12128 	return TRUE;
12129 }
12130 #endif /* defined (BT_OVER_SDIO) */
12131 
12132 #ifdef CUSTOMER_HW4_DEBUG
12133 bool dhd_validate_chipid(dhd_pub_t *dhdp)
12134 {
12135 	uint chipid = dhd_bus_chip_id(dhdp);
12136 	uint config_chipid;
12137 
12138 #ifdef BCM4389_CHIP_DEF
12139 	config_chipid = BCM4389_CHIP_ID;
12140 #elif defined(BCM4375_CHIP)
12141 	config_chipid = BCM4375_CHIP_ID;
12142 #elif defined(BCM4361_CHIP)
12143 	config_chipid = BCM4361_CHIP_ID;
12144 #elif defined(BCM4359_CHIP)
12145 	config_chipid = BCM4359_CHIP_ID;
12146 #elif defined(BCM4358_CHIP)
12147 	config_chipid = BCM4358_CHIP_ID;
12148 #elif defined(BCM4354_CHIP)
12149 	config_chipid = BCM4354_CHIP_ID;
12150 #elif defined(BCM4339_CHIP)
12151 	config_chipid = BCM4339_CHIP_ID;
12152 #elif defined(BCM4335_CHIP)
12153 	config_chipid = BCM4335_CHIP_ID;
12154 #elif defined(BCM43430_CHIP)
12155 	config_chipid = BCM43430_CHIP_ID;
12156 #elif defined(BCM43018_CHIP)
12157 	config_chipid = BCM43018_CHIP_ID;
12158 #elif defined(BCM43455_CHIP)
12159 	config_chipid = BCM4345_CHIP_ID;
12160 #elif defined(BCM43454_CHIP)
12161 	config_chipid = BCM43454_CHIP_ID;
12162 #elif defined(BCM43012_CHIP_)
12163 	config_chipid = BCM43012_CHIP_ID;
12164 #elif defined(BCM43013_CHIP)
12165 	config_chipid = BCM43012_CHIP_ID;
12166 #else
12167 	DHD_ERROR(("%s: Unknown chip id, if you use new chipset,"
12168 		" please add CONFIG_BCMXXXX into the Kernel and"
12169 		" BCMXXXX_CHIP definition into the DHD driver\n",
12170 		__FUNCTION__));
12171 	config_chipid = 0;
12172 
12173 	return FALSE;
12174 #endif /* BCM4389_CHIP_DEF */
12175 
12176 #if defined(BCM4354_CHIP) && defined(SUPPORT_MULTIPLE_REVISION)
12177 	if (chipid == BCM4350_CHIP_ID && config_chipid == BCM4354_CHIP_ID) {
12178 		return TRUE;
12179 	}
12180 #endif /* BCM4354_CHIP && SUPPORT_MULTIPLE_REVISION */
12181 #if defined(BCM4358_CHIP) && defined(SUPPORT_MULTIPLE_REVISION)
12182 	if (chipid == BCM43569_CHIP_ID && config_chipid == BCM4358_CHIP_ID) {
12183 		return TRUE;
12184 	}
12185 #endif /* BCM4358_CHIP && SUPPORT_MULTIPLE_REVISION */
12186 #if defined(BCM4359_CHIP)
12187 	if (chipid == BCM4355_CHIP_ID && config_chipid == BCM4359_CHIP_ID) {
12188 		return TRUE;
12189 	}
12190 #endif /* BCM4359_CHIP */
12191 #if defined(BCM4361_CHIP)
12192 	if (chipid == BCM4347_CHIP_ID && config_chipid == BCM4361_CHIP_ID) {
12193 		return TRUE;
12194 	}
12195 #endif /* BCM4361_CHIP */
12196 
12197 	return config_chipid == chipid;
12198 }
12199 #endif /* CUSTOMER_HW4_DEBUG */
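/*
 * Editor's sketch: dhd_validate_chipid() accepts a handful of package
 * variants (e.g. a 4355 reporting under a BCM4359 build) before falling back
 * to an exact match. A hedged table-driven equivalent; the table, the helper
 * and the DHD_DOC_EXAMPLES guard are hypothetical, the *_CHIP_ID values come
 * from bcmdevs.h.
 */
#ifdef DHD_DOC_EXAMPLES
static const struct { uint probed; uint configured; } dhd_chip_alias_example[] = {
	{ BCM4350_CHIP_ID,  BCM4354_CHIP_ID },
	{ BCM43569_CHIP_ID, BCM4358_CHIP_ID },
	{ BCM4355_CHIP_ID,  BCM4359_CHIP_ID },
	{ BCM4347_CHIP_ID,  BCM4361_CHIP_ID },
};

static bool dhd_chipid_matches_example(uint chipid, uint config_chipid)
{
	uint i;

	for (i = 0; i < ARRAYSIZE(dhd_chip_alias_example); i++) {
		if (dhd_chip_alias_example[i].probed == chipid &&
		    dhd_chip_alias_example[i].configured == config_chipid) {
			return TRUE;
		}
	}
	return config_chipid == chipid;
}
#endif /* DHD_DOC_EXAMPLES */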
12200 
12201 #if defined(BT_OVER_SDIO)
12202 wlan_bt_handle_t dhd_bt_get_pub_hndl(void)
12203 {
12204 	DHD_ERROR(("%s: g_dhd_pub %p\n", __FUNCTION__, g_dhd_pub));
12205 	/* assuming that dhd_pub_t type pointer is available from a global variable */
12206 	return (wlan_bt_handle_t) g_dhd_pub;
12207 } EXPORT_SYMBOL(dhd_bt_get_pub_hndl);
12208 
12209 int dhd_download_btfw(wlan_bt_handle_t handle, char* btfw_path)
12210 {
12211 	int ret = -1;
12212 	dhd_pub_t *dhdp = (dhd_pub_t *)handle;
12213 	dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
12214 
12215 	/* Download BT firmware image to the dongle */
12216 	if (dhd->pub.busstate == DHD_BUS_DATA && dhd_update_btfw_path(dhd, btfw_path)) {
12217 		DHD_INFO(("%s: download btfw from: %s\n", __FUNCTION__, dhd->btfw_path));
12218 		ret = dhd_bus_download_btfw(dhd->pub.bus, dhd->pub.osh, dhd->btfw_path);
12219 		if (ret < 0) {
12220 			DHD_ERROR(("%s: failed to download btfw from: %s\n",
12221 				__FUNCTION__, dhd->btfw_path));
12222 			return ret;
12223 		}
12224 	}
12225 	return ret;
12226 } EXPORT_SYMBOL(dhd_download_btfw);
12227 #endif /* defined (BT_OVER_SDIO) */
12228 
12229 #ifndef BCMDBUS
12230 int
12231 dhd_bus_start(dhd_pub_t *dhdp)
12232 {
12233 	int ret = -1;
12234 	dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
12235 	unsigned long flags;
12236 
12237 #if defined(DHD_DEBUG) && defined(BCMSDIO)
12238 	int fw_download_start = 0, fw_download_end = 0, f2_sync_start = 0, f2_sync_end = 0;
12239 #endif /* DHD_DEBUG && BCMSDIO */
12240 	ASSERT(dhd);
12241 
12242 	DHD_TRACE(("Enter %s:\n", __FUNCTION__));
12243 	dhdp->memdump_type = 0;
12244 	dhdp->dongle_trap_occured = 0;
12245 #if defined(BCMPCIE)
12246 	if (dhdp->extended_trap_data) {
12247 		memset(dhdp->extended_trap_data, 0, BCMPCIE_EXT_TRAP_DATA_MAXLEN);
12248 	}
12249 #endif /* BCMPCIE */
12250 #ifdef DHD_SSSR_DUMP
12251 	/* Flag to indicate sssr dump is collected */
12252 	dhdp->sssr_dump_collected = 0;
12253 #endif /* DHD_SSSR_DUMP */
12254 #ifdef BT_OVER_PCIE
12255 	dhd->pub.dongle_trap_due_to_bt = 0;
12256 #endif /* BT_OVER_PCIE */
12257 	dhdp->iovar_timeout_occured = 0;
12258 #ifdef PCIE_FULL_DONGLE
12259 	dhdp->d3ack_timeout_occured = 0;
12260 	dhdp->livelock_occured = 0;
12261 	dhdp->pktid_audit_failed = 0;
12262 #endif /* PCIE_FULL_DONGLE */
12263 	dhd->pub.iface_op_failed = 0;
12264 	dhd->pub.scan_timeout_occurred = 0;
12265 	dhd->pub.scan_busy_occurred = 0;
12266 	/* Retain BH induced errors and clear induced error during initialize */
12267 	if (dhd->pub.dhd_induce_error) {
12268 		dhd->pub.dhd_induce_bh_error = dhd->pub.dhd_induce_error;
12269 	}
12270 	dhd->pub.dhd_induce_error = DHD_INDUCE_ERROR_CLEAR;
12271 #ifdef DHD_PKTTS
12272 	dhd->latency = 0;
12273 #endif
12274 	dhd->pub.tput_test_done = FALSE;
12275 
12276 #if defined(BCMINTERNAL) && defined(BCMPCIE)
12277 	{
12278 		/* JIRA:SW4349-436 JIRA:HW4349-302 Work around for 4349a0 PCIE-D11 DMA bug */
12279 		uint chipid = dhd_bus_chip_id(&dhd->pub);
12280 		uint revid = dhd_bus_chiprev_id(&dhd->pub);
12281 
12282 		if ((chipid == BCM4349_CHIP_ID) && (revid == 1)) {
12283 			DHD_INFO(("%s: Detected 4349 A0, enabling 16MB mem restriction flag\n",
12284 				__FUNCTION__));
12285 			osl_flag_set(dhd->pub.osh, OSL_PHYS_MEM_LESS_THAN_16MB);
12286 		}
12287 	}
12288 #endif /* BCMINTERNAL && BCMPCIE */
12289 	/* try to download image and nvram to the dongle */
12290 	if  (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) {
12291 		/* Indicate FW Download has not yet done */
12292 		dhd->pub.fw_download_status = FW_DOWNLOAD_IN_PROGRESS;
12293 		DHD_INFO(("%s download fw %s, nv %s, conf %s\n",
12294 			__FUNCTION__, dhd->fw_path, dhd->nv_path, dhd->conf_path));
12295 #if defined(DHD_DEBUG) && defined(BCMSDIO)
12296 		fw_download_start = OSL_SYSUPTIME();
12297 #endif /* DHD_DEBUG && BCMSDIO */
12298 		ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
12299 			dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path);
12300 #if defined(DHD_DEBUG) && defined(BCMSDIO)
12301 		fw_download_end = OSL_SYSUPTIME();
12302 #endif /* DHD_DEBUG && BCMSDIO */
12303 		if (ret < 0) {
12304 			DHD_ERROR(("%s: failed to download firmware %s\n",
12305 			          __FUNCTION__, dhd->fw_path));
12306 			return ret;
12307 		}
12308 		/* Indicate FW Download has succeeded */
12309 		dhd->pub.fw_download_status = FW_DOWNLOAD_DONE;
12310 	}
12311 	if (dhd->pub.busstate != DHD_BUS_LOAD) {
12312 		return -ENETDOWN;
12313 	}
12314 
12315 #ifdef BCMSDIO
12316 	dhd_os_sdlock(dhdp);
12317 #endif /* BCMSDIO */
12318 
12319 	/* Start the watchdog timer */
12320 	dhd->pub.tickcnt = 0;
12321 	dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
12322 
12323 	/* Bring up the bus */
12324 	if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {
12325 
12326 		DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
12327 #ifdef BCMSDIO
12328 		dhd_os_sdunlock(dhdp);
12329 #endif /* BCMSDIO */
12330 		return ret;
12331 	}
12332 #if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(BCMPCIE_OOB_HOST_WAKE)
12333 	/* Host registration for OOB interrupt */
12334 	if (dhd_bus_oob_intr_register(dhdp)) {
12335 		/* deactivate timer and wait for the handler to finish */
12336 #if !defined(BCMPCIE_OOB_HOST_WAKE)
12337 		DHD_GENERAL_LOCK(&dhd->pub, flags);
12338 		dhd->wd_timer_valid = FALSE;
12339 		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
12340 		del_timer_sync(&dhd->timer);
12341 
12342 #endif /* !BCMPCIE_OOB_HOST_WAKE */
12343 		DHD_STOP_RPM_TIMER(&dhd->pub);
12344 
12345 		DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
12346 		DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
12347 		return -ENODEV;
12348 	}
12349 
12350 #if defined(BCMPCIE_OOB_HOST_WAKE)
12351 	dhd_bus_oob_intr_set(dhdp, TRUE);
12352 #else
12353 	/* Enable oob at firmware */
12354 	dhd_enable_oob_intr(dhd->pub.bus, TRUE);
12355 #endif /* BCMPCIE_OOB_HOST_WAKE */
12356 #elif defined(FORCE_WOWLAN)
12357 	/* Enable oob at firmware */
12358 	dhd_enable_oob_intr(dhd->pub.bus, TRUE);
12359 #endif /* OOB_INTR_ONLY || BCMSPI_ANDROID || BCMPCIE_OOB_HOST_WAKE */
12360 #ifdef PCIE_FULL_DONGLE
12361 	{
12362 		/* max_h2d_rings includes H2D common rings */
12363 		uint32 max_h2d_rings = dhd_bus_max_h2d_queues(dhd->pub.bus);
12364 
12365 		DHD_ERROR(("%s: Initializing %u h2drings\n", __FUNCTION__,
12366 			max_h2d_rings));
12367 		if ((ret = dhd_flow_rings_init(&dhd->pub, max_h2d_rings)) != BCME_OK) {
12368 #ifdef BCMSDIO
12369 			dhd_os_sdunlock(dhdp);
12370 #endif /* BCMSDIO */
12371 			return ret;
12372 		}
12373 	}
12374 #endif /* PCIE_FULL_DONGLE */
12375 
12376 	/* set default value for now. Will be updated again in dhd_preinit_ioctls()
12377 	 * after querying FW
12378 	 */
12379 	dhdp->event_log_max_sets = NUM_EVENT_LOG_SETS;
12380 	dhdp->event_log_max_sets_queried = FALSE;
12381 
12382 	dhdp->smmu_fault_occurred = 0;
12383 #ifdef DNGL_AXI_ERROR_LOGGING
12384 	dhdp->axi_error = FALSE;
12385 #endif /* DNGL_AXI_ERROR_LOGGING */
12386 
12387 	/* Do protocol initialization necessary for IOCTL/IOVAR */
12388 	ret = dhd_prot_init(&dhd->pub);
12389 	if (unlikely(ret) != BCME_OK) {
12390 		DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
12391 		return ret;
12392 	}
12393 
12394 	/* If bus is not ready, can't come up */
12395 	if (dhd->pub.busstate != DHD_BUS_DATA) {
12396 		DHD_GENERAL_LOCK(&dhd->pub, flags);
12397 		dhd->wd_timer_valid = FALSE;
12398 		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
12399 		del_timer_sync(&dhd->timer);
12400 		DHD_ERROR(("%s: failed, bus is not ready\n", __FUNCTION__));
12401 		DHD_STOP_RPM_TIMER(&dhd->pub);
12402 #ifdef BCMSDIO
12403 		dhd_os_sdunlock(dhdp);
12404 #endif /* BCMSDIO */
12405 		DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
12406 		return -ENODEV;
12407 	}
12408 
12409 #ifdef BCMSDIO
12410 	dhd_os_sdunlock(dhdp);
12411 #endif /* BCMSDIO */
12412 
12413 	/* Bus is ready, query any dongle information */
12414 	/* XXX Since dhd_sync_with_dongle can sleep, should module count surround it? */
12415 #if defined(DHD_DEBUG) && defined(BCMSDIO)
12416 	f2_sync_start = OSL_SYSUPTIME();
12417 #endif /* DHD_DEBUG && BCMSDIO */
12418 	if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
12419 		DHD_GENERAL_LOCK(&dhd->pub, flags);
12420 		dhd->wd_timer_valid = FALSE;
12421 		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
12422 		del_timer_sync(&dhd->timer);
12423 		DHD_ERROR(("%s failed to sync with dongle\n", __FUNCTION__));
12424 		DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
12425 		return ret;
12426 	}
12427 
12428 #ifdef BT_OVER_PCIE
12429 	/* Enable L1SS of RC and EP */
12430 	dhd_bus_l1ss_enable_rc_ep(dhdp->bus, TRUE);
12431 #endif /* BT_OVER_PCIE */
12432 
12433 #if defined(CONFIG_ARCH_EXYNOS) && defined(BCMPCIE)
12434 #if !defined(CONFIG_SOC_EXYNOS8890) && !defined(SUPPORT_EXYNOS7420)
12435 	/* XXX: JIRA SWWLAN-139454: Added L1ss enable
12436 	 * after firmware download completion due to link down issue
12437 	 * JIRA SWWLAN-142236: Amendment - Changed L1ss enable point
12438 	 */
12439 	DHD_ERROR(("%s: Enable L1ss EP side\n", __FUNCTION__));
12440 #if defined(CONFIG_SOC_GS101)
12441 	exynos_pcie_rc_l1ss_ctrl(1, PCIE_L1SS_CTRL_WIFI, 1);
12442 #else
12443 	exynos_pcie_l1ss_ctrl(1, PCIE_L1SS_CTRL_WIFI);
12444 #endif /* CONFIG_SOC_GS101 */
12445 #endif /* !CONFIG_SOC_EXYNOS8890 && !SUPPORT_EXYNOS7420 */
12446 #endif /* CONFIG_ARCH_EXYNOS && BCMPCIE */
12447 #if defined(DHD_DEBUG) && defined(BCMSDIO)
12448 	f2_sync_end = OSL_SYSUPTIME();
12449 	DHD_ERROR(("Time taken for FW download and F2 ready is: %d msec\n",
12450 			(fw_download_end - fw_download_start) + (f2_sync_end - f2_sync_start)));
12451 #endif /* DHD_DEBUG && BCMSDIO */
12452 
12453 #ifdef ARP_OFFLOAD_SUPPORT
12454 	if (dhd->pend_ipaddr) {
12455 #ifdef AOE_IP_ALIAS_SUPPORT
12456 		/* XXX Assume the pending IP address belongs to the primary interface */
12457 		aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE, 0);
12458 #endif /* AOE_IP_ALIAS_SUPPORT */
12459 		dhd->pend_ipaddr = 0;
12460 	}
12461 #endif /* ARP_OFFLOAD_SUPPORT */
12462 
12463 #if defined(BCM_ROUTER_DHD)
12464 	bzero(&dhd->pub.dhd_tm_dwm_tbl, sizeof(dhd_trf_mgmt_dwm_tbl_t));
12465 #endif /* BCM_ROUTER_DHD */
12466 	return 0;
12467 }
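/*
 * Editor's note: dhd_bus_start() is the single bring-up sequence:
 *   1. firmware/nvram download (DHD_BUS_DOWN -> DHD_BUS_LOAD)
 *   2. watchdog start and dhd_bus_init() (-> DHD_BUS_DATA)
 *   3. OOB host-wake interrupt registration where configured
 *   4. dhd_flow_rings_init() for PCIe full dongle
 *   5. dhd_prot_init() and dhd_sync_with_dongle()
 * Every failure after the watchdog starts must deactivate the timer before
 * returning, which is why the error paths repeat del_timer_sync(). Hedged
 * caller sketch; the wrapper name and DHD_DOC_EXAMPLES guard are hypothetical.
 */
#ifdef DHD_DOC_EXAMPLES
static int dhd_bring_up_example(dhd_pub_t *dhdp)
{
	int err = dhd_bus_start(dhdp);

	if (err == -ENETDOWN) {
		/* the image never reached DHD_BUS_LOAD: bad path or download failure */
		DHD_ERROR(("example: firmware download did not complete\n"));
	}
	return err;
}
#endif /* DHD_DOC_EXAMPLES */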
12468 #endif /* !BCMDBUS */
12469 
12470 #ifdef WLTDLS
12471 int _dhd_tdls_enable(dhd_pub_t *dhd, bool tdls_on, bool auto_on, struct ether_addr *mac)
12472 {
12473 	uint32 tdls = tdls_on;
12474 	int ret = 0;
12475 	uint32 tdls_auto_op = 0;
12476 	uint32 tdls_idle_time = CUSTOM_TDLS_IDLE_MODE_SETTING;
12477 	int32 tdls_rssi_high = CUSTOM_TDLS_RSSI_THRESHOLD_HIGH;
12478 	int32 tdls_rssi_low = CUSTOM_TDLS_RSSI_THRESHOLD_LOW;
12479 	uint32 tdls_pktcnt_high = CUSTOM_TDLS_PCKTCNT_THRESHOLD_HIGH;
12480 	uint32 tdls_pktcnt_low = CUSTOM_TDLS_PCKTCNT_THRESHOLD_LOW;
12481 
12482 	BCM_REFERENCE(mac);
12483 	if (!FW_SUPPORTED(dhd, tdls))
12484 		return BCME_ERROR;
12485 
12486 	if (dhd->tdls_enable == tdls_on)
12487 		goto auto_mode;
12488 	ret = dhd_iovar(dhd, 0, "tdls_enable", (char *)&tdls, sizeof(tdls), NULL, 0, TRUE);
12489 	if (ret < 0) {
12490 		DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__, tdls, ret));
12491 		goto exit;
12492 	}
12493 	dhd->tdls_enable = tdls_on;
12494 auto_mode:
12495 
12496 	tdls_auto_op = auto_on;
12497 	ret = dhd_iovar(dhd, 0, "tdls_auto_op", (char *)&tdls_auto_op, sizeof(tdls_auto_op), NULL,
12498 			0, TRUE);
12499 	if (ret < 0) {
12500 		DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__, ret));
12501 		goto exit;
12502 	}
12503 
12504 	if (tdls_auto_op) {
12505 		ret = dhd_iovar(dhd, 0, "tdls_idle_time", (char *)&tdls_idle_time,
12506 				sizeof(tdls_idle_time), NULL, 0, TRUE);
12507 		if (ret < 0) {
12508 			DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__, ret));
12509 			goto exit;
12510 		}
12511 		ret = dhd_iovar(dhd, 0, "tdls_rssi_high", (char *)&tdls_rssi_high,
12512 				sizeof(tdls_rssi_high), NULL, 0, TRUE);
12513 		if (ret < 0) {
12514 			DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__, ret));
12515 			goto exit;
12516 		}
12517 		ret = dhd_iovar(dhd, 0, "tdls_rssi_low", (char *)&tdls_rssi_low,
12518 				sizeof(tdls_rssi_low), NULL, 0, TRUE);
12519 		if (ret < 0) {
12520 			DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__, ret));
12521 			goto exit;
12522 		}
12523 		ret = dhd_iovar(dhd, 0, "tdls_trigger_pktcnt_high", (char *)&tdls_pktcnt_high,
12524 				sizeof(tdls_pktcnt_high), NULL, 0, TRUE);
12525 		if (ret < 0) {
12526 			DHD_ERROR(("%s: tdls_trigger_pktcnt_high failed %d\n", __FUNCTION__, ret));
12527 			goto exit;
12528 		}
12529 		ret = dhd_iovar(dhd, 0, "tdls_trigger_pktcnt_low", (char *)&tdls_pktcnt_low,
12530 				sizeof(tdls_pktcnt_low), NULL, 0, TRUE);
12531 		if (ret < 0) {
12532 			DHD_ERROR(("%s: tdls_trigger_pktcnt_low failed %d\n", __FUNCTION__, ret));
12533 			goto exit;
12534 		}
12535 	}
12536 
12537 exit:
12538 	return ret;
12539 }
12540 
12541 int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac)
12542 {
12543 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
12544 	int ret = 0;
12545 	if (dhd)
12546 		ret = _dhd_tdls_enable(&dhd->pub, tdls_on, auto_on, mac);
12547 	else
12548 		ret = BCME_ERROR;
12549 	return ret;
12550 }
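/*
 * Editor's sketch: callers (typically cfg80211 code) toggle TDLS per netdev
 * via dhd_tdls_enable(); with auto_on the firmware itself brings links up
 * and down from the idle-time/RSSI/packet-count thresholds programmed above.
 * Illustrative only; DHD_DOC_EXAMPLES is hypothetical.
 */
#ifdef DHD_DOC_EXAMPLES
static void dhd_tdls_usage_example(struct net_device *ndev)
{
	/* enable TDLS with firmware-driven (auto) link management, no peer hint */
	if (dhd_tdls_enable(ndev, TRUE, TRUE, NULL) < 0)
		DHD_ERROR(("example: TDLS enable failed\n"));
}
#endif /* DHD_DOC_EXAMPLES */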
12551 
12552 int
12553 dhd_tdls_set_mode(dhd_pub_t *dhd, bool wfd_mode)
12554 {
12555 	int ret = 0;
12556 	bool auto_on = false;
12557 	uint32 mode =  wfd_mode;
12558 
12559 #ifdef ENABLE_TDLS_AUTO_MODE
12560 	if (wfd_mode) {
12561 		auto_on = false;
12562 	} else {
12563 		auto_on = true;
12564 	}
12565 #else
12566 	auto_on = false;
12567 #endif /* ENABLE_TDLS_AUTO_MODE */
12568 	ret = _dhd_tdls_enable(dhd, false, auto_on, NULL);
12569 	if (ret < 0) {
12570 		DHD_ERROR(("Disable tdls_auto_op failed. %d\n", ret));
12571 		return ret;
12572 	}
12573 
12574 	ret = dhd_iovar(dhd, 0, "tdls_wfd_mode", (char *)&mode, sizeof(mode), NULL, 0, TRUE);
12575 	if ((ret < 0) && (ret != BCME_UNSUPPORTED)) {
12576 		DHD_ERROR(("%s: tdls_wfd_mode failed %d\n", __FUNCTION__, ret));
12577 		return ret;
12578 	}
12579 
12580 	ret = _dhd_tdls_enable(dhd, true, auto_on, NULL);
12581 	if (ret < 0) {
12582 		DHD_ERROR(("enable tdls_auto_op failed. %d\n", ret));
12583 		return ret;
12584 	}
12585 
12586 	dhd->tdls_mode = mode;
12587 	return ret;
12588 }
12589 #ifdef PCIE_FULL_DONGLE
12590 int dhd_tdls_update_peer_info(dhd_pub_t *dhdp, wl_event_msg_t *event)
12591 {
12592 	dhd_pub_t *dhd_pub = dhdp;
12593 	tdls_peer_node_t *cur = dhd_pub->peer_tbl.node;
12594 	tdls_peer_node_t *new = NULL, *prev = NULL;
12595 	int ifindex = dhd_ifname2idx(dhd_pub->info, event->ifname);
12596 	uint8 *da = (uint8 *)&event->addr.octet[0];
12597 	bool connect = FALSE;
12598 	uint32 reason = ntoh32(event->reason);
12599 	unsigned long flags;
12600 
12601 	/* No handling needed for peer discovered reason */
12602 	if (reason == WLC_E_TDLS_PEER_DISCOVERED) {
12603 		return BCME_ERROR;
12604 	}
12605 	if (reason == WLC_E_TDLS_PEER_CONNECTED)
12606 		connect = TRUE;
12607 	else if (reason == WLC_E_TDLS_PEER_DISCONNECTED)
12608 		connect = FALSE;
12609 	else
12610 	{
12611 		DHD_ERROR(("%s: TDLS Event reason is unknown\n", __FUNCTION__));
12612 		return BCME_ERROR;
12613 	}
12614 	if (ifindex == DHD_BAD_IF)
12615 		return BCME_ERROR;
12616 
12617 	if (connect) {
12618 		while (cur != NULL) {
12619 			if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
12620 				DHD_ERROR(("%s: TDLS Peer exist already %d\n",
12621 					__FUNCTION__, __LINE__));
12622 				return BCME_ERROR;
12623 			}
12624 			cur = cur->next;
12625 		}
12626 
12627 		new = MALLOC(dhd_pub->osh, sizeof(tdls_peer_node_t));
12628 		if (new == NULL) {
12629 			DHD_ERROR(("%s: Failed to allocate memory\n", __FUNCTION__));
12630 			return BCME_ERROR;
12631 		}
12632 		memcpy(new->addr, da, ETHER_ADDR_LEN);
12633 		DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
12634 		new->next = dhd_pub->peer_tbl.node;
12635 		dhd_pub->peer_tbl.node = new;
12636 		dhd_pub->peer_tbl.tdls_peer_count++;
12637 		DHD_ERROR(("%s: Add TDLS peer, count=%d " MACDBG "\n",
12638 			__FUNCTION__, dhd_pub->peer_tbl.tdls_peer_count,
12639 			MAC2STRDBG((char *)da)));
12640 		DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
12641 
12642 	} else {
12643 		while (cur != NULL) {
12644 			if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
12645 				dhd_flow_rings_delete_for_peer(dhd_pub, (uint8)ifindex, da);
12646 				DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
12647 				if (prev)
12648 					prev->next = cur->next;
12649 				else
12650 					dhd_pub->peer_tbl.node = cur->next;
12651 				MFREE(dhd_pub->osh, cur, sizeof(tdls_peer_node_t));
12652 				dhd_pub->peer_tbl.tdls_peer_count--;
12653 				DHD_ERROR(("%s: Remove TDLS peer, count=%d " MACDBG "\n",
12654 					__FUNCTION__, dhd_pub->peer_tbl.tdls_peer_count,
12655 					MAC2STRDBG((char *)da)));
12656 				DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
12657 				return BCME_OK;
12658 			}
12659 			prev = cur;
12660 			cur = cur->next;
12661 		}
12662 		DHD_ERROR(("%s: TDLS Peer Entry Not found\n", __FUNCTION__));
12663 	}
12664 	return BCME_OK;
12665 }
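/*
 * Editor's sketch: the TDLS peer table is a singly linked list headed at
 * dhd_pub->peer_tbl.node and guarded by tdls_lock. A hedged walker; the
 * helper name and DHD_DOC_EXAMPLES guard are hypothetical.
 */
#ifdef DHD_DOC_EXAMPLES
static uint32 dhd_tdls_count_peers_example(dhd_pub_t *dhdp)
{
	tdls_peer_node_t *cur;
	uint32 count = 0;
	unsigned long flags;

	DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
	for (cur = dhdp->peer_tbl.node; cur != NULL; cur = cur->next) {
		count++;
	}
	DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
	return count;
}
#endif /* DHD_DOC_EXAMPLES */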
12666 #endif /* PCIE_FULL_DONGLE */
12667 #endif /* WLTDLS */
12668 
12669 bool dhd_is_concurrent_mode(dhd_pub_t *dhd)
12670 {
12671 	if (!dhd)
12672 		return FALSE;
12673 
12674 	if (dhd->op_mode & DHD_FLAG_CONCURR_MULTI_CHAN_MODE)
12675 		return TRUE;
12676 	else if ((dhd->op_mode & DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) ==
12677 		DHD_FLAG_CONCURR_SINGLE_CHAN_MODE)
12678 		return TRUE;
12679 	else
12680 		return FALSE;
12681 }
12682 #if defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P)
12683 /* From the Android Jelly Bean release onward, concurrent mode is enabled by default and
12684  * the firmware is named fw_bcmdhd.bin. So we need to determine whether P2P is enabled in
12685  * the STA firmware and, if so, enable concurrent mode (apply P2P settings). SoftAP
12686  * firmware is still named fw_bcmdhd_apsta.
12687  */
12688 uint32
12689 dhd_get_concurrent_capabilites(dhd_pub_t *dhd)
12690 {
12691 	int32 ret = 0;
12692 	char buf[WLC_IOCTL_SMLEN];
12693 	bool mchan_supported = FALSE;
12694 	/* if dhd->op_mode is already set for HOSTAP and Manufacturing
12695 	 * test mode, that means we only will use the mode as it is
12696 	 */
12697 	if (dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))
12698 		return 0;
12699 	if (FW_SUPPORTED(dhd, vsdb)) {
12700 		mchan_supported = TRUE;
12701 	}
12702 	if (!FW_SUPPORTED(dhd, p2p)) {
12703 		DHD_TRACE(("Chip does not support p2p\n"));
12704 		return 0;
12705 	} else {
12706 		/* Chip supports p2p but ensure that p2p is really implemented in firmware or not */
12707 		memset(buf, 0, sizeof(buf));
12708 		ret = dhd_iovar(dhd, 0, "p2p", NULL, 0, (char *)&buf,
12709 				sizeof(buf), FALSE);
12710 		if (ret < 0) {
12711 			DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret));
12712 			return 0;
12713 		} else {
12714 			if (buf[0] == 1) {
12715 				/* By default, chip supports single chan concurrency,
12716 				* now lets check for mchan
12717 				*/
12718 				ret = DHD_FLAG_CONCURR_SINGLE_CHAN_MODE;
12719 				if (mchan_supported)
12720 					ret |= DHD_FLAG_CONCURR_MULTI_CHAN_MODE;
12721 				if (FW_SUPPORTED(dhd, rsdb)) {
12722 					ret |= DHD_FLAG_RSDB_MODE;
12723 				}
12724 #ifdef WL_SUPPORT_MULTIP2P
12725 				if (FW_SUPPORTED(dhd, mp2p)) {
12726 					ret |= DHD_FLAG_MP2P_MODE;
12727 				}
12728 #endif /* WL_SUPPORT_MULTIP2P */
12729 #if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
12730 				return ret;
12731 #else
12732 				return 0;
12733 #endif /* WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF */
12734 			}
12735 		}
12736 	}
12737 	return 0;
12738 }
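/*
 * Editor's sketch: the return value is a bitmask, so callers OR it into
 * dhd->op_mode and test individual DHD_FLAG_* bits later. Illustrative only;
 * DHD_DOC_EXAMPLES is hypothetical.
 */
#ifdef DHD_DOC_EXAMPLES
static void dhd_concurrency_example(dhd_pub_t *dhd)
{
	uint32 caps = dhd_get_concurrent_capabilites(dhd);

	if (caps & DHD_FLAG_CONCURR_MULTI_CHAN_MODE)
		DHD_INFO(("example: VSDB/multi-channel concurrency available\n"));
	if (caps & DHD_FLAG_RSDB_MODE)
		DHD_INFO(("example: RSDB-capable firmware\n"));
}
#endif /* DHD_DOC_EXAMPLES */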
12739 #endif /* defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P) */
12740 
12741 #ifdef SUPPORT_AP_POWERSAVE
12742 #define RXCHAIN_PWRSAVE_PPS			10
12743 #define RXCHAIN_PWRSAVE_QUIET_TIME		10
12744 #define RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK	0
12745 int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable)
12746 {
12747 	int32 pps = RXCHAIN_PWRSAVE_PPS;
12748 	int32 quiet_time = RXCHAIN_PWRSAVE_QUIET_TIME;
12749 	int32 stas_assoc_check = RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK;
12750 	int ret;
12751 
12752 	if (enable) {
12753 		ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_enable", (char *)&enable, sizeof(enable),
12754 				NULL, 0, TRUE);
12755 		if (ret != BCME_OK) {
12756 			DHD_ERROR(("Failed to enable AP power save\n"));
12757 		}
12758 		ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_pps", (char *)&pps, sizeof(pps), NULL, 0,
12759 				TRUE);
12760 		if (ret != BCME_OK) {
12761 			DHD_ERROR(("Failed to set pps\n"));
12762 		}
12763 		ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_quiet_time", (char *)&quiet_time,
12764 				sizeof(quiet_time), NULL, 0, TRUE);
12765 		if (ret != BCME_OK) {
12766 			DHD_ERROR(("Failed to set quiet time\n"));
12767 		}
12768 		ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_stas_assoc_check",
12769 				(char *)&stas_assoc_check, sizeof(stas_assoc_check), NULL, 0, TRUE);
12770 		if (ret != BCME_OK) {
12771 			DHD_ERROR(("Failed to set stas assoc check\n"));
12772 		}
12773 	} else {
12774 		ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_enable", (char *)&enable, sizeof(enable),
12775 				NULL, 0, TRUE);
12776 		if (ret != BCME_OK) {
12777 			DHD_ERROR(("Failed to disable AP power save\n"));
12778 		}
12779 	}
12780 
12781 	return 0;
12782 }
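/*
 * Editor's sketch: SoftAP code would enable rxchain power save once the AP
 * interface is up; with the thresholds above (10 pps, 10 s quiet time) the
 * extra RX chains power down only under very light traffic. Illustrative
 * only; DHD_DOC_EXAMPLES is hypothetical.
 */
#ifdef DHD_DOC_EXAMPLES
static void dhd_ap_powersave_example(dhd_pub_t *dhdp)
{
	(void)dhd_set_ap_powersave(dhdp, 0, TRUE);	/* primary interface, enable */
}
#endif /* DHD_DOC_EXAMPLES */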
12783 #endif /* SUPPORT_AP_POWERSAVE */
12784 
12785 #if defined(READ_CONFIG_FROM_FILE)
12786 #include <linux/fs.h>
12787 #include <linux/ctype.h>
12788 
12789 #define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
12790 bool PM_control = TRUE;
12791 
12792 static int dhd_preinit_proc(dhd_pub_t *dhd, int ifidx, char *name, char *value)
12793 {
12794 	int var_int;
12795 	wl_country_t cspec = {{0}, -1, {0}};
12796 	char *revstr;
12797 	char *endptr = NULL;
12798 #ifdef ROAM_AP_ENV_DETECTION
12799 	int roam_env_mode = AP_ENV_INDETERMINATE;
12800 #endif /* ROAM_AP_ENV_DETECTION */
12801 
12802 	if (!strcmp(name, "country")) {
12803 		revstr = strchr(value, '/');
12804 #if defined(DHD_BLOB_EXISTENCE_CHECK)
12805 		if (dhd->is_blob) {
12806 			cspec.rev = 0;
12807 			memcpy(cspec.country_abbrev, value, WLC_CNTRY_BUF_SZ);
12808 			memcpy(cspec.ccode, value, WLC_CNTRY_BUF_SZ);
12809 		} else
12810 #endif /* DHD_BLOB_EXISTENCE_CHECK */
12811 		{
12812 			if (revstr) {
12813 				cspec.rev = strtoul(revstr + 1, &endptr, 10);
12814 				memcpy(cspec.country_abbrev, value, WLC_CNTRY_BUF_SZ);
12815 				cspec.country_abbrev[2] = '\0';
12816 				memcpy(cspec.ccode, cspec.country_abbrev, WLC_CNTRY_BUF_SZ);
12817 			} else {
12818 				cspec.rev = -1;
12819 				memcpy(cspec.country_abbrev, value, WLC_CNTRY_BUF_SZ);
12820 				memcpy(cspec.ccode, value, WLC_CNTRY_BUF_SZ);
12821 				get_customized_country_code(dhd->info->adapter,
12822 						(char *)&cspec.country_abbrev, &cspec);
12823 			}
12824 
12825 		}
12826 		DHD_ERROR(("config country code: %s, rev: %d\n",
12827 			cspec.country_abbrev, cspec.rev));
12828 		return dhd_iovar(dhd, 0, "country", (char*)&cspec, sizeof(cspec), NULL, 0, TRUE);
12829 	} else if (!strcmp(name, "roam_scan_period")) {
12830 		var_int = (int)simple_strtol(value, NULL, 0);
12831 		return dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD,
12832 			&var_int, sizeof(var_int), TRUE, 0);
12833 	} else if (!strcmp(name, "roam_delta")) {
12834 		struct {
12835 			int val;
12836 			int band;
12837 		} x;
12838 		x.val = (int)simple_strtol(value, NULL, 0);
12839 		/* x.band = WLC_BAND_AUTO; */
12840 		x.band = WLC_BAND_ALL;
12841 		return dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, &x, sizeof(x), TRUE, 0);
12842 	} else if (!strcmp(name, "roam_trigger")) {
12843 		int ret = 0;
12844 		int roam_trigger[2];
12845 
12846 		roam_trigger[0] = (int)simple_strtol(value, NULL, 0);
12847 		roam_trigger[1] = WLC_BAND_ALL;
12848 		ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, &roam_trigger,
12849 			sizeof(roam_trigger), TRUE, 0);
12850 
12851 #ifdef ROAM_AP_ENV_DETECTION
12852 		if (roam_trigger[0] == WL_AUTO_ROAM_TRIGGER) {
12853 			if (dhd_iovar(dhd, 0, "roam_env_detection",
12854 			    (char *)&roam_env_mode, sizeof(roam_env_mode), NULL,
12855 			    0, TRUE) == BCME_OK) {
12856 				dhd->roam_env_detection = TRUE;
12857 			} else {
12858 				dhd->roam_env_detection = FALSE;
12859 			}
12860 		}
12861 #endif /* ROAM_AP_ENV_DETECTION */
12862 		return ret;
12863 	} else if (!strcmp(name, "PM")) {
12864 		int ret = 0;
12865 		var_int = (int)simple_strtol(value, NULL, 0);
12866 
12867 		ret =  dhd_wl_ioctl_cmd(dhd, WLC_SET_PM,
12868 			&var_int, sizeof(var_int), TRUE, 0);
12869 
12870 #if defined(DHD_PM_CONTROL_FROM_FILE) || defined(CONFIG_PM_LOCK)
12871 		if (var_int == 0) {
12872 			g_pm_control = TRUE;
12873 			printk("%s var_int=%d don't control PM\n", __func__, var_int);
12874 		} else {
12875 			g_pm_control = FALSE;
12876 			printk("%s var_int=%d do control PM\n", __func__, var_int);
12877 		}
12878 #endif
12879 
12880 		return ret;
12881 	}
12882 	else if (!strcmp(name, "band")) {
12883 		int ret;
12884 		if (!strcmp(value, "auto"))
12885 			var_int = WLC_BAND_AUTO;
12886 		else if (!strcmp(value, "a"))
12887 			var_int = WLC_BAND_5G;
12888 		else if (!strcmp(value, "b"))
12889 			var_int = WLC_BAND_2G;
12890 		else if (!strcmp(value, "all"))
12891 			var_int = WLC_BAND_ALL;
12892 		else {
12893 			printk(" set band value should be one of auto, a, b or all\n");
12894 			var_int = WLC_BAND_AUTO;
12895 		}
12896 		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_BAND, &var_int,
12897 			sizeof(var_int), TRUE, 0)) < 0)
12898 			printk(" set band err=%d\n", ret);
12899 		return ret;
12900 	} else if (!strcmp(name, "cur_etheraddr")) {
12901 		struct ether_addr ea;
12902 		int ret;
12903 
12904 		bcm_ether_atoe(value, &ea);
12905 
12906 		ret = memcmp(&ea.octet, dhd->mac.octet, ETHER_ADDR_LEN);
12907 		if (ret == 0) {
12908 			DHD_ERROR(("%s: Same Macaddr\n", __FUNCTION__));
12909 			return 0;
12910 		}
12911 
12912 		DHD_ERROR(("%s: Change Macaddr = %02X:%02X:%02X:%02X:%02X:%02X\n", __FUNCTION__,
12913 			ea.octet[0], ea.octet[1], ea.octet[2],
12914 			ea.octet[3], ea.octet[4], ea.octet[5]));
12915 
12916 		ret = dhd_iovar(dhd, 0, "cur_etheraddr", (char*)&ea, ETHER_ADDR_LEN, NULL, 0, TRUE);
12917 		if (ret < 0) {
12918 			DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
12919 			return ret;
12920 		} else {
12921 			memcpy(dhd->mac.octet, (void *)&ea, ETHER_ADDR_LEN);
12922 			return ret;
12923 		}
12924 	} else if (!strcmp(name, "lpc")) {
12925 		int ret = 0;
12926 		var_int = (int)simple_strtol(value, NULL, 0);
12927 		if (dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
12928 			DHD_ERROR(("%s: wl down failed\n", __FUNCTION__));
12929 		}
12930 		ret = dhd_iovar(dhd, 0, "lpc", (char *)&var_int, sizeof(var_int), NULL, 0, TRUE);
12931 		if (ret < 0) {
12932 			DHD_ERROR(("%s Set lpc failed  %d\n", __FUNCTION__, ret));
12933 		}
12934 		if (dhd_wl_ioctl_cmd(dhd, WLC_UP, NULL, 0, TRUE, 0) < 0) {
12935 			DHD_ERROR(("%s: wl up failed\n", __FUNCTION__));
12936 		}
12937 		return ret;
12938 	} else if (!strcmp(name, "vht_features")) {
12939 		int ret = 0;
12940 		var_int = (int)simple_strtol(value, NULL, 0);
12941 
12942 		if (dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
12943 			DHD_ERROR(("%s: wl down failed\n", __FUNCTION__));
12944 		}
12945 		ret = dhd_iovar(dhd, 0, "vht_features", (char *)&var_int, sizeof(var_int), NULL, 0,
12946 				TRUE);
12947 		if (ret < 0) {
12948 			DHD_ERROR(("%s Set vht_features failed  %d\n", __FUNCTION__, ret));
12949 		}
12950 		if (dhd_wl_ioctl_cmd(dhd, WLC_UP, NULL, 0, TRUE, 0) < 0) {
12951 			DHD_ERROR(("%s: wl up failed\n", __FUNCTION__));
12952 		}
12953 		return ret;
12954 	} else {
12955 		/* wlu_iovar_setint */
12956 		var_int = (int)simple_strtol(value, NULL, 0);
12957 
12958 		/* Setup timeout bcm_timeout from dhd driver 4.217.48 */
12959 
12960 		DHD_INFO(("%s:[%s]=[%d]\n", __FUNCTION__, name, var_int));
12961 
12962 		return dhd_iovar(dhd, 0, name, (char *)&var_int,
12963 				sizeof(var_int), NULL, 0, TRUE);
12964 	}
12965 
12966 	return 0;
12967 }
12968 
12969 static int dhd_preinit_config(dhd_pub_t *dhd, int ifidx)
12970 {
12971 	mm_segment_t old_fs;
12972 	struct kstat stat;
12973 	struct file *fp = NULL;
12974 	int len;	/* signed: dhd_os_get_image_block() may return a negative error */
12975 	char *buf = NULL, *p, *name, *value;
12976 	int ret = 0;
12977 	char *config_path;
12978 
12979 	config_path = CONFIG_BCMDHD_CONFIG_PATH;
12980 
12981 	if (!config_path)
12982 	{
12983 		printk(KERN_ERR "config_path cannot be read\n");
12984 		return 0;
12985 	}
12986 
12987 	old_fs = get_fs();
12988 	set_fs(get_ds());
12989 	if ((ret = vfs_stat(config_path, &stat))) {
12990 		set_fs(old_fs);
12991 		printk(KERN_ERR "%s: Failed to get information (%d)\n",
12992 			config_path, ret);
12993 		return ret;
12994 	}
12995 	set_fs(old_fs);
12996 
12997 	if (!(buf = MALLOC(dhd->osh, stat.size + 1))) {
12998 		printk(KERN_ERR "Failed to allocate memory %llu bytes\n", stat.size);
12999 		return -ENOMEM;
13000 	}
13001 	memset(buf, 0x0, stat.size + 1);
13002 	printk("dhd_preinit_config : config path : %s \n", config_path);
13003 
13004 	if (!(fp = dhd_os_open_image1(dhd, config_path)) ||
13005 		(len = dhd_os_get_image_block(buf, stat.size, fp)) < 0)
13006 		goto err;
13007 
13008 	if (len != stat.size) {
13009 		printk("dhd_preinit_config : Error - read length mismatched len = %d\n", len);
13010 		goto err;
13011 	}
13012 
13013 	buf[stat.size] = '\0';
13014 	for (p = buf; *p; p++) {
13015 		if (isspace(*p))
13016 			continue;
13017 		for (name = p++; *p && !isspace(*p); p++) {
13018 			if (*p == '=') {
13019 				*p = '\0';
13020 				p++;
13021 				for (value = p; *p && !isspace(*p); p++);
13022 				*p = '\0';
13023 				if ((ret = dhd_preinit_proc(dhd, ifidx, name, value)) < 0) {
13024 					printk(KERN_ERR "%s: %s=%s\n",
13025 						bcmerrorstr(ret), name, value);
13026 				}
13027 				break;
13028 			}
13029 		}
13030 	}
13031 	ret = 0;
13032 
13033 out:
13034 	if (fp)
13035 		dhd_os_close_image1(dhd, fp);
13036 	if (buf)
13037 		MFREE(dhd->osh, buf, stat.size+1);
13038 	return ret;
13039 
13040 err:
13041 	ret = -1;
13042 	goto out;
13043 }
13044 #endif /* READ_CONFIG_FROM_FILE */
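/*
 * Editor's note: dhd_preinit_config() accepts whitespace-separated name=value
 * pairs; names not handled explicitly in dhd_preinit_proc() fall through to a
 * plain integer iovar set. A hypothetical file the parser above would accept:
 *
 *   country=US/0
 *   PM=2
 *   band=auto
 *   roam_trigger=-75
 *   lpc=1
 */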
13045 
13046 #ifdef WLAIBSS
13047 int
13048 dhd_preinit_aibss_ioctls(dhd_pub_t *dhd, char *iov_buf_smlen)
13049 {
13050 	int ret = BCME_OK;
13051 	aibss_bcn_force_config_t bcn_config;
13052 	uint32 aibss;
13053 #ifdef WLAIBSS_PS
13054 	uint32 aibss_ps;
13055 	s32 atim;
13056 #endif /* WLAIBSS_PS */
13057 	int ibss_coalesce;
13058 
13059 	aibss = 1;
13060 	ret = dhd_iovar(dhd, 0, "aibss", (char *)&aibss, sizeof(aibss), NULL, 0, TRUE);
13061 	if (ret < 0) {
13062 		if (ret == BCME_UNSUPPORTED) {
13063 			DHD_ERROR(("%s: aibss unsupported\n", __FUNCTION__));
13064 			return BCME_OK;
13065 		} else {
13066 			DHD_ERROR(("%s Set aibss to %d err(%d)\n", __FUNCTION__, aibss, ret));
13067 			return ret;
13068 		}
13069 	}
13070 
13071 #ifdef WLAIBSS_PS
13072 	aibss_ps = 1;
13073 	ret = dhd_iovar(dhd, 0, "aibss_ps", (char *)&aibss_ps, sizeof(aibss_ps), NULL, 0, TRUE);
13074 	if (ret < 0) {
13075 		DHD_ERROR(("%s Set aibss PS to %d failed  %d\n",
13076 			__FUNCTION__, aibss, ret));
13077 		return ret;
13078 	}
13079 
13080 	atim = 10;
13081 	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ATIM,
13082 		(char *)&atim, sizeof(atim), TRUE, 0)) < 0) {
13083 		DHD_ERROR(("%s Enable custom IBSS ATIM mode failed %d\n",
13084 			__FUNCTION__, ret));
13085 		return ret;
13086 	}
13087 #endif /* WLAIBSS_PS */
13088 
13089 	memset(&bcn_config, 0, sizeof(bcn_config));
13090 	bcn_config.initial_min_bcn_dur = AIBSS_INITIAL_MIN_BCN_DUR;
13091 	bcn_config.min_bcn_dur = AIBSS_MIN_BCN_DUR;
13092 	bcn_config.bcn_flood_dur = AIBSS_BCN_FLOOD_DUR;
13093 	bcn_config.version = AIBSS_BCN_FORCE_CONFIG_VER_0;
13094 	bcn_config.len = sizeof(bcn_config);
13095 
13096 	ret = dhd_iovar(dhd, 0, "aibss_bcn_force_config", (char *)&bcn_config,
13097 			sizeof(aibss_bcn_force_config_t), NULL, 0, TRUE);
13098 	if (ret < 0) {
13099 		DHD_ERROR(("%s Set aibss_bcn_force_config to %d, %d, %d failed %d\n",
13100 			__FUNCTION__, AIBSS_INITIAL_MIN_BCN_DUR, AIBSS_MIN_BCN_DUR,
13101 			AIBSS_BCN_FLOOD_DUR, ret));
13102 		return ret;
13103 	}
13104 
13105 	ibss_coalesce = IBSS_COALESCE_DEFAULT;
13106 	ret = dhd_iovar(dhd, 0, "ibss_coalesce_allowed", (char *)&ibss_coalesce,
13107 			sizeof(ibss_coalesce), NULL, 0, TRUE);
13108 	if (ret < 0) {
13109 		DHD_ERROR(("%s Set ibss_coalesce_allowed failed  %d\n",
13110 			__FUNCTION__, ret));
13111 		return ret;
13112 	}
13113 
13114 	dhd->op_mode |= DHD_FLAG_IBSS_MODE;
13115 	return BCME_OK;
13116 }
13117 #endif /* WLAIBSS */
13118 
13119 #if defined(WLADPS) || defined(WLADPS_PRIVATE_CMD)
13120 #ifdef WL_BAM
13121 static int
13122 dhd_check_adps_bad_ap(dhd_pub_t *dhd)
13123 {
13124 	struct net_device *ndev;
13125 	struct bcm_cfg80211 *cfg;
13126 	struct wl_profile *profile;
13127 	struct ether_addr bssid;
13128 
13129 	if (!dhd_is_associated(dhd, 0, NULL)) {
13130 		DHD_ERROR(("%s - not associated\n", __FUNCTION__));
13131 		return BCME_OK;
13132 	}
13133 
13134 	ndev = dhd_linux_get_primary_netdev(dhd);
13135 	if (!ndev) {
13136 		DHD_ERROR(("%s: Cannot find primary netdev\n", __FUNCTION__));
13137 		return -ENODEV;
13138 	}
13139 
13140 	cfg = wl_get_cfg(ndev);
13141 	if (!cfg) {
13142 		DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__));
13143 		return -EINVAL;
13144 	}
13145 
13146 	profile = wl_get_profile_by_netdev(cfg, ndev);
13147 	memcpy(bssid.octet, profile->bssid, ETHER_ADDR_LEN);
13148 	if (wl_adps_bad_ap_check(cfg, &bssid)) {
13149 		if (wl_adps_enabled(cfg, ndev)) {
13150 			wl_adps_set_suspend(cfg, ndev, ADPS_SUSPEND);
13151 		}
13152 	}
13153 
13154 	return BCME_OK;
13155 }
13156 #endif	/* WL_BAM */
13157 
13158 int
13159 dhd_enable_adps(dhd_pub_t *dhd, uint8 on)
13160 {
13161 	int i;
13162 	int len;
13163 	int ret = BCME_OK;
13164 
13165 	bcm_iov_buf_t *iov_buf = NULL;
13166 	wl_adps_params_v1_t *data = NULL;
13167 
13168 	len = OFFSETOF(bcm_iov_buf_t, data) + sizeof(*data);
13169 	iov_buf = MALLOC(dhd->osh, len);
13170 	if (iov_buf == NULL) {
13171 		DHD_ERROR(("%s - failed to allocate %d bytes for iov_buf\n", __FUNCTION__, len));
13172 		ret = BCME_NOMEM;
13173 		goto exit;
13174 	}
13175 
13176 	iov_buf->version = WL_ADPS_IOV_VER;
13177 	iov_buf->len = sizeof(*data);
13178 	iov_buf->id = WL_ADPS_IOV_MODE;
13179 
13180 	data = (wl_adps_params_v1_t *)iov_buf->data;
13181 	data->version = ADPS_SUB_IOV_VERSION_1;
13182 	data->length = sizeof(*data);
13183 	data->mode = on;
13184 
13185 	for (i = 1; i <= MAX_BANDS; i++) {
13186 		data->band = i;
13187 		ret = dhd_iovar(dhd, 0, "adps", (char *)iov_buf, len, NULL, 0, TRUE);
13188 		if (ret < 0) {
13189 			if (ret == BCME_UNSUPPORTED) {
13190 				DHD_ERROR(("%s adps, UNSUPPORTED\n", __FUNCTION__));
13191 				ret = BCME_OK;
13192 				goto exit;
13193 			}
13194 			else {
13195 				DHD_ERROR(("%s fail to set adps %s for band %d (%d)\n",
13196 					__FUNCTION__, on ? "On" : "Off", i, ret));
13197 				goto exit;
13198 			}
13199 		}
13200 	}
13201 
13202 #ifdef WL_BAM
13203 	if (on) {
13204 		dhd_check_adps_bad_ap(dhd);
13205 	}
13206 #endif	/* WL_BAM */
13207 
13208 exit:
13209 	if (iov_buf) {
13210 		MFREE(dhd->osh, iov_buf, len);
13211 	}
13212 	return ret;
13213 }
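/*
 * Editor's note: "adps" is a versioned iovar. dhd_enable_adps() builds a
 * bcm_iov_buf_t header (id = WL_ADPS_IOV_MODE) carrying a wl_adps_params_v1_t
 * payload and sends it once per band. Hedged layout sketch using the field
 * names from the structures above:
 *
 *   bcm_iov_buf_t       { version, len = sizeof(payload), id = WL_ADPS_IOV_MODE }
 *   wl_adps_params_v1_t { version, length, mode = on, band = 1..MAX_BANDS }
 */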
13214 #endif /* WLADPS || WLADPS_PRIVATE_CMD */
13215 
13216 int
13217 dhd_get_preserve_log_numbers(dhd_pub_t *dhd, uint32 *logset_mask)
13218 {
13219 	wl_el_set_type_t logset_type, logset_op;
13220 	wl_el_set_all_type_v1_t *logset_all_type_op = NULL;
13221 	bool use_logset_all_type = FALSE;
13222 	int ret = BCME_ERROR;
13223 	int err = 0;
13224 	uint8 i = 0;
13225 	int el_set_all_type_len;
13226 
13227 	if (!dhd || !logset_mask)
13228 		return BCME_BADARG;
13229 
13230 	el_set_all_type_len = OFFSETOF(wl_el_set_all_type_v1_t, set_type) +
13231 		(sizeof(wl_el_set_type_v1_t) * dhd->event_log_max_sets);
13232 
13233 	logset_all_type_op = (wl_el_set_all_type_v1_t *) MALLOC(dhd->osh, el_set_all_type_len);
13234 	if (logset_all_type_op == NULL) {
13235 		DHD_ERROR(("%s: failed to allocate %d bytes for logset_all_type_op\n",
13236 			__FUNCTION__, el_set_all_type_len));
13237 		return BCME_NOMEM;
13238 	}
13239 
13240 	*logset_mask = 0;
13241 	memset(&logset_type, 0, sizeof(logset_type));
13242 	memset(&logset_op, 0, sizeof(logset_op));
13243 	logset_type.version = htod16(EVENT_LOG_SET_TYPE_CURRENT_VERSION);
13244 	logset_type.len = htod16(sizeof(wl_el_set_type_t));
13245 
13246 	/* Try with set = event_log_max_sets, if fails, use legacy event_log_set_type */
13247 	logset_type.set = dhd->event_log_max_sets;
13248 	err = dhd_iovar(dhd, 0, "event_log_set_type", (char *)&logset_type, sizeof(logset_type),
13249 		(char *)logset_all_type_op, el_set_all_type_len, FALSE);
13250 	if (err == BCME_OK) {
13251 		DHD_ERROR(("%s: use optimised use_logset_all_type\n", __FUNCTION__));
13252 		use_logset_all_type = TRUE;
13253 	}
13254 
13255 	for (i = 0; i < dhd->event_log_max_sets; i++) {
13256 		if (use_logset_all_type) {
13257 			logset_op.type = logset_all_type_op->set_type[i].type_val;
13258 		} else {
13259 			logset_type.set = i;
13260 			err = dhd_iovar(dhd, 0, "event_log_set_type", (char *)&logset_type,
13261 				sizeof(logset_type), (char *)&logset_op, sizeof(logset_op), FALSE);
13262 		}
13263 		/* the iovar may return 'unsupported' error if a log set number is not present
13264 		* in the fw, so we should not return on error !
13265 		*/
13266 		if (err == BCME_OK &&
13267 				logset_op.type == EVENT_LOG_SET_TYPE_PRSRV) {
13268 			*logset_mask |= 0x01u << i;
13269 			ret = BCME_OK;
13270 			DHD_ERROR(("[INIT] logset:%d is preserve/chatty\n", i));
13271 		}
13272 	}
13273 
13274 	MFREE(dhd->osh, logset_all_type_op, el_set_all_type_len);
13275 	return ret;
13276 }
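/*
 * Editor's sketch: bit i of the returned mask is set when firmware log set i
 * is a preserve set, so dump paths flush only those sets. Illustrative only;
 * the helper name and DHD_DOC_EXAMPLES guard are hypothetical.
 */
#ifdef DHD_DOC_EXAMPLES
static void dhd_logset_mask_example(dhd_pub_t *dhd)
{
	uint32 mask = 0;
	uint8 i;

	if (dhd_get_preserve_log_numbers(dhd, &mask) != BCME_OK)
		return;
	for (i = 0; i < 32; i++) {
		if (mask & (0x01u << i))
			DHD_INFO(("example: log set %u is preserved\n", i));
	}
}
#endif /* DHD_DOC_EXAMPLES */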
13277 
13278 #ifndef OEM_ANDROID
13279 /* For non-android FC modular builds, override firmware preinited values */
13280 void
13281 dhd_override_fwprenit(dhd_pub_t * dhd)
13282 {
13283 	int ret = 0;
13284 
13285 	{
13286 		/* Disable bcn_li_bcn */
13287 		uint32 bcn_li_bcn = 0;
13288 		ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn,
13289 				sizeof(bcn_li_bcn), NULL, 0, TRUE);
13290 		if (ret < 0) {
13291 			DHD_ERROR(("%s: bcn_li_bcn failed:%d\n",
13292 				__FUNCTION__, ret));
13293 		}
13294 	}
13295 
13296 	{
13297 		/* Disable apsta */
13298 		uint32 apsta = 0;
13299 		ret = dhd_iovar(dhd, 0, "apsta", (char *)&apsta,
13300 				sizeof(apsta), NULL, 0, TRUE);
13301 		if (ret < 0) {
13302 			DHD_ERROR(("%s: apsta failed:%d\n",
13303 				__FUNCTION__, ret));
13304 		}
13305 	}
13306 
13307 	{
13308 		int ap_mode = 0;
13309 		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_AP, (char *)&ap_mode,
13310 			sizeof(ap_mode), TRUE, 0)) < 0) {
13311 			DHD_ERROR(("%s: set apmode failed :%d\n", __FUNCTION__, ret));
13312 		}
13313 	}
13314 }
13315 #endif /* !OEM_ANDROID */
13316 
13317 int
13318 dhd_get_fw_capabilities(dhd_pub_t * dhd)
13319 {
13320 
13321 	int ret = 0;
13322 	uint32 cap_buf_size = sizeof(dhd->fw_capabilities);
13323 	memset(dhd->fw_capabilities, 0, cap_buf_size);
13324 	ret = dhd_iovar(dhd, 0, "cap", NULL, 0, dhd->fw_capabilities, (cap_buf_size - 1),
13325 		FALSE);
13326 
13327 	if (ret < 0) {
13328 		DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
13329 		__FUNCTION__, ret));
13330 		return ret;
13331 	}
13332 
13333 	memmove(&dhd->fw_capabilities[1], dhd->fw_capabilities, (cap_buf_size - 1));
13334 	dhd->fw_capabilities[0] = ' ';
13335 	dhd->fw_capabilities[cap_buf_size - 2] = ' ';
13336 	dhd->fw_capabilities[cap_buf_size - 1] = '\0';
13337 
13338 	return 0;
13339 }
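/*
 * Editor's note: dhd_get_fw_capabilities() deliberately brackets the
 * space-separated capability string with spaces so that a token can be
 * matched as " cap " without hitting substrings (e.g. "p2p" inside "mp2p");
 * the in-tree FW_SUPPORTED() macro relies on this. Hedged whole-token test;
 * the helper name and DHD_DOC_EXAMPLES guard are hypothetical.
 */
#ifdef DHD_DOC_EXAMPLES
static bool dhd_fw_has_cap_example(dhd_pub_t *dhd, const char *cap)
{
	char token[64];

	if (snprintf(token, sizeof(token), " %s ", cap) >= (int)sizeof(token))
		return FALSE;	/* capability name too long to match safely */
	return strstr(dhd->fw_capabilities, token) != NULL;
}
#endif /* DHD_DOC_EXAMPLES */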
13340 
13341 int
13342 dhd_optimised_preinit_ioctls(dhd_pub_t * dhd)
13343 {
13344 	int ret = 0;
13345 	/*  Room for "event_msgs_ext" + '\0' + bitvec  */
13346 	char iovbuf[WL_EVENTING_MASK_EXT_LEN + EVENTMSGS_EXT_STRUCT_SIZE + 16];
13347 #ifdef DHD_PKTTS
13348 	uint32 val = 0;
13349 #endif
13350 	uint32 event_log_max_sets = 0;
13351 	char* iov_buf = NULL;
13352 	/* XXX: Use ret2 for return check of IOVARS that might return BCME_UNSUPPORTED,
13353 	*	based on FW build tag.
13354 	*/
13355 	int ret2 = 0;
13356 #if defined(WL_MONITOR) && defined(HOST_RADIOTAP_CONV)
13357 	uint monitor = 0;
13358 	dhd_info_t *dhdinfo = (dhd_info_t*)dhd->info;
13359 #endif /* WL_MONITOR && HOST_RADIOTAP_CONV */
13360 #if defined(BCMSUP_4WAY_HANDSHAKE)
13361 	uint32 sup_wpa = 1;
13362 #endif /* BCMSUP_4WAY_HANDSHAKE */
13363 
13364 	uint32 frameburst = CUSTOM_FRAMEBURST_SET;
13365 	uint wnm_bsstrans_resp = 0;
13366 #ifdef DHD_BUS_MEM_ACCESS
13367 	uint32 enable_memuse = 1;
13368 #endif /* DHD_BUS_MEM_ACCESS */
13369 #ifdef DHD_PM_CONTROL_FROM_FILE
13370 	uint power_mode = PM_FAST;
13371 #endif /* DHD_PM_CONTROL_FROM_FILE */
13372 	char buf[WLC_IOCTL_SMLEN];
13373 	char *ptr;
13374 #ifdef ROAM_ENABLE
13375 	uint roamvar = 0;
13376 #ifdef ROAM_AP_ENV_DETECTION
13377 	int roam_env_mode = 0;
13378 #endif /* ROAM_AP_ENV_DETECTION */
13379 #endif /* ROAM_ENABLE */
13380 #if defined(SOFTAP)
13381 	uint dtim = 1;
13382 #endif
13383 /* xxx andrey tmp fix for dk8000 build error  */
13384 	struct ether_addr p2p_ea;
13385 #ifdef GET_CUSTOM_MAC_ENABLE
13386 	struct ether_addr ea_addr;
13387 #endif /* GET_CUSTOM_MAC_ENABLE */
13388 #ifdef BCMPCIE_OOB_HOST_WAKE
13389 	uint32 hostwake_oob = 0;
13390 #endif /* BCMPCIE_OOB_HOST_WAKE */
13391 	wl_wlc_version_t wlc_ver;
13392 
13393 #if defined(WBTEXT) && defined(RRM_BCNREQ_MAX_CHAN_TIME)
13394 	uint32 rrm_bcn_req_thrtl_win = RRM_BCNREQ_MAX_CHAN_TIME * 2;
13395 	uint32 rrm_bcn_req_max_off_chan_time = RRM_BCNREQ_MAX_CHAN_TIME;
13396 #endif /* WBTEXT && RRM_BCNREQ_MAX_CHAN_TIME */
13397 #ifdef PKT_FILTER_SUPPORT
13398 	dhd_pkt_filter_enable = TRUE;
13399 #ifdef APF
13400 	dhd->apf_set = FALSE;
13401 #endif /* APF */
13402 #endif /* PKT_FILTER_SUPPORT */
13403 	dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
13404 #ifdef ENABLE_MAX_DTIM_IN_SUSPEND
13405 	dhd->max_dtim_enable = TRUE;
13406 #else
13407 	dhd->max_dtim_enable = FALSE;
13408 #endif /* ENABLE_MAX_DTIM_IN_SUSPEND */
13409 	dhd->disable_dtim_in_suspend = FALSE;
13410 #ifdef CUSTOM_SET_OCLOFF
13411 	dhd->ocl_off = FALSE;
13412 #endif /* CUSTOM_SET_OCLOFF */
13413 #ifdef SUPPORT_SET_TID
13414 	dhd->tid_mode = SET_TID_OFF;
13415 	dhd->target_uid = 0;
13416 	dhd->target_tid = 0;
13417 #endif /* SUPPORT_SET_TID */
13418 	DHD_TRACE(("Enter %s\n", __FUNCTION__));
13419 	dhd->op_mode = 0;
13420 
13421 #ifdef ARP_OFFLOAD_SUPPORT
13422 	/* arpoe will be applied from the suspend context */
13423 	dhd->arpoe_enable = TRUE;
13424 	dhd->arpol_configured = FALSE;
13425 #endif /* ARP_OFFLOAD_SUPPORT */
13426 
13427 	/* clear AP flags */
13428 #if defined(CUSTOM_COUNTRY_CODE)
13429 	dhd->dhd_cflags &= ~WLAN_PLAT_AP_FLAG;
13430 #endif /* CUSTOM_COUNTRY_CODE */
13431 
13432 #ifdef CUSTOMER_HW4_DEBUG
13433 	if (!dhd_validate_chipid(dhd)) {
13434 		DHD_ERROR(("%s: CONFIG_BCMXXX and CHIP ID(%x) is mismatched\n",
13435 			__FUNCTION__, dhd_bus_chip_id(dhd)));
13436 #ifndef SUPPORT_MULTIPLE_CHIPS
13437 		ret = BCME_BADARG;
13438 		goto done;
13439 #endif /* !SUPPORT_MULTIPLE_CHIPS */
13440 	}
13441 #endif /* CUSTOMER_HW4_DEBUG */
13442 
13443 	/* query for 'ver' to get version info from firmware */
13444 	memset(buf, 0, sizeof(buf));
13445 	ptr = buf;
13446 	ret = dhd_iovar(dhd, 0, "ver", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
13447 	if (ret < 0)
13448 		DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
13449 	else {
13450 		bcmstrtok(&ptr, "\n", 0);
13451 		/* Print fw version info */
13452 		DHD_ERROR(("Firmware version = %s\n", buf));
13453 		strncpy(fw_version, buf, FW_VER_STR_LEN);
13454 		fw_version[FW_VER_STR_LEN-1] = '\0';
13455 #if defined(BCMSDIO) || defined(BCMPCIE)
13456 		dhd_set_version_info(dhd, buf);
13457 #endif /* BCMSDIO || BCMPCIE */
13458 	}
13459 
13460 	/* query for 'wlc_ver' to get version info from firmware */
13461 	/* memsetting to zero */
13462 	memset_s(&wlc_ver, sizeof(wl_wlc_version_t), 0,
13463 		sizeof(wl_wlc_version_t));
13464 	ret = dhd_iovar(dhd, 0, "wlc_ver", NULL, 0, (char *)&wlc_ver,
13465 			sizeof(wl_wlc_version_t), FALSE);
13466 	if (ret < 0)
13467 		DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
13468 	else {
13469 		dhd->wlc_ver_major = wlc_ver.wlc_ver_major;
13470 		dhd->wlc_ver_minor = wlc_ver.wlc_ver_minor;
13471 	}
13472 #ifdef BOARD_HIKEY
13473 	/* Set op_mode as MFG_MODE if WLTEST is present in "wl ver" */
13474 	if (strstr(fw_version, "WLTEST") != NULL) {
13475 		DHD_ERROR(("%s: wl ver has WLTEST, setting op_mode as DHD_FLAG_MFG_MODE\n",
13476 			__FUNCTION__));
13477 		op_mode = DHD_FLAG_MFG_MODE;
13478 	}
13479 #endif /* BOARD_HIKEY */
13480 	/* get capabilities from firmware */
13481 	ret = dhd_get_fw_capabilities(dhd);
13482 
13483 	if (ret < 0) {
13484 		DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
13485 			__FUNCTION__, ret));
13486 		goto done;
13487 	}
13488 
13489 	if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
13490 		(op_mode == DHD_FLAG_MFG_MODE)) {
13491 		dhd->op_mode = DHD_FLAG_MFG_MODE;
13492 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
13493 		/* disable runtimePM by default in MFG mode. */
13494 		pm_runtime_disable(dhd_bus_to_dev(dhd->bus));
13495 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
13496 #ifdef DHD_PCIE_RUNTIMEPM
13497 		/* Disable RuntimePM in mfg mode */
13498 		DHD_DISABLE_RUNTIME_PM(dhd);
13499 		DHD_ERROR(("%s : Disable RuntimePM in Manufacturing Firmware\n", __FUNCTION__));
13500 #endif /* DHD_PCIE_RUNTIMEPM */
13501 		/* Check and adjust IOCTL response timeout for Manufacturing firmware */
13502 		dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT);
13503 		DHD_ERROR(("%s : Set IOCTL response time for Manufacturing Firmware\n",
13504 			__FUNCTION__));
13505 
13506 #if defined(ARP_OFFLOAD_SUPPORT)
13507 		dhd->arpoe_enable = FALSE;
13508 #endif /* ARP_OFFLOAD_SUPPORT */
13509 #ifdef PKT_FILTER_SUPPORT
13510 		dhd_pkt_filter_enable = FALSE;
13511 #endif /* PKT_FILTER_SUPPORT */
13512 #ifndef CUSTOM_SET_ANTNPM
13513 		if (FW_SUPPORTED(dhd, rsdb)) {
13514 			wl_config_t rsdb_mode;
13515 			memset(&rsdb_mode, 0, sizeof(rsdb_mode));
13516 			ret = dhd_iovar(dhd, 0, "rsdb_mode", (char *)&rsdb_mode, sizeof(rsdb_mode),
13517 				NULL, 0, TRUE);
13518 			if (ret < 0) {
13519 				DHD_ERROR(("%s disabling rsdb_mode failed ret= %d\n",
13520 					__FUNCTION__, ret));
13521 			}
13522 		}
13523 #endif /* !CUSTOM_SET_ANTNPM */
13524 	} else {
13525 		uint32 concurrent_mode = 0;
13526 		dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
13527 		DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__));
13528 
13529 		BCM_REFERENCE(concurrent_mode);
13530 
13531 		dhd->op_mode = DHD_FLAG_STA_MODE;
13532 
13533 		BCM_REFERENCE(p2p_ea);
13534 #if defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P)
13535 		if ((concurrent_mode = dhd_get_concurrent_capabilites(dhd))) {
13536 			dhd->op_mode |= concurrent_mode;
13537 		}
13538 
13539 		/* Check if we are enabling p2p */
13540 		if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
13541 			memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN);
13542 			ETHER_SET_LOCALADDR(&p2p_ea);
13543 			ret = dhd_iovar(dhd, 0, "p2p_da_override", (char *)&p2p_ea, sizeof(p2p_ea),
13544 					NULL, 0, TRUE);
13545 			if (ret < 0)
13546 				DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__, ret));
13547 			else
13548 				DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n"));
13549 		}
13550 #endif /* defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P) */
13551 
13552 	}
13553 
13554 #ifdef BCMPCIE_OOB_HOST_WAKE
13555 	ret = dhd_iovar(dhd, 0, "bus:hostwake_oob", NULL, 0, (char *)&hostwake_oob,
13556 		sizeof(hostwake_oob), FALSE);
13557 	if (ret < 0) {
13558 		DHD_ERROR(("%s: hostwake_oob IOVAR not present, proceed\n", __FUNCTION__));
13559 	} else {
13560 		if (hostwake_oob == 0) {
13561 			DHD_ERROR(("%s: hostwake_oob is not enabled in the NVRAM, STOP\n",
13562 				__FUNCTION__));
13563 			ret = BCME_UNSUPPORTED;
13564 			goto done;
13565 		} else {
13566 			DHD_ERROR(("%s: hostwake_oob enabled\n", __FUNCTION__));
13567 		}
13568 	}
13569 #endif /* BCMPCIE_OOB_HOST_WAKE */
13570 
13571 #ifdef DNGL_AXI_ERROR_LOGGING
13572 	ret = dhd_iovar(dhd, 0, "axierror_logbuf_addr", NULL, 0, (char *)&dhd->axierror_logbuf_addr,
13573 		sizeof(dhd->axierror_logbuf_addr), FALSE);
13574 	if (ret < 0) {
13575 		DHD_ERROR(("%s: axierror_logbuf_addr IOVAR not present, proceed\n", __FUNCTION__));
13576 		dhd->axierror_logbuf_addr = 0;
13577 	} else {
13578 		DHD_ERROR(("%s: axierror_logbuf_addr : 0x%x\n",
13579 			__FUNCTION__, dhd->axierror_logbuf_addr));
13580 	}
13581 #endif /* DNGL_AXI_ERROR_LOGGING */
13582 
13583 #ifdef GET_CUSTOM_MAC_ENABLE
13584 	ret = wifi_platform_get_mac_addr(dhd->info->adapter, ea_addr.octet, iface_name);
13585 	if (!ret) {
13586 		ret = dhd_iovar(dhd, 0, "cur_etheraddr", (char *)&ea_addr, ETHER_ADDR_LEN, NULL, 0,
13587 				TRUE);
13588 		if (ret < 0) {
13589 			DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
13590 			ret = BCME_NOTUP;
13591 			goto done;
13592 		}
13593 		memcpy(dhd->mac.octet, ea_addr.octet, ETHER_ADDR_LEN);
13594 	} else
13595 #endif /* GET_CUSTOM_MAC_ENABLE */
13596 	{
13597 		/* Get the default device MAC address directly from firmware */
13598 		ret = dhd_iovar(dhd, 0, "cur_etheraddr", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
13599 		if (ret < 0) {
13600 			DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__, ret));
13601 			ret = BCME_NOTUP;
13602 			goto done;
13603 		}
13604 
13605 		DHD_ERROR(("%s: use firmware generated mac_address "MACDBG"\n",
13606 			__FUNCTION__, MAC2STRDBG(&buf)));
13607 
13608 #ifdef MACADDR_PROVISION_ENFORCED
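		/*
		 * Rationale (inferred from the check below): an address with
		 * the locally-administered bit set cannot be a factory
		 * provisioned MAC, so it is treated as fatal here.
		 */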
13609 		if (ETHER_IS_LOCALADDR(buf)) {
13610 			DHD_ERROR(("%s: error! not using provision mac addr!\n", __FUNCTION__));
13611 			ret = BCME_BADADDR;
13612 			goto done;
13613 		}
13614 #endif /* MACADDR_PROVISION_ENFORCED */
13615 
13616 		/* Update public MAC address after reading from Firmware */
13617 		memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
13618 	}
13619 
13620 	if (ETHER_ISNULLADDR(dhd->mac.octet)) {
13621 		DHD_ERROR(("%s: NULL MAC address during pre-init\n", __FUNCTION__));
13622 		ret = BCME_BADADDR;
13623 		goto done;
13624 	} else {
13625 		(void)memcpy_s(dhd_linux_get_primary_netdev(dhd)->perm_addr, ETHER_ADDR_LEN,
13626 			dhd->mac.octet, ETHER_ADDR_LEN);
13627 	}
13628 
13629 	if ((ret = dhd_apply_default_clm(dhd, dhd->clm_path)) < 0) {
13630 		DHD_ERROR(("%s: CLM set failed. Abort initialization.\n", __FUNCTION__));
13631 		goto done;
13632 	}
13633 
13634 	DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n",
13635 		dhd->op_mode, MAC2STRDBG(dhd->mac.octet)));
13636 #if defined(DHD_BLOB_EXISTENCE_CHECK)
13637 	if (!dhd->is_blob)
13638 #endif /* DHD_BLOB_EXISTENCE_CHECK */
13639 	{
13640 		/* get a ccode and revision for the country code */
13641 #if defined(CUSTOM_COUNTRY_CODE)
13642 		get_customized_country_code(dhd->info->adapter, dhd->dhd_cspec.country_abbrev,
13643 			&dhd->dhd_cspec, dhd->dhd_cflags);
13644 #else
13645 		get_customized_country_code(dhd->info->adapter, dhd->dhd_cspec.country_abbrev,
13646 			&dhd->dhd_cspec);
13647 #endif /* CUSTOM_COUNTRY_CODE */
13648 	}
13649 
13650 #if defined(RXFRAME_THREAD) && defined(RXTHREAD_ONLYSTA)
13651 	if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE)
13652 		dhd->info->rxthread_enabled = FALSE;
13653 	else
13654 		dhd->info->rxthread_enabled = TRUE;
13655 #endif
13656 	/* Set Country code  */
13657 	if (dhd->dhd_cspec.ccode[0] != 0) {
13658 		ret = dhd_iovar(dhd, 0, "country", (char *)&dhd->dhd_cspec, sizeof(wl_country_t),
13659 				NULL, 0, TRUE);
13660 		if (ret < 0)
13661 			DHD_ERROR(("%s: country code setting failed\n", __FUNCTION__));
13662 	}
13663 
13664 #if defined(ROAM_ENABLE)
13665 	BCM_REFERENCE(roamvar);
13666 #ifdef USE_WFA_CERT_CONF
13667 	if (sec_get_param_wfa_cert(dhd, SET_PARAM_ROAMOFF, &roamvar) == BCME_OK) {
13668 		DHD_ERROR(("%s: read roam_off param =%d\n", __FUNCTION__, roamvar));
13669 	}
13670 	/* roamvar is set to 0 by preinit fw, change only if roamvar is non-zero */
13671 	if (roamvar != 0) {
13672 		/* Disable built-in roaming to allow the ext supplicant to take care of roaming */
13673 		ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar, sizeof(roamvar), NULL, 0,
13674 			TRUE);
13675 		if (ret < 0) {
13676 			DHD_ERROR(("%s roam_off failed %d\n", __FUNCTION__, ret));
13677 		}
13678 	}
13679 #endif /* USE_WFA_CERT_CONF */
13680 
13681 #ifdef ROAM_AP_ENV_DETECTION
13682 	/* Changed to GET iovar to read roam_env_mode */
13683 	dhd->roam_env_detection = FALSE;
13684 	ret = dhd_iovar(dhd, 0, "roam_env_detection", NULL, 0, (char *)&roam_env_mode,
13685 			sizeof(roam_env_mode), FALSE);
13686 	if (ret < 0) {
13687 		DHD_ERROR(("%s: roam_env_detection IOVAR not present\n", __FUNCTION__));
13688 	} else {
13689 		if (roam_env_mode == AP_ENV_INDETERMINATE) {
13690 			dhd->roam_env_detection = TRUE;
13691 		}
13692 	}
13693 #endif /* ROAM_AP_ENV_DETECTION */
13694 #ifdef CONFIG_ROAM_RSSI_LIMIT
13695 	ret = dhd_roam_rssi_limit_set(dhd, CUSTOM_ROAMRSSI_2G, CUSTOM_ROAMRSSI_5G);
13696 	if (ret < 0) {
13697 		DHD_ERROR(("%s set roam_rssi_limit failed ret %d\n", __FUNCTION__, ret));
13698 	}
13699 #endif /* CONFIG_ROAM_RSSI_LIMIT */
13700 #ifdef CONFIG_ROAM_MIN_DELTA
13701 	ret = dhd_roam_min_delta_set(dhd, CUSTOM_ROAM_MIN_DELTA, CUSTOM_ROAM_MIN_DELTA);
13702 	if (ret < 0) {
13703 		DHD_ERROR(("%s set roam_min_delta failed ret %d\n", __FUNCTION__, ret));
13704 	}
13705 #endif /* CONFIG_ROAM_MIN_DELTA */
13706 #endif /* ROAM_ENABLE */
13707 
13708 #ifdef WLTDLS
13709 	dhd->tdls_enable = FALSE;
13710 	/* query tdls_enable */
13711 	ret = dhd_iovar(dhd, 0, "tdls_enable", NULL, 0, (char *)&dhd->tdls_enable,
13712 		sizeof(dhd->tdls_enable), FALSE);
13713 	DHD_ERROR(("%s: tdls_enable=%d ret=%d\n", __FUNCTION__, dhd->tdls_enable, ret));
13714 #endif /* WLTDLS */
13715 
13716 #ifdef DHD_PM_CONTROL_FROM_FILE
13717 #ifdef CUSTOMER_HW10
13718 	dhd_control_pm(dhd, &power_mode);
13719 #else
13720 	sec_control_pm(dhd, &power_mode);
13721 #endif /* CUSTOMER_HW10 */
13722 #endif /* DHD_PM_CONTROL_FROM_FILE */
13723 
13724 #ifdef MIMO_ANT_SETTING
13725 	dhd_sel_ant_from_file(dhd);
13726 #endif /* MIMO_ANT_SETTING */
13727 
13728 #if defined(OEM_ANDROID) && defined(SOFTAP)
13729 	if (ap_fw_loaded == TRUE) {
13730 		dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0);
13731 	}
13732 #endif /* defined(OEM_ANDROID) && defined(SOFTAP) */
13733 
13734 #if defined(KEEP_ALIVE)
13735 	/* Set Keep Alive : be sure to use FW with -keepalive */
13736 	if (!(dhd->op_mode &
13737 		(DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
13738 		if ((ret = dhd_keep_alive_onoff(dhd)) < 0)
13739 			DHD_ERROR(("%s set keepalive failed %d\n",
13740 			__FUNCTION__, ret));
13741 	}
13742 #endif /* defined(KEEP_ALIVE) */
13743 
13744 	ret = dhd_iovar(dhd, 0, "event_log_max_sets", NULL, 0, (char *)&event_log_max_sets,
13745 		sizeof(event_log_max_sets), FALSE);
13746 	if (ret == BCME_OK) {
13747 		dhd->event_log_max_sets = event_log_max_sets;
13748 	} else {
13749 		dhd->event_log_max_sets = NUM_EVENT_LOG_SETS;
13750 	}
13751 	BCM_REFERENCE(iovbuf);
13752 	/* Make sure max_sets is set first with wmb and then sets_queried,
13753 	 * this will be used during parsing the logsets in the reverse order.
13754 	 */
13755 	OSL_SMP_WMB();
13756 	dhd->event_log_max_sets_queried = TRUE;
13757 	DHD_ERROR(("%s: event_log_max_sets: %d ret: %d\n",
13758 		__FUNCTION__, dhd->event_log_max_sets, ret));
13759 #ifdef DHD_BUS_MEM_ACCESS
13760 	ret = dhd_iovar(dhd, 0, "enable_memuse", (char *)&enable_memuse,
13761 			sizeof(enable_memuse), iovbuf, sizeof(iovbuf), FALSE);
13762 	if (ret < 0) {
13763 		DHD_ERROR(("%s: enable_memuse failed ret=%d\n",
13764 			__FUNCTION__, ret));
13765 	} else {
13766 		DHD_ERROR(("%s: enable_memuse = %d\n",
13767 			__FUNCTION__, enable_memuse));
13768 	}
13769 #endif /* DHD_BUS_MEM_ACCESS */
13770 
13771 #ifdef USE_WFA_CERT_CONF
13772 #ifdef USE_WL_FRAMEBURST
13773 	 if (sec_get_param_wfa_cert(dhd, SET_PARAM_FRAMEBURST, &frameburst) == BCME_OK) {
13774 		DHD_ERROR(("%s, read frameburst param=%d\n", __FUNCTION__, frameburst));
13775 	 }
13776 #endif /* USE_WL_FRAMEBURST */
13777 	 g_frameburst = frameburst;
13778 #endif /* USE_WFA_CERT_CONF */
13779 
13780 #ifdef DISABLE_WL_FRAMEBURST_SOFTAP
13781 	/* Disable Framebursting for SoftAP */
13782 	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
13783 		frameburst = 0;
13784 	}
13785 #endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
13786 
13787 	BCM_REFERENCE(frameburst);
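	/*
	 * Note (an assumption based on the wl driver's ioctl table, where
	 * "frameburst" maps to WLC_GET/SET_FAKEFRAG): frameburst is toggled
	 * through the WLC_SET_FAKEFRAG ioctl below; the two names share an
	 * ioctl id for historical reasons.
	 */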
13788 #if defined(USE_WL_FRAMEBURST) || defined(DISABLE_WL_FRAMEBURST_SOFTAP)
13789 	/* frameburst is set to 1 by preinit fw; change it only if a different value is requested */
13790 	if (frameburst != 1) {
13791 		/* Set frameburst to value */
13792 		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst,
13793 			sizeof(frameburst), TRUE, 0)) < 0) {
13794 			DHD_INFO(("%s frameburst not supported	%d\n", __FUNCTION__, ret));
13795 		}
13796 	}
13797 #endif /* USE_WL_FRAMEBURST || DISABLE_WL_FRAMEBURST_SOFTAP */
13798 
13799 	iov_buf = (char*)MALLOC(dhd->osh, WLC_IOCTL_SMLEN);
13800 	if (iov_buf == NULL) {
13801 		DHD_ERROR(("failed to allocate %d bytes for iov_buf\n", WLC_IOCTL_SMLEN));
13802 		ret = BCME_NOMEM;
13803 		goto done;
13804 	}
13805 
13806 #if defined(BCMSUP_4WAY_HANDSHAKE)
13807 	/* Read 4-way handshake requirements */
13808 	if (dhd_use_idsup == 1) {
13809 		ret = dhd_iovar(dhd, 0, "sup_wpa", (char *)&sup_wpa, sizeof(sup_wpa),
13810 				(char *)&iovbuf, sizeof(iovbuf), FALSE);
13811 		/* sup_wpa iovar returns NOTREADY status on some platforms using modularized
13812 		 * in-dongle supplicant.
13813 		 */
13814 		if (ret >= 0 || ret == BCME_NOTREADY)
13815 			dhd->fw_4way_handshake = TRUE;
13816 		DHD_TRACE(("4-way handshake mode is: %d\n", dhd->fw_4way_handshake));
13817 	}
13818 #endif /* BCMSUP_4WAY_HANDSHAKE */
13819 
13820 #if defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING)
13821 	dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
13822 #endif /* defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING) */
13823 
13824 #if defined(BCMPCIE) && defined(EAPOL_PKT_PRIO)
13825 	dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
13826 #endif /* defined(BCMPCIE) && defined(EAPOL_PKT_PRIO) */
13827 
13828 #ifdef ARP_OFFLOAD_SUPPORT
13829 	DHD_ERROR(("arp_enable:%d arp_ol:%d\n",
13830 		dhd->arpoe_enable, dhd->arpol_configured));
13831 #endif /* ARP_OFFLOAD_SUPPORT */
13832 	/*
13833 	 * Retaining pktfilter temporarily; once fw preinit includes this,
13834 	 * it will be removed. Take care to skip the pktfilter check during
13835 	 * each pktfilter removal.
13836 	 */
13837 #ifdef PKT_FILTER_SUPPORT
13838 	/* Setup default definitions for pktfilter, enabled in suspend */
13839 	dhd->pktfilter_count = 6;
13840 	dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL;
13841 	if (!FW_SUPPORTED(dhd, pf6)) {
13842 		dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL;
13843 		dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
13844 	} else {
13845 		/* Immediately pkt filter TYPE 6 Discard IPv4/IPv6 Multicast Packet */
13846 		dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = DISCARD_IPV4_MCAST;
13847 		dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = DISCARD_IPV6_MCAST;
13848 	}
13849 	/* apply APP pktfilter */
13850 	dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806";
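	/*
	 * Filter strings appear to be parsed (by dhd_pktfilter_offload_set())
	 * as "<id> <polarity> <type> <offset> <bitmask> <pattern>"; the ARP
	 * rule above matches ethertype 0x0806 at offset 12 of the ethernet
	 * header. This layout is an inference from the rules in this file,
	 * not a documented interface.
	 */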
13851 
13852 #ifdef BLOCK_IPV6_PACKET
13853 	/* Setup filter to allow only IPv4 unicast frames */
13854 	dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 "
13855 		HEX_PREF_STR UNI_FILTER_STR ZERO_ADDR_STR ETHER_TYPE_STR IPV6_FILTER_STR
13856 		" "
13857 		HEX_PREF_STR ZERO_ADDR_STR ZERO_ADDR_STR ETHER_TYPE_STR ZERO_TYPE_STR;
13858 #else
13859 	/* Setup filter to allow only unicast */
13860 	dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00";
13861 #endif /* BLOCK_IPV6_PACKET */
13862 
13863 #ifdef PASS_IPV4_SUSPEND
13864 	/* XXX customer want to get IPv4 multicast packets */
13865 	dhd->pktfilter[DHD_MDNS_FILTER_NUM] = "104 0 0 0 0xFFFFFF 0x01005E";
13866 #else
13867 	/* Add filter to pass multicastDNS packet and NOT filter out as Broadcast */
13868 	dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL;
13869 #endif /* PASS_IPV4_SUSPEND */
13870 	if (FW_SUPPORTED(dhd, pf6)) {
13871 		/* Immediately pkt filter TYPE 6 Discard Broadcast IP packet */
13872 		dhd->pktfilter[DHD_IP4BCAST_DROP_FILTER_NUM] = DISCARD_IPV4_BCAST;
13873 		/* Immediately pkt filter TYPE 6 Discard Cisco STP packet */
13874 		dhd->pktfilter[DHD_LLC_STP_DROP_FILTER_NUM] = DISCARD_LLC_STP;
13875 		/* Immediately pkt filter TYPE 6 Discard Cisco XID protocol */
13876 		dhd->pktfilter[DHD_LLC_XID_DROP_FILTER_NUM] = DISCARD_LLC_XID;
13877 		/* Immediately pkt filter TYPE 6 Discard NETBIOS packet(port 137) */
13878 		dhd->pktfilter[DHD_UDPNETBIOS_DROP_FILTER_NUM] = DISCARD_UDPNETBIOS;
13879 		dhd->pktfilter_count = 11;
13880 	}
13881 
13882 #ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
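	/*
	 * The keepalive patterns below appear to match IPsec NAT-T traffic:
	 * 0x1194 is UDP port 4500, and a UDP length of 0x0009 corresponds to
	 * the one-byte NAT keepalive payload. This reading of the magic
	 * numbers is an assumption, not taken from driver documentation.
	 */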
13883 	dhd->pktfilter_count = 4;
13884 	/* Setup filter to block broadcast and NAT Keepalive packets */
13885 	/* discard all broadcast packets */
13886 	dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0xffffff 0xffffff";
13887 	/* discard NAT Keepalive packets */
13888 	dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "102 0 0 36 0xffffffff 0x11940009";
13889 	/* discard NAT Keepalive packets */
13890 	dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "104 0 0 38 0xffffffff 0x11940009";
13891 	dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
13892 #endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
13893 
13894 #if defined(SOFTAP)
13895 	if (ap_fw_loaded) {
13896 		/* XXX Andrey: for SOFTAP disable pkt filters (if there were any) */
13897 		dhd_enable_packet_filter(0, dhd);
13898 	}
13899 #endif /* defined(SOFTAP) */
13900 	dhd_set_packet_filter(dhd);
13901 #endif /* PKT_FILTER_SUPPORT */
13902 
13903 	/* query for 'clmver' to get clm version info from firmware */
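	/*
	 * The parsing below expects "clmver" output containing a
	 * "Data: <clm version>" line and, on CUSTOMER_HW4_DEBUG builds, an
	 * optional "Customization: ..." line whose blob version tokens are
	 * split on '(' and newline (an assumption drawn from the token
	 * handling that follows).
	 */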
13904 	bzero(buf, sizeof(buf));
13905 	ret = dhd_iovar(dhd, 0, "clmver", NULL, 0, buf, sizeof(buf), FALSE);
13906 	if (ret < 0)
13907 		DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
13908 	else {
13909 		char *ver_temp_buf = NULL;
13910 
13911 		if ((ver_temp_buf = bcmstrstr(buf, "Data:")) == NULL) {
13912 			DHD_ERROR(("Couldn't find \"Data:\"\n"));
13913 		} else {
13914 			ptr = (ver_temp_buf + strlen("Data:"));
13915 			if ((ver_temp_buf = bcmstrtok(&ptr, "\n", 0)) == NULL) {
13916 				DHD_ERROR(("Couldn't find New line character\n"));
13917 			} else {
13918 				bzero(clm_version, CLM_VER_STR_LEN);
13919 				strlcpy(clm_version, ver_temp_buf,
13920 					MIN(strlen(ver_temp_buf) + 1, CLM_VER_STR_LEN));
13921 				DHD_INFO(("CLM version = %s\n", clm_version));
13922 			}
13923 		}
13924 
13925 #if defined(CUSTOMER_HW4_DEBUG)
13926 		if ((ver_temp_buf = bcmstrstr(ptr, "Customization:")) == NULL) {
13927 			DHD_ERROR(("Couldn't find \"Customization:\"\n"));
13928 		} else {
13929 			char tokenlim;
13930 			ptr = (ver_temp_buf + strlen("Customization:"));
13931 			if ((ver_temp_buf = bcmstrtok(&ptr, "(\n", &tokenlim)) == NULL) {
13932 				DHD_ERROR(("Couldn't find project blob version "
13933 					"or New line character\n"));
13934 			} else if (tokenlim == '(') {
13935 				snprintf(clm_version,
13936 					CLM_VER_STR_LEN - 1, "%s, Blob ver = Major : %s minor : ",
13937 					clm_version, ver_temp_buf);
13938 				DHD_INFO(("[INFO]CLM/Blob version = %s\n", clm_version));
13939 				if ((ver_temp_buf = bcmstrtok(&ptr, "\n", &tokenlim)) == NULL) {
13940 					DHD_ERROR(("Couldn't find New line character\n"));
13941 				} else {
13942 					snprintf(clm_version,
13943 						strlen(clm_version) + strlen(ver_temp_buf),
13944 						"%s%s", clm_version, ver_temp_buf);
13945 					DHD_INFO(("[INFO]CLM/Blob/project version = %s\n",
13946 						clm_version));
13947 
13948 				}
13949 			} else if (tokenlim == '\n') {
13950 				snprintf(clm_version,
13951 					strlen(clm_version) + strlen(", Blob ver = Major : ") + 1,
13952 					"%s, Blob ver = Major : ", clm_version);
13953 				snprintf(clm_version,
13954 					strlen(clm_version) + strlen(ver_temp_buf) + 1,
13955 					"%s%s", clm_version, ver_temp_buf);
13956 				DHD_INFO(("[INFO]CLM/Blob/project version = %s\n", clm_version));
13957 			}
13958 		}
13959 #endif /* CUSTOMER_HW4_DEBUG */
13960 		if (strlen(clm_version)) {
13961 			DHD_ERROR(("CLM version = %s\n", clm_version));
13962 		} else {
13963 			DHD_ERROR(("Couldn't find CLM version!\n"));
13964 		}
13965 
13966 	}
13967 
13968 #ifdef WRITE_WLANINFO
13969 	sec_save_wlinfo(fw_version, EPI_VERSION_STR, dhd->info->nv_path, clm_version);
13970 #endif /* WRITE_WLANINFO */
13971 
13972 #ifdef GEN_SOFTAP_INFO_FILE
13973 	sec_save_softap_info();
13974 #endif /* GEN_SOFTAP_INFO_FILE */
13975 
13976 #ifdef PNO_SUPPORT
13977 	if (!dhd->pno_state) {
13978 		dhd_pno_init(dhd);
13979 	}
13980 #endif
13981 
13982 #ifdef DHD_PKTTS
13983 	/* get the pkt metadata buffer length supported by FW */
13984 	if (dhd_wl_ioctl_get_intiovar(dhd, "bus:metadata_info", &val,
13985 			WLC_GET_VAR, FALSE, 0) != BCME_OK) {
13986 		DHD_ERROR(("%s: failed to get pkt metadata buflen, use IPC pkt TS.\n",
13987 				__FUNCTION__));
13988 		/*
13989 		 * if iovar fails, IPC method of collecting
13990 		 * TS should be used, hence set metadata_buflen as
13991 		 * 0 here. This will be checked later on Tx completion
13992 		 * to decide if IPC or metadata method of reading TS
13993 		 * should be used
13994 		 */
13995 		dhd->pkt_metadata_version = 0;
13996 		dhd->pkt_metadata_buflen = 0;
13997 	} else {
13998 		dhd->pkt_metadata_version  = GET_METADATA_VER(val);
13999 		dhd->pkt_metadata_buflen  = GET_METADATA_BUFLEN(val);
14000 	}
14001 
14002 	/* Check FW supports pktlat, if supports enable pktts_enab iovar */
14003 	ret = dhd_set_pktts_enab(dhd, TRUE);
14004 	if (ret < 0) {
14005 		DHD_ERROR(("%s: Enabling pktts_enab failed, ret=%d\n", __FUNCTION__, ret));
14006 	}
14007 #endif /* DHD_PKTTS */
14008 
14009 #ifdef RTT_SUPPORT
14010 	if (dhd->rtt_state) {
14011 		ret = dhd_rtt_init(dhd);
14012 		if (ret < 0) {
14013 			DHD_ERROR(("%s failed to initialize RTT\n", __FUNCTION__));
14014 		}
14015 	}
14016 #endif
14017 
14018 #ifdef FILTER_IE
14019 	/* Failure to configure filter IE is not a fatal error, ignore it. */
14020 	if (FW_SUPPORTED(dhd, fie) &&
14021 		!(dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
14022 		dhd_read_from_file(dhd);
14023 	}
14024 #endif /* FILTER_IE */
14025 
14026 #ifdef NDO_CONFIG_SUPPORT
14027 	dhd->ndo_enable = FALSE;
14028 	dhd->ndo_host_ip_overflow = FALSE;
14029 	dhd->ndo_max_host_ip = NDO_MAX_HOST_IP_ENTRIES;
14030 #endif /* NDO_CONFIG_SUPPORT */
14031 
14032 	/* ND offload version supported */
14033 	dhd->ndo_version = dhd_ndo_get_version(dhd);
14034 
14035 	/* check dongle supports wbtext (product policy) or not */
14036 	dhd->wbtext_support = FALSE;
14037 	if (dhd_wl_ioctl_get_intiovar(dhd, "wnm_bsstrans_resp", &wnm_bsstrans_resp,
14038 			WLC_GET_VAR, FALSE, 0) != BCME_OK) {
14039 		DHD_ERROR(("failed to get wnm_bsstrans_resp\n"));
14040 	}
14041 	dhd->wbtext_policy = wnm_bsstrans_resp;
14042 	if (dhd->wbtext_policy == WL_BSSTRANS_POLICY_PRODUCT_WBTEXT) {
14043 		dhd->wbtext_support = TRUE;
14044 	}
14045 #ifndef WBTEXT
14046 	/* driver can turn off wbtext feature through makefile */
14047 	if (dhd->wbtext_support) {
14048 		if (dhd_wl_ioctl_set_intiovar(dhd, "wnm_bsstrans_resp",
14049 				WL_BSSTRANS_POLICY_ROAM_ALWAYS,
14050 				WLC_SET_VAR, FALSE, 0) != BCME_OK) {
14051 			DHD_ERROR(("failed to disable WBTEXT\n"));
14052 		}
14053 	}
14054 #endif /* !WBTEXT */
14055 
14056 #ifdef DHD_NON_DMA_M2M_CORRUPTION
14057 	/* check pcie non dma loopback */
14058 	if (dhd->op_mode == DHD_FLAG_MFG_MODE &&
14059 		(dhd_bus_dmaxfer_lpbk(dhd, M2M_NON_DMA_LPBK) < 0)) {
14060 			goto done;
14061 	}
14062 #endif /* DHD_NON_DMA_M2M_CORRUPTION */
14063 
14064 #ifdef CUSTOM_ASSOC_TIMEOUT
14065 	/* set recreate_bi_timeout to increase assoc timeout :
14066 	* 20 * 100TU * 1024 / 1000 = 2 secs
14067 	* (beacon wait time = recreate_bi_timeout * beacon_period * 1024 / 1000)
14068 	*/
14069 	if (dhd_wl_ioctl_set_intiovar(dhd, "recreate_bi_timeout",
14070 			CUSTOM_ASSOC_TIMEOUT,
14071 			WLC_SET_VAR, TRUE, 0) != BCME_OK) {
14072 		DHD_ERROR(("failed to set assoc timeout\n"));
14073 	}
14074 #endif /* CUSTOM_ASSOC_TIMEOUT */
14075 
14076 	BCM_REFERENCE(ret2);
14077 #if defined(WBTEXT) && defined(RRM_BCNREQ_MAX_CHAN_TIME)
14078 	if (dhd_iovar(dhd, 0, "rrm_bcn_req_thrtl_win",
14079 		(char *)&rrm_bcn_req_thrtl_win, sizeof(rrm_bcn_req_thrtl_win),
14080 		NULL, 0, TRUE) < 0) {
14081 		DHD_ERROR(("failed to set RRM BCN request thrtl_win\n"));
14082 	}
14083 	if (dhd_iovar(dhd, 0, "rrm_bcn_req_max_off_chan_time",
14084 		(char *)&rrm_bcn_req_max_off_chan_time, sizeof(rrm_bcn_req_max_off_chan_time),
14085 		NULL, 0, TRUE) < 0) {
14086 		DHD_ERROR(("failed to set RRM BCN Request max_off_chan_time\n"));
14087 	}
14088 #endif /* WBTEXT && RRM_BCNREQ_MAX_CHAN_TIME */
14089 #ifdef WL_MONITOR
14090 #ifdef HOST_RADIOTAP_CONV
14091 	/* 'Wl monitor' IOVAR is fired to check whether the FW supports radiotap conversion or not.
14092 	 * This is indicated through MSB(1<<31) bit, based on which host radiotap conversion
14093 	 * will be enabled or disabled.
14094 	 * 0 - Host supports Radiotap conversion.
14095 	 * 1 - FW supports Radiotap conversion.
14096 	 */
14097 	bcm_mkiovar("monitor", (char *)&monitor, sizeof(monitor), iovbuf, sizeof(iovbuf));
14098 	if ((ret2 = dhd_wl_ioctl_cmd(dhd, WLC_GET_MONITOR, iovbuf,
14099 		sizeof(iovbuf), FALSE, 0)) == 0) {
14100 		memcpy(&monitor, iovbuf, sizeof(monitor));
14101 		dhdinfo->host_radiotap_conv = (monitor & HOST_RADIOTAP_CONV_BIT) ? TRUE : FALSE;
14102 	} else {
14103 		DHD_ERROR(("%s Failed to get monitor mode, err %d\n",
14104 			__FUNCTION__, ret2));
14105 	}
14106 #endif /* HOST_RADIOTAP_CONV */
14107 	if (FW_SUPPORTED(dhd, monitor)) {
14108 		dhd->monitor_enable = TRUE;
14109 		DHD_ERROR(("%s: Monitor mode is enabled in FW cap\n", __FUNCTION__));
14110 	} else {
14111 		dhd->monitor_enable = FALSE;
14112 		DHD_ERROR(("%s: Monitor mode is not enabled in FW cap\n", __FUNCTION__));
14113 	}
14114 #endif /* WL_MONITOR */
14115 
14116 	/* store the preserve log set numbers */
14117 	if (dhd_get_preserve_log_numbers(dhd, &dhd->logset_prsrv_mask)
14118 			!= BCME_OK) {
14119 		DHD_ERROR(("%s: Failed to get preserve log # !\n", __FUNCTION__));
14120 	}
14121 
14122 #ifdef CONFIG_SILENT_ROAM
14123 	dhd->sroam_turn_on = TRUE;
14124 	dhd->sroamed = FALSE;
14125 #endif /* CONFIG_SILENT_ROAM */
14126 
14127 #ifndef OEM_ANDROID
14128 	/* For non-android FC modular builds, override firmware preinited values */
14129 	dhd_override_fwprenit(dhd);
14130 #endif /* !OEM_ANDROID */
14131 	dhd_set_bandlock(dhd);
14132 
14133 done:
14134 	if (iov_buf) {
14135 		MFREE(dhd->osh, iov_buf, WLC_IOCTL_SMLEN);
14136 	}
14137 	return ret;
14138 }
14139 
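/*
 * Legacy pre-init path: programs every tunable explicitly instead of relying
 * on firmware pre-init defaults the way dhd_optimised_preinit_ioctls() above
 * does.
 */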
14140 int
14141 dhd_legacy_preinit_ioctls(dhd_pub_t *dhd)
14142 {
14143 	int ret = 0;
14144 	/*  Room for "event_msgs_ext" + '\0' + bitvec  */
14145 	char iovbuf[WL_EVENTING_MASK_EXT_LEN + EVENTMSGS_EXT_STRUCT_SIZE + 16];
14146 	char *mask;
14147 	uint32 buf_key_b4_m4 = 1;
14148 #ifdef DHD_PKTTS
14149 	uint32 val = 0;
14150 #endif
14151 	uint8 msglen;
14152 	eventmsgs_ext_t *eventmask_msg = NULL;
14153 	uint32 event_log_max_sets = 0;
14154 	char* iov_buf = NULL;
14155 	/* XXX: Use ret2 for return check of IOVARS that might return BCME_UNSUPPORTED,
14156 	*	based on FW build tag.
14157 	*/
14158 	int ret2 = 0;
14159 	uint32 wnm_cap = 0;
14160 #if defined(WL_MONITOR) && defined(HOST_RADIOTAP_CONV)
14161 	uint monitor = 0;
14162 	dhd_info_t *dhdinfo = (dhd_info_t*)dhd->info;
14163 #endif /* WL_MONITOR && HOST_RADIOTAP_CONV */
14164 #if defined(BCMSUP_4WAY_HANDSHAKE)
14165 	uint32 sup_wpa = 1;
14166 #endif /* BCMSUP_4WAY_HANDSHAKE */
14167 #if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \
14168 	defined(CUSTOM_IBSS_AMPDU_BA_WSIZE))
14169 	uint32 ampdu_ba_wsize = 0;
14170 #endif /* CUSTOM_AMPDU_BA_WSIZE ||(WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
14171 #if defined(CUSTOM_AMPDU_MPDU)
14172 	int32 ampdu_mpdu = 0;
14173 #endif
14174 #if defined(CUSTOM_AMPDU_RELEASE)
14175 	int32 ampdu_release = 0;
14176 #endif
14177 #if defined(CUSTOM_AMSDU_AGGSF)
14178 	int32 amsdu_aggsf = 0;
14179 #endif
14180 
14181 #if defined(BCMSDIO) || defined(BCMDBUS)
14182 #ifdef PROP_TXSTATUS
14183 	int wlfc_enable = TRUE;
14184 #ifndef DISABLE_11N
14185 	uint32 hostreorder = 1;
14186 	uint wl_down = 1;
14187 #endif /* DISABLE_11N */
14188 #endif /* PROP_TXSTATUS */
14189 #endif /* defined(BCMSDIO) || defined(BCMDBUS) */
14190 
14191 #ifndef PCIE_FULL_DONGLE
14192 	uint32 wl_ap_isolate;
14193 #endif /* PCIE_FULL_DONGLE */
14194 	uint32 frameburst = CUSTOM_FRAMEBURST_SET;
14195 	uint wnm_bsstrans_resp = 0;
14196 #ifdef SUPPORT_SET_CAC
14197 	uint32 cac = 1;
14198 #endif /* SUPPORT_SET_CAC */
14199 #ifdef DHD_BUS_MEM_ACCESS
14200 	uint32 enable_memuse = 1;
14201 #endif /* DHD_BUS_MEM_ACCESS */
14202 #if defined(SUPPORT_2G_VHT) || defined(SUPPORT_5G_1024QAM_VHT)
14203 	uint32 vht_features = 0; /* init to 0, will be set based on each support */
14204 #endif /* SUPPORT_2G_VHT || SUPPORT_5G_1024QAM_VHT */
14205 
14206 #ifdef OEM_ANDROID
14207 #ifdef DHD_ENABLE_LPC
14208 	uint32 lpc = 1;
14209 #endif /* DHD_ENABLE_LPC */
14210 	uint power_mode = PM_FAST;
14211 #if defined(BCMSDIO)
14212 	uint32 dongle_align = DHD_SDALIGN;
14213 	uint32 glom = CUSTOM_GLOM_SETTING;
14214 #endif /* defined(BCMSDIO) */
14215 	uint bcn_timeout = CUSTOM_BCN_TIMEOUT;
14216 	uint scancache_enab = TRUE;
14217 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
14218 	uint32 bcn_li_bcn = 1;
14219 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
14220 	uint retry_max = CUSTOM_ASSOC_RETRY_MAX;
14221 	int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME;
14222 	int scan_unassoc_time = DHD_SCAN_UNASSOC_ACTIVE_TIME;
14223 	int scan_passive_time = DHD_SCAN_PASSIVE_TIME;
14224 	char buf[WLC_IOCTL_SMLEN];
14225 	char *ptr;
14226 	uint32 listen_interval = CUSTOM_LISTEN_INTERVAL; /* Default Listen Interval in Beacons */
14227 #if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
14228 	wl_el_tag_params_t *el_tag = NULL;
14229 #endif /* DHD_8021X_DUMP */
14230 #ifdef DHD_RANDMAC_LOGGING
14231 	uint privacy_mask = 0;
14232 #endif /* DHD_RANDMAC_LOGGING */
14233 #ifdef ROAM_ENABLE
14234 	uint roamvar = 0;
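	/* The roam tunables below are passed to the WLC_SET_ROAM_* ioctls as
	 * {value, band} pairs; WLC_BAND_ALL applies the value to all bands.
	 */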
14235 	int roam_trigger[2] = {CUSTOM_ROAM_TRIGGER_SETTING, WLC_BAND_ALL};
14236 	int roam_scan_period[2] = {10, WLC_BAND_ALL};
14237 	int roam_delta[2] = {CUSTOM_ROAM_DELTA_SETTING, WLC_BAND_ALL};
14238 #ifdef ROAM_AP_ENV_DETECTION
14239 	int roam_env_mode = AP_ENV_INDETERMINATE;
14240 #endif /* ROAM_AP_ENV_DETECTION */
14241 #ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC
14242 	int roam_fullscan_period = 60;
14243 #else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
14244 	int roam_fullscan_period = 120;
14245 #endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
14246 #ifdef DISABLE_BCNLOSS_ROAM
14247 	uint roam_bcnloss_off = 1;
14248 #endif /* DISABLE_BCNLOSS_ROAM */
14249 #else
14250 #ifdef DISABLE_BUILTIN_ROAM
14251 	uint roamvar = 1;
14252 #endif /* DISABLE_BUILTIN_ROAM */
14253 #endif /* ROAM_ENABLE */
14254 
14255 #if defined(SOFTAP)
14256 	uint dtim = 1;
14257 #endif
14258 /* xxx andrey tmp fix for dk8000 build error  */
14259 #if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211))
14260 	struct ether_addr p2p_ea;
14261 #endif
14262 #ifdef BCMCCX
14263 	uint32 ccx = 1;
14264 #endif
14265 #ifdef SOFTAP_UAPSD_OFF
14266 	uint32 wme_apsd = 0;
14267 #endif /* SOFTAP_UAPSD_OFF */
14268 #if (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC)
14269 	uint32 apsta = 1; /* Enable APSTA mode */
14270 #elif defined(SOFTAP_AND_GC)
14271 	uint32 apsta = 0;
14272 	int ap_mode = 1;
14273 #endif /* (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) */
14274 #ifdef GET_CUSTOM_MAC_ENABLE
14275 	struct ether_addr ea_addr;
14276 	char hw_ether[62];
14277 #endif /* GET_CUSTOM_MAC_ENABLE */
14278 #ifdef OKC_SUPPORT
14279 	uint32 okc = 1;
14280 #endif
14281 
14282 #ifdef DISABLE_11N
14283 	uint32 nmode = 0;
14284 #endif /* DISABLE_11N */
14285 
14286 #if defined(DISABLE_11AC)
14287 	uint32 vhtmode = 0;
14288 #endif /* DISABLE_11AC */
14289 #ifdef USE_WL_TXBF
14290 	uint32 txbf = 1;
14291 #endif /* USE_WL_TXBF */
14292 #ifdef DISABLE_TXBFR
14293 	uint32 txbf_bfr_cap = 0;
14294 #endif /* DISABLE_TXBFR */
14295 #ifdef AMPDU_VO_ENABLE
14296 	/* XXX: Enabling VO AMPDU to reduce FER */
14297 	struct ampdu_tid_control tid;
14298 #endif
14299 #if defined(PROP_TXSTATUS)
14300 #ifdef USE_WFA_CERT_CONF
14301 	uint32 proptx = 0;
14302 #endif /* USE_WFA_CERT_CONF */
14303 #endif /* PROP_TXSTATUS */
14304 #ifdef DHD_SET_FW_HIGHSPEED
14305 	uint32 ack_ratio = 250;
14306 	uint32 ack_ratio_depth = 64;
14307 #endif /* DHD_SET_FW_HIGHSPEED */
14308 #ifdef DISABLE_11N_PROPRIETARY_RATES
14309 	uint32 ht_features = 0;
14310 #endif /* DISABLE_11N_PROPRIETARY_RATES */
14311 #ifdef CUSTOM_PSPRETEND_THR
14312 	uint32 pspretend_thr = CUSTOM_PSPRETEND_THR;
14313 #endif
14314 #ifdef CUSTOM_EVENT_PM_WAKE
14315 	uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE;
14316 #endif	/* CUSTOM_EVENT_PM_WAKE */
14317 #ifdef DISABLE_PRUNED_SCAN
14318 	uint32 scan_features = 0;
14319 #endif /* DISABLE_PRUNED_SCAN */
14320 #ifdef BCMPCIE_OOB_HOST_WAKE
14321 	uint32 hostwake_oob = 0;
14322 #endif /* BCMPCIE_OOB_HOST_WAKE */
14323 #ifdef EVENT_LOG_RATE_HC
14324 	/* threshold number of lines per second */
14325 #define EVENT_LOG_RATE_HC_THRESHOLD	1000
14326 	uint32 event_log_rate_hc = EVENT_LOG_RATE_HC_THRESHOLD;
14327 #endif /* EVENT_LOG_RATE_HC */
14328 #if defined(WBTEXT) && defined(WBTEXT_BTMDELTA)
14329 	uint32 btmdelta = WBTEXT_BTMDELTA;
14330 #endif /* WBTEXT && WBTEXT_BTMDELTA */
14331 #if defined(WBTEXT) && defined(RRM_BCNREQ_MAX_CHAN_TIME)
14332 	uint32 rrm_bcn_req_thrtl_win = RRM_BCNREQ_MAX_CHAN_TIME * 2;
14333 	uint32 rrm_bcn_req_max_off_chan_time = RRM_BCNREQ_MAX_CHAN_TIME;
14334 #endif /* WBTEXT && RRM_BCNREQ_MAX_CHAN_TIME */
14335 #endif  /* OEM_ANDROID */
14336 
14337 	BCM_REFERENCE(iovbuf);
14338 	DHD_TRACE(("Enter %s\n", __FUNCTION__));
14339 
14340 #ifdef ARP_OFFLOAD_SUPPORT
14341 	/* arpoe will be applied from the suspend context */
14342 	dhd->arpoe_enable = TRUE;
14343 	dhd->arpol_configured = FALSE;
14344 #endif /* ARP_OFFLOAD_SUPPORT */
14345 
14346 #ifdef OEM_ANDROID
14347 #ifdef PKT_FILTER_SUPPORT
14348 	dhd_pkt_filter_enable = TRUE;
14349 #ifdef APF
14350 	dhd->apf_set = FALSE;
14351 #endif /* APF */
14352 #endif /* PKT_FILTER_SUPPORT */
14353 	dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
14354 #ifdef ENABLE_MAX_DTIM_IN_SUSPEND
14355 	dhd->max_dtim_enable = TRUE;
14356 #else
14357 	dhd->max_dtim_enable = FALSE;
14358 #endif /* ENABLE_MAX_DTIM_IN_SUSPEND */
14359 	dhd->disable_dtim_in_suspend = FALSE;
14360 #ifdef CUSTOM_SET_OCLOFF
14361 	dhd->ocl_off = FALSE;
14362 #endif /* CUSTOM_SET_OCLOFF */
14363 #ifdef SUPPORT_SET_TID
14364 	dhd->tid_mode = SET_TID_OFF;
14365 	dhd->target_uid = 0;
14366 	dhd->target_tid = 0;
14367 #endif /* SUPPORT_SET_TID */
14368 #ifdef DHDTCPACK_SUPPRESS
14369 	dhd_tcpack_suppress_set(dhd, dhd->conf->tcpack_sup_mode);
14370 #endif
14371 	dhd->op_mode = 0;
14372 
14373 	/* clear AP flags */
14374 #if defined(CUSTOM_COUNTRY_CODE)
14375 	dhd->dhd_cflags &= ~WLAN_PLAT_AP_FLAG;
14376 #endif /* CUSTOM_COUNTRY_CODE */
14377 
14378 #ifdef CUSTOMER_HW4_DEBUG
14379 	if (!dhd_validate_chipid(dhd)) {
14380 		DHD_ERROR(("%s: CONFIG_BCMXXX and CHIP ID(%x) is mismatched\n",
14381 			__FUNCTION__, dhd_bus_chip_id(dhd)));
14382 #ifndef SUPPORT_MULTIPLE_CHIPS
14383 		ret = BCME_BADARG;
14384 		goto done;
14385 #endif /* !SUPPORT_MULTIPLE_CHIPS */
14386 	}
14387 #endif /* CUSTOMER_HW4_DEBUG */
14388 
14389 	/* query for 'ver' to get version info from firmware */
14390 	memset(buf, 0, sizeof(buf));
14391 	ptr = buf;
14392 	ret = dhd_iovar(dhd, 0, "ver", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
14393 	if (ret < 0)
14394 		DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
14395 	else {
14396 		bcmstrtok(&ptr, "\n", 0);
14397 		/* Print fw version info */
14398 		strncpy(fw_version, buf, FW_VER_STR_LEN);
14399 		fw_version[FW_VER_STR_LEN-1] = '\0';
14400 	}
14401 
14402 #ifdef BOARD_HIKEY
14403 	/* Set op_mode as MFG_MODE if WLTEST is present in "wl ver" */
14404 	if (strstr(fw_version, "WLTEST") != NULL) {
14405 		DHD_ERROR(("%s: wl ver has WLTEST, setting op_mode as DHD_FLAG_MFG_MODE\n",
14406 			__FUNCTION__));
14407 		op_mode = DHD_FLAG_MFG_MODE;
14408 	}
14409 #endif /* BOARD_HIKEY */
14410 
14411 	if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
14412 		(op_mode == DHD_FLAG_MFG_MODE)) {
14413 		dhd->op_mode = DHD_FLAG_MFG_MODE;
14414 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
14415 		/* disable runtimePM by default in MFG mode. */
14416 		pm_runtime_disable(dhd_bus_to_dev(dhd->bus));
14417 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
14418 #ifdef DHD_PCIE_RUNTIMEPM
14419 		/* Disable RuntimePM in mfg mode */
14420 		DHD_DISABLE_RUNTIME_PM(dhd);
14421 		DHD_ERROR(("%s : Disable RuntimePM in Manufacturing Firmware\n", __FUNCTION__));
14422 #endif /* DHD_PCIE_RUNTIMEPM */
14423 		/* Check and adjust IOCTL response timeout for Manufacturing firmware */
14424 		dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT);
14425 		DHD_ERROR(("%s : Set IOCTL response time for Manufacturing Firmware\n",
14426 			__FUNCTION__));
14427 	} else {
14428 		dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
14429 		DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__));
14430 	}
14431 #ifdef BCMPCIE_OOB_HOST_WAKE
14432 	ret = dhd_iovar(dhd, 0, "bus:hostwake_oob", NULL, 0, (char *)&hostwake_oob,
14433 		sizeof(hostwake_oob), FALSE);
14434 	if (ret < 0) {
14435 		DHD_ERROR(("%s: hostwake_oob IOVAR not present, proceed\n", __FUNCTION__));
14436 	} else {
14437 		if (hostwake_oob == 0) {
14438 			DHD_ERROR(("%s: hostwake_oob is not enabled in the NVRAM, STOP\n",
14439 				__FUNCTION__));
14440 			ret = BCME_UNSUPPORTED;
14441 			goto done;
14442 		} else {
14443 			DHD_ERROR(("%s: hostwake_oob enabled\n", __FUNCTION__));
14444 		}
14445 	}
14446 #endif /* BCMPCIE_OOB_HOST_WAKE */
14447 
14448 #ifdef DNGL_AXI_ERROR_LOGGING
14449 	ret = dhd_iovar(dhd, 0, "axierror_logbuf_addr", NULL, 0, (char *)&dhd->axierror_logbuf_addr,
14450 		sizeof(dhd->axierror_logbuf_addr), FALSE);
14451 	if (ret < 0) {
14452 		DHD_ERROR(("%s: axierror_logbuf_addr IOVAR not present, proceed\n", __FUNCTION__));
14453 		dhd->axierror_logbuf_addr = 0;
14454 	} else {
14455 		DHD_ERROR(("%s: axierror_logbuf_addr : 0x%x\n",
14456 			__FUNCTION__, dhd->axierror_logbuf_addr));
14457 	}
14458 #endif /* DNGL_AXI_ERROR_LOGGING */
14459 
14460 #ifdef EVENT_LOG_RATE_HC
14461 	ret = dhd_iovar(dhd, 0, "event_log_rate_hc", (char *)&event_log_rate_hc,
14462 		sizeof(event_log_rate_hc), NULL, 0, TRUE);
14463 	if (ret < 0) {
14464 		DHD_ERROR(("%s event_log_rate_hc set failed %d\n", __FUNCTION__, ret));
14465 	} else  {
14466 		DHD_ERROR(("%s event_log_rate_hc set with threshold:%d\n", __FUNCTION__,
14467 			event_log_rate_hc));
14468 	}
14469 #endif /* EVENT_LOG_RATE_HC */
14470 
14471 #ifdef GET_CUSTOM_MAC_ENABLE
14472 	memset(hw_ether, 0, sizeof(hw_ether));
14473 	ret = wifi_platform_get_mac_addr(dhd->info->adapter, hw_ether, iface_name);
14474 #ifdef GET_CUSTOM_MAC_FROM_CONFIG
14475 	if (!memcmp(&ether_null, &dhd->conf->hw_ether, ETHER_ADDR_LEN)) {
14476 		ret = 0;
14477 	} else
14478 #endif
14479 	if (!ret) {
14480 		memset(buf, 0, sizeof(buf));
14481 #ifdef GET_CUSTOM_MAC_FROM_CONFIG
14482 		memcpy(hw_ether, &dhd->conf->hw_ether, sizeof(dhd->conf->hw_ether));
14483 #endif
14484 		bcopy(hw_ether, ea_addr.octet, sizeof(struct ether_addr));
14485 		bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf));
14486 		ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
14487 		if (ret < 0) {
14488 			memset(buf, 0, sizeof(buf));
14489 			bcm_mkiovar("hw_ether", hw_ether, sizeof(hw_ether), buf, sizeof(buf));
14490 			ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
14491 			if (ret) {
14492 				DHD_ERROR(("%s: can't set MAC address MAC="MACDBG", error=%d\n",
14493 					__FUNCTION__, MAC2STRDBG(hw_ether), ret));
14494 				prhex("MACPAD", &hw_ether[ETHER_ADDR_LEN], sizeof(hw_ether)-ETHER_ADDR_LEN);
14495 				ret = BCME_NOTUP;
14496 				goto done;
14497 			}
14498 		}
14499 	} else {
14500 		DHD_ERROR(("%s: can't get custom MAC address, ret=%d\n", __FUNCTION__, ret));
14501 		ret = BCME_NOTUP;
14502 		goto done;
14503 	}
14504 #endif /* GET_CUSTOM_MAC_ENABLE */
14505 	/* Get the default device MAC address directly from firmware */
14506 	ret = dhd_iovar(dhd, 0, "cur_etheraddr", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
14507 	if (ret < 0) {
14508 		DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__, ret));
14509 		ret = BCME_NOTUP;
14510 		goto done;
14511 	}
14512 
14513 	DHD_ERROR(("%s: use firmware generated mac_address "MACDBG"\n",
14514 		__FUNCTION__, MAC2STRDBG(&buf)));
14515 
14516 #ifdef MACADDR_PROVISION_ENFORCED
14517 	if (ETHER_IS_LOCALADDR(buf)) {
14518 		DHD_ERROR(("%s: error! not using provision mac addr!\n", __FUNCTION__));
14519 		ret = BCME_BADADDR;
14520 		goto done;
14521 	}
14522 #endif /* MACADDR_PROVISION_ENFORCED */
14523 
14524 	/* Update public MAC address after reading from Firmware */
14525 	memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
14526 
14527 	if (ETHER_ISNULLADDR(dhd->mac.octet)) {
14528 		DHD_ERROR(("%s: NULL MAC address during pre-init\n", __FUNCTION__));
14529 		ret = BCME_BADADDR;
14530 		goto done;
14531 	} else {
14532 		(void)memcpy_s(dhd_linux_get_primary_netdev(dhd)->perm_addr, ETHER_ADDR_LEN,
14533 			dhd->mac.octet, ETHER_ADDR_LEN);
14534 	}
14535 #if defined(WL_STA_ASSOC_RAND) && defined(WL_STA_INIT_RAND)
14536 	/* Set cur_etheraddr of primary interface to randomized address to ensure
14537 	 * that any action frame transmission will happen using the randomized
14538 	 * macaddr; the primary netdev->perm_addr will hold the original factory MAC.
14539 	 */
14540 	{
14541 		if ((ret = dhd_update_rand_mac_addr(dhd)) < 0) {
14542 			DHD_ERROR(("%s: failed to set macaddress\n", __FUNCTION__));
14543 			goto done;
14544 		}
14545 	}
14546 #endif /* WL_STA_ASSOC_RAND && WL_STA_INIT_RAND */
14547 
14548 	if ((ret = dhd_apply_default_clm(dhd, dhd->clm_path)) < 0) {
14549 		DHD_ERROR(("%s: CLM set failed. Abort initialization.\n", __FUNCTION__));
14550 		goto done;
14551 	}
14552 
14553 	/* get capabilities from firmware */
14554 	{
14555 		uint32 cap_buf_size = sizeof(dhd->fw_capabilities);
14556 		memset(dhd->fw_capabilities, 0, cap_buf_size);
14557 		ret = dhd_iovar(dhd, 0, "cap", NULL, 0, dhd->fw_capabilities, (cap_buf_size - 1),
14558 				FALSE);
14559 		if (ret < 0) {
14560 			DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
14561 				__FUNCTION__, ret));
14562 			return 0;
14563 		}
14564 
14565 		memmove(&dhd->fw_capabilities[1], dhd->fw_capabilities, (cap_buf_size - 1));
14566 		dhd->fw_capabilities[0] = ' ';
14567 		dhd->fw_capabilities[cap_buf_size - 2] = ' ';
14568 		dhd->fw_capabilities[cap_buf_size - 1] = '\0';
14569 	}
14570 
14571 	if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) ||
14572 		(op_mode == DHD_FLAG_HOSTAP_MODE)) {
14573 #ifdef SET_RANDOM_MAC_SOFTAP
14574 		uint rand_mac;
14575 #endif /* SET_RANDOM_MAC_SOFTAP */
14576 		dhd->op_mode = DHD_FLAG_HOSTAP_MODE;
14577 #ifdef PKT_FILTER_SUPPORT
14578 		if (dhd_conf_get_insuspend(dhd, AP_FILTER_IN_SUSPEND))
14579 			dhd_pkt_filter_enable = TRUE;
14580 		else
14581 			dhd_pkt_filter_enable = FALSE;
14582 #endif
14583 #ifdef SET_RANDOM_MAC_SOFTAP
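		/*
		 * Build a randomized SoftAP MAC (layout inferred from the
		 * assignments below): keep the vendor OUI in the first three
		 * octets with the locally-administered bit set, force the
		 * fourth octet's high nibble to 0xF, and fill the remaining
		 * bits from a PRNG seeded with jiffies.
		 */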
14584 		SRANDOM32((uint)jiffies);
14585 		rand_mac = RANDOM32();
14586 		iovbuf[0] = (unsigned char)(vendor_oui >> 16) | 0x02;	/* local admin bit */
14587 		iovbuf[1] = (unsigned char)(vendor_oui >> 8);
14588 		iovbuf[2] = (unsigned char)vendor_oui;
14589 		iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0;
14590 		iovbuf[4] = (unsigned char)(rand_mac >> 8);
14591 		iovbuf[5] = (unsigned char)(rand_mac >> 16);
14592 
14593 		ret = dhd_iovar(dhd, 0, "cur_etheraddr", (char *)&iovbuf, ETHER_ADDR_LEN, NULL, 0,
14594 				TRUE);
14595 		if (ret < 0) {
14596 			DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
14597 		} else
14598 			memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN);
14599 #endif /* SET_RANDOM_MAC_SOFTAP */
14600 #ifdef USE_DYNAMIC_F2_BLKSIZE
14601 		dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
14602 #endif /* USE_DYNAMIC_F2_BLKSIZE */
14603 #ifdef SUPPORT_AP_POWERSAVE
14604 		dhd_set_ap_powersave(dhd, 0, TRUE);
14605 #endif /* SUPPORT_AP_POWERSAVE */
14606 #ifdef SOFTAP_UAPSD_OFF
14607 		ret = dhd_iovar(dhd, 0, "wme_apsd", (char *)&wme_apsd, sizeof(wme_apsd), NULL, 0,
14608 				TRUE);
14609 		if (ret < 0) {
14610 			DHD_ERROR(("%s: set wme_apsd 0 failed (error=%d)\n",
14611 				__FUNCTION__, ret));
14612 		}
14613 #endif /* SOFTAP_UAPSD_OFF */
14614 
14615 		/* set AP flag for specific country code of SOFTAP */
14616 #if defined(CUSTOM_COUNTRY_CODE)
14617 		dhd->dhd_cflags |= WLAN_PLAT_AP_FLAG | WLAN_PLAT_NODFS_FLAG;
14618 #endif /* CUSTOM_COUNTRY_CODE */
14619 	} else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
14620 		(op_mode == DHD_FLAG_MFG_MODE)) {
14621 #if defined(ARP_OFFLOAD_SUPPORT)
14622 		dhd->arpoe_enable = FALSE;
14623 #endif /* ARP_OFFLOAD_SUPPORT */
14624 #ifdef PKT_FILTER_SUPPORT
14625 		dhd_pkt_filter_enable = FALSE;
14626 #endif /* PKT_FILTER_SUPPORT */
14627 		dhd->op_mode = DHD_FLAG_MFG_MODE;
14628 #ifdef USE_DYNAMIC_F2_BLKSIZE
14629 		/* XXX The 'wl counters' command triggers SDIO bus error
14630 		 * if F2 block size is greater than 128 bytes using 4354A1
14631 		 * manufacturing firmware. To avoid this problem, F2 block
14632 		 * size is set to 128 bytes only for DHD_FLAG_MFG_MODE.
14633 		 * There is no problem for other chipsets since no big data
14634 		 * transaction happens over the SDIO bus during
14635 		 * manufacturing test.
14636 		 */
14637 		dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
14638 #endif /* USE_DYNAMIC_F2_BLKSIZE */
14639 #ifndef CUSTOM_SET_ANTNPM
14640 		if (FW_SUPPORTED(dhd, rsdb)) {
14641 			wl_config_t rsdb_mode;
14642 			memset(&rsdb_mode, 0, sizeof(rsdb_mode));
14643 			ret = dhd_iovar(dhd, 0, "rsdb_mode", (char *)&rsdb_mode, sizeof(rsdb_mode),
14644 				NULL, 0, TRUE);
14645 			if (ret < 0) {
14646 				DHD_ERROR(("%s disabling rsdb_mode failed ret= %d\n",
14647 					__FUNCTION__, ret));
14648 			}
14649 		}
14650 #endif /* !CUSTOM_SET_ANTNPM */
14651 	} else {
14652 		uint32 concurrent_mode = 0;
14653 		if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_P2P_MODE) ||
14654 			(op_mode == DHD_FLAG_P2P_MODE)) {
14655 #ifdef PKT_FILTER_SUPPORT
14656 			dhd_pkt_filter_enable = FALSE;
14657 #endif
14658 			dhd->op_mode = DHD_FLAG_P2P_MODE;
14659 		} else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_IBSS_MODE) ||
14660 			(op_mode == DHD_FLAG_IBSS_MODE)) {
14661 			dhd->op_mode = DHD_FLAG_IBSS_MODE;
14662 		} else
14663 			dhd->op_mode = DHD_FLAG_STA_MODE;
14664 #if defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P)
14665 		if (dhd->op_mode != DHD_FLAG_IBSS_MODE &&
14666 			(concurrent_mode = dhd_get_concurrent_capabilites(dhd))) {
14667 			dhd->op_mode |= concurrent_mode;
14668 		}
14669 
14670 		/* Check if we are enabling p2p */
14671 		if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
14672 			ret = dhd_iovar(dhd, 0, "apsta", (char *)&apsta, sizeof(apsta), NULL, 0,
14673 					TRUE);
14674 			if (ret < 0)
14675 				DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__, ret));
14676 
14677 #if defined(SOFTAP_AND_GC)
14678 		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_AP,
14679 			(char *)&ap_mode, sizeof(ap_mode), TRUE, 0)) < 0) {
14680 				DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__, ret));
14681 		}
14682 #endif
14683 			memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN);
14684 			ETHER_SET_LOCALADDR(&p2p_ea);
14685 			ret = dhd_iovar(dhd, 0, "p2p_da_override", (char *)&p2p_ea, sizeof(p2p_ea),
14686 					NULL, 0, TRUE);
14687 			if (ret < 0)
14688 				DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__, ret));
14689 			else
14690 				DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n"));
14691 		}
14692 #else
14693 	(void)concurrent_mode;
14694 #endif /* defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P) */
14695 	}
14696 
14697 #ifdef DISABLE_PRUNED_SCAN
14698 	if (FW_SUPPORTED(dhd, rsdb)) {
14699 		ret = dhd_iovar(dhd, 0, "scan_features", (char *)&scan_features,
14700 				sizeof(scan_features), iovbuf, sizeof(iovbuf), FALSE);
14701 		if (ret < 0) {
14702 			if (ret == BCME_UNSUPPORTED) {
14703 				DHD_ERROR(("%s get scan_features, UNSUPPORTED\n",
14704 					__FUNCTION__));
14705 			} else {
14706 				DHD_ERROR(("%s get scan_features err(%d)\n",
14707 					__FUNCTION__, ret));
14708 			}
14709 
14710 		} else {
14711 			memcpy(&scan_features, iovbuf, 4);
14712 			scan_features &= ~RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM;
14713 			ret = dhd_iovar(dhd, 0, "scan_features", (char *)&scan_features,
14714 					sizeof(scan_features), NULL, 0, TRUE);
14715 			if (ret < 0) {
14716 				DHD_ERROR(("%s set scan_features err(%d)\n",
14717 					__FUNCTION__, ret));
14718 			}
14719 		}
14720 	}
14721 #endif /* DISABLE_PRUNED_SCAN */
14722 
14723 	DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n",
14724 		dhd->op_mode, MAC2STRDBG(dhd->mac.octet)));
14725 #if defined(DHD_BLOB_EXISTENCE_CHECK)
14726 	if (!dhd->is_blob)
14727 #endif /* DHD_BLOB_EXISTENCE_CHECK */
14728 	{
14729 		/* get a ccode and revision for the country code */
14730 #if defined(CUSTOM_COUNTRY_CODE)
14731 		get_customized_country_code(dhd->info->adapter, dhd->dhd_cspec.country_abbrev,
14732 			&dhd->dhd_cspec, dhd->dhd_cflags);
14733 #else
14734 		get_customized_country_code(dhd->info->adapter, dhd->dhd_cspec.country_abbrev,
14735 			&dhd->dhd_cspec);
14736 #endif /* CUSTOM_COUNTRY_CODE */
14737 	}
14738 
14739 #if defined(RXFRAME_THREAD) && defined(RXTHREAD_ONLYSTA)
14740 	if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE)
14741 		dhd->info->rxthread_enabled = FALSE;
14742 	else
14743 		dhd->info->rxthread_enabled = TRUE;
14744 #endif
14745 	/* Set Country code  */
14746 	if (dhd->dhd_cspec.ccode[0] != 0) {
14747 		ret = dhd_iovar(dhd, 0, "country", (char *)&dhd->dhd_cspec, sizeof(wl_country_t),
14748 				NULL, 0, TRUE);
14749 		if (ret < 0)
14750 			DHD_ERROR(("%s: country code setting failed\n", __FUNCTION__));
14751 	}
14752 
14753 #if defined(DISABLE_11AC)
14754 	ret = dhd_iovar(dhd, 0, "vhtmode", (char *)&vhtmode, sizeof(vhtmode), NULL, 0, TRUE);
14755 	if (ret < 0)
14756 		DHD_ERROR(("%s wl vhtmode 0 failed %d\n", __FUNCTION__, ret));
14757 #endif /* DISABLE_11AC */
14758 
14759 	/* Set Listen Interval */
14760 	ret = dhd_iovar(dhd, 0, "assoc_listen", (char *)&listen_interval, sizeof(listen_interval),
14761 			NULL, 0, TRUE);
14762 	if (ret < 0)
14763 		DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret));
14764 
14765 #if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
14766 #ifdef USE_WFA_CERT_CONF
14767 	if (sec_get_param_wfa_cert(dhd, SET_PARAM_ROAMOFF, &roamvar) == BCME_OK) {
14768 		DHD_ERROR(("%s: read roam_off param =%d\n", __FUNCTION__, roamvar));
14769 	}
14770 #endif /* USE_WFA_CERT_CONF */
14771 	/* Disable built-in roaming to allow the ext supplicant to take care of roaming */
14772 	ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar, sizeof(roamvar), NULL, 0, TRUE);
14773 	if (ret < 0) {
14774 		DHD_ERROR(("%s roam_off failed %d\n", __FUNCTION__, ret));
14775 	}
14776 #endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
14777 #if defined(ROAM_ENABLE)
14778 #ifdef DISABLE_BCNLOSS_ROAM
14779 	ret = dhd_iovar(dhd, 0, "roam_bcnloss_off", (char *)&roam_bcnloss_off,
14780 			sizeof(roam_bcnloss_off), NULL, 0, TRUE);
14781 	if (ret < 0) {
14782 		DHD_ERROR(("%s roam_bcnloss_off failed %d\n", __FUNCTION__, ret));
14783 	}
14784 #endif /* DISABLE_BCNLOSS_ROAM */
14785 	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, roam_trigger,
14786 		sizeof(roam_trigger), TRUE, 0)) < 0)
14787 		DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__, ret));
14788 	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD, roam_scan_period,
14789 		sizeof(roam_scan_period), TRUE, 0)) < 0)
14790 		DHD_ERROR(("%s: roam scan period set failed %d\n", __FUNCTION__, ret));
14791 	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, roam_delta,
14792 		sizeof(roam_delta), TRUE, 0)) < 0)
14793 		DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__, ret));
14794 	ret = dhd_iovar(dhd, 0, "fullroamperiod", (char *)&roam_fullscan_period,
14795 			sizeof(roam_fullscan_period), NULL, 0, TRUE);
14796 	if (ret < 0)
14797 		DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__, ret));
14798 #ifdef ROAM_AP_ENV_DETECTION
14799 	if (roam_trigger[0] == WL_AUTO_ROAM_TRIGGER) {
14800 		if (dhd_iovar(dhd, 0, "roam_env_detection", (char *)&roam_env_mode,
14801 				sizeof(roam_env_mode), NULL, 0, TRUE) == BCME_OK)
14802 			dhd->roam_env_detection = TRUE;
14803 		else
14804 			dhd->roam_env_detection = FALSE;
14805 	}
14806 #endif /* ROAM_AP_ENV_DETECTION */
14807 #ifdef CONFIG_ROAM_RSSI_LIMIT
14808 	ret = dhd_roam_rssi_limit_set(dhd, CUSTOM_ROAMRSSI_2G, CUSTOM_ROAMRSSI_5G);
14809 	if (ret < 0) {
14810 		DHD_ERROR(("%s set roam_rssi_limit failed ret %d\n", __FUNCTION__, ret));
14811 	}
14812 #endif /* CONFIG_ROAM_RSSI_LIMIT */
14813 #ifdef CONFIG_ROAM_MIN_DELTA
14814 	ret = dhd_roam_min_delta_set(dhd, CUSTOM_ROAM_MIN_DELTA, CUSTOM_ROAM_MIN_DELTA);
14815 	if (ret < 0) {
14816 		DHD_ERROR(("%s set roam_min_delta failed ret %d\n", __FUNCTION__, ret));
14817 	}
14818 #endif /* CONFIG_ROAM_MIN_DELTA */
14819 #endif /* ROAM_ENABLE */
14820 
14821 #ifdef CUSTOM_EVENT_PM_WAKE
14822 	/* XXX need to check time value */
14823 	ret = dhd_iovar(dhd, 0, "const_awake_thresh", (char *)&pm_awake_thresh,
14824 			sizeof(pm_awake_thresh), NULL, 0, TRUE);
14825 	if (ret < 0) {
14826 		DHD_ERROR(("%s set const_awake_thresh failed %d\n", __FUNCTION__, ret));
14827 	}
14828 #endif	/* CUSTOM_EVENT_PM_WAKE */
14829 #ifdef OKC_SUPPORT
14830 	dhd_iovar(dhd, 0, "okc_enable", (char *)&okc, sizeof(okc), NULL, 0, TRUE);
14831 #endif
14832 #ifdef BCMCCX
14833 	dhd_iovar(dhd, 0, "ccx_enable", (char *)&ccx, sizeof(ccx), NULL, 0, TRUE);
14834 #endif /* BCMCCX */
14835 
14836 #ifdef WLTDLS
14837 	dhd->tdls_enable = FALSE;
14838 	dhd_tdls_set_mode(dhd, false);
14839 #endif /* WLTDLS */
14840 
14841 #ifdef DHD_ENABLE_LPC
14842 	/* Set lpc 1 */
14843 	ret = dhd_iovar(dhd, 0, "lpc", (char *)&lpc, sizeof(lpc), NULL, 0, TRUE);
14844 	if (ret < 0) {
14845 		DHD_ERROR(("%s Set lpc failed  %d\n", __FUNCTION__, ret));
14846 
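		/* Some iovars can only be set while the interface is down; on
		 * BCME_NOTDOWN, force WL down and retry the set once.
		 */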
14847 		if (ret == BCME_NOTDOWN) {
14848 			uint wl_down = 1;
14849 			ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN,
14850 				(char *)&wl_down, sizeof(wl_down), TRUE, 0);
14851 			DHD_ERROR(("%s lpc fail WL_DOWN : %d, lpc = %d\n", __FUNCTION__, ret, lpc));
14852 
14853 			ret = dhd_iovar(dhd, 0, "lpc", (char *)&lpc, sizeof(lpc), NULL, 0, TRUE);
14854 			DHD_ERROR(("%s Set lpc ret --> %d\n", __FUNCTION__, ret));
14855 		}
14856 	}
14857 #endif /* DHD_ENABLE_LPC */
14858 
14859 #ifdef WLADPS
14860 	if (dhd->op_mode & DHD_FLAG_STA_MODE) {
14861 		if ((ret = dhd_enable_adps(dhd, ADPS_ENABLE)) != BCME_OK &&
14862 			(ret != BCME_UNSUPPORTED)) {
14863 			DHD_ERROR(("%s dhd_enable_adps failed %d\n",
14864 				__FUNCTION__, ret));
14865 		}
14866 	}
14867 #endif /* WLADPS */
14868 
14869 #ifdef DHD_PM_CONTROL_FROM_FILE
14870 #ifdef CUSTOMER_HW10
14871 	dhd_control_pm(dhd, &power_mode);
14872 #else
14873 	sec_control_pm(dhd, &power_mode);
14874 #endif /* CUSTOMER_HW10 */
14875 #else
14876 	/* Set PowerSave mode */
14877 	(void) dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0);
14878 #endif /* DHD_PM_CONTROL_FROM_FILE */
14879 
14880 #if defined(BCMSDIO)
14881 	/* Match Host and Dongle rx alignment */
14882 	ret = dhd_iovar(dhd, 0, "bus:txglomalign", (char *)&dongle_align, sizeof(dongle_align),
14883 			NULL, 0, TRUE);
14884 	if (ret < 0) {
14885 		DHD_ERROR(("%s set bus:txglomalign failed %d\n", __FUNCTION__, ret));
14886 	}
14887 
14888 #ifdef USE_WFA_CERT_CONF
14889 	if (sec_get_param_wfa_cert(dhd, SET_PARAM_BUS_TXGLOM_MODE, &glom) == BCME_OK) {
14890 		DHD_ERROR(("%s, read txglom param =%d\n", __FUNCTION__, glom));
14891 	}
14892 #endif /* USE_WFA_CERT_CONF */
14893 	if (glom != DEFAULT_GLOM_VALUE) {
14894 		DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__, glom));
14895 		ret = dhd_iovar(dhd, 0, "bus:txglom", (char *)&glom, sizeof(glom), NULL, 0, TRUE);
14896 		if (ret < 0) {
14897 			DHD_ERROR(("%s set bus:txglom failed %d\n", __FUNCTION__, ret));
14898 		}
14899 	}
14900 #endif /* defined(BCMSDIO) */
14901 
14902 	/* Setup timeout if Beacons are lost and roam is off to report link down */
14903 	ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout, sizeof(bcn_timeout),
14904 			NULL, 0, TRUE);
14905 	if (ret < 0) {
14906 		DHD_ERROR(("%s set bcn_timeout failed %d\n", __FUNCTION__, ret));
14907 	}
14908 
14909 	/* Setup assoc_retry_max count to reconnect target AP in dongle */
14910 	ret = dhd_iovar(dhd, 0, "assoc_retry_max", (char *)&retry_max, sizeof(retry_max),
14911 			NULL, 0, TRUE);
14912 	if (ret < 0) {
14913 		DHD_ERROR(("%s set assoc_retry_max failed %d\n", __FUNCTION__, ret));
14914 	}
14915 
14916 #if defined(AP) && !defined(WLP2P)
14917 	ret = dhd_iovar(dhd, 0, "apsta", (char *)&apsta, sizeof(apsta), NULL, 0, TRUE);
14918 	if (ret < 0) {
14919 		DHD_ERROR(("%s set apsta failed %d\n", __FUNCTION__, ret));
14920 	}
14921 
14922 #endif /* defined(AP) && !defined(WLP2P) */
14923 
14924 #ifdef MIMO_ANT_SETTING
14925 	dhd_sel_ant_from_file(dhd);
14926 #endif /* MIMO_ANT_SETTING */
14927 
14928 #if defined(OEM_ANDROID) && defined(SOFTAP)
14929 	if (ap_fw_loaded == TRUE) {
14930 		dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0);
14931 	}
14932 #endif /* defined(OEM_ANDROID) && defined(SOFTAP) */
14933 
14934 #if defined(KEEP_ALIVE)
14935 	{
14936 	/* Set Keep Alive : be sure to use FW with -keepalive */
14937 	int res;
14938 
14939 #if defined(OEM_ANDROID) && defined(SOFTAP)
14940 	if (ap_fw_loaded == FALSE)
14941 #endif /* defined(OEM_ANDROID) && defined(SOFTAP) */
14942 		if (!(dhd->op_mode &
14943 			(DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
14944 			if ((res = dhd_keep_alive_onoff(dhd)) < 0)
14945 				DHD_ERROR(("%s set keepalive failed %d\n",
14946 				__FUNCTION__, res));
14947 		}
14948 	}
14949 #endif /* defined(KEEP_ALIVE) */
14950 
14951 #ifdef USE_WL_TXBF
14952 	ret = dhd_iovar(dhd, 0, "txbf", (char *)&txbf, sizeof(txbf), NULL, 0, TRUE);
14953 	if (ret < 0)
14954 		DHD_ERROR(("%s Set txbf failed  %d\n", __FUNCTION__, ret));
14955 
14956 #endif /* USE_WL_TXBF */
14957 
14958 	ret = dhd_iovar(dhd, 0, "scancache", (char *)&scancache_enab, sizeof(scancache_enab), NULL,
14959 			0, TRUE);
14960 	if (ret < 0) {
14961 		DHD_ERROR(("%s Set scancache failed %d\n", __FUNCTION__, ret));
14962 	}
14963 
14964 #else /* OEM_ANDROID */
14965 	if ((ret = dhd_apply_default_clm(dhd, clm_path)) < 0) {
14966 		DHD_ERROR(("%s: CLM set failed. Abort initialization.\n", __FUNCTION__));
14967 		goto done;
14968 	}
14969 
14970 #if defined(KEEP_ALIVE)
14971 	if (!(dhd->op_mode &
14972 		(DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
14973 		if ((ret = dhd_keep_alive_onoff(dhd)) < 0)
14974 			DHD_ERROR(("%s set keepalive failed %d\n",
14975 			__FUNCTION__, ret));
14976 	}
14977 #endif
14978 
14979 	/* get capabilities from firmware */
14980 	memset(dhd->fw_capabilities, 0, sizeof(dhd->fw_capabilities));
14981 	ret = dhd_iovar(dhd, 0, "cap", NULL, 0, dhd->fw_capabilities, sizeof(dhd->fw_capabilities),
14982 			FALSE);
14983 	if (ret < 0) {
14984 		DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
14985 			__FUNCTION__, ret));
14986 		goto done;
14987 	}
14988 #endif  /* OEM_ANDROID */
14989 
14990 	ret = dhd_iovar(dhd, 0, "event_log_max_sets", NULL, 0, (char *)&event_log_max_sets,
14991 		sizeof(event_log_max_sets), FALSE);
14992 	if (ret == BCME_OK) {
14993 		dhd->event_log_max_sets = event_log_max_sets;
14994 	} else {
14995 		dhd->event_log_max_sets = NUM_EVENT_LOG_SETS;
14996 	}
14997 	/* Make sure max_sets is written first, then a wmb, then sets_queried;
14998 	 * readers check the pair in the reverse order when parsing the logsets.
14999 	 */
15000 	OSL_SMP_WMB();
15001 	dhd->event_log_max_sets_queried = TRUE;
15002 	DHD_ERROR(("%s: event_log_max_sets: %d ret: %d\n",
15003 		__FUNCTION__, dhd->event_log_max_sets, ret));
15004 #ifdef DHD_BUS_MEM_ACCESS
15005 	ret = dhd_iovar(dhd, 0, "enable_memuse", (char *)&enable_memuse,
15006 			sizeof(enable_memuse), iovbuf, sizeof(iovbuf), FALSE);
15007 	if (ret < 0) {
15008 		DHD_ERROR(("%s: enable_memuse is failed ret=%d\n",
15009 			__FUNCTION__, ret));
15010 	} else {
15011 		DHD_ERROR(("%s: enable_memuse = %d\n",
15012 			__FUNCTION__, enable_memuse));
15013 	}
15014 #endif /* DHD_BUS_MEM_ACCESS */
15015 
15016 #ifdef DISABLE_TXBFR
15017 	ret = dhd_iovar(dhd, 0, "txbf_bfr_cap", (char *)&txbf_bfr_cap, sizeof(txbf_bfr_cap), NULL,
15018 			0, TRUE);
15019 	if (ret < 0) {
15020 		DHD_ERROR(("%s Clear txbf_bfr_cap failed  %d\n", __FUNCTION__, ret));
15021 	}
15022 #endif /* DISABLE_TXBFR */
15023 
15024 #ifdef USE_WFA_CERT_CONF
15025 #ifdef USE_WL_FRAMEBURST
15026 	if (sec_get_param_wfa_cert(dhd, SET_PARAM_FRAMEBURST, &frameburst) == BCME_OK) {
15027 		DHD_ERROR(("%s, read frameburst param=%d\n", __FUNCTION__, frameburst));
15028 	}
15029 #endif /* USE_WL_FRAMEBURST */
15030 	g_frameburst = frameburst;
15031 #endif /* USE_WFA_CERT_CONF */
15032 #ifdef DISABLE_WL_FRAMEBURST_SOFTAP
15033 	/* Disable Framebursting for SoftAP */
15034 	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
15035 		frameburst = 0;
15036 	}
15037 #endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
15038 	/* Set frameburst to value */
15039 	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst,
15040 		sizeof(frameburst), TRUE, 0)) < 0) {
15041 		DHD_INFO(("%s frameburst not supported  %d\n", __FUNCTION__, ret));
15042 	}
15043 #ifdef DHD_SET_FW_HIGHSPEED
15044 	/* Set ack_ratio */
15045 	ret = dhd_iovar(dhd, 0, "ack_ratio", (char *)&ack_ratio, sizeof(ack_ratio), NULL, 0, TRUE);
15046 	if (ret < 0) {
15047 		DHD_ERROR(("%s Set ack_ratio failed  %d\n", __FUNCTION__, ret));
15048 	}
15049 
15050 	/* Set ack_ratio_depth */
15051 	ret = dhd_iovar(dhd, 0, "ack_ratio_depth", (char *)&ack_ratio_depth,
15052 			sizeof(ack_ratio_depth), NULL, 0, TRUE);
15053 	if (ret < 0) {
15054 		DHD_ERROR(("%s Set ack_ratio_depth failed  %d\n", __FUNCTION__, ret));
15055 	}
15056 #endif /* DHD_SET_FW_HIGHSPEED */
15057 
15058 	iov_buf = (char*)MALLOC(dhd->osh, WLC_IOCTL_SMLEN);
15059 	if (iov_buf == NULL) {
15060 		DHD_ERROR(("failed to allocate %d bytes for iov_buf\n", WLC_IOCTL_SMLEN));
15061 		ret = BCME_NOMEM;
15062 		goto done;
15063 	}
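	/* iov_buf is a WLC_IOCTL_SMLEN scratch buffer reused for the iovar
	 * responses below and freed at 'done'.
	 */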
15064 
15065 	BCM_REFERENCE(ret2);
15066 
15067 #ifdef WLAIBSS
15068 	/* Apply AIBSS configurations */
15069 	if ((ret = dhd_preinit_aibss_ioctls(dhd, iov_buf)) != BCME_OK) {
15070 		DHD_ERROR(("%s dhd_preinit_aibss_ioctls failed %d\n",
15071 				__FUNCTION__, ret));
15072 		goto done;
15073 	}
15074 #endif /* WLAIBSS */
15075 
15076 #if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \
15077 	defined(CUSTOM_IBSS_AMPDU_BA_WSIZE))
15078 	/* Set ampdu ba wsize to 64 or 16 */
15079 #ifdef CUSTOM_AMPDU_BA_WSIZE
15080 	ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE;
15081 #endif
15082 #if defined(WLAIBSS) && defined(CUSTOM_IBSS_AMPDU_BA_WSIZE)
15083 	if (dhd->op_mode == DHD_FLAG_IBSS_MODE)
15084 		ampdu_ba_wsize = CUSTOM_IBSS_AMPDU_BA_WSIZE;
15085 #endif /* WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE */
15086 	if (ampdu_ba_wsize != 0) {
15087 		ret = dhd_iovar(dhd, 0, "ampdu_ba_wsize", (char *)&ampdu_ba_wsize,
15088 				sizeof(ampdu_ba_wsize), NULL, 0, TRUE);
15089 		if (ret < 0) {
15090 			DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed  %d\n",
15091 				__FUNCTION__, ampdu_ba_wsize, ret));
15092 		}
15093 	}
15094 #endif /* CUSTOM_AMPDU_BA_WSIZE || (WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
15095 
15096 #if defined(CUSTOM_AMPDU_MPDU)
15097 	ampdu_mpdu = CUSTOM_AMPDU_MPDU;
15098 	if (ampdu_mpdu != 0 && (ampdu_mpdu <= ampdu_ba_wsize)) {
15099 		ret = dhd_iovar(dhd, 0, "ampdu_mpdu", (char *)&ampdu_mpdu, sizeof(ampdu_mpdu),
15100 				NULL, 0, TRUE);
15101 		if (ret < 0) {
15102 			DHD_ERROR(("%s Set ampdu_mpdu to %d failed  %d\n",
15103 				__FUNCTION__, CUSTOM_AMPDU_MPDU, ret));
15104 		}
15105 	}
15106 #endif /* CUSTOM_AMPDU_MPDU */
15107 
15108 #if defined(CUSTOM_AMPDU_RELEASE)
15109 	ampdu_release = CUSTOM_AMPDU_RELEASE;
15110 	if (ampdu_release != 0 && (ampdu_release <= ampdu_ba_wsize)) {
15111 		ret = dhd_iovar(dhd, 0, "ampdu_release", (char *)&ampdu_release,
15112 				sizeof(ampdu_release), NULL, 0, TRUE);
15113 		if (ret < 0) {
15114 			DHD_ERROR(("%s Set ampdu_release to %d failed  %d\n",
15115 				__FUNCTION__, CUSTOM_AMPDU_RELEASE, ret));
15116 		}
15117 	}
15118 #endif /* CUSTOM_AMPDU_RELEASE */
15119 
15120 #if defined(CUSTOM_AMSDU_AGGSF)
15121 	amsdu_aggsf = CUSTOM_AMSDU_AGGSF;
15122 	if (amsdu_aggsf != 0) {
15123 		ret = dhd_iovar(dhd, 0, "amsdu_aggsf", (char *)&amsdu_aggsf, sizeof(amsdu_aggsf),
15124 				NULL, 0, TRUE);
15125 		if (ret < 0) {
15126 			DHD_ERROR(("%s Set amsdu_aggsf to %d failed  %d\n",
15127 				__FUNCTION__, CUSTOM_AMSDU_AGGSF, ret));
15128 		}
15129 	}
15130 #endif /* CUSTOM_AMSDU_AGGSF */
15131 
15132 #if defined(BCMSUP_4WAY_HANDSHAKE)
15133 	/* Read 4-way handshake requirements */
15134 	if (dhd_use_idsup == 1) {
15135 		ret = dhd_iovar(dhd, 0, "sup_wpa", (char *)&sup_wpa, sizeof(sup_wpa),
15136 				(char *)&iovbuf, sizeof(iovbuf), FALSE);
15137 		/* sup_wpa iovar returns NOTREADY status on some platforms using modularized
15138 		 * in-dongle supplicant.
15139 		 */
15140 		if (ret >= 0 || ret == BCME_NOTREADY)
15141 			dhd->fw_4way_handshake = TRUE;
15142 		DHD_TRACE(("4-way handshake mode is: %d\n", dhd->fw_4way_handshake));
15143 	}
15144 #endif /* BCMSUP_4WAY_HANDSHAKE */
15145 #if defined(SUPPORT_2G_VHT) || defined(SUPPORT_5G_1024QAM_VHT)
15146 	ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features, sizeof(vht_features),
15147 			(char *)&vht_features, sizeof(vht_features), FALSE);
15148 	if (ret < 0) {
15149 		DHD_ERROR(("%s vht_features get failed %d\n", __FUNCTION__, ret));
15150 		vht_features = 0;
15151 	} else {
15152 #ifdef SUPPORT_2G_VHT
15153 		vht_features |= 0x3; /* 2G support */
15154 #endif /* SUPPORT_2G_VHT */
15155 #ifdef SUPPORT_5G_1024QAM_VHT
15156 		vht_features |= 0x6; /* 5G 1024 QAM support */
15157 #endif /* SUPPORT_5G_1024QAM_VHT */
15158 	}
15159 	if (vht_features) {
15160 		ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features, sizeof(vht_features),
15161 				NULL, 0, TRUE);
15162 		if (ret < 0) {
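			/* Same pattern as "lpc" above: if the set requires the
			 * interface to be down, force WL down and retry once.
			 */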
15163 			if (ret == BCME_NOTDOWN) {
15164 				uint wl_down = 1;
15165 				ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN,
15166 					(char *)&wl_down, sizeof(wl_down), TRUE, 0);
15167 				DHD_ERROR(("%s vht_features fail WL_DOWN : %d,"
15168 					" vht_features = 0x%x\n",
15169 					__FUNCTION__, ret, vht_features));
15170 
15171 				ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features,
15172 						sizeof(vht_features), NULL, 0, TRUE);
15173 
15174 				DHD_ERROR(("%s vht_features set. ret --> %d\n", __FUNCTION__, ret));
15175 			}
15176 			if (ret != BCME_BADOPTION) {
15177 				DHD_ERROR(("%s vht_features set failed %d\n", __FUNCTION__, ret));
15178 			} else {
15179 				DHD_INFO(("%s vht_features ret(%d) - need to check BANDLOCK\n",
15180 					__FUNCTION__, ret));
15181 			}
15182 		}
15183 	}
15184 #endif /* SUPPORT_2G_VHT || SUPPORT_5G_1024QAM_VHT */
15185 #ifdef DISABLE_11N_PROPRIETARY_RATES
15186 	ret = dhd_iovar(dhd, 0, "ht_features", (char *)&ht_features, sizeof(ht_features), NULL, 0,
15187 			TRUE);
15188 	if (ret < 0) {
15189 		DHD_ERROR(("%s ht_features set failed %d\n", __FUNCTION__, ret));
15190 	}
15191 #endif /* DISABLE_11N_PROPRIETARY_RATES */
15192 #if defined(DISABLE_HE_ENAB) || defined(CUSTOM_CONTROL_HE_ENAB)
15193 #if defined(DISABLE_HE_ENAB)
15194 	/* XXX DISABLE_HE_ENAB has higher priority than CUSTOM_CONTROL_HE_ENAB */
15195 	control_he_enab = 0;
15196 #endif /* DISABLE_HE_ENAB */
15197 	dhd_control_he_enab(dhd, control_he_enab);
15198 #endif /* DISABLE_HE_ENAB || CUSTOM_CONTROL_HE_ENAB */
15199 
15200 #ifdef CUSTOM_PSPRETEND_THR
15201 	/* Set the PS-pretend threshold used in HostAPD mode */
15202 	ret = dhd_iovar(dhd, 0, "pspretend_threshold", (char *)&pspretend_thr,
15203 			sizeof(pspretend_thr), NULL, 0, TRUE);
15204 	if (ret < 0) {
15205 		DHD_ERROR(("%s pspretend_threshold for HostAPD failed  %d\n",
15206 			__FUNCTION__, ret));
15207 	}
15208 #endif
15209 
15210 	/* XXX Enable firmware key buffering before sending 4-way M4 */
15211 	ret = dhd_iovar(dhd, 0, "buf_key_b4_m4", (char *)&buf_key_b4_m4, sizeof(buf_key_b4_m4),
15212 			NULL, 0, TRUE);
15213 	if (ret < 0) {
15214 		DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__, ret));
15215 	}
15216 #ifdef SUPPORT_SET_CAC
15217 	ret = dhd_iovar(dhd, 0, "cac", (char *)&cac, sizeof(cac), NULL, 0, TRUE);
15218 	if (ret < 0) {
15219 		DHD_ERROR(("%s Failed to set cac to %d, %d\n", __FUNCTION__, cac, ret));
15220 	}
15221 #endif /* SUPPORT_SET_CAC */
15222 	/* make up event mask ext message iovar for events larger than 128 */
15223 	msglen = WL_EVENTING_MASK_EXT_LEN + EVENTMSGS_EXT_STRUCT_SIZE;
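	/* msglen covers the eventmsgs_ext_t header plus a bitmask wide enough
	 * for every WLC_E_* event, one bit each, rounded up to whole bytes.
	 */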
15224 	eventmask_msg = (eventmsgs_ext_t*)MALLOC(dhd->osh, msglen);
15225 	if (eventmask_msg == NULL) {
15226 		DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", msglen));
15227 		ret = BCME_NOMEM;
15228 		goto done;
15229 	}
15230 	bzero(eventmask_msg, msglen);
15231 	eventmask_msg->ver = EVENTMSGS_VER;
15232 	eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
15233 
15234 	/* Read event_msgs_ext mask */
15235 	ret = dhd_iovar(dhd, 0, "event_msgs_ext", (char *)eventmask_msg, msglen, iov_buf,
15236 			WLC_IOCTL_SMLEN, FALSE);
15237 
15238 	/* event_msgs_ext must be supported */
15239 	if (ret != BCME_OK) {
15240 		DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__, ret));
15241 		goto done;
15242 	}
15243 
15244 	bcopy(iov_buf, eventmask_msg, msglen);
15245 	/* operate on the event mask carried in the firmware response */
15246 	mask = eventmask_msg->mask;
15247 
15248 	/* Setup event_msgs */
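	/* Each setbit() below enables host delivery of one WLC_E_* firmware
	 * event; clrbit() suppresses it. The updated mask is written back via
	 * "event_msgs_ext" once all the bits are arranged.
	 */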
15249 	setbit(mask, WLC_E_SET_SSID);
15250 	setbit(mask, WLC_E_PRUNE);
15251 	setbit(mask, WLC_E_AUTH);
15252 	setbit(mask, WLC_E_AUTH_IND);
15253 	setbit(mask, WLC_E_ASSOC);
15254 	setbit(mask, WLC_E_REASSOC);
15255 	setbit(mask, WLC_E_REASSOC_IND);
15256 	if (!(dhd->op_mode & DHD_FLAG_IBSS_MODE))
15257 		setbit(mask, WLC_E_DEAUTH);
15258 	setbit(mask, WLC_E_DEAUTH_IND);
15259 	setbit(mask, WLC_E_DISASSOC_IND);
15260 	setbit(mask, WLC_E_DISASSOC);
15261 	setbit(mask, WLC_E_JOIN);
15262 	setbit(mask, WLC_E_START);
15263 	setbit(mask, WLC_E_ASSOC_IND);
15264 	setbit(mask, WLC_E_PSK_SUP);
15265 	setbit(mask, WLC_E_LINK);
15266 	setbit(mask, WLC_E_MIC_ERROR);
15267 	setbit(mask, WLC_E_ASSOC_REQ_IE);
15268 	setbit(mask, WLC_E_ASSOC_RESP_IE);
15269 #ifdef LIMIT_BORROW
15270 	setbit(mask, WLC_E_ALLOW_CREDIT_BORROW);
15271 #endif
15272 #ifndef WL_CFG80211
15273 	setbit(mask, WLC_E_PMKID_CACHE);
15274 //	setbit(mask, WLC_E_TXFAIL); // terence 20181106: remove unnecessary event
15275 #endif
15276 	setbit(mask, WLC_E_JOIN_START);
15277 //	setbit(mask, WLC_E_SCAN_COMPLETE); // terence 20150628: remove redundant event
15278 #ifdef DHD_DEBUG
15279 	setbit(mask, WLC_E_SCAN_CONFIRM_IND);
15280 #endif
15281 #ifdef PNO_SUPPORT
15282 	setbit(mask, WLC_E_PFN_NET_FOUND);
15283 	setbit(mask, WLC_E_PFN_BEST_BATCHING);
15284 	setbit(mask, WLC_E_PFN_BSSID_NET_FOUND);
15285 	setbit(mask, WLC_E_PFN_BSSID_NET_LOST);
15286 #endif /* PNO_SUPPORT */
15287 	/* enable dongle roaming event */
15288 #ifdef WL_CFG80211
15289 #if !defined(ROAM_EVT_DISABLE)
15290 	setbit(mask, WLC_E_ROAM);
15291 #endif /* !ROAM_EVT_DISABLE */
15292 	setbit(mask, WLC_E_BSSID);
15293 #endif /* WL_CFG80211 */
15294 #ifdef BCMCCX
15295 	setbit(mask, WLC_E_ADDTS_IND);
15296 	setbit(mask, WLC_E_DELTS_IND);
15297 #endif /* BCMCCX */
15298 #ifdef WLTDLS
15299 	setbit(mask, WLC_E_TDLS_PEER_EVENT);
15300 #endif /* WLTDLS */
15301 #ifdef WL_ESCAN
15302 	setbit(mask, WLC_E_ESCAN_RESULT);
15303 #endif /* WL_ESCAN */
15304 #ifdef CSI_SUPPORT
15305 	setbit(mask, WLC_E_CSI);
15306 #endif /* CSI_SUPPORT */
15307 #ifdef RTT_SUPPORT
15308 	setbit(mask, WLC_E_PROXD);
15309 #endif /* RTT_SUPPORT */
15310 #if !defined(WL_CFG80211) && !defined(OEM_ANDROID)
15311 	setbit(mask, WLC_E_ESCAN_RESULT);
15312 #endif
15313 #ifdef WL_CFG80211
15314 	setbit(mask, WLC_E_ESCAN_RESULT);
15315 	setbit(mask, WLC_E_AP_STARTED);
15316 	setbit(mask, WLC_E_ACTION_FRAME_RX);
15317 	if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
15318 		setbit(mask, WLC_E_P2P_DISC_LISTEN_COMPLETE);
15319 	}
15320 #endif /* WL_CFG80211 */
15321 #ifdef WLAIBSS
15322 	setbit(mask, WLC_E_AIBSS_TXFAIL);
15323 #endif /* WLAIBSS */
15324 
15325 #if defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE)
15326 	if (dhd_logtrace_from_file(dhd)) {
15327 		setbit(mask, WLC_E_TRACE);
15328 	} else {
15329 		clrbit(mask, WLC_E_TRACE);
15330 	}
15331 #elif defined(SHOW_LOGTRACE)
15332 	setbit(mask, WLC_E_TRACE);
15333 #else
15334 	clrbit(mask, WLC_E_TRACE);
15335 #endif /* defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE) */
15336 
15337 	setbit(mask, WLC_E_CSA_COMPLETE_IND);
15338 #ifdef DHD_WMF
15339 	setbit(mask, WLC_E_PSTA_PRIMARY_INTF_IND);
15340 #endif
15341 #ifdef CUSTOM_EVENT_PM_WAKE
15342 	setbit(mask, WLC_E_EXCESS_PM_WAKE_EVENT);
15343 #endif	/* CUSTOM_EVENT_PM_WAKE */
15344 #ifdef DHD_LOSSLESS_ROAMING
15345 	setbit(mask, WLC_E_ROAM_PREP);
15346 #endif
15347 	/* nan events */
15348 	setbit(mask, WLC_E_NAN);
15349 #if defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING)
15350 	dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
15351 #endif /* defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING) */
15352 
15353 #if defined(BCMPCIE) && defined(EAPOL_PKT_PRIO)
15354 	dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
15355 #endif /* defined(BCMPCIE) && defined(EAPOL_PKT_PRIO) */
15356 
15357 #ifdef RSSI_MONITOR_SUPPORT
15358 	setbit(mask, WLC_E_RSSI_LQM);
15359 #endif /* RSSI_MONITOR_SUPPORT */
15360 #ifdef GSCAN_SUPPORT
15361 	setbit(mask, WLC_E_PFN_GSCAN_FULL_RESULT);
15362 	setbit(mask, WLC_E_PFN_SCAN_COMPLETE);
15363 	setbit(mask, WLC_E_PFN_SSID_EXT);
15364 	setbit(mask, WLC_E_ROAM_EXP_EVENT);
15365 #endif /* GSCAN_SUPPORT */
15366 	setbit(mask, WLC_E_RSSI_LQM);
15367 #ifdef BT_WIFI_HANDOVER
15368 	setbit(mask, WLC_E_BT_WIFI_HANDOVER_REQ);
15369 #endif /* BT_WIFI_HANDOVER */
15370 #ifdef DBG_PKT_MON
15371 	setbit(mask, WLC_E_ROAM_PREP);
15372 #endif /* DBG_PKT_MON */
15373 #ifdef WL_NATOE
15374 	setbit(mask, WLC_E_NATOE_NFCT);
15375 #endif /* WL_NATOE */
15376 #ifdef BCM_ROUTER_DHD
15377 	setbit(mask, WLC_E_DPSTA_INTF_IND);
15378 #endif /* BCM_ROUTER_DHD */
15379 	setbit(mask, WLC_E_SLOTTED_BSS_PEER_OP);
15380 #ifdef WL_BCNRECV
15381 	setbit(mask, WLC_E_BCNRECV_ABORTED);
15382 #endif /* WL_BCNRECV */
15383 #ifdef WL_MBO
15384 	setbit(mask, WLC_E_MBO);
15385 #endif /* WL_MBO */
15386 #ifdef WL_CLIENT_SAE
15387 	setbit(mask, WLC_E_JOIN_START);
15388 #endif /* WL_CLIENT_SAE */
15389 #ifdef WL_CAC_TS
15390 	setbit(mask, WLC_E_ADDTS_IND);
15391 	setbit(mask, WLC_E_DELTS_IND);
15392 #endif /* WL_CAC_TS */
15393 #ifdef CUSTOMER_HW6
15394 	setbit(mask, WLC_E_COUNTRY_CODE_CHANGED);
15395 #endif /* CUSTOMER_HW6 */
15396 
15397 	/* Write updated Event mask */
15398 	eventmask_msg->ver = EVENTMSGS_VER;
15399 	eventmask_msg->command = EVENTMSGS_SET_MASK;
15400 	eventmask_msg->len = WL_EVENTING_MASK_EXT_LEN;
15401 	ret = dhd_iovar(dhd, 0, "event_msgs_ext", (char *)eventmask_msg, msglen, NULL, 0,
15402 			TRUE);
15403 	if (ret < 0) {
15404 		DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__, ret));
15405 		goto done;
15406 	}
15407 
15408 #if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
15409 	/* Enabling event log trace for EAP events */
15410 	el_tag = (wl_el_tag_params_t *)MALLOC(dhd->osh, sizeof(wl_el_tag_params_t));
15411 	if (el_tag == NULL) {
15412 		DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n",
15413 				(int)sizeof(wl_el_tag_params_t)));
15414 		ret = BCME_NOMEM;
15415 		goto done;
15416 	}
15417 	el_tag->tag = EVENT_LOG_TAG_4WAYHANDSHAKE;
15418 	el_tag->set = 1;
15419 	el_tag->flags = EVENT_LOG_TAG_FLAG_LOG;
15420 	ret = dhd_iovar(dhd, 0, "event_log_tag_control", (char *)el_tag, sizeof(*el_tag), NULL,
15421 			0, TRUE);
15422 	if (ret < 0) {
15423 		DHD_ERROR(("%s set event_log_tag_control fail %d\n", __FUNCTION__, ret));
15424 	}
15425 #endif /* DHD_8021X_DUMP && SHOW_LOGTRACE */
15426 #ifdef DHD_RANDMAC_LOGGING
15427 	if (FW_SUPPORTED((dhd), event_log)) {
15428 		if (dhd_iovar(dhd, 0, "privacy_mask", (char *)&privacy_mask, sizeof(privacy_mask),
15429 			NULL, 0, TRUE) < 0) {
15430 			DHD_ERROR(("failed to set privacy mask\n"));
15431 		}
15432 	} else {
15433 		/* Don't enable feature to prevent macaddr print in clr text */
15434 		DHD_ERROR(("skip privacy_mask set. event_log not enabled\n"));
15435 	}
15436 #endif /* DHD_RANDMAC_LOGGING */
15437 
15438 #ifdef OEM_ANDROID
15439 	dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time,
15440 			sizeof(scan_assoc_time), TRUE, 0);
15441 	dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time,
15442 			sizeof(scan_unassoc_time), TRUE, 0);
15443 	dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME, (char *)&scan_passive_time,
15444 			sizeof(scan_passive_time), TRUE, 0);
15445 
15446 #ifdef ARP_OFFLOAD_SUPPORT
15447 	DHD_ERROR(("arp_enable:%d arp_ol:%d\n",
15448 		dhd->arpoe_enable, dhd->arpol_configured));
15449 #endif /* ARP_OFFLOAD_SUPPORT */
15450 
15451 #ifdef PKT_FILTER_SUPPORT
15452 	/* Setup default definitions for pktfilter, enabled in suspend */
15453 	if (dhd_master_mode) {
15454 		dhd->pktfilter_count = 6;
15455 		dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL;
15456 		if (!FW_SUPPORTED(dhd, pf6)) {
15457 			dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL;
15458 			dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
15459 		} else {
15460 			/* Immediate pkt filter TYPE 6: discard IPv4/IPv6 multicast packets */
15461 			dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = DISCARD_IPV4_MCAST;
15462 			dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = DISCARD_IPV6_MCAST;
15463 		}
15464 		/* apply APP pktfilter */
15465 		dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806";
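		/* Filter strings follow the pkt_filter_add format
		 * "<id> <polarity> <type> <offset> <bitmask> <pattern>"; the ARP
		 * filter above, for instance, matches ethertype 0x0806 at byte
		 * offset 12.
		 */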
15466 
15467 #ifdef BLOCK_IPV6_PACKET
15468 		/* Setup filter to allow only IPv4 unicast frames */
15469 		dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 "
15470 			HEX_PREF_STR UNI_FILTER_STR ZERO_ADDR_STR ETHER_TYPE_STR IPV6_FILTER_STR
15471 			" "
15472 			HEX_PREF_STR ZERO_ADDR_STR ZERO_ADDR_STR ETHER_TYPE_STR ZERO_TYPE_STR;
15473 #else
15474 		/* Setup filter to allow only unicast */
15475 		dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00";
15476 #endif /* BLOCK_IPV6_PACKET */
15477 
15478 #ifdef PASS_IPV4_SUSPEND
15479 		/* XXX customer want to get IPv4 multicast packets */
15480 		dhd->pktfilter[DHD_MDNS_FILTER_NUM] = "104 0 0 0 0xFFFFFF 0x01005E";
15481 #else
15482 		/* Add filter to pass multicast DNS packets and NOT filter them out as broadcast */
15483 		dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL;
15484 #endif /* PASS_IPV4_SUSPEND */
15485 		if (FW_SUPPORTED(dhd, pf6)) {
15486 			/* Immediate pkt filter TYPE 6: discard broadcast IP packets */
15487 			dhd->pktfilter[DHD_IP4BCAST_DROP_FILTER_NUM] = DISCARD_IPV4_BCAST;
15488 			dhd->pktfilter_count = 8;
15489 		}
15490 
15491 #ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
15492 		dhd->pktfilter_count = 4;
15493 		/* Setup filter to block broadcast and NAT Keepalive packets */
15494 		/* discard all broadcast packets */
15495 		dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0xffffff 0xffffff";
15496 		/* discard NAT Keepalive packets */
15497 		dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "102 0 0 36 0xffffffff 0x11940009";
15498 		/* discard NAT Keepalive packets */
15499 		dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "104 0 0 38 0xffffffff 0x11940009";
15500 		dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
15501 #endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
15502 	} else
15503 		dhd_conf_discard_pkt_filter(dhd);
15504 	dhd_conf_add_pkt_filter(dhd);
15505 
15506 #if defined(SOFTAP)
15507 	if (ap_fw_loaded) {
15508 		/* XXX Andrey: for SOFTAP disable pkt filters (if there were any) */
15509 		dhd_enable_packet_filter(0, dhd);
15510 	}
15511 #endif /* defined(SOFTAP) */
15512 	dhd_set_packet_filter(dhd);
15513 #endif /* PKT_FILTER_SUPPORT */
15514 #ifdef DISABLE_11N
15515 	ret = dhd_iovar(dhd, 0, "nmode", (char *)&nmode, sizeof(nmode), NULL, 0, TRUE);
15516 	if (ret < 0)
15517 		DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__, ret));
15518 #endif /* DISABLE_11N */
15519 
15520 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
15521 	ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn, sizeof(bcn_li_bcn),
15522 			NULL, 0, TRUE);
15523 	if (ret < 0) {
15524 		DHD_ERROR(("%s: set bcn_li_bcn failed %d\n", __FUNCTION__, ret));
15525 	}
15526 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
15527 #ifdef AMPDU_VO_ENABLE
15528 	/* XXX: Enabling VO AMPDU to reduce FER */
15529 	tid.tid = PRIO_8021D_VO; /* Enable TID(6) for voice */
15530 	tid.enable = TRUE;
15531 	ret = dhd_iovar(dhd, 0, "ampdu_tid", (char *)&tid, sizeof(tid), NULL, 0, TRUE);
15532 	if (ret < 0) {
15533 		DHD_ERROR(("%s ampdu_tid %d\n", __FUNCTION__, ret));
15534 	}
15535 
15536 	tid.tid = PRIO_8021D_NC; /* Enable TID(7) for network control */
15537 	tid.enable = TRUE;
15538 	ret = dhd_iovar(dhd, 0, "ampdu_tid", (char *)&tid, sizeof(tid), NULL, 0, TRUE);
15539 	if (ret < 0) {
15540 		DHD_ERROR(("%s ampdu_tid %d\n", __FUNCTION__, ret));
15541 	}
15542 #endif
15543 #if defined(SOFTAP_TPUT_ENHANCE)
15544 	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
15545 #if defined(BCMSDIO)
15546 		dhd_bus_setidletime(dhd, (int)100);
15547 #endif /* BCMSDIO */
15548 #ifdef DHDTCPACK_SUPPRESS
15549 		dhd_tcpack_suppress_set(dhd, TCPACK_SUP_OFF);
15550 #endif
15551 #if defined(DHD_TCP_WINSIZE_ADJUST)
15552 		dhd_use_tcp_window_size_adjust = TRUE;
15553 #endif
15554 
15555 #if defined(BCMSDIO)
15556 		memset(buf, 0, sizeof(buf));
15557 		ret = dhd_iovar(dhd, 0, "bus:txglom_auto_control", NULL, 0, buf, sizeof(buf),
15558 				FALSE);
15559 		if (ret < 0) {
15560 			glom = 0;
15561 			ret = dhd_iovar(dhd, 0, "bus:txglom", (char *)&glom, sizeof(glom),
15562 					NULL, 0, TRUE);
15563 			if (ret < 0) {
15564 				DHD_ERROR(("%s bus:txglom failed %d\n", __FUNCTION__, ret));
15565 			}
15566 		} else {
15567 			if (buf[0] == 0) {
15568 				glom = 1;
15569 				ret = dhd_iovar(dhd, 0, "bus:txglom_auto_control", (char *)&glom,
15570 						sizeof(glom), NULL, 0, TRUE);
15571 				if (ret < 0) {
15572 					DHD_ERROR(("%s bus:txglom_auto_control failed %d\n",
15573 						__FUNCTION__, ret));
15574 				}
15575 			}
15576 		}
15577 #endif /* BCMSDIO */
15578 	}
15579 #endif /* SOFTAP_TPUT_ENHANCE */
15580 	/* query for 'clmver' to get clm version info from firmware */
15581 	bzero(buf, sizeof(buf));
15582 	ret = dhd_iovar(dhd, 0, "clmver", NULL, 0, buf, sizeof(buf), FALSE);
15583 	if (ret < 0)
15584 		DHD_ERROR(("%s clmver failed %d\n", __FUNCTION__, ret));
15585 	else {
15586 		char *ver_temp_buf = NULL, *ver_date_buf = NULL;
15587 		int len;
15588 
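		/* The "clmver" text is expected to carry lines such as
		 * "Data: <clm version>" and "Creation: <date>" (plus, on some
		 * builds, "Customization: ..."); the pieces found are folded
		 * into clm_version below.
		 */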
15589 		if ((ver_temp_buf = bcmstrstr(buf, "Data:")) == NULL) {
15590 			DHD_ERROR(("Couldn't find \"Data:\"\n"));
15591 		} else {
15592 			ver_date_buf = bcmstrstr(buf, "Creation:");
15593 			ptr = (ver_temp_buf + strlen("Data:"));
15594 			if ((ver_temp_buf = bcmstrtok(&ptr, "\n", 0)) == NULL) {
15595 				DHD_ERROR(("Couldn't find New line character\n"));
15596 			} else {
15597 				memset(clm_version, 0, CLM_VER_STR_LEN);
15598 				len = snprintf(clm_version, CLM_VER_STR_LEN - 1, "%s", ver_temp_buf);
15599 				if (ver_date_buf) {
15600 					ptr = (ver_date_buf + strlen("Creation:"));
15601 					ver_date_buf = bcmstrtok(&ptr, "\n", 0);
15602 					if (ver_date_buf)
15603 						snprintf(clm_version+len, CLM_VER_STR_LEN-1-len,
15604 							" (%s)", ver_date_buf);
15605 				}
15606 				DHD_INFO(("CLM version = %s\n", clm_version));
15607 			}
15608 		}
15609 
15610 #if defined(CUSTOMER_HW4_DEBUG)
15611 		if ((ver_temp_buf = bcmstrstr(ptr, "Customization:")) == NULL) {
15612 			DHD_ERROR(("Couldn't find \"Customization:\"\n"));
15613 		} else {
15614 			char tokenlim;
15615 			ptr = (ver_temp_buf + strlen("Customization:"));
15616 			if ((ver_temp_buf = bcmstrtok(&ptr, "(\n", &tokenlim)) == NULL) {
15617 				DHD_ERROR(("Couldn't find project blob version"
15618 					"or New line character\n"));
15619 			} else if (tokenlim == '(') {
15620 				snprintf(clm_version,
15621 					CLM_VER_STR_LEN - 1, "%s, Blob ver = Major : %s minor : ",
15622 					clm_version, ver_temp_buf);
15623 				DHD_INFO(("[INFO]CLM/Blob version = %s\n", clm_version));
15624 				if ((ver_temp_buf = bcmstrtok(&ptr, "\n", &tokenlim)) == NULL) {
15625 					DHD_ERROR(("Couldn't find New line character\n"));
15626 				} else {
15627 					snprintf(clm_version,
15628 						strlen(clm_version) + strlen(ver_temp_buf),
15629 						"%s%s",	clm_version, ver_temp_buf);
15630 					DHD_INFO(("[INFO]CLM/Blob/project version = %s\n",
15631 						clm_version));
15632 
15633 				}
15634 			} else if (tokenlim == '\n') {
15635 				snprintf(clm_version,
15636 					strlen(clm_version) + strlen(", Blob ver = Major : ") + 1,
15637 					"%s, Blob ver = Major : ", clm_version);
15638 				snprintf(clm_version,
15639 					strlen(clm_version) + strlen(ver_temp_buf) + 1,
15640 					"%s%s",	clm_version, ver_temp_buf);
15641 				DHD_INFO(("[INFO]CLM/Blob/project version = %s\n", clm_version));
15642 			}
15643 		}
15644 #endif /* CUSTOMER_HW4_DEBUG */
15645 		if (strlen(clm_version)) {
15646 			DHD_INFO(("CLM version = %s\n", clm_version));
15647 		} else {
15648 			DHD_ERROR(("Couldn't find CLM version!\n"));
15649 		}
15650 	}
15651 	dhd_set_version_info(dhd, fw_version);
15652 
15653 #ifdef WRITE_WLANINFO
15654 	sec_save_wlinfo(fw_version, EPI_VERSION_STR, dhd->info->nv_path, clm_version);
15655 #endif /* WRITE_WLANINFO */
15656 
15657 #endif /* defined(OEM_ANDROID) */
15658 #ifdef GEN_SOFTAP_INFO_FILE
15659 	sec_save_softap_info();
15660 #endif /* GEN_SOFTAP_INFO_FILE */
15661 
15662 #if defined(BCMSDIO)
15663 	dhd_txglom_enable(dhd, dhd->conf->bus_rxglom);
15664 #endif /* defined(BCMSDIO) */
15665 
15666 #if defined(BCMSDIO) || defined(BCMDBUS)
15667 #ifdef PROP_TXSTATUS
15668 	if (disable_proptx ||
15669 #ifdef PROP_TXSTATUS_VSDB
15670 		/* enable WLFC only if the firmware is VSDB when it is in STA mode */
15671 		(dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
15672 		 dhd->op_mode != DHD_FLAG_IBSS_MODE) ||
15673 #endif /* PROP_TXSTATUS_VSDB */
15674 		FALSE) {
15675 		wlfc_enable = FALSE;
15676 	}
15677 	ret = dhd_conf_get_disable_proptx(dhd);
15678 	if (ret == 0) {
15679 		disable_proptx = 0;
15680 		wlfc_enable = TRUE;
15681 	} else if (ret >= 1) {
15682 		disable_proptx = 1;
15683 		wlfc_enable = FALSE;
15684 		/* terence 20161229: we should set ampdu_hostreorder=0 when disable_proptx=1 */
15685 		hostreorder = 0;
15686 	}
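	/* wlfc_enable (possibly overridden by the WFA-cert param just below)
	 * decides whether host flow control comes up via dhd_wlfc_init();
	 * otherwise at most the ampdu_hostreorder fallback is used.
	 */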
15687 
15688 #if defined(PROP_TXSTATUS)
15689 #ifdef USE_WFA_CERT_CONF
15690 	if (sec_get_param_wfa_cert(dhd, SET_PARAM_PROPTX, &proptx) == BCME_OK) {
15691 		DHD_ERROR(("%s , read proptx param=%d\n", __FUNCTION__, proptx));
15692 		wlfc_enable = proptx;
15693 	}
15694 #endif /* USE_WFA_CERT_CONF */
15695 #endif /* PROP_TXSTATUS */
15696 
15697 #ifndef DISABLE_11N
15698 	ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down, sizeof(wl_down), TRUE, 0);
15699 	ret2 = dhd_iovar(dhd, 0, "ampdu_hostreorder", (char *)&hostreorder, sizeof(hostreorder),
15700 			NULL, 0, TRUE);
15701 	if (ret2 < 0) {
15702 		DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__, ret2));
15703 		if (ret2 != BCME_UNSUPPORTED)
15704 			ret = ret2;
15705 
15706 		if (ret == BCME_NOTDOWN) {
15707 			uint wl_down = 1;
15708 			ret2 = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down,
15709 				sizeof(wl_down), TRUE, 0);
15710 			DHD_ERROR(("%s ampdu_hostreorder fail WL_DOWN : %d, hostreorder :%d\n",
15711 				__FUNCTION__, ret2, hostreorder));
15712 
15713 			ret2 = dhd_iovar(dhd, 0, "ampdu_hostreorder", (char *)&hostreorder,
15714 					sizeof(hostreorder), NULL, 0, TRUE);
15715 			DHD_ERROR(("%s wl ampdu_hostreorder. ret --> %d\n", __FUNCTION__, ret2));
15716 			if (ret2 != BCME_UNSUPPORTED)
15717 				ret = ret2;
15718 		}
15719 		if (ret2 != BCME_OK)
15720 			hostreorder = 0;
15721 	}
15722 #endif /* DISABLE_11N */
15723 
15724 #ifdef READ_CONFIG_FROM_FILE
15725 	dhd_preinit_config(dhd, 0);
15726 #endif /* READ_CONFIG_FROM_FILE */
15727 
15728 	if (wlfc_enable) {
15729 		dhd_wlfc_init(dhd);
15730 		/* terence 20161229: enable ampdu_hostreorder if tlv enabled */
15731 		dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "ampdu_hostreorder", 1, 0, TRUE);
15732 	}
15733 #ifndef DISABLE_11N
15734 	else if (hostreorder)
15735 		dhd_wlfc_hostreorder_init(dhd);
15736 #endif /* DISABLE_11N */
15737 #else
15738 	/* terence 20161229: disable ampdu_hostreorder if PROP_TXSTATUS not defined */
15739 	printf("%s: not define PROP_TXSTATUS\n", __FUNCTION__);
15740 	dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "ampdu_hostreorder", 0, 0, TRUE);
15741 #endif /* PROP_TXSTATUS */
15742 #endif /* BCMSDIO || BCMDBUS */
15743 #ifndef PCIE_FULL_DONGLE
15744 	/* Without PCIE full dongle, DHD needs to see all packets to handle intra-BSS forwarding */
15745 	if (FW_SUPPORTED(dhd, ap)) {
15746 		wl_ap_isolate = AP_ISOLATE_SENDUP_ALL;
15747 		ret = dhd_iovar(dhd, 0, "ap_isolate", (char *)&wl_ap_isolate, sizeof(wl_ap_isolate),
15748 				NULL, 0, TRUE);
15749 		if (ret < 0)
15750 			DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
15751 	}
15752 #endif /* PCIE_FULL_DONGLE */
15753 #ifdef PNO_SUPPORT
15754 	if (!dhd->pno_state) {
15755 		dhd_pno_init(dhd);
15756 	}
15757 #endif
15758 
15759 #ifdef DHD_PKTTS
15760 	/* get the pkt metadata buffer length supported by FW */
15761 	if (dhd_wl_ioctl_get_intiovar(dhd, "bus:metadata_info", &val,
15762 			WLC_GET_VAR, FALSE, 0) != BCME_OK) {
15763 		DHD_ERROR(("%s: failed to get pkt metadata buflen, use IPC pkt TS.\n",
15764 				__FUNCTION__));
15765 		/*
15766 		 * If the iovar fails, the IPC method of collecting TS
15767 		 * should be used, hence set metadata_buflen to 0 here.
15768 		 * This is checked later on Tx completion to decide
15769 		 * whether the IPC or metadata method of reading TS
15770 		 * should be used.
15771 		 */
15772 		dhd->pkt_metadata_version = 0;
15773 		dhd->pkt_metadata_buflen = 0;
15774 	} else {
15775 		dhd->pkt_metadata_version  = GET_METADATA_VER(val);
15776 		dhd->pkt_metadata_buflen  = GET_METADATA_BUFLEN(val);
15777 	}
15778 
15779 	/* Check FW supports pktlat, if supports enable pktts_enab iovar */
15780 	ret = dhd_set_pktts_enab(dhd, TRUE);
15781 	if (ret < 0) {
15782 		DHD_ERROR(("%s: Enabling pktts_enab failed, ret=%d\n", __FUNCTION__, ret));
15783 	}
15784 #endif /* DHD_PKTTS */
15785 
15786 #ifdef RTT_SUPPORT
15787 	if (dhd->rtt_state) {
15788 		ret = dhd_rtt_init(dhd);
15789 		if (ret < 0) {
15790 			DHD_ERROR(("%s failed to initialize RTT\n", __FUNCTION__));
15791 		}
15792 	}
15793 #endif
15794 #ifdef FILTER_IE
15795 	/* Failure to configure filter IE is not a fatal error, ignore it. */
15796 	if (FW_SUPPORTED(dhd, fie) &&
15797 		!(dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
15798 		dhd_read_from_file(dhd);
15799 	}
15800 #endif /* FILTER_IE */
15801 #ifdef WL11U
15802 	dhd_interworking_enable(dhd);
15803 #endif /* WL11U */
15804 
15805 #ifdef NDO_CONFIG_SUPPORT
15806 	dhd->ndo_enable = FALSE;
15807 	dhd->ndo_host_ip_overflow = FALSE;
15808 	dhd->ndo_max_host_ip = NDO_MAX_HOST_IP_ENTRIES;
15809 #endif /* NDO_CONFIG_SUPPORT */
15810 
15811 	/* ND offload version supported */
15812 	dhd->ndo_version = dhd_ndo_get_version(dhd);
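	/* A non-zero ndo_version means the firmware exposes the typed host-IP
	 * interface (dhd_ndo_add_ip_with_type(), used by the inet6 notifier
	 * below); zero falls back to the legacy per-interface table.
	 */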
15813 	if (dhd->ndo_version > 0) {
15814 		DHD_INFO(("%s: ndo version %d\n", __FUNCTION__, dhd->ndo_version));
15815 
15816 #ifdef NDO_CONFIG_SUPPORT
15817 		/* enable Unsolicited NA filter */
15818 		ret = dhd_ndo_unsolicited_na_filter_enable(dhd, 1);
15819 		if (ret < 0) {
15820 			DHD_ERROR(("%s failed to enable Unsolicited NA filter\n", __FUNCTION__));
15821 		}
15822 #endif /* NDO_CONFIG_SUPPORT */
15823 	}
15824 
15825 	/* check dongle supports wbtext (product policy) or not */
15826 	dhd->wbtext_support = FALSE;
15827 	if (dhd_wl_ioctl_get_intiovar(dhd, "wnm_bsstrans_resp", &wnm_bsstrans_resp,
15828 			WLC_GET_VAR, FALSE, 0) != BCME_OK) {
15829 		DHD_ERROR(("failed to get wnm_bsstrans_resp\n"));
15830 	}
15831 	dhd->wbtext_policy = wnm_bsstrans_resp;
15832 	if (dhd->wbtext_policy == WL_BSSTRANS_POLICY_PRODUCT_WBTEXT) {
15833 		dhd->wbtext_support = TRUE;
15834 	}
15835 #ifndef WBTEXT
15836 	/* driver can turn off wbtext feature through makefile */
15837 	if (dhd->wbtext_support) {
15838 		if (dhd_wl_ioctl_set_intiovar(dhd, "wnm_bsstrans_resp",
15839 				WL_BSSTRANS_POLICY_ROAM_ALWAYS,
15840 				WLC_SET_VAR, FALSE, 0) != BCME_OK) {
15841 			DHD_ERROR(("failed to disable WBTEXT\n"));
15842 		}
15843 	}
15844 #endif /* !WBTEXT */
15845 
15846 #ifdef DHD_NON_DMA_M2M_CORRUPTION
15847 	/* check pcie non dma loopback */
15848 	if (dhd->op_mode == DHD_FLAG_MFG_MODE &&
15849 		(dhd_bus_dmaxfer_lpbk(dhd, M2M_NON_DMA_LPBK) < 0)) {
15850 			goto done;
15851 	}
15852 #endif /* DHD_NON_DMA_M2M_CORRUPTION */
15853 
15854 	/* WNM capabilities */
15855 	wnm_cap = 0
15856 #ifdef WL11U
15857 		| WL_WNM_BSSTRANS | WL_WNM_NOTIF
15858 #endif
15859 #ifdef WBTEXT
15860 		| WL_WNM_BSSTRANS | WL_WNM_MAXIDLE
15861 #endif
15862 		;
15863 #if defined(WL_MBO) && defined(WL_OCE)
15864 	if (FW_SUPPORTED(dhd, estm)) {
15865 		wnm_cap |= WL_WNM_ESTM;
15866 	}
15867 #endif /* WL_MBO && WL_OCE */
15868 	if (dhd_iovar(dhd, 0, "wnm", (char *)&wnm_cap, sizeof(wnm_cap), NULL, 0, TRUE) < 0) {
15869 		DHD_ERROR(("failed to set WNM capabilities\n"));
15870 	}
15871 
15872 #ifdef CUSTOM_ASSOC_TIMEOUT
15873 	/* set recreate_bi_timeout to increase assoc timeout :
15874 	* 20 * 100TU * 1024 / 1000 = 2 secs
15875 	* (beacon wait time = recreate_bi_timeout * beacon_period * 1024 / 1000)
15876 	*/
15877 	if (dhd_wl_ioctl_set_intiovar(dhd, "recreate_bi_timeout",
15878 			CUSTOM_ASSOC_TIMEOUT,
15879 			WLC_SET_VAR, TRUE, 0) != BCME_OK) {
15880 		DHD_ERROR(("failed to set assoc timeout\n"));
15881 	}
15882 #endif /* CUSTOM_ASSOC_TIMEOUT */
15883 
15884 #if defined(WBTEXT) && defined(WBTEXT_BTMDELTA)
15885 	if (dhd_iovar(dhd, 0, "wnm_btmdelta", (char *)&btmdelta, sizeof(btmdelta),
15886 			NULL, 0, TRUE) < 0) {
15887 		DHD_ERROR(("failed to set BTM delta\n"));
15888 	}
15889 #endif /* WBTEXT && WBTEXT_BTMDELTA */
15890 #if defined(WBTEXT) && defined(RRM_BCNREQ_MAX_CHAN_TIME)
15891 	if (dhd_iovar(dhd, 0, "rrm_bcn_req_thrtl_win",
15892 		(char *)&rrm_bcn_req_thrtl_win, sizeof(rrm_bcn_req_thrtl_win),
15893 		NULL, 0, TRUE) < 0) {
15894 		DHD_ERROR(("failed to set RRM BCN request thrtl_win\n"));
15895 	}
15896 	if (dhd_iovar(dhd, 0, "rrm_bcn_req_max_off_chan_time",
15897 		(char *)&rrm_bcn_req_max_off_chan_time, sizeof(rrm_bcn_req_max_off_chan_time),
15898 		NULL, 0, TRUE) < 0) {
15899 		DHD_ERROR(("failed to set RRM BCN Request max_off_chan_time\n"));
15900 	}
15901 #endif /* WBTEXT && RRM_BCNREQ_MAX_CHAN_TIME */
15902 
15903 #ifdef WL_MONITOR
15904 #ifdef HOST_RADIOTAP_CONV
15905 	/* The 'wl monitor' IOVAR is issued to check whether the FW supports radiotap conversion.
15906 	 * This is indicated through MSB(1<<31) bit, based on which host radiotap conversion
15907 	 * will be enabled or disabled.
15908 	 * 0 - Host supports Radiotap conversion.
15909 	 * 1 - FW supports Radiotap conversion.
15910 	 */
15911 	bcm_mkiovar("monitor", (char *)&monitor, sizeof(monitor), iovbuf, sizeof(iovbuf));
15912 	if ((ret2 = dhd_wl_ioctl_cmd(dhd, WLC_GET_MONITOR, iovbuf,
15913 		sizeof(iovbuf), FALSE, 0)) == 0) {
15914 		memcpy(&monitor, iovbuf, sizeof(monitor));
15915 		dhdinfo->host_radiotap_conv = (monitor & HOST_RADIOTAP_CONV_BIT) ? TRUE : FALSE;
15916 	} else {
15917 		DHD_ERROR(("%s Failed to get monitor mode, err %d\n",
15918 			__FUNCTION__, ret2));
15919 	}
15920 #endif /* HOST_RADIOTAP_CONV */
15921 	if (FW_SUPPORTED(dhd, monitor)) {
15922 		dhd->monitor_enable = TRUE;
15923 		DHD_ERROR(("%s: Monitor mode is enabled in FW cap\n", __FUNCTION__));
15924 	} else {
15925 		dhd->monitor_enable = FALSE;
15926 		DHD_ERROR(("%s: Monitor mode is not enabled in FW cap\n", __FUNCTION__));
15927 	}
15928 #endif /* WL_MONITOR */
15929 
15930 	/* store the preserve log set numbers */
15931 	if (dhd_get_preserve_log_numbers(dhd, &dhd->logset_prsrv_mask)
15932 			!= BCME_OK) {
15933 		DHD_ERROR(("%s: Failed to get preserve log # !\n", __FUNCTION__));
15934 	}
15935 
15936 	if (FW_SUPPORTED(dhd, ecounters) && enable_ecounter) {
15937 		dhd_ecounter_configure(dhd, TRUE);
15938 	}
15939 
15940 #ifdef CONFIG_SILENT_ROAM
15941 	dhd->sroam_turn_on = TRUE;
15942 	dhd->sroamed = FALSE;
15943 #endif /* CONFIG_SILENT_ROAM */
15944 	dhd_set_bandlock(dhd);
15945 
15946 	dhd_conf_postinit_ioctls(dhd);
15947 done:
15948 
15949 	if (eventmask_msg) {
15950 		MFREE(dhd->osh, eventmask_msg, msglen);
15951 	}
15952 	if (iov_buf) {
15953 		MFREE(dhd->osh, iov_buf, WLC_IOCTL_SMLEN);
15954 	}
15955 #if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
15956 	if (el_tag) {
15957 		MFREE(dhd->osh, el_tag, sizeof(wl_el_tag_params_t));
15958 	}
15959 #endif /* DHD_8021X_DUMP && SHOW_LOGTRACE */
15960 	return ret;
15961 }
15962 
15963 /* Default: enable preinit optimisation */
15964 #define DHD_PREINIT_OPTIMISATION
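/* When the optimisation is enabled, the firmware is probed for the
 * "preinit_status" iovar: if it answers, the trimmed
 * dhd_optimised_preinit_ioctls() path is taken (the firmware is presumed to
 * apply most defaults itself); otherwise the full legacy sequence runs.
 */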
15965 
15966 int
15967 dhd_preinit_ioctls(dhd_pub_t *dhd)
15968 {
15969 	int ret = 0;
15970 
15971 #ifdef DHD_PREINIT_OPTIMISATION
15972 	int preinit_status = 0;
15973 	ret = dhd_iovar(dhd, 0, "preinit_status", NULL, 0, (char *)&preinit_status,
15974 		sizeof(preinit_status), FALSE);
15975 
15976 	if (ret == BCME_OK) {
15977 		DHD_ERROR(("%s: preinit_status IOVAR present, use optimised preinit\n",
15978 			__FUNCTION__));
15979 		dhd->fw_preinit = TRUE;
15980 		ret = dhd_optimised_preinit_ioctls(dhd);
15981 	} else if (ret == BCME_UNSUPPORTED) {
15982 		DHD_ERROR(("%s: preinit_status IOVAR not supported, use legacy preinit\n",
15983 			__FUNCTION__));
15984 		dhd->fw_preinit = FALSE;
15985 		ret = dhd_legacy_preinit_ioctls(dhd);
15986 	} else {
15987 		DHD_ERROR(("%s: preinit_status IOVAR returned err(%d), ABORT\n",
15988 			__FUNCTION__, ret));
15989 	}
15990 #else
15991 	dhd->fw_preinit = FALSE;
15992 	ret = dhd_legacy_preinit_ioctls(dhd);
15993 #endif /* DHD_PREINIT_OPTIMISATION */
15994 	return ret;
15995 }
15996 
15997 int
15998 dhd_getiovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf,
15999 	uint cmd_len, char **resptr, uint resp_len)
16000 {
16001 	int len = resp_len;
16002 	int ret;
16003 	char *buf = *resptr;
16004 	wl_ioctl_t ioc;
16005 	if (resp_len > WLC_IOCTL_MAXLEN)
16006 		return BCME_BADARG;
16007 
16008 	memset(buf, 0, resp_len);
16009 
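	/* bcm_mkiovar() packs the NUL-terminated iovar name followed by cmd_buf
	 * into buf and returns the packed length, or 0 if buf is too small.
	 */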
16010 	ret = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len);
16011 	if (ret == 0) {
16012 		return BCME_BUFTOOSHORT;
16013 	}
16014 
16015 	memset(&ioc, 0, sizeof(ioc));
16016 
16017 	ioc.cmd = WLC_GET_VAR;
16018 	ioc.buf = buf;
16019 	ioc.len = len;
16020 	ioc.set = 0;
16021 
16022 	ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
16023 
16024 	return ret;
16025 }
16026 
16027 int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx)
16028 {
16029 	struct dhd_info *dhd = dhdp->info;
16030 	struct net_device *dev = NULL;
16031 
16032 	ASSERT(dhd && dhd->iflist[ifidx]);
16033 	dev = dhd->iflist[ifidx]->net;
16034 	ASSERT(dev);
16035 
16036 #ifndef DHD_TPUT_PATCH
16037 	if (netif_running(dev)) {
16038 		DHD_ERROR(("%s: Must be down to change its MTU\n", dev->name));
16039 		return BCME_NOTDOWN;
16040 	}
16041 #endif
16042 
16043 #define DHD_MIN_MTU 1500
16044 #define DHD_MAX_MTU 1752
16045 
16046 	if ((new_mtu < DHD_MIN_MTU) || (new_mtu > DHD_MAX_MTU)) {
16047 		DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu));
16048 		return BCME_BADARG;
16049 	}
16050 
16051 	dev->mtu = new_mtu;
16052 	return 0;
16053 }
16054 
16055 #if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT) && defined(DHD_FW_COREDUMP)
16056 static int dhd_wait_for_file_dump(dhd_pub_t *dhdp)
16057 {
16058 	int ret = BCME_OK;
16059 	struct net_device *primary_ndev;
16060 	struct bcm_cfg80211 *cfg;
16061 	unsigned long flags = 0;
16062 	primary_ndev = dhd_linux_get_primary_netdev(dhdp);
16063 
16064 	if (!primary_ndev) {
16065 		DHD_ERROR(("%s: Cannot find primary netdev\n", __FUNCTION__));
16066 		return BCME_ERROR;
16067 	}
16068 	cfg = wl_get_cfg(primary_ndev);
16069 
16070 	if (!cfg) {
16071 		DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__));
16072 		return BCME_ERROR;
16073 	}
16074 
16075 	DHD_GENERAL_LOCK(dhdp, flags);
16076 	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
16077 		DHD_BUS_BUSY_CLEAR_IN_HALDUMP(dhdp);
16078 		dhd_os_busbusy_wake(dhdp);
16079 		DHD_GENERAL_UNLOCK(dhdp, flags);
16080 		DHD_ERROR(("%s: bus is down! can't collect log dump. \n", __FUNCTION__));
16081 		return BCME_ERROR;
16082 	}
16083 	DHD_BUS_BUSY_SET_IN_HALDUMP(dhdp);
16084 	DHD_GENERAL_UNLOCK(dhdp, flags);
16085 
16086 	DHD_OS_WAKE_LOCK(dhdp);
16087 	/* check whether HAL has started; send the urgent event only in that case, otherwise skip it (the dump state is cleared below) */
16088 	if (wl_cfg80211_is_hal_started(cfg)) {
16089 		int timeleft = 0;
16090 
16091 		DHD_ERROR(("[DUMP] %s: HAL started. send urgent event\n", __FUNCTION__));
16092 		dhd_dbg_send_urgent_evt(dhdp, NULL, 0);
16093 
16094 		DHD_ERROR(("%s: wait to clear dhd_bus_busy_state: 0x%x\n",
16095 			__FUNCTION__, dhdp->dhd_bus_busy_state));
16096 		timeleft = dhd_os_busbusy_wait_bitmask(dhdp,
16097 				&dhdp->dhd_bus_busy_state, DHD_BUS_BUSY_IN_HALDUMP, 0);
16098 		if ((dhdp->dhd_bus_busy_state & DHD_BUS_BUSY_IN_HALDUMP) != 0) {
16099 			DHD_ERROR(("%s: Timed out(%d) dhd_bus_busy_state=0x%x\n",
16100 					__FUNCTION__, timeleft, dhdp->dhd_bus_busy_state));
16101 			ret = BCME_ERROR;
16102 		}
16103 	} else {
16104 		DHD_ERROR(("[DUMP] %s: HAL Not started. skip urgent event\n", __FUNCTION__));
16105 		ret = BCME_ERROR;
16106 	}
16107 
16108 	DHD_OS_WAKE_UNLOCK(dhdp);
16109 	/* In case of dhd_os_busbusy_wait_bitmask() timeout,
16110 	 * hal dump bit will not be cleared. Hence clearing it here.
16111 	 */
16112 	DHD_GENERAL_LOCK(dhdp, flags);
16113 	DHD_BUS_BUSY_CLEAR_IN_HALDUMP(dhdp);
16114 	dhd_os_busbusy_wake(dhdp);
16115 	DHD_GENERAL_UNLOCK(dhdp, flags);
16116 
16117 	return ret;
16118 }
16119 #endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT && DHD_FW_COREDUMP */
16120 
16121 #ifdef ARP_OFFLOAD_SUPPORT
16122 /* add or remove AOE host ip(s) (up to 8 IPs on the interface) */
16123 /* XXX add operation is more efficient */
16124 void
16125 aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx)
16126 {
16127 	u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */
16128 	int i;
16129 	int ret;
16130 
16131 	bzero(ipv4_buf, sizeof(ipv4_buf));
16132 
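	/* Strategy: snapshot the dongle's host_ip table, clear it, then replay
	 * the snapshot entry by entry, slotting ipa into a free slot (add) or
	 * dropping it (remove) along the way.
	 */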
16133 	/* display what we've got */
16134 	ret = dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
16135 	DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__));
16136 #ifdef AOE_DBG
16137 	dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
16138 #endif
16139 	/* now that we have saved the host_ip table, clear it in the dongle AOE */
16140 	dhd_aoe_hostip_clr(dhd_pub, idx);
16141 
16142 	if (ret) {
16143 		DHD_ERROR(("%s failed\n", __FUNCTION__));
16144 		return;
16145 	}
16146 
16147 	for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
16148 		if (add && (ipv4_buf[i] == 0)) {
16149 			ipv4_buf[i] = ipa;
16150 			add = FALSE; /* added ipa to local table */
16151 			DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
16152 				__FUNCTION__, i));
16153 		} else if (ipv4_buf[i] == ipa) {
16154 			ipv4_buf[i] = 0;
16155 			DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
16156 				__FUNCTION__, ipa, i));
16157 		}
16158 
16159 		if (ipv4_buf[i] != 0) {
16160 			/* add back host_ip entries from our local cache */
16161 			dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i], idx);
16162 			DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
16163 				__FUNCTION__, ipv4_buf[i], i));
16164 		}
16165 	}
16166 #ifdef AOE_DBG
16167 	/* see the resulting hostip table */
16168 	dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
16169 	DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__));
16170 	dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
16171 #endif
16172 }
16173 
16174 /* XXX this function is only for IP address */
16175 /*
16176  * Notification mechanism from kernel to our driver. This function is called by the Linux kernel
16177  * whenever there is an event related to an IP address.
16178  * ptr : kernel provided pointer to IP address that has changed
16179  */
16180 static int dhd_inetaddr_notifier_call(struct notifier_block *this,
16181 	unsigned long event,
16182 	void *ptr)
16183 {
16184 	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
16185 
16186 	dhd_info_t *dhd;
16187 	dhd_pub_t *dhd_pub;
16188 	int idx;
16189 
16190 	if (!ifa || !(ifa->ifa_dev->dev))
16191 		return NOTIFY_DONE;
16192 
16193 	/* Filter notifications meant for non Broadcom devices */
16194 	if ((ifa->ifa_dev->dev->netdev_ops != &dhd_ops_pri) &&
16195 	    (ifa->ifa_dev->dev->netdev_ops != &dhd_ops_virt)) {
16196 #if defined(WL_ENABLE_P2P_IF)
16197 		if (!wl_cfgp2p_is_ifops(ifa->ifa_dev->dev->netdev_ops))
16198 #endif /* WL_ENABLE_P2P_IF */
16199 			return NOTIFY_DONE;
16200 	}
16201 
16202 	dhd = DHD_DEV_INFO(ifa->ifa_dev->dev);
16203 	if (!dhd)
16204 		return NOTIFY_DONE;
16205 
16206 	dhd_pub = &dhd->pub;
16207 
16208 	if (!dhd_pub->arpoe_enable) {
16209 		DHD_ERROR(("arpoe_enable not set"));
16210 		return NOTIFY_DONE;
16211 	}
16212 
16213 	if (dhd_pub->arp_version == 1) {
16214 		idx = 0;
16215 	} else {
16216 		for (idx = 0; idx < DHD_MAX_IFS; idx++) {
16217 			if (dhd->iflist[idx] && dhd->iflist[idx]->net == ifa->ifa_dev->dev)
16218 				break;
16219 		}
16220 		if (idx < DHD_MAX_IFS)
16221 			DHD_TRACE(("ifidx : %p %s %d\n", dhd->iflist[idx]->net,
16222 				dhd->iflist[idx]->name, dhd->iflist[idx]->idx));
16223 		else {
16224 			DHD_ERROR(("Cannot find ifidx for(%s) set to 0\n", ifa->ifa_label));
16225 			idx = 0;
16226 		}
16227 	}
16228 
16229 	switch (event) {
16230 		case NETDEV_UP:
16231 			DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
16232 				__FUNCTION__, ifa->ifa_label, ifa->ifa_address));
16233 
16234 			/*
16235 			 * Skip if Bus is not in a state to transport the IOVAR
16236 			 * (or) the Dongle is not ready.
16237 			 */
16238 			if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(&dhd->pub) ||
16239 				dhd->pub.busstate ==  DHD_BUS_LOAD) {
16240 				DHD_ERROR(("%s: bus not ready, exit NETDEV_UP : %d\n",
16241 					__FUNCTION__, dhd->pub.busstate));
16242 				if (dhd->pend_ipaddr) {
16243 					DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
16244 						__FUNCTION__, dhd->pend_ipaddr));
16245 				}
16246 				dhd->pend_ipaddr = ifa->ifa_address;
16247 				break;
16248 			}
16249 
16250 #ifdef AOE_IP_ALIAS_SUPPORT
16251 			/* XXX HOSTAPD will be returned at first */
16252 			DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n",
16253 				__FUNCTION__));
16254 			aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE, idx);
16255 #endif /* AOE_IP_ALIAS_SUPPORT */
16256 			dhd_conf_set_garp(dhd_pub, idx, ifa->ifa_address, TRUE);
16257 			break;
16258 
16259 		case NETDEV_DOWN:
16260 			DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
16261 				__FUNCTION__, ifa->ifa_label, ifa->ifa_address));
16262 			dhd->pend_ipaddr = 0;
16263 #ifdef AOE_IP_ALIAS_SUPPORT
16264 			/* XXX HOSTAPD will be returned at first */
16265 			DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n",
16266 				__FUNCTION__));
16267 			if ((dhd_pub->op_mode & DHD_FLAG_HOSTAP_MODE) ||
16268 				(ifa->ifa_dev->dev != dhd_linux_get_primary_netdev(dhd_pub))) {
16269 				aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE, idx);
16270 			} else
16271 #endif /* AOE_IP_ALIAS_SUPPORT */
16272 			{
16273 				/* XXX clear ALL arp and hostip tables */
16274 				dhd_aoe_hostip_clr(&dhd->pub, idx);
16275 				dhd_aoe_arp_clr(&dhd->pub, idx);
16276 			}
16277 			dhd_conf_set_garp(dhd_pub, idx, ifa->ifa_address, FALSE);
16278 			break;
16279 
16280 		default:
16281 			DHD_ARPOE(("%s: do noting for [%s] Event: %lu\n",
16282 				__func__, ifa->ifa_label, event));
16283 			break;
16284 	}
16285 	return NOTIFY_DONE;
16286 }
16287 #endif /* ARP_OFFLOAD_SUPPORT */
16288 
16289 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
16290 /* Neighbor Discovery Offload: defered handler */
16291 static void
16292 dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event)
16293 {
16294 	struct ipv6_work_info_t *ndo_work = (struct ipv6_work_info_t *)event_data;
16295 	dhd_info_t *dhd = (dhd_info_t *)dhd_info;
16296 	dhd_pub_t *dhdp;
16297 	int ret;
16298 
16299 	if (!dhd) {
16300 		DHD_ERROR(("%s: invalid dhd_info\n", __FUNCTION__));
16301 		goto done;
16302 	}
16303 	dhdp = &dhd->pub;
16304 
16305 	if (event != DHD_WQ_WORK_IPV6_NDO) {
16306 		DHD_ERROR(("%s: unexpected event\n", __FUNCTION__));
16307 		goto done;
16308 	}
16309 
16310 	if (!ndo_work) {
16311 		DHD_ERROR(("%s: ipv6 work info is not initialized\n", __FUNCTION__));
16312 		return;
16313 	}
16314 
16315 	switch (ndo_work->event) {
16316 		case NETDEV_UP:
16317 #ifndef NDO_CONFIG_SUPPORT
16318 			DHD_TRACE(("%s: Enable NDO \n ", __FUNCTION__));
16319 			ret = dhd_ndo_enable(dhdp, TRUE);
16320 			if (ret < 0) {
16321 				DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, ret));
16322 			}
16323 #endif /* !NDO_CONFIG_SUPPORT */
16324 			DHD_TRACE(("%s: Add a host ip for NDO\n", __FUNCTION__));
16325 			if (dhdp->ndo_version > 0) {
16326 				/* inet6 addr notifier called only for unicast address */
16327 				ret = dhd_ndo_add_ip_with_type(dhdp, &ndo_work->ipv6_addr[0],
16328 					WL_ND_IPV6_ADDR_TYPE_UNICAST, ndo_work->if_idx);
16329 			} else {
16330 				ret = dhd_ndo_add_ip(dhdp, &ndo_work->ipv6_addr[0],
16331 					ndo_work->if_idx);
16332 			}
16333 			if (ret < 0) {
16334 				DHD_ERROR(("%s: Adding a host ip for NDO failed %d\n",
16335 					__FUNCTION__, ret));
16336 			}
16337 			break;
16338 		case NETDEV_DOWN:
16339 			if (dhdp->ndo_version > 0) {
16340 				DHD_TRACE(("%s: Remove a host ip for NDO\n", __FUNCTION__));
16341 				ret = dhd_ndo_remove_ip_by_addr(dhdp,
16342 					&ndo_work->ipv6_addr[0], ndo_work->if_idx);
16343 			} else {
16344 				DHD_TRACE(("%s: Clear host ip table for NDO \n", __FUNCTION__));
16345 				ret = dhd_ndo_remove_ip(dhdp, ndo_work->if_idx);
16346 			}
16347 			if (ret < 0) {
16348 				DHD_ERROR(("%s: Removing host ip for NDO failed %d\n",
16349 					__FUNCTION__, ret));
16350 				goto done;
16351 			}
16352 #ifdef NDO_CONFIG_SUPPORT
16353 			if (dhdp->ndo_host_ip_overflow) {
16354 				ret = dhd_dev_ndo_update_inet6addr(
16355 					dhd_idx2net(dhdp, ndo_work->if_idx));
16356 				if ((ret < 0) && (ret != BCME_NORESOURCE)) {
16357 					DHD_ERROR(("%s: Updating host ip for NDO failed %d\n",
16358 						__FUNCTION__, ret));
16359 					goto done;
16360 				}
16361 			}
16362 #else /* !NDO_CONFIG_SUPPORT */
16363 			DHD_TRACE(("%s: Disable NDO\n ", __FUNCTION__));
16364 			ret = dhd_ndo_enable(dhdp, FALSE);
16365 			if (ret < 0) {
16366 				DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__, ret));
16367 				goto done;
16368 			}
16369 #endif /* NDO_CONFIG_SUPPORT */
16370 			break;
16371 
16372 		default:
16373 			DHD_ERROR(("%s: unknown notifier event \n", __FUNCTION__));
16374 			break;
16375 	}
16376 done:
16377 
16378 	/* free ndo_work. allocated while scheduling the work */
16379 	if (ndo_work) {
16380 		kfree(ndo_work);
16381 	}
16382 
16383 	return;
16384 } /* dhd_inet6_work_handler */
16385 
16386 /*
16387  * Neighbor Discovery Offload: Called when an interface
16388  * is assigned with ipv6 address.
16389  * Handles only primary interface
16390  */
16391 int dhd_inet6addr_notifier_call(struct notifier_block *this, unsigned long event, void *ptr)
16392 {
16393 	dhd_info_t *dhd;
16394 	dhd_pub_t *dhdp;
16395 	struct inet6_ifaddr *inet6_ifa = ptr;
16396 	struct ipv6_work_info_t *ndo_info;
16397 	int idx;
16398 
16399 	/* Filter notifications meant for non-Broadcom devices */
16400 	if (inet6_ifa->idev->dev->netdev_ops != &dhd_ops_pri) {
16401 		return NOTIFY_DONE;
16402 	}
16403 
16404 	dhd = DHD_DEV_INFO(inet6_ifa->idev->dev);
16405 	if (!dhd) {
16406 		return NOTIFY_DONE;
16407 	}
16408 	dhdp = &dhd->pub;
16409 
16410 	/* Supports only primary interface */
16411 	idx = dhd_net2idx(dhd, inet6_ifa->idev->dev);
16412 	if (idx != 0) {
16413 		return NOTIFY_DONE;
16414 	}
16415 
16416 	/* FW capability */
16417 	if (!FW_SUPPORTED(dhdp, ndoe)) {
16418 		return NOTIFY_DONE;
16419 	}
16420 
16421 	ndo_info = (struct ipv6_work_info_t *)kzalloc(sizeof(struct ipv6_work_info_t), GFP_ATOMIC);
16422 	if (!ndo_info) {
16423 		DHD_ERROR(("%s: ipv6 work alloc failed\n", __FUNCTION__));
16424 		return NOTIFY_DONE;
16425 	}
16426 
16427 	/* fill up ndo_info */
16428 	ndo_info->event = event;
16429 	ndo_info->if_idx = idx;
16430 	memcpy(ndo_info->ipv6_addr, &inet6_ifa->addr, IPV6_ADDR_LEN);
16431 
16432 	/* defer the work to a thread as it may block the kernel */
16433 	dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)ndo_info, DHD_WQ_WORK_IPV6_NDO,
16434 		dhd_inet6_work_handler, DHD_WQ_WORK_PRIORITY_LOW);
16435 	return NOTIFY_DONE;
16436 }
16437 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
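/*
 * A minimal registration sketch for the notifier above, assuming the usual
 * notifier_block wiring (the matching unregister call appears in dhd_detach()
 * below):
 *
 *	static struct notifier_block dhd_inet6addr_notifier = {
 *		.notifier_call = dhd_inet6addr_notifier_call,
 *	};
 *
 *	if (!dhd_inet6addr_notifier_registered) {
 *		dhd_inet6addr_notifier_registered = TRUE;
 *		register_inet6addr_notifier(&dhd_inet6addr_notifier);
 *	}
 */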
16438 
16439 /* Network attach to be invoked from the bus probe handlers */
16440 int
16441 dhd_attach_net(dhd_pub_t *dhdp, bool need_rtnl_lock)
16442 {
16443 	struct net_device *primary_ndev;
16444 #ifdef GET_CUSTOM_MAC_ENABLE
16445 	char hw_ether[62];
16446 #endif /* GET_CUSTOM_MAC_ENABLE */
16447 #if defined(GET_CUSTOM_MAC_ENABLE) || defined(GET_OTP_MAC_ENABLE)
16448 	int ret = BCME_ERROR;
16449 #endif /* GET_CUSTOM_MAC_ENABLE || GET_OTP_MAC_ENABLE */
16450 
16451 	BCM_REFERENCE(primary_ndev);
16452 
16453 #ifdef GET_CUSTOM_MAC_ENABLE
16454 	ret = wifi_platform_get_mac_addr(dhdp->adapter, hw_ether, iface_name);
16455 	if (!ret)
16456 		bcopy(hw_ether, dhdp->mac.octet, ETHER_ADDR_LEN);
16457 #endif /* GET_CUSTOM_MAC_ENABLE */
16458 
16459 #ifdef GET_OTP_MAC_ENABLE
16460 	if (ret && memcmp(&ether_null, &dhdp->conf->otp_mac, ETHER_ADDR_LEN))
16461 		bcopy(&dhdp->conf->otp_mac, &dhdp->mac, ETHER_ADDR_LEN);
16462 #endif /* GET_OTP_MAC_ENABLE */
16463 
16464 	/* Register primary net device */
16465 	if (dhd_register_if(dhdp, 0, need_rtnl_lock) != 0) {
16466 		return BCME_ERROR;
16467 	}
16468 
16469 #if defined(WL_CFG80211)
16470 	primary_ndev = dhd_linux_get_primary_netdev(dhdp);
16471 	if (wl_cfg80211_net_attach(primary_ndev) < 0) {
16472 		/* fail the init */
16473 		dhd_remove_if(dhdp, 0, TRUE);
16474 		return BCME_ERROR;
16475 	}
16476 #endif /* WL_CFG80211 */
16477 	return BCME_OK;
16478 }
16479 
16480 int
16481 dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock)
16482 {
16483 	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
16484 	dhd_if_t *ifp;
16485 	struct net_device *net = NULL;
16486 	int err = 0;
16487 	uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };
16488 
16489 	DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
16490 
16491 	if (dhd == NULL || dhd->iflist[ifidx] == NULL) {
16492 		DHD_ERROR(("%s: Invalid Interface\n", __FUNCTION__));
16493 		return BCME_ERROR;
16494 	}
16495 
16496 	ASSERT(dhd && dhd->iflist[ifidx]);
16497 	ifp = dhd->iflist[ifidx];
16498 	net = ifp->net;
16499 	ASSERT(net && (ifp->idx == ifidx));
16500 
16501 	ASSERT(!net->netdev_ops);
16502 	net->netdev_ops = &dhd_ops_virt;
16503 
16504 	/* Ok, link into the network layer... */
16505 	if (ifidx == 0) {
16506 		/*
16507 		 * device functions for the primary interface only
16508 		 */
16509 		net->netdev_ops = &dhd_ops_pri;
16510 		if (!ETHER_ISNULLADDR(dhd->pub.mac.octet))
16511 			memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
16512 	} else {
16513 		/*
16514 		 * We have to use the primary MAC for virtual interfaces
16515 		 */
16516 		memcpy(temp_addr, ifp->mac_addr, ETHER_ADDR_LEN);
16517 #if defined(OEM_ANDROID)
16518 		/*
16519 		 * Android sets the locally administered bit to indicate that this is a
16520 		 * portable hotspot.  This will not work in simultaneous AP/STA mode,
16521 		 * nor with P2P.  Need to set the Dongle's MAC address, and then use that.
16522 		 */
16523 		if (!memcmp(temp_addr, dhd->iflist[0]->mac_addr,
16524 			ETHER_ADDR_LEN)) {
16525 			DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
16526 			__func__, net->name));
16527 			temp_addr[0] |= 0x02;
16528 		}
16529 #endif /* defined(OEM_ANDROID) */
16530 	}
16531 
16532 	net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
16533 #ifdef HOST_SFH_LLC
16534 	net->needed_headroom += DOT11_LLC_SNAP_HDR_LEN;
16535 #endif
16536 
16537 #ifdef DHD_AWDL
16538 	if (dhdp->awdl_ifidx &&
16539 		ifidx == dhdp->awdl_ifidx) {
16540 		/* A total of 30 bytes are required for the
16541 		 * ethernet + AWDL LLC headers. Of these, 14
16542 		 * bytes of ethernet header are already
16543 		 * present in the skb handed over by the stack.
16544 		 * So we need to reserve an additional 16 bytes as
16545 		 * headroom. Out of these 16 bytes, if the host
16546 		 * sfh llc feature is being used, then an additional
16547 		 * 8 bytes are already being reserved
16548 		 * during dhd_register_if (below), hence reserving
16549 		 * only an additional 8 bytes is enough. If the host
16550 		 * sfh llc feature is not used, then all of the 16
16551 		 * bytes need to be reserved from here
16552 		 */
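		/* Worked example of the byte math above: 30 total = 14
		 * (ethernet header already in the skb) + 16 (AWDL LLC).
		 * With HOST_SFH_LLC, 8 bytes were already added to
		 * needed_headroom earlier in this function, so only the
		 * unconditional DOT11_LLC_SNAP_HDR_LEN (8) below is needed;
		 * without it, both additions below supply the full 16 bytes.
		 */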
16553 		net->needed_headroom += DOT11_LLC_SNAP_HDR_LEN;
16554 #ifndef HOST_SFH_LLC
16555 		net->needed_headroom += DOT11_LLC_SNAP_HDR_LEN;
16556 #endif /* !HOST_SFH_LLC */
16557 	}
16558 #endif /* DHD_AWDL */
16559 
16560 	net->ethtool_ops = &dhd_ethtool_ops;
16561 
16562 #if defined(WL_WIRELESS_EXT)
16563 #if WIRELESS_EXT < 19
16564 	net->get_wireless_stats = dhd_get_wireless_stats;
16565 #endif /* WIRELESS_EXT < 19 */
16566 #if WIRELESS_EXT > 12
16567 	net->wireless_handlers = &wl_iw_handler_def;
16568 #endif /* WIRELESS_EXT > 12 */
16569 #endif /* defined(WL_WIRELESS_EXT) */
16570 
16571 	/* XXX Set up an MTU change notifier as per linux/notifier.h? */
16572 	dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);
16573 
16574 #ifdef WLMESH
16575 	if (ifidx >= 2 && dhdp->conf->fw_type == FW_TYPE_MESH) {
16576 		temp_addr[4] ^= 0x80;
16577 		temp_addr[4] += ifidx;
16578 		temp_addr[5] += ifidx;
16579 	}
16580 #endif
16581 	/*
16582 	 * XXX Linux 2.6.25 does not like a blank MAC address, so use a
16583 	 * dummy address until the interface is brought up.
16584 	 */
16585 	memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
16586 
16587 	if (ifidx == 0)
16588 		printf("%s\n", dhd_version);
16589 	else {
16590 #ifdef WL_EXT_IAPSTA
16591 		wl_ext_iapsta_update_net_device(net, ifidx);
16592 #endif /* WL_EXT_IAPSTA */
16593 		if (dhd->pub.up == 1) {
16594 			if (_dhd_set_mac_address(dhd, ifidx, net->dev_addr, FALSE) == 0)
16595 				DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
16596 			else
16597 				DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
16598 		}
16599 	}
16600 
16601 	if (need_rtnl_lock)
16602 		err = register_netdev(net);
16603 	else
16604 		err = register_netdevice(net);
16605 
16606 	if (err != 0) {
16607 		DHD_ERROR(("couldn't register the net device [%s], err %d\n", net->name, err));
16608 		goto fail;
16609 	}
16610 
16611 #if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
16612 	if ((ctf_dev_register(dhd->cih, net, FALSE) != BCME_OK) ||
16613 	    (ctf_enable(dhd->cih, net, TRUE, &dhd->brc_hot) != BCME_OK)) {
16614 		DHD_ERROR(("%s:%d: ctf_dev_register/ctf_enable failed for interface %d\n",
16615 			__FUNCTION__, __LINE__, ifidx));
16616 		goto fail;
16617 	}
16618 #endif /* BCM_ROUTER_DHD && HNDCTF */
16619 
16620 #ifdef WL_EVENT
16621 	wl_ext_event_attach_netdev(net, ifidx, ifp->bssidx);
16622 #endif /* WL_EVENT */
16623 #ifdef WL_ESCAN
16624 	wl_escan_event_attach(net, dhdp);
16625 #endif /* WL_ESCAN */
16626 #ifdef WL_EXT_IAPSTA
16627 	wl_ext_iapsta_attach_netdev(net, ifidx, ifp->bssidx);
16628 	wl_ext_iapsta_attach_name(net, ifidx);
16629 #endif /* WL_EXT_IAPSTA */
16630 
16631 #if defined(CONFIG_TIZEN)
16632 	net_stat_tizen_register(net);
16633 #endif /* CONFIG_TIZEN */
16634 
16635 	printf("Register interface [%s]  MAC: "MACDBG"\n\n", net->name,
16636 #if defined(CUSTOMER_HW4_DEBUG)
16637 		MAC2STRDBG(dhd->pub.mac.octet));
16638 #else
16639 		MAC2STRDBG(net->dev_addr));
16640 #endif /* CUSTOMER_HW4_DEBUG */
16641 
16642 #if defined(OEM_ANDROID) && (defined(BCMPCIE) || defined(BCMLXSDMMC) || defined(BCMDBUS))
16643 	if (ifidx == 0) {
16644 #if defined(BCMLXSDMMC) && !defined(DHD_PRELOAD)
16645 		up(&dhd_registration_sem);
16646 #endif /* BCMLXSDMMC && !DHD_PRELOAD */
16647 		if (!dhd_download_fw_on_driverload) {
16648 #ifdef WL_CFG80211
16649 			wl_terminate_event_handler(net);
16650 #endif /* WL_CFG80211 */
16651 #if defined(DHD_LB_RXP)
16652 			__skb_queue_purge(&dhd->rx_pend_queue);
16653 #endif /* DHD_LB_RXP */
16654 
16655 #if defined(DHD_LB_TXP)
16656 			skb_queue_purge(&dhd->tx_pend_queue);
16657 #endif /* DHD_LB_TXP */
16658 
16659 #ifdef SHOW_LOGTRACE
16660 			/* Release the skbs from queue for WLC_E_TRACE event */
16661 			dhd_event_logtrace_flush_queue(dhdp);
16662 #endif /* SHOW_LOGTRACE */
16663 
16664 #if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
16665 			dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
16666 #endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
16667 
16668 #if defined(WLAN_ACCEL_BOOT)
16669 			dhd->fs_check_retry = DHD_FS_CHECK_RETRIES;
16670 			dhd->wl_accel_boot_on_done = FALSE;
16671 			INIT_DELAYED_WORK(&dhd->wl_accel_work, dhd_wifi_accel_on_work_cb);
16672 #if !defined(WLAN_ACCEL_SKIP_WQ_IN_ATTACH)
16673 			/* If the WLAN_ACCEL_SKIP_WQ_IN_ATTACH feature is enabled,
16674 			* the dhd_wifi_accel_on_work_cb() is called in dhd_open()
16675 			* to skip dongle firmware downloading during insmod and dhd_attach.
16676 			*/
16677 			schedule_delayed_work(&dhd->wl_accel_work,
16678 					msecs_to_jiffies(DHD_FS_CHECK_RETRY_DELAY_MS));
16679 #endif /* !defined(WLAN_ACCEL_SKIP_WQ_IN_ATTACH) */
16680 #else
16681 			/* Turn off Wifi after boot up */
16682 #if defined (BT_OVER_SDIO)
16683 			dhd_bus_put(&dhd->pub, WLAN_MODULE);
16684 			wl_android_set_wifi_on_flag(FALSE);
16685 #else
16686 			wl_android_wifi_off(net, TRUE);
16687 #endif /* BT_OVER_SDIO */
16688 #endif /* WLAN_ACCEL_BOOT */
16689 
16690 		}
16691 #if defined(WL_WIRELESS_EXT)
16692 		wl_iw_down(net, &dhd->pub);
16693 #endif /* defined(WL_WIRELESS_EXT) */
16694 	}
16695 #endif /* OEM_ANDROID && (BCMPCIE || BCMLXSDMMC || BCMDBUS) */
16696 #if defined(GDB_PROXY) && defined(PCIE_FULL_DONGLE) && defined(BCMINTERNAL)
16697 	gdb_proxy_fs_try_create(ifp->info, net->name);
16698 #endif /* defined(GDB_PROXY) && defined(PCIE_FULL_DONGLE) && defined(BCMINTERNAL) */
16699 	return 0;
16700 
16701 fail:
16702 	net->netdev_ops = NULL;
16703 	return err;
16704 }
16705 
16706 void
16707 dhd_bus_detach(dhd_pub_t *dhdp)
16708 {
16709 	dhd_info_t *dhd;
16710 
16711 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
16712 
16713 	if (dhdp) {
16714 		dhd = (dhd_info_t *)dhdp->info;
16715 		if (dhd) {
16716 
16717 			/*
16718 			 * In case of Android cfg80211 driver, the bus is down in dhd_stop,
16719 			 * calling stop again will cause SD read/write errors.
16720 			 */
16721 			if (dhd->pub.busstate != DHD_BUS_DOWN && dhd_download_fw_on_driverload) {
16722 				/* Stop the protocol module */
16723 				dhd_prot_stop(&dhd->pub);
16724 
16725 				/* Stop the bus module */
16726 #ifdef BCMDBUS
16727 				/* Force Dongle terminated */
16728 				if (dhd_wl_ioctl_cmd(dhdp, WLC_TERMINATED, NULL, 0, TRUE, 0) < 0)
16729 					DHD_ERROR(("%s Setting WLC_TERMINATED failed\n",
16730 						__FUNCTION__));
16731 				dbus_stop(dhd->pub.bus);
16732 				DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
16733 				dhd->pub.busstate = DHD_BUS_DOWN;
16734 #else
16735 				dhd_bus_stop(dhd->pub.bus, TRUE);
16736 #endif /* BCMDBUS */
16737 			}
16738 
16739 #if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(BCMPCIE_OOB_HOST_WAKE)
16740 			dhd_bus_oob_intr_unregister(dhdp);
16741 #endif /* OOB_INTR_ONLY || BCMSPI_ANDROID || BCMPCIE_OOB_HOST_WAKE */
16742 		}
16743 	}
16744 }
16745 
16746 void dhd_detach(dhd_pub_t *dhdp)
16747 {
16748 	dhd_info_t *dhd;
16749 	unsigned long flags;
16750 	int timer_valid = FALSE;
16751 	struct net_device *dev = NULL;
16752 	dhd_if_t *ifp;
16753 #ifdef WL_CFG80211
16754 	struct bcm_cfg80211 *cfg = NULL;
16755 #endif
16756 	if (!dhdp)
16757 		return;
16758 
16759 	dhd = (dhd_info_t *)dhdp->info;
16760 	if (!dhd)
16761 		return;
16762 
16763 #if defined(GDB_PROXY) && defined(PCIE_FULL_DONGLE) && defined(BCMINTERNAL)
16764 	gdb_proxy_fs_remove(dhd);
16765 #endif /* defined(GDB_PROXY) && defined(PCIE_FULL_DONGLE) && defined(BCMINTERNAL) */
16766 
16767 	/* primary interface 0 */
16768 	ifp = dhd->iflist[0];
16769 	if (ifp && ifp->net) {
16770 		dev = ifp->net;
16771 	}
16772 
16773 	if (dev) {
16774 		rtnl_lock();
16775 #if defined(WL_CFG80211) && defined(WL_STATIC_IF)
16776 		if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
16777 			cfg = wl_get_cfg(dev);
16778 			if (cfg && cfg->static_ndev && (cfg->static_ndev->flags & IFF_UP)) {
16779 				dev_close(cfg->static_ndev);
16780 			}
16781 		}
16782 #endif /* WL_CFG80211 && WL_STATIC_IF */
16783 		if (dev->flags & IFF_UP) {
16784 			/* If IFF_UP is still up, it indicates that
16785 			 * "ifconfig wlan0 down" hasn't been called.
16786 			 * So invoke dev_close explicitly here to
16787 			 * bring down the interface.
16788 			 */
16789 			DHD_TRACE(("IFF_UP flag is up. Enforcing dev_close from detach \n"));
16790 			dev_close(dev);
16791 		}
16792 		rtnl_unlock();
16793 	}
16794 
16795 	DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));
16796 
16797 	/* XXX	kernel panic issue at first boot time: rmmod without bringing
16798 	 *	 the interface down generates an unnecessary hang event.
16799 	 */
16800 	DHD_ERROR(("%s: making dhdpub up FALSE\n", __FUNCTION__));
16801 	dhd->pub.up = 0;
16802 	if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
16803 		/* Give sufficient time for threads to start running in case
16804 		 * dhd_attach() has failed
16805 		 */
16806 		OSL_SLEEP(100);
16807 	}
16808 #ifdef DHD_WET
16809 	dhd_free_wet_info(&dhd->pub, dhd->pub.wet_info);
16810 #endif /* DHD_WET */
16811 #ifdef WL_NANHO
16812 	/* deinit NANHO host module */
16813 	bcm_nanho_deinit(dhd->pub.nanhoi);
16814 #endif /* WL_NANHO */
16815 #if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
16816 #endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
16817 
16818 #ifdef PROP_TXSTATUS
16819 #ifdef DHD_WLFC_THREAD
16820 	if (dhd->pub.wlfc_thread) {
16821 		kthread_stop(dhd->pub.wlfc_thread);
16822 		dhdp->wlfc_thread_go = TRUE;
16823 		wake_up_interruptible(&dhdp->wlfc_wqhead);
16824 	}
16825 	dhd->pub.wlfc_thread = NULL;
16826 #endif /* DHD_WLFC_THREAD */
16827 #endif /* PROP_TXSTATUS */
16828 
16829 #ifdef DHD_TIMESYNC
16830 	if (dhd->dhd_state & DHD_ATTACH_TIMESYNC_ATTACH_DONE) {
16831 		dhd_timesync_detach(dhdp);
16832 	}
16833 #endif /* DHD_TIMESYNC */
16834 
16835 	if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {
16836 
16837 #if defined(OEM_ANDROID) || !defined(BCMSDIO)
16838 		dhd_bus_detach(dhdp);
16839 #endif /* OEM_ANDROID || !BCMSDIO */
16840 #ifdef OEM_ANDROID
16841 #ifdef BCMPCIE
16842 		if (is_reboot == SYS_RESTART) {
16843 			extern bcmdhd_wifi_platdata_t *dhd_wifi_platdata;
16844 			if (dhd_wifi_platdata && !dhdp->dongle_reset) {
16845 				dhdpcie_bus_stop_host_dev(dhdp->bus);
16846 				wifi_platform_set_power(dhd_wifi_platdata->adapters,
16847 					FALSE, WIFI_TURNOFF_DELAY);
16848 			}
16849 		}
16850 #endif /* BCMPCIE */
16851 #endif /* OEM_ANDROID */
16852 #ifndef PCIE_FULL_DONGLE
16853 #if defined(OEM_ANDROID) || !defined(BCMSDIO)
16854 		if (dhdp->prot)
16855 			dhd_prot_detach(dhdp);
16856 #endif /* OEM_ANDROID || !BCMSDIO */
16857 #endif /* !PCIE_FULL_DONGLE */
16858 	}
16859 
16860 #ifdef ARP_OFFLOAD_SUPPORT
16861 	if (dhd_inetaddr_notifier_registered) {
16862 		dhd_inetaddr_notifier_registered = FALSE;
16863 		unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
16864 	}
16865 #endif /* ARP_OFFLOAD_SUPPORT */
16866 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
16867 	if (dhd_inet6addr_notifier_registered) {
16868 		dhd_inet6addr_notifier_registered = FALSE;
16869 		unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
16870 	}
16871 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
16872 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
16873 	if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) {
16874 		if (dhd->early_suspend.suspend)
16875 			unregister_early_suspend(&dhd->early_suspend);
16876 	}
16877 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
16878 
16879 #if defined(WL_WIRELESS_EXT)
16880 	if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) {
16881 		/* Detach and unlink in the iw */
16882 		wl_iw_detach(dev, dhdp);
16883 	}
16884 #endif /* defined(WL_WIRELESS_EXT) */
16885 #ifdef WL_EXT_GENL
16886 	wl_ext_genl_deinit(dev);
16887 #endif
16888 #ifdef WL_EXT_IAPSTA
16889 	wl_ext_iapsta_dettach(dhdp);
16890 #endif /* WL_EXT_IAPSTA */
16891 #ifdef WL_ESCAN
16892 	wl_escan_detach(dev, dhdp);
16893 #endif /* WL_ESCAN */
16894 #ifdef WL_EVENT
16895 	wl_ext_event_dettach(dhdp);
16896 #endif /* WL_EVENT */
16897 
16898 	/* delete all interfaces, starting with virtual */
16899 	if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) {
16900 		int i = 1;
16901 
16902 		/* Cleanup virtual interfaces */
16903 		dhd_net_if_lock_local(dhd);
16904 		for (i = 1; i < DHD_MAX_IFS; i++) {
16905 			if (dhd->iflist[i]) {
16906 				dhd_remove_if(&dhd->pub, i, TRUE);
16907 			}
16908 		}
16909 		dhd_net_if_unlock_local(dhd);
16910 
16911 		/* 'ifp' indicates primary interface 0, clean it up. */
16912 		if (ifp && ifp->net) {
16913 #if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
16914 			if (dhd->cih)
16915 				ctf_dev_unregister(dhd->cih, ifp->net);
16916 #endif /* BCM_ROUTER_DHD && HNDCTF */
16917 
16918 #ifdef WL_CFG80211
16919 			cfg = wl_get_cfg(ifp->net);
16920 #endif
16921 			/* in unregister_netdev case, the interface gets freed by net->destructor
16922 			 * (which is set to free_netdev)
16923 			 */
16924 			if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
16925 				free_netdev(ifp->net);
16926 			} else {
16927 #ifdef SET_RPS_CPUS
16928 				custom_rps_map_clear(ifp->net->_rx);
16929 #endif /* SET_RPS_CPUS */
16930 				netif_tx_disable(ifp->net);
16931 				unregister_netdev(ifp->net);
16932 			}
16933 #ifdef PCIE_FULL_DONGLE
16934 			ifp->net = DHD_NET_DEV_NULL;
16935 #else
16936 			ifp->net = NULL;
16937 #endif /* PCIE_FULL_DONGLE */
16938 #if defined(BCMSDIO) && !defined(OEM_ANDROID)
16939 			dhd_bus_detach(dhdp);
16940 
16941 			if (dhdp->prot)
16942 				dhd_prot_detach(dhdp);
16943 #endif /* BCMSDIO && !OEM_ANDROID */
16944 
16945 #ifdef DHD_WMF
16946 			dhd_wmf_cleanup(dhdp, 0);
16947 #endif /* DHD_WMF */
16948 #ifdef DHD_L2_FILTER
16949 			bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE,
16950 				NULL, FALSE, dhdp->tickcnt);
16951 			deinit_l2_filter_arp_table(dhdp->osh, ifp->phnd_arp_table);
16952 			ifp->phnd_arp_table = NULL;
16953 #endif /* DHD_L2_FILTER */
16954 
16955 #if (defined(BCM_ROUTER_DHD) && defined(QOS_MAP_SET))
16956 			MFREE(dhdp->osh, ifp->qosmap_up_table, UP_TABLE_MAX);
16957 			ifp->qosmap_up_table_enable = FALSE;
16958 #endif /* BCM_ROUTER_DHD && QOS_MAP_SET */
16959 
16960 			dhd_if_del_sta_list(ifp);
16961 
16962 			MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
16963 			ifp = NULL;
16964 #ifdef WL_CFG80211
16965 			if (cfg && cfg->wdev)
16966 				cfg->wdev->netdev = NULL;
16967 #endif
16968 		}
16969 	}
16970 
16971 	/* Clear the watchdog timer */
16972 	DHD_GENERAL_LOCK(&dhd->pub, flags);
16973 	timer_valid = dhd->wd_timer_valid;
16974 	dhd->wd_timer_valid = FALSE;
16975 	DHD_GENERAL_UNLOCK(&dhd->pub, flags);
16976 	if (timer_valid)
16977 		del_timer_sync(&dhd->timer);
16978 	DHD_STOP_RPM_TIMER(&dhd->pub);
16979 
16980 #ifdef BCMDBUS
16981 	tasklet_kill(&dhd->tasklet);
16982 #else
16983 	if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) {
16984 #ifdef DHD_PCIE_RUNTIMEPM
16985 		if (dhd->thr_rpm_ctl.thr_pid >= 0) {
16986 			PROC_STOP(&dhd->thr_rpm_ctl);
16987 		}
16988 #endif /* DHD_PCIE_RUNTIMEPM */
16989 		if (dhd->thr_wdt_ctl.thr_pid >= 0) {
16990 			PROC_STOP(&dhd->thr_wdt_ctl);
16991 		}
16992 
16993 		if (dhd->rxthread_enabled && dhd->thr_rxf_ctl.thr_pid >= 0) {
16994 			PROC_STOP(&dhd->thr_rxf_ctl);
16995 		}
16996 
16997 		if (dhd->thr_dpc_ctl.thr_pid >= 0) {
16998 			PROC_STOP(&dhd->thr_dpc_ctl);
16999 		} else
17000 		{
17001 			tasklet_kill(&dhd->tasklet);
17002 		}
17003 	}
17004 #endif /* BCMDBUS */
17005 
17006 #ifdef WL_NATOE
17007 	if (dhd->pub.nfct) {
17008 		dhd_ct_close(dhd->pub.nfct);
17009 	}
17010 #endif /* WL_NATOE */
17011 
17012 	cancel_delayed_work_sync(&dhd->dhd_dpc_dispatcher_work);
17013 #ifdef DHD_LB
17014 	if (dhd->dhd_state & DHD_ATTACH_STATE_LB_ATTACH_DONE) {
17015 		/* Clear the flag first to avoid calling the cpu notifier */
17016 		dhd->dhd_state &= ~DHD_ATTACH_STATE_LB_ATTACH_DONE;
17017 
17018 		/* Kill the Load Balancing Tasklets */
17019 #ifdef DHD_LB_RXP
17020 		cancel_work_sync(&dhd->rx_napi_dispatcher_work);
17021 		__skb_queue_purge(&dhd->rx_pend_queue);
17022 #endif /* DHD_LB_RXP */
17023 #ifdef DHD_LB_TXP
17024 		cancel_work_sync(&dhd->tx_dispatcher_work);
17025 		tasklet_kill(&dhd->tx_tasklet);
17026 		__skb_queue_purge(&dhd->tx_pend_queue);
17027 #endif /* DHD_LB_TXP */
17028 
17029 		/* Unregister from CPU Hotplug framework */
17030 		dhd_unregister_cpuhp_callback(dhd);
17031 
17032 		dhd_cpumasks_deinit(dhd);
17033 		DHD_LB_STATS_DEINIT(&dhd->pub);
17034 	}
17035 #endif /* DHD_LB */
17036 
17037 #ifdef CSI_SUPPORT
17038 	dhd_csi_deinit(dhdp);
17039 #endif /* CSI_SUPPORT */
17040 
17041 #if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
17042 	cancel_work_sync(&dhd->axi_error_dispatcher_work);
17043 #endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
17044 
17045 	DHD_SSSR_REG_INFO_DEINIT(&dhd->pub);
17046 	DHD_SSSR_MEMPOOL_DEINIT(&dhd->pub);
17047 
17048 #ifdef DHD_SDTC_ETB_DUMP
17049 	dhd_sdtc_etb_mempool_deinit(&dhd->pub);
17050 #endif /* DHD_SDTC_ETB_DUMP */
17051 
17052 #ifdef EWP_EDL
17053 	if (host_edl_support) {
17054 		DHD_EDL_MEM_DEINIT(dhdp);
17055 		host_edl_support = FALSE;
17056 	}
17057 #endif /* EWP_EDL */
17058 
17059 #ifdef WL_CFG80211
17060 	if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
17061 		if (!cfg) {
17062 			DHD_ERROR(("cfg NULL!\n"));
17063 			ASSERT(0);
17064 		} else {
17065 			wl_cfg80211_detach(cfg);
17066 			dhd_monitor_uninit();
17067 		}
17068 	}
17069 #endif
17070 
17071 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
17072 	destroy_workqueue(dhd->tx_wq);
17073 	dhd->tx_wq = NULL;
17074 	destroy_workqueue(dhd->rx_wq);
17075 	dhd->rx_wq = NULL;
17076 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
17077 #ifdef DEBUGABILITY
17078 	if (dhdp->dbg) {
17079 #ifdef DBG_PKT_MON
17080 		dhd_os_dbg_detach_pkt_monitor(dhdp);
17081 		osl_spin_lock_deinit(dhd->pub.osh, dhd->pub.dbg->pkt_mon_lock);
17082 #endif /* DBG_PKT_MON */
17083 	}
17084 #endif /* DEBUGABILITY */
17085 	if (dhdp->dbg) {
17086 		dhd_os_dbg_detach(dhdp);
17087 	}
17088 #ifdef DHD_MEM_STATS
17089 	osl_spin_lock_deinit(dhd->pub.osh, dhd->pub.mem_stats_lock);
17090 #endif /* DHD_MEM_STATS */
17091 
17092 #if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
17093 	osl_spin_lock_deinit(dhd->pub.osh, dhd->pub.awdl_stats_lock);
17094 #endif /* DHD_AWDL && AWDL_SLOT_STATS */
17095 #ifdef DHD_PKT_LOGGING
17096 	dhd_os_detach_pktlog(dhdp);
17097 #endif /* DHD_PKT_LOGGING */
17098 #ifdef DHD_STATUS_LOGGING
17099 	dhd_detach_statlog(dhdp);
17100 #endif /* DHD_STATUS_LOGGING */
17101 #ifdef DHD_PKTDUMP_ROAM
17102 	dhd_dump_pkt_deinit(dhdp);
17103 #endif /* DHD_PKTDUMP_ROAM */
17104 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
17105 	if (dhd->pub.hang_info) {
17106 		MFREE(dhd->pub.osh, dhd->pub.hang_info, VENDOR_SEND_HANG_EXT_INFO_LEN);
17107 	}
17108 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
17109 #ifdef SHOW_LOGTRACE
17110 	/* Release the skbs from queue for WLC_E_TRACE event */
17111 	dhd_event_logtrace_flush_queue(dhdp);
17112 
17113 	/* Wait till event logtrace context finishes */
17114 	dhd_cancel_logtrace_process_sync(dhd);
17115 
17116 	/* Remove ring proc entries */
17117 	dhd_dbg_ring_proc_destroy(&dhd->pub);
17118 
17119 	if (dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT) {
17120 		if (dhd->event_data.fmts) {
17121 			MFREE(dhd->pub.osh, dhd->event_data.fmts,
17122 					dhd->event_data.fmts_size);
17123 		}
17124 		if (dhd->event_data.raw_fmts) {
17125 			MFREE(dhd->pub.osh, dhd->event_data.raw_fmts,
17126 					dhd->event_data.raw_fmts_size);
17127 		}
17128 		if (dhd->event_data.raw_sstr) {
17129 			MFREE(dhd->pub.osh, dhd->event_data.raw_sstr,
17130 					dhd->event_data.raw_sstr_size);
17131 		}
17132 		if (dhd->event_data.rom_raw_sstr) {
17133 			MFREE(dhd->pub.osh, dhd->event_data.rom_raw_sstr,
17134 					dhd->event_data.rom_raw_sstr_size);
17135 		}
17136 		dhd->dhd_state &= ~DHD_ATTACH_LOGTRACE_INIT;
17137 	}
17138 #endif /* SHOW_LOGTRACE */
17139 #ifdef BTLOG
17140 	skb_queue_purge(&dhd->bt_log_queue);
17141 #endif	/* BTLOG */
17142 #ifdef PNO_SUPPORT
17143 	if (dhdp->pno_state)
17144 		dhd_pno_deinit(dhdp);
17145 #endif
17146 #ifdef RTT_SUPPORT
17147 	if (dhdp->rtt_state) {
17148 		dhd_rtt_detach(dhdp);
17149 	}
17150 #endif
17151 #if defined(CONFIG_PM_SLEEP)
17152 	if (dhd_pm_notifier_registered) {
17153 		unregister_pm_notifier(&dhd->pm_notifier);
17154 		dhd_pm_notifier_registered = FALSE;
17155 	}
17156 #endif /* CONFIG_PM_SLEEP */
17157 
17158 #ifdef DEBUG_CPU_FREQ
17159 	if (dhd->new_freq)
17160 		free_percpu(dhd->new_freq);
17161 	dhd->new_freq = NULL;
17162 	cpufreq_unregister_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
17163 #endif
17164 	DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter));
17165 #ifdef CONFIG_HAS_WAKELOCK
17166 	dhd->wakelock_wd_counter = 0;
17167 	dhd_wake_lock_unlock_destroy(&dhd->wl_wdwake);
17168 	// terence 20161023: cannot destroy wl_wifi while wlan is down; it would cause a NULL pointer dereference in dhd_ioctl_entry
17169 	dhd_wake_lock_unlock_destroy(&dhd->wl_wifi);
17170 #endif /* CONFIG_HAS_WAKELOCK */
17171 	if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) {
17172 		DHD_OS_WAKE_LOCK_DESTROY(dhd);
17173 	}
17174 
17175 #ifdef DHDTCPACK_SUPPRESS
17176 	/* This will free all MEM allocated for TCPACK SUPPRESS */
17177 	dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
17178 #endif /* DHDTCPACK_SUPPRESS */
17179 
17180 #ifdef PCIE_FULL_DONGLE
17181 	dhd_flow_rings_deinit(dhdp);
17182 	if (dhdp->prot)
17183 		dhd_prot_detach(dhdp);
17184 #endif
17185 
17186 #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
17187 	dhd_free_tdls_peer_list(dhdp);
17188 #endif
17189 
17190 #if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
17191 	/* Release CTF pool ONLY after the prot layer is detached and
17192 	 * pkts, possibly from the fast ctfpool, are freed into ctfpool/kernel
17193 	 */
17194 #ifdef CTFPOOL
17195 	/* free the buffers in fast pool */
17196 	osl_ctfpool_cleanup(dhd->pub.osh);
17197 #endif /* CTFPOOL */
17198 
17199 	/* free ctf resources */
17200 	if (dhd->cih)
17201 		ctf_detach(dhd->cih);
17202 #endif /* BCM_ROUTER_DHD && HNDCTF */
17203 #ifdef BCMDBG
17204 	dhd_macdbg_detach(dhdp);
17205 #endif /* BCMDBG */
17206 
17207 #ifdef DUMP_IOCTL_IOV_LIST
17208 	dhd_iov_li_delete(dhdp, &(dhdp->dump_iovlist_head));
17209 #endif /* DUMP_IOCTL_IOV_LIST */
17210 #ifdef DHD_DEBUG
17211 	/* memory waste feature list cleanup */
17212 	dhd_mw_list_delete(dhdp, &(dhdp->mw_list_head));
17213 #endif /* DHD_DEBUG */
17214 #ifdef WL_MONITOR
17215 	dhd_del_monitor_if(dhd);
17216 #endif /* WL_MONITOR */
17217 
17218 #ifdef DHD_ERPOM
17219 	if (dhdp->enable_erpom) {
17220 		dhdp->pom_func_deregister(&dhdp->pom_wlan_handler);
17221 	}
17222 #endif /* DHD_ERPOM */
17223 
17224 	cancel_work_sync(&dhd->dhd_hang_process_work);
17225 
17226 	/* Prefer adding de-init code above this comment unless necessary.
17227 	 * The idea is to cancel work queue, sysfs and flags at the end.
17228 	 */
17229 	dhd_deferred_work_deinit(dhd->dhd_deferred_wq);
17230 	dhd->dhd_deferred_wq = NULL;
17231 
17232 	/* log dump related buffers should be freed after wq is purged */
17233 #ifdef DHD_LOG_DUMP
17234 	dhd_log_dump_deinit(&dhd->pub);
17235 #endif /* DHD_LOG_DUMP */
17236 #if defined(BCMPCIE)
17237 	if (dhdp->extended_trap_data)
17238 	{
17239 		MFREE(dhdp->osh, dhdp->extended_trap_data, BCMPCIE_EXT_TRAP_DATA_MAXLEN);
17240 		dhdp->extended_trap_data = NULL;
17241 	}
17242 #ifdef DNGL_AXI_ERROR_LOGGING
17243 	if (dhdp->axi_err_dump)
17244 	{
17245 		MFREE(dhdp->osh, dhdp->axi_err_dump, sizeof(dhd_axi_error_dump_t));
17246 		dhdp->axi_err_dump = NULL;
17247 	}
17248 #endif /* DNGL_AXI_ERROR_LOGGING */
17249 #endif /* BCMPCIE */
17250 
17251 #ifdef BTLOG
17252 	/* Wait till bt_log_dispatcher_work finishes */
17253 	cancel_work_sync(&dhd->bt_log_dispatcher_work);
17254 #endif /* BTLOG */
17255 
17256 #ifdef EWP_EDL
17257 	cancel_delayed_work_sync(&dhd->edl_dispatcher_work);
17258 #endif
17259 
17260 	(void)dhd_deinit_sock_flows_buf(dhd);
17261 
17262 #ifdef DHD_DUMP_MNGR
17263 	if (dhd->pub.dump_file_manage) {
17264 		MFREE(dhd->pub.osh, dhd->pub.dump_file_manage,
17265 				sizeof(dhd_dump_file_manage_t));
17266 	}
17267 #endif /* DHD_DUMP_MNGR */
17268 
17269 	dhd_sysfs_exit(dhd);
17270 	dhd->pub.fw_download_status = FW_UNLOADED;
17271 
17272 #if defined(BT_OVER_SDIO)
17273 	mutex_destroy(&dhd->bus_user_lock);
17274 #endif /* BT_OVER_SDIO */
17275 
17276 #ifdef BCMINTERNAL
17277 #ifdef DHD_FWTRACE
17278 	(void) dhd_fwtrace_detach(dhdp);
17279 #endif /* DHD_FWTRACE */
17280 #endif /* BCMINTERNAL */
17281 
17282 #ifdef DHD_TX_PROFILE
17283 	(void)dhd_tx_profile_detach(dhdp);
17284 #endif /* defined(DHD_TX_PROFILE) */
17285 	dhd_conf_detach(dhdp);
17286 
17287 } /* dhd_detach */
17288 
17289 void
17290 dhd_free(dhd_pub_t *dhdp)
17291 {
17292 	dhd_info_t *dhd;
17293 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
17294 
17295 	if (dhdp) {
17296 		int i;
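		/* Sketch of the assumed layout: each reorder buffer is a
		 * reorder_info header followed by (max_idx + 1) packet
		 * pointers, so the size handed to MFREE below is recomputed
		 * exactly as at allocation time.
		 */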
17297 		for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
17298 			if (dhdp->reorder_bufs[i]) {
17299 				reorder_info_t *ptr;
17300 				uint32 buf_size = sizeof(struct reorder_info);
17301 
17302 				ptr = dhdp->reorder_bufs[i];
17303 
17304 				buf_size += ((ptr->max_idx + 1) * sizeof(void*));
17305 				DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
17306 					i, ptr->max_idx, buf_size));
17307 
17308 				MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
17309 			}
17310 		}
17311 
17312 		dhd_sta_pool_fini(dhdp, DHD_MAX_STA);
17313 
17314 		dhd = (dhd_info_t *)dhdp->info;
17315 		if (dhdp->soc_ram) {
17316 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
17317 			DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
17318 #else
17319 			if (is_vmalloc_addr(dhdp->soc_ram)) {
17320 				VMFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
17321 			}
17322 			else {
17323 				MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
17324 			}
17325 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
17326 			dhdp->soc_ram = NULL;
17327 		}
17328 #ifdef CACHE_FW_IMAGES
17329 		if (dhdp->cached_fw) {
17330 			MFREE(dhdp->osh, dhdp->cached_fw, dhdp->bus->ramsize);
17331 		}
17332 
17333 		if (dhdp->cached_nvram) {
17334 			MFREE(dhdp->osh, dhdp->cached_nvram, MAX_NVRAMBUF_SIZE);
17335 		}
17336 #endif
17337 		if (dhd != NULL) {
17338 #ifdef REPORT_FATAL_TIMEOUTS
17339 			deinit_dhd_timeouts(&dhd->pub);
17340 #endif /* REPORT_FATAL_TIMEOUTS */
17341 
17342 			/* If pointer is allocated by dhd_os_prealloc then avoid MFREE */
17343 			if (dhd != (dhd_info_t *)dhd_os_prealloc(dhdp,
17344 					DHD_PREALLOC_DHD_INFO, 0, FALSE))
17345 				MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
17346 			dhd = NULL;
17347 		}
17348 	}
17349 }
17350 
17351 void
17352 dhd_clear(dhd_pub_t *dhdp)
17353 {
17354 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
17355 
17356 	if (dhdp) {
17357 		int i;
17358 #ifdef DHDTCPACK_SUPPRESS
17359 		/* Clean up timer/data structure for any remaining/pending packet or timer. */
17360 		dhd_tcpack_info_tbl_clean(dhdp);
17361 #endif /* DHDTCPACK_SUPPRESS */
17362 		for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
17363 			if (dhdp->reorder_bufs[i]) {
17364 				reorder_info_t *ptr;
17365 				uint32 buf_size = sizeof(struct reorder_info);
17366 
17367 				ptr = dhdp->reorder_bufs[i];
17368 
17369 				buf_size += ((ptr->max_idx + 1) * sizeof(void*));
17370 				DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
17371 					i, ptr->max_idx, buf_size));
17372 
17373 				MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
17374 			}
17375 		}
17376 
17377 		dhd_sta_pool_clear(dhdp, DHD_MAX_STA);
17378 
17379 		if (dhdp->soc_ram) {
17380 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
17381 			DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
17382 #else
17383 			if (is_vmalloc_addr(dhdp->soc_ram)) {
17384 				VMFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
17385 			}
17386 			else {
17387 				MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
17388 			}
17389 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
17390 			dhdp->soc_ram = NULL;
17391 		}
17392 	}
17393 }
17394 
17395 static void
17396 dhd_module_cleanup(void)
17397 {
17398 	printf("%s: Enter\n", __FUNCTION__);
17399 
17400 	dhd_bus_unregister();
17401 
17402 #if defined(OEM_ANDROID)
17403 	wl_android_exit();
17404 #endif /* OEM_ANDROID */
17405 
17406 	dhd_wifi_platform_unregister_drv();
17407 	printf("%s: Exit\n", __FUNCTION__);
17408 }
17409 
17410 static void
17411 dhd_module_exit(void)
17412 {
17413 	atomic_set(&exit_in_progress, 1);
17414 #ifdef DHD_BUZZZ_LOG_ENABLED
17415 	dhd_buzzz_detach();
17416 #endif /* DHD_BUZZZ_LOG_ENABLED */
17417 	dhd_module_cleanup();
17418 	unregister_reboot_notifier(&dhd_reboot_notifier);
17419 	dhd_destroy_to_notifier_skt();
17420 #ifdef DHD_PKTTS
17421 	dhd_destroy_to_notifier_ts();
17422 #endif /* DHD_PKTTS */
17423 }
17424 
17425 static int
17426 _dhd_module_init(void)
17427 {
17428 	int err;
17429 	int retry = POWERUP_MAX_RETRY;
17430 
17431 	printf("%s: in %s\n", __FUNCTION__, dhd_version);
17432 
17433 #ifdef DHD_BUZZZ_LOG_ENABLED
17434 	dhd_buzzz_attach();
17435 #endif /* DHD_BUZZZ_LOG_ENABLED */
17436 
17437 #if defined(BCM_ROUTER_DHD)
17438 	{	/* XXX Should we maintain nvram budget/thresholds per 5G|2G radio? */
17439 		char * var;
17440 		if ((var = getvar(NULL, "dhd_queue_budget")) != NULL) {
17441 			dhd_queue_budget = bcm_strtoul(var, NULL, 0);
17442 		}
17443 		DHD_ERROR(("dhd_queue_budget = %d\n", dhd_queue_budget));
17444 
17445 		if ((var = getvar(NULL, "dhd_sta_threshold")) != NULL) {
17446 			dhd_sta_threshold = bcm_strtoul(var, NULL, 0);
17447 		}
17448 		DHD_ERROR(("dhd_sta_threshold = %d\n", dhd_sta_threshold));
17449 
17450 		if ((var = getvar(NULL, "dhd_if_threshold")) != NULL) {
17451 			dhd_if_threshold = bcm_strtoul(var, NULL, 0);
17452 		}
17453 		DHD_ERROR(("dhd_if_threshold = %d\n", dhd_if_threshold));
17454 	}
17455 #endif /* BCM_ROUTER_DHD */
17456 
17457 	if (firmware_path[0] != '\0') {
17458 		strlcpy(fw_bak_path, firmware_path, sizeof(fw_bak_path));
17459 	}
17460 
17461 	if (nvram_path[0] != '\0') {
17462 		strlcpy(nv_bak_path, nvram_path, sizeof(nv_bak_path));
17463 	}
17464 
17465 	do {
17466 		err = dhd_wifi_platform_register_drv();
17467 		if (!err) {
17468 			register_reboot_notifier(&dhd_reboot_notifier);
17469 			break;
17470 		} else {
17471 			DHD_ERROR(("%s: Failed to load the driver, try cnt %d\n",
17472 				__FUNCTION__, retry));
17473 			strlcpy(firmware_path, fw_bak_path, sizeof(firmware_path));
17474 			strlcpy(nvram_path, nv_bak_path, sizeof(nvram_path));
17475 		}
17476 	} while (retry--);
17477 
17478 	dhd_create_to_notifier_skt();
17479 
17480 #ifdef DHD_PKTTS
17481 	dhd_create_to_notifier_ts();
17482 #endif /* DHD_PKTTS */
17483 
17484 	if (err) {
17485 		DHD_ERROR(("%s: Failed to load driver, max retry reached\n", __FUNCTION__));
17486 	} else {
17487 		if (!dhd_download_fw_on_driverload) {
17488 			dhd_driver_init_done = TRUE;
17489 		}
17490 	}
17491 
17492 	printf("%s: Exit err=%d\n", __FUNCTION__, err);
17493 	return err;
17494 }
17495 
17496 static int
17497 dhd_module_init(void)
17498 {
17499 	int err;
17500 
17501 	err = _dhd_module_init();
17502 #ifdef DHD_SUPPORT_HDM
17503 	if (err && !dhd_download_fw_on_driverload) {
17504 		dhd_hdm_wlan_sysfs_init();
17505 	}
17506 #endif /* DHD_SUPPORT_HDM */
17507 	return err;
17508 
17509 }
17510 
17511 #ifdef DHD_SUPPORT_HDM
17512 bool hdm_trigger_init = FALSE;
17513 struct delayed_work hdm_sysfs_wq;
17514 
17515 int
17516 dhd_module_init_hdm(void)
17517 {
17518 	int err = 0;
17519 
17520 	hdm_trigger_init = TRUE;
17521 
17522 	if (dhd_driver_init_done) {
17523 		DHD_INFO(("%s : Module is already inited\n", __FUNCTION__));
17524 		return err;
17525 	}
17526 
17527 	err = _dhd_module_init();
17528 
17529 	/* remove the sysfs file after the module has loaded properly */
17530 	if (!err && !dhd_download_fw_on_driverload) {
17531 		INIT_DELAYED_WORK(&hdm_sysfs_wq, dhd_hdm_wlan_sysfs_deinit);
17532 		schedule_delayed_work(&hdm_sysfs_wq, msecs_to_jiffies(SYSFS_DEINIT_MS));
17533 	}
17534 
17535 	hdm_trigger_init = FALSE;
17536 	return err;
17537 }
17538 #endif /* DHD_SUPPORT_HDM */
17539 
17540 static int
17541 dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused)
17542 {
17543 	DHD_TRACE(("%s: code = %ld\n", __FUNCTION__, code));
17544 	if (code == SYS_RESTART) {
17545 #ifdef OEM_ANDROID
17546 #ifdef BCMPCIE
17547 		is_reboot = code;
17548 #endif /* BCMPCIE */
17549 #else
17550 		dhd_module_cleanup();
17551 #endif /* OEM_ANDROID */
17552 	}
17553 	return NOTIFY_DONE;
17554 }
17555 
17556 #ifdef CONFIG_WIFI_LOAD_DRIVER_WHEN_KERNEL_BOOTUP
17557 static int wifi_init_thread(void *data)
17558 {
17559 	dhd_module_init();
17560 	return 0;
17561 }
17562 #endif
17563 
17564 int rockchip_wifi_init_module_rkwifi(void)
17565 {
17566 #ifdef CONFIG_WIFI_LOAD_DRIVER_WHEN_KERNEL_BOOTUP
17567 	struct task_struct *kthread = NULL;
17568 
17569 	kthread = kthread_run(wifi_init_thread, NULL, "wifi_init_thread");
17570 	if (IS_ERR(kthread))
17571 		pr_err("create wifi_init_thread failed.\n");
17572 #else
17573 	dhd_module_init();
17574 #endif
17575 	return 0;
17576 }
17577 
17578 void rockchip_wifi_exit_module_rkwifi(void)
17579 {
17580 	dhd_module_exit();
17581 }
17582 #ifdef CONFIG_WIFI_BUILD_MODULE
17583 module_init(rockchip_wifi_init_module_rkwifi);
17584 module_exit(rockchip_wifi_exit_module_rkwifi);
17585 #else
17586 #ifdef CONFIG_WIFI_LOAD_DRIVER_WHEN_KERNEL_BOOTUP
17587 late_initcall(rockchip_wifi_init_module_rkwifi);
17588 module_exit(rockchip_wifi_exit_module_rkwifi);
17589 #else
17590 module_init(rockchip_wifi_init_module_rkwifi);
17591 module_exit(rockchip_wifi_exit_module_rkwifi);
17592 #endif
17593 #endif
17594 
17595 #if 0
17596 #if defined(CONFIG_DEFERRED_INITCALLS) && !defined(EXYNOS_PCIE_MODULE_PATCH)
17597 /* XXX To decrease the device boot time, the deferred_module_init() macro can
17598  * be used. The detailed principle and implementation of deferred_module_init()
17599  * is found at http://elinux.org/Deferred_Initcalls
17600  * To enable this feature for module build, it needs to add another
17601  * deferred_module_init() definition to include/linux/init.h in Linux Kernel.
17602  * #define deferred_module_init(fn)	module_init(fn)
17603  */
17604 #if defined(CONFIG_ARCH_MSM) || defined(CONFIG_ARCH_EXYNOS)
17605 deferred_module_init_sync(dhd_module_init);
17606 #else
17607 deferred_module_init(dhd_module_init);
17608 #endif /* CONFIG_ARCH_MSM || CONFIG_ARCH_EXYNOS */
17609 #elif defined(USE_LATE_INITCALL_SYNC)
17610 late_initcall_sync(dhd_module_init);
17611 #else
17612 late_initcall(dhd_module_init);
17613 #endif /* USE_LATE_INITCALL_SYNC */
17614 
17615 module_exit(dhd_module_exit);
17616 #endif
17617 
17618 /*
17619  * OS specific functions required to implement DHD driver in OS independent way
17620  */
17621 int
17622 dhd_os_proto_block(dhd_pub_t *pub)
17623 {
17624 	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
17625 
17626 	if (dhd) {
17627 		down(&dhd->proto_sem);
17628 
17629 		return 1;
17630 	}
17631 
17632 	return 0;
17633 }
17634 
17635 int
17636 dhd_os_proto_unblock(dhd_pub_t *pub)
17637 {
17638 	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
17639 
17640 	if (dhd) {
17641 		up(&dhd->proto_sem);
17642 		return 1;
17643 	}
17644 
17645 	return 0;
17646 }
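/*
 * A minimal pairing sketch for the two helpers above: callers serialize
 * protocol access around the semaphore and must release it on every path:
 *
 *	if (dhd_os_proto_block(pub)) {
 *		... issue the proto request and wait for completion ...
 *		dhd_os_proto_unblock(pub);
 *	}
 */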
17647 
17648 void
17649 dhd_os_dhdiovar_lock(dhd_pub_t *pub)
17650 {
17651 	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
17652 
17653 	if (dhd) {
17654 		mutex_lock(&dhd->dhd_iovar_mutex);
17655 	}
17656 }
17657 
17658 void
17659 dhd_os_dhdiovar_unlock(dhd_pub_t *pub)
17660 {
17661 	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
17662 
17663 	if (dhd) {
17664 		mutex_unlock(&dhd->dhd_iovar_mutex);
17665 	}
17666 }
17667 
17668 void
17669 dhd_os_logdump_lock(dhd_pub_t *pub)
17670 {
17671 	dhd_info_t *dhd = NULL;
17672 
17673 	if (!pub)
17674 		return;
17675 
17676 	dhd = (dhd_info_t *)(pub->info);
17677 
17678 	if (dhd) {
17679 		mutex_lock(&dhd->logdump_lock);
17680 	}
17681 }
17682 
17683 void
17684 dhd_os_logdump_unlock(dhd_pub_t *pub)
17685 {
17686 	dhd_info_t *dhd = NULL;
17687 
17688 	if (!pub)
17689 		return;
17690 
17691 	dhd = (dhd_info_t *)(pub->info);
17692 
17693 	if (dhd) {
17694 		mutex_unlock(&dhd->logdump_lock);
17695 	}
17696 }
17697 
17698 unsigned long
17699 dhd_os_dbgring_lock(void *lock)
17700 {
17701 	if (!lock)
17702 		return 0;
17703 
17704 	mutex_lock((struct mutex *)lock);
17705 
17706 	return 0;
17707 }
17708 
17709 void
17710 dhd_os_dbgring_unlock(void *lock, unsigned long flags)
17711 {
17712 	BCM_REFERENCE(flags);
17713 
17714 	if (!lock)
17715 		return;
17716 
17717 	mutex_unlock((struct mutex *)lock);
17718 }
17719 
17720 unsigned int
17721 dhd_os_get_ioctl_resp_timeout(void)
17722 {
17723 	return ((unsigned int)dhd_ioctl_timeout_msec);
17724 }
17725 
17726 void
17727 dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
17728 {
17729 	dhd_ioctl_timeout_msec = (int)timeout_msec;
17730 }
17731 
17732 int
17733 dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition)
17734 {
17735 	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
17736 	int timeout;
17737 
17738 	/* Convert timeout in milliseconds to jiffies */
17739 	timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
17740 
17741 #ifdef BCMQT_HW
17742 	DHD_ERROR(("%s, Timeout wait until %d mins (%d ms) in QT mode\n",
17743 		__FUNCTION__, (dhd_ioctl_timeout_msec / (60 * 1000)), dhd_ioctl_timeout_msec));
17744 #endif /* BCMQT_HW */
17745 
17746 	timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout);
17747 
17748 	return timeout;
17749 }
17750 
17751 int
17752 dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
17753 {
17754 	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17755 
17756 	wake_up(&dhd->ioctl_resp_wait);
17757 	return 0;
17758 }
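/*
 * Handshake sketch for the wait/wake pair above; the 'ioctl_received' flag
 * name is illustrative, not taken from this file:
 *
 *	waiter:	if (dhd_os_ioctl_resp_wait(pub, &prot->ioctl_received) == 0)
 *			... handle ioctl timeout ...
 *	waker:	prot->ioctl_received = 1;
 *		dhd_os_ioctl_resp_wake(pub);
 */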
17759 
17760 int
17761 dhd_os_d3ack_wait(dhd_pub_t *pub, uint *condition)
17762 {
17763 	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
17764 	int timeout;
17765 
17766 	/* Convert timeout in milliseconds to jiffies */
17767 	timeout = msecs_to_jiffies(D3_ACK_RESP_TIMEOUT);
17768 #ifdef BCMSLTGT
17769 	timeout *= htclkratio;
17770 #endif /* BCMSLTGT */
17771 
17772 	timeout = wait_event_timeout(dhd->d3ack_wait, (*condition), timeout);
17773 
17774 	return timeout;
17775 }
17776 
17777 #ifdef PCIE_INB_DW
17778 int
17779 dhd_os_ds_exit_wait(dhd_pub_t *pub, uint *condition)
17780 {
17781 	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
17782 	int timeout;
17783 
17784 	/* Convert timeout in milliseconds to jiffies */
17785 	timeout = msecs_to_jiffies(ds_exit_timeout_msec);
17786 #ifdef BCMSLTGT
17787 	timeout *= htclkratio;
17788 #endif /* BCMSLTGT */
17789 
17790 	timeout = wait_event_timeout(dhd->ds_exit_wait, (*condition), timeout);
17791 
17792 	return timeout;
17793 }
17794 
17795 int
17796 dhd_os_ds_exit_wake(dhd_pub_t *pub)
17797 {
17798 	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17799 
17800 	wake_up_all(&dhd->ds_exit_wait);
17801 	return 0;
17802 }
17803 
17804 #endif /* PCIE_INB_DW */
17805 
17806 int
17807 dhd_os_d3ack_wake(dhd_pub_t *pub)
17808 {
17809 	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17810 
17811 	wake_up(&dhd->d3ack_wait);
17812 	return 0;
17813 }
17814 
17815 int
17816 dhd_os_busbusy_wait_negation(dhd_pub_t *pub, uint *condition)
17817 {
17818 	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
17819 	int timeout;
17820 
17821 	/* Wait for bus usage contexts to gracefully exit within some timeout value.
17822 	 * Set the timeout a little higher than dhd_ioctl_timeout_msec
17823 	 * so that the IOCTL timeout is not affected.
17824 	 */
17825 	/* Convert timeout in milliseconds to jiffies */
17826 	timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
17827 
17828 	timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, !(*condition), timeout);
17829 
17830 	return timeout;
17831 }
17832 
17833 /*
17834  * Wait until the condition *var == condition is met.
17835  * Returns 0 if the condition was still false when the timeout elapsed
17836  * Returns non-zero (remaining jiffies) if the condition evaluated to true
17837  */
17838 int
17839 dhd_os_busbusy_wait_condition(dhd_pub_t *pub, uint *var, uint condition)
17840 {
17841 	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
17842 	int timeout;
17843 
17844 	/* Convert timeout in milliseconds to jiffies */
17845 	timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
17846 
17847 	timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, (*var == condition), timeout);
17848 
17849 	return timeout;
17850 }
17851 
17852 /*
17853  * Wait until the '(*var & bitmask) == condition' is met.
17854  * Returns 0 if the condition was still false when the timeout elapsed
17855  * Returns non-zero (remaining jiffies) if the condition evaluated to true
17856  */
17857 int
17858 dhd_os_busbusy_wait_bitmask(dhd_pub_t *pub, uint *var,
17859 		uint bitmask, uint condition)
17860 {
17861 	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
17862 	int timeout;
17863 
17864 	/* Convert timeout in milliseconds to jiffies */
17865 	timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
17866 
17867 	timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait,
17868 			((*var & bitmask) == condition), timeout);
17869 
17870 	return timeout;
17871 }
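/*
 * Pairing sketch (field and flag names illustrative): a teardown path can
 * block until the bus leaves a busy state, and whoever clears that state
 * must wake the waiters via dhd_os_busbusy_wake() below:
 *
 *	dhd_os_busbusy_wait_bitmask(pub, &pub->dhd_bus_busy_state,
 *			DHD_BUS_BUSY_IN_DPC, 0);
 *	...
 *	pub->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_DPC;
 *	dhd_os_busbusy_wake(pub);
 */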
17872 
17873 int
17874 dhd_os_dmaxfer_wait(dhd_pub_t *pub, uint *condition)
17875 {
17876 	int ret = 0;
17877 	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
17878 	int timeout;
17879 
17880 	timeout = msecs_to_jiffies(IOCTL_DMAXFER_TIMEOUT);
17881 
17882 	ret = wait_event_timeout(dhd->dmaxfer_wait, (*condition), timeout);
17883 
17884 	return ret;
17885 
17886 }
17887 
17888 int
17889 dhd_os_dmaxfer_wake(dhd_pub_t *pub)
17890 {
17891 	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17892 
17893 	wake_up(&dhd->dmaxfer_wait);
17894 	return 0;
17895 }
17896 
17897 void
17898 dhd_os_tx_completion_wake(dhd_pub_t *dhd)
17899 {
17900 	/* Call wmb() so the event value update is visible before waking up the waiter */
17901 	OSL_SMP_WMB();
17902 	wake_up(&dhd->tx_completion_wait);
17903 }
17904 
17905 /* Fix compilation error for FC11 */
17906 INLINE int
17907 dhd_os_busbusy_wake(dhd_pub_t *pub)
17908 {
17909 	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17910 	/* Call wmb() so the event value update is visible before waking up the waiter */
17911 	OSL_SMP_WMB();
17912 	wake_up(&dhd->dhd_bus_busy_state_wait);
17913 	return 0;
17914 }
17915 
17916 void
17917 dhd_os_wd_timer_extend(void *bus, bool extend)
17918 {
17919 #ifndef BCMDBUS
17920 	dhd_pub_t *pub = bus;
17921 	dhd_info_t *dhd = (dhd_info_t *)pub->info;
17922 
17923 	if (extend)
17924 		dhd_os_wd_timer(bus, WATCHDOG_EXTEND_INTERVAL);
17925 	else
17926 		dhd_os_wd_timer(bus, dhd->default_wd_interval);
17927 #endif /* !BCMDBUS */
17928 }
17929 
17930 void
17931 dhd_os_wd_timer(void *bus, uint wdtick)
17932 {
17933 #ifndef BCMDBUS
17934 	dhd_pub_t *pub = bus;
17935 	dhd_info_t *dhd = (dhd_info_t *)pub->info;
17936 	unsigned long flags;
17937 
17938 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
17939 
17940 	if (!dhd) {
17941 		DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
17942 		return;
17943 	}
17944 
17945 	DHD_GENERAL_LOCK(pub, flags);
17946 
17947 	/* don't start the wd until fw is loaded */
17948 	if (pub->busstate == DHD_BUS_DOWN) {
17949 		DHD_GENERAL_UNLOCK(pub, flags);
17950 #ifdef BCMSDIO
17951 		if (!wdtick) {
17952 			DHD_OS_WD_WAKE_UNLOCK(pub);
17953 		}
17954 #endif /* BCMSDIO */
17955 		return;
17956 	}
17957 
17958 	/* Totally stop the timer */
17959 	if (!wdtick && dhd->wd_timer_valid == TRUE) {
17960 		dhd->wd_timer_valid = FALSE;
17961 		DHD_GENERAL_UNLOCK(pub, flags);
17962 		del_timer_sync(&dhd->timer);
17963 #ifdef BCMSDIO
17964 		DHD_OS_WD_WAKE_UNLOCK(pub);
17965 #endif /* BCMSDIO */
17966 		return;
17967 	}
17968 
17969 	if (wdtick) {
17970 #ifdef BCMSDIO
17971 		DHD_OS_WD_WAKE_LOCK(pub);
17972 		dhd_watchdog_ms = (uint)wdtick;
17973 #endif /* BCMSDIO */
17974 		/* Re-arm the timer at the last watchdog period */
17975 		mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
17976 		dhd->wd_timer_valid = TRUE;
17977 	}
17978 	DHD_GENERAL_UNLOCK(pub, flags);
17979 #endif /* !BCMDBUS */
17980 }
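/*
 * Usage sketch following the wdtick semantics above: a non-zero tick
 * (re)arms the watchdog; zero tears it down via del_timer_sync():
 *
 *	dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);	// arm or re-arm
 *	...
 *	dhd_os_wd_timer(&dhd->pub, 0);			// stop synchronously
 */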
17981 
17982 #ifdef DHD_PCIE_RUNTIMEPM
17983 void
17984 dhd_os_runtimepm_timer(void *bus, uint tick)
17985 {
17986 	dhd_pub_t *pub = bus;
17987 	dhd_info_t *dhd = (dhd_info_t *)pub->info;
17988 	unsigned long flags;
17989 
17990 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
17991 
17992 	if (!dhd) {
17993 		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
17994 		return;
17995 	}
17996 
17997 	DHD_GENERAL_LOCK(pub, flags);
17998 
17999 	/* don't start the RPM until fw is loaded */
18000 	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(pub)) {
18001 		DHD_GENERAL_UNLOCK(pub, flags);
18002 		return;
18003 	}
18004 
18005 	/* If tick is non-zero, the request is to start the timer */
18006 	if (tick) {
18007 		/* Start the timer only if it's not already running */
18008 		if (dhd->rpm_timer_valid == FALSE) {
18009 			mod_timer(&dhd->rpm_timer, jiffies + msecs_to_jiffies(dhd_runtimepm_ms));
18010 			dhd->rpm_timer_valid = TRUE;
18011 			DHD_ERROR(("DHD Runtime PM Timer ON\n"));
18012 		}
18013 	} else {
18014 		/* tick is zero, we have to stop the timer */
18015 		/* Stop the timer only if it's running, otherwise we don't have to do anything */
18016 		if (dhd->rpm_timer_valid == TRUE) {
18017 			dhd->rpm_timer_valid = FALSE;
18018 			DHD_GENERAL_UNLOCK(pub, flags);
18019 			del_timer_sync(&dhd->rpm_timer);
18020 			DHD_ERROR(("DHD Runtime PM Timer OFF \n"));
18021 			/* we have already released the lock, so just go to exit */
18022 			goto exit;
18023 		}
18024 	}
18025 
18026 	DHD_GENERAL_UNLOCK(pub, flags);
18027 exit:
18028 	return;
18029 
18030 }
18031 
18032 #endif /* DHD_PCIE_RUNTIMEPM */
18033 
18034 void *
18035 dhd_os_open_image1(dhd_pub_t *pub, char *filename)
18036 {
18037 	struct file *fp;
18038 	int size;
18039 
18040 	fp = filp_open(filename, O_RDONLY, 0);
18041 	/*
18042 	 * 2.6.11 (FC4) supports filp_open() but later revs don't?
18043 	 * Alternative:
18044 	 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
18045 	 * ???
18046 	 */
18047 	 if (IS_ERR(fp)) {
18048 		 fp = NULL;
18049 		 goto err;
18050 	 }
18051 
18052 	 if (!S_ISREG(file_inode(fp)->i_mode)) {
18053 		 DHD_ERROR(("%s: %s is not a regular file\n", __FUNCTION__, filename));
18054 		 filp_close(fp, NULL); fp = NULL; /* don't leak the open file */
18055 		 goto err;
18056 	 }
18057 
18058 	 size = i_size_read(file_inode(fp));
18059 	 if (size <= 0) {
18060 		 DHD_ERROR(("%s: %s file size invalid %d\n", __FUNCTION__, filename, size));
18061 		 filp_close(fp, NULL); fp = NULL; /* don't leak the open file */
18062 		 goto err;
18063 	 }
18064 
18065 	 DHD_ERROR(("%s: %s (%d bytes) open success\n", __FUNCTION__, filename, size));
18066 
18067 err:
18068 	 return fp;
18069 }
18070 
18071 int
18072 dhd_os_get_image_block(char *buf, int len, void *image)
18073 {
18074 	struct file *fp = (struct file *)image;
18075 	int rdlen;
18076 	int size;
18077 
18078 	if (!image) {
18079 		return 0;
18080 	}
18081 
18082 	size = i_size_read(file_inode(fp));
18083 	rdlen = kernel_read_compat(fp, fp->f_pos, buf, MIN(len, size));
18084 
18085 	if (len >= size && size != rdlen) {
18086 		return -EIO;
18087 	}
18088 
18089 	if (rdlen > 0) {
18090 		fp->f_pos += rdlen;
18091 	}
18092 
18093 	return rdlen;
18094 }
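/*
 * Consumer sketch for the image helpers in this block ('fw_path' and the
 * buffer size are illustrative): open, read in chunks until EOF, close:
 *
 *	void *image = dhd_os_open_image1(pub, fw_path);
 *	char buf[2048];
 *	int len;
 *	while (image && (len = dhd_os_get_image_block(buf, sizeof(buf), image)) > 0)
 *		... push len bytes to the dongle ...
 *	dhd_os_close_image1(pub, image);
 */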
18095 
18096 #if defined(BT_OVER_SDIO)
18097 int
18098 dhd_os_gets_image(dhd_pub_t *pub, char *str, int len, void *image)
18099 {
18100 	struct file *fp = (struct file *)image;
18101 	int rd_len;
18102 	uint str_len = 0;
18103 	char *str_end = NULL;
18104 
18105 	if (!image)
18106 		return 0;
18107 
18108 	rd_len = kernel_read_compat(fp, fp->f_pos, str, len);
18109 	str_end = strnchr(str, len, '\n');
18110 	if (str_end == NULL) {
18111 		goto err;
18112 	}
18113 	str_len = (uint)(str_end - str);
18114 
18115 	/* Advance file pointer past the string length */
18116 	fp->f_pos += str_len + 1;
18117 	bzero(str_end, rd_len - str_len);
18118 
18119 err:
18120 	return str_len;
18121 }
18122 #endif /* defined (BT_OVER_SDIO) */
18123 
18124 int
18125 dhd_os_get_image_size(void *image)
18126 {
18127 	struct file *fp = (struct file *)image;
18128 	int size;
18129 	if (!image) {
18130 		return 0;
18131 	}
18132 
18133 	size = i_size_read(file_inode(fp));
18134 
18135 	return size;
18136 }
18137 
18138 void
18139 dhd_os_close_image1(dhd_pub_t *pub, void *image)
18140 {
18141 	if (image) {
18142 		filp_close((struct file *)image, NULL);
18143 	}
18144 }
18145 
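/* Bus lock choice (descriptive): when the DPC runs as a real-time kernel
 * thread (dhd_dpc_prio >= 0) the bus lock must be a semaphore so the holder
 * may sleep; in the tasklet and BCMDBUS cases a BH spinlock is used instead.
 * dhd_os_sdlock()/dhd_os_sdunlock() must always be paired.
 */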
18146 void
18147 dhd_os_sdlock(dhd_pub_t *pub)
18148 {
18149 	dhd_info_t *dhd;
18150 
18151 	dhd = (dhd_info_t *)(pub->info);
18152 
18153 #ifndef BCMDBUS
18154 	if (dhd_dpc_prio >= 0)
18155 		down(&dhd->sdsem);
18156 	else
18157 		spin_lock_bh(&dhd->sdlock);
18158 #else
18159 	spin_lock_bh(&dhd->sdlock);
18160 #endif /* BCMDBUS */
18161 }
18162 
18163 void
18164 dhd_os_sdunlock(dhd_pub_t *pub)
18165 {
18166 	dhd_info_t *dhd;
18167 
18168 	dhd = (dhd_info_t *)(pub->info);
18169 
18170 #ifndef BCMDBUS
18171 	if (dhd_dpc_prio >= 0)
18172 		up(&dhd->sdsem);
18173 	else
18174 		spin_unlock_bh(&dhd->sdlock);
18175 #else
18176 	spin_unlock_bh(&dhd->sdlock);
18177 #endif /* BCMDBUS */
18178 }
18179 
18180 void
18181 dhd_os_sdlock_txq(dhd_pub_t *pub)
18182 {
18183 	dhd_info_t *dhd;
18184 
18185 	dhd = (dhd_info_t *)(pub->info);
18186 #ifdef BCMDBUS
18187 	spin_lock_irqsave(&dhd->txqlock, dhd->txqlock_flags);
18188 #else
18189 	spin_lock_bh(&dhd->txqlock);
18190 #endif
18191 }
18192 
18193 void
18194 dhd_os_sdunlock_txq(dhd_pub_t *pub)
18195 {
18196 	dhd_info_t *dhd;
18197 
18198 	dhd = (dhd_info_t *)(pub->info);
18199 #ifdef BCMDBUS
18200 	spin_unlock_irqrestore(&dhd->txqlock, dhd->txqlock_flags);
18201 #else
18202 	spin_unlock_bh(&dhd->txqlock);
18203 #endif
18204 }
18205 
18206 void
18207 dhd_os_sdlock_rxq(dhd_pub_t *pub)
18208 {
18209 }
18210 
18211 void
18212 dhd_os_sdunlock_rxq(dhd_pub_t *pub)
18213 {
18214 }
18215 
18216 static void
18217 dhd_os_rxflock(dhd_pub_t *pub)
18218 {
18219 	dhd_info_t *dhd;
18220 
18221 	dhd = (dhd_info_t *)(pub->info);
18222 	spin_lock_bh(&dhd->rxf_lock);
18223 
18224 }
18225 
18226 static void
18227 dhd_os_rxfunlock(dhd_pub_t *pub)
18228 {
18229 	dhd_info_t *dhd;
18230 
18231 	dhd = (dhd_info_t *)(pub->info);
18232 	spin_unlock_bh(&dhd->rxf_lock);
18233 }
18234 
18235 #ifdef DHDTCPACK_SUPPRESS
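/* Pairing note (descriptive, not new behavior): on non-BCMSDIO builds the
 * lock returns the saved irq flags from osl_spin_lock(), and the very same
 * value must be handed back to dhd_os_tcpackunlock(), e.g.
 *	flags = dhd_os_tcpacklock(pub);
 *	...
 *	dhd_os_tcpackunlock(pub, flags);
 * On BCMSDIO builds the flags value is unused (BCM_REFERENCE'd).
 */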
18236 unsigned long
18237 dhd_os_tcpacklock(dhd_pub_t *pub)
18238 {
18239 	dhd_info_t *dhd;
18240 	unsigned long flags = 0;
18241 
18242 	dhd = (dhd_info_t *)(pub->info);
18243 
18244 	if (dhd) {
18245 #ifdef BCMSDIO
18246 		spin_lock_bh(&dhd->tcpack_lock);
18247 #else
18248 		flags = osl_spin_lock(&dhd->tcpack_lock);
18249 #endif /* BCMSDIO */
18250 	}
18251 
18252 	return flags;
18253 }
18254 
18255 void
18256 dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags)
18257 {
18258 	dhd_info_t *dhd;
18259 
18260 #ifdef BCMSDIO
18261 	BCM_REFERENCE(flags);
18262 #endif /* BCMSDIO */
18263 
18264 	dhd = (dhd_info_t *)(pub->info);
18265 
18266 	if (dhd) {
18267 #ifdef BCMSDIO
18268 		spin_unlock_bh(&dhd->tcpack_lock);
18269 #else
18270 		osl_spin_unlock(&dhd->tcpack_lock, flags);
18271 #endif /* BCMSDIO */
18272 	}
18273 }
18274 #endif /* DHDTCPACK_SUPPRESS */
18275 
18276 uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail)
18277 {
18278 	uint8* buf;
18279 	gfp_t flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
18280 
18281 	buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size);
18282 	if (buf == NULL && kmalloc_if_fail)
18283 		buf = kmalloc(size, flags);
18284 
18285 	return buf;
18286 }
18287 
18288 void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size)
18289 {
18290 }
18291 
18292 #if defined(WL_WIRELESS_EXT)
18293 struct iw_statistics *
18294 dhd_get_wireless_stats(struct net_device *dev)
18295 {
18296 	int res = 0;
18297 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
18298 
18299 	if (!dhd->pub.up) {
18300 		return NULL;
18301 	}
18302 
18303 	if (!(dev->flags & IFF_UP)) {
18304 		return NULL;
18305 	}
18306 
18307 	res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);
18308 
18309 	if (res == 0)
18310 		return &dhd->iw.wstats;
18311 	else
18312 		return NULL;
18313 }
18314 #endif /* defined(WL_WIRELESS_EXT) */
18315 
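/* Event dispatch order (summary of the function below): the raw event is
 * first parsed/consumed by wl_process_host_event(); WLC_E_IF events stop
 * there because the netdev may not exist yet; everything else is then fanned
 * out to the WL_EVENT handler and, under up_lock, to cfg80211.
 */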
18316 static int
18317 dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen,
18318 	wl_event_msg_t *event, void **data)
18319 {
18320 	int bcmerror = 0;
18321 #ifdef WL_CFG80211
18322 	unsigned long flags = 0;
18323 #endif /* WL_CFG80211 */
18324 	ASSERT(dhd != NULL);
18325 
18326 #ifdef SHOW_LOGTRACE
18327 	bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data,
18328 		&dhd->event_data);
18329 #else
18330 	bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data,
18331 		NULL);
18332 #endif /* SHOW_LOGTRACE */
18333 	if (unlikely(bcmerror != BCME_OK)) {
18334 		return bcmerror;
18335 	}
18336 
18337 	if (ntoh32(event->event_type) == WLC_E_IF) {
18338 		/* WLC_E_IF event types are consumed by wl_process_host_event.
18339 		 * For ifadd/del ops, the netdev ptr may not be valid at this
18340 	 * point, so return before invoking cfg80211/wext handlers.
18341 		 */
18342 		return BCME_OK;
18343 	}
18344 
18345 #ifdef WL_EVENT
18346 	wl_ext_event_send(dhd->pub.event_params, event, *data);
18347 #endif
18348 
18349 #ifdef WL_CFG80211
18350 	if (dhd->iflist[ifidx]->net) {
18351 		DHD_UP_LOCK(&dhd->pub.up_lock, flags);
18352 		if (dhd->pub.up) {
18353 			wl_cfg80211_event(dhd->iflist[ifidx]->net, event, *data);
18354 		}
18355 		DHD_UP_UNLOCK(&dhd->pub.up_lock, flags);
18356 	}
18357 #endif /* defined(WL_CFG80211) */
18358 
18359 	return (bcmerror);
18360 }
18361 
18362 /* send up locally generated event */
18363 void
18364 dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
18365 {
18366 	switch (ntoh32(event->event_type)) {
18367 	/* Handle error case or further events here */
18368 	default:
18369 		break;
18370 	}
18371 }
18372 
18373 #ifdef LOG_INTO_TCPDUMP
18374 void
18375 dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len)
18376 {
18377 	struct sk_buff *p, *skb;
18378 	uint32 pktlen;
18379 	int len;
18380 	dhd_if_t *ifp;
18381 	dhd_info_t *dhd;
18382 	uchar *skb_data;
18383 	int ifidx = 0;
18384 	struct ether_header eth;
18385 
18386 	pktlen = sizeof(eth) + data_len;
18387 	dhd = dhdp->info;
18388 
18389 	if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
18390 		ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));
18391 
18392 		bcopy(&dhdp->mac, &eth.ether_dhost, ETHER_ADDR_LEN);
18393 		bcopy(&dhdp->mac, &eth.ether_shost, ETHER_ADDR_LEN);
18394 		ETHER_TOGGLE_LOCALADDR(&eth.ether_shost);
18395 		eth.ether_type = hton16(ETHER_TYPE_BRCM);
18396 
18397 		bcopy((void *)&eth, PKTDATA(dhdp->osh, p), sizeof(eth));
18398 		bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len);
18399 		skb = PKTTONATIVE(dhdp->osh, p);
18400 		skb_data = skb->data;
18401 		len = skb->len;
18402 
18403 		ifidx = dhd_ifname2idx(dhd, "wlan0");
18404 		ifp = dhd->iflist[ifidx];
18405 		if (ifp == NULL)
18406 			 ifp = dhd->iflist[0];
18407 
18408 		ASSERT(ifp);
18409 		skb->dev = ifp->net;
18410 		skb->protocol = eth_type_trans(skb, skb->dev);
18411 		skb->data = skb_data;
18412 		skb->len = len;
18413 
18414 		/* Strip header, count, deliver upward */
18415 		skb_pull(skb, ETH_HLEN);
18416 
18417 		bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
18418 			__FUNCTION__, __LINE__);
18419 		/* Send the packet */
18420 		if (in_interrupt()) {
18421 			netif_rx(skb);
18422 		} else {
18423 			netif_rx_ni(skb);
18424 		}
18425 	} else {
18426 		/* Could not allocate a sk_buf */
18427 		DHD_ERROR(("%s: unable to alloc sk_buf\n", __FUNCTION__));
18428 	}
18429 }
18430 #endif /* LOG_INTO_TCPDUMP */
18431 
18432 void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
18433 {
18434 #if defined(BCMSDIO)
18435 	struct dhd_info *dhdinfo =  dhd->info;
18436 
18437 	int timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT);
18438 
18439 	dhd_os_sdunlock(dhd);
18440 	wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout);
18441 	dhd_os_sdlock(dhd);
18442 #endif /* defined(BCMSDIO) */
18443 	return;
18444 } /* dhd_wait_for_event */
18445 
18446 void dhd_wait_event_wakeup(dhd_pub_t *dhd)
18447 {
18448 #if defined(BCMSDIO)
18449 	struct dhd_info *dhdinfo =  dhd->info;
18450 	if (waitqueue_active(&dhdinfo->ctrl_wait))
18451 		wake_up(&dhdinfo->ctrl_wait);
18452 #endif
18453 	return;
18454 }
18455 
18456 #if defined(BCMSDIO) || defined(BCMPCIE) || defined(BCMDBUS)
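/* flag semantics (as implemented below): flag == TRUE tears the dongle down --
 * wlfc/PNO/RTT state is deinitialized first and the recovery flags are cleared
 * after the reset; flag == FALSE brings the bus back up, refreshing the
 * FW/NVRAM paths first on SDIO builds.
 */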
18457 int
18458 dhd_net_bus_devreset(struct net_device *dev, uint8 flag)
18459 {
18460 	int ret;
18461 
18462 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
18463 
18464 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
18465 	if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) < 0)
18466 		return BCME_ERROR;
18467 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
18468 
18469 	if (flag == TRUE) {
18470 #ifndef WL_CFG80211
18471 		/* Issue wl down command for non-cfg before resetting the chip */
18472 		if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
18473 			DHD_TRACE(("%s: wl down failed\n", __FUNCTION__));
18474 		}
18475 #endif /* !WL_CFG80211 */
18476 #ifdef PROP_TXSTATUS
18477 		if (dhd->pub.wlfc_enabled) {
18478 			dhd_wlfc_deinit(&dhd->pub);
18479 		}
18480 #endif /* PROP_TXSTATUS */
18481 #ifdef PNO_SUPPORT
18482 		if (dhd->pub.pno_state) {
18483 			dhd_pno_deinit(&dhd->pub);
18484 		}
18485 #endif
18486 #ifdef RTT_SUPPORT
18487 		if (dhd->pub.rtt_state) {
18488 			dhd_rtt_deinit(&dhd->pub);
18489 		}
18490 #endif /* RTT_SUPPORT */
18491 
18492 		DHD_SSSR_DUMP_DEINIT(&dhd->pub);
18493 #ifdef DHD_SDTC_ETB_DUMP
18494 		if (dhd->pub.sdtc_etb_inited) {
18495 			dhd_sdtc_etb_deinit(&dhd->pub);
18496 		}
18497 #endif /* DHD_SDTC_ETB_DUMP */
18498 /*
18499  * XXX Detach only if the module is not attached by default at dhd_attach.
18500  * If attached by default, we need to keep it till dhd_detach, so that
18501  * module is not detached at wifi on/off
18502  */
18503 #if defined(DBG_PKT_MON) && !defined(DBG_PKT_MON_INIT_DEFAULT)
18504 		dhd_os_dbg_detach_pkt_monitor(&dhd->pub);
18505 #endif /* DBG_PKT_MON */
18506 	}
18507 
18508 #ifdef BCMSDIO
18509 	/* XXX Some DHD modules (e.g. cfg80211) configures operation mode based on firmware name.
18510 	 * This is indeed a hack but we have to make it work properly before we have a better
18511 	 * solution
18512 	 */
18513 	if (!flag) {
18514 		dhd_update_fw_nv_path(dhd);
18515 		/* update firmware and nvram path to sdio bus */
18516 		dhd_bus_update_fw_nv_path(dhd->pub.bus,
18517 			dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path);
18518 	}
18519 #endif /* BCMSDIO */
18520 #if defined(CONFIG_ARCH_EXYNOS) && defined(BCMPCIE)
18521 #if !defined(CONFIG_SOC_EXYNOS8890) && !defined(SUPPORT_EXYNOS7420)
18522 	/* XXX: JIRA SWWLAN-139454: Added L1ss enable
18523 	 * after firmware download completion due to link down issue
18524 	 * JIRA SWWLAN-142236: Amendment - Changed L1ss enable point
18525 	 */
18526 	DHD_ERROR(("%s Disable L1ss EP side\n", __FUNCTION__));
18527 	if (flag == FALSE && dhd->pub.busstate == DHD_BUS_DOWN) {
18528 #if defined(CONFIG_SOC_GS101)
18529 		exynos_pcie_rc_l1ss_ctrl(0, PCIE_L1SS_CTRL_WIFI, 1);
18530 #else
18531 		exynos_pcie_l1ss_ctrl(0, PCIE_L1SS_CTRL_WIFI);
18532 #endif /* CONFIG_SOC_GS101  */
18533 	}
18534 #endif /* !CONFIG_SOC_EXYNOS8890 && !defined(SUPPORT_EXYNOS7420)  */
18535 #endif /* CONFIG_ARCH_EXYNOS && BCMPCIE */
18536 
18537 	ret = dhd_bus_devreset(&dhd->pub, flag);
18538 
18539 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
18540 	pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus));
18541 	pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus));
18542 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
18543 
18544 	if (flag) {
18545 		/* Clear some flags for recovery logic */
18546 		dhd->pub.dongle_trap_occured = 0;
18547 #ifdef BT_OVER_PCIE
18548 		dhd->pub.dongle_trap_due_to_bt = 0;
18549 #endif /* BT_OVER_PCIE */
18550 		dhd->pub.iovar_timeout_occured = 0;
18551 #ifdef PCIE_FULL_DONGLE
18552 		dhd->pub.d3ack_timeout_occured = 0;
18553 		dhd->pub.livelock_occured = 0;
18554 		dhd->pub.pktid_audit_failed = 0;
18555 #endif /* PCIE_FULL_DONGLE */
18556 		dhd->pub.smmu_fault_occurred = 0;
18557 		dhd->pub.iface_op_failed = 0;
18558 		dhd->pub.scan_timeout_occurred = 0;
18559 		dhd->pub.scan_busy_occurred = 0;
18560 	}
18561 
18562 	if (ret) {
18563 		DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret));
18564 	}
18565 
18566 	return ret;
18567 }
18568 
18569 #if defined(BCMSDIO) || defined(BCMPCIE)
18570 int
18571 dhd_net_bus_suspend(struct net_device *dev)
18572 {
18573 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
18574 	return dhd_bus_suspend(&dhd->pub);
18575 }
18576 
18577 int
18578 dhd_net_bus_resume(struct net_device *dev, uint8 stage)
18579 {
18580 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
18581 	return dhd_bus_resume(&dhd->pub, stage);
18582 }
18583 
18584 #endif /* BCMSDIO || BCMPCIE */
18585 #endif /* BCMSDIO || BCMPCIE || BCMDBUS */
18586 
18587 int net_os_set_suspend_disable(struct net_device *dev, int val)
18588 {
18589 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
18590 	int ret = 0;
18591 
18592 	if (dhd) {
18593 		ret = dhd->pub.suspend_disable_flag;
18594 		dhd->pub.suspend_disable_flag = val;
18595 	}
18596 	return ret;
18597 }
18598 
18599 int net_os_set_suspend(struct net_device *dev, int val, int force)
18600 {
18601 	int ret = 0;
18602 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
18603 
18604 	if (dhd && dhd->pub.conf->suspend_mode == EARLY_SUSPEND) {
18605 		if (!val)
18606 			dhd_conf_set_suspend_resume(&dhd->pub, val);
18607 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
18608 		ret = dhd_set_suspend(val, &dhd->pub);
18609 #else
18610 		ret = dhd_suspend_resume_helper(dhd, val, force);
18611 #endif
18612 #ifdef WL_CFG80211
18613 		wl_cfg80211_update_power_mode(dev);
18614 #endif
18615 		if (val)
18616 			dhd_conf_set_suspend_resume(&dhd->pub, val);
18617 	}
18618 	return ret;
18619 }
18620 
18621 int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val)
18622 {
18623 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
18624 
18625 	if (dhd) {
18626 		DHD_ERROR(("%s: Set bcn_li_dtim in suspend %d\n",
18627 			__FUNCTION__, val));
18628 		dhd->pub.suspend_bcn_li_dtim = val;
18629 	}
18630 
18631 	return 0;
18632 }
18633 
18634 int net_os_set_max_dtim_enable(struct net_device *dev, int val)
18635 {
18636 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
18637 
18638 	if (dhd) {
18639 		DHD_ERROR(("%s: use MAX bcn_li_dtim in suspend %s\n",
18640 			__FUNCTION__, (val ? "Enable" : "Disable")));
18641 		if (val) {
18642 			dhd->pub.max_dtim_enable = TRUE;
18643 		} else {
18644 			dhd->pub.max_dtim_enable = FALSE;
18645 		}
18646 	} else {
18647 		return -1;
18648 	}
18649 
18650 	return 0;
18651 }
18652 
18653 #ifdef DISABLE_DTIM_IN_SUSPEND
18654 int net_os_set_disable_dtim_in_suspend(struct net_device *dev, int val)
18655 {
18656 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
18657 
18658 	if (dhd) {
18659 		DHD_ERROR(("%s: Disable bcn_li_dtim in suspend %s\n",
18660 			__FUNCTION__, (val ? "Enable" : "Disable")));
18661 		if (val) {
18662 			dhd->pub.disable_dtim_in_suspend = TRUE;
18663 		} else {
18664 			dhd->pub.disable_dtim_in_suspend = FALSE;
18665 		}
18666 	} else {
18667 		return BCME_ERROR;
18668 	}
18669 
18670 	return BCME_OK;
18671 }
18672 #endif /* DISABLE_DTIM_IN_SUSPEND */
18673 
18674 #ifdef PKT_FILTER_SUPPORT
18675 int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
18676 {
18677 	int ret = 0;
18678 
18679 #ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
18680 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
18681 
18682 	if (!dhd_master_mode)
18683 		add_remove = !add_remove;
18684 	DHD_ERROR(("%s: add_remove = %d, num = %d\n", __FUNCTION__, add_remove, num));
18685 	if (!dhd || (num == DHD_UNICAST_FILTER_NUM)) {
18686 		return 0;
18687 	}
18688 
18689 #ifdef BLOCK_IPV6_PACKET
18690 	/* customer wants all IPv6 packets dropped */
18691 	if (num == DHD_MULTICAST6_FILTER_NUM) {
18692 		return 0;
18693 	}
18694 #endif /* BLOCK_IPV6_PACKET */
18695 
18696 	if (num >= dhd->pub.pktfilter_count) {
18697 		return -EINVAL;
18698 	}
18699 
18700 	ret = dhd_packet_filter_add_remove(&dhd->pub, add_remove, num);
18701 #endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
18702 
18703 	return ret;
18704 }
18705 
18706 /* XXX RB:4238 Change net_os_set_packet_filter() function name to net_os_enable_packet_filter()
18707  * previous code did 'set' & 'enable' in one function,
18708  * but from now on 'set' and 'enable' are handled separately:
18709  *  - set : net_os_rxfilter_add_remove() -> dhd_set_packet_filter() -> dhd_pktfilter_offload_set()
18710  *  - enable : net_os_enable_packet_filter() -> dhd_enable_packet_filter()
18711  *                                                              -> dhd_pktfilter_offload_enable()
18712  */
18713 int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val)
18714 
18715 {
18716 	int ret = 0;
18717 
18718 	/* Packet filtering is applied only while we are still in early-suspend
18719 	 * and need to turn it either ON or OFF.
18720 	 * We can always turn it OFF during early-suspend, but we turn it
18721 	 * back ON only if suspend_disable_flag was not set.
18722 	*/
18723 	if (dhdp && dhdp->up) {
18724 		if (dhdp->in_suspend) {
18725 			if (!val || (val && !dhdp->suspend_disable_flag))
18726 				dhd_enable_packet_filter(val, dhdp);
18727 		}
18728 	}
18729 	return ret;
18730 }
18731 
18732 /* function to enable/disable packet for Network device */
18733 int net_os_enable_packet_filter(struct net_device *dev, int val)
18734 {
18735 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
18736 
18737 	DHD_ERROR(("%s: val = %d\n", __FUNCTION__, val));
18738 	return dhd_os_enable_packet_filter(&dhd->pub, val);
18739 }
18740 #endif /* PKT_FILTER_SUPPORT */
18741 
18742 int
18743 dhd_dev_init_ioctl(struct net_device *dev)
18744 {
18745 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
18746 	int ret;
18747 
18748 	if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0)
18749 		goto done;
18750 
18751 done:
18752 	return ret;
18753 }
18754 
18755 int
18756 dhd_dev_get_feature_set(struct net_device *dev)
18757 {
18758 	dhd_info_t *ptr = *(dhd_info_t **)netdev_priv(dev);
18759 	dhd_pub_t *dhd = (&ptr->pub);
18760 	int feature_set = 0;
18761 
18762 	/* tdls and other capabilities can be missed during initialization */
18763 	if (dhd_get_fw_capabilities(dhd) < 0) {
18764 		DHD_ERROR(("Capabilities rechecking fail\n"));
18765 	}
18766 
18767 	if (FW_SUPPORTED(dhd, sta))
18768 		feature_set |= WIFI_FEATURE_INFRA;
18769 	if (FW_SUPPORTED(dhd, dualband))
18770 		feature_set |= WIFI_FEATURE_INFRA_5G;
18771 	if (FW_SUPPORTED(dhd, p2p))
18772 		feature_set |= WIFI_FEATURE_P2P;
18773 	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)
18774 		feature_set |= WIFI_FEATURE_SOFT_AP;
18775 	if (FW_SUPPORTED(dhd, tdls))
18776 		feature_set |= WIFI_FEATURE_TDLS;
18777 	if (FW_SUPPORTED(dhd, vsdb))
18778 		feature_set |= WIFI_FEATURE_TDLS_OFFCHANNEL;
18779 	if (FW_SUPPORTED(dhd, nan)) {
18780 		feature_set |= WIFI_FEATURE_NAN;
18781 		/* NAN is essential for d2d rtt */
18782 		if (FW_SUPPORTED(dhd, rttd2d))
18783 			feature_set |= WIFI_FEATURE_D2D_RTT;
18784 	}
18785 #ifdef RTT_SUPPORT
18786 	if (dhd->rtt_supported) {
18787 		feature_set |= WIFI_FEATURE_D2D_RTT;
18788 		feature_set |= WIFI_FEATURE_D2AP_RTT;
18789 	}
18790 #endif /* RTT_SUPPORT */
18791 #ifdef LINKSTAT_SUPPORT
18792 	feature_set |= WIFI_FEATURE_LINKSTAT;
18793 #endif /* LINKSTAT_SUPPORT */
18794 
18795 #if defined(PNO_SUPPORT) && !defined(DISABLE_ANDROID_PNO)
18796 	if (dhd_is_pno_supported(dhd)) {
18797 		feature_set |= WIFI_FEATURE_PNO;
18798 #ifdef BATCH_SCAN
18799 		/* Deprecated */
18800 		feature_set |= WIFI_FEATURE_BATCH_SCAN;
18801 #endif /* BATCH_SCAN */
18802 #ifdef GSCAN_SUPPORT
18803 		/* terence 20171115: remove to get GTS PASS
18804 		 * com.google.android.gts.wifi.WifiHostTest#testWifiScannerBatchTimestamp
18805 		 */
18806 //		feature_set |= WIFI_FEATURE_GSCAN;
18807 //		feature_set |= WIFI_FEATURE_HAL_EPNO;
18808 #endif /* GSCAN_SUPPORT */
18809 	}
18810 #endif /* PNO_SUPPORT && !DISABLE_ANDROID_PNO */
18811 #ifdef RSSI_MONITOR_SUPPORT
18812 	if (FW_SUPPORTED(dhd, rssi_mon)) {
18813 		feature_set |= WIFI_FEATURE_RSSI_MONITOR;
18814 	}
18815 #endif /* RSSI_MONITOR_SUPPORT */
18816 #ifdef WL11U
18817 	feature_set |= WIFI_FEATURE_HOTSPOT;
18818 #endif /* WL11U */
18819 #ifdef KEEP_ALIVE
18820 	feature_set |= WIFI_FEATURE_MKEEP_ALIVE;
18821 #endif /* KEEP_ALIVE */
18822 #ifdef NDO_CONFIG_SUPPORT
18823 	feature_set |= WIFI_FEATURE_CONFIG_NDO;
18824 #endif /* NDO_CONFIG_SUPPORT */
18825 #ifdef SUPPORT_RANDOM_MAC_SCAN
18826 	feature_set |= WIFI_FEATURE_SCAN_RAND;
18827 #endif /* SUPPORT_RANDOM_MAC_SCAN */
18828 #ifdef FILTER_IE
18829 	if (FW_SUPPORTED(dhd, fie)) {
18830 		feature_set |= WIFI_FEATURE_FILTER_IE;
18831 	}
18832 #endif /* FILTER_IE */
18833 #ifdef ROAMEXP_SUPPORT
18834 	 feature_set |= WIFI_FEATURE_CONTROL_ROAMING;
18835 #endif /* ROAMEXP_SUPPORT */
18836 #ifdef WL_LATENCY_MODE
18837 	feature_set |= WIFI_FEATURE_SET_LATENCY_MODE;
18838 #endif /* WL_LATENCY_MODE */
18839 #ifdef WL_P2P_RAND
18840 	feature_set |= WIFI_FEATURE_P2P_RAND_MAC;
18841 #endif /* WL_P2P_RAND */
18842 #ifdef WL_SAR_TX_POWER
18843 	feature_set |= WIFI_FEATURE_SET_TX_POWER_LIMIT;
18844 	feature_set |= WIFI_FEATURE_USE_BODY_HEAD_SAR;
18845 #endif /* WL_SAR_TX_POWER */
18846 #ifdef WL_STATIC_IF
18847 	feature_set |= WIFI_FEATURE_AP_STA;
18848 #endif /* WL_STATIC_IF */
18849 	return feature_set;
18850 }
18851 
18852 int
18853 dhd_dev_get_feature_set_matrix(struct net_device *dev, int num)
18854 {
18855 	int feature_set_full;
18856 	int ret = 0;
18857 
18858 	feature_set_full = dhd_dev_get_feature_set(dev);
18859 
18860 	/* Common feature set for all interfaces */
18861 	ret = (feature_set_full & WIFI_FEATURE_INFRA) |
18862 		(feature_set_full & WIFI_FEATURE_INFRA_5G) |
18863 		(feature_set_full & WIFI_FEATURE_D2D_RTT) |
18864 		(feature_set_full & WIFI_FEATURE_D2AP_RTT) |
18865 		(feature_set_full & WIFI_FEATURE_RSSI_MONITOR) |
18866 		(feature_set_full & WIFI_FEATURE_EPR);
18867 
18868 	/* Specific feature group for each interface */
18869 	switch (num) {
18870 	case 0:
18871 		ret |= (feature_set_full & WIFI_FEATURE_P2P) |
18872 			/* Not supported yet */
18873 			/* (feature_set_full & WIFI_FEATURE_NAN) | */
18874 			(feature_set_full & WIFI_FEATURE_TDLS) |
18875 			(feature_set_full & WIFI_FEATURE_PNO) |
18876 			(feature_set_full & WIFI_FEATURE_HAL_EPNO) |
18877 			(feature_set_full & WIFI_FEATURE_BATCH_SCAN) |
18878 			(feature_set_full & WIFI_FEATURE_GSCAN) |
18879 			(feature_set_full & WIFI_FEATURE_HOTSPOT) |
18880 			(feature_set_full & WIFI_FEATURE_ADDITIONAL_STA);
18881 		break;
18882 
18883 	case 1:
18884 		ret |= (feature_set_full & WIFI_FEATURE_P2P);
18885 		/* Not yet verified NAN with P2P */
18886 		/* (feature_set_full & WIFI_FEATURE_NAN) | */
18887 		break;
18888 
18889 	case 2:
18890 		ret |= (feature_set_full & WIFI_FEATURE_NAN) |
18891 			(feature_set_full & WIFI_FEATURE_TDLS) |
18892 			(feature_set_full & WIFI_FEATURE_TDLS_OFFCHANNEL);
18893 		break;
18894 
18895 	default:
18896 		ret = WIFI_FEATURE_INVALID;
18897 		DHD_ERROR(("%s: Out of index(%d) for get feature set\n", __FUNCTION__, num));
18898 		break;
18899 	}
18900 
18901 	return ret;
18902 }
18903 
18904 #ifdef CUSTOM_FORCE_NODFS_FLAG
18905 int
18906 dhd_dev_set_nodfs(struct net_device *dev, u32 nodfs)
18907 {
18908 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
18909 
18910 	if (nodfs)
18911 		dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
18912 	else
18913 		dhd->pub.dhd_cflags &= ~WLAN_PLAT_NODFS_FLAG;
18914 	dhd->pub.force_country_change = TRUE;
18915 	return 0;
18916 }
18917 #endif /* CUSTOM_FORCE_NODFS_FLAG */
18918 
18919 #ifdef NDO_CONFIG_SUPPORT
18920 int
18921 dhd_dev_ndo_cfg(struct net_device *dev, u8 enable)
18922 {
18923 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
18924 	dhd_pub_t *dhdp = &dhd->pub;
18925 	int ret = 0;
18926 
18927 	if (enable) {
18928 		/* enable ND offload feature (will be enabled in FW on suspend) */
18929 		dhdp->ndo_enable = TRUE;
18930 
18931 		/* Update changes of anycast address & DAD failed address */
18932 		ret = dhd_dev_ndo_update_inet6addr(dev);
18933 		if ((ret < 0) && (ret != BCME_NORESOURCE)) {
18934 			DHD_ERROR(("%s: failed to update host ip addr: %d\n", __FUNCTION__, ret));
18935 			return ret;
18936 		}
18937 	} else {
18938 		/* disable ND offload feature */
18939 		dhdp->ndo_enable = FALSE;
18940 
18941 		/* disable ND offload in FW */
18942 		ret = dhd_ndo_enable(dhdp, FALSE);
18943 		if (ret < 0) {
18944 			DHD_ERROR(("%s: failed to disable NDO: %d\n", __FUNCTION__, ret));
18945 		}
18946 	}
18947 	return ret;
18948 }
18949 
18950 static int
18951 dhd_dev_ndo_get_valid_inet6addr_count(struct inet6_dev *inet6)
18952 {
18953 	struct inet6_ifaddr *ifa;
18954 	struct ifacaddr6 *acaddr = NULL;
18955 	int addr_count = 0;
18956 
18957 	/* lock */
18958 	read_lock_bh(&inet6->lock);
18959 
18960 	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
18961 	/* Count valid unicast address */
18962 	list_for_each_entry(ifa, &inet6->addr_list, if_list) {
18963 		GCC_DIAGNOSTIC_POP();
18964 		if ((ifa->flags & IFA_F_DADFAILED) == 0) {
18965 			addr_count++;
18966 		}
18967 	}
18968 
18969 	/* Count anycast address */
18970 	acaddr = inet6->ac_list;
18971 	while (acaddr) {
18972 		addr_count++;
18973 		acaddr = acaddr->aca_next;
18974 	}
18975 
18976 	/* unlock */
18977 	read_unlock_bh(&inet6->lock);
18978 
18979 	return addr_count;
18980 }
18981 
18982 int
18983 dhd_dev_ndo_update_inet6addr(struct net_device *dev)
18984 {
18985 	dhd_info_t *dhd;
18986 	dhd_pub_t *dhdp;
18987 	struct inet6_dev *inet6;
18988 	struct inet6_ifaddr *ifa;
18989 	struct ifacaddr6 *acaddr = NULL;
18990 	struct in6_addr *ipv6_addr = NULL;
18991 	int cnt, i;
18992 	int ret = BCME_OK;
18993 
18994 	/*
18995 	 * this function evaluates the host IP addresses in struct inet6_dev:
18996 	 * unicast addrs in inet6_dev->addr_list,
18997 	 * anycast addrs in inet6_dev->ac_list.
18998 	 * while walking inet6_dev, read_lock_bh() is required to prevent
18999 	 * access to a NULL (freed) pointer.
19000 	 */
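	/* Step summary (descriptive only): 1) bail out into "overflow" mode when
	 * the host has more addresses than ndo_max_host_ip; 2) remove DAD-failed
	 * unicast entries; 3) flush all anycast entries; 4) if we were previously
	 * in overflow, re-add the valid unicast entries; 5) re-add the anycast
	 * entries; 6) finally clear the overflow flag and, if suspended,
	 * re-enable NDO in firmware.
	 */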
19001 
19002 	if (dev) {
19003 		inet6 = dev->ip6_ptr;
19004 		if (!inet6) {
19005 			DHD_ERROR(("%s: Invalid inet6_dev\n", __FUNCTION__));
19006 			return BCME_ERROR;
19007 		}
19008 
19009 		dhd = DHD_DEV_INFO(dev);
19010 		if (!dhd) {
19011 			DHD_ERROR(("%s: Invalid dhd_info\n", __FUNCTION__));
19012 			return BCME_ERROR;
19013 		}
19014 		dhdp = &dhd->pub;
19015 
19016 		if (dhd_net2idx(dhd, dev) != 0) {
19017 			DHD_ERROR(("%s: Not primary interface\n", __FUNCTION__));
19018 			return BCME_ERROR;
19019 		}
19020 	} else {
19021 		DHD_ERROR(("%s: Invalid net_device\n", __FUNCTION__));
19022 		return BCME_ERROR;
19023 	}
19024 
19025 	/* Check host IP overflow */
19026 	cnt = dhd_dev_ndo_get_valid_inet6addr_count(inet6);
19027 	if (cnt > dhdp->ndo_max_host_ip) {
19028 		if (!dhdp->ndo_host_ip_overflow) {
19029 			dhdp->ndo_host_ip_overflow = TRUE;
19030 			/* Disable ND offload in FW */
19031 			DHD_INFO(("%s: Host IP overflow, disable NDO\n", __FUNCTION__));
19032 			ret = dhd_ndo_enable(dhdp, FALSE);
19033 		}
19034 
19035 		return ret;
19036 	}
19037 
19038 	/*
19039 	 * Allocate ipv6 addr buffer to store addresses to be added/removed.
19040 	 * the driver needs to lock inet6_dev while accessing the structure, but
19041 	 * it cannot issue ioctls while inet6_dev is locked since they may schedule;
19042 	 * hence, copy the addresses into the buffer and do the ioctls after unlocking.
19043 	 */
19044 	ipv6_addr = (struct in6_addr *)MALLOC(dhdp->osh,
19045 		sizeof(struct in6_addr) * dhdp->ndo_max_host_ip);
19046 	if (!ipv6_addr) {
19047 		DHD_ERROR(("%s: failed to alloc ipv6 addr buffer\n", __FUNCTION__));
19048 		return BCME_NOMEM;
19049 	}
19050 
19051 	/* Find DAD failed unicast address to be removed */
19052 	cnt = 0;
19053 	read_lock_bh(&inet6->lock);
19054 	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
19055 	list_for_each_entry(ifa, &inet6->addr_list, if_list) {
19056 		GCC_DIAGNOSTIC_POP();
19057 		/* DAD failed unicast address */
19058 		if ((ifa->flags & IFA_F_DADFAILED) &&
19059 			(cnt < dhdp->ndo_max_host_ip)) {
19060 				memcpy(&ipv6_addr[cnt], &ifa->addr, sizeof(struct in6_addr));
19061 				cnt++;
19062 		}
19063 	}
19064 	read_unlock_bh(&inet6->lock);
19065 
19066 	/* Remove DAD failed unicast address */
19067 	for (i = 0; i < cnt; i++) {
19068 		DHD_INFO(("%s: Remove DAD failed addr\n", __FUNCTION__));
19069 		ret = dhd_ndo_remove_ip_by_addr(dhdp, (char *)&ipv6_addr[i], 0);
19070 		if (ret < 0) {
19071 			goto done;
19072 		}
19073 	}
19074 
19075 	/* Remove all anycast address */
19076 	ret = dhd_ndo_remove_ip_by_type(dhdp, WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0);
19077 	if (ret < 0) {
19078 		goto done;
19079 	}
19080 
19081 	/*
19082 	 * if ND offload was disabled due to host ip overflow,
19083 	 * attempt to add valid unicast address.
19084 	 */
19085 	if (dhdp->ndo_host_ip_overflow) {
19086 		/* Find valid unicast address */
19087 		cnt = 0;
19088 		read_lock_bh(&inet6->lock);
19089 		GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
19090 		list_for_each_entry(ifa, &inet6->addr_list, if_list) {
19091 			GCC_DIAGNOSTIC_POP();
19092 			/* valid unicast address */
19093 			if (!(ifa->flags & IFA_F_DADFAILED) &&
19094 				(cnt < dhdp->ndo_max_host_ip)) {
19095 					memcpy(&ipv6_addr[cnt], &ifa->addr,
19096 						sizeof(struct in6_addr));
19097 					cnt++;
19098 			}
19099 		}
19100 		read_unlock_bh(&inet6->lock);
19101 
19102 		/* Add valid unicast address */
19103 		for (i = 0; i < cnt; i++) {
19104 			ret = dhd_ndo_add_ip_with_type(dhdp,
19105 				(char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_UNICAST, 0);
19106 			if (ret < 0) {
19107 				goto done;
19108 			}
19109 		}
19110 	}
19111 
19112 	/* Find anycast address */
19113 	cnt = 0;
19114 	read_lock_bh(&inet6->lock);
19115 	acaddr = inet6->ac_list;
19116 	while (acaddr) {
19117 		if (cnt < dhdp->ndo_max_host_ip) {
19118 			memcpy(&ipv6_addr[cnt], &acaddr->aca_addr, sizeof(struct in6_addr));
19119 			cnt++;
19120 		}
19121 		acaddr = acaddr->aca_next;
19122 	}
19123 	read_unlock_bh(&inet6->lock);
19124 
19125 	/* Add anycast address */
19126 	for (i = 0; i < cnt; i++) {
19127 		ret = dhd_ndo_add_ip_with_type(dhdp,
19128 			(char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0);
19129 		if (ret < 0) {
19130 			goto done;
19131 		}
19132 	}
19133 
19134 	/* Now all host IP addresses have been added successfully */
19135 	if (dhdp->ndo_host_ip_overflow) {
19136 		dhdp->ndo_host_ip_overflow = FALSE;
19137 		if (dhdp->in_suspend) {
19138 			/* driver is in (early) suspend state, need to enable ND offload in FW */
19139 			DHD_INFO(("%s: enable NDO\n", __FUNCTION__));
19140 			ret = dhd_ndo_enable(dhdp, TRUE);
19141 		}
19142 	}
19143 
19144 done:
19145 	if (ipv6_addr) {
19146 		MFREE(dhdp->osh, ipv6_addr, sizeof(struct in6_addr) * dhdp->ndo_max_host_ip);
19147 	}
19148 
19149 	return ret;
19150 }
19151 
19152 #endif /* NDO_CONFIG_SUPPORT */
19153 
19154 #ifdef PNO_SUPPORT
19155 /* Linux wrapper to call common dhd_pno_stop_for_ssid */
19156 int
19157 dhd_dev_pno_stop_for_ssid(struct net_device *dev)
19158 {
19159 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
19160 
19161 	return (dhd_pno_stop_for_ssid(&dhd->pub));
19162 }
19163 /* Linux wrapper to call common dhd_pno_set_for_ssid */
19164 int
19165 dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid,
19166 	uint16  scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
19167 {
19168 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
19169 
19170 	return (dhd_pno_set_for_ssid(&dhd->pub, ssids_local, nssid, scan_fr,
19171 		pno_repeat, pno_freq_expo_max, channel_list, nchan));
19172 }
19173 
19174 /* Linux wrapper to call common dhd_pno_enable */
19175 int
19176 dhd_dev_pno_enable(struct net_device *dev, int enable)
19177 {
19178 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
19179 
19180 	return (dhd_pno_enable(&dhd->pub, enable));
19181 }
19182 
19183 /* Linux wrapper to call common dhd_pno_set_for_hotlist */
19184 int
19185 dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
19186 	struct dhd_pno_hotlist_params *hotlist_params)
19187 {
19188 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
19189 	return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params));
19190 }
19191 /* Linux wrapper to call common dhd_dev_pno_stop_for_batch */
19192 int
19193 dhd_dev_pno_stop_for_batch(struct net_device *dev)
19194 {
19195 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
19196 	return (dhd_pno_stop_for_batch(&dhd->pub));
19197 }
19198 
19199 /* Linux wrapper to call common dhd_dev_pno_set_for_batch */
19200 int
19201 dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params)
19202 {
19203 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
19204 	return (dhd_pno_set_for_batch(&dhd->pub, batch_params));
19205 }
19206 
19207 /* Linux wrapper to call common dhd_dev_pno_get_for_batch */
19208 int
19209 dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize)
19210 {
19211 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
19212 	return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL));
19213 }
19214 #endif /* PNO_SUPPORT */
19215 
19216 #if defined(OEM_ANDROID) && defined(PNO_SUPPORT)
19217 #ifdef GSCAN_SUPPORT
19218 bool
19219 dhd_dev_is_legacy_pno_enabled(struct net_device *dev)
19220 {
19221 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
19222 
19223 	return (dhd_is_legacy_pno_enabled(&dhd->pub));
19224 }
19225 
19226 int
19227 dhd_dev_set_epno(struct net_device *dev)
19228 {
19229 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
19230 	if (!dhd) {
19231 		return BCME_ERROR;
19232 	}
19233 	return dhd_pno_set_epno(&dhd->pub);
19234 }
19235 int
19236 dhd_dev_flush_fw_epno(struct net_device *dev)
19237 {
19238 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
19239 	if (!dhd) {
19240 		return BCME_ERROR;
19241 	}
19242 	return dhd_pno_flush_fw_epno(&dhd->pub);
19243 }
19244 
19245 /* Linux wrapper to call common dhd_pno_set_cfg_gscan */
19246 int
19247 dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
19248  void *buf, bool flush)
19249 {
19250 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
19251 
19252 	return (dhd_pno_set_cfg_gscan(&dhd->pub, type, buf, flush));
19253 }
19254 
19255 /* Linux wrapper to call common dhd_pno_get_gscan */
19256 void *
19257 dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
19258                       void *info, uint32 *len)
19259 {
19260 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
19261 
19262 	return (dhd_pno_get_gscan(&dhd->pub, type, info, len));
19263 }
19264 
19265 /* Linux wrapper to call common dhd_wait_batch_results_complete */
19266 int
19267 dhd_dev_wait_batch_results_complete(struct net_device *dev)
19268 {
19269 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
19270 
19271 	return (dhd_wait_batch_results_complete(&dhd->pub));
19272 }
19273 
19274 /* Linux wrapper to call common dhd_pno_lock_batch_results */
19275 int
19276 dhd_dev_pno_lock_access_batch_results(struct net_device *dev)
19277 {
19278 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
19279 
19280 	return (dhd_pno_lock_batch_results(&dhd->pub));
19281 }
19282 /* Linux wrapper to call common dhd_pno_unlock_batch_results */
19283 void
19284 dhd_dev_pno_unlock_access_batch_results(struct net_device *dev)
19285 {
19286 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
19287 
19288 	return (dhd_pno_unlock_batch_results(&dhd->pub));
19289 }
19290 
19291 /* Linux wrapper to call common dhd_pno_initiate_gscan_request */
19292 int
19293 dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush)
19294 {
19295 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
19296 
19297 	return (dhd_pno_initiate_gscan_request(&dhd->pub, run, flush));
19298 }
19299 
19300 /* Linux wrapper to call common dhd_pno_enable_full_scan_result */
19301 int
19302 dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time_flag)
19303 {
19304 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
19305 
19306 	return (dhd_pno_enable_full_scan_result(&dhd->pub, real_time_flag));
19307 }
19308 
19309 /* Linux wrapper to call common dhd_handle_hotlist_scan_evt */
19310 void *
19311 dhd_dev_hotlist_scan_event(struct net_device *dev,
19312       const void  *data, int *send_evt_bytes, hotlist_type_t type, u32 *buf_len)
19313 {
19314 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
19315 
19316 	return (dhd_handle_hotlist_scan_evt(&dhd->pub, data, send_evt_bytes, type, buf_len));
19317 }
19318 
19319 /* Linux wrapper to call common dhd_process_full_gscan_result */
19320 void *
19321 dhd_dev_process_full_gscan_result(struct net_device *dev,
19322 const void  *data, uint32 len, int *send_evt_bytes)
19323 {
19324 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
19325 
19326 	return (dhd_process_full_gscan_result(&dhd->pub, data, len, send_evt_bytes));
19327 }
19328 
19329 void
19330 dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type)
19331 {
19332 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
19333 
19334 	dhd_gscan_hotlist_cache_cleanup(&dhd->pub, type);
19335 
19336 	return;
19337 }
19338 
19339 int
19340 dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev)
19341 {
19342 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
19343 
19344 	return (dhd_gscan_batch_cache_cleanup(&dhd->pub));
19345 }
19346 
19347 /* Linux wrapper to call common dhd_retreive_batch_scan_results */
19348 int
19349 dhd_dev_retrieve_batch_scan(struct net_device *dev)
19350 {
19351 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
19352 
19353 	return (dhd_retreive_batch_scan_results(&dhd->pub));
19354 }
19355 
19356 /* Linux wrapper to call common dhd_pno_process_epno_result */
19357 void * dhd_dev_process_epno_result(struct net_device *dev,
19358 	const void  *data, uint32 event, int *send_evt_bytes)
19359 {
19360 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
19361 
19362 	return (dhd_pno_process_epno_result(&dhd->pub, data, event, send_evt_bytes));
19363 }
19364 
19365 int
19366 dhd_dev_set_lazy_roam_cfg(struct net_device *dev,
19367              wlc_roam_exp_params_t *roam_param)
19368 {
19369 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
19370 	wl_roam_exp_cfg_t roam_exp_cfg;
19371 	int err;
19372 
19373 	if (!roam_param) {
19374 		return BCME_BADARG;
19375 	}
19376 
19377 	DHD_INFO(("a_band_boost_thr %d a_band_penalty_thr %d\n",
19378 	      roam_param->a_band_boost_threshold, roam_param->a_band_penalty_threshold));
19379 	DHD_INFO(("a_band_boost_factor %d a_band_penalty_factor %d cur_bssid_boost %d\n",
19380 	      roam_param->a_band_boost_factor, roam_param->a_band_penalty_factor,
19381 	      roam_param->cur_bssid_boost));
19382 	DHD_INFO(("alert_roam_trigger_thr %d a_band_max_boost %d\n",
19383 	      roam_param->alert_roam_trigger_threshold, roam_param->a_band_max_boost));
19384 
19385 	memcpy(&roam_exp_cfg.params, roam_param, sizeof(*roam_param));
19386 	roam_exp_cfg.version = ROAM_EXP_CFG_VERSION;
19387 	roam_exp_cfg.flags = ROAM_EXP_CFG_PRESENT;
19388 	if (dhd->pub.lazy_roam_enable) {
19389 		roam_exp_cfg.flags |= ROAM_EXP_ENABLE_FLAG;
19390 	}
19391 	err = dhd_iovar(&dhd->pub, 0, "roam_exp_params",
19392 			(char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0,
19393 			TRUE);
19394 	if (err < 0) {
19395 		DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__, err));
19396 	}
19397 	return err;
19398 }
19399 
19400 int
19401 dhd_dev_lazy_roam_enable(struct net_device *dev, uint32 enable)
19402 {
19403 	int err;
19404 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
19405 	wl_roam_exp_cfg_t roam_exp_cfg;
19406 
19407 	memset(&roam_exp_cfg, 0, sizeof(roam_exp_cfg));
19408 	roam_exp_cfg.version = ROAM_EXP_CFG_VERSION;
19409 	if (enable) {
19410 		roam_exp_cfg.flags = ROAM_EXP_ENABLE_FLAG;
19411 	}
19412 
19413 	err = dhd_iovar(&dhd->pub, 0, "roam_exp_params",
19414 			(char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0,
19415 			TRUE);
19416 	if (err < 0) {
19417 		DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__, err));
19418 	} else {
19419 		dhd->pub.lazy_roam_enable = (enable != 0);
19420 	}
19421 	return err;
19422 }
19423 
19424 int
19425 dhd_dev_set_lazy_roam_bssid_pref(struct net_device *dev,
19426        wl_bssid_pref_cfg_t *bssid_pref, uint32 flush)
19427 {
19428 	int err;
19429 	uint len;
19430 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
19431 
19432 	bssid_pref->version = BSSID_PREF_LIST_VERSION;
19433 	/* By default programming bssid pref flushes out old values */
19434 	bssid_pref->flags = (flush && !bssid_pref->count) ? ROAM_EXP_CLEAR_BSSID_PREF: 0;
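	/* wl_bssid_pref_cfg_t already embeds one wl_bssid_pref_list_t element,
	 * hence the (count - 1) sizing below.
	 */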
19435 	len = sizeof(wl_bssid_pref_cfg_t);
19436 	if (bssid_pref->count) {
19437 		len += (bssid_pref->count - 1) * sizeof(wl_bssid_pref_list_t);
19438 	}
19439 	err = dhd_iovar(&dhd->pub, 0, "roam_exp_bssid_pref",
19440 			(char *)bssid_pref, len, NULL, 0, TRUE);
19441 	if (err != BCME_OK) {
19442 		DHD_ERROR(("%s : Failed to execute roam_exp_bssid_pref %d\n", __FUNCTION__, err));
19443 	}
19444 	return err;
19445 }
19446 #endif /* GSCAN_SUPPORT */
19447 
19448 #if defined(GSCAN_SUPPORT) || defined(ROAMEXP_SUPPORT)
19449 int
19450 dhd_dev_set_blacklist_bssid(struct net_device *dev, maclist_t *blacklist,
19451     uint32 len, uint32 flush)
19452 {
19453 	int err;
19454 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
19455 	int macmode;
19456 
19457 	if (blacklist) {
19458 		err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACLIST, (char *)blacklist,
19459 				len, TRUE, 0);
19460 		if (err != BCME_OK) {
19461 			DHD_ERROR(("%s : WLC_SET_MACLIST failed %d\n", __FUNCTION__, err));
19462 			return err;
19463 		}
19464 	}
19465 	/* By default programming blacklist flushes out old values */
19466 	macmode = (flush && !blacklist) ? WLC_MACMODE_DISABLED : WLC_MACMODE_DENY;
19467 	err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACMODE, (char *)&macmode,
19468 	              sizeof(macmode), TRUE, 0);
19469 	if (err != BCME_OK) {
19470 		DHD_ERROR(("%s : WLC_SET_MACMODE failed %d\n", __FUNCTION__, err));
19471 	}
19472 	return err;
19473 }
19474 
19475 int
19476 dhd_dev_set_whitelist_ssid(struct net_device *dev, wl_ssid_whitelist_t *ssid_whitelist,
19477     uint32 len, uint32 flush)
19478 {
19479 	int err;
19480 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
19481 	wl_ssid_whitelist_t whitelist_ssid_flush;
19482 
19483 	if (!ssid_whitelist) {
19484 		if (flush) {
19485 			ssid_whitelist = &whitelist_ssid_flush;
19486 			ssid_whitelist->ssid_count = 0;
19487 		} else {
19488 			DHD_ERROR(("%s : Nothing to do here\n", __FUNCTION__));
19489 			return BCME_BADARG;
19490 		}
19491 	}
19492 	ssid_whitelist->version = SSID_WHITELIST_VERSION;
19493 	ssid_whitelist->flags = flush ? ROAM_EXP_CLEAR_SSID_WHITELIST : 0;
19494 	err = dhd_iovar(&dhd->pub, 0, "roam_exp_ssid_whitelist", (char *)ssid_whitelist, len, NULL,
19495 			0, TRUE);
19496 	if (err != BCME_OK) {
19497 		if (err == BCME_UNSUPPORTED) {
19498 			DHD_ERROR(("%s : roam_exp_ssid_whitelist, UNSUPPORTED \n", __FUNCTION__));
19499 		} else {
19500 			DHD_ERROR(("%s : Failed to execute roam_exp_ssid_whitelist %d\n",
19501 				__FUNCTION__, err));
19502 		}
19503 	}
19504 	return err;
19505 }
19506 #endif /* GSCAN_SUPPORT || ROAMEXP_SUPPORT */
19507 #endif /* defined(OEM_ANDROID) && defined(PNO_SUPPORT) */
19508 
19509 #ifdef RSSI_MONITOR_SUPPORT
19510 int
19511 dhd_dev_set_rssi_monitor_cfg(struct net_device *dev, int start,
19512              int8 max_rssi, int8 min_rssi)
19513 {
19514 	int err;
19515 	wl_rssi_monitor_cfg_t rssi_monitor;
19516 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
19517 
19518 	rssi_monitor.version = RSSI_MONITOR_VERSION;
19519 	rssi_monitor.max_rssi = max_rssi;
19520 	rssi_monitor.min_rssi = min_rssi;
19521 	rssi_monitor.flags = start ? 0: RSSI_MONITOR_STOP;
19522 	err = dhd_iovar(&dhd->pub, 0, "rssi_monitor", (char *)&rssi_monitor, sizeof(rssi_monitor),
19523 			NULL, 0, TRUE);
19524 	if (err < 0 && err != BCME_UNSUPPORTED) {
19525 		DHD_ERROR(("%s : Failed to execute rssi_monitor %d\n", __FUNCTION__, err));
19526 	}
19527 	return err;
19528 }
19529 #endif /* RSSI_MONITOR_SUPPORT */
19530 
19531 #ifdef DHDTCPACK_SUPPRESS
19532 int
19533 dhd_dev_set_tcpack_sup_mode_cfg(struct net_device *dev, uint8 enable)
19534 {
19535 	int err;
19536 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
19537 
19538 	err = dhd_tcpack_suppress_set(&dhd->pub, enable);
19539 	if (err != BCME_OK) {
19540 		DHD_ERROR(("%s : Failed to set tcpack_suppress mode: %d\n", __FUNCTION__, err));
19541 	}
19542 	return err;
19543 }
19544 #endif /* DHDTCPACK_SUPPRESS */
19545 
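/* Random-MAC OUI flow (descriptive): dhd_dev_cfg_rand_mac_oui() only
 * validates and caches the OUI in dhdp->rand_mac_oui; dhd_set_rand_mac_oui()
 * later pushes it to the firmware through the "pfn_macaddr" iovar.
 */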
19546 int
19547 dhd_dev_cfg_rand_mac_oui(struct net_device *dev, uint8 *oui)
19548 {
19549 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
19550 	dhd_pub_t *dhdp = &dhd->pub;
19551 
19552 	if (!dhdp || !oui) {
19553 		DHD_ERROR(("NULL POINTER : %s\n",
19554 			__FUNCTION__));
19555 		return BCME_ERROR;
19556 	}
19557 	if (ETHER_ISMULTI(oui)) {
19558 		DHD_ERROR(("Expected unicast OUI\n"));
19559 		return BCME_ERROR;
19560 	} else {
19561 		uint8 *rand_mac_oui = dhdp->rand_mac_oui;
19562 		memcpy(rand_mac_oui, oui, DOT11_OUI_LEN);
19563 		DHD_ERROR(("Random MAC OUI to be used - "MACOUIDBG"\n",
19564 			MACOUI2STRDBG(rand_mac_oui)));
19565 	}
19566 	return BCME_OK;
19567 }
19568 
19569 int
19570 dhd_set_rand_mac_oui(dhd_pub_t *dhd)
19571 {
19572 	int err;
19573 	wl_pfn_macaddr_cfg_t wl_cfg;
19574 	uint8 *rand_mac_oui = dhd->rand_mac_oui;
19575 
19576 	memset(&wl_cfg.macaddr, 0, ETHER_ADDR_LEN);
19577 	memcpy(&wl_cfg.macaddr, rand_mac_oui, DOT11_OUI_LEN);
19578 	wl_cfg.version = WL_PFN_MACADDR_CFG_VER;
19579 	if (ETHER_ISNULLADDR(&wl_cfg.macaddr)) {
19580 		wl_cfg.flags = 0;
19581 	} else {
19582 		wl_cfg.flags = (WL_PFN_MAC_OUI_ONLY_MASK | WL_PFN_SET_MAC_UNASSOC_MASK);
19583 	}
19584 
19585 	DHD_ERROR(("Setting rand mac oui to FW - "MACOUIDBG"\n",
19586 		MACOUI2STRDBG(rand_mac_oui)));
19587 
19588 	err = dhd_iovar(dhd, 0, "pfn_macaddr", (char *)&wl_cfg, sizeof(wl_cfg), NULL, 0, TRUE);
19589 	if (err < 0) {
19590 		DHD_ERROR(("%s : failed to execute pfn_macaddr %d\n", __FUNCTION__, err));
19591 	}
19592 	return err;
19593 }
19594 
19595 #if defined(RTT_SUPPORT) && defined(WL_CFG80211)
19596 /* Linux wrapper to call common dhd_pno_set_cfg_gscan */
19597 int
19598 dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf)
19599 {
19600 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
19601 
19602 	return (dhd_rtt_set_cfg(&dhd->pub, buf));
19603 }
19604 
19605 int
19606 dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt)
19607 {
19608 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
19609 
19610 	return (dhd_rtt_stop(&dhd->pub, mac_list, mac_cnt));
19611 }
19612 
19613 int
19614 dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx, dhd_rtt_compl_noti_fn noti_fn)
19615 {
19616 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
19617 
19618 	return (dhd_rtt_register_noti_callback(&dhd->pub, ctx, noti_fn));
19619 }
19620 
19621 int
19622 dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn)
19623 {
19624 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
19625 
19626 	return (dhd_rtt_unregister_noti_callback(&dhd->pub, noti_fn));
19627 }
19628 
19629 int
19630 dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa)
19631 {
19632 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
19633 
19634 	return (dhd_rtt_capability(&dhd->pub, capa));
19635 }
19636 
19637 int
19638 dhd_dev_rtt_avail_channel(struct net_device *dev, wifi_channel_info *channel_info)
19639 {
19640 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
19641 	return (dhd_rtt_avail_channel(&dhd->pub, channel_info));
19642 }
19643 
19644 int
19645 dhd_dev_rtt_enable_responder(struct net_device *dev, wifi_channel_info *channel_info)
19646 {
19647 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
19648 	return (dhd_rtt_enable_responder(&dhd->pub, channel_info));
19649 }
19650 
19651 int dhd_dev_rtt_cancel_responder(struct net_device *dev)
19652 {
19653 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
19654 	return (dhd_rtt_cancel_responder(&dhd->pub));
19655 }
19656 
19657 #endif /* RTT_SUPPORT && WL_CFG80211 */
19658 #if defined(PKT_FILTER_SUPPORT) && defined(APF)
19659 static void _dhd_apf_lock_local(dhd_info_t *dhd)
19660 {
19661 	if (dhd) {
19662 		mutex_lock(&dhd->dhd_apf_mutex);
19663 	}
19664 }
19665 
19666 static void _dhd_apf_unlock_local(dhd_info_t *dhd)
19667 {
19668 	if (dhd) {
19669 		mutex_unlock(&dhd->dhd_apf_mutex);
19670 	}
19671 }
19672 
19673 static int
19674 __dhd_apf_add_filter(struct net_device *ndev, uint32 filter_id,
19675 	u8* program, uint32 program_len)
19676 {
19677 	dhd_info_t *dhd = DHD_DEV_INFO(ndev);
19678 	dhd_pub_t *dhdp = &dhd->pub;
19679 	wl_pkt_filter_t * pkt_filterp;
19680 	wl_apf_program_t *apf_program;
19681 	char *buf;
19682 	u32 cmd_len, buf_len;
19683 	int ifidx, ret;
19684 	char cmd[] = "pkt_filter_add";
19685 
19686 	ifidx = dhd_net2idx(dhd, ndev);
19687 	if (ifidx == DHD_BAD_IF) {
19688 		DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
19689 		return -ENODEV;
19690 	}
19691 
19692 	cmd_len = sizeof(cmd);
19693 
19694 	/* Bail out if program_len exceeds the maximum supported size
19695 	 * or if the program pointer is NULL.
19696 	 */
19697 	if ((program_len > WL_APF_PROGRAM_MAX_SIZE) || (program == NULL)) {
19698 		DHD_ERROR(("%s Invalid program_len: %d, program: %pK\n",
19699 				__FUNCTION__, program_len, program));
19700 		return -EINVAL;
19701 	}
19702 	buf_len = cmd_len + WL_PKT_FILTER_FIXED_LEN +
19703 		WL_APF_PROGRAM_FIXED_LEN + program_len;
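	/* The iovar buffer is laid out as:
	 *   "pkt_filter_add\0" | wl_pkt_filter_t header | APF program bytes
	 * which is exactly what the memcpy sequence below assembles.
	 */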
19704 
19705 	buf = MALLOCZ(dhdp->osh, buf_len);
19706 	if (unlikely(!buf)) {
19707 		DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len));
19708 		return -ENOMEM;
19709 	}
19710 
19711 	memcpy(buf, cmd, cmd_len);
19712 
19713 	pkt_filterp = (wl_pkt_filter_t *) (buf + cmd_len);
19714 	pkt_filterp->id = htod32(filter_id);
19715 	pkt_filterp->negate_match = htod32(FALSE);
19716 	pkt_filterp->type = htod32(WL_PKT_FILTER_TYPE_APF_MATCH);
19717 
19718 	apf_program = &pkt_filterp->u.apf_program;
19719 	apf_program->version = htod16(WL_APF_INTERNAL_VERSION);
19720 	apf_program->instr_len = htod16(program_len);
19721 	memcpy(apf_program->instrs, program, program_len);
19722 
19723 	ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx);
19724 	if (unlikely(ret)) {
19725 		DHD_ERROR(("%s: failed to add APF filter, id=%d, ret=%d\n",
19726 			__FUNCTION__, filter_id, ret));
19727 	}
19728 
19729 	if (buf) {
19730 		MFREE(dhdp->osh, buf, buf_len);
19731 	}
19732 	return ret;
19733 }
19734 
19735 static int
19736 __dhd_apf_config_filter(struct net_device *ndev, uint32 filter_id,
19737 	uint32 mode, uint32 enable)
19738 {
19739 	dhd_info_t *dhd = DHD_DEV_INFO(ndev);
19740 	dhd_pub_t *dhdp = &dhd->pub;
19741 	wl_pkt_filter_enable_t * pkt_filterp;
19742 	char *buf;
19743 	u32 cmd_len, buf_len;
19744 	int ifidx, ret;
19745 	char cmd[] = "pkt_filter_enable";
19746 
19747 	ifidx = dhd_net2idx(dhd, ndev);
19748 	if (ifidx == DHD_BAD_IF) {
19749 		DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
19750 		return -ENODEV;
19751 	}
19752 
19753 	cmd_len = sizeof(cmd);
19754 	buf_len = cmd_len + sizeof(*pkt_filterp);
19755 
19756 	buf = MALLOCZ(dhdp->osh, buf_len);
19757 	if (unlikely(!buf)) {
19758 		DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len));
19759 		return -ENOMEM;
19760 	}
19761 
19762 	memcpy(buf, cmd, cmd_len);
19763 
19764 	pkt_filterp = (wl_pkt_filter_enable_t *) (buf + cmd_len);
19765 	pkt_filterp->id = htod32(filter_id);
19766 	pkt_filterp->enable = htod32(enable);
19767 
19768 	ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx);
19769 	if (unlikely(ret)) {
19770 		DHD_ERROR(("%s: failed to enable APF filter, id=%d, ret=%d\n",
19771 			__FUNCTION__, filter_id, ret));
19772 		goto exit;
19773 	}
19774 
19775 	ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_mode", dhd_master_mode,
19776 		WLC_SET_VAR, TRUE, ifidx);
19777 	if (unlikely(ret)) {
19778 		DHD_ERROR(("%s: failed to set APF filter mode, id=%d, ret=%d\n",
19779 			__FUNCTION__, filter_id, ret));
19780 	}
19781 
19782 exit:
19783 	if (buf) {
19784 		MFREE(dhdp->osh, buf, buf_len);
19785 	}
19786 	return ret;
19787 }
19788 
19789 static int
19790 __dhd_apf_delete_filter(struct net_device *ndev, uint32 filter_id)
19791 {
19792 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
19793 	dhd_pub_t *dhdp = &dhd->pub;
19794 	int ifidx, ret;
19795 
19796 	ifidx = dhd_net2idx(dhd, ndev);
19797 	if (ifidx == DHD_BAD_IF) {
19798 		DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
19799 		return -ENODEV;
19800 	}
19801 
19802 	ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_delete",
19803 		htod32(filter_id), WLC_SET_VAR, TRUE, ifidx);
19804 	if (unlikely(ret)) {
19805 		DHD_ERROR(("%s: failed to delete APF filter, id=%d, ret=%d\n",
19806 			__FUNCTION__, filter_id, ret));
19807 	}
19808 
19809 	return ret;
19810 }
19811 
19812 void dhd_apf_lock(struct net_device *dev)
19813 {
19814 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
19815 	_dhd_apf_lock_local(dhd);
19816 }
19817 
19818 void dhd_apf_unlock(struct net_device *dev)
19819 {
19820 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
19821 	_dhd_apf_unlock_local(dhd);
19822 }
19823 
19824 int
19825 dhd_dev_apf_get_version(struct net_device *ndev, uint32 *version)
19826 {
19827 	dhd_info_t *dhd = DHD_DEV_INFO(ndev);
19828 	dhd_pub_t *dhdp = &dhd->pub;
19829 	int ifidx, ret;
19830 
19831 	if (!FW_SUPPORTED(dhdp, apf)) {
19832 		DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__));
19833 
19834 		/*
19835 		 * Notify Android framework that APF is not supported by setting
19836 		 * version as zero.
19837 		 */
19838 		*version = 0;
19839 		return BCME_OK;
19840 	}
19841 
19842 	ifidx = dhd_net2idx(dhd, ndev);
19843 	if (ifidx == DHD_BAD_IF) {
19844 		DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
19845 		return -ENODEV;
19846 	}
19847 
19848 	ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_ver", version,
19849 		WLC_GET_VAR, FALSE, ifidx);
19850 	if (unlikely(ret)) {
19851 		DHD_ERROR(("%s: failed to get APF version, ret=%d\n",
19852 			__FUNCTION__, ret));
19853 	}
19854 
19855 	return ret;
19856 }
19857 
19858 int
19859 dhd_dev_apf_get_max_len(struct net_device *ndev, uint32 *max_len)
19860 {
19861 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
19862 	dhd_pub_t *dhdp = &dhd->pub;
19863 	int ifidx, ret;
19864 
19865 	if (!FW_SUPPORTED(dhdp, apf)) {
19866 		DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__));
19867 		*max_len = 0;
19868 		return BCME_OK;
19869 	}
19870 
19871 	ifidx = dhd_net2idx(dhd, ndev);
19872 	if (ifidx == DHD_BAD_IF) {
19873 		DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
19874 		return -ENODEV;
19875 	}
19876 
19877 	ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_size_limit", max_len,
19878 		WLC_GET_VAR, FALSE, ifidx);
19879 	if (unlikely(ret)) {
19880 		DHD_ERROR(("%s: failed to get APF size limit, ret=%d\n",
19881 			__FUNCTION__, ret));
19882 	}
19883 
19884 	return ret;
19885 }
19886 
19887 int
19888 dhd_dev_apf_add_filter(struct net_device *ndev, u8* program,
19889 	uint32 program_len)
19890 {
19891 	dhd_info_t *dhd = DHD_DEV_INFO(ndev);
19892 	dhd_pub_t *dhdp = &dhd->pub;
19893 	int ret;
19894 
19895 	DHD_APF_LOCK(ndev);
19896 
19897 	/* delete, if filter already exists */
19898 	if (dhdp->apf_set) {
19899 		ret = __dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID);
19900 		if (unlikely(ret)) {
19901 			goto exit;
19902 		}
19903 		dhdp->apf_set = FALSE;
19904 	}
19905 
19906 	ret = __dhd_apf_add_filter(ndev, PKT_FILTER_APF_ID, program, program_len);
19907 	if (ret) {
19908 		goto exit;
19909 	}
19910 	dhdp->apf_set = TRUE;
19911 
19912 	if (dhdp->in_suspend && dhdp->apf_set && !(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
19913 		/* Driver is still in (early) suspend state; re-enable the APF filter */
19914 		ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
19915 			PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE);
19916 	}
19917 exit:
19918 	DHD_APF_UNLOCK(ndev);
19919 
19920 	return ret;
19921 }
19922 
19923 int
19924 dhd_dev_apf_enable_filter(struct net_device *ndev)
19925 {
19926 	dhd_info_t *dhd = DHD_DEV_INFO(ndev);
19927 	dhd_pub_t *dhdp = &dhd->pub;
19928 	int ret = 0;
19929 	bool nan_dp_active = false;
19930 
19931 	DHD_APF_LOCK(ndev);
19932 #ifdef WL_NAN
19933 	nan_dp_active = wl_cfgnan_is_dp_active(ndev);
19934 #endif /* WL_NAN */
19935 	if (dhdp->apf_set && (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE) &&
19936 		!nan_dp_active)) {
19937 		ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
19938 			PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE);
19939 	}
19940 
19941 	DHD_APF_UNLOCK(ndev);
19942 
19943 	return ret;
19944 }
19945 
19946 int
19947 dhd_dev_apf_disable_filter(struct net_device *ndev)
19948 {
19949 	dhd_info_t *dhd = DHD_DEV_INFO(ndev);
19950 	dhd_pub_t *dhdp = &dhd->pub;
19951 	int ret = 0;
19952 
19953 	DHD_APF_LOCK(ndev);
19954 
19955 	if (dhdp->apf_set) {
19956 		ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
19957 			PKT_FILTER_MODE_FORWARD_ON_MATCH, FALSE);
19958 	}
19959 
19960 	DHD_APF_UNLOCK(ndev);
19961 
19962 	return ret;
19963 }
19964 
19965 int
19966 dhd_dev_apf_delete_filter(struct net_device *ndev)
19967 {
19968 	dhd_info_t *dhd = DHD_DEV_INFO(ndev);
19969 	dhd_pub_t *dhdp = &dhd->pub;
19970 	int ret = 0;
19971 
19972 	DHD_APF_LOCK(ndev);
19973 
19974 	if (dhdp->apf_set) {
19975 		ret = __dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID);
19976 		if (!ret) {
19977 			dhdp->apf_set = FALSE;
19978 		}
19979 	}
19980 
19981 	DHD_APF_UNLOCK(ndev);
19982 
19983 	return ret;
19984 }
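/*
 * Illustrative call sequence (a sketch only; the real caller is the
 * cfg80211 vendor-command/HAL glue, which is not part of this file):
 *
 *   uint32 ver, max_len;
 *   dhd_dev_apf_get_version(ndev, &ver);       // ver == 0 => no APF support
 *   dhd_dev_apf_get_max_len(ndev, &max_len);
 *   if (ver && program_len <= max_len)
 *       dhd_dev_apf_add_filter(ndev, program, program_len);
 *   ...
 *   dhd_dev_apf_enable_filter(ndev);    // on suspend
 *   dhd_dev_apf_disable_filter(ndev);   // on resume
 *   dhd_dev_apf_delete_filter(ndev);    // on teardown
 *
 * All of these take DHD_APF_LOCK() internally, so callers need no extra
 * locking around individual calls.
 */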
19985 #endif /* PKT_FILTER_SUPPORT && APF */
19986 
19987 #if defined(OEM_ANDROID)
19988 static void dhd_hang_process(struct work_struct *work_data)
19989 {
19990 	struct net_device *dev;
19991 #ifdef IFACE_HANG_FORCE_DEV_CLOSE
19992 	struct net_device *ndev;
19993 	uint8 i = 0;
19994 #endif /* IFACE_HANG_FORCE_DEV_CLOSE */
19995 	struct dhd_info *dhd;
19996 	/* Ignore compiler warnings due to -Werror=cast-qual */
19997 	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
19998 	dhd = container_of(work_data, dhd_info_t, dhd_hang_process_work);
19999 	GCC_DIAGNOSTIC_POP();
20000 
20001 	if (!dhd || !dhd->iflist[0])
20002 		return;
20003 	dev = dhd->iflist[0]->net;
20004 
20005 	if (dev) {
20006 #if defined(WL_WIRELESS_EXT)
20007 		wl_iw_send_priv_event(dev, "HANG");
20008 #endif
20009 #if defined(WL_CFG80211)
20010 		wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
20011 #endif
20012 	}
20013 #ifdef IFACE_HANG_FORCE_DEV_CLOSE
20014 	/*
20015 	 * For HW2, dev_close needs to be done so the upper layer can
20016 	 * recover after a hang. For Interposer, skip dev_close so that
20017 	 * dhd iovars can still be used to take a socramdump after the
20018 	 * crash; also skip for HW4, where handling of the hang event
20019 	 * is different.
20020 	 */
20021 
20022 	rtnl_lock();
20023 	for (i = 0; i < DHD_MAX_IFS; i++) {
20024 		ndev = dhd->iflist[i] ? dhd->iflist[i]->net : NULL;
20025 		if (ndev && (ndev->flags & IFF_UP)) {
20026 			DHD_ERROR(("ndev->name : %s dev close\n",
20027 					ndev->name));
20028 			dev_close(ndev);
20029 		}
20030 	}
20031 	rtnl_unlock();
20032 #endif /* IFACE_HANG_FORCE_DEV_CLOSE */
20033 }
20034 
20035 #if defined(CONFIG_ARCH_EXYNOS) && defined(BCMPCIE)
20036 extern dhd_pub_t *link_recovery;
20037 void dhd_host_recover_link(void)
20038 {
20039 	DHD_ERROR(("****** %s ******\n", __FUNCTION__));
20040 	link_recovery->hang_reason = HANG_REASON_PCIE_LINK_DOWN_RC_DETECT;
20041 	dhd_bus_set_linkdown(link_recovery, TRUE);
20042 	dhd_os_send_hang_message(link_recovery);
20043 }
20044 EXPORT_SYMBOL(dhd_host_recover_link);
20045 #endif /* CONFIG_ARCH_EXYNOS && BCMPCIE */
20046 
20047 #ifdef DHD_DETECT_CONSECUTIVE_MFG_HANG
20048 #define MAX_CONSECUTIVE_MFG_HANG_COUNT 2
20049 #endif /* DHD_DETECT_CONSECUTIVE_MFG_HANG */
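/*
 * dhd_os_send_hang_message() is the single funnel for reporting a firmware
 * hang: it returns early when hang reporting is disabled, when a memdump is
 * already scheduled, or when the driver is not READY; otherwise it marks
 * hang_was_sent and schedules dhd_hang_process_work, which raises the HANG
 * event through wireless extensions and/or cfg80211.
 */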
20050 int dhd_os_send_hang_message(dhd_pub_t *dhdp)
20051 {
20052 	int ret = 0;
20053 	dhd_info_t *dhd_info = NULL;
20054 #ifdef WL_CFG80211
20055 	struct net_device *primary_ndev;
20056 	struct bcm_cfg80211 *cfg;
20057 #endif /* WL_CFG80211 */
20058 
20059 	if (!dhdp) {
20060 		DHD_ERROR(("%s: dhdp is null\n", __FUNCTION__));
20061 		return -EINVAL;
20062 	}
20063 
20064 	dhd_info = (dhd_info_t *)dhdp->info;
20065 	BCM_REFERENCE(dhd_info);
20066 
20067 #if defined(WLAN_ACCEL_BOOT)
20068 	if (!dhd_info->wl_accel_force_reg_on) {
20069 		DHD_ERROR(("%s: set force reg on\n", __FUNCTION__));
20070 		dhd_info->wl_accel_force_reg_on = TRUE;
20071 	}
20072 #endif /* WLAN_ACCEL_BOOT */
20073 
20074 	if (!dhdp->hang_report) {
20075 		DHD_ERROR(("%s: hang_report is disabled\n", __FUNCTION__));
20076 		return BCME_ERROR;
20077 	}
20078 
20079 #if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT)
20080 	if (dhd_info->scheduled_memdump) {
20081 		DHD_ERROR_RLMT(("[DUMP]:%s, memdump in progress. return\n", __FUNCTION__));
20082 		dhdp->hang_was_pending = 1;
20083 		return BCME_OK;
20084 	}
20085 #endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT */
20086 
20087 #ifdef WL_CFG80211
20088 	primary_ndev = dhd_linux_get_primary_netdev(dhdp);
20089 	if (!primary_ndev) {
20090 		DHD_ERROR(("%s: Cannot find primary netdev\n", __FUNCTION__));
20091 		return -ENODEV;
20092 	}
20093 	cfg = wl_get_cfg(primary_ndev);
20094 	if (!cfg) {
20095 		DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__));
20096 		return -EINVAL;
20097 	}
20098 
20099 	/* Skip sending HANG event to framework if driver is not ready */
20100 	if (!wl_get_drv_status(cfg, READY, primary_ndev)) {
20101 		DHD_ERROR(("%s: device is not ready\n", __FUNCTION__));
20102 		return -ENODEV;
20103 	}
20104 #endif /* WL_CFG80211 */
20105 
20106 #if defined(DHD_HANG_SEND_UP_TEST)
20107 	if (dhdp->req_hang_type) {
20108 		DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
20109 			__FUNCTION__, dhdp->req_hang_type));
20110 		dhdp->req_hang_type = 0;
20111 	}
20112 #endif /* DHD_HANG_SEND_UP_TEST */
20113 
20114 	if (!dhdp->hang_was_sent) {
20115 #ifdef DHD_DETECT_CONSECUTIVE_MFG_HANG
20116 		if (dhdp->op_mode & DHD_FLAG_MFG_MODE) {
20117 			dhdp->hang_count++;
20118 			if (dhdp->hang_count >= MAX_CONSECUTIVE_MFG_HANG_COUNT) {
20119 				DHD_ERROR(("%s, Consecutive hang from Dongle :%u\n",
20120 					__FUNCTION__, dhdp->hang_count));
20121 				BUG_ON(1);
20122 			}
20123 		}
20124 #endif /* DHD_DETECT_CONSECUTIVE_MFG_HANG */
20125 #ifdef DHD_DEBUG_UART
20126 		/* If the PCIe link is broken, execute the debug UART application
20127 		 * to gather ramdump data from the dongle via UART
20128 		 */
20129 		if (!dhdp->info->duart_execute) {
20130 			dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
20131 					(void *)dhdp, DHD_WQ_WORK_DEBUG_UART_DUMP,
20132 					dhd_debug_uart_exec_rd, DHD_WQ_WORK_PRIORITY_HIGH);
20133 		}
20134 #endif	/* DHD_DEBUG_UART */
20135 		dhdp->hang_was_sent = 1;
20136 #ifdef BT_OVER_SDIO
20137 		dhdp->is_bt_recovery_required = TRUE;
20138 #endif
20139 		schedule_work(&dhdp->info->dhd_hang_process_work);
20140 		DHD_ERROR(("%s: Event HANG sent up due to re=%d te=%d s=%d\n", __FUNCTION__,
20141 			dhdp->rxcnt_timeout, dhdp->txcnt_timeout, dhdp->busstate));
20142 		printf("%s\n", info_string);
20143 		printf("MAC %pM\n", &dhdp->mac);
20144 	}
20145 	return ret;
20146 }
20147 
20148 int net_os_send_hang_message(struct net_device *dev)
20149 {
20150 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
20151 	int ret = 0;
20152 
20153 	if (dhd) {
20154 		/* Report FW problem when enabled */
20155 		if (dhd->pub.hang_report) {
20156 #ifdef BT_OVER_SDIO
20157 			if (netif_running(dev)) {
20158 #endif /* BT_OVER_SDIO */
20159 				ret = dhd_os_send_hang_message(&dhd->pub);
20160 #ifdef BT_OVER_SDIO
20161 			}
20162 			DHD_ERROR(("%s: HANG -> Reset BT\n", __FUNCTION__));
20163 			bcmsdh_btsdio_process_dhd_hang_notification(!netif_running(dev));
20164 #endif /* BT_OVER_SDIO */
20165 		} else {
20166 			DHD_ERROR(("%s: FW HANG ignored (for testing purpose) and not sent up\n",
20167 				__FUNCTION__));
20168 		}
20169 	}
20170 	return ret;
20171 }
20172 
20173 int net_os_send_hang_message_reason(struct net_device *dev, const char *string_num)
20174 {
20175 	dhd_info_t *dhd = NULL;
20176 	dhd_pub_t *dhdp = NULL;
20177 	int reason;
20178 
20179 	dhd = DHD_DEV_INFO(dev);
20180 	if (dhd) {
20181 		dhdp = &dhd->pub;
20182 	}
20183 
20184 	if (!dhd || !dhdp) {
20185 		return 0;
20186 	}
20187 
20188 	reason = bcm_strtoul(string_num, NULL, 0);
20189 	DHD_INFO(("%s: Enter, reason=0x%x\n", __FUNCTION__, reason));
20190 
20191 	if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) {
20192 		reason = 0;
20193 	}
20194 
20195 	dhdp->hang_reason = reason;
20196 
20197 	return net_os_send_hang_message(dev);
20198 }
20199 #endif /* OEM_ANDROID */
20200 
20201 int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned long delay_msec)
20202 {
20203 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
20204 	return wifi_platform_set_power(dhd->adapter, on, delay_msec);
20205 }
20206 
20207 int dhd_wifi_platform_set_power(dhd_pub_t *pub, bool on)
20208 {
20209 	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
20210 	unsigned long delay_msec = on ? WIFI_TURNON_DELAY : WIFI_TURNOFF_DELAY;
20211 	return wifi_platform_set_power(dhd->adapter, on, delay_msec);
20212 }
20213 
20214 bool dhd_force_country_change(struct net_device *dev)
20215 {
20216 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
20217 
20218 	if (dhd && dhd->pub.up)
20219 		return dhd->pub.force_country_change;
20220 	return FALSE;
20221 }
20222 
20223 void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
20224 	wl_country_t *cspec)
20225 {
20226 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
20227 #if defined(DHD_BLOB_EXISTENCE_CHECK)
20228 	if (!dhd->pub.is_blob)
20229 #endif /* DHD_BLOB_EXISTENCE_CHECK */
20230 	{
20231 #if defined(CUSTOM_COUNTRY_CODE)
20232 		get_customized_country_code(dhd->adapter, country_iso_code, cspec,
20233 			dhd->pub.dhd_cflags);
20234 #else
20235 		get_customized_country_code(dhd->adapter, country_iso_code, cspec);
20236 #endif /* CUSTOM_COUNTRY_CODE */
20237 	}
20238 #if defined(DHD_BLOB_EXISTENCE_CHECK) && !defined(CUSTOM_COUNTRY_CODE)
20239 	else {
20240 		/* Replace the ccode with XZ if the ccode is an undefined country */
20241 		if (strncmp(country_iso_code, "", WLC_CNTRY_BUF_SZ) == 0) {
20242 			strlcpy(country_iso_code, "XZ", WLC_CNTRY_BUF_SZ);
20243 			strlcpy(cspec->country_abbrev, country_iso_code, WLC_CNTRY_BUF_SZ);
20244 			strlcpy(cspec->ccode, country_iso_code, WLC_CNTRY_BUF_SZ);
20245 			DHD_ERROR(("%s: ccode change to %s\n", __FUNCTION__, country_iso_code));
20246 		}
20247 	}
20248 #endif /* DHD_BLOB_EXISTENCE_CHECK && !CUSTOM_COUNTRY_CODE */
20249 
20250 #ifdef KEEP_JP_REGREV
20251 /* XXX Needed by customer's request */
20252 	if (strncmp(country_iso_code, "JP", 3) == 0) {
20253 #if defined(DHD_BLOB_EXISTENCE_CHECK)
20254 		if (dhd->pub.is_blob) {
20255 			if (strncmp(dhd->pub.vars_ccode, "J1", 3) == 0) {
20256 				memcpy(cspec->ccode, dhd->pub.vars_ccode,
20257 					sizeof(dhd->pub.vars_ccode));
20258 			}
20259 		} else
20260 #endif /* DHD_BLOB_EXISTENCE_CHECK */
20261 		{
20262 			if (strncmp(dhd->pub.vars_ccode, "JP", 3) == 0) {
20263 				cspec->rev = dhd->pub.vars_regrev;
20264 			}
20265 		}
20266 	}
20267 #endif /* KEEP_JP_REGREV */
20268 	BCM_REFERENCE(dhd);
20269 }
20270 
20271 void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify)
20272 {
20273 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
20274 #ifdef WL_CFG80211
20275 	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
20276 #endif
20277 
20278 	if (dhd && dhd->pub.up) {
20279 		memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
20280 #ifdef WL_CFG80211
20281 		wl_update_wiphybands(cfg, notify);
20282 #endif
20283 	}
20284 }
20285 
20286 void dhd_bus_band_set(struct net_device *dev, uint band)
20287 {
20288 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
20289 #ifdef WL_CFG80211
20290 	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
20291 #endif
20292 	if (dhd && dhd->pub.up) {
20293 #ifdef WL_CFG80211
20294 		wl_update_wiphybands(cfg, true);
20295 #endif
20296 	}
20297 }
20298 
20299 int dhd_net_set_fw_path(struct net_device *dev, char *fw)
20300 {
20301 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
20302 
20303 	if (!fw || fw[0] == '\0')
20304 		return -EINVAL;
20305 
20306 	strlcpy(dhd->fw_path, fw, sizeof(dhd->fw_path));
20307 
20308 #if defined(OEM_ANDROID) && defined(SOFTAP)
20309 	if (strstr(fw, "apsta") != NULL) {
20310 		DHD_INFO(("GOT APSTA FIRMWARE\n"));
20311 		ap_fw_loaded = TRUE;
20312 	} else {
20313 		DHD_INFO(("GOT STA FIRMWARE\n"));
20314 		ap_fw_loaded = FALSE;
20315 	}
20316 #endif /* defined(OEM_ANDROID) && defined(SOFTAP) */
20317 	return 0;
20318 }
20319 
20320 void dhd_net_if_lock(struct net_device *dev)
20321 {
20322 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
20323 	dhd_net_if_lock_local(dhd);
20324 }
20325 
20326 void dhd_net_if_unlock(struct net_device *dev)
20327 {
20328 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
20329 	dhd_net_if_unlock_local(dhd);
20330 }
20331 
20332 static void dhd_net_if_lock_local(dhd_info_t *dhd)
20333 {
20334 #if defined(OEM_ANDROID)
20335 	if (dhd)
20336 		mutex_lock(&dhd->dhd_net_if_mutex);
20337 #endif
20338 }
20339 
20340 static void dhd_net_if_unlock_local(dhd_info_t *dhd)
20341 {
20342 #if defined(OEM_ANDROID)
20343 	if (dhd)
20344 		mutex_unlock(&dhd->dhd_net_if_mutex);
20345 #endif
20346 }
20347 
20348 static void dhd_suspend_lock(dhd_pub_t *pub)
20349 {
20350 #if defined(OEM_ANDROID)
20351 	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
20352 	if (dhd)
20353 		mutex_lock(&dhd->dhd_suspend_mutex);
20354 #endif
20355 }
20356 
20357 static void dhd_suspend_unlock(dhd_pub_t *pub)
20358 {
20359 #if defined(OEM_ANDROID)
20360 	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
20361 	if (dhd)
20362 		mutex_unlock(&dhd->dhd_suspend_mutex);
20363 #endif
20364 }
20365 
20366 unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub)
20367 {
20368 	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
20369 	unsigned long flags = 0;
20370 
20371 	if (dhd) {
20372 		flags = osl_spin_lock(&dhd->dhd_lock);
20373 	}
20374 
20375 	return flags;
20376 }
20377 
20378 void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags)
20379 {
20380 	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
20381 
20382 	if (dhd) {
20383 		osl_spin_unlock(&dhd->dhd_lock, flags);
20384 	}
20385 }
20386 
20387 void *
20388 dhd_os_dbgring_lock_init(osl_t *osh)
20389 {
20390 	struct mutex *mtx = NULL;
20391 
20392 	mtx = MALLOCZ(osh, sizeof(*mtx));
20393 	if (mtx)
20394 		mutex_init(mtx);
20395 
20396 	return mtx;
20397 }
20398 
20399 void
20400 dhd_os_dbgring_lock_deinit(osl_t *osh, void *mutex)
20401 {
20402 	struct mutex *mtx = mutex;
20403 
20404 	if (mtx) {
20405 		mutex_destroy(mtx);
20406 		MFREE(osh, mtx, sizeof(struct mutex));
20407 	}
20408 }
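/*
 * Minimal usage sketch for the opaque debug-ring lock (illustrative; the
 * real users live in the dhd_dbg_ring code, not in this file):
 *
 *   void *lock = dhd_os_dbgring_lock_init(osh);
 *   if (lock) {
 *       mutex_lock((struct mutex *)lock);
 *       ...touch ring state...
 *       mutex_unlock((struct mutex *)lock);
 *       dhd_os_dbgring_lock_deinit(osh, lock);
 *   }
 */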
20409 
20410 static int
20411 dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
20412 {
20413 	return (atomic_read(&dhd->pend_8021x_cnt));
20414 }
20415 
20416 #define MAX_WAIT_FOR_8021X_TX	100
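/* Worst-case wait in dhd_wait_pend8021x() below: MAX_WAIT_FOR_8021X_TX
 * iterations of ~10 ms each, i.e. roughly one second before the pending
 * 802.1X counter is forcibly reset to zero.
 */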
20417 
20418 int
20419 dhd_wait_pend8021x(struct net_device *dev)
20420 {
20421 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
20422 	int timeout = msecs_to_jiffies(10);
20423 	int ntimes = MAX_WAIT_FOR_8021X_TX;
20424 	int pend = dhd_get_pend_8021x_cnt(dhd);
20425 
20426 	while (ntimes && pend) {
20427 		if (pend) {
20428 			set_current_state(TASK_INTERRUPTIBLE);
20429 			schedule_timeout(timeout);
20430 			set_current_state(TASK_RUNNING);
20431 			ntimes--;
20432 		}
20433 		pend = dhd_get_pend_8021x_cnt(dhd);
20434 	}
20435 	if (ntimes == 0)
20436 	{
20437 		atomic_set(&dhd->pend_8021x_cnt, 0);
20438 		WL_MSG(dev->name, "TIMEOUT\n");
20439 	}
20440 	return pend;
20441 }
20442 
20443 #if defined(BCM_ROUTER_DHD) || defined(DHD_DEBUG)
20444 int write_file(const char * file_name, uint32 flags, uint8 *buf, int size)
20445 {
20446 	int ret = 0;
20447 	struct file *fp = NULL;
20448 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
20449 	mm_segment_t old_fs;
20450 #endif
20451 	loff_t pos = 0;
20452 
20453 	/* change to KERNEL_DS address limit */
20454 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
20455 	old_fs = get_fs();
20456 	set_fs(KERNEL_DS);
20457 #endif
20458 
20459 	/* open file to write */
20460 	fp = filp_open(file_name, flags, 0664);
20461 	if (IS_ERR(fp)) {
20462 		DHD_ERROR(("open file error, err = %ld\n", PTR_ERR(fp)));
20463 		goto exit;
20464 	}
20465 
20466 	/* Write buf to file */
20467 	ret = vfs_write(fp, buf, size, &pos);
20468 	if (ret < 0) {
20469 		DHD_ERROR(("write file error, err = %d\n", ret));
20470 		goto exit;
20471 	}
20472 
20473 	/* Sync file from filesystem to physical media */
20474 	ret = vfs_fsync(fp, 0);
20475 	if (ret < 0) {
20476 		DHD_ERROR(("sync file error, error = %d\n", ret));
20477 		goto exit;
20478 	}
20479 	ret = BCME_OK;
20480 
20481 exit:
20482 	/* close file before return */
20483 	if (!IS_ERR(fp))
20484 		filp_close(fp, current->files);
20485 
20486 	/* restore previous address limit */
20487 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
20488 	set_fs(old_fs);
20489 #endif
20490 
20491 	return ret;
20492 }
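/*
 * Example (illustrative only; the path is hypothetical and mirrors how
 * write_dump_to_file() below uses this helper):
 *
 *   ret = write_file("/data/mem_dump", O_CREAT | O_WRONLY | O_SYNC,
 *                    buf, size);
 *
 * On kernels >= 5.10 the get_fs()/set_fs(KERNEL_DS) bracketing above is
 * compiled out, since set_fs() was removed from the kernel.
 */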
20493 #endif /* BCM_ROUTER_DHD || DHD_DEBUG */
20494 
20495 #ifdef DHD_DEBUG
20496 static void
20497 dhd_convert_memdump_type_to_str(uint32 type, char *buf, size_t buf_len, int substr_type)
20498 {
20499 	char *type_str = NULL;
20500 
20501 	switch (type) {
20502 		case DUMP_TYPE_RESUMED_ON_TIMEOUT:
20503 			type_str = "resumed_on_timeout";
20504 			break;
20505 		case DUMP_TYPE_D3_ACK_TIMEOUT:
20506 			type_str = "D3_ACK_timeout";
20507 			break;
20508 		case DUMP_TYPE_DONGLE_TRAP:
20509 			type_str = "Dongle_Trap";
20510 			break;
20511 		case DUMP_TYPE_MEMORY_CORRUPTION:
20512 			type_str = "Memory_Corruption";
20513 			break;
20514 		case DUMP_TYPE_PKTID_AUDIT_FAILURE:
20515 			type_str = "PKTID_AUDIT_Fail";
20516 			break;
20517 		case DUMP_TYPE_PKTID_INVALID:
20518 			type_str = "PKTID_INVALID";
20519 			break;
20520 		case DUMP_TYPE_SCAN_TIMEOUT:
20521 			type_str = "SCAN_timeout";
20522 			break;
20523 		case DUMP_TYPE_SCAN_BUSY:
20524 			type_str = "SCAN_Busy";
20525 			break;
20526 		case DUMP_TYPE_BY_SYSDUMP:
20527 			if (substr_type == CMD_UNWANTED) {
20528 				type_str = "BY_SYSDUMP_FORUSER_unwanted";
20529 			} else if (substr_type == CMD_DISCONNECTED) {
20530 				type_str = "BY_SYSDUMP_FORUSER_disconnected";
20531 			} else {
20532 				type_str = "BY_SYSDUMP_FORUSER";
20533 			}
20534 			break;
20535 		case DUMP_TYPE_BY_LIVELOCK:
20536 			type_str = "BY_LIVELOCK";
20537 			break;
20538 		case DUMP_TYPE_AP_LINKUP_FAILURE:
20539 			type_str = "BY_AP_LINK_FAILURE";
20540 			break;
20541 		case DUMP_TYPE_AP_ABNORMAL_ACCESS:
20542 			type_str = "INVALID_ACCESS";
20543 			break;
20544 		case DUMP_TYPE_RESUMED_ON_TIMEOUT_RX:
20545 			type_str = "ERROR_RX_TIMED_OUT";
20546 			break;
20547 		case DUMP_TYPE_RESUMED_ON_TIMEOUT_TX:
20548 			type_str = "ERROR_TX_TIMED_OUT";
20549 			break;
20550 		case DUMP_TYPE_CFG_VENDOR_TRIGGERED:
20551 			type_str = "CFG_VENDOR_TRIGGERED";
20552 			break;
20553 		case DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR:
20554 			type_str = "BY_INVALID_RING_RDWR";
20555 			break;
20556 		case DUMP_TYPE_IFACE_OP_FAILURE:
20557 			type_str = "BY_IFACE_OP_FAILURE";
20558 			break;
20559 		case DUMP_TYPE_TRANS_ID_MISMATCH:
20560 			type_str = "BY_TRANS_ID_MISMATCH";
20561 			break;
20562 #ifdef DEBUG_DNGL_INIT_FAIL
20563 		case DUMP_TYPE_DONGLE_INIT_FAILURE:
20564 			type_str = "DONGLE_INIT_FAIL";
20565 			break;
20566 #endif /* DEBUG_DNGL_INIT_FAIL */
20567 #ifdef SUPPORT_LINKDOWN_RECOVERY
20568 		case DUMP_TYPE_READ_SHM_FAIL:
20569 			type_str = "READ_SHM_FAIL";
20570 			break;
20571 #endif /* SUPPORT_LINKDOWN_RECOVERY */
20572 		case DUMP_TYPE_DONGLE_HOST_EVENT:
20573 			type_str = "BY_DONGLE_HOST_EVENT";
20574 			break;
20575 		case DUMP_TYPE_SMMU_FAULT:
20576 			type_str = "SMMU_FAULT";
20577 			break;
20578 #ifdef DHD_ERPOM
20579 		case DUMP_TYPE_DUE_TO_BT:
20580 			type_str = "DUE_TO_BT";
20581 			break;
20582 #endif /* DHD_ERPOM */
20583 		case DUMP_TYPE_BY_USER:
20584 			type_str = "BY_USER";
20585 			break;
20586 		case DUMP_TYPE_LOGSET_BEYOND_RANGE:
20587 			type_str = "LOGSET_BEYOND_RANGE";
20588 			break;
20589 		case DUMP_TYPE_CTO_RECOVERY:
20590 			type_str = "CTO_RECOVERY";
20591 			break;
20592 		case DUMP_TYPE_SEQUENTIAL_PRIVCMD_ERROR:
20593 			type_str = "SEQUENTIAL_PRIVCMD_ERROR";
20594 			break;
20595 		case DUMP_TYPE_PROXD_TIMEOUT:
20596 			type_str = "PROXD_TIMEOUT";
20597 			break;
20598 		case DUMP_TYPE_INBAND_DEVICE_WAKE_FAILURE:
20599 			type_str = "INBAND_DEVICE_WAKE_FAILURE";
20600 			break;
20601 		case DUMP_TYPE_PKTID_POOL_DEPLETED:
20602 			type_str = "PKTID_POOL_DEPLETED";
20603 			break;
20604 		case DUMP_TYPE_ESCAN_SYNCID_MISMATCH:
20605 			type_str = "ESCAN_SYNCID_MISMATCH";
20606 			break;
20607 		case DUMP_TYPE_INVALID_SHINFO_NRFRAGS:
20608 			type_str = "INVALID_SHINFO_NRFRAGS";
20609 			break;
20610 		default:
20611 			type_str = "Unknown_type";
20612 			break;
20613 	}
20614 
20615 	strlcpy(buf, type_str, buf_len);
20616 }
20617 
20618 void
20619 dhd_get_memdump_filename(struct net_device *ndev, char *memdump_path, int len, char *fname)
20620 {
20621 	char memdump_type[DHD_MEMDUMP_TYPE_STR_LEN];
20622 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
20623 	dhd_pub_t *dhdp = &dhd->pub;
20624 
20625 	/* Init file name */
20626 	memset(memdump_path, 0, len);
20627 	memset(memdump_type, 0, DHD_MEMDUMP_TYPE_STR_LEN);
20628 	dhd_convert_memdump_type_to_str(dhdp->memdump_type, memdump_type, DHD_MEMDUMP_TYPE_STR_LEN,
20629 		dhdp->debug_dump_subcmd);
20630 	clear_debug_dump_time(dhdp->debug_dump_time_str);
20631 	get_debug_dump_time(dhdp->debug_dump_time_str);
20632 	snprintf(memdump_path, len, "%s%s_%s_" "%s",
20633 		DHD_COMMON_DUMP_PATH, fname, memdump_type,  dhdp->debug_dump_time_str);
20634 
20635 	if (strstr(fname, "sssr_dump")) {
20636 		DHD_SSSR_PRINT_FILEPATH(dhdp, memdump_path);
20637 	} else {
20638 		DHD_ERROR(("%s: file_path = %s%s\n", __FUNCTION__,
20639 			memdump_path, FILE_NAME_HAL_TAG));
20640 	}
20641 }
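/*
 * Example resulting name (illustrative; DHD_COMMON_DUMP_PATH and the
 * timestamp come from platform configuration):
 *
 *   <DHD_COMMON_DUMP_PATH><fname>_<memdump_type>_<debug_dump_time_str>
 *   e.g. ".../mem_dump_Dongle_Trap_<time>"
 *
 * matching the "%s%s_%s_%s" snprintf format used above.
 */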
20642 
20643 int
20644 write_dump_to_file(dhd_pub_t *dhd, uint8 *buf, int size, char *fname)
20645 {
20646 	int ret = 0;
20647 	char memdump_path[DHD_MEMDUMP_PATH_STR_LEN];
20648 	char memdump_type[DHD_MEMDUMP_TYPE_STR_LEN];
20649 	uint32 file_mode;
20650 
20651 	/* Init file name */
20652 	memset(memdump_path, 0, DHD_MEMDUMP_PATH_STR_LEN);
20653 	memset(memdump_type, 0, DHD_MEMDUMP_TYPE_STR_LEN);
20654 	dhd_convert_memdump_type_to_str(dhd->memdump_type, memdump_type, DHD_MEMDUMP_TYPE_STR_LEN,
20655 			dhd->debug_dump_subcmd);
20656 	clear_debug_dump_time(dhd->debug_dump_time_str);
20657 	get_debug_dump_time(dhd->debug_dump_time_str);
20658 
20659 	snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_" "%s",
20660 		DHD_COMMON_DUMP_PATH, fname, memdump_type, dhd->debug_dump_time_str);
20661 #ifdef CUSTOMER_HW4_DEBUG
20662 	file_mode = O_CREAT | O_WRONLY | O_SYNC;
20663 #elif defined(CUSTOMER_HW2) || defined(BOARD_HIKEY)
20664 	file_mode = O_CREAT | O_WRONLY | O_SYNC;
20665 #elif defined(OEM_ANDROID) && defined(__ARM_ARCH_7A__)
20666 	file_mode = O_CREAT | O_WRONLY;
20667 #elif defined(OEM_ANDROID)
20668 	/* Extra flags O_DIRECT and O_SYNC are required for Brix Android, as we are
20669 	 * calling BUG_ON immediately after collecting the socram dump.
20670 	 * So the file write operation should directly write the contents into the
20671 	 * file instead of caching it. O_TRUNC flag ensures that file will be re-written
20672 	 * instead of appending.
20673 	 */
20674 	file_mode = O_CREAT | O_WRONLY | O_SYNC;
20675 	{
20676 		struct file *fp = filp_open(memdump_path, file_mode, 0664);
20677 		/* Check if it is live Brix image having /installmedia, else use /data */
20678 		if (IS_ERR(fp)) {
20679 			DHD_ERROR(("open file %s, try /data/\n", memdump_path));
20680 			snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_" "%s",
20681 				"/data/", fname, memdump_type,  dhd->debug_dump_time_str);
20682 		} else {
20683 			filp_close(fp, NULL);
20684 		}
20685 	}
20686 #else
20687 	file_mode = O_CREAT | O_WRONLY;
20688 #endif /* CUSTOMER_HW4_DEBUG */
20689 
20690 	/* print SOCRAM dump file path */
20691 	DHD_ERROR(("%s: file_path = %s\n", __FUNCTION__, memdump_path));
20692 
20693 #ifdef DHD_LOG_DUMP
20694 	dhd_print_buf_addr(dhd, "write_dump_to_file", buf, size);
20695 #endif /* DHD_LOG_DUMP */
20696 
20697 	/* Write file */
20698 	ret = write_file(memdump_path, file_mode, buf, size);
20699 
20700 #ifdef DHD_DUMP_MNGR
20701 	if (ret == BCME_OK) {
20702 		dhd_dump_file_manage_enqueue(dhd, memdump_path, fname);
20703 	}
20704 #endif /* DHD_DUMP_MNGR */
20705 
20706 	return ret;
20707 }
20708 #endif /* DHD_DEBUG */
20709 
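/*
 * dhd_os_wake_lock_timeout() arms the rx/ctrl timed wakelocks with the
 * timeout values accumulated via dhd_os_wake_lock_rx_timeout_enable() and
 * dhd_os_wake_lock_ctrl_timeout_enable(), clears both counters, and
 * returns the larger of the two pending values.
 */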
20710 int dhd_os_wake_lock_timeout(dhd_pub_t *pub)
20711 {
20712 	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
20713 	unsigned long flags;
20714 	int ret = 0;
20715 
20716 	if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
20717 		DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags);
20718 		ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ?
20719 			dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable;
20720 #ifdef CONFIG_HAS_WAKELOCK
20721 		if (dhd->wakelock_rx_timeout_enable)
20722 			dhd_wake_lock_timeout(&dhd->wl_rxwake,
20723 				msecs_to_jiffies(dhd->wakelock_rx_timeout_enable));
20724 		if (dhd->wakelock_ctrl_timeout_enable)
20725 			dhd_wake_lock_timeout(&dhd->wl_ctrlwake,
20726 				msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable));
20727 #endif
20728 		dhd->wakelock_rx_timeout_enable = 0;
20729 		dhd->wakelock_ctrl_timeout_enable = 0;
20730 		DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags);
20731 	}
20732 	return ret;
20733 }
20734 
20735 int net_os_wake_lock_timeout(struct net_device *dev)
20736 {
20737 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
20738 	int ret = 0;
20739 
20740 	if (dhd)
20741 		ret = dhd_os_wake_lock_timeout(&dhd->pub);
20742 	return ret;
20743 }
20744 
20745 int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val)
20746 {
20747 	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
20748 	unsigned long flags;
20749 
20750 	if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
20751 		DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags);
20752 		if (val > dhd->wakelock_rx_timeout_enable)
20753 			dhd->wakelock_rx_timeout_enable = val;
20754 		DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags);
20755 	}
20756 	return 0;
20757 }
20758 
20759 int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val)
20760 {
20761 	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
20762 	unsigned long flags;
20763 
20764 	if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
20765 		DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags);
20766 		if (val > dhd->wakelock_ctrl_timeout_enable)
20767 			dhd->wakelock_ctrl_timeout_enable = val;
20768 		DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags);
20769 	}
20770 	return 0;
20771 }
20772 
20773 int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub)
20774 {
20775 	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
20776 	unsigned long flags;
20777 
20778 	if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
20779 		DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags);
20780 		dhd->wakelock_ctrl_timeout_enable = 0;
20781 #ifdef CONFIG_HAS_WAKELOCK
20782 		if (dhd_wake_lock_active(&dhd->wl_ctrlwake))
20783 			dhd_wake_unlock(&dhd->wl_ctrlwake);
20784 #endif
20785 		DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags);
20786 	}
20787 	return 0;
20788 }
20789 
20790 int net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val)
20791 {
20792 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
20793 	int ret = 0;
20794 
20795 	if (dhd)
20796 		ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val);
20797 	return ret;
20798 }
20799 
20800 int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val)
20801 {
20802 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
20803 	int ret = 0;
20804 
20805 	if (dhd)
20806 		ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val);
20807 	return ret;
20808 }
20809 
20810 #if defined(DHD_TRACE_WAKE_LOCK)
20811 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
20812 #include <linux/hashtable.h>
20813 #else
20814 #include <linux/hash.h>
20815 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
20816 
20817 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
20818 /* Define 2^5 = 32 bucket size hash table */
20819 DEFINE_HASHTABLE(wklock_history, 5);
20820 #else
20821 /* Define 2^5 = 32 bucket size hash table */
20822 struct hlist_head wklock_history[32] = { [0 ... 31] = HLIST_HEAD_INIT };
20823 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
20824 
20825 atomic_t trace_wklock_onoff;
20826 typedef enum dhd_wklock_type {
20827 	DHD_WAKE_LOCK,
20828 	DHD_WAKE_UNLOCK,
20829 	DHD_WAIVE_LOCK,
20830 	DHD_RESTORE_LOCK
20831 } dhd_wklock_t;
20832 
20833 struct wk_trace_record {
20834 	unsigned long addr;	            /* Address of the instruction */
20835 	dhd_wklock_t lock_type;         /* lock_type */
20836 	unsigned long long counter;		/* counter information */
20837 	struct hlist_node wklock_node;  /* hash node */
20838 };
20839 
20840 static struct wk_trace_record *find_wklock_entry(unsigned long addr)
20841 {
20842 	struct wk_trace_record *wklock_info;
20843 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
20844 	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
20845 	hash_for_each_possible(wklock_history, wklock_info, wklock_node, addr)
20846 #else
20847 	struct hlist_node *entry;
20848 	int index = hash_long(addr, ilog2(ARRAY_SIZE(wklock_history)));
20849 	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
20850 	hlist_for_each_entry(wklock_info, entry, &wklock_history[index], wklock_node)
20851 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
20852 	{
20853 		GCC_DIAGNOSTIC_POP();
20854 		if (wklock_info->addr == addr) {
20855 			return wklock_info;
20856 		}
20857 	}
20858 	return NULL;
20859 }
20860 
20861 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
20862 #define HASH_ADD(hashtable, node, key) \
20863 	do { \
20864 		hash_add(hashtable, node, key); \
20865 	} while (0);
20866 #else
20867 #define HASH_ADD(hashtable, node, key) \
20868 	do { \
20869 		int index = hash_long(key, ilog2(ARRAY_SIZE(hashtable))); \
20870 		hlist_add_head(node, &hashtable[index]); \
20871 	} while (0);
20872 #endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */
20873 
20874 #define STORE_WKLOCK_RECORD(wklock_type) \
20875 	do { \
20876 		struct wk_trace_record *wklock_info = NULL; \
20877 		unsigned long func_addr = (unsigned long)__builtin_return_address(0); \
20878 		wklock_info = find_wklock_entry(func_addr); \
20879 		if (wklock_info) { \
20880 			if (wklock_type == DHD_WAIVE_LOCK || wklock_type == DHD_RESTORE_LOCK) { \
20881 				wklock_info->counter = dhd->wakelock_counter; \
20882 			} else { \
20883 				wklock_info->counter++; \
20884 			} \
20885 		} else { \
20886 			wklock_info = kzalloc(sizeof(*wklock_info), GFP_ATOMIC); \
20887 			if (!wklock_info) {\
20888 				printk("Can't allocate wk_trace_record \n"); \
20889 			} else { \
20890 				wklock_info->addr = func_addr; \
20891 				wklock_info->lock_type = wklock_type; \
20892 				if (wklock_type == DHD_WAIVE_LOCK || \
20893 						wklock_type == DHD_RESTORE_LOCK) { \
20894 					wklock_info->counter = dhd->wakelock_counter; \
20895 				} else { \
20896 					wklock_info->counter++; \
20897 				} \
20898 				HASH_ADD(wklock_history, &wklock_info->wklock_node, func_addr); \
20899 			} \
20900 		} \
20901 	} while (0);
20902 
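/*
 * Each record is keyed by __builtin_return_address(0), i.e. the call site
 * of dhd_os_wake_lock()/dhd_os_wake_unlock(), so the dump below emits one
 * line per unique caller; per the DHD_ERROR format strings, e.g.:
 *
 *   wakelock lock : dhd_start_xmit+0x1a4/0x5f0  lock_counter : 42
 *
 * (symbol and counts are illustrative).
 */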
20903 static inline void dhd_wk_lock_rec_dump(void)
20904 {
20905 	int bkt;
20906 	struct wk_trace_record *wklock_info;
20907 
20908 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
20909 	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
20910 	hash_for_each(wklock_history, bkt, wklock_info, wklock_node)
20911 #else
20912 	struct hlist_node *entry = NULL;
20913 	int max_index = ARRAY_SIZE(wklock_history);
20914 	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
20915 	for (bkt = 0; bkt < max_index; bkt++)
20916 		hlist_for_each_entry(wklock_info, entry, &wklock_history[bkt], wklock_node)
20917 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
20918 		{
20919 			GCC_DIAGNOSTIC_POP();
20920 			switch (wklock_info->lock_type) {
20921 				case DHD_WAKE_LOCK:
20922 					DHD_ERROR(("wakelock lock : %pS  lock_counter : %llu \n",
20923 						(void *)wklock_info->addr, wklock_info->counter));
20924 					break;
20925 				case DHD_WAKE_UNLOCK:
20926 					DHD_ERROR(("wakelock unlock : %pS,"
20927 						" unlock_counter : %llu \n",
20928 						(void *)wklock_info->addr, wklock_info->counter));
20929 					break;
20930 				case DHD_WAIVE_LOCK:
20931 					DHD_ERROR(("wakelock waive : %pS  before_waive : %llu \n",
20932 						(void *)wklock_info->addr, wklock_info->counter));
20933 					break;
20934 				case DHD_RESTORE_LOCK:
20935 					DHD_ERROR(("wakelock restore : %pS, after_waive : %llu \n",
20936 						(void *)wklock_info->addr, wklock_info->counter));
20937 					break;
20938 			}
20939 		}
20940 }
20941 
20942 static void dhd_wk_lock_trace_init(struct dhd_info *dhd)
20943 {
20944 	unsigned long flags;
20945 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
20946 	int i;
20947 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
20948 
20949 	DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags);
20950 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
20951 	hash_init(wklock_history);
20952 #else
20953 	for (i = 0; i < ARRAY_SIZE(wklock_history); i++)
20954 		INIT_HLIST_HEAD(&wklock_history[i]);
20955 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
20956 	DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags);
20957 	atomic_set(&trace_wklock_onoff, 1);
20958 }
20959 
20960 static void dhd_wk_lock_trace_deinit(struct dhd_info *dhd)
20961 {
20962 	int bkt;
20963 	struct wk_trace_record *wklock_info;
20964 	struct hlist_node *tmp;
20965 	unsigned long flags;
20966 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
20967 	struct hlist_node *entry = NULL;
20968 	int max_index = ARRAY_SIZE(wklock_history);
20969 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
20970 
20971 	DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags);
20972 	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
20973 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
20974 	hash_for_each_safe(wklock_history, bkt, tmp, wklock_info, wklock_node)
20975 #else
20976 	for (bkt = 0; bkt < max_index; bkt++)
20977 		hlist_for_each_entry_safe(wklock_info, entry, tmp,
20978 			&wklock_history[bkt], wklock_node)
20979 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */
20980 		{
20981 			GCC_DIAGNOSTIC_POP();
20982 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
20983 			hash_del(&wklock_info->wklock_node);
20984 #else
20985 			hlist_del_init(&wklock_info->wklock_node);
20986 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */
20987 			kfree(wklock_info);
20988 		}
20989 	DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags);
20990 }
20991 
20992 void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp)
20993 {
20994 	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
20995 	unsigned long flags;
20996 
20997 	DHD_ERROR(("DHD Printing wl_wake Lock/Unlock Record \r\n"));
20998 	DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags);
20999 	dhd_wk_lock_rec_dump();
21000 	DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags);
21001 
21002 }
21003 #else
21004 #define STORE_WKLOCK_RECORD(wklock_type)
21005 #endif /* ! DHD_TRACE_WAKE_LOCK */
21006 
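/*
 * dhd_os_wake_lock()/dhd_os_wake_unlock() reference-count a single OS
 * wakelock (wl_wifi): the lock is actually taken only on the 0 -> 1
 * counter transition and released on the 1 -> 0 transition, unless
 * wakelocks are currently waived (see dhd_os_wake_lock_waive() below).
 * Both return the updated counter.
 */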
21007 int dhd_os_wake_lock(dhd_pub_t *pub)
21008 {
21009 	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
21010 	unsigned long flags;
21011 	int ret = 0;
21012 
21013 	if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
21014 		DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags);
21015 		if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
21016 #ifdef CONFIG_HAS_WAKELOCK
21017 			dhd_wake_lock(&dhd->wl_wifi);
21018 #elif defined(BCMSDIO)
21019 			dhd_bus_dev_pm_stay_awake(pub);
21020 #endif
21021 		}
21022 #ifdef DHD_TRACE_WAKE_LOCK
21023 		if (atomic_read(&trace_wklock_onoff)) {
21024 			STORE_WKLOCK_RECORD(DHD_WAKE_LOCK);
21025 		}
21026 #endif /* DHD_TRACE_WAKE_LOCK */
21027 		dhd->wakelock_counter++;
21028 		ret = dhd->wakelock_counter;
21029 		DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags);
21030 	}
21031 
21032 	return ret;
21033 }
21034 
21035 void dhd_event_wake_lock(dhd_pub_t *pub)
21036 {
21037 	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
21038 
21039 	if (dhd) {
21040 #ifdef CONFIG_HAS_WAKELOCK
21041 		dhd_wake_lock(&dhd->wl_evtwake);
21042 #elif defined(BCMSDIO)
21043 		dhd_bus_dev_pm_stay_awake(pub);
21044 #endif
21045 	}
21046 }
21047 
21048 void
21049 dhd_pm_wake_lock_timeout(dhd_pub_t *pub, int val)
21050 {
21051 #ifdef CONFIG_HAS_WAKELOCK
21052 	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
21053 
21054 	if (dhd) {
21055 		dhd_wake_lock_timeout(&dhd->wl_pmwake, msecs_to_jiffies(val));
21056 	}
21057 #endif /* CONFIG_HAS_WAKE_LOCK */
21058 }
21059 
21060 void
21061 dhd_txfl_wake_lock_timeout(dhd_pub_t *pub, int val)
21062 {
21063 #ifdef CONFIG_HAS_WAKELOCK
21064 	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
21065 
21066 	if (dhd) {
21067 		dhd_wake_lock_timeout(&dhd->wl_txflwake, msecs_to_jiffies(val));
21068 	}
21069 #endif /* CONFIG_HAS_WAKE_LOCK */
21070 }
21071 
21072 void
21073 dhd_nan_wake_lock_timeout(dhd_pub_t *pub, int val)
21074 {
21075 #ifdef CONFIG_HAS_WAKELOCK
21076 	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
21077 
21078 	if (dhd) {
21079 		dhd_wake_lock_timeout(&dhd->wl_nanwake, msecs_to_jiffies(val));
21080 	}
21081 #endif /* CONFIG_HAS_WAKE_LOCK */
21082 }
21083 
21084 int net_os_wake_lock(struct net_device *dev)
21085 {
21086 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
21087 	int ret = 0;
21088 
21089 	if (dhd)
21090 		ret = dhd_os_wake_lock(&dhd->pub);
21091 	return ret;
21092 }
21093 
21094 int dhd_os_wake_unlock(dhd_pub_t *pub)
21095 {
21096 	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
21097 	unsigned long flags;
21098 	int ret = 0;
21099 
21100 	dhd_os_wake_lock_timeout(pub);
21101 	if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
21102 		DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags);
21103 
21104 		if (dhd->wakelock_counter > 0) {
21105 			dhd->wakelock_counter--;
21106 #ifdef DHD_TRACE_WAKE_LOCK
21107 			if (atomic_read(&trace_wklock_onoff)) {
21108 				STORE_WKLOCK_RECORD(DHD_WAKE_UNLOCK);
21109 			}
21110 #endif /* DHD_TRACE_WAKE_LOCK */
21111 			if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
21112 #ifdef CONFIG_HAS_WAKELOCK
21113 				dhd_wake_unlock(&dhd->wl_wifi);
21114 #elif defined(BCMSDIO)
21115 				dhd_bus_dev_pm_relax(pub);
21116 #endif
21117 			}
21118 			ret = dhd->wakelock_counter;
21119 		}
21120 		DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags);
21121 	}
21122 	return ret;
21123 }
21124 
21125 void dhd_event_wake_unlock(dhd_pub_t *pub)
21126 {
21127 	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
21128 
21129 	if (dhd) {
21130 #ifdef CONFIG_HAS_WAKELOCK
21131 		dhd_wake_unlock(&dhd->wl_evtwake);
21132 #elif defined(BCMSDIO)
21133 		dhd_bus_dev_pm_relax(pub);
21134 #endif
21135 	}
21136 }
21137 
21138 void dhd_pm_wake_unlock(dhd_pub_t *pub)
21139 {
21140 #ifdef CONFIG_HAS_WAKELOCK
21141 	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
21142 
21143 	if (dhd) {
21144 		/* if wl_pmwake is active, unlock it */
21145 		if (dhd_wake_lock_active(&dhd->wl_pmwake)) {
21146 			dhd_wake_unlock(&dhd->wl_pmwake);
21147 		}
21148 	}
21149 #endif /* CONFIG_HAS_WAKELOCK */
21150 }
21151 
21152 void dhd_txfl_wake_unlock(dhd_pub_t *pub)
21153 {
21154 #ifdef CONFIG_HAS_WAKELOCK
21155 	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
21156 
21157 	if (dhd) {
21158 		/* if wl_txflwake is active, unlock it */
21159 		if (dhd_wake_lock_active(&dhd->wl_txflwake)) {
21160 			dhd_wake_unlock(&dhd->wl_txflwake);
21161 		}
21162 	}
21163 #endif /* CONFIG_HAS_WAKELOCK */
21164 }
21165 
21166 void dhd_nan_wake_unlock(dhd_pub_t *pub)
21167 {
21168 #ifdef CONFIG_HAS_WAKELOCK
21169 	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
21170 
21171 	if (dhd) {
21172 		/* if wl_nanwake is active, unlock it */
21173 		if (dhd_wake_lock_active(&dhd->wl_nanwake)) {
21174 			dhd_wake_unlock(&dhd->wl_nanwake);
21175 		}
21176 	}
21177 #endif /* CONFIG_HAS_WAKELOCK */
21178 }
21179 
21180 int dhd_os_check_wakelock(dhd_pub_t *pub)
21181 {
21182 #if defined(CONFIG_HAS_WAKELOCK) || defined(BCMSDIO)
21183 	dhd_info_t *dhd;
21184 
21185 	if (!pub)
21186 		return 0;
21187 	dhd = (dhd_info_t *)(pub->info);
21188 #endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
21189 
21190 #ifdef CONFIG_HAS_WAKELOCK
21191 	/* Indicate to the SD Host to avoid going to suspend if internal locks are up */
21192 	if (dhd && (dhd_wake_lock_active(&dhd->wl_wifi) ||
21193 		(dhd_wake_lock_active(&dhd->wl_wdwake))))
21194 		return 1;
21195 #elif defined(BCMSDIO)
21196 	if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
21197 		return 1;
21198 #endif
21199 	return 0;
21200 }
21201 
21202 int
21203 dhd_os_check_wakelock_all(dhd_pub_t *pub)
21204 {
21205 #if defined(CONFIG_HAS_WAKELOCK) || defined(BCMSDIO)
21206 #if defined(CONFIG_HAS_WAKELOCK)
21207 	int l1, l2, l3, l4, l7, l8, l9, l10;
21208 	int l5 = 0, l6 = 0;
21209 	int c, lock_active;
21210 #endif /* CONFIG_HAS_WAKELOCK */
21211 	dhd_info_t *dhd;
21212 
21213 	if (!pub) {
21214 		return 0;
21215 	}
21216 	if (pub->up == 0) {
21217 		DHD_ERROR(("%s: skip as down in progress\n", __FUNCTION__));
21218 		return 0;
21219 	}
21220 	dhd = (dhd_info_t *)(pub->info);
21221 	if (!dhd) {
21222 		return 0;
21223 	}
21224 #endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
21225 
21226 #ifdef CONFIG_HAS_WAKELOCK
21227 	c = dhd->wakelock_counter;
21228 	l1 = dhd_wake_lock_active(&dhd->wl_wifi);
21229 	l2 = dhd_wake_lock_active(&dhd->wl_wdwake);
21230 	l3 = dhd_wake_lock_active(&dhd->wl_rxwake);
21231 	l4 = dhd_wake_lock_active(&dhd->wl_ctrlwake);
21232 	l7 = dhd_wake_lock_active(&dhd->wl_evtwake);
21233 #ifdef BCMPCIE_OOB_HOST_WAKE
21234 	l5 = dhd_wake_lock_active(&dhd->wl_intrwake);
21235 #endif /* BCMPCIE_OOB_HOST_WAKE */
21236 #ifdef DHD_USE_SCAN_WAKELOCK
21237 	l6 = dhd_wake_lock_active(&dhd->wl_scanwake);
21238 #endif /* DHD_USE_SCAN_WAKELOCK */
21239 	l8 = dhd_wake_lock_active(&dhd->wl_pmwake);
21240 	l9 = dhd_wake_lock_active(&dhd->wl_txflwake);
21241 	l10 = dhd_wake_lock_active(&dhd->wl_nanwake);
21242 	lock_active = (l1 || l2 || l3 || l4 || l5 || l6 || l7 || l8 || l9 || l10);
21243 
21244 	/* Indicate to the Host to avoid going to suspend if internal locks are up */
21245 	if (lock_active) {
21246 		DHD_ERROR(("%s wakelock c-%d wl-%d wd-%d rx-%d "
21247 			"ctl-%d intr-%d scan-%d evt-%d, pm-%d, txfl-%d nan-%d\n",
21248 			__FUNCTION__, c, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10));
21249 		return 1;
21250 	}
21251 #elif defined(BCMSDIO)
21252 	if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) {
21253 		return 1;
21254 	}
21255 #endif /* defined(BCMSDIO) */
21256 	return 0;
21257 }
21258 
21259 int net_os_wake_unlock(struct net_device *dev)
21260 {
21261 	dhd_info_t *dhd = DHD_DEV_INFO(dev);
21262 	int ret = 0;
21263 
21264 	if (dhd)
21265 		ret = dhd_os_wake_unlock(&dhd->pub);
21266 	return ret;
21267 }
21268 
21269 int dhd_os_wd_wake_lock(dhd_pub_t *pub)
21270 {
21271 	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
21272 	unsigned long flags;
21273 	int ret = 0;
21274 
21275 	if (dhd) {
21276 		DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags);
21277 		if (dhd->wakelock_wd_counter == 0 && !dhd->waive_wakelock) {
21278 #ifdef CONFIG_HAS_WAKELOCK
21279 			/* wakelock_wd_counter is zero: take the watchdog wakelock now */
21280 			dhd_wake_lock(&dhd->wl_wdwake);
21281 #endif
21282 		}
21283 		dhd->wakelock_wd_counter++;
21284 		ret = dhd->wakelock_wd_counter;
21285 		DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags);
21286 	}
21287 	return ret;
21288 }
21289 
21290 int dhd_os_wd_wake_unlock(dhd_pub_t *pub)
21291 {
21292 	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
21293 	unsigned long flags;
21294 	int ret = 0;
21295 
21296 	if (dhd) {
21297 		DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags);
21298 		if (dhd->wakelock_wd_counter > 0) {
21299 			dhd->wakelock_wd_counter = 0;
21300 			if (!dhd->waive_wakelock) {
21301 #ifdef CONFIG_HAS_WAKELOCK
21302 				dhd_wake_unlock(&dhd->wl_wdwake);
21303 #endif
21304 			}
21305 		}
21306 		DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags);
21307 	}
21308 	return ret;
21309 }
21310 
21311 #ifdef BCMPCIE_OOB_HOST_WAKE
21312 void
21313 dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val)
21314 {
21315 #ifdef CONFIG_HAS_WAKELOCK
21316 	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
21317 
21318 	if (dhd) {
21319 		dhd_wake_lock_timeout(&dhd->wl_intrwake, msecs_to_jiffies(val));
21320 	}
21321 #endif /* CONFIG_HAS_WAKELOCK */
21322 }
21323 
21324 void
21325 dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub)
21326 {
21327 #ifdef CONFIG_HAS_WAKELOCK
21328 	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
21329 
21330 	if (dhd) {
21331 		/* if wl_intrwake is active, unlock it */
21332 		if (dhd_wake_lock_active(&dhd->wl_intrwake)) {
21333 			dhd_wake_unlock(&dhd->wl_intrwake);
21334 		}
21335 	}
21336 #endif /* CONFIG_HAS_WAKELOCK */
21337 }
21338 #endif /* BCMPCIE_OOB_HOST_WAKE */
21339 
21340 #ifdef DHD_USE_SCAN_WAKELOCK
21341 void
21342 dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val)
21343 {
21344 #ifdef CONFIG_HAS_WAKELOCK
21345 	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
21346 
21347 	if (dhd) {
21348 		dhd_wake_lock_timeout(&dhd->wl_scanwake, msecs_to_jiffies(val));
21349 	}
21350 #endif /* CONFIG_HAS_WAKELOCK */
21351 }
21352 
21353 void
21354 dhd_os_scan_wake_unlock(dhd_pub_t *pub)
21355 {
21356 #ifdef CONFIG_HAS_WAKELOCK
21357 	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
21358 
21359 	if (dhd) {
21360 		/* if wl_scanwake is active, unlock it */
21361 		if (dhd_wake_lock_active(&dhd->wl_scanwake)) {
21362 			dhd_wake_unlock(&dhd->wl_scanwake);
21363 		}
21364 	}
21365 #endif /* CONFIG_HAS_WAKELOCK */
21366 }
21367 #endif /* DHD_USE_SCAN_WAKELOCK */
21368 
21369 /* Waive wakelocks for operations such as IOVARs issued from the suspend path.
21370  * Must be closed by a paired call to dhd_os_wake_lock_restore(). Returns the
21371  * current wakelock counter. */
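/* Typical pairing in the suspend path (sketch):
 *
 *   dhd_os_wake_lock_waive(pub);
 *   ...issue suspend-time IOVARs without bumping the wakelock...
 *   dhd_os_wake_lock_restore(pub);
 */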
21372 int dhd_os_wake_lock_waive(dhd_pub_t *pub)
21373 {
21374 	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
21375 	unsigned long flags;
21376 	int ret = 0;
21377 
21378 	if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
21379 		DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags);
21380 
21381 		/* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
21382 		if (dhd->waive_wakelock == FALSE) {
21383 #ifdef DHD_TRACE_WAKE_LOCK
21384 			if (atomic_read(&trace_wklock_onoff)) {
21385 				STORE_WKLOCK_RECORD(DHD_WAIVE_LOCK);
21386 			}
21387 #endif /* DHD_TRACE_WAKE_LOCK */
21388 			/* record current lock status */
21389 			dhd->wakelock_before_waive = dhd->wakelock_counter;
21390 			dhd->waive_wakelock = TRUE;
21391 		}
21392 		ret = dhd->wakelock_wd_counter;
21393 		DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags);
21394 	}
21395 	return ret;
21396 }
21397 
21398 int dhd_os_wake_lock_restore(dhd_pub_t *pub)
21399 {
21400 	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
21401 	unsigned long flags;
21402 	int ret = 0;
21403 
21404 	if (!dhd)
21405 		return 0;
21406 	if ((dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) == 0)
21407 		return 0;
21408 
21409 	DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags);
21410 
21411 	/* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
21412 	if (!dhd->waive_wakelock)
21413 		goto exit;
21414 
21415 	dhd->waive_wakelock = FALSE;
21416 	/* If somebody else acquired the wakelock between the waive and the
21417 	 * restore, make up for it here by calling dhd_wake_lock or pm_stay_awake;
21418 	 * likewise, if somebody released the lock in between, do the same by
21419 	 * calling dhd_wake_unlock or pm_relax. */
21420 #ifdef DHD_TRACE_WAKE_LOCK
21421 	if (atomic_read(&trace_wklock_onoff)) {
21422 		STORE_WKLOCK_RECORD(DHD_RESTORE_LOCK);
21423 	}
21424 #endif /* DHD_TRACE_WAKE_LOCK */
21425 
21426 	if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) {
21427 #ifdef CONFIG_HAS_WAKELOCK
21428 		dhd_wake_lock(&dhd->wl_wifi);
21429 #elif defined(BCMSDIO)
21430 		dhd_bus_dev_pm_stay_awake(&dhd->pub);
21431 #endif
21432 	} else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) {
21433 #ifdef CONFIG_HAS_WAKELOCK
21434 		dhd_wake_unlock(&dhd->wl_wifi);
21435 #elif defined(BCMSDIO)
21436 		dhd_bus_dev_pm_relax(&dhd->pub);
21437 #endif
21438 	}
21439 	dhd->wakelock_before_waive = 0;
21440 exit:
21441 	ret = dhd->wakelock_wd_counter;
21442 	DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags);
21443 	return ret;
21444 }
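
/* Illustrative usage sketch (not driver code): the waive/restore pair brackets
 * a suspend-path section so that wakelocks taken inside it do not block
 * suspend. Hypothetical caller, assuming a valid dhd_pub_t *pub:
 *
 *	dhd_os_wake_lock_waive(pub);
 *	... issue IOVARs / suspend-path work; interim lock activity is recorded ...
 *	dhd_os_wake_lock_restore(pub);
 */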

void dhd_os_wake_lock_init(struct dhd_info *dhd)
{
	DHD_TRACE(("%s: initialize wake_lock_counters\n", __FUNCTION__));
	dhd->wakelock_counter = 0;
	dhd->wakelock_rx_timeout_enable = 0;
	dhd->wakelock_ctrl_timeout_enable = 0;
	/* wakelocks prevent a system from going into a low power state */
#ifdef CONFIG_HAS_WAKELOCK
	/* terence 20161023: wl_wifi must not be destroyed on wlan down, otherwise a
	 * NULL pointer dereference can occur in dhd_ioctl_entry.
	 */
	dhd_wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
	dhd_wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake");
	dhd_wake_lock_init(&dhd->wl_evtwake, WAKE_LOCK_SUSPEND, "wlan_evt_wake");
	dhd_wake_lock_init(&dhd->wl_pmwake, WAKE_LOCK_SUSPEND, "wlan_pm_wake");
	dhd_wake_lock_init(&dhd->wl_txflwake, WAKE_LOCK_SUSPEND, "wlan_txfl_wake");
#ifdef BCMPCIE_OOB_HOST_WAKE
	dhd_wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake");
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
	dhd_wake_lock_init(&dhd->wl_scanwake, WAKE_LOCK_SUSPEND, "wlan_scan_wake");
#endif /* DHD_USE_SCAN_WAKELOCK */
	dhd_wake_lock_init(&dhd->wl_nanwake, WAKE_LOCK_SUSPEND, "wlan_nan_wake");
#endif /* CONFIG_HAS_WAKELOCK */
#ifdef DHD_TRACE_WAKE_LOCK
	dhd_wk_lock_trace_init(dhd);
#endif /* DHD_TRACE_WAKE_LOCK */
}

void dhd_os_wake_lock_destroy(struct dhd_info *dhd)
{
	DHD_TRACE(("%s: deinit wake_lock_counters\n", __FUNCTION__));
#ifdef CONFIG_HAS_WAKELOCK
	dhd->wakelock_counter = 0;
	dhd->wakelock_rx_timeout_enable = 0;
	dhd->wakelock_ctrl_timeout_enable = 0;
	/* terence 20161023: wl_wifi must not be destroyed on wlan down, otherwise a
	 * NULL pointer dereference can occur in dhd_ioctl_entry.
	 */
	dhd_wake_lock_unlock_destroy(&dhd->wl_rxwake);
	dhd_wake_lock_unlock_destroy(&dhd->wl_ctrlwake);
	dhd_wake_lock_unlock_destroy(&dhd->wl_evtwake);
	dhd_wake_lock_unlock_destroy(&dhd->wl_pmwake);
	dhd_wake_lock_unlock_destroy(&dhd->wl_txflwake);
#ifdef BCMPCIE_OOB_HOST_WAKE
	dhd_wake_lock_unlock_destroy(&dhd->wl_intrwake);
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
	dhd_wake_lock_unlock_destroy(&dhd->wl_scanwake);
#endif /* DHD_USE_SCAN_WAKELOCK */
	dhd_wake_lock_unlock_destroy(&dhd->wl_nanwake);
#ifdef DHD_TRACE_WAKE_LOCK
	dhd_wk_lock_trace_deinit(dhd);
#endif /* DHD_TRACE_WAKE_LOCK */
#else /* !CONFIG_HAS_WAKELOCK */
	if (dhd->wakelock_counter > 0) {
		DHD_ERROR(("%s: wake lock count=%d\n",
			__FUNCTION__, dhd->wakelock_counter));
		while (dhd_os_wake_unlock(&dhd->pub));
	}
#endif /* CONFIG_HAS_WAKELOCK */
}

bool dhd_os_check_if_up(dhd_pub_t *pub)
{
	if (!pub)
		return FALSE;
	return pub->up;
}

/* function to collect firmware, chip id and chip version info */
void dhd_set_version_info(dhd_pub_t *dhdp, char *fw)
{
	int i;

	i = snprintf(info_string, sizeof(info_string),
		"  Driver: %s\n  Firmware: %s\n  CLM: %s ", EPI_VERSION_STR, fw, clm_version);
	printf("%s\n", info_string);

	if (!dhdp)
		return;

	i = snprintf(&info_string[i], sizeof(info_string) - i,
		"\n  Chip: %x Rev %x", dhd_conf_get_chip(dhdp),
		dhd_conf_get_chiprev(dhdp));
}

int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd)
{
	int ifidx;
	int ret = 0;
	dhd_info_t *dhd = NULL;

	if (!net || !DEV_PRIV(net)) {
		DHD_ERROR(("%s invalid parameter net %p dev_priv %p\n",
			__FUNCTION__, net, DEV_PRIV(net)));
		return -EINVAL;
	}

	dhd = DHD_DEV_INFO(net);
	if (!dhd)
		return -EINVAL;

	ifidx = dhd_net2idx(dhd, net);
	if (ifidx == DHD_BAD_IF) {
		DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
		return -ENODEV;
	}

	DHD_OS_WAKE_LOCK(&dhd->pub);

	ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len);
	dhd_check_hang(net, &dhd->pub, ret);

	DHD_OS_WAKE_UNLOCK(&dhd->pub);

	return ret;
}

bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret)
{
	struct net_device *net;

	net = dhd_idx2net(dhdp, ifidx);
	if (!net) {
		DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__, ifidx));
		return -EINVAL;
	}

	return dhd_check_hang(net, dhdp, ret);
}

/* Return instance */
int dhd_get_instance(dhd_pub_t *dhdp)
{
	return dhdp->info->unit;
}

#if defined(WL_CFG80211) && defined(SUPPORT_DEEP_SLEEP)
#define MAX_TRY_CNT             5 /* Number of tries to disable deepsleep */
int dhd_deepsleep(struct net_device *dev, int flag)
{
	char iovbuf[20];
	uint powervar = 0;
	dhd_info_t *dhd;
	dhd_pub_t *dhdp;
	int cnt = 0;
	int ret = 0;

	dhd = DHD_DEV_INFO(dev);
	dhdp = &dhd->pub;

	switch (flag) {
		case 1: /* Deepsleep on */
			DHD_ERROR(("[WiFi] Deepsleep On\n"));
			/* give some time to sysioc_work before deepsleep */
			OSL_SLEEP(200);
#ifdef PKT_FILTER_SUPPORT
			/* disable pkt filter */
			dhd_enable_packet_filter(0, dhdp);
#endif /* PKT_FILTER_SUPPORT */
			/* Disable MPC */
			powervar = 0;
			ret = dhd_iovar(dhdp, 0, "mpc", (char *)&powervar, sizeof(powervar), NULL,
					0, TRUE);
			if (ret) {
				DHD_ERROR(("%s: mpc failed:%d\n", __FUNCTION__, ret));
			}
			/* Enable Deepsleep */
			powervar = 1;
			ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar, sizeof(powervar),
					NULL, 0, TRUE);
			if (ret) {
				DHD_ERROR(("%s: deepsleep failed:%d\n", __FUNCTION__, ret));
			}
			break;

		case 0: /* Deepsleep Off */
			DHD_ERROR(("[WiFi] Deepsleep Off\n"));

			/* Disable Deepsleep */
			for (cnt = 0; cnt < MAX_TRY_CNT; cnt++) {
				powervar = 0;
				ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar,
						sizeof(powervar), NULL, 0, TRUE);
				if (ret) {
					DHD_ERROR(("%s: deepsleep failed:%d\n", __FUNCTION__, ret));
				}

				ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar,
						sizeof(powervar), iovbuf, sizeof(iovbuf), FALSE);
				if (ret < 0) {
					DHD_ERROR(("%s: deepsleep status query failed, ret=%d\n",
						__FUNCTION__, ret));
				} else {
					if (!(*(int *)iovbuf)) {
						DHD_ERROR(("deepsleep mode is 0,"
							" count: %d\n", cnt));
						break;
					}
				}
			}

			/* Enable MPC */
			powervar = 1;
			ret = dhd_iovar(dhdp, 0, "mpc", (char *)&powervar, sizeof(powervar),
					NULL, 0, TRUE);
			if (ret) {
				DHD_ERROR(("%s: mpc failed:%d\n", __FUNCTION__, ret));
			}
			break;
	}

	return 0;
}
#endif /* WL_CFG80211 && SUPPORT_DEEP_SLEEP */
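
/* Note on the dhd_iovar() calling convention as used above (a sketch of the
 * existing pattern, not new API): a set passes the value in the params buffer
 * with a NULL response buffer and TRUE for the final "set" argument; a get
 * passes FALSE and supplies a response buffer that receives the integer
 * result, e.g. (hypothetical caller):
 *
 *	uint val = 0;
 *	char respbuf[16];
 *	(void)dhd_iovar(dhdp, 0, "deepsleep", (char *)&val, sizeof(val),
 *			respbuf, sizeof(respbuf), FALSE);	// query current mode
 */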

#ifdef PROP_TXSTATUS

void dhd_wlfc_plat_init(void *dhd)
{
#ifdef USE_DYNAMIC_F2_BLKSIZE
	dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
#endif /* USE_DYNAMIC_F2_BLKSIZE */
	return;
}

void dhd_wlfc_plat_deinit(void *dhd)
{
#ifdef USE_DYNAMIC_F2_BLKSIZE
	dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, sd_f2_blocksize);
#endif /* USE_DYNAMIC_F2_BLKSIZE */
	return;
}

bool dhd_wlfc_skip_fc(void *dhdp, uint8 idx)
{
#ifdef SKIP_WLFC_ON_CONCURRENT

#ifdef WL_CFG80211
	struct net_device *net = dhd_idx2net((dhd_pub_t *)dhdp, idx);
	if (net) {
		/* enable flow control in vsdb mode */
		return !(wl_cfg80211_is_concurrent_mode(net));
	}
#else
	return TRUE; /* skip flow control */
#endif /* WL_CFG80211 */

	return FALSE;
#else
	return FALSE;
#endif /* SKIP_WLFC_ON_CONCURRENT */
}
#endif /* PROP_TXSTATUS */

#ifdef BCMDBGFS
#include <linux/debugfs.h>

typedef struct dhd_dbgfs {
	struct dentry	*debugfs_dir;
	struct dentry	*debugfs_mem;
	dhd_pub_t	*dhdp;
	uint32		size;
} dhd_dbgfs_t;

dhd_dbgfs_t g_dbgfs;

extern uint32 dhd_readregl(void *bp, uint32 addr);
extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);

static int
dhd_dbg_state_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

static ssize_t
dhd_dbg_state_read(struct file *file, char __user *ubuf,
                       size_t count, loff_t *ppos)
{
	ssize_t rval;
	uint32 tmp;
	loff_t pos = *ppos;
	size_t ret;

	if (pos < 0)
		return -EINVAL;
	if (pos >= g_dbgfs.size || !count)
		return 0;
	if (count > g_dbgfs.size - pos)
		count = g_dbgfs.size - pos;

	/* XXX: The user can request any length they want, but they are getting 4 bytes */
	/* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
	tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3));

	ret = copy_to_user(ubuf, &tmp, 4);
	if (ret == count)
		return -EFAULT;

	count -= ret;
	*ppos = pos + count;
	rval = count;

	return rval;
}

static ssize_t
dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
{
	loff_t pos = *ppos;
	size_t ret;
	uint32 buf;

	if (pos < 0)
		return -EINVAL;
	if (pos >= g_dbgfs.size || !count)
		return 0;
	if (count > g_dbgfs.size - pos)
		count = g_dbgfs.size - pos;

	ret = copy_from_user(&buf, ubuf, sizeof(uint32));
	if (ret == count)
		return -EFAULT;

	/* XXX: The user can request any length they want, but they are getting 4 bytes */
	/* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
	dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf);

	return count;
}

loff_t
dhd_debugfs_lseek(struct file *file, loff_t off, int whence)
{
	loff_t pos = -1;

	switch (whence) {
		case 0:
			pos = off;
			break;
		case 1:
			pos = file->f_pos + off;
			break;
		case 2:
			pos = g_dbgfs.size - off;
	}
	return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos);
}

static const struct file_operations dhd_dbg_state_ops = {
	.read   = dhd_dbg_state_read,
	.write	= dhd_debugfs_write,
	.open   = dhd_dbg_state_open,
	.llseek	= dhd_debugfs_lseek
};
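
/* Usage sketch (hypothetical, from userspace): the "mem" node transfers exactly
 * one 4-byte register per call at a 4-byte-aligned offset, so a reader should
 * seek and read in word units. Assuming debugfs is mounted at /sys/kernel/debug:
 *
 *	uint32_t v;
 *	int fd = open("/sys/kernel/debug/dhd/mem", O_RDONLY);
 *	pread(fd, &v, 4, 0x1000);	// reads the register word at offset 0x1000
 */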

static void dhd_dbgfs_create(void)
{
	if (g_dbgfs.debugfs_dir) {
		g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir,
			NULL, &dhd_dbg_state_ops);
	}
}

void dhd_dbgfs_init(dhd_pub_t *dhdp)
{
	g_dbgfs.dhdp = dhdp;
	g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */

	g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0);
	if (IS_ERR(g_dbgfs.debugfs_dir)) {
		g_dbgfs.debugfs_dir = NULL;
		return;
	}

	dhd_dbgfs_create();

	return;
}

void dhd_dbgfs_remove(void)
{
	debugfs_remove(g_dbgfs.debugfs_mem);
	debugfs_remove(g_dbgfs.debugfs_dir);

	bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs));
}
#endif /* BCMDBGFS */

#ifdef CUSTOM_SET_CPUCORE
void dhd_set_cpucore(dhd_pub_t *dhd, int set)
{
	int e_dpc = 0, e_rxf = 0, retry_set = 0;

	if (!(dhd->chan_isvht80)) {
		DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__, dhd->chan_isvht80));
		return;
	}

	if (DPC_CPUCORE) {
		do {
			if (set == TRUE) {
				e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
					cpumask_of(DPC_CPUCORE));
			} else {
				e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
					cpumask_of(PRIMARY_CPUCORE));
			}
			if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
				DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__, e_dpc));
				return;
			}
			if (e_dpc < 0)
				OSL_SLEEP(1);
		} while (e_dpc < 0);
	}
	if (RXF_CPUCORE) {
		do {
			if (set == TRUE) {
				e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
					cpumask_of(RXF_CPUCORE));
			} else {
				e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
					cpumask_of(PRIMARY_CPUCORE));
			}
			if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
				DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf));
				return;
			}
			if (e_rxf < 0)
				OSL_SLEEP(1);
		} while (e_rxf < 0);
	}
	DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set));

	return;
}
#endif /* CUSTOM_SET_CPUCORE */
#if defined(DHD_TCP_WINSIZE_ADJUST)
static int dhd_port_list_match(int port)
{
	int i;
	for (i = 0; i < MAX_TARGET_PORTS; i++) {
		if (target_ports[i] == port)
			return 1;
	}
	return 0;
}

static void dhd_adjust_tcp_winsize(int op_mode, struct sk_buff *skb)
{
	struct iphdr *ipheader;
	struct tcphdr *tcpheader;
	uint16 win_size;
	int32 incremental_checksum;

	if (!(op_mode & DHD_FLAG_HOSTAP_MODE))
		return;
	if (skb == NULL || skb->data == NULL)
		return;

	ipheader = (struct iphdr*)(skb->data);

	if (ipheader->protocol == IPPROTO_TCP) {
		tcpheader = (struct tcphdr*) skb_pull(skb, (ipheader->ihl)<<2);
		if (tcpheader) {
			win_size = ntoh16(tcpheader->window);
			if (win_size < MIN_TCP_WIN_SIZE &&
				dhd_port_list_match(ntoh16(tcpheader->dest))) {
				incremental_checksum = ntoh16(tcpheader->check);
				incremental_checksum += win_size - win_size*WIN_SIZE_SCALE_FACTOR;
				if (incremental_checksum < 0)
					--incremental_checksum;
				tcpheader->window = hton16(win_size*WIN_SIZE_SCALE_FACTOR);
				tcpheader->check = hton16((unsigned short)incremental_checksum);
			}
		}
		skb_push(skb, (ipheader->ihl)<<2);
	}
}
#endif /* DHD_TCP_WINSIZE_ADJUST */
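
/* Note on the incremental checksum arithmetic above (stated as a sketch in the
 * RFC 1624 form, not a verified derivation): when a 16-bit field changes from
 * m to m', the one's-complement checksum can be updated without re-summing the
 * whole segment as
 *
 *	HC' = ~(~HC + ~m + m')
 *
 * The code folds the delta (win_size - win_size*WIN_SIZE_SCALE_FACTOR) into the
 * stored checksum and borrows one on underflow, which expresses the same
 * adjustment in signed arithmetic.
 */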

#ifdef DHD_MCAST_REGEN
/* Get interface specific mcast_regen configuration */
int dhd_get_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(idx < DHD_MAX_IFS);

	ifp = dhd->iflist[idx];

	return ifp->mcast_regen_bss_enable;
}

/* Set interface specific mcast_regen configuration */
int dhd_set_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx, int val)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(idx < DHD_MAX_IFS);

	ifp = dhd->iflist[idx];

	ifp->mcast_regen_bss_enable = val;

	/* Disable the rx_pkt_chain feature for the interface if the mcast_regen
	 * feature is enabled
	 */
	dhd_update_rx_pkt_chainable_state(dhdp, idx);
	return BCME_OK;
}
#endif	/* DHD_MCAST_REGEN */

/* Get interface specific ap_isolate configuration */
int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(idx < DHD_MAX_IFS);

	ifp = dhd->iflist[idx];

	return ifp->ap_isolate;
}

/* Set interface specific ap_isolate configuration */
int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(idx < DHD_MAX_IFS);

	ifp = dhd->iflist[idx];

	if (ifp)
		ifp->ap_isolate = val;

	return 0;
}

#ifdef DHD_RND_DEBUG
/*
 * XXX The filename used to store .rnd.(in/out) is defined per platform.
 * - The default path on CUSTOMER_HW4 devices is PLATFORM_PATH".rnd"
 * - The Brix platform takes the default path "/installmedia/.rnd"
 * New platforms can add their ifdefs accordingly below.
 */

#ifdef CUSTOMER_HW4_DEBUG
#define RNDINFO PLATFORM_PATH".rnd"
#elif defined(CUSTOMER_HW2) || defined(BOARD_HIKEY)
#define RNDINFO "/data/misc/wifi/.rnd"
#elif defined(OEM_ANDROID) && defined(__ARM_ARCH_7A__)
#define RNDINFO "/data/misc/wifi/.rnd"
#elif defined(OEM_ANDROID)
#define RNDINFO_LIVE "/installmedia/.rnd"
#define RNDINFO_INST "/data/.rnd"
#define RNDINFO RNDINFO_LIVE
#else /* FC19 and Others */
#define RNDINFO "/root/.rnd"
#endif /* CUSTOMER_HW4_DEBUG */

#define RND_IN RNDINFO".in"
#define RND_OUT RNDINFO".out"
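
/* Example expansion (straight preprocessor string-literal concatenation): on
 * the generic "FC19 and Others" branch, RND_IN becomes "/root/.rnd.in" and
 * RND_OUT becomes "/root/.rnd.out".
 */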

int
dhd_get_rnd_info(dhd_pub_t *dhd)
{
	struct file *fp = NULL;
	int ret = BCME_ERROR;
	char *filepath = RND_IN;
	uint32 file_mode = O_RDONLY;
	mm_segment_t old_fs;
	loff_t pos = 0;

	/* Read rnd info from the file */
	fp = filp_open(filepath, file_mode, 0);
	if (IS_ERR(fp)) {
		DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
#if defined(CONFIG_X86) && defined(OEM_ANDROID)
		/* Check if it is Live Brix Image */
		if (bcmstrstr(filepath, RNDINFO_LIVE)) {
			goto err1;
		}
		/* Try if it is Installed Brix Image */
		filepath = RNDINFO_INST".in";
		DHD_ERROR(("%s: Try File [%s]\n", __FUNCTION__, filepath));
		fp = filp_open(filepath, file_mode, 0);
		if (IS_ERR(fp)) {
			DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
			goto err1;
		}
#else /* Non Brix Android platform */
		goto err1;
#endif /* CONFIG_X86 && OEM_ANDROID */
	}

	old_fs = get_fs();
	set_fs(KERNEL_DS);

	/* Read the length, then the payload */
	ret = vfs_read(fp, (char *)&dhd->rnd_len, sizeof(dhd->rnd_len), &pos);
	if (ret < 0) {
		DHD_ERROR(("%s: rnd_len read error, ret=%d\n", __FUNCTION__, ret));
		goto err2;
	}

	dhd->rnd_buf = MALLOCZ(dhd->osh, dhd->rnd_len);
	if (!dhd->rnd_buf) {
		DHD_ERROR(("%s: MALLOC failed\n", __FUNCTION__));
		goto err2;
	}

	ret = vfs_read(fp, (char *)dhd->rnd_buf, dhd->rnd_len, &pos);
	if (ret < 0) {
		DHD_ERROR(("%s: rnd_buf read error, ret=%d\n", __FUNCTION__, ret));
		goto err3;
	}

	set_fs(old_fs);
	filp_close(fp, NULL);

	DHD_ERROR(("%s: RND read from %s\n", __FUNCTION__, filepath));
	return BCME_OK;

err3:
	MFREE(dhd->osh, dhd->rnd_buf, dhd->rnd_len);
	dhd->rnd_buf = NULL;
err2:
	set_fs(old_fs);
	filp_close(fp, NULL);
err1:
	return BCME_ERROR;
}

int
dhd_dump_rnd_info(dhd_pub_t *dhd, uint8 *rnd_buf, uint32 rnd_len)
{
	struct file *fp = NULL;
	int ret = BCME_OK;
	char *filepath = RND_OUT;
	uint32 file_mode = O_CREAT | O_WRONLY | O_SYNC;
	mm_segment_t old_fs;
	loff_t pos = 0;

	/* Open the file to write rnd info to */
	fp = filp_open(filepath, file_mode, 0664);
	if (IS_ERR(fp)) {
		DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
#if defined(CONFIG_X86) && defined(OEM_ANDROID)
		/* Check if it is Live Brix Image */
		if (bcmstrstr(filepath, RNDINFO_LIVE)) {
			goto err1;
		}
		/* Try if it is Installed Brix Image */
		filepath = RNDINFO_INST".out";
		DHD_ERROR(("%s: Try File [%s]\n", __FUNCTION__, filepath));
		fp = filp_open(filepath, file_mode, 0664);
		if (IS_ERR(fp)) {
			DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
			goto err1;
		}
#else /* Non Brix Android platform */
		goto err1;
#endif /* CONFIG_X86 && OEM_ANDROID */
	}

	old_fs = get_fs();
	set_fs(KERNEL_DS);

	/* Write the length, then the payload */
	ret = vfs_write(fp, (char *)&rnd_len, sizeof(rnd_len), &pos);
	if (ret < 0) {
		DHD_ERROR(("%s: rnd_len write error, ret=%d\n", __FUNCTION__, ret));
		goto err2;
	}

	ret = vfs_write(fp, (char *)rnd_buf, rnd_len, &pos);
	if (ret < 0) {
		DHD_ERROR(("%s: rnd_buf write error, ret=%d\n", __FUNCTION__, ret));
		goto err2;
	}

	set_fs(old_fs);
	filp_close(fp, NULL);
	DHD_ERROR(("%s: RND written to %s\n", __FUNCTION__, filepath));
	return BCME_OK;

err2:
	set_fs(old_fs);
	filp_close(fp, NULL);
err1:
	return BCME_ERROR;
}
#endif /* DHD_RND_DEBUG */

#ifdef DHD_FW_COREDUMP
void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size)
{
	dhd_dump_t *dump = NULL;
	unsigned long flags = 0;
	dhd_info_t *dhd_info = NULL;
#if defined(DHD_LOG_DUMP) && !defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
	log_dump_type_t type = DLD_BUF_TYPE_ALL;
#endif /* DHD_LOG_DUMP && !DHD_DUMP_FILE_WRITE_FROM_KERNEL */

	dhd_info = (dhd_info_t *)dhdp->info;
	dump = (dhd_dump_t *)MALLOC(dhdp->osh, sizeof(dhd_dump_t));
	if (dump == NULL) {
		DHD_ERROR(("%s: dhd dump memory allocation failed\n", __FUNCTION__));
		return;
	}
	dump->buf = buf;
	dump->bufsize = size;
#ifdef BCMPCIE
	dhd_get_hscb_info(dhdp, (void*)(&dump->hscb_buf),
			(uint32 *)(&dump->hscb_bufsize));
#else
	dump->hscb_bufsize = 0;
#endif /* BCMPCIE */

#ifdef DHD_LOG_DUMP
	dhd_print_buf_addr(dhdp, "memdump", buf, size);
#if !defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
	/* Print out buffer information */
	dhd_log_dump_buf_addr(dhdp, &type);
#endif /* !DHD_DUMP_FILE_WRITE_FROM_KERNEL */
#endif /* DHD_LOG_DUMP */

	if (dhdp->memdump_enabled == DUMP_MEMONLY) {
		BUG_ON(1);
	}

	if ((dhdp->memdump_type == DUMP_TYPE_DONGLE_INIT_FAILURE) ||
		(dhdp->memdump_type == DUMP_TYPE_DUE_TO_BT) ||
		(dhdp->memdump_type == DUMP_TYPE_SMMU_FAULT))
	{
		dhd_info->scheduled_memdump = FALSE;
		dhd_mem_dump((void *)dhdp->info, (void *)dump, 0);
		/* No need to collect debug dump for init failure */
		if (dhdp->memdump_type == DUMP_TYPE_DONGLE_INIT_FAILURE) {
			return;
		}
#ifdef DHD_LOG_DUMP
		{
			log_dump_type_t *flush_type = NULL;
			/* for dongle init fail cases, 'dhd_mem_dump' does
			 * not call 'dhd_log_dump', so call it here.
			 */
			flush_type = MALLOCZ(dhdp->osh,
				sizeof(log_dump_type_t));
			if (flush_type) {
				*flush_type = DLD_BUF_TYPE_ALL;
				DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__));
				dhd_log_dump(dhdp->info, flush_type, 0);
			}
		}
#endif /* DHD_LOG_DUMP */
		return;
	}

	dhd_info->scheduled_memdump = TRUE;

	/* The bus busy bit for mem dump will be cleared in the mem dump
	 * work item context, after the mem dump file is written
	 */
	DHD_GENERAL_LOCK(dhdp, flags);
	DHD_BUS_BUSY_SET_IN_MEMDUMP(dhdp);
	DHD_GENERAL_UNLOCK(dhdp, flags);
	DHD_ERROR(("%s: scheduling mem dump.. \n", __FUNCTION__));
	dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dump,
		DHD_WQ_WORK_SOC_RAM_DUMP, dhd_mem_dump, DHD_WQ_WORK_PRIORITY_HIGH);
}
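
/* Summary of the dispatch above (descriptive note, derived from the code): for
 * the DONGLE_INIT_FAILURE, DUE_TO_BT and SMMU_FAULT dump types, dhd_mem_dump()
 * runs synchronously in the caller's context; every other dump type is deferred
 * to the high-priority work queue, with DHD_BUS_BUSY_SET_IN_MEMDUMP held until
 * the work item finishes writing the dump.
 */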

static void
dhd_mem_dump(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	dhd_pub_t *dhdp = NULL;
	unsigned long flags = 0;

#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT)
	int ret = 0;
#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT */
	dhd_dump_t *dump = NULL;
#ifdef DHD_COREDUMP
	char pc_fn[DHD_FUNC_STR_LEN] = "\0";
	char lr_fn[DHD_FUNC_STR_LEN] = "\0";
	char *map_path = VENDOR_PATH CONFIG_BCMDHD_MAP_PATH;
	trap_t *tr;
#endif /* DHD_COREDUMP */

	DHD_ERROR(("%s: ENTER \n", __FUNCTION__));

	if (!dhd) {
		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
		return;
	}

	dhdp = &dhd->pub;
	if (!dhdp) {
		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
		return;
	}

	DHD_GENERAL_LOCK(dhdp, flags);
	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
		DHD_GENERAL_UNLOCK(dhdp, flags);
		DHD_ERROR(("%s: bus is down! can't collect mem dump. \n", __FUNCTION__));
		goto exit;
	}
	DHD_GENERAL_UNLOCK(dhdp, flags);

	dump = (dhd_dump_t *)event_info;
	if (!dump) {
		DHD_ERROR(("%s: dump is NULL\n", __FUNCTION__));
		goto exit;
	}

#ifdef DHD_SDTC_ETB_DUMP
	if (dhdp->collect_sdtc) {
		dhd_sdtc_etb_dump(dhdp);
		dhdp->collect_sdtc = FALSE;
	}
#endif /* DHD_SDTC_ETB_DUMP */

#ifdef DHD_SSSR_DUMP
	DHD_ERROR(("%s: sssr_enab=%d dhdp->sssr_inited=%d dhdp->collect_sssr=%d\n",
		__FUNCTION__, sssr_enab, dhdp->sssr_inited, dhdp->collect_sssr));
	if (sssr_enab && dhdp->sssr_inited && dhdp->collect_sssr) {
		if (fis_enab && dhdp->sssr_reg_info->rev3.fis_enab) {
			int bcmerror = dhd_bus_fis_trigger(dhdp);

			if (bcmerror == BCME_OK) {
				dhd_bus_fis_dump(dhdp);
			} else {
				DHD_ERROR(("%s: FIS trigger failed: %d\n",
					__FUNCTION__, bcmerror));
			}
		} else {
			DHD_ERROR(("%s: FIS not enabled (%d:%d), collect legacy sssr\n",
				__FUNCTION__, fis_enab, dhdp->sssr_reg_info->rev3.fis_enab));
			dhdpcie_sssr_dump(dhdp);
		}
	}
	dhdp->collect_sssr = FALSE;
#endif /* DHD_SSSR_DUMP */

#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT)
	ret = dhd_wait_for_file_dump(dhdp);
#ifdef BOARD_HIKEY
	/* For Hikey, force a kernel write of socram if the HAL dump fails */
	if (ret) {
		if (write_dump_to_file(&dhd->pub, dump->buf, dump->bufsize, "mem_dump")) {
			DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__));
		}
	}
#endif /* BOARD_HIKEY */
#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT */

#ifdef DHD_COREDUMP
	memset_s(dhdp->memdump_str, DHD_MEMDUMP_LONGSTR_LEN, 0, DHD_MEMDUMP_LONGSTR_LEN);
	dhd_convert_memdump_type_to_str(dhdp->memdump_type, dhdp->memdump_str,
		DHD_MEMDUMP_LONGSTR_LEN, dhdp->debug_dump_subcmd);
	if (dhdp->memdump_type == DUMP_TYPE_DONGLE_TRAP &&
		dhdp->dongle_trap_occured == TRUE) {
		tr = &dhdp->last_trap_info;
		dhd_lookup_map(dhdp->osh, map_path,
			ltoh32(tr->epc), pc_fn, ltoh32(tr->r14), lr_fn);
		sprintf(&dhdp->memdump_str[strlen(dhdp->memdump_str)], "_%.79s_%.79s",
				pc_fn, lr_fn);
	}
	DHD_ERROR(("%s: dump reason: %s\n", __FUNCTION__, dhdp->memdump_str));
	if (wifi_platform_set_coredump(dhd->adapter, dump->buf, dump->bufsize, dhdp->memdump_str)) {
		DHD_ERROR(("%s: writing SoC_RAM dump failed\n", __FUNCTION__));
#ifdef DHD_DEBUG_UART
		dhd->pub.memdump_success = FALSE;
#endif /* DHD_DEBUG_UART */
	}
#endif /* DHD_COREDUMP */

	/*
	 * If the kernel does not have file write access enabled
	 * then skip writing dumps to files.
	 * The dumps will be pushed to the HAL layer, which will
	 * write them into files.
	 */
#ifdef DHD_DUMP_FILE_WRITE_FROM_KERNEL

#ifdef D2H_MINIDUMP
	/* dump minidump */
	if (dhd_bus_is_minidump_enabled(dhdp)) {
		dhd_d2h_minidump(&dhd->pub);
	} else {
		DHD_ERROR(("minidump is not enabled\n"));
	}
#endif /* D2H_MINIDUMP */

	if (write_dump_to_file(&dhd->pub, dump->buf, dump->bufsize, "mem_dump")) {
		DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__));
#ifdef DHD_DEBUG_UART
		dhd->pub.memdump_success = FALSE;
#endif	/* DHD_DEBUG_UART */
	}

	if (dump->hscb_buf && dump->hscb_bufsize) {
		if (write_dump_to_file(&dhd->pub, dump->hscb_buf,
			dump->hscb_bufsize, "mem_dump_hscb")) {
			DHD_ERROR(("%s: writing HSCB dump to the file failed\n", __FUNCTION__));
#ifdef DHD_DEBUG_UART
			dhd->pub.memdump_success = FALSE;
#endif	/* DHD_DEBUG_UART */
		}
	}

#ifndef DHD_PKT_LOGGING
	clear_debug_dump_time(dhdp->debug_dump_time_str);
#endif /* !DHD_PKT_LOGGING */

	/* Call dhd_log_dump directly for debug_dump collection from the mem_dump
	 * work queue context; there is no need to schedule another work queue for
	 * log dump. In case of a user initiated DEBUG_DUMP wpa_cli command
	 * (DUMP_TYPE_BY_SYSDUMP), the cfg layer itself schedules the log_dump work
	 * queue and that path is not disturbed. If 'dhd_mem_dump' is called
	 * directly, debug_dump is not collected, as the call may come from a
	 * non-sleepable context.
	 */
#ifdef DHD_LOG_DUMP
	if (dhd->scheduled_memdump &&
		dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP) {
		log_dump_type_t *flush_type = MALLOCZ(dhdp->osh,
				sizeof(log_dump_type_t));
		if (flush_type) {
			*flush_type = DLD_BUF_TYPE_ALL;
			DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__));
			dhd_log_dump(dhd, flush_type, 0);
		}
	}
#endif /* DHD_LOG_DUMP */

	/* Before calling BUG_ON, wait for the other logs to be dumped.
	 * We cannot wait when dhd_mem_dump is called directly,
	 * as that may not be from a sleepable context.
	 */
	if (dhd->scheduled_memdump) {
		uint bitmask = 0;
		int timeleft = 0;
#ifdef DHD_SSSR_DUMP
		bitmask |= DHD_BUS_BUSY_IN_SSSRDUMP;
#endif
		if (bitmask != 0) {
			DHD_ERROR(("%s: wait to clear dhd_bus_busy_state: 0x%x\n",
				__FUNCTION__, dhdp->dhd_bus_busy_state));
			timeleft = dhd_os_busbusy_wait_bitmask(dhdp,
					&dhdp->dhd_bus_busy_state, bitmask, 0);
			if ((timeleft == 0) || (timeleft == 1)) {
				DHD_ERROR(("%s: Timed out dhd_bus_busy_state=0x%x\n",
					__FUNCTION__, dhdp->dhd_bus_busy_state));
			}
		}
	}
#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */

	if (dhd->pub.memdump_enabled == DUMP_MEMFILE_BUGON &&
#ifdef WLAN_ACCEL_BOOT
		/* BUG_ON only if wlan accel boot up is done */
		dhd->wl_accel_boot_on_done == TRUE &&
#endif /* WLAN_ACCEL_BOOT */
#ifdef DHD_LOG_DUMP
		dhd->pub.memdump_type != DUMP_TYPE_BY_SYSDUMP &&
#endif /* DHD_LOG_DUMP */
		dhd->pub.memdump_type != DUMP_TYPE_BY_USER &&
#ifdef DHD_DEBUG_UART
		dhd->pub.memdump_success == TRUE &&
#endif	/* DHD_DEBUG_UART */
#ifdef DNGL_EVENT_SUPPORT
		dhd->pub.memdump_type != DUMP_TYPE_DONGLE_HOST_EVENT &&
#endif /* DNGL_EVENT_SUPPORT */
		dhd->pub.memdump_type != DUMP_TYPE_CFG_VENDOR_TRIGGERED) {
#ifdef SHOW_LOGTRACE
		/* Wait till logtrace context is flushed */
		dhd_flush_logtrace_process(dhd);
#endif /* SHOW_LOGTRACE */

#ifdef BTLOG
		/* Wait till bt_log_dispatcher_work finishes */
		cancel_work_sync(&dhd->bt_log_dispatcher_work);
#endif /* BTLOG */

#ifdef EWP_EDL
		cancel_delayed_work_sync(&dhd->edl_dispatcher_work);
#endif

		printf("%s\n", info_string);
		printf("MAC %pM\n", &dhdp->mac);
		DHD_ERROR(("%s: call BUG_ON \n", __FUNCTION__));
//		BUG_ON(1);
	}

exit:
	if (dump) {
		MFREE(dhd->pub.osh, dump, sizeof(dhd_dump_t));
	}
	DHD_GENERAL_LOCK(dhdp, flags);
	DHD_BUS_BUSY_CLEAR_IN_MEMDUMP(&dhd->pub);
	dhd_os_busbusy_wake(dhdp);
	DHD_GENERAL_UNLOCK(dhdp, flags);
	dhd->scheduled_memdump = FALSE;

#ifdef OEM_ANDROID
	if (dhdp->hang_was_pending) {
		DHD_ERROR(("%s: Send pending HANG event...\n", __FUNCTION__));
		dhd_os_send_hang_message(dhdp);
		dhdp->hang_was_pending = 0;
	}
#endif /* OEM_ANDROID */
	DHD_ERROR(("%s: EXIT \n", __FUNCTION__));

	return;
}
#endif /* DHD_FW_COREDUMP */
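
/* Descriptive note (derived from the gating expression above): the crash path
 * is taken only for DUMP_MEMFILE_BUGON and never for user-, sysdump- or
 * vendor-triggered dumps; as built here the BUG_ON(1) itself is commented out,
 * so the branch currently only flushes the dispatchers and prints the
 * version/MAC banner.
 */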

#ifdef D2H_MINIDUMP
void
dhd_d2h_minidump(dhd_pub_t *dhdp)
{
	char d2h_minidump[128];
	dhd_dma_buf_t *minidump_buf;

	minidump_buf = dhd_prot_get_minidump_buf(dhdp);
	if (minidump_buf->va == NULL) {
		DHD_ERROR(("%s: minidump_buf is NULL\n", __FUNCTION__));
		return;
	}

	/* Init file name */
	memset(d2h_minidump, 0, sizeof(d2h_minidump));
	snprintf(d2h_minidump, sizeof(d2h_minidump), "%s", "d2h_minidump");

	if (write_dump_to_file(dhdp, (uint8 *)minidump_buf->va, minidump_buf->len, d2h_minidump)) {
		DHD_ERROR(("%s: failed to dump d2h_minidump to file\n", __FUNCTION__));
	}
}
#endif /* D2H_MINIDUMP */

#ifdef DHD_SSSR_DUMP
uint
dhd_sssr_dig_buf_size(dhd_pub_t *dhdp)
{
	uint dig_buf_size = 0;

	/* SSSR register information structures v0 and v1 share most fields except dig_mem */
	switch (dhdp->sssr_reg_info->rev2.version) {
		case SSSR_REG_INFO_VER_3 :
			/* intentional fall through */
		case SSSR_REG_INFO_VER_2 :
			if ((dhdp->sssr_reg_info->rev2.length >
			 OFFSETOF(sssr_reg_info_v2_t, dig_mem_info)) &&
			 dhdp->sssr_reg_info->rev2.dig_mem_info.dig_sr_size) {
				dig_buf_size = dhdp->sssr_reg_info->rev2.dig_mem_info.dig_sr_size;
			}
			break;
		case SSSR_REG_INFO_VER_1 :
			if (dhdp->sssr_reg_info->rev1.vasip_regs.vasip_sr_size) {
				dig_buf_size = dhdp->sssr_reg_info->rev1.vasip_regs.vasip_sr_size;
			} else if ((dhdp->sssr_reg_info->rev1.length >
			 OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
			 dhdp->sssr_reg_info->rev1.dig_mem_info.dig_sr_size) {
				dig_buf_size = dhdp->sssr_reg_info->rev1.dig_mem_info.dig_sr_size;
			}
			break;
		case SSSR_REG_INFO_VER_0 :
			if (dhdp->sssr_reg_info->rev0.vasip_regs.vasip_sr_size) {
				dig_buf_size = dhdp->sssr_reg_info->rev0.vasip_regs.vasip_sr_size;
			}
			break;
		default :
			DHD_ERROR(("invalid sssr_reg_ver"));
			return BCME_UNSUPPORTED;
	}

	return dig_buf_size;
}

uint
dhd_sssr_dig_buf_addr(dhd_pub_t *dhdp)
{
	uint dig_buf_addr = 0;

	/* SSSR register information structures v0 and v1 share most fields except dig_mem */
	switch (dhdp->sssr_reg_info->rev2.version) {
		case SSSR_REG_INFO_VER_3 :
			/* intentional fall through */
		case SSSR_REG_INFO_VER_2 :
			if ((dhdp->sssr_reg_info->rev2.length >
			 OFFSETOF(sssr_reg_info_v2_t, dig_mem_info)) &&
			 dhdp->sssr_reg_info->rev2.dig_mem_info.dig_sr_size) {
				dig_buf_addr = dhdp->sssr_reg_info->rev2.dig_mem_info.dig_sr_addr;
			}
			break;
		case SSSR_REG_INFO_VER_1 :
			if (dhdp->sssr_reg_info->rev1.vasip_regs.vasip_sr_size) {
				dig_buf_addr = dhdp->sssr_reg_info->rev1.vasip_regs.vasip_sr_addr;
			} else if ((dhdp->sssr_reg_info->rev1.length >
			 OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
			 dhdp->sssr_reg_info->rev1.dig_mem_info.dig_sr_size) {
				dig_buf_addr = dhdp->sssr_reg_info->rev1.dig_mem_info.dig_sr_addr;
			}
			break;
		case SSSR_REG_INFO_VER_0 :
			if (dhdp->sssr_reg_info->rev0.vasip_regs.vasip_sr_size) {
				dig_buf_addr = dhdp->sssr_reg_info->rev0.vasip_regs.vasip_sr_addr;
			}
			break;
		default :
			DHD_ERROR(("invalid sssr_reg_ver"));
			return BCME_UNSUPPORTED;
	}

	return dig_buf_addr;
}

uint
dhd_sssr_mac_buf_size(dhd_pub_t *dhdp, uint8 core_idx)
{
	uint mac_buf_size = 0;
	uint8 num_d11cores;

	num_d11cores = dhd_d11_slices_num_get(dhdp);

	/* SSSR register information structures v0 and v1 share most fields except dig_mem */
	if (core_idx < num_d11cores) {
		switch (dhdp->sssr_reg_info->rev2.version) {
			case SSSR_REG_INFO_VER_3 :
				/* intentional fall through */
			case SSSR_REG_INFO_VER_2 :
				mac_buf_size = dhdp->sssr_reg_info->rev2.mac_regs[core_idx].sr_size;
				break;
			case SSSR_REG_INFO_VER_1 :
				mac_buf_size = dhdp->sssr_reg_info->rev1.mac_regs[core_idx].sr_size;
				break;
			case SSSR_REG_INFO_VER_0 :
				mac_buf_size = dhdp->sssr_reg_info->rev0.mac_regs[core_idx].sr_size;
				break;
			default :
				DHD_ERROR(("invalid sssr_reg_ver"));
				return BCME_UNSUPPORTED;
		}
	}

	return mac_buf_size;
}

uint
dhd_sssr_mac_xmtaddress(dhd_pub_t *dhdp, uint8 core_idx)
{
	uint xmtaddress = 0;
	uint8 num_d11cores;

	num_d11cores = dhd_d11_slices_num_get(dhdp);

	if (core_idx < num_d11cores) {
		switch (dhdp->sssr_reg_info->rev2.version) {
			case SSSR_REG_INFO_VER_3 :
				/* intentional fall through */
			case SSSR_REG_INFO_VER_2 :
				xmtaddress = dhdp->sssr_reg_info->rev2.
					mac_regs[core_idx].base_regs.xmtaddress;
				break;
			case SSSR_REG_INFO_VER_1 :
				xmtaddress = dhdp->sssr_reg_info->rev1.
					mac_regs[core_idx].base_regs.xmtaddress;
				break;
			case SSSR_REG_INFO_VER_0 :
				xmtaddress = dhdp->sssr_reg_info->rev0.
					mac_regs[core_idx].base_regs.xmtaddress;
				break;
			default :
				DHD_ERROR(("invalid sssr_reg_ver"));
				return BCME_UNSUPPORTED;
		}
	}

	return xmtaddress;
}

uint
dhd_sssr_mac_xmtdata(dhd_pub_t *dhdp, uint8 core_idx)
{
	uint xmtdata = 0;
	uint8 num_d11cores;

	num_d11cores = dhd_d11_slices_num_get(dhdp);

	if (core_idx < num_d11cores) {
		switch (dhdp->sssr_reg_info->rev2.version) {
			case SSSR_REG_INFO_VER_3 :
				/* intentional fall through */
			case SSSR_REG_INFO_VER_2 :
				xmtdata = dhdp->sssr_reg_info->rev2.
					mac_regs[core_idx].base_regs.xmtdata;
				break;
			case SSSR_REG_INFO_VER_1 :
				xmtdata = dhdp->sssr_reg_info->rev1.
					mac_regs[core_idx].base_regs.xmtdata;
				break;
			case SSSR_REG_INFO_VER_0 :
				xmtdata = dhdp->sssr_reg_info->rev0.
					mac_regs[core_idx].base_regs.xmtdata;
				break;
			default :
				DHD_ERROR(("invalid sssr_reg_ver"));
				return BCME_UNSUPPORTED;
		}
	}

	return xmtdata;
}

#ifdef DHD_SSSR_DUMP_BEFORE_SR
int
dhd_sssr_dump_dig_buf_before(void *dev, const void *user_buf, uint32 len)
{
	dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
	dhd_pub_t *dhdp = &dhd_info->pub;
	int pos = 0, ret = BCME_ERROR;
	uint dig_buf_size = 0;

	dig_buf_size = dhd_sssr_dig_buf_size(dhdp);

	if (dhdp->sssr_dig_buf_before && (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
		ret = dhd_export_debug_data((char *)dhdp->sssr_dig_buf_before,
			NULL, user_buf, dig_buf_size, &pos);
	}
	return ret;
}

int
dhd_sssr_dump_d11_buf_before(void *dev, const void *user_buf, uint32 len, int core)
{
	dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
	dhd_pub_t *dhdp = &dhd_info->pub;
	int pos = 0, ret = BCME_ERROR;

	if (dhdp->sssr_d11_before[core] &&
		dhdp->sssr_d11_outofreset[core] &&
		(dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
		ret = dhd_export_debug_data((char *)dhdp->sssr_d11_before[core],
			NULL, user_buf, len, &pos);
	}
	return ret;
}
#endif /* DHD_SSSR_DUMP_BEFORE_SR */

int
dhd_sssr_dump_dig_buf_after(void *dev, const void *user_buf, uint32 len)
{
	dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
	dhd_pub_t *dhdp = &dhd_info->pub;
	int pos = 0, ret = BCME_ERROR;
	uint dig_buf_size = 0;

	dig_buf_size = dhd_sssr_dig_buf_size(dhdp);

	if (dhdp->sssr_dig_buf_after) {
		ret = dhd_export_debug_data((char *)dhdp->sssr_dig_buf_after,
			NULL, user_buf, dig_buf_size, &pos);
	}
	return ret;
}

int
dhd_sssr_dump_d11_buf_after(void *dev, const void *user_buf, uint32 len, int core)
{
	dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
	dhd_pub_t *dhdp = &dhd_info->pub;
	int pos = 0, ret = BCME_ERROR;

	if (dhdp->sssr_d11_after[core] &&
		dhdp->sssr_d11_outofreset[core]) {
		ret = dhd_export_debug_data((char *)dhdp->sssr_d11_after[core],
			NULL, user_buf, len, &pos);
	}
	return ret;
}

void
dhd_sssr_dump_to_file(dhd_info_t *dhdinfo)
{
	dhd_info_t *dhd = dhdinfo;
	dhd_pub_t *dhdp;
	int i;
#ifdef DHD_SSSR_DUMP_BEFORE_SR
	char before_sr_dump[128];
#endif /* DHD_SSSR_DUMP_BEFORE_SR */
	char after_sr_dump[128];
	unsigned long flags = 0;
	uint dig_buf_size = 0;
	uint8 num_d11cores = 0;
	uint d11_buf_size = 0;

	DHD_ERROR(("%s: ENTER \n", __FUNCTION__));

	if (!dhd) {
		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
		return;
	}

	dhdp = &dhd->pub;

	DHD_GENERAL_LOCK(dhdp, flags);
	DHD_BUS_BUSY_SET_IN_SSSRDUMP(dhdp);
	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
		DHD_GENERAL_UNLOCK(dhdp, flags);
		DHD_ERROR(("%s: bus is down! can't collect sssr dump. \n", __FUNCTION__));
		goto exit;
	}
	DHD_GENERAL_UNLOCK(dhdp, flags);

	num_d11cores = dhd_d11_slices_num_get(dhdp);

	for (i = 0; i < num_d11cores; i++) {
		/* Init file name */
#ifdef DHD_SSSR_DUMP_BEFORE_SR
		memset(before_sr_dump, 0, sizeof(before_sr_dump));
#endif /* DHD_SSSR_DUMP_BEFORE_SR */
		memset(after_sr_dump, 0, sizeof(after_sr_dump));

#ifdef DHD_SSSR_DUMP_BEFORE_SR
		snprintf(before_sr_dump, sizeof(before_sr_dump), "%s_%d_%s",
			"sssr_dump_core", i, "before_SR");
#endif /* DHD_SSSR_DUMP_BEFORE_SR */
		snprintf(after_sr_dump, sizeof(after_sr_dump), "%s_%d_%s",
			"sssr_dump_core", i, "after_SR");

		d11_buf_size = dhd_sssr_mac_buf_size(dhdp, i);

#ifdef DHD_SSSR_DUMP_BEFORE_SR
		if (dhdp->sssr_d11_before[i] && dhdp->sssr_d11_outofreset[i] &&
			(dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
			if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_before[i],
				d11_buf_size, before_sr_dump)) {
				DHD_ERROR(("%s: writing SSSR MAIN dump before to the file failed\n",
					__FUNCTION__));
			}
		}
#endif /* DHD_SSSR_DUMP_BEFORE_SR */

		if (dhdp->sssr_d11_after[i] && dhdp->sssr_d11_outofreset[i]) {
			if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_after[i],
				d11_buf_size, after_sr_dump)) {
				DHD_ERROR(("%s: writing SSSR AUX dump after to the file failed\n",
					__FUNCTION__));
			}
		}
	}

	dig_buf_size = dhd_sssr_dig_buf_size(dhdp);

#ifdef DHD_SSSR_DUMP_BEFORE_SR
	if (dhdp->sssr_dig_buf_before && (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
		if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_dig_buf_before,
			dig_buf_size, "sssr_dump_dig_before_SR")) {
			DHD_ERROR(("%s: writing SSSR Dig dump before to the file failed\n",
				__FUNCTION__));
		}
	}
#endif /* DHD_SSSR_DUMP_BEFORE_SR */

	if (dhdp->sssr_dig_buf_after) {
		if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_dig_buf_after,
			dig_buf_size, "sssr_dump_dig_after_SR")) {
			DHD_ERROR(("%s: writing SSSR Dig VASIP dump after to the file failed\n",
			 __FUNCTION__));
		}
	}

exit:
	DHD_GENERAL_LOCK(dhdp, flags);
	DHD_BUS_BUSY_CLEAR_IN_SSSRDUMP(dhdp);
	dhd_os_busbusy_wake(dhdp);
	DHD_GENERAL_UNLOCK(dhdp, flags);
}

void
dhd_write_sssr_dump(dhd_pub_t *dhdp, uint32 dump_mode)
{
#if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
	dhdp->sssr_dump_mode = dump_mode;
#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */

	/*
	 * If the kernel does not have file write access enabled
	 * then skip writing dumps to files.
	 * The dumps will be pushed to the HAL layer, which will
	 * write them into files.
	 */
#if !defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
	return;
#else
	/*
	 * dhd_mem_dump -> dhd_sssr_dump -> dhd_write_sssr_dump
	 * Without a workqueue -
	 * DUMP_TYPE_DONGLE_INIT_FAILURE/DUMP_TYPE_DUE_TO_BT/DUMP_TYPE_SMMU_FAULT:
	 * these are called from their own handlers, not in interrupt context.
	 * With a workqueue - all other DUMP_TYPEs: dhd_mem_dump is called in the
	 * workqueue. Thus there is no need to dump SSSR in a separate workqueue.
	 */
	DHD_ERROR(("%s: writing sssr dump to file... \n", __FUNCTION__));
	dhd_sssr_dump_to_file(dhdp->info);
#endif /* !DHD_DUMP_FILE_WRITE_FROM_KERNEL */
}
#endif /* DHD_SSSR_DUMP */

#ifdef DHD_SDTC_ETB_DUMP
void
dhd_sdtc_etb_dump(dhd_pub_t *dhd)
{
	etb_info_t etb_info;
	uint8 *sdtc_etb_dump;
	uint8 *sdtc_etb_mempool;
	uint etb_dump_len;
	int ret = 0;

	if (!dhd->sdtc_etb_inited) {
		DHD_ERROR(("%s, SDTC ETB dump not supported\n", __FUNCTION__));
		return;
	}

	bzero(&etb_info, sizeof(etb_info));

	if ((ret = dhd_bus_get_etb_info(dhd, dhd->etb_addr_info.etbinfo_addr, &etb_info))) {
		DHD_ERROR(("%s: failed to get etb info %d\n", __FUNCTION__, ret));
		return;
	}

	if (etb_info.read_bytes == 0) {
		DHD_ERROR(("%s: ETB is of zero size, hence do not collect SDTC ETB\n",
			__FUNCTION__));
		return;
	}

	DHD_ERROR(("%s etb_info ver:%d len:%d rwp:%d etb_full:%d etb:addr:0x%x, len:%d\n",
		__FUNCTION__, etb_info.version, etb_info.len,
		etb_info.read_write_p, etb_info.etb_full,
		etb_info.addr, etb_info.read_bytes));

	/*
	 * etb mempool format = etb_info + etb
	 */
	etb_dump_len = etb_info.read_bytes + sizeof(etb_info);
	if (etb_dump_len > DHD_SDTC_ETB_MEMPOOL_SIZE) {
		DHD_ERROR(("%s: etb_dump_len %d is more than the allocated %d, hence cannot collect\n",
			__FUNCTION__, etb_dump_len, DHD_SDTC_ETB_MEMPOOL_SIZE));
		return;
	}
	sdtc_etb_mempool = dhd->sdtc_etb_mempool;
	memcpy(sdtc_etb_mempool, &etb_info, sizeof(etb_info));
	sdtc_etb_dump = sdtc_etb_mempool + sizeof(etb_info);
	if ((ret = dhd_bus_get_sdtc_etb(dhd, sdtc_etb_dump, etb_info.addr, etb_info.read_bytes))) {
		DHD_ERROR(("%s: failed to get SDTC ETB, ret: %d\n", __FUNCTION__, ret));
		return;
	}

	if (write_dump_to_file(dhd, (uint8 *)sdtc_etb_mempool,
		etb_dump_len, "sdtc_etb_dump")) {
		DHD_ERROR(("%s: failed to dump sdtc_etb to file\n",
			__FUNCTION__));
	}
}
#endif /* DHD_SDTC_ETB_DUMP */
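
/* Layout sketch of the mempool written above (derived from the memcpy sequence):
 *
 *	sdtc_etb_mempool:
 *	+--------------------+-----------------------------------+
 *	| etb_info_t header  | ETB payload (etb_info.read_bytes) |
 *	+--------------------+-----------------------------------+
 *
 * etb_dump_len = sizeof(etb_info) + etb_info.read_bytes, and must not exceed
 * DHD_SDTC_ETB_MEMPOOL_SIZE.
 */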

#ifdef DHD_LOG_DUMP
static void
dhd_log_dump(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	log_dump_type_t *type = (log_dump_type_t *)event_info;

	if (!dhd || !type) {
		DHD_ERROR(("%s: dhd/type is NULL\n", __FUNCTION__));
		return;
	}

#ifdef WL_CFG80211
	/* flush the fw preserve logs */
	wl_flush_fw_log_buffer(dhd_linux_get_primary_netdev(&dhd->pub),
		FW_LOGSET_MASK_ALL);
#endif

	/* There are currently three possible contexts from which
	 * a log dump can be scheduled:
	 * 1. TRAP 2. supplicant DEBUG_DUMP private driver command
	 * 3. HEALTH CHECK event
	 * The concise debug info buffer is a shared resource,
	 * and in case a trap is one of the contexts then both
	 * scheduled work queues need to run because trap data is
	 * essential for debugging. Hence a mutex lock is acquired
	 * before calling do_dhd_log_dump().
	 */
	DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__));
	dhd_os_logdump_lock(&dhd->pub);
	DHD_OS_WAKE_LOCK(&dhd->pub);
	if (do_dhd_log_dump(&dhd->pub, type) != BCME_OK) {
		DHD_ERROR(("%s: writing debug dump to the file failed\n", __FUNCTION__));
	}
	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	dhd_os_logdump_unlock(&dhd->pub);
}

void dhd_schedule_log_dump(dhd_pub_t *dhdp, void *type)
{
	DHD_ERROR(("%s: scheduling log dump.. \n", __FUNCTION__));

	dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
		type, DHD_WQ_WORK_DHD_LOG_DUMP,
		dhd_log_dump, DHD_WQ_WORK_PRIORITY_HIGH);
}

static void
dhd_print_buf_addr(dhd_pub_t *dhdp, char *name, void *buf, unsigned int size)
{
#ifdef DHD_FW_COREDUMP
	if ((dhdp->memdump_enabled == DUMP_MEMONLY) ||
		(dhdp->memdump_enabled == DUMP_MEMFILE_BUGON) ||
		(dhdp->memdump_type == DUMP_TYPE_SMMU_FAULT) ||
#ifdef DHD_DETECT_CONSECUTIVE_MFG_HANG
		(dhdp->op_mode & DHD_FLAG_MFG_MODE &&
			(dhdp->hang_count >= MAX_CONSECUTIVE_MFG_HANG_COUNT-1)) ||
#endif /* DHD_DETECT_CONSECUTIVE_MFG_HANG */
		FALSE)
#else
	if (dhdp->memdump_type == DUMP_TYPE_SMMU_FAULT)
#endif
	{
#if defined(CONFIG_ARM64)
		DHD_ERROR(("-------- %s: buf(va)=%llx, buf(pa)=%llx, bufsize=%d\n",
			name, (uint64)buf, (uint64)__virt_to_phys((ulong)buf), size));
#elif defined(__ARM_ARCH_7A__)
		DHD_ERROR(("-------- %s: buf(va)=%x, buf(pa)=%x, bufsize=%d\n",
			name, (uint32)buf, (uint32)__virt_to_phys((ulong)buf), size));
#endif /* __ARM_ARCH_7A__ */
	}
}

static void
dhd_log_dump_buf_addr(dhd_pub_t *dhdp, log_dump_type_t *type)
{
	int i;
	unsigned long wr_size = 0;
	struct dhd_log_dump_buf *dld_buf = &g_dld_buf[0];
	size_t log_size = 0;
	char buf_name[DHD_PRINT_BUF_NAME_LEN];
	dhd_dbg_ring_t *ring = NULL;

	BCM_REFERENCE(ring);

	for (i = 0; i < DLD_BUFFER_NUM; i++) {
		dld_buf = &g_dld_buf[i];
		log_size = (unsigned long)dld_buf->max -
			(unsigned long)dld_buf->buffer;
		if (dld_buf->wraparound) {
			wr_size = log_size;
		} else {
			wr_size = (unsigned long)dld_buf->present -
				(unsigned long)dld_buf->front;
		}
		scnprintf(buf_name, sizeof(buf_name), "dld_buf[%d]", i);
		dhd_print_buf_addr(dhdp, buf_name, dld_buf, dld_buf_size[i]);
		scnprintf(buf_name, sizeof(buf_name), "dld_buf[%d] buffer", i);
		dhd_print_buf_addr(dhdp, buf_name, dld_buf->buffer, wr_size);
		scnprintf(buf_name, sizeof(buf_name), "dld_buf[%d] present", i);
		dhd_print_buf_addr(dhdp, buf_name, dld_buf->present, wr_size);
		scnprintf(buf_name, sizeof(buf_name), "dld_buf[%d] front", i);
		dhd_print_buf_addr(dhdp, buf_name, dld_buf->front, wr_size);
	}

#ifdef DEBUGABILITY_ECNTRS_LOGGING
	/* periodic flushing of ecounters is NOT supported */
	if (*type == DLD_BUF_TYPE_ALL &&
			logdump_ecntr_enable &&
			dhdp->ecntr_dbg_ring) {

		ring = (dhd_dbg_ring_t *)dhdp->ecntr_dbg_ring;
		dhd_print_buf_addr(dhdp, "ecntr_dbg_ring", ring, LOG_DUMP_ECNTRS_MAX_BUFSIZE);
		dhd_print_buf_addr(dhdp, "ecntr_dbg_ring ring_buf", ring->ring_buf,
				LOG_DUMP_ECNTRS_MAX_BUFSIZE);
	}
#endif /* DEBUGABILITY_ECNTRS_LOGGING */

#if defined(BCMPCIE)
	if (dhdp->dongle_trap_occured && dhdp->extended_trap_data) {
		dhd_print_buf_addr(dhdp, "extended_trap_data", dhdp->extended_trap_data,
				BCMPCIE_EXT_TRAP_DATA_MAXLEN);
	}
#endif /* BCMPCIE */

#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
	/* if a health check event was received */
	if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) {
		dhd_print_buf_addr(dhdp, "health_chk_event_data", dhdp->health_chk_event_data,
				HEALTH_CHK_BUF_SIZE);
	}
#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */

	/* append the concise debug information */
	if (dhdp->concise_dbg_buf) {
		dhd_print_buf_addr(dhdp, "concise_dbg_buf", dhdp->concise_dbg_buf,
				CONCISE_DUMP_BUFLEN);
	}
}

23061 #ifdef CUSTOMER_HW4_DEBUG
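/* Emit a buffer to the kernel log in bounded chunks: kernel log lines are
 * length-limited, so the data is staged through a NUL-terminated temporary
 * buffer of DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE bytes at a time.
 */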
23062 static void
23063 dhd_log_dump_print_to_kmsg(char *bufptr, unsigned long len)
23064 {
23065 	char tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE + 1];
23066 	char *end = NULL;
23067 	unsigned long plen = 0;
23068 
23069 	if (!bufptr || !len)
23070 		return;
23071 
23072 	memset(tmp_buf, 0, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE);
23073 	end = bufptr + len;
23074 	while (bufptr < end) {
23075 		if ((bufptr + DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE) < end) {
23076 			memcpy(tmp_buf, bufptr, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE);
23077 			tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE] = '\0';
23078 			printf("%s", tmp_buf);
23079 			bufptr += DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE;
23080 		} else {
23081 			plen = (unsigned long)end - (unsigned long)bufptr;
23082 			memcpy(tmp_buf, bufptr, plen);
23083 			tmp_buf[plen] = '\0';
23084 			printf("%s", tmp_buf);
23085 			bufptr += plen;
23086 		}
23087 	}
23088 }
23089 
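/* Print the last 'tail_len' bytes of a DLD buffer to the kernel log. The
 * tail is either a single contiguous region, or two regions when the buffer
 * has wrapped around (the chunk at the end of the ring is printed first,
 * followed by the region from 'front' up to 'present').
 */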
23090 static void
23091 dhd_log_dump_print_tail(dhd_pub_t *dhdp,
23092 		struct dhd_log_dump_buf *dld_buf,
23093 		uint tail_len)
23094 {
23095 	char *flush_ptr1 = NULL, *flush_ptr2 = NULL;
23096 	unsigned long len_flush1 = 0, len_flush2 = 0;
23097 	unsigned long flags = 0;
23098 
23099 	/* need to hold the lock before accessing the 'present' and 'front' ptrs */
23100 	DHD_LOG_DUMP_BUF_LOCK(&dld_buf->lock, flags);
23101 	flush_ptr1 = dld_buf->present - tail_len;
23102 	if (flush_ptr1 >= dld_buf->front) {
23103 		/* tail content is within the buffer */
23104 		flush_ptr2 = NULL;
23105 		len_flush1 = tail_len;
23106 	} else if (dld_buf->wraparound) {
23107 		/* tail content spans the buffer length, i.e., wrap-around */
23108 		flush_ptr1 = dld_buf->front;
23109 		len_flush1 = (unsigned long)dld_buf->present - (unsigned long)flush_ptr1;
23110 		len_flush2 = (unsigned long)tail_len - len_flush1;
23111 		flush_ptr2 = (char *)((unsigned long)dld_buf->max -
23112 			(unsigned long)len_flush2);
23113 	} else {
23114 		/* amount of logs in the buffer is less than the tail size */
23115 		flush_ptr1 = dld_buf->front;
23116 		flush_ptr2 = NULL;
23117 		len_flush1 = (unsigned long)dld_buf->present - (unsigned long)dld_buf->front;
23118 	}
23119 	DHD_LOG_DUMP_BUF_UNLOCK(&dld_buf->lock, flags);
23120 
23121 	printf("\n================= LOG_DUMP tail =================\n");
23122 	if (flush_ptr2) {
23123 		dhd_log_dump_print_to_kmsg(flush_ptr2, len_flush2);
23124 	}
23125 	dhd_log_dump_print_to_kmsg(flush_ptr1, len_flush1);
23126 	printf("\n===================================================\n");
23127 }
23128 #endif /* CUSTOMER_HW4_DEBUG */
23129 
23130 #ifdef DHD_SSSR_DUMP
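/* Fill 'arr_len' with the sizes of the individual SSSR dump buffers: the
 * per-core D11 before/after snapshots (cores 0 and 1, plus the scan core on
 * rev2+ register layouts) and the DIG/VASIP buffers, so that upper layers
 * (e.g. the vendor HAL) can size their receive buffers accordingly.
 */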
23131 int
23132 dhdpcie_sssr_dump_get_before_after_len(dhd_pub_t *dhd, uint32 *arr_len)
23133 {
23134 	int i = 0;
23135 	uint dig_buf_size = 0;
23136 
23137 	DHD_ERROR(("%s\n", __FUNCTION__));
23138 
23139 	/* core 0 */
23140 	i = 0;
23141 #ifdef DHD_SSSR_DUMP_BEFORE_SR
23142 	if (dhd->sssr_d11_before[i] && dhd->sssr_d11_outofreset[i] &&
23143 		(dhd->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
23144 
23145 		arr_len[SSSR_C0_D11_BEFORE]  = dhd_sssr_mac_buf_size(dhd, i);
23146 		DHD_ERROR(("%s: arr_len[SSSR_C0_D11_BEFORE] : %d\n", __FUNCTION__,
23147 			arr_len[SSSR_C0_D11_BEFORE]));
23148 #ifdef DHD_LOG_DUMP
23149 		dhd_print_buf_addr(dhd, "SSSR_C0_D11_BEFORE",
23150 			dhd->sssr_d11_before[i], arr_len[SSSR_C0_D11_BEFORE]);
23151 #endif /* DHD_LOG_DUMP */
23152 	}
23153 #endif /* DHD_SSSR_DUMP_BEFORE_SR */
23154 	if (dhd->sssr_d11_after[i] && dhd->sssr_d11_outofreset[i]) {
23155 		arr_len[SSSR_C0_D11_AFTER]  = dhd_sssr_mac_buf_size(dhd, i);
23156 		DHD_ERROR(("%s: arr_len[SSSR_C0_D11_AFTER] : %d\n", __FUNCTION__,
23157 			arr_len[SSSR_C0_D11_AFTER]));
23158 #ifdef DHD_LOG_DUMP
23159 		dhd_print_buf_addr(dhd, "SSSR_C0_D11_AFTER",
23160 			dhd->sssr_d11_after[i], arr_len[SSSR_C0_D11_AFTER]);
23161 #endif /* DHD_LOG_DUMP */
23162 	}
23163 
23164 	/* core 1 */
23165 	i = 1;
23166 #ifdef DHD_SSSR_DUMP_BEFORE_SR
23167 	if (dhd->sssr_d11_before[i] && dhd->sssr_d11_outofreset[i] &&
23168 		(dhd->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
23169 		arr_len[SSSR_C1_D11_BEFORE]  = dhd_sssr_mac_buf_size(dhd, i);
23170 		DHD_ERROR(("%s: arr_len[SSSR_C1_D11_BEFORE] : %d\n", __FUNCTION__,
23171 			arr_len[SSSR_C1_D11_BEFORE]));
23172 #ifdef DHD_LOG_DUMP
23173 		dhd_print_buf_addr(dhd, "SSSR_C1_D11_BEFORE",
23174 			dhd->sssr_d11_before[i], arr_len[SSSR_C1_D11_BEFORE]);
23175 #endif /* DHD_LOG_DUMP */
23176 	}
23177 #endif /* DHD_SSSR_DUMP_BEFORE_SR */
23178 	if (dhd->sssr_d11_after[i] && dhd->sssr_d11_outofreset[i]) {
23179 		arr_len[SSSR_C1_D11_AFTER]  = dhd_sssr_mac_buf_size(dhd, i);
23180 		DHD_ERROR(("%s: arr_len[SSSR_C1_D11_AFTER] : %d\n", __FUNCTION__,
23181 			arr_len[SSSR_C1_D11_AFTER]));
23182 #ifdef DHD_LOG_DUMP
23183 		dhd_print_buf_addr(dhd, "SSSR_C1_D11_AFTER",
23184 			dhd->sssr_d11_after[i], arr_len[SSSR_C1_D11_AFTER]);
23185 #endif /* DHD_LOG_DUMP */
23186 	}
23187 
23188 	/* core 2 scan core */
23189 	if (dhd->sssr_reg_info->rev2.version >= SSSR_REG_INFO_VER_2) {
23190 		i = 2;
23191 #ifdef DHD_SSSR_DUMP_BEFORE_SR
23192 		if (dhd->sssr_d11_before[i] && dhd->sssr_d11_outofreset[i] &&
23193 			(dhd->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
23194 			arr_len[SSSR_C2_D11_BEFORE]  = dhd_sssr_mac_buf_size(dhd, i);
23195 			DHD_ERROR(("%s: arr_len[SSSR_C2_D11_BEFORE] : %d\n", __FUNCTION__,
23196 				arr_len[SSSR_C2_D11_BEFORE]));
23197 #ifdef DHD_LOG_DUMP
23198 			dhd_print_buf_addr(dhd, "SSSR_C2_D11_BEFORE",
23199 				dhd->sssr_d11_before[i], arr_len[SSSR_C2_D11_BEFORE]);
23200 #endif /* DHD_LOG_DUMP */
23201 		}
23202 #endif /* DHD_SSSR_DUMP_BEFORE_SR */
23203 		if (dhd->sssr_d11_after[i] && dhd->sssr_d11_outofreset[i]) {
23204 			arr_len[SSSR_C2_D11_AFTER]  = dhd_sssr_mac_buf_size(dhd, i);
23205 			DHD_ERROR(("%s: arr_len[SSSR_C2_D11_AFTER] : %d\n", __FUNCTION__,
23206 				arr_len[SSSR_C2_D11_AFTER]));
23207 #ifdef DHD_LOG_DUMP
23208 			dhd_print_buf_addr(dhd, "SSSR_C2_D11_AFTER",
23209 				dhd->sssr_d11_after[i], arr_len[SSSR_C2_D11_AFTER]);
23210 #endif /* DHD_LOG_DUMP */
23211 		}
23212 	}
23213 
23214 	/* DIG core or VASIP */
23215 	dig_buf_size = dhd_sssr_dig_buf_size(dhd);
23216 #ifdef DHD_SSSR_DUMP_BEFORE_SR
23217 	arr_len[SSSR_DIG_BEFORE] = (dhd->sssr_dig_buf_before) ? dig_buf_size : 0;
23218 	DHD_ERROR(("%s: arr_len[SSSR_DIG_BEFORE] : %d\n", __FUNCTION__,
23219 		arr_len[SSSR_DIG_BEFORE]));
23220 #ifdef DHD_LOG_DUMP
23221 	if (dhd->sssr_dig_buf_before) {
23222 		dhd_print_buf_addr(dhd, "SSSR_DIG_BEFORE",
23223 			dhd->sssr_dig_buf_before, arr_len[SSSR_DIG_BEFORE]);
23224 	}
23225 #endif /* DHD_LOG_DUMP */
23226 #endif /* DHD_SSSR_DUMP_BEFORE_SR */
23227 
23228 	arr_len[SSSR_DIG_AFTER] = (dhd->sssr_dig_buf_after) ? dig_buf_size : 0;
23229 	DHD_ERROR(("%s: arr_len[SSSR_DIG_AFTER] : %d\n", __FUNCTION__,
23230 		arr_len[SSSR_DIG_AFTER]));
23231 #ifdef DHD_LOG_DUMP
23232 	if (dhd->sssr_dig_buf_after) {
23233 		dhd_print_buf_addr(dhd, "SSSR_DIG_AFTER",
23234 			dhd->sssr_dig_buf_after, arr_len[SSSR_DIG_AFTER]);
23235 	}
23236 #endif /* DHD_LOG_DUMP */
23237 
23238 	return BCME_OK;
23239 }
23240 
23241 void
23242 dhd_nla_put_sssr_dump_len(void *ndev, uint32 *arr_len)
23243 {
23244 	dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
23245 	dhd_pub_t *dhdp = &dhd_info->pub;
23246 
23247 	if (dhdp->sssr_dump_collected) {
23248 		dhdpcie_sssr_dump_get_before_after_len(dhdp, arr_len);
23249 	}
23250 }
23251 #endif /* DHD_SSSR_DUMP */
23252 
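/* Return the length of the timestamp banner placed at the top of a log
 * dump. The format string must stay in sync with dhd_get_time_str() and
 * dhd_print_time_str(), which emit the actual banner.
 */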
23253 uint32
23254 dhd_get_time_str_len()
23255 {
23256 	char *ts = NULL, time_str[128];
23257 
23258 	ts = dhd_log_dump_get_timestamp();
23259 	snprintf(time_str, sizeof(time_str),
23260 			"\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts);
23261 	return strlen(time_str);
23262 }
23263 
23264 #if defined(BCMPCIE)
23265 uint32
23266 dhd_get_ext_trap_len(void *ndev, dhd_pub_t *dhdp)
23267 {
23268 	int length = 0;
23269 	log_dump_section_hdr_t sec_hdr;
23270 	dhd_info_t *dhd_info;
23271 
23272 	if (ndev) {
23273 		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
23274 		dhdp = &dhd_info->pub;
23275 	}
23276 
23277 	if (!dhdp)
23278 		return length;
23279 
23280 	if (dhdp->extended_trap_data) {
23281 		length = (strlen(EXT_TRAP_LOG_HDR)
23282 					+ sizeof(sec_hdr) + BCMPCIE_EXT_TRAP_DATA_MAXLEN);
23283 	}
23284 	return length;
23285 }
23286 #endif /* BCMPCIE */
23287 
23288 #if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
23289 uint32
23290 dhd_get_health_chk_len(void *ndev, dhd_pub_t *dhdp)
23291 {
23292 	int length = 0;
23293 	log_dump_section_hdr_t sec_hdr;
23294 	dhd_info_t *dhd_info;
23295 
23296 	if (ndev) {
23297 		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
23298 		dhdp = &dhd_info->pub;
23299 	}
23300 
23301 	if (!dhdp)
23302 		return length;
23303 
23304 	if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) {
23305 		length = (strlen(HEALTH_CHK_LOG_HDR)
23306 			+ sizeof(sec_hdr) + HEALTH_CHK_BUF_SIZE);
23307 	}
23308 	return length;
23309 }
23310 #endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */
23311 
23312 uint32
23313 dhd_get_dhd_dump_len(void *ndev, dhd_pub_t *dhdp)
23314 {
23315 	uint32 length = 0;
23316 	log_dump_section_hdr_t sec_hdr;
23317 	dhd_info_t *dhd_info;
23318 	int remain_len = 0;
23319 
23320 	if (ndev) {
23321 		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
23322 		dhdp = &dhd_info->pub;
23323 	}
23324 
23325 	if (!dhdp)
23326 		return length;
23327 
23328 	if (dhdp->concise_dbg_buf) {
23329 		remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
23330 		if (remain_len <= 0 || remain_len >= CONCISE_DUMP_BUFLEN) {
23331 			DHD_ERROR(("%s: error getting concise debug info ! remain_len: %d\n",
23332 				__FUNCTION__, remain_len));
23333 			return length;
23334 		}
23335 
23336 		length += (uint32)(CONCISE_DUMP_BUFLEN - remain_len);
23337 	}
23338 
23339 	length += (uint32)(strlen(DHD_DUMP_LOG_HDR) + sizeof(sec_hdr));
23340 	return length;
23341 }
23342 
23343 uint32
23344 dhd_get_cookie_log_len(void *ndev, dhd_pub_t *dhdp)
23345 {
23346 	int length = 0;
23347 	dhd_info_t *dhd_info;
23348 
23349 	if (ndev) {
23350 		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
23351 		dhdp = &dhd_info->pub;
23352 	}
23353 
23354 	if (!dhdp)
23355 		return length;
23356 
23357 	if (dhdp->logdump_cookie && dhd_logdump_cookie_count(dhdp) > 0) {
23358 		length = dhd_log_dump_cookie_len(dhdp);
23359 	}
23360 	return length;
23361 
23362 }
23363 
23364 #ifdef DHD_DUMP_PCIE_RINGS
23365 uint32
23366 dhd_get_flowring_len(void *ndev, dhd_pub_t *dhdp)
23367 {
23368 	uint32 length = 0;
23369 	log_dump_section_hdr_t sec_hdr;
23370 	dhd_info_t *dhd_info;
23371 	uint16 h2d_flowrings_total;
23372 	int remain_len = 0;
23373 
23374 	if (ndev) {
23375 		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
23376 		dhdp = &dhd_info->pub;
23377 	}
23378 
23379 	if (!dhdp)
23380 		return length;
23381 
23382 	if (dhdp->concise_dbg_buf) {
23383 		remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
23384 		if (remain_len <= 0 || remain_len >= CONCISE_DUMP_BUFLEN) {
23385 			DHD_ERROR(("%s: error getting concise debug info ! remain_len: %d\n",
23386 				__FUNCTION__, remain_len));
23387 			return length;
23388 		}
23389 
23390 		length += (uint32)(CONCISE_DUMP_BUFLEN - remain_len);
23391 	}
23392 
23393 	length += (uint32) strlen(FLOWRING_DUMP_HDR);
23394 	length += (uint32) sizeof(sec_hdr);
23395 	h2d_flowrings_total = dhd_get_max_flow_rings(dhdp);
23396 	length += ((D2HRING_TXCMPLT_ITEMSIZE * D2HRING_TXCMPLT_MAX_ITEM)
23397 				+ (H2DRING_RXPOST_ITEMSIZE * H2DRING_RXPOST_MAX_ITEM)
23398 				+ (D2HRING_RXCMPLT_ITEMSIZE * D2HRING_RXCMPLT_MAX_ITEM)
23399 				+ (H2DRING_CTRL_SUB_ITEMSIZE * H2DRING_CTRL_SUB_MAX_ITEM)
23400 				+ (D2HRING_CTRL_CMPLT_ITEMSIZE * D2HRING_CTRL_CMPLT_MAX_ITEM)
23401 #ifdef EWP_EDL
23402 				+ (D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM));
23403 #else
23404 				+ (H2DRING_INFO_BUFPOST_ITEMSIZE * H2DRING_DYNAMIC_INFO_MAX_ITEM)
23405 				+ (D2HRING_INFO_BUFCMPLT_ITEMSIZE * D2HRING_DYNAMIC_INFO_MAX_ITEM));
23406 #endif /* EWP_EDL */
23407 
23408 #if defined(DHD_HTPUT_TUNABLES)
23409 	/* flowring lengths are different for HTPUT rings, handle accordingly */
23410 	length += ((H2DRING_TXPOST_ITEMSIZE * dhd_prot_get_h2d_htput_max_txpost(dhdp) *
23411 		HTPUT_TOTAL_FLOW_RINGS) +
23412 		(H2DRING_TXPOST_ITEMSIZE * dhd_prot_get_h2d_max_txpost(dhdp) *
23413 		(h2d_flowrings_total - HTPUT_TOTAL_FLOW_RINGS)));
23414 #else
23415 	length += (H2DRING_TXPOST_ITEMSIZE * dhd_prot_get_h2d_max_txpost(dhdp) *
23416 		h2d_flowrings_total);
23417 #endif /* DHD_HTPUT_TUNABLES */
23418 
23419 	return length;
23420 }
23421 #endif /* DHD_DUMP_PCIE_RINGS */
23422 
23423 #ifdef EWP_ECNTRS_LOGGING
23424 uint32
23425 dhd_get_ecntrs_len(void *ndev, dhd_pub_t *dhdp)
23426 {
23427 	dhd_info_t *dhd_info;
23428 	log_dump_section_hdr_t sec_hdr;
23429 	int length = 0;
23430 	dhd_dbg_ring_t *ring;
23431 
23432 	if (ndev) {
23433 		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
23434 		dhdp = &dhd_info->pub;
23435 	}
23436 
23437 	if (!dhdp)
23438 		return length;
23439 
23440 	if (logdump_ecntr_enable && dhdp->ecntr_dbg_ring) {
23441 		ring = (dhd_dbg_ring_t *)dhdp->ecntr_dbg_ring;
23442 		length = ring->ring_size + strlen(ECNTRS_LOG_HDR) + sizeof(sec_hdr);
23443 	}
23444 	return length;
23445 }
23446 #endif /* EWP_ECNTRS_LOGGING */
23447 
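/* Export one DLD buffer as a dump section: the ASCII header string, then a
 * binary section header, then 'len' bytes of log data. 'len' is expected to
 * be the value computed by dhd_get_dld_len() for the same buffer type, since
 * the header lengths are subtracted from it here.
 */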
23448 int
23449 dhd_get_dld_log_dump(void *dev, dhd_pub_t *dhdp, const void *user_buf,
23450 	void *fp, uint32 len, int type, void *pos)
23451 {
23452 	int ret = BCME_OK;
23453 	struct dhd_log_dump_buf *dld_buf;
23454 	log_dump_section_hdr_t sec_hdr;
23455 	dhd_info_t *dhd_info;
23456 
23457 	dld_buf = &g_dld_buf[type];
23458 
23459 	if (dev) {
23460 		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
23461 		dhdp = &dhd_info->pub;
23462 	} else if (!dhdp) {
23463 		return BCME_ERROR;
23464 	}
23465 
23466 	DHD_ERROR(("%s: ENTER \n", __FUNCTION__));
23467 
23468 	dhd_init_sec_hdr(&sec_hdr);
23469 
23470 	/* write the section header first */
23471 	ret = dhd_export_debug_data(dld_hdrs[type].hdr_str, fp, user_buf,
23472 		strlen(dld_hdrs[type].hdr_str), pos);
23473 	if (ret < 0)
23474 		goto exit;
23475 	len -= (uint32)strlen(dld_hdrs[type].hdr_str);
23476 	len -= (uint32)sizeof(sec_hdr);
23477 	sec_hdr.type = dld_hdrs[type].sec_type;
23478 	sec_hdr.length = len;
23479 	ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
23480 	if (ret < 0)
23481 		goto exit;
23482 	ret = dhd_export_debug_data(dld_buf->buffer, fp, user_buf, len, pos);
23483 	if (ret < 0)
23484 		goto exit;
23485 
23486 exit:
23487 	return ret;
23488 }
23489 
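/* Flush any in-flight firmware events/logs into the DLD buffers before the
 * dump is written out. Returns BCME_ERROR only for invalid arguments; a PCIe
 * link-down is reported as BCME_OK so that the caller still writes whatever
 * data has already been collected.
 */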
23490 static int
23491 dhd_log_flush(dhd_pub_t *dhdp, log_dump_type_t *type)
23492 {
23493 	unsigned long flags = 0;
23494 #ifdef EWP_EDL
23495 	int i = 0;
23496 #endif /* EWP_EDL */
23497 	dhd_info_t *dhd_info = NULL;
23498 
23499 	BCM_REFERENCE(dhd_info);
23500 
23501 	/* If dhdp is NULL, it is extremely unlikely that a log dump will be
23502 	 * scheduled, so not freeing 'type' here is OK. Even if we wanted to
23503 	 * free 'type', we could not do so, since 'dhdp->osh' is unavailable
23504 	 * when dhdp is NULL.
23505 	 */
23506 	if (!dhdp || !type) {
23507 		if (dhdp) {
23508 			DHD_GENERAL_LOCK(dhdp, flags);
23509 			DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp);
23510 			dhd_os_busbusy_wake(dhdp);
23511 			DHD_GENERAL_UNLOCK(dhdp, flags);
23512 		}
23513 		return BCME_ERROR;
23514 	}
23515 
23516 #if defined(BCMPCIE)
23517 	if (dhd_bus_get_linkdown(dhdp)) {
23518 		/* As the link is down, do not collect any data over PCIe.
23519 		 * Also return BCME_OK to the caller, so that the caller can
23520 		 * dump all the outstanding data to the file.
23521 		 */
23522 		return BCME_OK;
23523 	}
23524 #endif /* BCMPCIE */
23525 
23526 	dhd_info = (dhd_info_t *)dhdp->info;
23527 	/* in case of a trap, get the preserve logs from ETD */
23528 #if defined(BCMPCIE) && defined(EWP_ETD_PRSRV_LOGS)
23529 	if (dhdp->dongle_trap_occured &&
23530 			dhdp->extended_trap_data) {
23531 		dhdpcie_get_etd_preserve_logs(dhdp, (uint8 *)dhdp->extended_trap_data,
23532 				&dhd_info->event_data);
23533 	}
23534 #endif /* BCMPCIE */
23535 
23536 	/* flush the event work items to get any fw events/logs
23537 	 * flush_work is a blocking call
23538 	 */
23539 #ifdef SHOW_LOGTRACE
23540 #ifdef EWP_EDL
23541 	if (dhd_info->pub.dongle_edl_support) {
23542 		/* wait till existing edl items are processed */
23543 		dhd_flush_logtrace_process(dhd_info);
23544 		/* dhd_flush_logtrace_process will ensure the work items in the
23545 		* (EDL) ring from rd to wr are processed. But if wr had
23546 		* wrapped around, only the work items from rd to the ring end are
23547 		* processed. So to ensure that the work items at the
23548 		* beginning of the ring are also processed in the wrap-around case,
23549 		* call it twice.
23550 		*/
23551 		for (i = 0; i < 2; i++) {
23552 			/* blocks till the edl items are processed */
23553 			dhd_flush_logtrace_process(dhd_info);
23554 		}
23555 	} else {
23556 		dhd_flush_logtrace_process(dhd_info);
23557 	}
23558 #else
23559 	dhd_flush_logtrace_process(dhd_info);
23560 #endif /* EWP_EDL */
23561 #endif /* SHOW_LOGTRACE */
23562 
23563 #ifdef CUSTOMER_HW4_DEBUG
23564 	/* print the last 'x' KB of preserve buffer data to the kmsg console;
23565 	* this is to address cases where the debug_dump file is not
23566 	* available for debugging
23567 	*/
23568 	dhd_log_dump_print_tail(dhdp,
23569 		&g_dld_buf[DLD_BUF_TYPE_PRESERVE], logdump_prsrv_tailsize);
23570 #endif /* CUSTOMER_HW4_DEBUG */
23571 	return BCME_OK;
23572 }
23573 
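/* Compose the debug dump file path: the base path and dump type, an
 * optional timestamp (omitted in periodic-flush mode so that a single file
 * is reused), and an optional suffix for the unwanted/disconnected
 * subcommands.
 */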
23574 int
23575 dhd_get_debug_dump_file_name(void *dev, dhd_pub_t *dhdp, char *dump_path, int size)
23576 {
23577 	int ret;
23578 	int len = 0;
23579 	dhd_info_t *dhd_info;
23580 
23581 	if (dev) {
23582 		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
23583 		dhdp = &dhd_info->pub;
23584 	}
23585 
23586 	if (!dhdp)
23587 		return BCME_ERROR;
23588 
23589 	memset(dump_path, 0, size);
23590 
23591 	ret = snprintf(dump_path, size, "%s",
23592 			DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE);
23593 	len += ret;
23594 
23595 	/* Keep the same timestamp across different dump logs */
23596 	if (!dhdp->logdump_periodic_flush) {
23597 		struct rtc_time tm;
23598 		clear_debug_dump_time(dhdp->debug_dump_time_str);
23599 		get_debug_dump_time(dhdp->debug_dump_time_str);
23600 		sscanf(dhdp->debug_dump_time_str, DHD_LOG_DUMP_TS_FMT_YYMMDDHHMMSS,
23601 			&tm.tm_year, &tm.tm_mon, &tm.tm_mday,
23602 			&tm.tm_hour, &tm.tm_min, &tm.tm_sec);
23603 		ret = snprintf(dump_path + len, size - len, "_" DHD_LOG_DUMP_TS_FMT_YYMMDDHHMMSS,
23604 				tm.tm_year, tm.tm_mon, tm.tm_mday,
23605 				tm.tm_hour, tm.tm_min, tm.tm_sec);
23606 		len += ret;
23607 	}
23608 
23609 	ret = 0;
23610 	switch (dhdp->debug_dump_subcmd) {
23611 	case CMD_UNWANTED:
23612 		ret = snprintf(dump_path + len, size - len, "%s", DHD_DUMP_SUBSTR_UNWANTED);
23613 		break;
23614 	case CMD_DISCONNECTED:
23615 		ret = snprintf(dump_path + len, size - len, "%s", DHD_DUMP_SUBSTR_DISCONNECTED);
23616 		break;
23617 	default:
23618 		break;
23619 	}
23620 	len += ret;
23621 
23622 	return BCME_OK;
23623 }
23624 
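/* Return the number of bytes dhd_get_dld_log_dump() will emit for the given
 * buffer type: the written payload (the whole buffer if it has wrapped,
 * otherwise 'front' up to 'present') plus the section header and the ASCII
 * header string.
 */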
23625 uint32
23626 dhd_get_dld_len(int log_type)
23627 {
23628 	unsigned long wr_size = 0;
23629 	unsigned long buf_size = 0;
23630 	unsigned long flags = 0;
23631 	struct dhd_log_dump_buf *dld_buf;
23632 	log_dump_section_hdr_t sec_hdr;
23633 
23634 	/* calculate the length of the log */
23635 	dld_buf = &g_dld_buf[log_type];
23636 	buf_size = (unsigned long)dld_buf->max -
23637 			(unsigned long)dld_buf->buffer;
23638 
23639 	if (dld_buf->wraparound) {
23640 		wr_size = buf_size;
23641 	} else {
23642 		/* need to hold the lock before accessing the 'present' and 'front' ptrs */
23643 		DHD_LOG_DUMP_BUF_LOCK(&dld_buf->lock, flags);
23644 		wr_size = (unsigned long)dld_buf->present -
23645 				(unsigned long)dld_buf->front;
23646 		DHD_LOG_DUMP_BUF_UNLOCK(&dld_buf->lock, flags);
23647 	}
23648 	return (wr_size + sizeof(sec_hdr) + strlen(dld_hdrs[log_type].hdr_str));
23649 }
23650 
23651 static void
23652 dhd_get_time_str(dhd_pub_t *dhdp, char *time_str, int size)
23653 {
23654 	char *ts = NULL;
23655 	memset(time_str, 0, size);
23656 	ts = dhd_log_dump_get_timestamp();
23657 	snprintf(time_str, size,
23658 			"\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts);
23659 }
23660 
23661 int
23662 dhd_print_time_str(const void *user_buf, void *fp, uint32 len, void *pos)
23663 {
23664 	char *ts = NULL;
23665 	int ret = 0;
23666 	char time_str[128];
23667 
23668 	memset_s(time_str, sizeof(time_str), 0, sizeof(time_str));
23669 	ts = dhd_log_dump_get_timestamp();
23670 	snprintf(time_str, sizeof(time_str),
23671 			"\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts);
23672 
23673 	/* write the timestamp hdr to the file first */
23674 	ret = dhd_export_debug_data(time_str, fp, user_buf, strlen(time_str), pos);
23675 	if (ret < 0) {
23676 		DHD_ERROR(("write file error, err = %d\n", ret));
23677 	}
23678 	return ret;
23679 }
23680 
23681 #if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
23682 int
23683 dhd_print_health_chk_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
23684 	void *fp, uint32 len, void *pos)
23685 {
23686 	int ret = BCME_OK;
23687 	log_dump_section_hdr_t sec_hdr;
23688 	dhd_info_t *dhd_info;
23689 
23690 	if (dev) {
23691 		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
23692 		dhdp = &dhd_info->pub;
23693 	}
23694 
23695 	if (!dhdp)
23696 		return BCME_ERROR;
23697 
23698 	dhd_init_sec_hdr(&sec_hdr);
23699 
23700 	if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) {
23701 		/* write the section header first */
23702 		ret = dhd_export_debug_data(HEALTH_CHK_LOG_HDR, fp, user_buf,
23703 			strlen(HEALTH_CHK_LOG_HDR), pos);
23704 		if (ret < 0)
23705 			goto exit;
23706 
23707 		len -= (uint32)strlen(HEALTH_CHK_LOG_HDR);
23708 		sec_hdr.type = LOG_DUMP_SECTION_HEALTH_CHK;
23709 		sec_hdr.length = HEALTH_CHK_BUF_SIZE;
23710 		ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
23711 		if (ret < 0)
23712 			goto exit;
23713 
23714 		len -= (uint32)sizeof(sec_hdr);
23715 		/* write the log */
23716 		ret = dhd_export_debug_data((char *)dhdp->health_chk_event_data, fp,
23717 			user_buf, len, pos);
23718 		if (ret < 0)
23719 			goto exit;
23720 	}
23721 exit:
23722 	return ret;
23723 }
23724 #endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */
23725 
23726 #if defined(BCMPCIE)
23727 int
23728 dhd_print_ext_trap_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
23729 	void *fp, uint32 len, void *pos)
23730 {
23731 	int ret = BCME_OK;
23732 	log_dump_section_hdr_t sec_hdr;
23733 	dhd_info_t *dhd_info;
23734 
23735 	if (dev) {
23736 		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
23737 		dhdp = &dhd_info->pub;
23738 	}
23739 
23740 	if (!dhdp)
23741 		return BCME_ERROR;
23742 
23743 	dhd_init_sec_hdr(&sec_hdr);
23744 
23745 	/* append extended trap data to the file in case of traps */
23746 	if (dhdp->dongle_trap_occured &&
23747 			dhdp->extended_trap_data) {
23748 		/* write the section header first */
23749 		ret = dhd_export_debug_data(EXT_TRAP_LOG_HDR, fp, user_buf,
23750 			strlen(EXT_TRAP_LOG_HDR), pos);
23751 		if (ret < 0)
23752 			goto exit;
23753 
23754 		len -= (uint32)strlen(EXT_TRAP_LOG_HDR);
23755 		sec_hdr.type = LOG_DUMP_SECTION_EXT_TRAP;
23756 		sec_hdr.length = BCMPCIE_EXT_TRAP_DATA_MAXLEN;
23757 		ret = dhd_export_debug_data((uint8 *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
23758 		if (ret < 0)
23759 			goto exit;
23760 
23761 		len -= (uint32)sizeof(sec_hdr);
23762 		/* write the log */
23763 		ret = dhd_export_debug_data((uint8 *)dhdp->extended_trap_data, fp,
23764 			user_buf, len, pos);
23765 		if (ret < 0)
23766 			goto exit;
23767 	}
23768 exit:
23769 	return ret;
23770 }
23771 #endif /* BCMPCIE */
23772 
23773 int
23774 dhd_print_dump_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
23775 	void *fp, uint32 len, void *pos)
23776 {
23777 	int ret = BCME_OK;
23778 	log_dump_section_hdr_t sec_hdr;
23779 	dhd_info_t *dhd_info;
23780 
23781 	if (dev) {
23782 		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
23783 		dhdp = &dhd_info->pub;
23784 	}
23785 
23786 	if (!dhdp)
23787 		return BCME_ERROR;
23788 
23789 	dhd_init_sec_hdr(&sec_hdr);
23790 
23791 	ret = dhd_export_debug_data(DHD_DUMP_LOG_HDR, fp, user_buf, strlen(DHD_DUMP_LOG_HDR), pos);
23792 	if (ret < 0)
23793 		goto exit;
23794 
23795 	len -= (uint32)strlen(DHD_DUMP_LOG_HDR);
23796 	sec_hdr.type = LOG_DUMP_SECTION_DHD_DUMP;
23797 	sec_hdr.length = len;
23798 	ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
23799 	if (ret < 0)
23800 		goto exit;
23801 
23802 	len -= (uint32)sizeof(sec_hdr);
23803 
23804 	if (dhdp->concise_dbg_buf) {
23805 		dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
23806 		ret = dhd_export_debug_data(dhdp->concise_dbg_buf, fp, user_buf, len, pos);
23807 		if (ret < 0)
23808 			goto exit;
23809 	}
23810 
23811 exit:
23812 	return ret;
23813 }
23814 
23815 int
23816 dhd_print_cookie_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
23817 	void *fp, uint32 len, void *pos)
23818 {
23819 	int ret = BCME_OK;
23820 	dhd_info_t *dhd_info;
23821 
23822 	if (dev) {
23823 		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
23824 		dhdp = &dhd_info->pub;
23825 	}
23826 
23827 	if (!dhdp)
23828 		return BCME_ERROR;
23829 
23830 	if (dhdp->logdump_cookie && dhd_logdump_cookie_count(dhdp) > 0) {
23831 		ret = dhd_log_dump_cookie_to_file(dhdp, fp, user_buf, (unsigned long *)pos);
23832 	}
23833 	return ret;
23834 }
23835 
23836 #ifdef DHD_DUMP_PCIE_RINGS
23837 int
23838 dhd_print_flowring_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
23839 		void *fp, uint32 len, void *pos)
23840 {
23841 	log_dump_section_hdr_t sec_hdr;
23842 	int ret = BCME_OK;
23843 	int remain_len = 0;
23844 	dhd_info_t *dhd_info;
23845 
23846 	if (dev) {
23847 		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
23848 		dhdp = &dhd_info->pub;
23849 	}
23850 
23851 	if (!dhdp)
23852 		return BCME_ERROR;
23853 
23854 	dhd_init_sec_hdr(&sec_hdr);
23855 
23856 	remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
23857 	if (remain_len <= 0 || remain_len >= CONCISE_DUMP_BUFLEN) {
23858 		DHD_ERROR(("%s: error getting concise debug info !\n",
23859 			__FUNCTION__));
23860 		return BCME_ERROR;
23861 	}
23862 	memset(dhdp->concise_dbg_buf, 0, CONCISE_DUMP_BUFLEN);
23863 
23864 	/* write the section header first */
23865 	ret = dhd_export_debug_data(FLOWRING_DUMP_HDR, fp, user_buf,
23866 		strlen(FLOWRING_DUMP_HDR), pos);
23867 	if (ret < 0)
23868 		goto exit;
23869 
23870 	/* Write the ring summary */
23871 	ret = dhd_export_debug_data(dhdp->concise_dbg_buf, fp, user_buf,
23872 		(CONCISE_DUMP_BUFLEN - remain_len), pos);
23873 	if (ret < 0)
23874 		goto exit;
23875 
23876 	sec_hdr.type = LOG_DUMP_SECTION_FLOWRING;
23877 	sec_hdr.length = len;
23878 	ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
23879 	if (ret < 0)
23880 		goto exit;
23881 
23882 	/* write the log */
23883 	ret = dhd_d2h_h2d_ring_dump(dhdp, fp, user_buf, (unsigned long *)pos, TRUE);
23884 	if (ret < 0)
23885 		goto exit;
23886 
23887 exit:
23888 	return ret;
23889 }
23890 #endif /* DHD_DUMP_PCIE_RINGS */
23891 
23892 #ifdef EWP_ECNTRS_LOGGING
23893 int
23894 dhd_print_ecntrs_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
23895 		void *fp, uint32 len, void *pos)
23896 {
23897 	log_dump_section_hdr_t sec_hdr;
23898 	int ret = BCME_OK;
23899 	dhd_info_t *dhd_info;
23900 
23901 	if (dev) {
23902 		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
23903 		dhdp = &dhd_info->pub;
23904 	}
23905 
23906 	if (!dhdp)
23907 		return BCME_ERROR;
23908 
23909 	dhd_init_sec_hdr(&sec_hdr);
23910 
23911 	if (logdump_ecntr_enable &&
23912 			dhdp->ecntr_dbg_ring) {
23913 		sec_hdr.type = LOG_DUMP_SECTION_ECNTRS;
23914 		ret = dhd_dump_debug_ring(dhdp, dhdp->ecntr_dbg_ring,
23915 				user_buf, &sec_hdr, ECNTRS_LOG_HDR, len, LOG_DUMP_SECTION_ECNTRS);
23916 	}
23917 	return ret;
23918 
23919 }
23920 #endif /* EWP_ECNTRS_LOGGING */
23921 
23922 #ifdef EWP_RTT_LOGGING
23923 int
23924 dhd_print_rtt_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
23925 		void *fp, uint32 len, void *pos)
23926 {
23927 	log_dump_section_hdr_t sec_hdr;
23928 	int ret = BCME_OK;
23929 	dhd_info_t *dhd_info;
23930 
23931 	if (dev) {
23932 		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
23933 		dhdp = &dhd_info->pub;
23934 	}
23935 
23936 	if (!dhdp)
23937 		return BCME_ERROR;
23938 
23939 	dhd_init_sec_hdr(&sec_hdr);
23940 
23941 	if (logdump_rtt_enable && dhdp->rtt_dbg_ring) {
23942 		ret = dhd_dump_debug_ring(dhdp, dhdp->rtt_dbg_ring,
23943 				user_buf, &sec_hdr, RTT_LOG_HDR, len, LOG_DUMP_SECTION_RTT);
23944 	}
23945 	return ret;
23946 
23947 }
23948 #endif /* EWP_RTT_LOGGING */
23949 
23950 #ifdef DHD_STATUS_LOGGING
23951 int
23952 dhd_print_status_log_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
23953 	void *fp, uint32 len, void *pos)
23954 {
23955 	dhd_info_t *dhd_info;
23956 
23957 	if (dev) {
23958 		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
23959 		dhdp = &dhd_info->pub;
23960 	}
23961 
23962 	if (!dhdp) {
23963 		return BCME_ERROR;
23964 	}
23965 
23966 	return dhd_statlog_write_logdump(dhdp, user_buf, fp, len, pos);
23967 }
23968 
23969 uint32
23970 dhd_get_status_log_len(void *ndev, dhd_pub_t *dhdp)
23971 {
23972 	dhd_info_t *dhd_info;
23973 	uint32 length = 0;
23974 
23975 	if (ndev) {
23976 		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
23977 		dhdp = &dhd_info->pub;
23978 	}
23979 
23980 	if (dhdp) {
23981 		length = dhd_statlog_get_logbuf_len(dhdp);
23982 	}
23983 
23984 	return length;
23985 }
23986 #endif /* DHD_STATUS_LOGGING */
23987 
23988 void
23989 dhd_init_sec_hdr(log_dump_section_hdr_t *sec_hdr)
23990 {
23991 	/* prep the section header */
23992 	memset(sec_hdr, 0, sizeof(*sec_hdr));
23993 	sec_hdr->magic = LOG_DUMP_MAGIC;
23994 	sec_hdr->timestamp = local_clock();
23995 }
23996 
23997 /* Must hold 'dhd_os_logdump_lock' before calling this function ! */
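/* Writes the complete debug dump to file: the timestamp banner, the DLD
 * buffer sections, the optional debug rings (ecounters, status log, RTT,
 * BCM trace), extended trap data, health check data, the concise DHD dump,
 * the cookie log and, when enabled, the PCIe flow rings. In periodic-flush
 * mode the file itself is treated as a large ring buffer.
 */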
23998 static int
23999 do_dhd_log_dump(dhd_pub_t *dhdp, log_dump_type_t *type)
24000 {
24001 	int ret = 0, i = 0;
24002 	struct file *fp = NULL;
24003 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
24004 	mm_segment_t old_fs;
24005 #endif
24006 	loff_t pos = 0;
24007 	char dump_path[128];
24008 	uint32 file_mode;
24009 	unsigned long flags = 0;
24010 	size_t log_size = 0;
24011 	size_t fspace_remain = 0;
24012 	struct kstat stat;
24013 	char time_str[128];
24014 	unsigned int len = 0;
24015 	log_dump_section_hdr_t sec_hdr;
24016 
24017 	DHD_ERROR(("%s: ENTER \n", __FUNCTION__));
24018 
24019 	DHD_GENERAL_LOCK(dhdp, flags);
24020 	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
24021 		DHD_GENERAL_UNLOCK(dhdp, flags);
24022 		DHD_ERROR(("%s: bus is down! can't collect log dump. \n", __FUNCTION__));
24023 		goto exit1;
24024 	}
24025 	DHD_BUS_BUSY_SET_IN_LOGDUMP(dhdp);
24026 	DHD_GENERAL_UNLOCK(dhdp, flags);
24027 
24028 	if ((ret = dhd_log_flush(dhdp, type)) < 0) {
24029 		goto exit1;
24030 	}
24031 	/* change to KERNEL_DS address limit */
24032 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
24033 	old_fs = get_fs();
24034 	set_fs(KERNEL_DS);
24035 #endif
24036 
24037 	dhd_get_debug_dump_file_name(NULL, dhdp, dump_path, sizeof(dump_path));
24038 
24039 	DHD_ERROR(("debug_dump_path = %s\n", dump_path));
24040 	DHD_ERROR(("DHD version: %s\n", dhd_version));
24041 	DHD_ERROR(("F/W version: %s\n", fw_version));
24042 
24043 	dhd_log_dump_buf_addr(dhdp, type);
24044 
24045 	dhd_get_time_str(dhdp, time_str, 128);
24046 
24047 	/* if this is the first time after dhd is loaded,
24048 	 * or, if periodic flush is disabled, clear the log file
24049 	 */
24050 	if (!dhdp->logdump_periodic_flush || dhdp->last_file_posn == 0)
24051 		file_mode = O_CREAT | O_WRONLY | O_SYNC | O_TRUNC;
24052 	else
24053 		file_mode = O_CREAT | O_RDWR | O_SYNC;
24054 
24055 	fp = filp_open(dump_path, file_mode, 0664);
24056 	if (IS_ERR(fp)) {
24057 		/* If android installed image, try '/data' directory */
24058 #if defined(CONFIG_X86) && defined(OEM_ANDROID)
24059 		DHD_ERROR(("%s: File open error on Installed android image, trying /data...\n",
24060 			__FUNCTION__));
24061 		snprintf(dump_path, sizeof(dump_path), "/data/" DHD_DEBUG_DUMP_TYPE);
24062 		if (!dhdp->logdump_periodic_flush) {
24063 			snprintf(dump_path + strlen(dump_path),
24064 				sizeof(dump_path) - strlen(dump_path),
24065 				"_%s", dhdp->debug_dump_time_str);
24066 		}
24067 		fp = filp_open(dump_path, file_mode, 0664);
24068 		if (IS_ERR(fp)) {
24069 			ret = PTR_ERR(fp);
24070 			DHD_ERROR(("open file error, err = %d\n", ret));
24071 			goto exit2;
24072 		}
24073 		DHD_ERROR(("debug_dump_path = %s\n", dump_path));
24074 #else
24075 		ret = PTR_ERR(fp);
24076 		DHD_ERROR(("open file error, err = %d\n", ret));
24077 		goto exit2;
24078 #endif /* CONFIG_X86 && OEM_ANDROID */
24079 	}
24080 
24081 	ret = vfs_stat(dump_path, &stat);
24082 	if (ret < 0) {
24083 		DHD_ERROR(("file stat error, err = %d\n", ret));
24084 		goto exit2;
24085 	}
24086 
24087 	/* if someone else has changed the file */
24088 	if (dhdp->last_file_posn != 0 &&
24089 			stat.size < dhdp->last_file_posn) {
24090 		dhdp->last_file_posn = 0;
24091 	}
24092 
24093 	/* XXX: periodic flush is disabled by default. If enabled,
24094 	 * only periodic flushing of the 'GENERAL' log dump buffer
24095 	 * is supported; it is not recommended to turn on periodic
24096 	 * flushing, except for developer unit tests.
24097 	 */
24097 	 */
24098 	if (dhdp->logdump_periodic_flush) {
24099 		log_size = strlen(time_str) + strlen(DHD_DUMP_LOG_HDR) + sizeof(sec_hdr);
24100 		/* calculate the amount of space required to dump all logs */
24101 		for (i = 0; i < DLD_BUFFER_NUM; ++i) {
24102 			if (*type != DLD_BUF_TYPE_ALL && i != *type)
24103 				continue;
24104 
24105 			if (g_dld_buf[i].wraparound) {
24106 				log_size += (unsigned long)g_dld_buf[i].max
24107 						- (unsigned long)g_dld_buf[i].buffer;
24108 			} else {
24109 				DHD_LOG_DUMP_BUF_LOCK(&g_dld_buf[i].lock, flags);
24110 				log_size += (unsigned long)g_dld_buf[i].present -
24111 						(unsigned long)g_dld_buf[i].front;
24112 				DHD_LOG_DUMP_BUF_UNLOCK(&g_dld_buf[i].lock, flags);
24113 			}
24114 			log_size += strlen(dld_hdrs[i].hdr_str) + sizeof(sec_hdr);
24115 
24116 			if (*type != DLD_BUF_TYPE_ALL && i == *type)
24117 				break;
24118 		}
24119 
24120 		ret = generic_file_llseek(fp, dhdp->last_file_posn, SEEK_CUR);
24121 		if (ret < 0) {
24122 			DHD_ERROR(("file seek last posn error ! err = %d \n", ret));
24123 			goto exit2;
24124 		}
24125 		pos = fp->f_pos;
24126 
24127 		/* if the max file size is reached, wrap around to the beginning of
24128 		 * the file; we're treating the file as a large ring buffer
24129 		 */
24130 		fspace_remain = logdump_max_filesize - pos;
24131 		if (log_size > fspace_remain) {
24132 			fp->f_pos -= pos;
24133 			pos = fp->f_pos;
24134 		}
24135 	}
24136 
24137 	dhd_print_time_str(0, fp, len, &pos);
24138 
24139 	for (i = 0; i < DLD_BUFFER_NUM; ++i) {
24140 
24141 		if (*type != DLD_BUF_TYPE_ALL && i != *type)
24142 			continue;
24143 
24144 		len = dhd_get_dld_len(i);
24145 		dhd_get_dld_log_dump(NULL, dhdp, 0, fp, len, i, &pos);
24146 		if (*type != DLD_BUF_TYPE_ALL)
24147 			break;
24148 	}
24149 
24150 #ifdef EWP_ECNTRS_LOGGING
24151 	if (*type == DLD_BUF_TYPE_ALL &&
24152 			logdump_ecntr_enable &&
24153 			dhdp->ecntr_dbg_ring) {
24154 		dhd_log_dump_ring_to_file(dhdp, dhdp->ecntr_dbg_ring,
24155 				fp, (unsigned long *)&pos,
24156 				&sec_hdr, ECNTRS_LOG_HDR, LOG_DUMP_SECTION_ECNTRS);
24157 	}
24158 #endif /* EWP_ECNTRS_LOGGING */
24159 
24160 #ifdef DHD_STATUS_LOGGING
24161 	if (dhdp->statlog) {
24162 		/* write the statlog */
24163 		len = dhd_get_status_log_len(NULL, dhdp);
24164 		if (len) {
24165 			if (dhd_print_status_log_data(NULL, dhdp, 0, fp,
24166 				len, &pos) < 0) {
24167 				goto exit2;
24168 			}
24169 		}
24170 	}
24171 #endif /* DHD_STATUS_LOGGING */
24172 
24173 #ifdef DHD_STATUS_LOGGING
24174 	if (dhdp->statlog) {
24175 		dhd_print_buf_addr(dhdp, "statlog_logbuf", dhd_statlog_get_logbuf(dhdp),
24176 			dhd_statlog_get_logbuf_len(dhdp));
24177 	}
24178 #endif /* DHD_STATUS_LOGGING */
24179 
24180 #ifdef EWP_RTT_LOGGING
24181 	if (*type == DLD_BUF_TYPE_ALL &&
24182 			logdump_rtt_enable &&
24183 			dhdp->rtt_dbg_ring) {
24184 		dhd_log_dump_ring_to_file(dhdp, dhdp->rtt_dbg_ring,
24185 				fp, (unsigned long *)&pos,
24186 				&sec_hdr, RTT_LOG_HDR, LOG_DUMP_SECTION_RTT);
24187 	}
24188 #endif /* EWP_RTT_LOGGING */
24189 
24190 #ifdef EWP_BCM_TRACE
24191 	if (*type == DLD_BUF_TYPE_ALL &&
24192 		dhdp->bcm_trace_dbg_ring) {
24193 		dhd_log_dump_ring_to_file(dhdp, dhdp->bcm_trace_dbg_ring,
24194 				fp, (unsigned long *)&pos,
24195 				&sec_hdr, BCM_TRACE_LOG_HDR, LOG_DUMP_SECTION_BCM_TRACE);
24196 	}
24197 #endif /* EWP_BCM_TRACE */
24198 
24199 #ifdef BCMPCIE
24200 	len = dhd_get_ext_trap_len(NULL, dhdp);
24201 	if (len) {
24202 		if (dhd_print_ext_trap_data(NULL, dhdp, 0, fp, len, &pos) < 0)
24203 			goto exit2;
24204 	}
24205 #endif /* BCMPCIE */
24206 
24207 #if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
24208 	len = dhd_get_health_chk_len(NULL, dhdp);
24209 	if (len) {
24210 		if (dhd_print_health_chk_data(NULL, dhdp, 0, fp, len, &pos) < 0)
24211 			goto exit2;
24212 	}
24213 #endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */
24214 
24215 	len = dhd_get_dhd_dump_len(NULL, dhdp);
24216 	if (len) {
24217 		if (dhd_print_dump_data(NULL, dhdp, 0, fp, len, &pos) < 0)
24218 			goto exit2;
24219 	}
24220 
24221 	len = dhd_get_cookie_log_len(NULL, dhdp);
24222 	if (len) {
24223 		if (dhd_print_cookie_data(NULL, dhdp, 0, fp, len, &pos) < 0)
24224 			goto exit2;
24225 	}
24226 
24227 #ifdef DHD_DUMP_PCIE_RINGS
24228 	len = dhd_get_flowring_len(NULL, dhdp);
24229 	if (len) {
24230 		if (dhd_print_flowring_data(NULL, dhdp, 0, fp, len, &pos) < 0)
24231 			goto exit2;
24232 	}
24233 #endif
24234 
24235 	if (dhdp->logdump_periodic_flush) {
24236 		/* store the last position written to in the file for future use */
24237 		dhdp->last_file_posn = pos;
24238 	}
24239 
24240 exit2:
24241 	if (!IS_ERR(fp) && fp != NULL) {
24242 		filp_close(fp, NULL);
24243 		DHD_ERROR(("%s: Finished writing log dump to file - '%s' \n",
24244 				__FUNCTION__, dump_path));
24245 	}
24246 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
24247 	set_fs(old_fs);
24248 #endif
24249 exit1:
24250 	if (type) {
24251 		MFREE(dhdp->osh, type, sizeof(*type));
24252 	}
24253 	DHD_GENERAL_LOCK(dhdp, flags);
24254 	DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp);
24255 	dhd_os_busbusy_wake(dhdp);
24256 	DHD_GENERAL_UNLOCK(dhdp, flags);
24257 
24258 #ifdef DHD_DUMP_MNGR
24259 	if (ret >= 0) {
24260 		dhd_dump_file_manage_enqueue(dhdp, dump_path, DHD_DEBUG_DUMP_TYPE);
24261 	}
24262 #endif /* DHD_DUMP_MNGR */
24263 
24264 	return (ret < 0) ? BCME_ERROR : BCME_OK;
24265 }
24266 #endif /* DHD_LOG_DUMP */
24267 
24268 /* This function writes data to the file pointed to by fp, OR,
24269  * when fp is NULL, copies data to the user buffer sent by the upper layer (HAL).
24270  */
24271 int
24272 dhd_export_debug_data(void *mem_buf, void *fp, const void *user_buf, uint32 buf_len, void *pos)
24273 {
24274 	int ret = BCME_OK;
24275 
24276 	if (fp) {
24277 		ret = vfs_write(fp, mem_buf, buf_len, (loff_t *)pos);
24278 		if (ret < 0) {
24279 			DHD_ERROR(("write file error, err = %d\n", ret));
24280 			goto exit;
24281 		}
24282 	} else {
24283 #ifdef CONFIG_COMPAT
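		/* A 32-bit process on a 64-bit kernel passes a compat pointer,
		 * which must be converted with compat_ptr() before copy_to_user()
		 */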
24284 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
24285 		if (in_compat_syscall())
24286 #else
24287 		if (is_compat_task())
24288 #endif /* LINUX_VER >= 4.6 */
24289 		{
24290 			void *usr_ptr = compat_ptr((uintptr_t)user_buf);
24291 			ret = copy_to_user((void *)((uintptr_t)usr_ptr + (*(int *)pos)),
24292 				mem_buf, buf_len);
24293 			if (ret) {
24294 				DHD_ERROR(("failed to copy into user buffer : %d\n", ret));
24295 				goto exit;
24296 			}
24297 		}
24298 		else
24299 #endif /* CONFIG_COMPAT */
24300 		{
24301 			ret = copy_to_user((void *)((uintptr_t)user_buf + (*(int *)pos)),
24302 				mem_buf, buf_len);
24303 			if (ret) {
24304 				DHD_ERROR(("failed to copy into user buffer : %d\n", ret));
24305 				goto exit;
24306 			}
24307 		}
24308 		(*(int *)pos) += buf_len;
24309 	}
24310 exit:
24311 	return ret;
24312 }
24313 
24314 #ifdef BCM_ROUTER_DHD
24315 void dhd_schedule_trap_log_dump(dhd_pub_t *dhdp,
24316 	uint8 *buf, uint32 size)
24317 {
24318 	dhd_write_file_t *wf = NULL;
24319 	wf = (dhd_write_file_t *)MALLOC(dhdp->osh, sizeof(dhd_write_file_t));
24320 	if (wf == NULL) {
24321 		DHD_ERROR(("%s: dhd write file memory allocation failed\n", __FUNCTION__));
24322 		return;
24323 	}
24324 	snprintf(wf->file_path, sizeof(wf->file_path), "%s", "/tmp/failed_if.txt");
24325 	wf->file_flags = O_CREAT | O_WRONLY | O_SYNC;
24326 	wf->buf = buf;
24327 	wf->bufsize = size;
24328 	dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)wf,
24329 		DHD_WQ_WORK_INFORM_DHD_MON, dhd_inform_dhd_monitor_handler,
24330 		DHD_WQ_WORK_PRIORITY_HIGH);
24331 }
24332 
24333 /* Returns the task_struct of the userspace process running with the given name */
24334 static struct task_struct *
24335 _get_task_info(const char *pname)
24336 {
24337 	struct task_struct *task;
24338 	if (!pname)
24339 		return NULL;
24340 
24341 	for_each_process(task) {
24342 		if (strcmp(pname, task->comm) == 0)
24343 			return task;
24344 	}
24345 
24346 	return NULL;
24347 }
24348 
24349 #define DHD_MONITOR_NS	"dhd_monitor"
24350 extern void emergency_restart(void);
24351 
24352 static void
24353 dhd_inform_dhd_monitor_handler(void *handle, void *event_info, u8 event)
24354 {
24355 	dhd_info_t *dhd = handle;
24356 	dhd_write_file_t *wf = event_info;
24357 	struct task_struct *monitor_task;
24358 	if (!dhd) {
24359 		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
24360 		return;
24361 	}
24362 	if (!event_info) {
24363 		DHD_ERROR(("%s: File info is NULL\n", __FUNCTION__));
24364 		return;
24365 	}
24366 	if (!wf->buf) {
24367 		DHD_ERROR(("%s: Unable to get failed interface name\n", __FUNCTION__));
24368 		goto exit;
24369 	}
24370 	if (write_file(wf->file_path, wf->file_flags, wf->buf, wf->bufsize)) {
24371 		DHD_ERROR(("%s: writing to the file failed\n", __FUNCTION__));
24372 	}
24373 exit:
24374 	MFREE(dhd->pub.osh, wf, sizeof(dhd_write_file_t));
24375 
24376 	/* check if dhd_monitor is running */
24377 	monitor_task = _get_task_info(DHD_MONITOR_NS);
24378 	if (monitor_task == NULL) {
24379 		/* If dhd_monitor is not running, handle recovery from here */
24380 
24381 		char *val = nvram_get("watchdog");
24382 		if (val && bcm_atoi(val)) {
24383 			/* watchdog enabled, so reboot */
24384 			DHD_ERROR(("%s: Dongle(wl%d) trap detected. Restarting the system\n",
24385 				__FUNCTION__, dhd->unit));
24386 
24387 			mdelay(1000);
24388 			emergency_restart();
24389 			while (1)
24390 				cpu_relax();
24391 		} else {
24392 			DHD_ERROR(("%s: Dongle(wl%d) trap detected. No watchdog.\n",
24393 			   __FUNCTION__, dhd->unit));
24394 		}
24395 
24396 		return;
24397 	}
24398 
24399 	/* If monitor daemon is running, let's signal the monitor for recovery */
24400 	DHD_ERROR(("%s: Dongle(wl%d) trap detected. Send signal to dhd_monitor.\n",
24401 		__FUNCTION__, dhd->unit));
24402 
24403 	send_sig_info(SIGUSR1, (void *)1L, monitor_task);
24404 }
24405 #endif /* BCM_ROUTER_DHD */
24406 
24407 #ifdef BCMDBG
24408 #define DUMPMAC_BUF_SZ (128 * 1024)
24409 #define DUMPMAC_FILENAME_SZ 32
24410 
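/* Deferred work handler: halt the dongle via "bus:disconnect" and then dump
 * the PSMr/PSMx MAC registers and the SVMP memories to /tmp files tagged
 * with a random 16-bit signature, or just print them when no dump buffer
 * can be allocated. On router platforms the file dumps are skipped, since
 * dhd_monitor collects them instead.
 */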
24411 static void
24412 _dhd_schedule_macdbg_dump(void *handle, void *event_info, u8 event)
24413 {
24414 	dhd_info_t *dhd = handle;
24415 	dhd_pub_t *dhdp = &dhd->pub;
24416 #ifndef BCM_ROUTER_DHD
24417 	char *dumpbuf = NULL;
24418 	int dumpbuf_len = 0;
24419 	uint16 dump_signature;
24420 	char dumpfilename[DUMPMAC_FILENAME_SZ] = {0, };
24421 #endif /* BCM_ROUTER_DHD */
24422 
24423 	ASSERT(event == DHD_WQ_WORK_MACDBG);
24424 	BCM_REFERENCE(event_info);
24425 
24426 	DHD_ERROR(("%s: Dongle(wl%d) macreg dump scheduled\n",
24427 		__FUNCTION__, dhd->unit));
24428 
24429 	DHD_OS_WAKE_LOCK(dhdp);
24430 
24431 	/* Make sure the dongle stops running to avoid a race condition when reading MAC registers */
24432 	(void) dhd_wl_ioctl_set_intiovar(dhdp, "bus:disconnect", 99, WLC_SET_VAR, TRUE, 0);
24433 
24434 	/* In router, skip macregs dump as dhd_monitor will dump them */
24435 #ifndef BCM_ROUTER_DHD
24436 	dumpbuf = (char *)MALLOCZ(dhdp->osh, DUMPMAC_BUF_SZ);
24437 	if (dumpbuf) {
24438 		/* Write macdump to a file */
24439 
24440 		/* Get dump file signature */
24441 		dump_signature = (uint16)OSL_RAND();
24442 
24443 		/* PSMr */
24444 		if (dhd_macdbg_dumpmac(dhdp, dumpbuf, DUMPMAC_BUF_SZ,
24445 			&dumpbuf_len, FALSE) == BCME_OK) {
24446 			snprintf(dumpfilename, DUMPMAC_FILENAME_SZ,
24447 				"/tmp/d11reg_dump_%04X.txt", dump_signature);
24448 			DHD_ERROR(("%s: PSMr macreg dump to %s\n", __FUNCTION__, dumpfilename));
24449 			/* Write to a file */
24450 			if (write_file(dumpfilename, (O_CREAT | O_WRONLY | O_SYNC),
24451 				dumpbuf, dumpbuf_len)) {
24452 				DHD_ERROR(("%s: writing mac dump to the file failed\n",
24453 					__FUNCTION__));
24454 			}
24455 			memset(dumpbuf, 0, DUMPMAC_BUF_SZ);
24456 			memset(dumpfilename, 0, DUMPMAC_FILENAME_SZ);
24457 			dumpbuf_len = 0;
24458 		}
24459 
24460 		/* PSMx */
24461 		if (dhd_macdbg_dumpmac(dhdp, dumpbuf, DUMPMAC_BUF_SZ,
24462 			&dumpbuf_len, TRUE) == BCME_OK) {
24463 			snprintf(dumpfilename, DUMPMAC_FILENAME_SZ,
24464 				"/tmp/d11regx_dump_%04X.txt", dump_signature);
24465 			DHD_ERROR(("%s: PSMx macreg dump to %s\n", __FUNCTION__, dumpfilename));
24466 			/* Write to a file */
24467 			if (write_file(dumpfilename, (O_CREAT | O_WRONLY | O_SYNC),
24468 				dumpbuf, dumpbuf_len)) {
24469 				DHD_ERROR(("%s: writing mac dump to the file failed\n",
24470 					__FUNCTION__));
24471 			}
24472 			memset(dumpbuf, 0, DUMPMAC_BUF_SZ);
24473 			memset(dumpfilename, 0, DUMPMAC_FILENAME_SZ);
24474 			dumpbuf_len = 0;
24475 		}
24476 
24477 		/* SVMP */
24478 		if (dhd_macdbg_dumpsvmp(dhdp, dumpbuf, DUMPMAC_BUF_SZ,
24479 			&dumpbuf_len) == BCME_OK) {
24480 			snprintf(dumpfilename, DUMPMAC_FILENAME_SZ,
24481 				"/tmp/svmp_dump_%04X.txt", dump_signature);
24482 			DHD_ERROR(("%s: SVMP mems dump to %s\n", __FUNCTION__, dumpfilename));
24483 			/* Write to a file */
24484 			if (write_file(dumpfilename, (O_CREAT | O_WRONLY | O_SYNC),
24485 				dumpbuf, dumpbuf_len)) {
24486 				DHD_ERROR(("%s: writing svmp dump to the file failed\n",
24487 					__FUNCTION__));
24488 			}
24489 			memset(dumpbuf, 0, DUMPMAC_BUF_SZ);
24490 			memset(dumpfilename, 0, DUMPMAC_FILENAME_SZ);
24491 			dumpbuf_len = 0;
24492 		}
24493 
24494 		MFREE(dhdp->osh, dumpbuf, DUMPMAC_BUF_SZ);
24495 	} else {
24496 		DHD_ERROR(("%s: print macdump\n", __FUNCTION__));
24497 		/* Just printf the dumps */
24498 		(void) dhd_macdbg_dumpmac(dhdp, NULL, 0, NULL, FALSE); /* PSMr */
24499 		(void) dhd_macdbg_dumpmac(dhdp, NULL, 0, NULL, TRUE); /* PSMx */
24500 		(void) dhd_macdbg_dumpsvmp(dhdp, NULL, 0, NULL);
24501 	}
24502 #endif /* BCM_ROUTER_DHD */
24503 
24504 	DHD_OS_WAKE_UNLOCK(dhdp);
24505 	dhd_deferred_work_set_skip(dhd->dhd_deferred_wq,
24506 		DHD_WQ_WORK_MACDBG, FALSE);
24507 }
24508 
24509 void
24510 dhd_schedule_macdbg_dump(dhd_pub_t *dhdp)
24511 {
24512 	DHD_ERROR(("%s: Dongle(wl%d) schedule macreg dump\n",
24513 		__FUNCTION__, dhdp->info->unit));
24514 
24515 	dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, NULL,
24516 		DHD_WQ_WORK_MACDBG, _dhd_schedule_macdbg_dump, DHD_WQ_WORK_PRIORITY_LOW);
24517 	dhd_deferred_work_set_skip(dhdp->info->dhd_deferred_wq,
24518 		DHD_WQ_WORK_MACDBG, TRUE);
24519 }
24520 #endif /* BCMDBG */
24521 
24522 /*
24523  * This call is to get the memdump size so that
24524  * halutil can allocate a buffer of that size in user space.
24525  */
24526 int
24527 dhd_os_socram_dump(struct net_device *dev, uint32 *dump_size)
24528 {
24529 	int ret = BCME_OK;
24530 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
24531 	dhd_pub_t *dhdp = &dhd->pub;
24532 
24533 	if (dhdp->busstate == DHD_BUS_DOWN) {
24534 		DHD_ERROR(("%s: bus is down\n", __FUNCTION__));
24535 		return BCME_ERROR;
24536 	}
24537 
24538 	if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) {
24539 		DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
24540 			__FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
24541 		return BCME_ERROR;
24542 	}
24543 #ifdef DHD_PCIE_RUNTIMEPM
24544 	dhdpcie_runtime_bus_wake(dhdp, TRUE, __builtin_return_address(0));
24545 #endif /* DHD_PCIE_RUNTIMEPM */
24546 	ret = dhd_common_socram_dump(dhdp);
24547 	if (ret == BCME_OK) {
24548 		*dump_size = dhdp->soc_ram_length;
24549 	}
24550 	return ret;
24551 }
24552 
24553 /*
24554  * This is to get the actual memdump after getting the memdump size.
24555  */
24556 int
24557 dhd_os_get_socram_dump(struct net_device *dev, char **buf, uint32 *size)
24558 {
24559 	int ret = BCME_OK;
24560 	int orig_len = 0;
24561 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
24562 	dhd_pub_t *dhdp = &dhd->pub;
24563 	if (buf == NULL)
24564 		return BCME_ERROR;
24565 	orig_len = *size;
24566 	if (dhdp->soc_ram) {
24567 		if (orig_len >= dhdp->soc_ram_length) {
24568 			*buf = dhdp->soc_ram;
24569 			*size = dhdp->soc_ram_length;
24570 		} else {
24571 			ret = BCME_BUFTOOSHORT;
24572 			DHD_ERROR(("The length of the buffer is too short"
24573 				" to save the memory dump with %d\n", dhdp->soc_ram_length));
24574 		}
24575 	} else {
24576 		DHD_ERROR(("socram_dump is not ready to get\n"));
24577 		ret = BCME_NOTREADY;
24578 	}
24579 	return ret;
24580 }
24581 
24582 #ifdef EWP_RTT_LOGGING
24583 uint32
24584 dhd_get_rtt_len(void *ndev, dhd_pub_t *dhdp)
24585 {
24586 	dhd_info_t *dhd_info;
24587 	log_dump_section_hdr_t sec_hdr;
24588 	int length = 0;
24589 	dhd_dbg_ring_t *ring;
24590 
24591 	if (ndev) {
24592 		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
24593 		dhdp = &dhd_info->pub;
24594 	}
24595 
24596 	if (!dhdp)
24597 		return length;
24598 
24599 	if (logdump_rtt_enable && dhdp->rtt_dbg_ring) {
24600 		ring = (dhd_dbg_ring_t *)dhdp->rtt_dbg_ring;
24601 		length = ring->ring_size + strlen(RTT_LOG_HDR) + sizeof(sec_hdr);
24602 	}
24603 	return length;
24604 }
24605 #endif /* EWP_RTT_LOGGING */
24606 
24607 int
24608 dhd_os_get_version(struct net_device *dev, bool dhd_ver, char **buf, uint32 size)
24609 {
24610 	char *fw_str;
24611 
24612 	if (size == 0)
24613 		return BCME_BADARG;
24614 
24615 	fw_str = strstr(info_string, "Firmware: ");
24616 	if (fw_str == NULL) {
24617 		return BCME_ERROR;
24618 	}
24619 
24620 	bzero(*buf, size);
24621 	if (dhd_ver) {
24622 		strlcpy(*buf, dhd_version, size);
24623 	} else {
24624 		strlcpy(*buf, fw_str, size);
24625 	}
24626 	return BCME_OK;
24627 }
24628 
24629 #ifdef DHD_PKT_LOGGING
24630 int
24631 dhd_os_get_pktlog_dump(void *dev, const void *user_buf, uint32 len)
24632 {
24633 	int ret = BCME_OK;
24634 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
24635 	dhd_pub_t *dhdp = &dhd->pub;
24636 	if (user_buf == NULL) {
24637 		DHD_ERROR(("%s(): user buffer is NULL\n", __FUNCTION__));
24638 		return BCME_ERROR;
24639 	}
24640 
24641 	ret = dhd_pktlog_dump_write_memory(dhdp, user_buf, len);
24642 	if (ret < 0) {
24643 		DHD_ERROR(("%s(): fail to dump pktlog, err = %d\n", __FUNCTION__, ret));
24644 		return ret;
24645 	}
24646 	return ret;
24647 }
24648 
24649 uint32
24650 dhd_os_get_pktlog_dump_size(struct net_device *dev)
24651 {
24652 	uint32 size = 0;
24653 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
24654 	dhd_pub_t *dhdp = &dhd->pub;
24655 
24656 	size = dhd_pktlog_get_dump_length(dhdp);
24657 	if (size == 0) {
24658 		DHD_ERROR(("%s(): fail to get pktlog size, err = %d\n", __FUNCTION__, size));
24659 	}
24660 	return size;
24661 }
24662 
24663 void
24664 dhd_os_get_pktlogdump_filename(struct net_device *dev, char *dump_path, int len)
24665 {
24666 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
24667 	dhd_pub_t *dhdp = &dhd->pub;
24668 
24669 	dhd_pktlog_get_filename(dhdp, dump_path, len);
24670 }
24671 #endif /* DHD_PKT_LOGGING */
24672 #ifdef DNGL_AXI_ERROR_LOGGING
24673 int
24674 dhd_os_get_axi_error_dump(void *dev, const void *user_buf, uint32 len)
24675 {
24676 	int ret = BCME_OK;
24677 	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
24678 	dhd_pub_t *dhdp = &dhd->pub;
24679 	loff_t pos = 0;
24680 	if (user_buf == NULL) {
24681 		DHD_ERROR(("%s(): user buffer is NULL\n", __FUNCTION__));
24682 		return BCME_ERROR;
24683 	}
24684 
24685 	ret = dhd_export_debug_data((char *)dhdp->axi_err_dump,
24686 			NULL, user_buf, sizeof(dhd_axi_error_dump_t), &pos);
24687 
24688 	if (ret < 0) {
24689 		DHD_ERROR(("%s(): fail to dump pktlog, err = %d\n", __FUNCTION__, ret));
24690 		return ret;
24691 	}
24692 	return ret;
24693 }
24694 
24695 int
dhd_os_get_axi_error_dump_size(struct net_device * dev)24696 dhd_os_get_axi_error_dump_size(struct net_device *dev)
24697 {
24698 	int size = -1;
24699 
24700 	size = sizeof(dhd_axi_error_dump_t);
24701 	if (size < 0) {
24702 		DHD_ERROR(("%s(): fail to get axi error size, err = %d\n", __FUNCTION__, size));
24703 	}
24704 	return size;
24705 }
24706 
24707 void
dhd_os_get_axi_error_filename(struct net_device * dev,char * dump_path,int len)24708 dhd_os_get_axi_error_filename(struct net_device *dev, char *dump_path, int len)
24709 {
24710 	snprintf(dump_path, len, "%s",
24711 		DHD_COMMON_DUMP_PATH DHD_DUMP_AXI_ERROR_FILENAME);
24712 }
24713 #endif /* DNGL_AXI_ERROR_LOGGING */
#ifdef DHD_WMF
/* Returns interface specific WMF configuration */
dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(idx < DHD_MAX_IFS);

	ifp = dhd->iflist[idx];
	return &ifp->wmf;
}
#endif /* DHD_WMF */

#if defined(BCM_ROUTER_DHD)
void traffic_mgmt_pkt_set_prio(dhd_pub_t *dhdp, void * pktbuf)
{
	struct ether_header *eh;
	struct ethervlan_header *evh;
	uint8 *pktdata, *ip_body;
	uint8  dwm_filter;
	uint8 tos_tc = 0;
	uint8 dscp   = 0;
	pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
	eh = (struct ether_header *) pktdata;
	ip_body = NULL;

	if (dhdp->dhd_tm_dwm_tbl.dhd_dwm_enabled) {
		if (eh->ether_type == hton16(ETHER_TYPE_8021Q)) {
			evh = (struct ethervlan_header *)eh;
			if ((evh->ether_type == hton16(ETHER_TYPE_IP)) ||
				(evh->ether_type == hton16(ETHER_TYPE_IPV6))) {
				ip_body = pktdata + sizeof(struct ethervlan_header);
			}
		} else if ((eh->ether_type == hton16(ETHER_TYPE_IP)) ||
			(eh->ether_type == hton16(ETHER_TYPE_IPV6))) {
			ip_body = pktdata + sizeof(struct ether_header);
		}
		if (ip_body) {
			tos_tc = IP_TOS46(ip_body);
			dscp = tos_tc >> IPV4_TOS_DSCP_SHIFT;
		}

		if (dscp < DHD_DWM_TBL_SIZE) {
			dwm_filter = dhdp->dhd_tm_dwm_tbl.dhd_dwm_tbl[dscp];
			if (DHD_TRF_MGMT_DWM_IS_FILTER_SET(dwm_filter)) {
				PKTSETPRIO(pktbuf, DHD_TRF_MGMT_DWM_PRIO(dwm_filter));
			}
		}
	}
}
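
/*
 * Worked example (illustrative only): for an IPv4 TOS byte of 0xB8
 * (DSCP 46, Expedited Forwarding), tos_tc >> IPV4_TOS_DSCP_SHIFT yields
 * dscp = 46, which indexes dhd_dwm_tbl[46]; if that entry has its filter
 * bit set, the packet priority is overridden with
 * DHD_TRF_MGMT_DWM_PRIO(dwm_filter).
 */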
#endif /* BCM_ROUTER_DHD */

bool dhd_sta_associated(dhd_pub_t *dhdp, uint32 bssidx, uint8 *mac)
{
	return dhd_find_sta(dhdp, bssidx, mac) ? TRUE : FALSE;
}

#ifdef DHD_L2_FILTER
arp_table_t*
dhd_get_ifp_arp_table_handle(dhd_pub_t *dhdp, uint32 bssidx)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(bssidx < DHD_MAX_IFS);

	ifp = dhd->iflist[bssidx];
	return ifp->phnd_arp_table;
}

int dhd_get_parp_status(dhd_pub_t *dhdp, uint32 idx)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(idx < DHD_MAX_IFS);

	ifp = dhd->iflist[idx];

	if (ifp)
		return ifp->parp_enable;
	else
		return FALSE;
}

/* Set interface specific proxy arp configuration */
int dhd_set_parp_status(dhd_pub_t *dhdp, uint32 idx, int val)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;
	ASSERT(idx < DHD_MAX_IFS);
	ifp = dhd->iflist[idx];

	if (!ifp)
		return BCME_ERROR;

	/* At present all 3 variables are being
	 * handled at once
	 */
	ifp->parp_enable = val;
	ifp->parp_discard = val;
	ifp->parp_allnode = val;

	/* Flush ARP entries when disabled */
	if (val == FALSE) {
		bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE, NULL,
			FALSE, dhdp->tickcnt);
	}
	return BCME_OK;
}

bool dhd_parp_discard_is_enabled(dhd_pub_t *dhdp, uint32 idx)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(idx < DHD_MAX_IFS);

	ifp = dhd->iflist[idx];

	ASSERT(ifp);
	return ifp->parp_discard;
}

bool
dhd_parp_allnode_is_enabled(dhd_pub_t *dhdp, uint32 idx)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(idx < DHD_MAX_IFS);

	ifp = dhd->iflist[idx];

	ASSERT(ifp);

	return ifp->parp_allnode;
}

int dhd_get_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(idx < DHD_MAX_IFS);

	ifp = dhd->iflist[idx];

	ASSERT(ifp);

	return ifp->dhcp_unicast;
}

int dhd_set_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx, int val)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;
	ASSERT(idx < DHD_MAX_IFS);
	ifp = dhd->iflist[idx];

	ASSERT(ifp);

	ifp->dhcp_unicast = val;
	return BCME_OK;
}

int dhd_get_block_ping_status(dhd_pub_t *dhdp, uint32 idx)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(idx < DHD_MAX_IFS);

	ifp = dhd->iflist[idx];

	ASSERT(ifp);

	return ifp->block_ping;
}

int dhd_set_block_ping_status(dhd_pub_t *dhdp, uint32 idx, int val)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;
	ASSERT(idx < DHD_MAX_IFS);
	ifp = dhd->iflist[idx];

	ASSERT(ifp);

	ifp->block_ping = val;
	/* Disable rx_pkt_chain feature for interface if block_ping option is
	 * enabled
	 */
	dhd_update_rx_pkt_chainable_state(dhdp, idx);
	return BCME_OK;
}

int dhd_get_grat_arp_status(dhd_pub_t *dhdp, uint32 idx)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(idx < DHD_MAX_IFS);

	ifp = dhd->iflist[idx];

	ASSERT(ifp);

	return ifp->grat_arp;
}

int dhd_set_grat_arp_status(dhd_pub_t *dhdp, uint32 idx, int val)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;
	ASSERT(idx < DHD_MAX_IFS);
	ifp = dhd->iflist[idx];

	ASSERT(ifp);

	ifp->grat_arp = val;

	return BCME_OK;
}

int dhd_get_block_tdls_status(dhd_pub_t *dhdp, uint32 idx)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(idx < DHD_MAX_IFS);

	ifp = dhd->iflist[idx];

	ASSERT(ifp);

	return ifp->block_tdls;
}

int dhd_set_block_tdls_status(dhd_pub_t *dhdp, uint32 idx, int val)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;
	ASSERT(idx < DHD_MAX_IFS);
	ifp = dhd->iflist[idx];

	ASSERT(ifp);

	ifp->block_tdls = val;

	return BCME_OK;
}
#endif /* DHD_L2_FILTER */

#if defined(SET_XPS_CPUS)
int dhd_xps_cpus_enable(struct net_device *net, int enable)
{
	dhd_info_t *dhd = DHD_DEV_INFO(net);
	dhd_if_t *ifp;
	int ifidx;
	char * XPS_CPU_SETBUF;

	ifidx = dhd_net2idx(dhd, net);
	if (ifidx == DHD_BAD_IF) {
		DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
		return -ENODEV;
	}

	if (!dhd->pub.conf->xps_cpus)
		return -ENODEV;

	if (ifidx == PRIMARY_INF) {
		if (dhd->pub.op_mode == DHD_FLAG_IBSS_MODE) {
			DHD_INFO(("%s : set for IBSS.\n", __FUNCTION__));
			XPS_CPU_SETBUF = RPS_CPUS_MASK_IBSS;
		} else {
			DHD_INFO(("%s : set for BSS.\n", __FUNCTION__));
			XPS_CPU_SETBUF = RPS_CPUS_MASK;
		}
	} else if (ifidx == VIRTUAL_INF) {
		DHD_INFO(("%s : set for P2P.\n", __FUNCTION__));
		XPS_CPU_SETBUF = RPS_CPUS_MASK_P2P;
	} else {
		DHD_ERROR(("%s : Invalid index : %d.\n", __FUNCTION__, ifidx));
		return -EINVAL;
	}

	ifp = dhd->iflist[ifidx];
	if (ifp) {
		if (enable) {
			DHD_INFO(("%s : set xps_cpus as [%s]\n", __FUNCTION__, XPS_CPU_SETBUF));
			custom_xps_map_set(ifp->net, XPS_CPU_SETBUF, strlen(XPS_CPU_SETBUF));
		} else {
			custom_xps_map_clear(ifp->net);
		}
	} else {
		DHD_ERROR(("%s : ifp is NULL!!\n", __FUNCTION__));
		return -ENODEV;
	}
	return BCME_OK;
}

int custom_xps_map_set(struct net_device *net, char *buf, size_t len)
{
	cpumask_var_t mask;
	int err;

	DHD_INFO(("%s : Entered.\n", __FUNCTION__));

	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		DHD_ERROR(("%s : alloc_cpumask_var fail.\n", __FUNCTION__));
		return -ENOMEM;
	}

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		DHD_ERROR(("%s : bitmap_parse fail.\n", __FUNCTION__));
		return err;
	}

	err = netif_set_xps_queue(net, mask, 0);

	free_cpumask_var(mask);

	if (0 == err)
		WL_MSG(net->name, "Done. mapping cpu\n");

	return err;
}
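
/*
 * Example (assumption, mask values are not defined here): bitmap_parse()
 * expects a hex CPU-mask string, so a mask of "6" (binary 0110) would steer
 * transmit queue 0 of the interface to CPUs 1 and 2:
 *
 *	custom_xps_map_set(net, "6", strlen("6"));
 */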

void custom_xps_map_clear(struct net_device *net)
{
	struct xps_dev_maps *dev_maps;

	DHD_INFO(("%s : Entered.\n", __FUNCTION__));

	rcu_read_lock();
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
	dev_maps = rcu_dereference(net->xps_cpus_map);
#else
	dev_maps = rcu_dereference(net->xps_maps);
#endif
	rcu_read_unlock();

	if (dev_maps) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
		RCU_INIT_POINTER(net->xps_cpus_map, NULL);
#else
		RCU_INIT_POINTER(net->xps_maps, NULL);
#endif
		kfree_rcu(dev_maps, rcu);
		DHD_INFO(("%s : xps_cpus map clear.\n", __FUNCTION__));
	}
}
#endif /* SET_XPS_CPUS */

#if defined(SET_RPS_CPUS)
int dhd_rps_cpus_enable(struct net_device *net, int enable)
{
	dhd_info_t *dhd = DHD_DEV_INFO(net);
	dhd_if_t *ifp;
	int ifidx;
	char * RPS_CPU_SETBUF;

	ifidx = dhd_net2idx(dhd, net);
	if (ifidx == DHD_BAD_IF) {
		DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
		return -ENODEV;
	}

	if (!dhd->pub.conf->rps_cpus)
		return -ENODEV;

	if (ifidx == PRIMARY_INF) {
		if (dhd->pub.op_mode == DHD_FLAG_IBSS_MODE) {
			DHD_INFO(("%s : set for IBSS.\n", __FUNCTION__));
			RPS_CPU_SETBUF = RPS_CPUS_MASK_IBSS;
		} else {
			DHD_INFO(("%s : set for BSS.\n", __FUNCTION__));
			RPS_CPU_SETBUF = RPS_CPUS_MASK;
		}
	} else if (ifidx == VIRTUAL_INF) {
		DHD_INFO(("%s : set for P2P.\n", __FUNCTION__));
		RPS_CPU_SETBUF = RPS_CPUS_MASK_P2P;
	} else {
		DHD_ERROR(("%s : Invalid index : %d.\n", __FUNCTION__, ifidx));
		return -EINVAL;
	}

	ifp = dhd->iflist[ifidx];
	if (ifp) {
		if (enable) {
			DHD_INFO(("%s : set rps_cpus as [%s]\n", __FUNCTION__, RPS_CPU_SETBUF));
			custom_rps_map_set(ifp->net->_rx, RPS_CPU_SETBUF, strlen(RPS_CPU_SETBUF));
		} else {
			custom_rps_map_clear(ifp->net->_rx);
		}
	} else {
		DHD_ERROR(("%s : ifp is NULL!!\n", __FUNCTION__));
		return -ENODEV;
	}
	return BCME_OK;
}

int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len)
{
	struct rps_map *old_map, *map;
	cpumask_var_t mask;
	int err, cpu, i;
	static DEFINE_SPINLOCK(rps_map_lock);

	DHD_INFO(("%s : Entered.\n", __FUNCTION__));

	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		DHD_ERROR(("%s : alloc_cpumask_var fail.\n", __FUNCTION__));
		return -ENOMEM;
	}

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		DHD_ERROR(("%s : bitmap_parse fail.\n", __FUNCTION__));
		return err;
	}

	map = kzalloc(max_t(unsigned int,
		RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
		GFP_KERNEL);
	if (!map) {
		free_cpumask_var(mask);
		DHD_ERROR(("%s : map malloc fail.\n", __FUNCTION__));
		return -ENOMEM;
	}

	i = 0;
	for_each_cpu(cpu, mask) {
		map->cpus[i++] = cpu;
	}

	if (i) {
		map->len = i;
	} else {
		kfree(map);
		map = NULL;
		free_cpumask_var(mask);
		DHD_ERROR(("%s : mapping cpu fail.\n", __FUNCTION__));
		return -1;
	}

	spin_lock(&rps_map_lock);
	old_map = rcu_dereference_protected(queue->rps_map,
		lockdep_is_held(&rps_map_lock));
	rcu_assign_pointer(queue->rps_map, map);
	spin_unlock(&rps_map_lock);

	if (map) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0))
		static_key_slow_inc(&rps_needed.key);
#else
		static_key_slow_inc(&rps_needed);
#endif
	}
	if (old_map) {
		kfree_rcu(old_map, rcu);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0))
		static_key_slow_dec(&rps_needed.key);
#else
		static_key_slow_dec(&rps_needed);
#endif
	}
	free_cpumask_var(mask);

	DHD_INFO(("%s : Done. mapping cpu number : %d\n", __FUNCTION__, map->len));
	return map->len;
}
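
/*
 * Design note: the rps_map swap above follows the usual RCU publish pattern --
 * build the new map, rcu_assign_pointer() it while holding the spinlock, then
 * kfree_rcu() the old map so softirq readers never observe a half-initialized
 * table. As with XPS, the mask string is a hex bitmap, e.g. "f" selects
 * CPUs 0-3.
 */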

void custom_rps_map_clear(struct netdev_rx_queue *queue)
{
	struct rps_map *map;

	DHD_INFO(("%s : Entered.\n", __FUNCTION__));

	map = rcu_dereference_protected(queue->rps_map, 1);
	if (map) {
		RCU_INIT_POINTER(queue->rps_map, NULL);
		kfree_rcu(map, rcu);
		DHD_INFO(("%s : rps_cpus map clear.\n", __FUNCTION__));
	}
}
#endif /* SET_RPS_CPUS */

#ifdef DHD_BUZZZ_LOG_ENABLED

static int
dhd_buzzz_thread(void *data)
{
	tsk_ctl_t *tsk = (tsk_ctl_t *)data;

	DAEMONIZE("dhd_buzzz");

	/*  signal: thread has started */
	complete(&tsk->completed);

	/* Run until signal received */
	while (1) {
		if (down_interruptible(&tsk->sema) == 0) {
			if (tsk->terminated) {
				break;
			}
			printk("%s: start to dump...\n", __FUNCTION__);
			dhd_buzzz_dump();
		} else {
			break;
		}
	}
	complete_and_exit(&tsk->completed, 0);
}

void* dhd_os_create_buzzz_thread(void)
{
	tsk_ctl_t *thr_buzzz_ctl = NULL;

	thr_buzzz_ctl = kmalloc(sizeof(tsk_ctl_t), GFP_KERNEL);
	if (!thr_buzzz_ctl) {
		return NULL;
	}

	PROC_START(dhd_buzzz_thread, NULL, thr_buzzz_ctl, 0, "dhd_buzzz");

	return (void *)thr_buzzz_ctl;
}

void dhd_os_destroy_buzzz_thread(void *thr_hdl)
{
	tsk_ctl_t *thr_buzzz_ctl = (tsk_ctl_t *)thr_hdl;

	if (!thr_buzzz_ctl) {
		return;
	}

	PROC_STOP(thr_buzzz_ctl);
	kfree(thr_buzzz_ctl);
}

void dhd_os_sched_buzzz_thread(void *thr_hdl)
{
	tsk_ctl_t *thr_buzzz_ctl = (tsk_ctl_t *)thr_hdl;

	if (!thr_buzzz_ctl) {
		return;
	}

	if (thr_buzzz_ctl->thr_pid >= 0) {
		up(&thr_buzzz_ctl->sema);
	}
}
#endif /* DHD_BUZZZ_LOG_ENABLED */

#ifdef DHD_DEBUG_PAGEALLOC
/* XXX An additional kernel implementation is needed to use this function at
 * the top of the check_poison_mem() function in mm/debug-pagealloc.c.
 * Please check whether the code below is implemented in your Linux kernel first.
 *
 * - mm/debug-pagealloc.c
 *
 * // for DHD_DEBUG_PAGEALLOC
 * typedef void (*page_corrupt_cb_t)(void *handle, void *addr_corrupt, uint addr_len);
 * page_corrupt_cb_t corrupt_cb = NULL;
 * void *corrupt_cb_handle = NULL;
 *
 * void register_page_corrupt_cb(page_corrupt_cb_t cb, void *handle)
 * {
 *      corrupt_cb = cb;
 *      corrupt_cb_handle = handle;
 * }
 * EXPORT_SYMBOL(register_page_corrupt_cb);
 *
 * extern void dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len);
 *
 * static void check_poison_mem(unsigned char *mem, size_t bytes)
 * {
 * ......
 *
 *      if (!__ratelimit(&ratelimit))
 *               return;
 *      else if (start == end && single_bit_flip(*start, PAGE_POISON))
 *              printk(KERN_ERR "pagealloc: single bit error\n");
 *      else
 *              printk(KERN_ERR "pagealloc: memory corruption\n");
 *
 *      print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start,
 *              end - start + 1, 1);
 *
 *      // for DHD_DEBUG_PAGEALLOC
 *      dhd_page_corrupt_cb(corrupt_cb_handle, start, end - start + 1);
 *
 *      dump_stack();
 * }
 *
 */

void
dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len)
{
	dhd_pub_t *dhdp = (dhd_pub_t *)handle;

	DHD_ERROR(("%s: Got dhd_page_corrupt_cb 0x%p %d\n",
		__FUNCTION__, addr_corrupt, (uint32)len));

	DHD_OS_WAKE_LOCK(dhdp);
	prhex("Page Corruption:", addr_corrupt, len);
	dhd_dump_to_kernelog(dhdp);
#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
	/* Load the dongle side dump to host memory and then BUG_ON() */
	dhdp->memdump_enabled = DUMP_MEMONLY;
	dhdp->memdump_type = DUMP_TYPE_MEMORY_CORRUPTION;
	dhd_bus_mem_dump(dhdp);
#endif /* BCMPCIE && DHD_FW_COREDUMP */
	DHD_OS_WAKE_UNLOCK(dhdp);
}
EXPORT_SYMBOL(dhd_page_corrupt_cb);
#endif /* DHD_DEBUG_PAGEALLOC */

#if defined(BCMPCIE) && defined(DHD_PKTID_AUDIT_ENABLED)
void
dhd_pktid_error_handler(dhd_pub_t *dhdp)
{
	DHD_ERROR(("%s: Got Pkt Id Audit failure \n", __FUNCTION__));
	DHD_OS_WAKE_LOCK(dhdp);
	dhd_dump_to_kernelog(dhdp);
#ifdef DHD_FW_COREDUMP
	/* Load the dongle side dump to host memory */
	if (dhdp->memdump_enabled == DUMP_DISABLED) {
		dhdp->memdump_enabled = DUMP_MEMFILE;
	}
	dhdp->memdump_type = DUMP_TYPE_PKTID_AUDIT_FAILURE;
	dhd_bus_mem_dump(dhdp);
#endif /* DHD_FW_COREDUMP */
#ifdef OEM_ANDROID
	/* XXX Send HANG event to Android Framework for recovery */
	dhdp->hang_reason = HANG_REASON_PCIE_PKTID_ERROR;
	dhd_os_check_hang(dhdp, 0, -EREMOTEIO);
#endif /* OEM_ANDROID */
	DHD_OS_WAKE_UNLOCK(dhdp);
}
#endif /* BCMPCIE && DHD_PKTID_AUDIT_ENABLED */

struct net_device *
dhd_linux_get_primary_netdev(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = dhdp->info;

	if (dhd->iflist[0] && dhd->iflist[0]->net)
		return dhd->iflist[0]->net;
	else
		return NULL;
}

#ifdef DHD_PKTTS
/**
 * dhd_msgbuf_get_ipv6_id - return ipv6 identification number
 * return 0 in case of error
 *
 * @pkt: packet pointer
 */
uint
dhd_msgbuf_get_ipv6_id(void *pkt)
{
	struct frag_hdr _frag;
	const struct sk_buff *skb;
	const struct frag_hdr *fh;
	unsigned int offset = 0;
	int err;

	skb = (struct sk_buff *)pkt;
	err = ipv6_find_hdr(skb, &offset, NEXTHDR_FRAGMENT, NULL, NULL);
	if (err < 0) {
		return 0;
	}

	fh = skb_header_pointer(skb, offset, sizeof(_frag), &_frag);
	if (fh == NULL) {
		return 0;
	}

	return ntohl(fh->identification);
}
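
/*
 * For reference, the kernel's IPv6 Fragment extension header (RFC 8200) that
 * skb_header_pointer() copies into _frag above looks like this; the 32-bit
 * identification field sits at offset 4:
 *
 *	struct frag_hdr {
 *		__u8    nexthdr;
 *		__u8    reserved;
 *		__be16  frag_off;
 *		__be32  identification;
 *	};
 */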

/**
 * dhd_create_to_notifier_ts - create BCM_NL_TS netlink socket
 *
 * @void:
 */
int
dhd_create_to_notifier_ts(void)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
	/* Kernel 3.6 onwards this API accepts only 3 arguments. */
	nl_to_ts = netlink_kernel_create(&init_net, BCM_NL_TS, &dhd_netlink_ts);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) */
	if (!nl_to_ts) {
		DHD_ERROR(("Error creating ts socket.\n"));
		return -1;
	}
	DHD_INFO(("nl_to socket created successfully...\n"));
	return 0;
}

/**
 * dhd_destroy_to_notifier_ts - destroy BCM_NL_TS netlink socket
 *
 * @void:
 */
void
dhd_destroy_to_notifier_ts(void)
{
	DHD_INFO(("Destroying nl_to_ts socket\n"));
	if (nl_to_ts) {
		netlink_kernel_release(nl_to_ts);
		nl_to_ts = NULL;
	}
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
/**
 * dhd_recv_msg_from_ts - called on receipt of a BCM_NL_TS netlink message;
 * records the PID of the app that is currently using this netlink socket
 *
 * @skb: rx packet socket buffer
 */
static void
dhd_recv_msg_from_ts(struct sk_buff *skb)
{
	sender_pid_ts = ((struct nlmsghdr *)(skb->data))->nlmsg_pid;
	DHD_INFO(("DHD Daemon Started, PID:%d\n", sender_pid_ts));
}
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) */

/**
 * dhd_send_msg_to_ts - send data to BCM_NL_TS netlink socket
 *
 * @skb: socket buffer (unused)
 * @data: output data
 * @size: size of output data
 */
int
dhd_send_msg_to_ts(struct sk_buff *skb, void *data, int size)
{
	struct nlmsghdr *nlh;
	struct sk_buff *skb_out = NULL;
	int ret = BCME_ERROR;

	BCM_REFERENCE(skb);
	if (sender_pid_ts == 0) {
		goto err;
	}

	if ((skb_out = nlmsg_new(size, GFP_ATOMIC)) == NULL) {
		DHD_ERROR(("%s: skb alloc failed\n", __FUNCTION__));
		goto err;
	}

	nlh = nlmsg_put(skb_out, 0, 0, NLMSG_DONE, size, 0);
	if (nlh == NULL) {
		DHD_ERROR(("%s: nlmsg_put failed\n", __FUNCTION__));
		goto err;
	}

	NETLINK_CB(skb_out).dst_group = 0; /* Unicast */
	(void)memcpy_s(nlmsg_data(nlh), size, (char *)data, size);

	if ((ret = nlmsg_unicast(nl_to_ts, skb_out, sender_pid_ts)) < 0) {
		DHD_ERROR(("Error sending message, ret:%d\n", ret));
		/* skb is already freed inside nlmsg_unicast() on error case */
		/* explicitly set skb_out to NULL to avoid a double free */
		skb_out = NULL;
		goto err;
	}
	return BCME_OK;

err:
	if (skb_out) {
		nlmsg_free(skb_out);
	}
	return ret;
}
#endif /* DHD_PKTTS */

static int
dhd_create_to_notifier_skt(void)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
	/* Kernel 3.7 onwards this API accepts only 3 arguments. */
	/* Kernel version 3.6 is a special case which accepts 4 arguments */
	nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, &dhd_netlink_cfg);
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
	/* Kernel version 3.5 and below use this old API format */
	nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, 0,
			dhd_process_daemon_msg, NULL, THIS_MODULE);
#else
	nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, THIS_MODULE,
			&dhd_netlink_cfg);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) */
	if (!nl_to_event_sk)
	{
		printf("Error creating socket.\n");
		return -1;
	}
	DHD_INFO(("nl_to socket created successfully...\n"));
	return 0;
}
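
/*
 * Sketch (assumption: dhd_netlink_cfg is defined elsewhere in this file):
 * the 3-argument netlink_kernel_create() takes a struct netlink_kernel_cfg
 * whose .input callback receives messages from userspace, along the lines of:
 *
 *	static struct netlink_kernel_cfg dhd_netlink_cfg = {
 *		.input = dhd_process_daemon_msg,
 *	};
 */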

void
dhd_destroy_to_notifier_skt(void)
{
	DHD_INFO(("Destroying nl_to socket\n"));
	netlink_kernel_release(nl_to_event_sk);
}

static void
dhd_recv_msg_from_daemon(struct sk_buff *skb)
{
	struct nlmsghdr *nlh;
	bcm_to_info_t *cmd;

	nlh = (struct nlmsghdr *)skb->data;
	cmd = (bcm_to_info_t *)nlmsg_data(nlh);
	if ((cmd->magic == BCM_TO_MAGIC) && (cmd->reason == REASON_DAEMON_STARTED)) {
		sender_pid = ((struct nlmsghdr *)(skb->data))->nlmsg_pid;
		DHD_INFO(("DHD Daemon Started\n"));
	}
}

int
dhd_send_msg_to_daemon(struct sk_buff *skb, void *data, int size)
{
	struct nlmsghdr *nlh;
	struct sk_buff *skb_out;
	int ret = BCME_ERROR;

	BCM_REFERENCE(skb);
	if (sender_pid == 0) {
		DHD_INFO(("Invalid PID 0\n"));
		skb_out = NULL;
		goto err;
	}

	if ((skb_out = nlmsg_new(size, 0)) == NULL) {
		DHD_ERROR(("%s: skb alloc failed\n", __FUNCTION__));
		ret = BCME_NOMEM;
		goto err;
	}
	nlh = nlmsg_put(skb_out, 0, 0, NLMSG_DONE, size, 0);
	if (nlh == NULL) {
		DHD_ERROR(("%s: nlmsg_put failed\n", __FUNCTION__));
		goto err;
	}
	NETLINK_CB(skb_out).dst_group = 0; /* Unicast */
	(void)memcpy_s(nlmsg_data(nlh), size, (char *)data, size);

	if ((ret = nlmsg_unicast(nl_to_event_sk, skb_out, sender_pid)) < 0) {
		DHD_ERROR(("Error sending message, ret:%d\n", ret));
		/* skb is already freed inside nlmsg_unicast() on error case */
		/* explicitly set skb_out to NULL to avoid a double free */
		skb_out = NULL;
		goto err;
	}
	return BCME_OK;
err:
	if (skb_out) {
		nlmsg_free(skb_out);
	}
	return ret;
}

static void
dhd_process_daemon_msg(struct sk_buff *skb)
{
	bcm_to_info_t to_info;

	to_info.magic = BCM_TO_MAGIC;
	to_info.reason = REASON_DAEMON_STARTED;
	to_info.trap = NO_TRAP;

	dhd_recv_msg_from_daemon(skb);
	dhd_send_msg_to_daemon(skb, &to_info, sizeof(to_info));
}

#ifdef REPORT_FATAL_TIMEOUTS
static void
dhd_send_trap_to_fw(dhd_pub_t * pub, int reason, int trap)
{
	bcm_to_info_t to_info;

	to_info.magic = BCM_TO_MAGIC;
	to_info.reason = reason;
	to_info.trap = trap;

	DHD_ERROR(("Sending Event reason:%d trap:%d\n", reason, trap));
	dhd_send_msg_to_daemon(NULL, (void *)&to_info, sizeof(bcm_to_info_t));
}

void
dhd_send_trap_to_fw_for_timeout(dhd_pub_t * pub, timeout_reasons_t reason)
{
	int to_reason;
	int trap = NO_TRAP;
	switch (reason) {
		case DHD_REASON_COMMAND_TO:
			to_reason = REASON_COMMAND_TO;
			trap = DO_TRAP;
			break;
		case DHD_REASON_JOIN_TO:
			to_reason = REASON_JOIN_TO;
			trap = DO_TRAP;
			break;
		case DHD_REASON_SCAN_TO:
			to_reason = REASON_SCAN_TO;
			trap = DO_TRAP;
			break;
		case DHD_REASON_OQS_TO:
			to_reason = REASON_OQS_TO;
			trap = DO_TRAP;
			break;
		default:
			to_reason = REASON_UNKOWN;
	}
	dhd_send_trap_to_fw(pub, to_reason, trap);
}
#endif /* REPORT_FATAL_TIMEOUTS */

#ifdef DHD_LOG_DUMP
bool
dhd_log_dump_ecntr_enabled(void)
{
	return (bool)logdump_ecntr_enable;
}

bool
dhd_log_dump_rtt_enabled(void)
{
	return (bool)logdump_rtt_enable;
}

void
dhd_log_dump_init(dhd_pub_t *dhd)
{
	struct dhd_log_dump_buf *dld_buf, *dld_buf_special;
	int i = 0;
	uint8 *prealloc_buf = NULL, *bufptr = NULL;
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
	int prealloc_idx = DHD_PREALLOC_DHD_LOG_DUMP_BUF;
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
	int ret;
	dhd_dbg_ring_t *ring = NULL;
	unsigned long flags = 0;
	dhd_info_t *dhd_info = dhd->info;
#if defined(EWP_ECNTRS_LOGGING)
	void *cookie_buf = NULL;
#endif

	BCM_REFERENCE(ret);
	BCM_REFERENCE(ring);
	BCM_REFERENCE(flags);

	/* sanity check */
	if (logdump_prsrv_tailsize <= 0 ||
		logdump_prsrv_tailsize > DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE) {
		logdump_prsrv_tailsize = DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE;
	}
	/* now adjust the preserve log flush size based on the
	* kernel printk log buffer size
	*/
#ifdef CONFIG_LOG_BUF_SHIFT
	DHD_ERROR(("%s: kernel log buf size = %uKB; logdump_prsrv_tailsize = %uKB;"
		" limit prsrv tail size to = %uKB\n",
		__FUNCTION__, (1 << CONFIG_LOG_BUF_SHIFT)/1024,
		logdump_prsrv_tailsize/1024, LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE/1024));

	if (logdump_prsrv_tailsize > LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE) {
		logdump_prsrv_tailsize = LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE;
	}
#else
	DHD_ERROR(("%s: logdump_prsrv_tailsize = %uKB \n",
		__FUNCTION__, logdump_prsrv_tailsize/1024));
#endif /* CONFIG_LOG_BUF_SHIFT */

	mutex_init(&dhd_info->logdump_lock);
	/* initialize log dump buf structures */
	memset(g_dld_buf, 0, sizeof(struct dhd_log_dump_buf) * DLD_BUFFER_NUM);

	/* set the log dump buffer size based on the module_param */
	if (logdump_max_bufsize > LOG_DUMP_GENERAL_MAX_BUFSIZE ||
			logdump_max_bufsize <= 0)
		dld_buf_size[DLD_BUF_TYPE_GENERAL] = LOG_DUMP_GENERAL_MAX_BUFSIZE;
	else
		dld_buf_size[DLD_BUF_TYPE_GENERAL] = logdump_max_bufsize;

	/* pre-alloc the memory for the log buffers & 'special' buffer */
	dld_buf_special = &g_dld_buf[DLD_BUF_TYPE_SPECIAL];
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
	prealloc_buf = DHD_OS_PREALLOC(dhd, prealloc_idx++, LOG_DUMP_TOTAL_BUFSIZE);
	dld_buf_special->buffer = DHD_OS_PREALLOC(dhd, prealloc_idx++,
			dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
#else
	prealloc_buf = MALLOCZ(dhd->osh, LOG_DUMP_TOTAL_BUFSIZE);
	dld_buf_special->buffer = MALLOCZ(dhd->osh, dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */

	if (!prealloc_buf) {
		DHD_ERROR(("Failed to allocate memory for log buffers\n"));
		goto fail;
	}
	if (!dld_buf_special->buffer) {
		DHD_ERROR(("Failed to allocate memory for special buffer\n"));
		goto fail;
	}
#ifdef BCMINTERNAL
	DHD_ERROR(("prealloc_buf:%p dld_buf_special->buffer:%p\n",
		prealloc_buf, dld_buf_special->buffer));
#endif /* BCMINTERNAL */

	bufptr = prealloc_buf;
	for (i = 0; i < DLD_BUFFER_NUM; i++) {
		dld_buf = &g_dld_buf[i];
		dld_buf->dhd_pub = dhd;
		spin_lock_init(&dld_buf->lock);
		dld_buf->wraparound = 0;
		if (i != DLD_BUF_TYPE_SPECIAL) {
			dld_buf->buffer = bufptr;
			dld_buf->max = (unsigned long)dld_buf->buffer + dld_buf_size[i];
			bufptr = (uint8 *)dld_buf->max;
		} else {
			dld_buf->max = (unsigned long)dld_buf->buffer + dld_buf_size[i];
		}
		dld_buf->present = dld_buf->front = dld_buf->buffer;
		dld_buf->remain = dld_buf_size[i];
		dld_buf->enable = 1;
	}

	/* now use the rest of the pre-alloc'd memory for other rings */
#ifdef EWP_ECNTRS_LOGGING
	dhd->ecntr_dbg_ring = dhd_dbg_ring_alloc_init(dhd,
			ECNTR_RING_ID, ECNTR_RING_NAME,
			LOG_DUMP_ECNTRS_MAX_BUFSIZE,
			bufptr, TRUE);
	if (!dhd->ecntr_dbg_ring) {
		DHD_ERROR(("%s: unable to init ecounters dbg ring !\n",
				__FUNCTION__));
		goto fail;
	}
	bufptr += LOG_DUMP_ECNTRS_MAX_BUFSIZE;
#endif /* EWP_ECNTRS_LOGGING */

#ifdef EWP_RTT_LOGGING
	dhd->rtt_dbg_ring = dhd_dbg_ring_alloc_init(dhd,
			RTT_RING_ID, RTT_RING_NAME,
			LOG_DUMP_RTT_MAX_BUFSIZE,
			bufptr, TRUE);
	if (!dhd->rtt_dbg_ring) {
		DHD_ERROR(("%s: unable to init rtt dbg ring !\n",
				__FUNCTION__));
		goto fail;
	}
	bufptr += LOG_DUMP_RTT_MAX_BUFSIZE;
#endif /* EWP_RTT_LOGGING */

#ifdef EWP_BCM_TRACE
	dhd->bcm_trace_dbg_ring = dhd_dbg_ring_alloc_init(dhd,
			BCM_TRACE_RING_ID, BCM_TRACE_RING_NAME,
			LOG_DUMP_BCM_TRACE_MAX_BUFSIZE,
			bufptr, TRUE);
	if (!dhd->bcm_trace_dbg_ring) {
		DHD_ERROR(("%s: unable to init bcm trace dbg ring !\n",
				__FUNCTION__));
		goto fail;
	}
	bufptr += LOG_DUMP_BCM_TRACE_MAX_BUFSIZE;
#endif /* EWP_BCM_TRACE */

	/* The concise buffer is used as an intermediate buffer for two purposes:
	* a) to temporarily hold ecounters records pulled from the ring before
	*  writing them to file
	* b) to store 'dhd dump' data before putting it to file
	* It should therefore be sized to
	* MAX(largest possible ecntr record, 'dhd dump' data size)
	*/
	dhd->concise_dbg_buf = MALLOC(dhd->osh, CONCISE_DUMP_BUFLEN);
	if (!dhd->concise_dbg_buf) {
		DHD_ERROR(("%s: unable to alloc mem for concise debug info !\n",
				__FUNCTION__));
		goto fail;
	}

#if defined(DHD_EVENT_LOG_FILTER)
	/* XXX init the filter last, because the filter uses a buffer allocated by log dump */
	ret = dhd_event_log_filter_init(dhd,
		bufptr,
		LOG_DUMP_FILTER_MAX_BUFSIZE);
	if (ret != BCME_OK) {
		goto fail;
	}
#endif /* DHD_EVENT_LOG_FILTER */

#if defined(EWP_ECNTRS_LOGGING)
	cookie_buf = MALLOC(dhd->osh, LOG_DUMP_COOKIE_BUFSIZE);
	if (!cookie_buf) {
		DHD_ERROR(("%s: unable to alloc mem for logdump cookie buffer\n",
			__FUNCTION__));
		goto fail;
	}

	ret = dhd_logdump_cookie_init(dhd, cookie_buf, LOG_DUMP_COOKIE_BUFSIZE);
	if (ret != BCME_OK) {
		MFREE(dhd->osh, cookie_buf, LOG_DUMP_COOKIE_BUFSIZE);
		goto fail;
	}
#endif /* EWP_ECNTRS_LOGGING */
	return;

fail:

#if defined(DHD_EVENT_LOG_FILTER)
	/* XXX deinit the filter first, because the filter uses a buffer allocated by log dump */
	if (dhd->event_log_filter) {
		dhd_event_log_filter_deinit(dhd);
	}
#endif /* DHD_EVENT_LOG_FILTER */

	if (dhd->concise_dbg_buf) {
		MFREE(dhd->osh, dhd->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
	}

#ifdef EWP_ECNTRS_LOGGING
	if (dhd->logdump_cookie) {
		dhd_logdump_cookie_deinit(dhd);
		MFREE(dhd->osh, dhd->logdump_cookie, LOG_DUMP_COOKIE_BUFSIZE);
		dhd->logdump_cookie = NULL;
	}
#endif /* EWP_ECNTRS_LOGGING */

#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
	if (prealloc_buf) {
		DHD_OS_PREFREE(dhd, prealloc_buf, LOG_DUMP_TOTAL_BUFSIZE);
	}
	if (dld_buf_special->buffer) {
		DHD_OS_PREFREE(dhd, dld_buf_special->buffer,
				dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
	}
#else
	if (prealloc_buf) {
		MFREE(dhd->osh, prealloc_buf, LOG_DUMP_TOTAL_BUFSIZE);
	}
	if (dld_buf_special->buffer) {
		MFREE(dhd->osh, dld_buf_special->buffer,
				dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
	}
#endif /* CONFIG_DHD_USE_STATIC_BUF */
	for (i = 0; i < DLD_BUFFER_NUM; i++) {
		dld_buf = &g_dld_buf[i];
		dld_buf->enable = 0;
		dld_buf->buffer = NULL;
	}
	mutex_destroy(&dhd_info->logdump_lock);
}

void
dhd_log_dump_deinit(dhd_pub_t *dhd)
{
	struct dhd_log_dump_buf *dld_buf = NULL, *dld_buf_special = NULL;
	int i = 0;
	dhd_info_t *dhd_info = dhd->info;
	dhd_dbg_ring_t *ring = NULL;

	BCM_REFERENCE(ring);

	if (dhd->concise_dbg_buf) {
		MFREE(dhd->osh, dhd->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
		dhd->concise_dbg_buf = NULL;
	}

#ifdef EWP_ECNTRS_LOGGING
	if (dhd->logdump_cookie) {
		dhd_logdump_cookie_deinit(dhd);
		MFREE(dhd->osh, dhd->logdump_cookie, LOG_DUMP_COOKIE_BUFSIZE);
		dhd->logdump_cookie = NULL;
	}

	if (dhd->ecntr_dbg_ring) {
		dhd_dbg_ring_dealloc_deinit(&dhd->ecntr_dbg_ring, dhd);
	}
#endif /* EWP_ECNTRS_LOGGING */

#ifdef EWP_RTT_LOGGING
	if (dhd->rtt_dbg_ring) {
		dhd_dbg_ring_dealloc_deinit(&dhd->rtt_dbg_ring, dhd);
	}
#endif /* EWP_RTT_LOGGING */

#ifdef EWP_BCM_TRACE
	if (dhd->bcm_trace_dbg_ring) {
		dhd_dbg_ring_dealloc_deinit(&dhd->bcm_trace_dbg_ring, dhd);
	}
#endif /* EWP_BCM_TRACE */

	/* 'general' buffer points to start of the pre-alloc'd memory */
	dld_buf = &g_dld_buf[DLD_BUF_TYPE_GENERAL];
	dld_buf_special = &g_dld_buf[DLD_BUF_TYPE_SPECIAL];
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
	if (dld_buf->buffer) {
		DHD_OS_PREFREE(dhd, dld_buf->buffer, LOG_DUMP_TOTAL_BUFSIZE);
	}
	if (dld_buf_special->buffer) {
		DHD_OS_PREFREE(dhd, dld_buf_special->buffer,
				dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
	}
#else
	if (dld_buf->buffer) {
		MFREE(dhd->osh, dld_buf->buffer, LOG_DUMP_TOTAL_BUFSIZE);
	}
	if (dld_buf_special->buffer) {
		MFREE(dhd->osh, dld_buf_special->buffer,
				dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
	}
#endif /* CONFIG_DHD_USE_STATIC_BUF */
	for (i = 0; i < DLD_BUFFER_NUM; i++) {
		dld_buf = &g_dld_buf[i];
		dld_buf->enable = 0;
		dld_buf->buffer = NULL;
	}
	mutex_destroy(&dhd_info->logdump_lock);
}

void
dhd_log_dump_write(int type, char *binary_data,
		int binary_len, const char *fmt, ...)
{
	int len = 0;
	char tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE] = {0, };
	va_list args;
	unsigned long flags = 0;
	struct dhd_log_dump_buf *dld_buf = NULL;
	bool flush_log = FALSE;

	if (type < 0 || type >= DLD_BUFFER_NUM) {
		DHD_INFO(("%s: Unsupported DHD_LOG_DUMP_BUF_TYPE(%d).\n",
			__FUNCTION__, type));
		return;
	}

	dld_buf = &g_dld_buf[type];
	if (dld_buf->enable != 1) {
		return;
	}

	va_start(args, fmt);
	len = vsnprintf(tmp_buf, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE, fmt, args);
	/* A non-C99-compliant vsnprintf returns -1 on truncation;
	 * a C99-compliant one returns len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
	 */
	va_end(args);
	if (len < 0) {
		return;
	}

	if (len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE) {
		len = DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE - 1;
		tmp_buf[len] = '\0';
	}

	/* make a critical section to eliminate race conditions */
	DHD_LOG_DUMP_BUF_LOCK(&dld_buf->lock, flags);
	if (dld_buf->remain < len) {
		dld_buf->wraparound = 1;
		dld_buf->present = dld_buf->front;
		dld_buf->remain = dld_buf_size[type];
		/* if wrap around happens, flush the ring buffer to the file */
		flush_log = TRUE;
	}

	memcpy(dld_buf->present, tmp_buf, len);
	dld_buf->remain -= len;
	dld_buf->present += len;
	DHD_LOG_DUMP_BUF_UNLOCK(&dld_buf->lock, flags);

	/* double check invalid memory operation */
	ASSERT((unsigned long)dld_buf->present <= dld_buf->max);

	if (dld_buf->dhd_pub) {
		dhd_pub_t *dhdp = (dhd_pub_t *)dld_buf->dhd_pub;
		dhdp->logdump_periodic_flush =
			logdump_periodic_flush;
		if (logdump_periodic_flush && flush_log) {
			log_dump_type_t *flush_type = MALLOCZ(dhdp->osh,
					sizeof(log_dump_type_t));
			if (flush_type) {
				*flush_type = type;
				dhd_schedule_log_dump(dld_buf->dhd_pub, flush_type);
			}
		}
	}
}
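
/*
 * Typical call site (sketch, not a verbatim caller from this file): callers
 * usually reach this through the DHD_LOG_DUMP-family macros rather than
 * calling it directly, e.g. something along the lines of:
 *
 *	dhd_log_dump_write(DLD_BUF_TYPE_GENERAL, NULL, 0,
 *		"[%s] %s: status %d\n", dhd_log_dump_get_timestamp(),
 *		__FUNCTION__, status);
 */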

#ifdef DHD_DEBUGABILITY_LOG_DUMP_RING
char*
dhd_dbg_get_system_timestamp(void)
{
	static char timebuf[DEBUG_DUMP_TIME_BUF_LEN];
	struct timeval tv;
	unsigned long local_time;
	struct rtc_time tm;

	memset_s(timebuf, DEBUG_DUMP_TIME_BUF_LEN, 0, DEBUG_DUMP_TIME_BUF_LEN);
	do_gettimeofday(&tv);
	local_time = (u32)(tv.tv_sec - (sys_tz.tz_minuteswest * 60));
	rtc_time_to_tm(local_time, &tm);
	scnprintf(timebuf, DEBUG_DUMP_TIME_BUF_LEN,
			"%02d:%02d:%02d.%06lu",
			tm.tm_hour, tm.tm_min, tm.tm_sec, tv.tv_usec);
	return timebuf;
}

extern struct dhd_dbg_ring_buf g_ring_buf;
void
dhd_dbg_ring_write(int type, char *binary_data,
		int binary_len, const char *fmt, ...)
{
	int len = 0;
	va_list args;
	struct dhd_dbg_ring_buf *ring_buf = NULL;
	char tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE] = {0, };

	ring_buf = &g_ring_buf;

	va_start(args, fmt);
	len = vsnprintf(tmp_buf, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE, fmt, args);
	/* A non-C99-compliant vsnprintf returns -1 on truncation;
	 * a C99-compliant one returns len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
	 */
	va_end(args);
	if (len < 0) {
		return;
	}

	if (len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE) {
		len = DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE - 1;
		tmp_buf[len] = '\0';
	}

	if (ring_buf->dhd_pub) {
		dhd_pub_t *dhdp = (dhd_pub_t *)ring_buf->dhd_pub;
		if (type == DRIVER_LOG_RING_ID || type == FW_VERBOSE_RING_ID ||
				type == ROAM_STATS_RING_ID) {
			if (DBG_RING_ACTIVE(dhdp, type)) {
				dhd_os_push_push_ring_data(dhdp, type,
						tmp_buf, strlen(tmp_buf));
				return;
			}
		}
	}
	return;
}
#endif /* DHD_DEBUGABILITY_LOG_DUMP_RING */

char*
dhd_log_dump_get_timestamp(void)
{
	static char buf[32];
	u64 ts_nsec;
	unsigned long rem_nsec;

	ts_nsec = local_clock();
	rem_nsec = DIV_AND_MOD_U64_BY_U32(ts_nsec, NSEC_PER_SEC);
	snprintf(buf, sizeof(buf), "%5lu.%06lu",
		(unsigned long)ts_nsec, rem_nsec / NSEC_PER_USEC);

	return buf;
}
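
/*
 * Example output (illustrative): for ts_nsec = 128123456789 from
 * local_clock(), DIV_AND_MOD_U64_BY_U32() leaves ts_nsec = 128 seconds with
 * rem_nsec = 123456789, so the returned string is "  128.123456".
 */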
#endif /* DHD_LOG_DUMP */

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
void
dhd_flush_rx_tx_wq(dhd_pub_t *dhdp)
{
	dhd_info_t * dhd;

	if (dhdp) {
		dhd = dhdp->info;
		if (dhd) {
			flush_workqueue(dhd->tx_wq);
			flush_workqueue(dhd->rx_wq);
		}
	}

	return;
}
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

#ifdef DHD_DEBUG_UART
bool
dhd_debug_uart_is_running(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	if (dhd->duart_execute) {
		return TRUE;
	}

	return FALSE;
}

static void
dhd_debug_uart_exec_rd(void *handle, void *event_info, u8 event)
{
	dhd_pub_t *dhdp = handle;
	dhd_debug_uart_exec(dhdp, "rd");
}

static void
dhd_debug_uart_exec(dhd_pub_t *dhdp, char *cmd)
{
	int ret;

	char *argv[] = {DHD_DEBUG_UART_EXEC_PATH, cmd, NULL};
	char *envp[] = {"HOME=/", "TERM=linux", "PATH=/sbin:/system/bin", NULL};

#ifdef DHD_FW_COREDUMP
	if (dhdp->memdump_enabled == DUMP_MEMFILE_BUGON)
#endif
	{
		if (dhdp->hang_reason == HANG_REASON_PCIE_LINK_DOWN_RC_DETECT ||
			dhdp->hang_reason == HANG_REASON_PCIE_LINK_DOWN_EP_DETECT ||
#ifdef DHD_FW_COREDUMP
			dhdp->memdump_success == FALSE ||
#endif
			FALSE) {
			dhdp->info->duart_execute = TRUE;
			DHD_ERROR(("DHD: %s - execute %s %s\n",
				__FUNCTION__, DHD_DEBUG_UART_EXEC_PATH, cmd));
			ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
			DHD_ERROR(("DHD: %s - %s %s ret = %d\n",
				__FUNCTION__, DHD_DEBUG_UART_EXEC_PATH, cmd, ret));
			dhdp->info->duart_execute = FALSE;

#ifdef DHD_LOG_DUMP
			if (dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP)
#endif
			{
				BUG_ON(1);
			}
		}
	}
}
#endif	/* DHD_DEBUG_UART */

#if defined(DHD_BLOB_EXISTENCE_CHECK)
void
dhd_set_blob_support(dhd_pub_t *dhdp, char *fw_path)
{
	struct file *fp;
	char *filepath = VENDOR_PATH CONFIG_BCMDHD_CLM_PATH;

	fp = filp_open(filepath, O_RDONLY, 0);
	if (IS_ERR(fp)) {
		DHD_ERROR(("%s: ----- blob file doesn't exist (%s) -----\n", __FUNCTION__,
			filepath));
		dhdp->is_blob = FALSE;
	} else {
		DHD_ERROR(("%s: ----- blob file exists (%s) -----\n", __FUNCTION__, filepath));
		dhdp->is_blob = TRUE;
#if defined(CONCATE_BLOB)
		strncat(fw_path, "_blob", strlen("_blob"));
#else
		BCM_REFERENCE(fw_path);
#endif /* CONCATE_BLOB */
		filp_close(fp, NULL);
	}
}
#endif /* DHD_BLOB_EXISTENCE_CHECK */

#if defined(PCIE_FULL_DONGLE)
/** test / loopback */
void
dmaxfer_free_dmaaddr_handler(void *handle, void *event_info, u8 event)
{
	dmaxref_mem_map_t *dmmap = (dmaxref_mem_map_t *)event_info;
	dhd_info_t *dhd_info = (dhd_info_t *)handle;

	if (event != DHD_WQ_WORK_DMA_LB_MEM_REL) {
		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
		return;
	}
	if (dhd_info == NULL) {
		DHD_ERROR(("%s: invalid dhd_info\n", __FUNCTION__));
		return;
	}
	if (dmmap == NULL) {
		DHD_ERROR(("%s: dmmap is null\n", __FUNCTION__));
		return;
	}
	dmaxfer_free_prev_dmaaddr(&dhd_info->pub, dmmap);
}

void
dhd_schedule_dmaxfer_free(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap)
{
	dhd_info_t *dhd_info = dhdp->info;

	dhd_deferred_schedule_work(dhd_info->dhd_deferred_wq, (void *)dmmap,
		DHD_WQ_WORK_DMA_LB_MEM_REL, dmaxfer_free_dmaaddr_handler, DHD_WQ_WORK_PRIORITY_LOW);
}
#endif /* PCIE_FULL_DONGLE */
/* ---------------------------- End of sysfs implementation ------------------------------------- */
#ifdef SET_PCIE_IRQ_CPU_CORE
void
dhd_set_irq_cpucore(dhd_pub_t *dhdp, int affinity_cmd)
{
	unsigned int pcie_irq = 0;
#if defined(DHD_LB) && defined(DHD_LB_HOST_CTRL)
	struct dhd_info  *dhd = NULL;
#endif /* DHD_LB && DHD_LB_HOST_CTRL */

	if (!dhdp) {
		DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
		return;
	}

	if (!dhdp->bus) {
		DHD_ERROR(("%s : dhd->bus is NULL\n", __FUNCTION__));
		return;
	}

	if (affinity_cmd < DHD_AFFINITY_OFF || affinity_cmd > DHD_AFFINITY_LAST) {
		DHD_ERROR(("Wrong Affinity cmds:%d, %s\n", affinity_cmd, __FUNCTION__));
		return;
	}

	DHD_ERROR(("Enter %s, PCIe affinity cmd=0x%x\n", __FUNCTION__, affinity_cmd));

	if (dhdpcie_get_pcieirq(dhdp->bus, &pcie_irq)) {
		DHD_ERROR(("%s : Can't get interrupt number\n", __FUNCTION__));
		return;
	}

#if defined(DHD_LB) && defined(DHD_LB_HOST_CTRL)
	dhd = dhdp->info;

	if (affinity_cmd == DHD_AFFINITY_OFF) {
		dhd->permitted_primary_cpu = FALSE;
	} else if (affinity_cmd == DHD_AFFINITY_TPUT_150MBPS ||
		affinity_cmd == DHD_AFFINITY_TPUT_300MBPS) {
		dhd->permitted_primary_cpu = TRUE;
	}
	dhd_select_cpu_candidacy(dhd);
	/*
	* NAPI must be disabled and re-enabled to move NET_RX processing to the
	* NAPI CPU core during Rx traffic;
	* NET_RX does not move to the NAPI CPU core while the napi polling
	* function is called continuously
	*/
	napi_disable(&dhd->rx_napi_struct);
	napi_enable(&dhd->rx_napi_struct);
#endif /* DHD_LB && DHD_LB_HOST_CTRL */

	/*
		irq_set_affinity() assigns the PCIe interrupt to a dedicated CPU core.
		If the dedicated CPU core is not online,
		the PCIe interrupt is scheduled on CPU core 0
	*/
#if defined(CONFIG_ARCH_SM8150) || defined(CONFIG_ARCH_KONA)
	/* For SDM platform */
	switch (affinity_cmd) {
		case DHD_AFFINITY_OFF:
#if defined(DHD_LB) && defined(DHD_LB_HOST_CTRL)
			irq_set_affinity_hint(pcie_irq, dhdp->info->cpumask_secondary);
			irq_set_affinity(pcie_irq, dhdp->info->cpumask_secondary);
#endif /* DHD_LB && DHD_LB_HOST_CTRL */
			break;
		case DHD_AFFINITY_TPUT_150MBPS:
		case DHD_AFFINITY_TPUT_300MBPS:
			irq_set_affinity_hint(pcie_irq, dhdp->info->cpumask_primary);
			irq_set_affinity(pcie_irq, dhdp->info->cpumask_primary);
			break;
		default:
			DHD_ERROR(("%s, Unknown PCIe affinity cmd=0x%x\n",
				__FUNCTION__, affinity_cmd));
	}
#elif defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820) || \
	defined(CONFIG_SOC_EXYNOS9830)
	/* For Exynos platform */
	switch (affinity_cmd) {
		case DHD_AFFINITY_OFF:
#if defined(DHD_LB) && defined(DHD_LB_HOST_CTRL)
			irq_set_affinity(pcie_irq, dhdp->info->cpumask_secondary);
#endif /* DHD_LB && DHD_LB_HOST_CTRL */
			break;
		case DHD_AFFINITY_TPUT_150MBPS:
			irq_set_affinity(pcie_irq, dhdp->info->cpumask_primary);
			break;
		case DHD_AFFINITY_TPUT_300MBPS:
			DHD_ERROR(("%s, PCIe IRQ:%u set Core %d\n",
				__FUNCTION__, pcie_irq, PCIE_IRQ_CPU_CORE));
			irq_set_affinity(pcie_irq, cpumask_of(PCIE_IRQ_CPU_CORE));
			break;
		default:
			DHD_ERROR(("%s, Unknown PCIe affinity cmd=0x%x\n",
				__FUNCTION__, affinity_cmd));
	}
#else /* For undefined platforms */
	DHD_ERROR(("%s, Unknown PCIe affinity cmd=0x%x\n",
		__FUNCTION__, affinity_cmd));
#endif /* End of platform define */

}
#endif /* SET_PCIE_IRQ_CPU_CORE */

int
dhd_write_file(const char *filepath, char *buf, int buf_len)
{
	struct file *fp = NULL;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
	mm_segment_t old_fs;
#endif
	int ret = 0;

	/* change to KERNEL_DS address limit */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
	old_fs = get_fs();
	set_fs(KERNEL_DS);
#endif

	/* File is always created. */
	fp = filp_open(filepath, O_RDWR | O_CREAT, 0664);
	if (IS_ERR(fp)) {
		DHD_ERROR(("%s: Couldn't open file '%s' err %ld\n",
			__FUNCTION__, filepath, PTR_ERR(fp)));
		ret = BCME_ERROR;
	} else {
		if (fp->f_mode & FMODE_WRITE) {
			ret = vfs_write(fp, buf, buf_len, &fp->f_pos);
			if (ret < 0) {
				DHD_ERROR(("%s: Couldn't write file '%s'\n",
					__FUNCTION__, filepath));
				ret = BCME_ERROR;
			} else {
				ret = BCME_OK;
			}
		}
		filp_close(fp, NULL);
	}

	/* restore previous address limit */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
	set_fs(old_fs);
#endif

	return ret;
}

int
dhd_read_file(const char *filepath, char *buf, int buf_len)
{
	struct file *fp = NULL;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
	mm_segment_t old_fs;
#endif
	int ret;

	/* change to KERNEL_DS address limit */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
	old_fs = get_fs();
	set_fs(KERNEL_DS);
#endif

	fp = filp_open(filepath, O_RDONLY, 0);
	if (IS_ERR(fp)) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
		set_fs(old_fs);
#endif
		DHD_ERROR(("%s: File %s doesn't exist\n", __FUNCTION__, filepath));
		return BCME_ERROR;
	}

	ret = kernel_read_compat(fp, 0, buf, buf_len);
	filp_close(fp, NULL);

	/* restore previous address limit */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
	set_fs(old_fs);
#endif

	/* Convert the number of bytes read into a BCME status */
	if (ret > 0) {
		/* Read succeeded */
		ret = 0;
	} else {
		DHD_ERROR(("%s: Couldn't read the file %s, ret=%d\n",
			__FUNCTION__, filepath, ret));
		ret = BCME_ERROR;
	}

	return ret;
}

int
dhd_write_file_and_check(const char *filepath, char *buf, int buf_len)
{
	int ret;

	ret = dhd_write_file(filepath, buf, buf_len);
	if (ret < 0) {
		return ret;
	}

	/* Read the file again and check that the file size is not zero */
	memset(buf, 0, buf_len);
	ret = dhd_read_file(filepath, buf, buf_len);

	return ret;
}
26436 
26437 #ifdef FILTER_IE
26438 int dhd_read_from_file(dhd_pub_t *dhd)
26439 {
26440 	int ret = 0, nread = 0;
26441 	void *fd;
26442 	uint8 *buf;
26443 	NULL_CHECK(dhd, "dhd is NULL", ret);
26444 
26445 	buf = MALLOCZ(dhd->osh, FILE_BLOCK_READ_SIZE);
26446 	if (!buf) {
26447 		DHD_ERROR(("error: failed to alllocate buf.\n"));
26448 		return BCME_NOMEM;
26449 	}
26450 
26451 	/* open file to read */
26452 	fd = dhd_os_open_image1(dhd, FILTER_IE_PATH);
26453 	if (!fd) {
26454 		DHD_ERROR(("No filter file(not an error), filter path%s\n", FILTER_IE_PATH));
26455 		ret = BCME_EPERM;
26456 		goto exit;
26457 	}
26458 	nread = dhd_os_get_image_block(buf, (FILE_BLOCK_READ_SIZE - 1), fd);
26459 	if (nread > 0) {
26460 		buf[nread] = '\0';
26461 		if ((ret = dhd_parse_filter_ie(dhd, buf)) < 0) {
26462 			DHD_ERROR(("error: failed to parse filter ie\n"));
26463 		}
26464 	} else {
26465 		DHD_ERROR(("error: zero length file.failed to read\n"));
26466 		ret = BCME_ERROR;
26467 	}
26468 	dhd_os_close_image1(dhd, fd);
26469 exit:
26470 	if (buf) {
26471 		MFREE(dhd->osh, buf, FILE_BLOCK_READ_SIZE);
26472 	}
26473 	return ret;
26474 }
26475 
26476 int dhd_get_filter_ie_count(dhd_pub_t *dhdp, uint8* buf)
26477 {
26478 	uint8* pstr = buf;
26479 	int element_count = 0;
26480 
26481 	if (buf == NULL) {
26482 		return BCME_ERROR;
26483 	}
26484 
26485 	while (*pstr != '\0') {
26486 		if (*pstr == '\n') {
26487 			element_count++;
26488 		}
26489 		pstr++;
26490 	}
26491 	/*
26492 	 * A newline character must not be present after the last line,
26493 	 * so the last line is counted separately here.
26494 	 */
26495 	element_count++;
26496 
26497 	return element_count;
26498 }
26499 
26500 int dhd_parse_oui(dhd_pub_t *dhd, uint8 *inbuf, uint8 *oui, int len)
26501 {
26502 	uint8 i, j, msb, lsb, oui_len = 0;
26503 	/*
26504 	 * An OUI can vary from 3 to 5 bytes.
26505 	 * Read from the file as ASCII input, it therefore takes a
26506 	 * maximum size of 14 bytes and a minimum size of
26507 	 * 8 bytes, including the ":" separators.
26508 	 * Example 5-byte OUI: <AB:DE:BE:CD:FA>
26509 	 * Example 3-byte OUI: <AB:DC:EF>
26510 	 */
26511 
26512 	if ((inbuf == NULL) || (len < 8) || (len > 14)) {
26513 		DHD_ERROR(("error: failed to parse OUI \n"));
26514 		return BCME_ERROR;
26515 	}
26516 
26517 	for (j = 0, i = 0; i < len; i += 3, ++j) {
26518 		if (!bcm_isxdigit(inbuf[i]) || !bcm_isxdigit(inbuf[i + 1])) {
26519 			DHD_ERROR(("error: invalid OUI format \n"));
26520 			return BCME_ERROR;
26521 		}
26522 		msb = inbuf[i] > '9' ? bcm_toupper(inbuf[i]) - 'A' + 10 : inbuf[i] - '0';
26523 		lsb = inbuf[i + 1] > '9' ? bcm_toupper(inbuf[i + 1]) -
26524 			'A' + 10 : inbuf[i + 1] - '0';
26525 		oui[j] = (msb << 4) | lsb;
26526 	}
26527 	/* Size of the OUI; it can be 3, 4 or 5 bytes */
26528 	oui_len = j;
26529 
26530 	return oui_len;
26531 }
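/*
 * Sketch of how dhd_parse_oui() consumes its ASCII input; the token and
 * the resulting bytes below are illustrative only.
 */
#if 0 /* illustrative sketch only, not built */
static void
dhd_example_parse_oui(dhd_pub_t *dhd)
{
	uint8 oui[5];
	/* "AB:DC:EF" is 8 characters long and parses to { 0xAB, 0xDC, 0xEF };
	 * the return value is the OUI length (3), or BCME_ERROR on bad input.
	 */
	int oui_len = dhd_parse_oui(dhd, (uint8 *)"AB:DC:EF", oui, 8);

	if (oui_len == BCME_ERROR) {
		DHD_ERROR(("example: invalid OUI token\n"));
	}
}
#endif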
26532 
26533 int dhd_check_valid_ie(dhd_pub_t *dhdp, uint8* buf, int len)
26534 {
26535 	int i = 0;
26536 
26537 	while (i < len) {
26538 		if (!bcm_isdigit(buf[i])) {
26539 			DHD_ERROR(("error: non digit value found in filter_ie \n"));
26540 			return BCME_ERROR;
26541 		}
26542 		i++;
26543 	}
26544 	if (bcm_atoi((char*)buf) > 255) {
26545 		DHD_ERROR(("error: element id cannot be greater than 255 \n"));
26546 		return BCME_ERROR;
26547 	}
26548 
26549 	return BCME_OK;
26550 }
26551 
26552 int dhd_parse_filter_ie(dhd_pub_t *dhd, uint8 *buf)
26553 {
26554 	int element_count = 0, i = 0, oui_size = 0, ret = 0;
26555 	uint16 bufsize, buf_space_left, id = 0, len = 0;
26556 	uint16 filter_iovsize, all_tlvsize;
26557 	wl_filter_ie_tlv_t *p_ie_tlv = NULL;
26558 	wl_filter_ie_iov_v1_t *p_filter_iov = (wl_filter_ie_iov_v1_t *) NULL;
26559 	char *token = NULL, *ele_token = NULL, *oui_token = NULL, *type = NULL;
26560 	uint8 data[20];
26561 
26562 	element_count = dhd_get_filter_ie_count(dhd, buf);
26563 	DHD_INFO(("total element count %d \n", element_count));
26564 	/* Calculate the whole buffer size */
26565 	filter_iovsize = sizeof(wl_filter_ie_iov_v1_t) + FILTER_IE_BUFSZ;
26566 	p_filter_iov = MALLOCZ(dhd->osh, filter_iovsize);
26567 
26568 	if (p_filter_iov == NULL) {
26569 		DHD_ERROR(("error: failed to allocate %d bytes of memory\n", filter_iovsize));
26570 		return BCME_ERROR;
26571 	}
26572 
26573 	/* setup filter iovar header */
26574 	p_filter_iov->version = WL_FILTER_IE_VERSION;
26575 	p_filter_iov->len = filter_iovsize;
26576 	p_filter_iov->fixed_length = p_filter_iov->len - FILTER_IE_BUFSZ;
26577 	p_filter_iov->pktflag = FC_PROBE_REQ;
26578 	p_filter_iov->option = WL_FILTER_IE_CHECK_SUB_OPTION;
26579 	/* setup TLVs */
26580 	bufsize = filter_iovsize - WL_FILTER_IE_IOV_HDR_SIZE; /* adjust available size for TLVs */
26581 	p_ie_tlv = (wl_filter_ie_tlv_t *)&p_filter_iov->tlvs[0];
26582 	buf_space_left = bufsize;
26583 
26584 	while ((i < element_count) && (buf != NULL)) {
26585 		len = 0;
26586 		/* token contains one line of input data */
26587 		token = bcmstrtok((char**)&buf, "\n", NULL);
26588 		if (token == NULL) {
26589 			break;
26590 		}
26591 		if ((ele_token = bcmstrstr(token, ",")) == NULL) {
26592 			/* only element id is present */
26593 			if (dhd_check_valid_ie(dhd, token, strlen(token)) == BCME_ERROR) {
26594 				DHD_ERROR(("error: Invalid element id \n"));
26595 				ret = BCME_ERROR;
26596 				goto exit;
26597 			}
26598 			id = bcm_atoi((char*)token);
26599 			data[len++] = WL_FILTER_IE_SET;
26600 		} else {
26601 			/* oui is present */
26602 			ele_token = bcmstrtok(&token, ",", NULL);
26603 			if ((ele_token == NULL) || (dhd_check_valid_ie(dhd, ele_token,
26604 				strlen(ele_token)) == BCME_ERROR)) {
26605 				DHD_ERROR(("error: Invalid element id \n"));
26606 				ret = BCME_ERROR;
26607 				goto exit;
26608 			}
26609 			id = bcm_atoi((char*)ele_token);
26610 			data[len++] = WL_FILTER_IE_SET;
26611 			if ((oui_token = bcmstrstr(token, ",")) == NULL) {
26612 				oui_size = dhd_parse_oui(dhd, token, &(data[len]), strlen(token));
26613 				if (oui_size == BCME_ERROR) {
26614 					DHD_ERROR(("error: Invalid OUI \n"));
26615 					ret = BCME_ERROR;
26616 					goto exit;
26617 				}
26618 				len += oui_size;
26619 			} else {
26620 				/* type is present */
26621 				oui_token = bcmstrtok(&token, ",", NULL);
26622 				if ((oui_token == NULL) || ((oui_size =
26623 					dhd_parse_oui(dhd, oui_token,
26624 					&(data[len]), strlen(oui_token))) == BCME_ERROR)) {
26625 					DHD_ERROR(("error: Invalid OUI \n"));
26626 					ret = BCME_ERROR;
26627 					goto exit;
26628 				}
26629 				len += oui_size;
26630 				if ((type = bcmstrstr(token, ",")) == NULL) {
26631 					if (dhd_check_valid_ie(dhd, token,
26632 						strlen(token)) == BCME_ERROR) {
26633 						DHD_ERROR(("error: Invalid type \n"));
26634 						ret = BCME_ERROR;
26635 						goto exit;
26636 					}
26637 					data[len++] = bcm_atoi((char*)token);
26638 				} else {
26639 					/* subtype is present */
26640 					type = bcmstrtok(&token, ",", NULL);
26641 					if ((type == NULL) || (dhd_check_valid_ie(dhd, type,
26642 						strlen(type)) == BCME_ERROR)) {
26643 						DHD_ERROR(("error: Invalid type \n"));
26644 						ret = BCME_ERROR;
26645 						goto exit;
26646 					}
26647 					data[len++] = bcm_atoi((char*)type);
26648 					/* subtype is last element */
26649 					if ((token == NULL) || (*token == '\0') ||
26650 						(dhd_check_valid_ie(dhd, token,
26651 						strlen(token)) == BCME_ERROR)) {
26652 						DHD_ERROR(("error: Invalid subtype \n"));
26653 						ret = BCME_ERROR;
26654 						goto exit;
26655 					}
26656 					data[len++] = bcm_atoi((char*)token);
26657 				}
26658 			}
26659 		}
26660 		ret = bcm_pack_xtlv_entry((uint8 **)&p_ie_tlv,
26661 			&buf_space_left, id, len, data, BCM_XTLV_OPTION_ALIGN32);
26662 		if (ret != BCME_OK) {
26663 			DHD_ERROR(("%s : bcm_pack_xtlv_entry() failed ,"
26664 				"status=%d\n", __FUNCTION__, ret));
26665 			goto exit;
26666 		}
26667 		i++;
26668 	}
26669 	if (i == 0) {
26670 		/* file is empty or first line is blank */
26671 		DHD_ERROR(("error: filter_ie file is empty or first line is blank \n"));
26672 		ret = BCME_ERROR;
26673 		goto exit;
26674 	}
26675 	/* update the iov header, set len to include all TLVs + header */
26676 	all_tlvsize = (bufsize - buf_space_left);
26677 	p_filter_iov->len = htol16(all_tlvsize + WL_FILTER_IE_IOV_HDR_SIZE);
26678 	ret = dhd_iovar(dhd, 0, "filter_ie", (void *)p_filter_iov,
26679 			p_filter_iov->len, NULL, 0, TRUE);
26680 	if (ret != BCME_OK) {
26681 		DHD_ERROR(("error: IOVAR failed, status=%d\n", ret));
26682 	}
26683 exit:
26684 	/* clean up */
26685 	if (p_filter_iov) {
26686 		MFREE(dhd->osh, p_filter_iov, filter_iovsize);
26687 	}
26688 	return ret;
26689 }
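/*
 * Input format accepted by dhd_parse_filter_ie(), as inferred from the
 * tokenizing above: one element per line, fields separated by commas,
 *   <element_id>[,<oui>[,<type>[,<subtype>]]]
 * A hypothetical FILTER_IE_PATH file:
 *
 *   221,00:50:F2,4,1
 *   127
 *
 * Line 1 filters element id 221 with OUI 00:50:F2, type 4 and subtype 1;
 * line 2 filters element id 127 alone.  The last line must not end with a
 * newline, since dhd_get_filter_ie_count() adds one for the final line.
 */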
26690 #endif /* FILTER_IE */
26691 #ifdef DHD_WAKE_STATUS
26692 wake_counts_t*
26693 dhd_get_wakecount(dhd_pub_t *dhdp)
26694 {
26695 #ifdef BCMDBUS
26696 	return NULL;
26697 #else
26698 	return dhd_bus_get_wakecount(dhdp);
26699 #endif /* BCMDBUS */
26700 }
26701 #endif /* DHD_WAKE_STATUS */
26702 
26703 int
26704 dhd_get_random_bytes(uint8 *buf, uint len)
26705 {
26706 #ifdef BCMPCIE
26707 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
26708 	int rndlen = get_random_bytes_arch(buf, len);
26709 	if (rndlen != len) {
26710 		bzero(buf, len);
26711 		get_random_bytes(buf, len);
26712 	}
26713 #else
26714 	get_random_bytes_arch(buf, len);
26715 #endif
26716 #endif /* BCMPCIE */
26717 	return BCME_OK;
26718 }
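/* Note on the fallback above: since kernel 4.19 get_random_bytes_arch()
 * returns the number of bytes the architectural RNG actually produced, so
 * when the request is not fully satisfied the buffer is zeroed and refilled
 * from the kernel CSPRNG via get_random_bytes().
 */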
26719 
26720 #if defined(DHD_HANG_SEND_UP_TEST)
26721 void
26722 dhd_make_hang_with_reason(struct net_device *dev, const char *string_num)
26723 {
26724 	dhd_info_t *dhd = NULL;
26725 	dhd_pub_t *dhdp = NULL;
26726 	uint reason = HANG_REASON_MAX;
26727 	uint32 fw_test_code = 0;
26728 	dhd = DHD_DEV_INFO(dev);
26729 
26730 	if (dhd) {
26731 		dhdp = &dhd->pub;
26732 	}
26733 
26734 	if (!dhd || !dhdp) {
26735 		return;
26736 	}
26737 
26738 	reason = (uint) bcm_strtoul(string_num, NULL, 0);
26739 	DHD_ERROR(("Enter %s, reason=0x%x\n", __FUNCTION__,  reason));
26740 
26741 	if (reason == 0) {
26742 		if (dhdp->req_hang_type) {
26743 			DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
26744 				__FUNCTION__, dhdp->req_hang_type));
26745 			dhdp->req_hang_type = 0;
26746 			return;
26747 		} else {
26748 			DHD_ERROR(("%s, No requested HANG test\n", __FUNCTION__));
26749 			return;
26750 		}
26751 	} else if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) {
26752 		DHD_ERROR(("Invalid HANG request, reason 0x%x\n", reason));
26753 		return;
26754 	}
26755 
26756 	if (dhdp->req_hang_type != 0) {
26757 		DHD_ERROR(("Already HANG requested for test\n"));
26758 		return;
26759 	}
26760 
26761 	switch (reason) {
26762 		case HANG_REASON_IOCTL_RESP_TIMEOUT:
26763 			DHD_ERROR(("Make HANG!!!: IOCTL response timeout(0x%x)\n", reason));
26764 			dhdp->req_hang_type = reason;
26765 			fw_test_code = 102; /* resumed on timeout */
26766 			(void) dhd_wl_ioctl_set_intiovar(dhdp, "bus:disconnect", fw_test_code,
26767 				WLC_SET_VAR, TRUE, 0);
26768 			break;
26769 		case HANG_REASON_DONGLE_TRAP:
26770 			DHD_ERROR(("Make HANG!!!: Dongle trap (0x%x)\n", reason));
26771 			dhdp->req_hang_type = reason;
26772 			fw_test_code = 99; /* dongle trap */
26773 			(void) dhd_wl_ioctl_set_intiovar(dhdp, "bus:disconnect", fw_test_code,
26774 				WLC_SET_VAR, TRUE, 0);
26775 			break;
26776 		case HANG_REASON_D3_ACK_TIMEOUT:
26777 			DHD_ERROR(("Make HANG!!!: D3 ACK timeout (0x%x)\n", reason));
26778 			dhdp->req_hang_type = reason;
26779 			break;
26780 		case HANG_REASON_BUS_DOWN:
26781 			DHD_ERROR(("Make HANG!!!: BUS down(0x%x)\n", reason));
26782 			dhdp->req_hang_type = reason;
26783 			break;
26784 		case HANG_REASON_PCIE_LINK_DOWN_RC_DETECT:
26785 		case HANG_REASON_PCIE_LINK_DOWN_EP_DETECT:
26786 		case HANG_REASON_MSGBUF_LIVELOCK:
26787 			dhdp->req_hang_type = 0;
26788 			DHD_ERROR(("Does not support requested HANG(0x%x)\n", reason));
26789 			break;
26790 		case HANG_REASON_IFACE_DEL_FAILURE:
26791 			dhdp->req_hang_type = 0;
26792 			DHD_ERROR(("Does not support requested HANG(0x%x)\n", reason));
26793 			break;
26794 		case HANG_REASON_HT_AVAIL_ERROR:
26795 			dhdp->req_hang_type = 0;
26796 			DHD_ERROR(("PCIe does not support requested HANG(0x%x)\n", reason));
26797 			break;
26798 		case HANG_REASON_PCIE_RC_LINK_UP_FAIL:
26799 			DHD_ERROR(("Make HANG!!!:Link Up(0x%x)\n", reason));
26800 			dhdp->req_hang_type = reason;
26801 			break;
26802 		default:
26803 			dhdp->req_hang_type = 0;
26804 			DHD_ERROR(("Unknown HANG request (0x%x)\n", reason));
26805 			break;
26806 	}
26807 }
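/*
 * Hypothetical trigger for the test hook above.  The reason string is
 * parsed with bcm_strtoul(..., 0), so hex input works; the value used here
 * assumes HANG_REASON_IOCTL_RESP_TIMEOUT lies strictly between
 * HANG_REASON_MASK and HANG_REASON_MAX, as the range check above requires.
 */
#if 0 /* illustrative sketch only, not built */
static void
dhd_example_request_hang(struct net_device *dev)
{
	/* request an IOCTL-response-timeout HANG for test purposes ... */
	dhd_make_hang_with_reason(dev, "0x8001");
	/* ... and clear any pending HANG test request again */
	dhd_make_hang_with_reason(dev, "0");
}
#endif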
26808 #endif /* DHD_HANG_SEND_UP_TEST */
26809 
26810 #ifdef BT_OVER_PCIE
26811 #define BT_QUIESCE TRUE
26812 #define BT_RESUME FALSE
26813 #define BT_QUIESCE_RESPONSE_TIMEOUT 4000
26814 
26815 int
26816 dhd_request_bt_quiesce(dhd_pub_t *dhdp)
26817 {
26818 	dhd_info_t * dhd = (dhd_info_t *)(dhdp->info);
26819 	long timeout = BT_QUIESCE_RESPONSE_TIMEOUT;
26820 
26821 	if (request_bt_quiesce_ptr == NULL) {
26822 		DHD_ERROR(("%s: BT not loaded\n", __FUNCTION__));
26823 		return BCME_OK;
26824 	}
26825 
26826 	mutex_lock(&dhd->quiesce_lock);
26827 	DHD_ERROR(("%s: start quiesce_state = %d\n", __FUNCTION__, dhd->dhd_quiesce_state));
26828 	if (dhd->dhd_quiesce_state != DHD_QUIESCE_INIT) {
26829 		DHD_ERROR(("%s: start quiesce_state = %d\n", __FUNCTION__, dhd->dhd_quiesce_state));
26830 		mutex_unlock(&dhd->quiesce_lock);
26831 		return BCME_ERROR;
26832 	}
26833 	dhd->dhd_quiesce_state = REQUEST_BT_QUIESCE;
26834 	request_bt_quiesce_ptr(BT_QUIESCE);
26835 
26836 	timeout = wait_event_timeout(dhd->quiesce_wait,
26837 		(dhd->dhd_quiesce_state == RESPONSE_BT_QUIESCE), timeout);
26838 
26839 	DHD_ERROR(("%s: after wait quiesce_state = %d\n", __FUNCTION__, dhd->dhd_quiesce_state));
26840 
26841 	mutex_unlock(&dhd->quiesce_lock);
26842 	if (!timeout) {
26843 		DHD_ERROR(("%s: timeout quiesce_state = %d\n",
26844 			__FUNCTION__, dhd->dhd_quiesce_state));
26845 		return BCME_BUSY;
26846 	}
26847 	return BCME_OK;
26848 }
26849 
26850 int
26851 dhd_request_bt_resume(dhd_pub_t *dhdp)
26852 {
26853 	dhd_info_t * dhd = (dhd_info_t *)(dhdp->info);
26854 	long timeout = BT_QUIESCE_RESPONSE_TIMEOUT;
26855 
26856 	if (request_bt_quiesce_ptr == NULL) {
26857 		DHD_ERROR(("%s: BT not loaded\n", __FUNCTION__));
26858 		return BCME_OK;
26859 	}
26860 
26861 	DHD_ERROR(("%s: start quiesce_state = %d\n", __FUNCTION__, dhd->dhd_quiesce_state));
26862 	mutex_lock(&dhd->quiesce_lock);
26863 	if (dhd->dhd_quiesce_state != RESPONSE_BT_QUIESCE) {
26864 		mutex_unlock(&dhd->quiesce_lock);
26865 		return BCME_ERROR;
26866 	}
26867 	dhd->dhd_quiesce_state = REQUEST_BT_RESUME;
26868 	request_bt_quiesce_ptr(BT_RESUME);
26869 
26870 	timeout = wait_event_timeout(dhd->quiesce_wait,
26871 		(dhd->dhd_quiesce_state == RESPONSE_BT_RESUME), timeout);
26872 
26873 	DHD_ERROR(("%s: after wait quiesce_state = %d\n", __FUNCTION__, dhd->dhd_quiesce_state));
26874 
26875 	dhd->dhd_quiesce_state = DHD_QUIESCE_INIT;
26876 	mutex_unlock(&dhd->quiesce_lock);
26877 	if (!timeout) {
26878 		DHD_ERROR(("%s: timeout quiesce_state = %d\n",
26879 			__FUNCTION__, dhd->dhd_quiesce_state));
26880 		return BCME_BUSY;
26881 	}
26882 	return BCME_OK;
26883 }
26884 
26885 void
26886 response_bt_quiesce(bool quiesce)
26887 {
26888 	dhd_pub_t *dhdp = g_dhd_pub;
26889 	dhd_info_t * dhd = (dhd_info_t *)(dhdp->info);
26890 	if (quiesce == BT_QUIESCE) {
26891 		if (dhd->dhd_quiesce_state == REQUEST_BT_QUIESCE) {
26892 			dhd->dhd_quiesce_state = RESPONSE_BT_QUIESCE;
26893 			wake_up(&dhd->quiesce_wait);
26894 			return;
26895 		}
26896 	} else if (quiesce == BT_RESUME) {
26897 		if (dhd->dhd_quiesce_state == REQUEST_BT_RESUME) {
26898 			dhd->dhd_quiesce_state = RESPONSE_BT_RESUME;
26899 			wake_up(&dhd->quiesce_wait);
26900 			return;
26901 		}
26902 	}
26903 	DHD_ERROR(("%s: Wrong Queisce Response=%d in State=%d\n",
26904 		__FUNCTION__, quiesce, dhd->dhd_quiesce_state));
26905 	return;
26906 }
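/*
 * Quiesce handshake, as implemented by the three functions above:
 *
 *   DHD_QUIESCE_INIT    --dhd_request_bt_quiesce()---> REQUEST_BT_QUIESCE
 *   REQUEST_BT_QUIESCE  --response_bt_quiesce(TRUE)--> RESPONSE_BT_QUIESCE
 *   RESPONSE_BT_QUIESCE --dhd_request_bt_resume()----> REQUEST_BT_RESUME
 *   REQUEST_BT_RESUME   --response_bt_quiesce(FALSE)-> RESPONSE_BT_RESUME
 *   RESPONSE_BT_RESUME  --(resume path)--------------> DHD_QUIESCE_INIT
 *
 * The requesters block on quiesce_wait for up to
 * BT_QUIESCE_RESPONSE_TIMEOUT; a response that does not match the pending
 * request is logged and dropped.
 */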
26907 
26908 int
26909 dhd_bus_perform_flr_with_quiesce(dhd_pub_t *dhdp, struct dhd_bus *bus,
26910 		bool init_deinit_path)
26911 {
26912 	int ret;
26913 	dhd_info_t * dhd = (dhd_info_t *)(dhdp->info);
26914 	bool dongle_isolation = dhdp->dongle_isolation;
26915 	mutex_lock(&dhd->quiesce_flr_lock);
26916 	dhd->dhd_quiesce_state = DHD_QUIESCE_INIT;
26917 
26918 	/* pause data on all the interfaces */
26919 	dhd_bus_stop_queue(dhdp->bus);
26920 
26921 	/* Since we are about to do FLR advertise that bus down is in progress
26922 	 * to other bus user contexts like Tx, Rx, IOVAR, WD etc
26923 	 */
26924 	dhdpcie_advertise_bus_cleanup(dhdp);
26925 
26926 #ifdef BT_OVER_PCIE
26927 	/* Disable L1SS of RC and EP
26928 	 * L1SS is enabled again in dhd_bus_start if dhd_sync_with_dongle succeeds
26929 	 */
26930 	dhd_bus_l1ss_enable_rc_ep(dhdp->bus, FALSE);
26931 #endif /* BT_OVER_PCIE */
26932 
26933 	if (dhd_bus_force_bt_quiesce_enabled(dhdp->bus)) {
26934 		DHD_ERROR(("%s: Request Quiesce\n", __FUNCTION__));
26935 		/* Request BT quiesce right before F0 FLR to minimise latency */
26936 		ret = dhd_request_bt_quiesce(dhdp); /* Handle return value */
26937 		if (ret != BCME_OK) {
26938 			DHD_ERROR(("%s: Error(%d) in Request Quiesce\n", __FUNCTION__, ret));
26939 			/* TODO: plugin API for Toggle REGON Here */
26940 			mutex_unlock(&dhd->quiesce_flr_lock);
26941 			return ret;
26942 		}
26943 	}
26944 
26945 	dhd_bus_pcie_pwr_req_reload_war(dhdp->bus);
26946 
26947 	DHD_ERROR(("%s: Perform FLR\n", __FUNCTION__));
26948 
26949 	ret = dhd_bus_perform_flr(dhdp->bus, dhd_bus_get_flr_force_fail(dhdp->bus));
26950 	if (ret != BCME_OK) {
26951 		DHD_ERROR(("%s: Error(%d) in Performing FLR\n", __FUNCTION__, ret));
26952 		/* TODO: Ensure that BT Host Driver is out of Quiesce state before REGON
26953 		 * Either by sending an unquiesce message Here OR as a part of ON/OFF API.
26954 		 */
26955 		/* TODO: plugin API for Toggle REGON Here */
26956 		mutex_unlock(&dhd->quiesce_flr_lock);
26957 		return ret;
26958 	}
26959 
26960 	if (dhd_bus_force_bt_quiesce_enabled(dhdp->bus)) {
26961 		DHD_ERROR(("%s: Request Resume\n", __FUNCTION__));
26962 		/* Resume BT right after F0 FLR to minimise latency */
26963 		ret = dhd_request_bt_resume(dhdp); /* Handle return value */
26964 		if (ret != BCME_OK) {
26965 			DHD_ERROR(("%s: Error(%d) in Request Resume\n", __FUNCTION__, ret));
26966 			/* TODO: plugin API for Toggle REGON Here */
26967 			mutex_unlock(&dhd->quiesce_flr_lock);
26968 			return ret;
26969 		}
26970 	}
26971 
26972 	/* The devreset path would perform FLR again; set dongle_isolation to skip that */
26973 	dhdp->dongle_isolation = TRUE;
26974 
26975 	DHD_ERROR(("%s: Devreset ON\n", __FUNCTION__));
26976 	dhd_bus_devreset(dhdp, 1); /* DHD structure cleanup */
26977 
26978 	DHD_ERROR(("%s: Devreset OFF\n", __FUNCTION__));
26979 	dhd_bus_devreset(dhdp, 0); /* DHD structure re-init */
26980 
26981 	dhdp->dongle_isolation = dongle_isolation; /* Restore the old value */
26982 
26983 	/* resume data on all the interfaces */
26984 	dhd_bus_start_queue(dhdp->bus);
26985 	mutex_unlock(&dhd->quiesce_flr_lock);
26986 
26987 	DHD_ERROR(("%s: done\n", __FUNCTION__));
26988 	return BCME_DNGL_DEVRESET;
26989 }
26990 #endif /* BT_OVER_PCIE */
26991 
26992 #ifdef DHD_TX_PROFILE
26993 static int
26994 process_layer2_headers(uint8 **p, int *plen, uint16 *type, bool is_host_sfhllc)
26995 {
26996 	int err = BCME_OK;
26997 
26998 	if (*type < ETHER_TYPE_MIN) {
26999 		struct dot3_mac_llc_snap_header *sh = (struct dot3_mac_llc_snap_header *)*p;
27000 
27001 		if (bcmp(&sh->dsap, llc_snap_hdr, SNAP_HDR_LEN) == 0) {
27002 			*type = ntoh16(sh->type);
27003 			if (*type == ETHER_TYPE_8021Q ||
27004 				(is_host_sfhllc && *type != ETHER_TYPE_8021Q)) {
27005 				*p += sizeof(struct dot3_mac_llc_snap_header);
27006 				if ((*plen -= sizeof(struct dot3_mac_llc_snap_header)) <= 0) {
27007 					err = BCME_ERROR;
27008 				}
27009 			}
27010 			else {
27011 				struct dot3_mac_llc_snapvlan_header *svh = (struct
27012 					dot3_mac_llc_snapvlan_header *)*p;
27013 
27014 				*type = ntoh16(svh->ether_type);
27015 				*p += sizeof(struct dot3_mac_llc_snapvlan_header);
27016 				if ((*plen -= sizeof(struct dot3_mac_llc_snapvlan_header)) <= 0) {
27017 					err = BCME_ERROR;
27018 				}
27019 			}
27020 		}
27021 		else {
27022 			err = BCME_ERROR;
27023 		}
27024 	}
27025 	else {
27026 		if (*type == ETHER_TYPE_8021Q) {
27027 			struct ethervlan_header *evh = (struct ethervlan_header *)*p;
27028 
27029 			*type = ntoh16(evh->ether_type);
27030 			*p += ETHERVLAN_HDR_LEN;
27031 			if ((*plen -= ETHERVLAN_HDR_LEN) <= 0) {
27032 				err = BCME_ERROR;
27033 			}
27034 		}
27035 		else {
27036 			*p += ETHER_HDR_LEN;
27037 			if ((*plen -= ETHER_HDR_LEN) <= 0) {
27038 				err = BCME_ERROR;
27039 			}
27040 		}
27041 	}
27042 
27043 	return err;
27044 }
27045 
27046 static int
27047 process_layer3_headers(uint8 **p, int plen, uint16 *type)
27048 {
27049 	int err = BCME_OK;
27050 
27051 	if (*type == ETHER_TYPE_IP) {
27052 		struct ipv4_hdr *iph = (struct ipv4_hdr *)*p;
27053 		uint16 len = IPV4_HLEN(iph);
27054 		if ((plen -= len) <= 0) {
27055 			err = BCME_ERROR;
27056 		} else if (IP_VER(iph) == IP_VER_4 && len >= IPV4_MIN_HEADER_LEN) {
27057 			*type = IPV4_PROT(iph);
27058 			*p += len;
27059 		} else {
27060 			err = BCME_ERROR;
27061 		}
27062 	} else if (*type == ETHER_TYPE_IPV6) {
27063 		struct ipv6_hdr *ip6h = (struct ipv6_hdr *)*p;
27064 		if ((plen -= IPV6_MIN_HLEN) <= 0) {
27065 			err = BCME_ERROR;
27066 		} else if (IP_VER(ip6h) == IP_VER_6) {
27067 			*type = IPV6_PROT(ip6h);
27068 			*p += IPV6_MIN_HLEN;
27069 			if (IPV6_EXTHDR(*type)) {
27070 				uint8 proto_6 = 0;
27071 				int32 exth_len = ipv6_exthdr_len(*p, &proto_6);
27072 				if (exth_len < 0 || ((plen -= exth_len) <= 0)) {
27073 					err = BCME_ERROR;
27074 				} else {
27075 					*type = proto_6;
27076 					*p += exth_len;
27077 				}
27078 			}
27079 		} else {
27080 			err = BCME_ERROR;
27081 		}
27082 	}
27083 
27084 	return err;
27085 }
27086 
27087 bool
27088 dhd_protocol_matches_profile(uint8 *p, int plen, const dhd_tx_profile_protocol_t
27089 		*proto, bool is_host_sfhllc)
27090 {
27091 	struct ether_header *eh = NULL;
27092 	bool result = FALSE;
27093 	uint16 type = 0, ether_type = 0;
27094 
27095 	ASSERT(proto != NULL);
27096 	ASSERT(p != NULL);
27097 
27098 	if (plen <= 0) {
27099 		result = FALSE;
27100 	} else {
27101 		eh = (struct ether_header *)p;
27102 		type = ntoh16(eh->ether_type);
27103 		if (type < ETHER_TYPE_MIN && is_host_sfhllc) {
27104 			struct dot3_mac_llc_snap_header *dot3 =
27105 				(struct dot3_mac_llc_snap_header *)p;
27106 			ether_type = ntoh16(dot3->type);
27107 		} else {
27108 			ether_type = type;
27109 		}
27110 
27111 		if (proto->layer == DHD_TX_PROFILE_DATA_LINK_LAYER &&
27112 				proto->protocol_number == ether_type) {
27113 			result = TRUE;
27114 		} else if (process_layer2_headers(&p, &plen, &type, is_host_sfhllc) != BCME_OK) {
27115 			/* pass 'type' instead of 'ether_type' to process_layer2_headers
27116 			 * because process_layer2_headers takes care of extracting the
27117 			 * protocol type itself when an LLC/SNAP header is present,
27118 			 * based on the condition (type < ETHER_TYPE_MIN)
27119 			 */
27120 			result = FALSE;
27121 		} else if (proto->layer == DHD_TX_PROFILE_DATA_LINK_LAYER) {
27122 			result = proto->protocol_number == type;
27123 		} else if (proto->layer != DHD_TX_PROFILE_NETWORK_LAYER) {
27124 			result = FALSE;
27125 		} else if (process_layer3_headers(&p, plen, &type) != BCME_OK) {
27126 			result = FALSE;
27127 		} else if (proto->protocol_number == type) {
27128 			/* L4, only check TCP/UDP case */
27129 			if ((type == IP_PROT_TCP) || (type == IP_PROT_UDP)) {
27130 				/* src/dst port are the first two uint16 fields in both tcp/udp
27131 				 * hdr
27132 				 */
27133 				struct bcmudp_hdr *hdr = (struct bcmudp_hdr *)p;
27134 
27135 				/* note that a src_port or dest_port of zero counts as a match
27136 				 */
27137 				result = ((proto->src_port == 0) || (proto->src_port ==
27138 					ntoh16(hdr->src_port))) && ((proto->dest_port == 0) ||
27139 					(proto->dest_port == ntoh16(hdr->dst_port)));
27140 			} else {
27141 				/* at this point we know we are dealing with layer 3, and we
27142 				 * know we are not dealing with TCP or UDP; this is considered a
27143 				 * match
27144 				 */
27145 				result = TRUE;
27146 			}
27147 		}
27148 	}
27149 
27150 	return result;
27151 }
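/*
 * Minimal sketch of matching a UDP flow with the routine above.  The
 * dhd_tx_profile_protocol_t fields used (layer, protocol_number, src_port,
 * dest_port) are exactly those consulted above; the frame is assumed to be
 * a plain Ethernet/IPv4/UDP packet and the port value is illustrative.
 */
#if 0 /* illustrative sketch only, not built */
static bool
dhd_example_match_udp(uint8 *frame, int frame_len)
{
	dhd_tx_profile_protocol_t proto;

	memset(&proto, 0, sizeof(proto));
	proto.layer = DHD_TX_PROFILE_NETWORK_LAYER;
	proto.protocol_number = IP_PROT_UDP;
	proto.src_port = 0;		/* zero is a wildcard */
	proto.dest_port = 5001;		/* match destination port 5001 */

	return dhd_protocol_matches_profile(frame, frame_len, &proto, FALSE);
}
#endif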
27152 #endif /* defined(DHD_TX_PROFILE) */
27153 
27154 #ifdef DHD_TIMESYNC
27155 void
27156 BCMFASTPATH(dhd_parse_proto)(uint8 *pktdata, dhd_pkt_parse_t *parse)
27157 {
27158 	uint8 *pkt = NULL;
27159 	struct iphdr *iph = NULL;
27160 	struct ether_header *eh = (struct ether_header *)pktdata;
27161 
27162 	if (ntoh16(eh->ether_type) < ETHER_TYPE_MIN) {
27163 		pkt = (uint8 *)&pktdata[ETHER_HDR_LEN + DOT11_LLC_SNAP_HDR_LEN];
27164 	} else {
27165 		pkt = (uint8 *)&pktdata[ETHER_HDR_LEN];
27166 	}
27167 
27168 	iph = (struct iphdr *)pkt;
27169 
27170 	parse->proto = IP_PROT_RESERVED;
27171 	parse->t1 = 0;
27172 	parse->t2 = 0;
27173 
27174 	/* check IP header */
27175 	if ((IPV4_HLEN(iph) != IPV4_HLEN_MIN) || (IP_VER(iph) != IP_VER_4)) {
27176 		return;
27177 	}
27178 
27179 	if (iph->protocol == IP_PROT_ICMP) {
27180 		struct icmphdr *icmph;
27181 
27182 		parse->proto = iph->protocol;
27183 		icmph = (struct icmphdr *)((uint8 *)pkt + sizeof(struct iphdr));
27184 
27185 		if ((icmph->type == ICMP_ECHO) || (icmph->type == ICMP_ECHOREPLY)) {
27186 			parse->t1 = icmph->type;
27187 			parse->t2 = ntoh16(icmph->un.echo.sequence);
27188 		} else {
27189 			parse->t1 = icmph->type;
27190 			parse->t2 = icmph->code;
27191 		}
27192 	} else {
27193 		parse->proto = iph->protocol;
27194 	}
27195 
27196 	return;
27197 }
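/* Resulting fields for the cases handled above:
 *   ICMP echo/reply: proto = IP_PROT_ICMP, t1 = ICMP type,
 *                    t2 = echo sequence number (host order);
 *   other ICMP:      t1 = ICMP type, t2 = ICMP code;
 *   other IPv4:      only proto is set (t1 = t2 = 0);
 *   non-IPv4, or IPv4 with options: proto stays IP_PROT_RESERVED.
 */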
27198 #endif /* DHD_TIMESYNC */
27199 
27200 #ifdef BCMPCIE
27201 #define KIRQ_PRINT_BUF_LEN 256
27202 
27203 void
27204 dhd_print_kirqstats(dhd_pub_t *dhd, unsigned int irq_num)
27205 {
27206 	unsigned long flags = 0;
27207 	struct irq_desc *desc;
27208 	int i;          /* cpu iterator */
27209 	struct bcmstrbuf strbuf;
27210 	char tmp_buf[KIRQ_PRINT_BUF_LEN];
27211 
27212 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
27213 	desc = irq_to_desc(irq_num);
27214 	if (!desc) {
27215 		DHD_ERROR(("%s : irqdesc is not found \n", __FUNCTION__));
27216 		return;
27217 	}
27218 	bcm_binit(&strbuf, tmp_buf, KIRQ_PRINT_BUF_LEN);
27219 	raw_spin_lock_irqsave(&desc->lock, flags);
27220 	bcm_bprintf(&strbuf, "dhd irq %u:", irq_num);
27221 	for_each_online_cpu(i)
27222 		bcm_bprintf(&strbuf, "%10u ",
27223 			desc->kstat_irqs ? *per_cpu_ptr(desc->kstat_irqs, i) : 0);
27224 	if (desc->irq_data.chip) {
27225 		if (desc->irq_data.chip->name)
27226 			bcm_bprintf(&strbuf, " %8s", desc->irq_data.chip->name);
27227 		else
27228 			bcm_bprintf(&strbuf, " %8s", "-");
27229 	} else {
27230 		bcm_bprintf(&strbuf, " %8s", "None");
27231 	}
27232 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
27233 	if (desc->irq_data.domain)
27234 		bcm_bprintf(&strbuf, " %d", (int)desc->irq_data.hwirq);
27235 #ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
27236 	bcm_bprintf(&strbuf, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
27237 #endif
27238 #endif /* LINUX VERSION > 3.1.0 */
27239 
27240 	if (desc->name)
27241 		bcm_bprintf(&strbuf, "-%-8s", desc->name);
27242 
27243 	DHD_ERROR(("%s\n", strbuf.origbuf));
27244 	raw_spin_unlock_irqrestore(&desc->lock, flags);
27245 #endif /* LINUX VERSION > 2.6.28 */
27246 }
27247 #endif /* BCMPCIE */
27248 
27249 void
27250 dhd_show_kirqstats(dhd_pub_t *dhd)
27251 {
27252 	unsigned int irq = -1;
27253 #ifdef BCMPCIE
27254 	dhdpcie_get_pcieirq(dhd->bus, &irq);
27255 #endif /* BCMPCIE */
27256 #ifdef BCMSDIO
27257 	irq = ((wifi_adapter_info_t *)dhd->info->adapter)->irq_num;
27258 #endif /* BCMSDIO */
27259 	if (irq != -1) {
27260 #ifdef BCMPCIE
27261 		DHD_ERROR(("DUMP data kernel irq stats : \n"));
27262 		dhd_print_kirqstats(dhd, irq);
27263 #endif /* BCMPCIE */
27264 #ifdef BCMSDIO
27265 		DHD_ERROR(("DUMP data/host wakeup kernel irq stats : \n"));
27266 #endif /* BCMSDIO */
27267 	}
27268 #ifdef BCMPCIE_OOB_HOST_WAKE
27269 	irq = dhd_bus_get_oob_irq_num(dhd);
27270 	if (irq) {
27271 		DHD_ERROR(("DUMP PCIE host wakeup kernel irq stats : \n"));
27272 		dhd_print_kirqstats(dhd, irq);
27273 	}
27274 #endif /* BCMPCIE_OOB_HOST_WAKE */
27275 }
27276 
27277 void
27278 dhd_print_tasklet_status(dhd_pub_t *dhd)
27279 {
27280 	dhd_info_t *dhdinfo;
27281 
27282 	if (!dhd) {
27283 		DHD_ERROR(("%s : DHD is null\n", __FUNCTION__));
27284 		return;
27285 	}
27286 
27287 	dhdinfo = dhd->info;
27288 
27289 	if (!dhdinfo) {
27290 		DHD_ERROR(("%s : DHD INFO is null \n", __FUNCTION__));
27291 		return;
27292 	}
27293 
27294 	DHD_ERROR(("DHD Tasklet status : 0x%lx\n", dhdinfo->tasklet.state));
27295 }
27296 
27297 #if defined(DHD_MQ) && defined(DHD_MQ_STATS)
27298 void
27299 dhd_mqstats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
27300 {
27301 	dhd_info_t *dhd = NULL;
27302 	int i = 0, j = 0;
27303 
27304 	if (!dhdp || !strbuf)
27305 		return;
27306 
27307 	dhd = dhdp->info;
27308 	bcm_bprintf(strbuf, "\nMQ STATS:\n=========\n");
27309 
27310 	bcm_bprintf(strbuf, "\nTx packet arrival AC histogram:\n");
27311 	bcm_bprintf(strbuf, "AC_BE    \tAC_BK    \tAC_VI    \tAC_VO\n");
27312 	bcm_bprintf(strbuf, "-----    \t-----    \t-----    \t-----\n");
27313 	for (i = 0; i < AC_COUNT; i++)
27314 		bcm_bprintf(strbuf, "%-10d\t", dhd->pktcnt_per_ac[i]);
27315 
27316 	bcm_bprintf(strbuf, "\n\nTx packet arrival Q-AC histogram:\n");
27317 	bcm_bprintf(strbuf, "\tAC_BE    \tAC_BK    \tAC_VI    \tAC_VO\n");
27318 	bcm_bprintf(strbuf, "\t-----    \t-----    \t-----    \t-----");
27319 	for (i = 0; i < MQ_MAX_QUEUES; i++) {
27320 		bcm_bprintf(strbuf, "\nQ%d\t", i);
27321 		for (j = 0; j < AC_COUNT; j++)
27322 			bcm_bprintf(strbuf, "%-8d\t", dhd->pktcnt_qac_histo[i][j]);
27323 	}
27324 
27325 	bcm_bprintf(strbuf, "\n\nTx Q-CPU scheduling histogram:\n");
27326 	bcm_bprintf(strbuf, "\t");
27327 	for (i = 0; i < nr_cpu_ids; i++)
27328 		bcm_bprintf(strbuf, "CPU%d    \t", i);
27329 	for (i = 0; i < MQ_MAX_QUEUES; i++) {
27330 		bcm_bprintf(strbuf, "\nQ%d\t", i);
27331 		for (j = 0; j < nr_cpu_ids; j++)
27332 			bcm_bprintf(strbuf, "%-8d\t", dhd->cpu_qstats[i][j]);
27333 	}
27334 	bcm_bprintf(strbuf, "\n");
27335 }
27336 #endif /* DHD_MQ && DHD_MQ_STATS */
27337 
27338 #if defined(GDB_PROXY) && defined(PCIE_FULL_DONGLE) && defined(BCMINTERNAL)
27339 /* Procfs interface that gives GDB Proxy asynchronous access to the "sbreg",
27340  * "membytes", "gdb_proxy_probe" and "gdb_proxy_stop_count" iovars.
27341  * It consists of a root directory,
27342  * /proc/dhd_gdb_proxy_<dev_name> (where <dev_name> is e.g. 'eth0'),
27343  * that contains the files "sbreg", "membytes", "gdb_proxy_probe" and
27344  * "gdb_proxy_stop_count", used to access the respective
27345  * iovars. Unlike the iovars themselves, access to these files is not blocked
27346  * by iovar processing already in progress (i.e. a file may be accessed while
27347  * a wl iovar is stuck on a breakpoint inside firmware).
27348  * The address for the "membytes" and "sbreg" files is set by means of the
27349  * seek position.
27350  * For now "membytes" and "sbreg" may only be used to read/write 1, 2 or 4
27351  * bytes - this may be expanded later.
27352  * For now "gdb_proxy_probe" only returns the current Proxy ID, but does not
27353  * set a new one (unlike the iovar, which may do both)
27354  */
27355 
27356 /* Size of firmware address space */
27357 #define GDB_PROXY_FS_MEM_SIZE ((loff_t)1 << 32)
27358 
27359 /* Common part of 'llseek' routine for all files */
27360 static loff_t
27361 gdb_proxy_fs_llseek(struct file *fp, loff_t off, int whence, loff_t file_len)
27362 {
27363 	loff_t pos = -1;
27364 
27365 	switch (whence) {
27366 	case SEEK_SET:
27367 		pos = off;
27368 		break;
27369 	case SEEK_CUR:
27370 		pos = fp->f_pos + off;
27371 		break;
27372 	case SEEK_END:
27373 		pos = file_len - off;
27374 		break;
27375 	}
27376 	if ((pos < 0) || (pos > file_len)) {
27377 		return -EINVAL;
27378 	}
27379 	fp->f_pos = pos;
27380 	return pos;
27381 }
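/* Note the SEEK_END convention above: the offset is subtracted from the
 * file length rather than added to it, so a seek of (4, SEEK_END) lands at
 * file_len - 4.  Callers used to standard lseek() semantics must negate
 * their offset accordingly.
 */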
27382 
27383 /* Common read/write procedure for "gdb_proxy_probe" and "gdb_proxy_stop_count"
27384  * procfs files
27385  * fp: file descriptor
27386  * user_buffer_in: userspace buffer address for write operation, NULL for read
27387  *	operation
27388  * user_buffer_out: userspace buffer address for read operation, NULL for write
27389  *	operation
27390  * count: maximum number of bytes to read/write
27391  * position: seek position incremented by length of data read/written
27392  * iovar: name of iovar being accessed
27393  * iovar_data_buf: intermediate buffer to store iovar data
27394  * iovar_data_len: length of data, corresponded to iovar
27395  * read_params: NULL or address of input parameter for iovar read
27396  * read_plen: 0 or length of input parameter for iovar read
27397  * Returns number of bytes read/written or error code
27398  */
27399 static ssize_t
27400 gdb_proxy_fs_iovar_data_op(struct file *fp, const char __user *user_buffer_in,
27401 	char __user *user_buffer_out, size_t count, loff_t *position,
27402 	const char *iovar, void *iovar_data_buf, size_t iovar_data_len,
27403 	void *read_params, size_t read_plen)
27404 {
27405 	dhd_info_t *dhd = (dhd_info_t *)PDE_DATA(file_inode(fp));
27406 	int err;
27407 	if (count == 0) {
27408 		return 0;
27409 	}
27410 	/* If the position is beyond the data length - read nothing */
27411 	if ((*position < 0) || (*position >= (loff_t)iovar_data_len)) {
27412 		return 0;
27413 	}
27414 	/* If the buffer end is past the structure length - truncate it */
27415 	if ((*position + count) > (loff_t)iovar_data_len) {
27416 		count = (size_t)((loff_t)iovar_data_len - *position);
27417 	}
27418 	if (user_buffer_in) {
27419 		/* SET operation */
27420 		/* Read/modify/write if not whole-buffer-operation */
27421 		if ((*position != 0) || (count < iovar_data_len)) {
27422 			err = dhd_bus_iovar_op(&(dhd->pub), iovar,
27423 				(char *)read_params, (uint)read_plen,
27424 				(char *)iovar_data_buf, (uint)iovar_data_len, IOV_GET);
27425 			if (err) {
27426 				return -EPERM;
27427 			}
27428 		}
27429 		if (copy_from_user((char *)iovar_data_buf + (uint)*position, user_buffer_in, count))
27430 		{
27431 			return -EPERM;
27432 		}
27433 		/* This params/plen of NULL/0 is a 'legal fiction', imposed by
27434 		 * strange assert in dhd_bus_iovar_op(). After this strange
27435 		 * assert, arg/arglen is copied to params/plen - and even used
27436 		 * inside iovar handler!
27437 		 */
27438 		err = dhd_bus_iovar_op(&(dhd->pub), iovar, NULL, 0,
27439 			(char *)iovar_data_buf, (uint)iovar_data_len, IOV_SET);
27440 	} else {
27441 		/* GET operation */
27442 		err = dhd_bus_iovar_op(&(dhd->pub), iovar, (char *)read_params, (uint)read_plen,
27443 			(char *)iovar_data_buf, (uint)iovar_data_len, IOV_GET);
27444 	}
27445 	if (err) {
27446 		return -EPERM;
27447 	}
27448 	if (user_buffer_out) {
27449 		if (copy_to_user(user_buffer_out, (char *)iovar_data_buf + (uint)*position, count))
27450 		{
27451 			return -EPERM;
27452 		}
27453 	}
27454 	*position += count;
27455 	return count;
27456 }
27457 
27458 /* Read for "gdb_proxy_probe" procfs file */
27459 static ssize_t
27460 gdb_proxy_fs_probe_read(struct file *fp, char __user *user_buffer, size_t count,
27461 	loff_t *position)
27462 {
27463 	uint32 proxy_id = 0;
27464 	dhd_gdb_proxy_probe_data_t probe_data;
27465 	return gdb_proxy_fs_iovar_data_op(fp, NULL, user_buffer, count, position, "gdb_proxy_probe",
27466 		&probe_data, sizeof(probe_data), &proxy_id, sizeof(proxy_id));
27467 }
27468 
27469 /* Seek for "gdb_proxy_probe" file */
27470 static loff_t
27471 gdb_proxy_fs_probe_llseek(struct file *fp, loff_t off, int whence)
27472 {
27473 	return gdb_proxy_fs_llseek(fp, off, whence, sizeof(dhd_gdb_proxy_probe_data_t));
27474 }
27475 
27476 /* File operations for "gdb_proxy_probe" procfs file */
27477 static const struct file_operations
27478 gdb_proxy_fs_probe_file_ops = {
27479 	.read = gdb_proxy_fs_probe_read,
27480 	.llseek = gdb_proxy_fs_probe_llseek,
27481 };
27482 
27483 /* Read for "gdb_proxy_stop_count" procfs file */
27484 static ssize_t
27485 gdb_proxy_fs_stop_count_read(struct file *fp, char __user *user_buffer, size_t count,
27486 	loff_t *position)
27487 {
27488 	uint32 stop_count;
27489 	return gdb_proxy_fs_iovar_data_op(fp, NULL, user_buffer, count, position,
27490 		"gdb_proxy_stop_count", &stop_count, sizeof(stop_count), NULL, 0);
27491 }
27492 
27493 /* Write for "gdb_proxy_stop_count" procfs file */
27494 static ssize_t
27495 gdb_proxy_fs_stop_count_write(struct file *fp, const char __user *user_buffer, size_t count,
27496 	loff_t *position)
27497 {
27498 	uint32 stop_count;
27499 	return gdb_proxy_fs_iovar_data_op(fp, user_buffer, NULL, count, position,
27500 		"gdb_proxy_stop_count", &stop_count, sizeof(stop_count), NULL, 0);
27501 }
27502 
27503 /* Seek for "gdb_proxy_stop_count" file */
27504 static loff_t
27505 gdb_proxy_fs_stop_count_llseek(struct file *fp, loff_t off, int whence)
27506 {
27507 	return gdb_proxy_fs_llseek(fp, off, whence, sizeof(uint32));
27508 }
27509 
27510 /* File operations for "gdb_proxy_stop_count" procfs file */
27511 static const struct file_operations
27512 gdb_proxy_fs_stop_count_file_ops = {
27513 	.read = gdb_proxy_fs_stop_count_read,
27514 	.write = gdb_proxy_fs_stop_count_write,
27515 	.llseek = gdb_proxy_fs_stop_count_llseek,
27516 };
27517 
27518 /* Common read/write procedure for "membytes" and "sbreg" procfs files
27519  * fp: file descriptor
27520  * buffer_in: userspace buffer address for write operation, NULL for read
27521  *	operation
27522  * buffer_out: userspace buffer address for read operation, NULL for write
27523  *	operation
27524  * count: maximum number of bytes to read/write
27525  * position: seek position (interpreted as memory address in firmware address
27526  *	space),
27527  *	incremented by length of data read/written
27528  * iovar: name of iovar being accessed
27529  * address_first: TRUE if the address is packed first, FALSE if the width is
27530  * Returns number of bytes read/written or error code
27531  */
27532 static ssize_t
27533 gdb_proxy_fs_iovar_mem_op(struct file *fp, const char __user *user_buffer_in,
27534 	char __user *user_buffer_out, size_t count, loff_t *position,
27535 	const char *iovar, bool address_first)
27536 {
27537 	dhd_info_t *dhd = (dhd_info_t *)PDE_DATA(file_inode(fp));
27538 	uint32 buf[3];
27539 	int err;
27540 	if (count == 0) {
27541 		return 0;
27542 	}
27543 	if ((count > sizeof(uint32)) || (count & (count - 1))) {
27544 		return -EINVAL;
27545 	}
27546 	buf[address_first ? 0 : 1] = (uint32)(*position);
27547 	buf[address_first ? 1 : 0] = (uint32)count;
27548 	if (user_buffer_in) {
27549 		/* SET operation */
27550 		if (copy_from_user(&buf[2], user_buffer_in, count)) {
27551 			return -EPERM;
27552 		}
27553 		/* This params/plen of NULL/0 is a 'legal fiction', imposed by
27554 		 * strange assert in dhd_bus_iovar_op(). After this strange
27555 		 * assert, arg/arglen is copied to params/plen - and even used
27556 		 * inside iovar handler!
27557 		 */
27558 		err = dhd_bus_iovar_op(&(dhd->pub), iovar, NULL, 0, (char *)buf, sizeof(*buf) * 3,
27559 			IOV_SET);
27560 	} else {
27561 		/* GET operation */
27562 		/* This arglen of 8 bytes (where 4 would suffice) is due to a
27563 		 * strange requirement that the minimum arglen be 8, hardcoded
27564 		 * into the "membytes" iovar definition
27565 		 */
27566 		err = dhd_bus_iovar_op(&(dhd->pub), iovar, (char *)buf, sizeof(*buf) * 2,
27567 			(char *)buf, sizeof(*buf) * 2, IOV_GET);
27568 	}
27569 	if (err) {
27570 		return -EPERM;
27571 	}
27572 	*position += count;
27573 	if (user_buffer_out) {
27574 		if (copy_to_user(user_buffer_out, &buf[0], count)) {
27575 			return -EPERM;
27576 		}
27577 	}
27578 	return count;
27579 }
27580 
27581 /* Common seek procedure for "membytes" and "sbreg" procfs files */
27582 static loff_t
27583 gdb_proxy_fs_memory_llseek(struct file *fp, loff_t off, int whence)
27584 {
27585 	return gdb_proxy_fs_llseek(fp, off, whence, GDB_PROXY_FS_MEM_SIZE);
27586 }
27587 
27588 /* Read for "membytes" procfs file */
27589 static ssize_t
27590 gdb_proxy_fs_membytes_read(struct file *fp, char __user *user_buffer, size_t count,
27591 	loff_t *position)
27592 {
27593 	return gdb_proxy_fs_iovar_mem_op(fp, NULL, user_buffer, count, position, "membytes", TRUE);
27594 }
27595 
27596 /* Write for "membytes" procfs file */
27597 static ssize_t
27598 gdb_proxy_fs_membytes_write(struct file *fp, const char __user *user_buffer, size_t count,
27599 	loff_t *position)
27600 {
27601 	return gdb_proxy_fs_iovar_mem_op(fp, user_buffer, NULL, count, position, "membytes", TRUE);
27602 }
27603 
27604 /* File operations for "membytes" procfs file */
27605 static const struct file_operations
27606 gdb_proxy_fs_membytes_file_ops = {
27607 	.read = gdb_proxy_fs_membytes_read,
27608 	.write = gdb_proxy_fs_membytes_write,
27609 	.llseek = gdb_proxy_fs_memory_llseek,
27610 };
27611 
27612 /* Read for "sbreg" procfs file */
27613 static ssize_t
27614 gdb_proxy_fs_sbreg_read(struct file *fp, char __user *user_buffer, size_t count, loff_t *position)
27615 {
27616 	return gdb_proxy_fs_iovar_mem_op(fp, NULL, user_buffer, count, position, "sbreg", FALSE);
27617 }
27618 
27619 /* Write for "sbreg" procfs file */
27620 static ssize_t
27621 gdb_proxy_fs_sbreg_write(struct file *fp, const char __user *user_buffer, size_t count,
27622 	loff_t *position)
27623 {
27624 	return gdb_proxy_fs_iovar_mem_op(fp, user_buffer, NULL, count, position, "sbreg", FALSE);
27625 }
27626 
27627 /* File operations for "sbreg" procfs file */
27628 static const struct file_operations
27629 gdb_proxy_fs_sbreg_file_ops = {
27630 	.read = gdb_proxy_fs_sbreg_read,
27631 	.write = gdb_proxy_fs_sbreg_write,
27632 	.llseek = gdb_proxy_fs_memory_llseek,
27633 };
27634 
27635 /* If GDB Proxy procfs files set not yet created for given dhd instance - creates it */
27636 static void
27637 gdb_proxy_fs_try_create(dhd_info_t *dhd, const char *dev_name)
27638 {
27639 	char dir_name[sizeof(dhd->gdb_proxy_fs_root_name)] = "dhd_gdb_proxy_";
27640 	struct proc_dir_entry *root_dentry;
27641 	int i;
27642 	static const struct {
27643 		const char *file_name;
27644 		const struct file_operations *fops;
27645 	} fileinfos[] = {
27646 		{"gdb_proxy_probe", &gdb_proxy_fs_probe_file_ops},
27647 		{"gdb_proxy_stop_count", &gdb_proxy_fs_stop_count_file_ops},
27648 		{"membytes", &gdb_proxy_fs_membytes_file_ops},
27649 		{"sbreg", &gdb_proxy_fs_sbreg_file_ops},
27650 	};
27651 	if (!dev_name || !*dev_name || dhd->gdb_proxy_fs_root) {
27652 		return;
27653 	}
27654 	strlcat_s(dir_name, dev_name, sizeof(dir_name));
27655 	dir_name[sizeof(dir_name) - 1] = 0;
27656 	root_dentry = proc_mkdir(dir_name, NULL);
27657 	if ((root_dentry == NULL) || IS_ERR(root_dentry)) {
27658 		return;
27659 	}
27660 	for (i = 0; i < ARRAYSIZE(fileinfos); ++i) {
27661 		struct proc_dir_entry *file_dentry = proc_create_data(fileinfos[i].file_name,
27662 			S_IRUGO | (fileinfos[i].fops->write ? S_IWUGO : 0), root_dentry,
27663 			fileinfos[i].fops,  dhd);
27664 		if ((file_dentry == NULL) || IS_ERR(file_dentry)) {
27665 			goto fail;
27666 		}
27667 	}
27668 	dhd->gdb_proxy_fs_root = root_dentry;
27669 	memcpy_s(dhd->gdb_proxy_fs_root_name, sizeof(dhd->gdb_proxy_fs_root_name),
27670 		dir_name, sizeof(dhd->gdb_proxy_fs_root_name));
27671 	return;
27672 fail:
27673 	if (root_dentry) {
27674 		remove_proc_subtree(dir_name, NULL);
27675 	}
27676 }
27677 
27678 /* If GDB Proxy procfs files set created for given dhd instance - removes it */
27679 static void
27680 gdb_proxy_fs_remove(dhd_info_t *dhd)
27681 {
27682 	if (dhd->gdb_proxy_fs_root) {
27683 		remove_proc_subtree(dhd->gdb_proxy_fs_root_name, NULL);
27684 		dhd->gdb_proxy_fs_root = NULL;
27685 		bzero(dhd->gdb_proxy_fs_root_name, sizeof(dhd->gdb_proxy_fs_root_name));
27686 	}
27687 }
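/*
 * Hypothetical userspace view of the procfs tree built above, for a device
 * named eth0 (the shell commands are illustrative only):
 *
 *   # read 4 bytes of dongle memory at address 0x180000 via "membytes";
 *   # the seek position selects the firmware address
 *   dd if=/proc/dhd_gdb_proxy_eth0/membytes bs=4 count=1 skip=$((0x180000 / 4))
 *
 * Only 1-, 2- or 4-byte transfers are accepted by
 * gdb_proxy_fs_iovar_mem_op(); anything else fails with -EINVAL.
 */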
27688 #endif /* defined(GDB_PROXY) && defined(PCIE_FULL_DONGLE) && defined(BCMINTERNAL) */
27689 
27690 #ifdef DHD_MAP_LOGGING
27691 /* Will be called from SMMU fault handler */
27692 void
27693 dhd_smmu_fault_handler(uint32 axid, ulong fault_addr)
27694 {
27695 	dhd_pub_t *dhdp = (dhd_pub_t *)g_dhd_pub;
27696 	uint32 irq = (uint32)-1;
27697 
27698 	DHD_ERROR(("%s: Trigger SMMU Fault\n", __FUNCTION__));
27699 	DHD_ERROR(("%s: axid:0x%x, fault_addr:0x%lx", __FUNCTION__, axid, fault_addr));
27700 	dhdp->smmu_fault_occurred = TRUE;
27701 #ifdef DNGL_AXI_ERROR_LOGGING
27702 	dhdp->axi_error = TRUE;
27703 	dhdp->axi_err_dump->axid = axid;
27704 	dhdp->axi_err_dump->fault_address = fault_addr;
27705 #endif /* DNGL_AXI_ERROR_LOGGING */
27706 
27707 	/* Disable PCIe IRQ */
27708 	dhdpcie_get_pcieirq(dhdp->bus, &irq);
27709 	if (irq != (uint32)-1) {
27710 		disable_irq_nosync(irq);
27711 	}
27712 
27713 	/* Take debug information first */
27714 	DHD_OS_WAKE_LOCK(dhdp);
27715 	dhd_prot_smmu_fault_dump(dhdp);
27716 	DHD_OS_WAKE_UNLOCK(dhdp);
27717 
27718 	/* Take AXI information if possible */
27719 #ifdef DNGL_AXI_ERROR_LOGGING
27720 #ifdef DHD_USE_WQ_FOR_DNGL_AXI_ERROR
27721 	dhd_axi_error_dispatch(dhdp);
27722 #else
27723 	dhd_axi_error(dhdp);
27724 #endif /* DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
27725 #endif /* DNGL_AXI_ERROR_LOGGING */
27726 }
27727 EXPORT_SYMBOL(dhd_smmu_fault_handler);
27728 #endif /* DHD_MAP_LOGGING */
27729 
27730 #ifdef DHD_PKTTS
27731 /* Get pktts flow configuration */
27732 int
27733 dhd_get_pktts_flow(dhd_pub_t *dhdp, void *arg, int len)
27734 {
27735 	dhd_info_t *dhd = dhdp->info;
27736 
27737 	if (!arg || len <= (sizeof(pktts_flow_t) * PKTTS_CONFIG_MAX)) {
27738 		return BCME_BADARG;
27739 	}
27740 
27741 	return memcpy_s(arg, len, &dhd->config[0], sizeof(dhd->config));
27742 }
27743 
27744 /* Set pktts flow configuration */
27745 int
27746 dhd_set_pktts_flow(dhd_pub_t *dhdp, void *params, int plen)
27747 {
27748 	dhd_info_t *dhd = dhdp->info;
27749 	pktts_flow_t *config;
27750 	uint32 checksum = 0;
27751 	int ret = BCME_OK;
27752 	uint32 temp;
27753 	uint32 idx = PKTTS_CONFIG_MAX;
27754 	uint32 num_config = 0;
27755 
27756 	if (plen < sizeof(*config)) {
27757 		DHD_ERROR(("dhd_set_pktts_flow: invalid buffer length (%d)\n", plen));
27758 		return BCME_BADLEN;
27759 	}
27760 
27761 	config = (pktts_flow_t *)params;
27762 
27763 	temp = htonl(config->src_ip);
27764 	checksum ^= bcm_compute_xor32((volatile uint32 *)&temp,
27765 			sizeof(temp) / sizeof(uint32));
27766 	temp = htonl(config->dst_ip);
27767 	checksum ^= bcm_compute_xor32((volatile uint32 *)&temp,
27768 			sizeof(temp) / sizeof(uint32));
27769 
27770 	temp = (hton16(config->dst_port) << 16) | hton16(config->src_port);
27771 	checksum ^= bcm_compute_xor32((volatile uint32 *)&temp,
27772 			sizeof(temp) / sizeof(uint32));
27773 	temp = config->proto;
27774 	checksum ^= bcm_compute_xor32((volatile uint32 *)&temp,
27775 			sizeof(temp) / sizeof(uint32));
27776 
27777 	/* Look for checksum match: for delete or update */
27778 	dhd_match_pktts_flow(dhdp, checksum, &idx, &num_config);
27779 
27780 	/* no matching config */
27781 	if (idx == PKTTS_CONFIG_MAX) {
27782 		if (config->pkt_offset == PKTTS_OFFSET_INVALID) {
27783 			/* no matching config found for deletion */
27784 			return BCME_NOTFOUND;
27785 		}
27786 
27787 		/* look for free config space */
27788 		for (idx = 0; idx < PKTTS_CONFIG_MAX; idx++) {
27789 			if (dhd->config[idx].chksum == 0) {
27790 				break;
27791 			}
27792 		}
27793 
27794 		if (idx == PKTTS_CONFIG_MAX) {
27795 			/* no config space left */
27796 			return BCME_NORESOURCE;
27797 		}
27798 	}
27799 
27800 	if (config->pkt_offset == PKTTS_OFFSET_INVALID) {
27801 		/* reset the entry if pkt_offset is invalid (delete request) */
27802 		memset(&dhd->config[idx], 0, sizeof(dhd->config[idx]));
27803 	} else {
27804 		ret = memcpy_s(&dhd->config[idx], sizeof(dhd->config[idx]),
27805 			config, sizeof(*config));
27806 		if (ret == BCME_OK) {
27807 			dhd->config[idx].chksum = checksum;
27808 		}
27809 	}
27810 
27811 	return ret;
27812 }
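/*
 * Sketch of the five-tuple key computed above, factored out for clarity;
 * it mirrors the checksum logic in dhd_set_pktts_flow() exactly and is the
 * value dhd_match_pktts_flow() compares against config[i].chksum.
 */
#if 0 /* illustrative sketch only, not built */
static uint32
dhd_example_flow_key(const pktts_flow_t *cfg)
{
	uint32 key = 0;
	uint32 w;

	w = htonl(cfg->src_ip);
	key ^= bcm_compute_xor32((volatile uint32 *)&w, 1);
	w = htonl(cfg->dst_ip);
	key ^= bcm_compute_xor32((volatile uint32 *)&w, 1);
	/* both ports folded into one 32-bit word, network order */
	w = (hton16(cfg->dst_port) << 16) | hton16(cfg->src_port);
	key ^= bcm_compute_xor32((volatile uint32 *)&w, 1);
	w = cfg->proto;
	key ^= bcm_compute_xor32((volatile uint32 *)&w, 1);

	return key;
}
#endif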
27813 
27814 /**
27815  * dhd_match_pktts_flow - return the pktts config matching the given checksum
27816  *
27817  * @dhdp: pointer to dhd_pub object
27818  * @checksum: five tuple checksum
27819  * @idx: returns index of matching pktts config
27820  * @num_config: returns number of valid pktts config
27821  */
27822 pktts_flow_t *
27823 dhd_match_pktts_flow(dhd_pub_t *dhdp, uint32 checksum, uint32 *idx, uint32 *num_config)
27824 {
27825 	dhd_info_t *dhd = dhdp->info;
27826 	pktts_flow_t *flow = NULL;
27827 	uint8 i;
27828 
27829 	for (i = 0; i < PKTTS_CONFIG_MAX; i++) {
27830 		if (dhd->config[i].chksum) {
27831 			(*num_config)++;
27832 		}
27833 
27834 		if (checksum && (dhd->config[i].chksum == checksum)) {
27835 			flow = &dhd->config[i];
27836 			break;
27837 		}
27838 	}
27839 
27840 	/* update matching config index */
27841 	if (idx) {
27842 		*idx = i;
27843 	}
27844 
27845 	/* continue counting the remaining valid configs; the match was counted above */
27846 	for (i++; i < PKTTS_CONFIG_MAX; i++) {
27847 		if (dhd->config[i].chksum) {
27848 			(*num_config)++;
27849 		}
27850 	}
27851 
27852 	return flow;
27853 }
27854 
27855 /* Get pktts enab configuration */
27856 int dhd_get_pktts_enab(dhd_pub_t *dhdp)
27857 {
27858 	dhd_info_t *dhd = dhdp->info;
27859 
27860 	return dhd->latency;
27861 }
27862 
27863 /* Set pktts enable configuration */
27864 int dhd_set_pktts_enab(dhd_pub_t *dhdp, bool val)
27865 {
27866 	dhd_info_t *dhd = dhdp->info;
27867 	uint32 var_int =  val;
27868 	int ret = BCME_OK;
27869 	uint power_val;
27870 
27871 	/* check whether FW supports pktlat_ipc or pktlat_meta */
27872 	if (!FW_SUPPORTED(dhdp, pktlat_ipc) && !FW_SUPPORTED(dhdp, pktlat_meta)) {
27873 		BCM_REFERENCE(power_val);
27874 		DHD_INFO(("Chip does not support pktlat\n"));
27875 		return ret;
27876 	}
27877 	power_val = 0;
27878 	/* Disabling mpc and PM mode for pktlat */
27879 	ret = dhd_iovar(dhdp, 0, "mpc", (char *)&power_val, sizeof(power_val), NULL, 0, TRUE);
27880 	if (ret < 0) {
27881 		DHD_ERROR(("%s: Unable to set mpc 0, ret=%d\n", __FUNCTION__, ret));
27882 		return ret;
27883 	}
27884 	power_val = PM_OFF;
27885 	ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_PM, (char *)&power_val, sizeof(power_val),
27886 			TRUE, 0);
27887 	if (ret < 0) {
27888 		DHD_ERROR(("%s: Unable to set PM 0, ret=%d\n", __FUNCTION__, ret));
27889 		return ret;
27890 	}
27891 
27892 	ret = dhd_iovar(dhdp, 0, "pktts_enab", (char *)&var_int, sizeof(var_int), NULL, 0, TRUE);
27893 	if (ret < 0) {
27894 		DHD_ERROR(("%s: enabling pktts_enab failed, ret=%d\n", __FUNCTION__, ret));
27895 		return ret;
27896 	}
27897 
27898 	dhd->latency = val;
27899 
27900 	return 0;
27901 }
27902 #endif /* DHD_PKTTS */
27903 
27904 #ifdef DHD_ERPOM
27905 static void
27906 dhd_error_recovery(void *handle, void *event_info, u8 event)
27907 {
27908 	dhd_info_t *dhd = handle;
27909 	dhd_pub_t *dhdp;
27910 	int ret = 0;
27911 
27912 	if (!dhd) {
27913 		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
27914 		return;
27915 	}
27916 
27917 	dhdp = &dhd->pub;
27918 
27919 	if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
27920 		DHD_ERROR(("%s: init not completed, cannot initiate recovery\n",
27921 			__FUNCTION__));
27922 		return;
27923 	}
27924 
27925 #ifdef BT_OVER_PCIE
27926 	if (dhdp->dongle_trap_due_to_bt) {
27927 		DHD_ERROR(("WLAN trapped due to BT, toggle REG_ON\n"));
27928 		/* toggle REG_ON */
27929 		dhdp->pom_toggle_reg_on(WLAN_FUNC_ID, BY_WLAN_DUE_TO_BT);
27930 		return;
27931 	}
27932 #endif /* BT_OVER_PCIE */
27933 
27934 	ret = dhd_bus_perform_flr_with_quiesce(dhdp, dhdp->bus, FALSE);
27935 	if (ret != BCME_DNGL_DEVRESET) {
27936 		DHD_ERROR(("%s: dhd_bus_perform_flr_with_quiesce failed with ret: %d,"
27937 			"toggle REG_ON\n", __FUNCTION__, ret));
27938 		/* toggle REG_ON */
27939 		dhdp->pom_toggle_reg_on(WLAN_FUNC_ID, BY_WLAN_DUE_TO_WLAN);
27940 		return;
27941 	}
27942 }
27943 
27944 void
27945 dhd_schedule_reset(dhd_pub_t *dhdp)
27946 {
27947 	if (dhdp->enable_erpom) {
27948 		dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, NULL,
27949 			DHD_WQ_WORK_ERROR_RECOVERY, dhd_error_recovery, DHD_WQ_WORK_PRIORITY_HIGH);
27950 	}
27951 }
27952 #endif /* DHD_ERPOM */
27953 
27954 #ifdef DHD_PKT_LOGGING
27955 int
27956 dhd_pktlog_debug_dump(dhd_pub_t *dhdp)
27957 {
27958 	struct net_device *primary_ndev;
27959 	struct bcm_cfg80211 *cfg;
27960 	unsigned long flags = 0;
27961 
27962 	primary_ndev = dhd_linux_get_primary_netdev(dhdp);
27963 	if (!primary_ndev) {
27964 		DHD_ERROR(("%s: Cannot find primary netdev\n", __FUNCTION__));
27965 		return BCME_ERROR;
27966 	}
27967 
27968 	cfg = wl_get_cfg(primary_ndev);
27969 	if (!cfg) {
27970 		DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__));
27971 		return BCME_ERROR;
27972 	}
27973 
27974 	DHD_GENERAL_LOCK(dhdp, flags);
27975 	if (DHD_BUS_BUSY_CHECK_IN_HALDUMP(dhdp)) {
27976 		DHD_GENERAL_UNLOCK(dhdp, flags);
27977 		DHD_ERROR(("%s: HAL dump is already triggered \n", __FUNCTION__));
27978 		return BCME_ERROR;
27979 	}
27980 
27981 	DHD_BUS_BUSY_SET_IN_HALDUMP(dhdp);
27982 	DHD_GENERAL_UNLOCK(dhdp, flags);
27983 	DHD_OS_WAKE_LOCK(dhdp);
27984 
27985 	if (wl_cfg80211_is_hal_started(cfg)) {
27986 		dhdp->pktlog_debug = TRUE;
27987 		dhd_dbg_send_urgent_evt(dhdp, NULL, 0);
27988 	} else {
27989 		DHD_ERROR(("[DUMP] %s: HAL Not started. skip urgent event\n", __FUNCTION__));
27990 	}
27991 	DHD_OS_WAKE_UNLOCK(dhdp);
27992 	/* In case of dhd_os_busbusy_wait_bitmask() timeout,
27993 	 * hal dump bit will not be cleared. Hence clearing it here.
27994 	 */
27995 	DHD_GENERAL_LOCK(dhdp, flags);
27996 	DHD_BUS_BUSY_CLEAR_IN_HALDUMP(dhdp);
27997 	dhd_os_busbusy_wake(dhdp);
27998 	DHD_GENERAL_UNLOCK(dhdp, flags);
27999 
28000 	return BCME_OK;
28001 }
28002 
28003 void
28004 dhd_pktlog_dump(void *handle, void *event_info, u8 event)
28005 {
28006 	dhd_info_t *dhd = handle;
28007 
28008 	if (!dhd) {
28009 		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
28010 		return;
28011 	}
28012 
28013 	if (dhd_pktlog_dump_write_file(&dhd->pub)) {
28014 		DHD_ERROR(("%s: writing pktlog dump file failed\n", __FUNCTION__));
28015 		return;
28016 	}
28017 }
28018 
28019 void
28020 dhd_schedule_pktlog_dump(dhd_pub_t *dhdp)
28021 {
28022 	dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
28023 			(void*)NULL, DHD_WQ_WORK_PKTLOG_DUMP,
28024 			dhd_pktlog_dump, DHD_WQ_WORK_PRIORITY_HIGH);
28025 }
28026 #endif /* DHD_PKT_LOGGING */
28027 
28028 #ifdef DHDTCPSYNC_FLOOD_BLK
28029 static void dhd_blk_tsfl_handler(struct work_struct * work)
28030 {
28031 	dhd_if_t *ifp = NULL;
28032 	dhd_pub_t *dhdp = NULL;
28033 	/* Ignore compiler warnings due to -Werror=cast-qual */
28034 	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
28035 	ifp = container_of(work, dhd_if_t, blk_tsfl_work);
28036 	GCC_DIAGNOSTIC_POP();
28037 
28038 	if (ifp) {
28039 		dhdp = &ifp->info->pub;
28040 		if (dhdp) {
28041 			if ((dhdp->op_mode & DHD_FLAG_P2P_GO_MODE)||
28042 				(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
28043 				DHD_ERROR(("Disassoc due to TCP SYNC FLOOD ATTACK\n"));
28044 				wl_cfg80211_del_all_sta(ifp->net, WLAN_REASON_UNSPECIFIED);
28045 			} else if ((dhdp->op_mode & DHD_FLAG_P2P_GC_MODE)||
28046 				(dhdp->op_mode & DHD_FLAG_STA_MODE)) {
28047 				DHD_ERROR(("Diconnect due to TCP SYNC FLOOD ATTACK\n"));
28048 				wl_cfg80211_disassoc(ifp->net, WLAN_REASON_UNSPECIFIED);
28049 			}
28050 			ifp->disconnect_tsync_flood = TRUE;
28051 		}
28052 	}
28053 }
28054 void dhd_reset_tcpsync_info_by_ifp(dhd_if_t *ifp)
28055 {
28056 	ifp->tsync_rcvd = 0;
28057 	ifp->tsyncack_txed = 0;
28058 	ifp->last_sync = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
28059 }
28060 void dhd_reset_tcpsync_info_by_dev(struct net_device *dev)
28061 {
28062 	dhd_if_t *ifp = NULL;
28063 	if (dev) {
28064 		ifp = DHD_DEV_IFP(dev);
28065 	}
28066 	if (ifp) {
28067 		ifp->tsync_rcvd = 0;
28068 		ifp->tsyncack_txed = 0;
28069 		ifp->last_sync = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
28070 		ifp->tsync_per_sec = 0;
28071 		ifp->disconnect_tsync_flood = FALSE;
28072 	}
28073 }
28074 #endif /* DHDTCPSYNC_FLOOD_BLK */
28075 
28076 #ifdef DHD_4WAYM4_FAIL_DISCONNECT
28077 static void dhd_m4_state_handler(struct work_struct *work)
28078 {
28079 	dhd_if_t *ifp = NULL;
28080 	/* Ignore compiler warnings due to -Werror=cast-qual */
28081 	struct delayed_work *dw = to_delayed_work(work);
28082 	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
28083 	ifp = container_of(dw, dhd_if_t, m4state_work);
28084 	GCC_DIAGNOSTIC_POP();
28085 
28086 	if (ifp && ifp->net &&
28087 		(OSL_ATOMIC_READ(ifp->info->pub->osh, &ifp->m4state) == M4_TXFAILED)) {
28088 		DHD_ERROR(("Disassoc for 4WAY_HANDSHAKE_TIMEOUT at %s\n",
28089 				ifp->net->name));
28090 		wl_cfg80211_disassoc(ifp->net, WLAN_REASON_4WAY_HANDSHAKE_TIMEOUT);
28091 	}
28092 }
28093 
28094 void
28095 dhd_eap_txcomplete(dhd_pub_t *dhdp, void *txp, bool success, int ifidx)
28096 {
28097 	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
28098 	struct ether_header *eh;
28099 	uint16 type;
28100 
28101 	if (!success) {
28102 		/* XXX where does this stuff belong? */
28103 		dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);
28104 
28105 		/* XXX Use packet tag when it is available to identify its type */
28106 		eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
28107 		type  = ntoh16(eh->ether_type);
28108 		if (type == ETHER_TYPE_802_1X) {
28109 			if (dhd_is_4way_msg((uint8 *)eh) == EAPOL_4WAY_M4) {
28110 				dhd_if_t *ifp = NULL;
28111 				ifp = dhd->iflist[ifidx];
28112 				if (!ifp || !ifp->net) {
28113 					return;
28114 				}
28115 
28116 				DHD_INFO(("%s: M4 TX failed on %d.\n",
28117 					__FUNCTION__, ifidx));
28118 
28119 				OSL_ATOMIC_SET(dhdp->osh, &ifp->m4state, M4_TXFAILED);
28120 				schedule_delayed_work(&ifp->m4state_work,
28121 					msecs_to_jiffies(MAX_4WAY_TIMEOUT_MS));
28122 			}
28123 		}
28124 	}
28125 }
28126 
28127 void
28128 dhd_cleanup_m4_state_work(dhd_pub_t *dhdp, int ifidx)
28129 {
28130 	dhd_info_t *dhdinfo;
28131 	dhd_if_t *ifp;
28132 
28133 	if ((ifidx < 0) || (ifidx >= DHD_MAX_IFS)) {
28134 		DHD_ERROR(("%s: invalid ifidx %d\n", __FUNCTION__, ifidx));
28135 		return;
28136 	}
28137 
28138 	dhdinfo = (dhd_info_t *)(dhdp->info);
28139 	if (!dhdinfo) {
28140 		DHD_ERROR(("%s: dhdinfo is NULL\n", __FUNCTION__));
28141 		return;
28142 	}
28143 
28144 	ifp = dhdinfo->iflist[ifidx];
28145 	if (ifp) {
28146 		cancel_delayed_work_sync(&ifp->m4state_work);
28147 	}
28148 }
28149 #endif /* DHD_4WAYM4_FAIL_DISCONNECT */
28150 
28151 #ifdef BIGDATA_SOFTAP
28152 void dhd_schedule_gather_ap_stadata(void *bcm_cfg, void *ndev, const wl_event_msg_t *e)
28153 {
28154 	struct bcm_cfg80211 *cfg;
28155 	dhd_pub_t *dhdp;
28156 	ap_sta_wq_data_t *p_wq_data;
28157 
28158 	if (!bcm_cfg || !ndev || !e) {
28159 		WL_ERR(("bcm_cfg=%p ndev=%p e=%p\n", bcm_cfg, ndev, e));
28160 		return;
28161 	}
28162 
28163 	cfg = (struct bcm_cfg80211 *)bcm_cfg;
28164 	dhdp = (dhd_pub_t *)cfg->pub;
28165 
28166 	if (!dhdp || !cfg->ap_sta_info) {
28167 		WL_ERR(("dhdp=%p ap_sta_info=%p\n", dhdp, cfg->ap_sta_info));
28168 		return;
28169 	}
28170 
28171 	p_wq_data = (ap_sta_wq_data_t *)MALLOCZ(dhdp->osh, sizeof(ap_sta_wq_data_t));
28172 	if (unlikely(!p_wq_data)) {
28173 		DHD_ERROR(("%s(): failed to allocate memory for ap_sta_wq_data_t\n",
28174 					__FUNCTION__));
28175 		return;
28176 	}
28177 
28178 	mutex_lock(&cfg->ap_sta_info->wq_data_sync);
28179 
28180 	memcpy(&p_wq_data->e, e, sizeof(wl_event_msg_t));
28181 	p_wq_data->dhdp = dhdp;
28182 	p_wq_data->bcm_cfg = cfg;
28183 	p_wq_data->ndev = (struct net_device *)ndev;
28184 
28185 	mutex_unlock(&cfg->ap_sta_info->wq_data_sync);
28186 
28187 	dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
28188 			p_wq_data, DHD_WQ_WORK_GET_BIGDATA_AP,
28189 			wl_gather_ap_stadata, DHD_WQ_WORK_PRIORITY_HIGH);
28190 
28191 }
28192 #endif /* BIGDATA_SOFTAP */
28193 
28194 void
28195 get_debug_dump_time(char *str)
28196 {
28197 	struct osl_timespec curtime;
28198 	unsigned long local_time;
28199 	struct rtc_time tm;
28200 
28201 	if (!strlen(str)) {
28202 		osl_do_gettimeofday(&curtime);
28203 		local_time = (u32)(curtime.tv_sec -
28204 				(sys_tz.tz_minuteswest * DHD_LOG_DUMP_TS_MULTIPLIER_VALUE));
28205 		rtc_time_to_tm(local_time, &tm);
28206 		snprintf(str, DEBUG_DUMP_TIME_BUF_LEN, DHD_LOG_DUMP_TS_FMT_YYMMDDHHMMSSMSMS,
28207 				tm.tm_year - 100, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min,
28208 				tm.tm_sec, (int)(curtime.tv_usec/NSEC_PER_USEC));
28209 	}
28210 }
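
/*
 * Note (added): the string encodes year%100, month, day, hour, minute, second
 * and milliseconds per DHD_LOG_DUMP_TS_FMT_YYMMDDHHMMSSMSMS (defined
 * elsewhere); tv_usec is divided by NSEC_PER_USEC (1000) to convert
 * microseconds to the trailing milliseconds field, and tz_minuteswest is
 * scaled (presumably by 60) to seconds to shift UTC to local time.
 */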
28211 
28212 void
28213 clear_debug_dump_time(char *str)
28214 {
28215 	memset(str, 0, DEBUG_DUMP_TIME_BUF_LEN);
28216 }
28217 #if defined(WL_CFGVENDOR_SEND_HANG_EVENT) || defined(DHD_PKT_LOGGING)
28218 void
28219 copy_debug_dump_time(char *dest, char *src)
28220 {
28221 	memcpy(dest, src, DEBUG_DUMP_TIME_BUF_LEN);
28222 }
28223 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT || DHD_PKT_LOGGING */
28224 
28225 /*
28226  * DHD RING
28227  */
28228 #define DHD_RING_ERR_INTERNAL(fmt, ...) DHD_ERROR(("EWPF-" fmt, ##__VA_ARGS__))
28229 #define DHD_RING_TRACE_INTERNAL(fmt, ...) DHD_INFO(("EWPF-" fmt, ##__VA_ARGS__))
28230 
28231 #define DHD_RING_ERR(x) DHD_RING_ERR_INTERNAL x
28232 #define DHD_RING_TRACE(x) DHD_RING_TRACE_INTERNAL x
28233 
28234 #define DHD_RING_MAGIC 0x20170910
28235 #define DHD_RING_IDX_INVALID	0xffffffff
28236 
28237 #define DHD_RING_SYNC_LOCK_INIT(osh)		osl_spin_lock_init(osh)
28238 #define DHD_RING_SYNC_LOCK_DEINIT(osh, lock)	osl_spin_lock_deinit(osh, lock)
28239 #define DHD_RING_SYNC_LOCK(lock, flags)		(flags) = osl_spin_lock(lock)
28240 #define DHD_RING_SYNC_UNLOCK(lock, flags)	osl_spin_unlock(lock, flags)
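
/*
 * Overview (added note): two ring disciplines are implemented below.
 * DHD_RING_TYPE_FIXED keeps separate read/write indexes and can lock a
 * sub-range of elements against overwrite; DHD_RING_TYPE_SINGLE_IDX keeps a
 * single write index plus an overwrite flag and supports only whole-ring
 * lock/unlock. Both operate on a caller-supplied buffer laid out as
 * [dhd_ring_info_t header][elem_cnt * elem_size bytes].
 */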
28241 
28242 typedef struct {
28243 	uint32 elem_size;
28244 	uint32 elem_cnt;
28245 	uint32 write_idx;	/* next write index, -1 : not started */
28246 	uint32 read_idx;	/* next read index, -1 : not started */
28247 
28248 	/* protected elements during serialization */
28249 	int lock_idx;	/* start index of the locked region; locked elements are not overwritten */
28250 	int lock_count; /* number of locked elements, counted from lock_idx */
28251 
28252 	/* saved data elements */
28253 	void *elem;
28254 } dhd_fixed_ring_info_t;
28255 
28256 typedef struct {
28257 	uint32 elem_size;
28258 	uint32 elem_cnt;
28259 	uint32 idx;		/* -1 : not started */
28260 	uint32 rsvd;		/* reserved for future use */
28261 
28262 	/* protected elements during serialization */
28263 	atomic_t ring_locked;
28264 	/* set once the ring wraps and starts overwriting old elements */
28265 	uint32 ring_overwrited;
28266 
28267 	/* saved data elements */
28268 	void *elem;
28269 } dhd_singleidx_ring_info_t;
28270 
28271 typedef struct {
28272 	uint32 magic;
28273 	uint32 type;
28274 	void *ring_sync; /* spinlock for sync */
28275 	union {
28276 		dhd_fixed_ring_info_t fixed;
28277 		dhd_singleidx_ring_info_t single;
28278 	};
28279 } dhd_ring_info_t;
28280 
28281 uint32
28282 dhd_ring_get_hdr_size(void)
28283 {
28284 	return sizeof(dhd_ring_info_t);
28285 }
28286 
28287 void *
28288 dhd_ring_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size, uint32 elem_size,
28289 	uint32 elem_cnt, uint32 type)
28290 {
28291 	dhd_ring_info_t *ret_ring;
28292 
28293 	if (!buf) {
28294 		DHD_RING_ERR(("NO RING BUFFER\n"));
28295 		return NULL;
28296 	}
28297 
28298 	if (buf_size < dhd_ring_get_hdr_size() + elem_size * elem_cnt) {
28299 		DHD_RING_ERR(("RING SIZE IS TOO SMALL\n"));
28300 		return NULL;
28301 	}
28302 
28303 	if (type != DHD_RING_TYPE_FIXED && type != DHD_RING_TYPE_SINGLE_IDX) {
28304 		DHD_RING_ERR(("UNSUPPORTED RING TYPE\n"));
28305 		return NULL;
28306 	}
28307 
28308 	ret_ring = (dhd_ring_info_t *)buf;
28309 	ret_ring->type = type;
28310 	ret_ring->ring_sync = (void *)DHD_RING_SYNC_LOCK_INIT(dhdp->osh);
28311 	ret_ring->magic = DHD_RING_MAGIC;
28312 
28313 	if (type == DHD_RING_TYPE_FIXED) {
28314 		ret_ring->fixed.read_idx = DHD_RING_IDX_INVALID;
28315 		ret_ring->fixed.write_idx = DHD_RING_IDX_INVALID;
28316 		ret_ring->fixed.lock_idx = DHD_RING_IDX_INVALID;
28317 		ret_ring->fixed.elem = buf + sizeof(dhd_ring_info_t);
28318 		ret_ring->fixed.elem_size = elem_size;
28319 		ret_ring->fixed.elem_cnt = elem_cnt;
28320 	} else {
28321 		ret_ring->single.idx = DHD_RING_IDX_INVALID;
28322 		atomic_set(&ret_ring->single.ring_locked, 0);
28323 		ret_ring->single.ring_overwrited = 0;
28324 		ret_ring->single.rsvd = 0;
28325 		ret_ring->single.elem = buf + sizeof(dhd_ring_info_t);
28326 		ret_ring->single.elem_size = elem_size;
28327 		ret_ring->single.elem_cnt = elem_cnt;
28328 	}
28329 
28330 	return ret_ring;
28331 }
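
/*
 * Usage sketch (illustrative, not part of the driver; my_rec_t and the local
 * dhdp handle are assumptions):
 *
 *	uint32 sz = dhd_ring_get_hdr_size() + sizeof(my_rec_t) * 16u;
 *	uint8 *buf = MALLOCZ(dhdp->osh, sz);
 *	void *ring = dhd_ring_init(dhdp, buf, sz, sizeof(my_rec_t), 16,
 *		DHD_RING_TYPE_FIXED);
 *	my_rec_t *slot = (my_rec_t *)dhd_ring_get_empty(ring);	/* producer side */
 *	my_rec_t *rec = (my_rec_t *)dhd_ring_get_first(ring);	/* consumer side */
 *	dhd_ring_free_first(ring);
 *	dhd_ring_deinit(dhdp, ring);
 *	MFREE(dhdp->osh, buf, sz);
 */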
28332 
28333 void
28334 dhd_ring_deinit(dhd_pub_t *dhdp, void *_ring)
28335 {
28336 	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
28337 	if (!ring) {
28338 		return;
28339 	}
28340 
28341 	if (ring->magic != DHD_RING_MAGIC) {
28342 		return;
28343 	}
28344 
28345 	if (ring->type != DHD_RING_TYPE_FIXED &&
28346 		ring->type != DHD_RING_TYPE_SINGLE_IDX) {
28347 		return;
28348 	}
28349 
28350 	DHD_RING_SYNC_LOCK_DEINIT(dhdp->osh, ring->ring_sync);
28351 	ring->ring_sync = NULL;
28352 	if (ring->type == DHD_RING_TYPE_FIXED) {
28353 		dhd_fixed_ring_info_t *fixed = &ring->fixed;
28354 		memset(fixed->elem, 0, fixed->elem_size * fixed->elem_cnt);
28355 		fixed->elem_size = fixed->elem_cnt = 0;
28356 	} else {
28357 		dhd_singleidx_ring_info_t *single = &ring->single;
28358 		memset(single->elem, 0, single->elem_size * single->elem_cnt);
28359 		single->elem_size = single->elem_cnt = 0;
28360 	}
28361 	ring->type = 0;
28362 	ring->magic = 0;
28363 }
28364 
28365 static inline uint32
28366 __dhd_ring_ptr2idx(void *ring, void *ptr, char *sig, uint32 type)
28367 {
28368 	uint32 diff;
28369 	uint32 ret_idx = (uint32)DHD_RING_IDX_INVALID;
28370 	uint32 elem_size, elem_cnt;
28371 	void *elem;
28372 
28373 	if (type == DHD_RING_TYPE_FIXED) {
28374 		dhd_fixed_ring_info_t *fixed = (dhd_fixed_ring_info_t *)ring;
28375 		elem_size = fixed->elem_size;
28376 		elem_cnt = fixed->elem_cnt;
28377 		elem = fixed->elem;
28378 	} else if (type == DHD_RING_TYPE_SINGLE_IDX) {
28379 		dhd_singleidx_ring_info_t *single = (dhd_singleidx_ring_info_t *)ring;
28380 		elem_size = single->elem_size;
28381 		elem_cnt = single->elem_cnt;
28382 		elem = single->elem;
28383 	} else {
28384 		DHD_RING_ERR(("UNSUPPORTED RING TYPE %d\n", type));
28385 		return ret_idx;
28386 	}
28387 
28388 	if (ptr < elem) {
28389 		DHD_RING_ERR(("INVALID POINTER %s:%p, ring->elem:%p\n", sig, ptr, elem));
28390 		return ret_idx;
28391 	}
28392 	diff = (uint32)((uint8 *)ptr - (uint8 *)elem);
28393 	if (diff % elem_size != 0) {
28394 		DHD_RING_ERR(("INVALID POINTER %s:%p, ring->elem:%p\n", sig, ptr, elem));
28395 		return ret_idx;
28396 	}
28397 	ret_idx = diff / elem_size;
28398 	if (ret_idx >= elem_cnt) {
28399 		DHD_RING_ERR(("INVALID POINTER max:%d cur:%d\n", elem_cnt, ret_idx));
28400 	}
28401 	return ret_idx;
28402 }
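
/*
 * Example: with elem_size = 64, a ptr 192 bytes past elem maps to index 3,
 * while a ptr at offset 200 fails the (diff % elem_size) check and yields
 * DHD_RING_IDX_INVALID.
 */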
28403 
28404 /* Sub functions for fixed ring */
28405 /* get count of elements between two indexes of the ring buffer (internal only) */
28406 static inline int
28407 __dhd_fixed_ring_get_count(dhd_fixed_ring_info_t *ring, int start, int end)
28408 {
28409 	if (start == DHD_RING_IDX_INVALID || end == DHD_RING_IDX_INVALID) {
28410 		return 0;
28411 	}
28412 
28413 	return (ring->elem_cnt + end - start) % ring->elem_cnt + 1;
28414 }
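
/*
 * Worked example: with elem_cnt = 8, read_idx = 6 and write_idx = 1 the ring
 * holds indexes 6, 7, 0, 1, and (8 + 1 - 6) % 8 + 1 = 4 as expected.
 */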
28415 
28416 static inline int
28417 __dhd_fixed_ring_get_cur_size(dhd_fixed_ring_info_t *ring)
28418 {
28419 	return __dhd_fixed_ring_get_count(ring, ring->read_idx, ring->write_idx);
28420 }
28421 
28422 static inline void *
28423 __dhd_fixed_ring_get_first(dhd_fixed_ring_info_t *ring)
28424 {
28425 	if (ring->read_idx == DHD_RING_IDX_INVALID) {
28426 		return NULL;
28427 	}
28428 	return (uint8 *)ring->elem + (ring->elem_size * ring->read_idx);
28429 }
28430 
28431 static inline void
28432 __dhd_fixed_ring_free_first(dhd_fixed_ring_info_t *ring)
28433 {
28434 	uint32 next_idx;
28435 
28436 	if (ring->read_idx == DHD_RING_IDX_INVALID) {
28437 		DHD_RING_ERR(("EMPTY RING\n"));
28438 		return;
28439 	}
28440 
28441 	next_idx = (ring->read_idx + 1) % ring->elem_cnt;
28442 	if (ring->read_idx == ring->write_idx) {
28443 		/* Become empty */
28444 		ring->read_idx = ring->write_idx = DHD_RING_IDX_INVALID;
28445 		return;
28446 	}
28447 
28448 	ring->read_idx = next_idx;
28449 	return;
28450 }
28451 
28452 static inline void *
28453 __dhd_fixed_ring_get_last(dhd_fixed_ring_info_t *ring)
28454 {
28455 	if (ring->read_idx == DHD_RING_IDX_INVALID) {
28456 		return NULL;
28457 	}
28458 	return (uint8 *)ring->elem + (ring->elem_size * ring->write_idx);
28459 }
28460 
28461 static inline void *
28462 __dhd_fixed_ring_get_empty(dhd_fixed_ring_info_t *ring)
28463 {
28464 	uint32 tmp_idx;
28465 
28466 	if (ring->read_idx == DHD_RING_IDX_INVALID) {
28467 		ring->read_idx = ring->write_idx = 0;
28468 		return (uint8 *)ring->elem;
28469 	}
28470 
28471 	/* check next index is not locked */
28472 	tmp_idx = (ring->write_idx + 1) % ring->elem_cnt;
28473 	if (ring->lock_idx == tmp_idx) {
28474 		return NULL;
28475 	}
28476 
28477 	ring->write_idx = tmp_idx;
28478 	if (ring->write_idx == ring->read_idx) {
28479 		/* record is full, drop oldest one */
28480 		ring->read_idx = (ring->read_idx + 1) % ring->elem_cnt;
28481 
28482 	}
28483 	return (uint8 *)ring->elem + (ring->elem_size * ring->write_idx);
28484 }
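
/*
 * Note the drop-oldest policy above: when the ring is full, the write index
 * advances onto the read index and the oldest unread element is reclaimed,
 * unless the next slot is protected by lock_idx, in which case the writer
 * gets NULL instead.
 */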
28485 
28486 static inline void *
28487 __dhd_fixed_ring_get_next(dhd_fixed_ring_info_t *ring, void *prev, uint32 type)
28488 {
28489 	uint32 cur_idx;
28490 
28491 	if (ring->read_idx == DHD_RING_IDX_INVALID) {
28492 		DHD_RING_ERR(("EMPTY RING\n"));
28493 		return NULL;
28494 	}
28495 
28496 	cur_idx = __dhd_ring_ptr2idx(ring, prev, "NEXT", type);
28497 	if (cur_idx >= ring->elem_cnt) {
28498 		return NULL;
28499 	}
28500 
28501 	if (cur_idx == ring->write_idx) {
28502 		/* no more new record */
28503 		return NULL;
28504 	}
28505 
28506 	cur_idx = (cur_idx + 1) % ring->elem_cnt;
28507 	return (uint8 *)ring->elem + ring->elem_size * cur_idx;
28508 }
28509 
28510 static inline void *
28511 __dhd_fixed_ring_get_prev(dhd_fixed_ring_info_t *ring, void *prev, uint32 type)
28512 {
28513 	uint32 cur_idx;
28514 
28515 	if (ring->read_idx == DHD_RING_IDX_INVALID) {
28516 		DHD_RING_ERR(("EMPTY RING\n"));
28517 		return NULL;
28518 	}
28519 	cur_idx = __dhd_ring_ptr2idx(ring, prev, "PREV", type);
28520 	if (cur_idx >= ring->elem_cnt) {
28521 		return NULL;
28522 	}
28523 	if (cur_idx == ring->read_idx) {
28524 		/* no more new record */
28525 		return NULL;
28526 	}
28527 
28528 	cur_idx = (cur_idx + ring->elem_cnt - 1) % ring->elem_cnt;
28529 	return (uint8 *)ring->elem + ring->elem_size * cur_idx;
28530 }
28531 
28532 static inline void
28533 __dhd_fixed_ring_lock(dhd_fixed_ring_info_t *ring, void *first_ptr, void *last_ptr, uint32 type)
28534 {
28535 	uint32 first_idx;
28536 	uint32 last_idx;
28537 	uint32 ring_filled_cnt;
28538 	uint32 tmp_cnt;
28539 
28540 	if (ring->read_idx == DHD_RING_IDX_INVALID) {
28541 		DHD_RING_ERR(("EMPTY RING\n"));
28542 		return;
28543 	}
28544 
28545 	if (first_ptr) {
28546 		first_idx = __dhd_ring_ptr2idx(ring, first_ptr, "LCK FIRST", type);
28547 		if (first_idx >= ring->elem_cnt) {
28548 			return;
28549 		}
28550 	} else {
28551 		first_idx = ring->read_idx;
28552 	}
28553 
28554 	if (last_ptr) {
28555 		last_idx = __dhd_ring_ptr2idx(ring, last_ptr, "LCK LAST", type);
28556 		if (last_idx >= ring->elem_cnt) {
28557 			return;
28558 		}
28559 	} else {
28560 		last_idx = ring->write_idx;
28561 	}
28562 
28563 	ring_filled_cnt = __dhd_fixed_ring_get_count(ring, ring->read_idx, ring->write_idx);
28564 	tmp_cnt = __dhd_fixed_ring_get_count(ring, ring->read_idx, first_idx);
28565 	if (tmp_cnt > ring_filled_cnt) {
28566 		DHD_RING_ERR(("LOCK FIRST POINTS TO AN EMPTY ELEM: write: %d read: %d cur:%d\n",
28567 			ring->write_idx, ring->read_idx, first_idx));
28568 		return;
28569 	}
28570 
28571 	tmp_cnt = __dhd_fixed_ring_get_count(ring, ring->read_idx, last_idx);
28572 	if (tmp_cnt > ring_filled_cnt) {
28573 		DHD_RING_ERR(("LOCK LAST POINTS TO AN EMPTY ELEM: write: %d read: %d cur:%d\n",
28574 			ring->write_idx, ring->read_idx, last_idx));
28575 		return;
28576 	}
28577 
28578 	ring->lock_idx = first_idx;
28579 	ring->lock_count = __dhd_fixed_ring_get_count(ring, first_idx, last_idx);
28580 	return;
28581 }
28582 
28583 static inline void
28584 __dhd_fixed_ring_lock_free(dhd_fixed_ring_info_t *ring)
28585 {
28586 	if (ring->read_idx == DHD_RING_IDX_INVALID) {
28587 		DHD_RING_ERR(("EMPTY RING\n"));
28588 		return;
28589 	}
28590 
28591 	ring->lock_idx = DHD_RING_IDX_INVALID;
28592 	ring->lock_count = 0;
28593 	return;
28594 }

28595 static inline void *
28596 __dhd_fixed_ring_lock_get_first(dhd_fixed_ring_info_t *ring)
28597 {
28598 	if (ring->read_idx == DHD_RING_IDX_INVALID) {
28599 		DHD_RING_ERR(("EMPTY RING\n"));
28600 		return NULL;
28601 	}
28602 	if (ring->lock_idx == DHD_RING_IDX_INVALID) {
28603 		DHD_RING_ERR(("NO LOCK POINT\n"));
28604 		return NULL;
28605 	}
28606 	return (uint8 *)ring->elem + ring->elem_size * ring->lock_idx;
28607 }
28608 
28609 static inline void *
28610 __dhd_fixed_ring_lock_get_last(dhd_fixed_ring_info_t *ring)
28611 {
28612 	int lock_last_idx;
28613 	if (ring->read_idx == DHD_RING_IDX_INVALID) {
28614 		DHD_RING_ERR(("EMPTY RING\n"));
28615 		return NULL;
28616 	}
28617 	if (ring->lock_idx == DHD_RING_IDX_INVALID) {
28618 		DHD_RING_ERR(("NO LOCK POINT\n"));
28619 		return NULL;
28620 	}
28621 
28622 	lock_last_idx = (ring->lock_idx + ring->lock_count - 1) % ring->elem_cnt;
28623 	return (uint8 *)ring->elem + ring->elem_size * lock_last_idx;
28624 }
28625 
28626 static inline int
28627 __dhd_fixed_ring_lock_get_count(dhd_fixed_ring_info_t *ring)
28628 {
28629 	if (ring->read_idx == DHD_RING_IDX_INVALID) {
28630 		DHD_RING_ERR(("EMPTY RING\n"));
28631 		return BCME_ERROR;
28632 	}
28633 	if (ring->lock_idx == DHD_RING_IDX_INVALID) {
28634 		DHD_RING_ERR(("NO LOCK POINT\n"));
28635 		return BCME_ERROR;
28636 	}
28637 	return ring->lock_count;
28638 }
28639 
28640 static inline void
28641 __dhd_fixed_ring_lock_free_first(dhd_fixed_ring_info_t *ring)
28642 {
28643 	if (ring->read_idx == DHD_RING_IDX_INVALID) {
28644 		DHD_RING_ERR(("EMPTY RING\n"));
28645 		return;
28646 	}
28647 	if (ring->lock_idx == DHD_RING_IDX_INVALID) {
28648 		DHD_RING_ERR(("NO LOCK POINT\n"));
28649 		return;
28650 	}
28651 
28652 	ring->lock_count--;
28653 	if (ring->lock_count <= 0) {
28654 		ring->lock_idx = DHD_RING_IDX_INVALID;
28655 	} else {
28656 		ring->lock_idx = (ring->lock_idx + 1) % ring->elem_cnt;
28657 	}
28658 	return;
28659 }
28660 
28661 static inline void
28662 __dhd_fixed_ring_set_read_idx(dhd_fixed_ring_info_t *ring, uint32 idx)
28663 {
28664 	ring->read_idx = idx;
28665 }
28666 
28667 static inline void
28668 __dhd_fixed_ring_set_write_idx(dhd_fixed_ring_info_t *ring, uint32 idx)
28669 {
28670 	ring->write_idx = idx;
28671 }
28672 
28673 static inline uint32
28674 __dhd_fixed_ring_get_read_idx(dhd_fixed_ring_info_t *ring)
28675 {
28676 	return ring->read_idx;
28677 }
28678 
28679 static inline uint32
28680 __dhd_fixed_ring_get_write_idx(dhd_fixed_ring_info_t *ring)
28681 {
28682 	return ring->write_idx;
28683 }
28684 
28685 /* Sub functions for single index ring */
28686 static inline void *
28687 __dhd_singleidx_ring_get_first(dhd_singleidx_ring_info_t *ring)
28688 {
28689 	uint32 tmp_idx = 0;
28690 
28691 	if (ring->idx == DHD_RING_IDX_INVALID) {
28692 		return NULL;
28693 	}
28694 
28695 	if (ring->ring_overwrited) {
28696 		tmp_idx = (ring->idx + 1) % ring->elem_cnt;
28697 	}
28698 
28699 	return (uint8 *)ring->elem + (ring->elem_size * tmp_idx);
28700 }
28701 
28702 static inline void *
28703 __dhd_singleidx_ring_get_last(dhd_singleidx_ring_info_t *ring)
28704 {
28705 	if (ring->idx == DHD_RING_IDX_INVALID) {
28706 		return NULL;
28707 	}
28708 
28709 	return (uint8 *)ring->elem + (ring->elem_size * ring->idx);
28710 }
28711 
28712 static inline void *
28713 __dhd_singleidx_ring_get_empty(dhd_singleidx_ring_info_t *ring)
28714 {
28715 	if (ring->idx == DHD_RING_IDX_INVALID) {
28716 		ring->idx = 0;
28717 		return (uint8 *)ring->elem;
28718 	}
28719 
28720 	/* check the lock is held */
28721 	if (atomic_read(&ring->ring_locked)) {
28722 		return NULL;
28723 	}
28724 
28725 	/* check the index rollover */
28726 	if (!ring->ring_overwrited && ring->idx == (ring->elem_cnt - 1)) {
28727 		ring->ring_overwrited = 1;
28728 	}
28729 
28730 	ring->idx = (ring->idx + 1) % ring->elem_cnt;
28731 
28732 	return (uint8 *)ring->elem + (ring->elem_size * ring->idx);
28733 }
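
/*
 * Note: once idx reaches elem_cnt - 1 the ring_overwrited flag is latched,
 * letting readers distinguish a wrapped ring (oldest element is idx + 1) from
 * one still filling (oldest element is slot 0); see
 * __dhd_singleidx_ring_get_first() above.
 */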
28734 
28735 static inline void *
28736 __dhd_singleidx_ring_get_next(dhd_singleidx_ring_info_t *ring, void *prev, uint32 type)
28737 {
28738 	uint32 cur_idx;
28739 
28740 	if (ring->idx == DHD_RING_IDX_INVALID) {
28741 		DHD_RING_ERR(("EMPTY RING\n"));
28742 		return NULL;
28743 	}
28744 
28745 	cur_idx = __dhd_ring_ptr2idx(ring, prev, "NEXT", type);
28746 	if (cur_idx >= ring->elem_cnt) {
28747 		return NULL;
28748 	}
28749 
28750 	if (cur_idx == ring->idx) {
28751 		/* no more new record */
28752 		return NULL;
28753 	}
28754 
28755 	cur_idx = (cur_idx + 1) % ring->elem_cnt;
28756 
28757 	return (uint8 *)ring->elem + ring->elem_size * cur_idx;
28758 }
28759 
28760 static inline void *
28761 __dhd_singleidx_ring_get_prev(dhd_singleidx_ring_info_t *ring, void *prev, uint32 type)
28762 {
28763 	uint32 cur_idx;
28764 
28765 	if (ring->idx == DHD_RING_IDX_INVALID) {
28766 		DHD_RING_ERR(("EMPTY RING\n"));
28767 		return NULL;
28768 	}
28769 	cur_idx = __dhd_ring_ptr2idx(ring, prev, "PREV", type);
28770 	if (cur_idx >= ring->elem_cnt) {
28771 		return NULL;
28772 	}
28773 
28774 	if (!ring->ring_overwrited && cur_idx == 0) {
28775 		/* no more new record */
28776 		return NULL;
28777 	}
28778 
28779 	cur_idx = (cur_idx + ring->elem_cnt - 1) % ring->elem_cnt;
28780 	if (ring->ring_overwrited && cur_idx == ring->idx) {
28781 		/* no more new record */
28782 		return NULL;
28783 	}
28784 
28785 	return (uint8 *)ring->elem + ring->elem_size * cur_idx;
28786 }
28787 
28788 static inline void
28789 __dhd_singleidx_ring_whole_lock(dhd_singleidx_ring_info_t *ring)
28790 {
28791 	if (!atomic_read(&ring->ring_locked)) {
28792 		atomic_set(&ring->ring_locked, 1);
28793 	}
28794 }
28795 
28796 static inline void
28797 __dhd_singleidx_ring_whole_unlock(dhd_singleidx_ring_info_t *ring)
28798 {
28799 	if (atomic_read(&ring->ring_locked)) {
28800 		atomic_set(&ring->ring_locked, 0);
28801 	}
28802 }
28803 
28804 /* Get first element : oldest element */
28805 void *
28806 dhd_ring_get_first(void *_ring)
28807 {
28808 	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
28809 	void *ret = NULL;
28810 	unsigned long flags;
28811 
28812 	if (!ring || ring->magic != DHD_RING_MAGIC) {
28813 		DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
28814 		return NULL;
28815 	}
28816 
28817 	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
28818 	if (ring->type == DHD_RING_TYPE_FIXED) {
28819 		ret = __dhd_fixed_ring_get_first(&ring->fixed);
28820 	}
28821 	if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
28822 		ret = __dhd_singleidx_ring_get_first(&ring->single);
28823 	}
28824 	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
28825 	return ret;
28826 }
28827 
28828 /* Free first element : oldest element */
28829 void
28830 dhd_ring_free_first(void *_ring)
28831 {
28832 	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
28833 	unsigned long flags;
28834 
28835 	if (!ring || ring->magic != DHD_RING_MAGIC) {
28836 		DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
28837 		return;
28838 	}
28839 
28840 	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
28841 	if (ring->type == DHD_RING_TYPE_FIXED) {
28842 		__dhd_fixed_ring_free_first(&ring->fixed);
28843 	}
28844 	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
28845 }
28846 
28847 void
28848 dhd_ring_set_read_idx(void *_ring, uint32 read_idx)
28849 {
28850 	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
28851 	unsigned long flags;
28852 
28853 	if (!ring || ring->magic != DHD_RING_MAGIC) {
28854 		DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
28855 		return;
28856 	}
28857 
28858 	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
28859 	if (ring->type == DHD_RING_TYPE_FIXED) {
28860 		__dhd_fixed_ring_set_read_idx(&ring->fixed, read_idx);
28861 	}
28862 	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
28863 }
28864 
28865 void
28866 dhd_ring_set_write_idx(void *_ring, uint32 write_idx)
28867 {
28868 	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
28869 	unsigned long flags;
28870 
28871 	if (!ring || ring->magic != DHD_RING_MAGIC) {
28872 		DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
28873 		return;
28874 	}
28875 
28876 	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
28877 	if (ring->type == DHD_RING_TYPE_FIXED) {
28878 		__dhd_fixed_ring_set_write_idx(&ring->fixed, write_idx);
28879 	}
28880 	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
28881 }
28882 
28883 uint32
28884 dhd_ring_get_read_idx(void *_ring)
28885 {
28886 	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
28887 	uint32 read_idx = DHD_RING_IDX_INVALID;
28888 	unsigned long flags;
28889 
28890 	if (!ring || ring->magic != DHD_RING_MAGIC) {
28891 		DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
28892 		return read_idx;
28893 	}
28894 
28895 	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
28896 	if (ring->type == DHD_RING_TYPE_FIXED) {
28897 		read_idx = __dhd_fixed_ring_get_read_idx(&ring->fixed);
28898 	}
28899 	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
28900 
28901 	return read_idx;
28902 }
28903 
28904 uint32
28905 dhd_ring_get_write_idx(void *_ring)
28906 {
28907 	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
28908 	uint32 write_idx = DHD_RING_IDX_INVALID;
28909 	unsigned long flags;
28910 
28911 	if (!ring || ring->magic != DHD_RING_MAGIC) {
28912 		DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
28913 		return write_idx;
28914 	}
28915 
28916 	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
28917 	if (ring->type == DHD_RING_TYPE_FIXED) {
28918 		write_idx = __dhd_fixed_ring_get_write_idx(&ring->fixed);
28919 	}
28920 	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
28921 
28922 	return write_idx;
28923 }
28924 
28925 /* Get latest element */
28926 void *
28927 dhd_ring_get_last(void *_ring)
28928 {
28929 	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
28930 	void *ret = NULL;
28931 	unsigned long flags;
28932 
28933 	if (!ring || ring->magic != DHD_RING_MAGIC) {
28934 		DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
28935 		return NULL;
28936 	}
28937 
28938 	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
28939 	if (ring->type == DHD_RING_TYPE_FIXED) {
28940 		ret = __dhd_fixed_ring_get_last(&ring->fixed);
28941 	}
28942 	if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
28943 		ret = __dhd_singleidx_ring_get_last(&ring->single);
28944 	}
28945 	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
28946 	return ret;
28947 }
28948 
28949 /* Get the next slot that can be written to.
28950  * May overwrite the oldest element that has not been read yet.
28951  * Returns NULL if the next slot is locked.
28952  */
28953 void *
28954 dhd_ring_get_empty(void *_ring)
28955 {
28956 	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
28957 	void *ret = NULL;
28958 	unsigned long flags;
28959 
28960 	if (!ring || ring->magic != DHD_RING_MAGIC) {
28961 		DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
28962 		return NULL;
28963 	}
28964 
28965 	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
28966 	if (ring->type == DHD_RING_TYPE_FIXED) {
28967 		ret = __dhd_fixed_ring_get_empty(&ring->fixed);
28968 	}
28969 	if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
28970 		ret = __dhd_singleidx_ring_get_empty(&ring->single);
28971 	}
28972 	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
28973 	return ret;
28974 }
28975 
28976 void *
28977 dhd_ring_get_next(void *_ring, void *cur)
28978 {
28979 	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
28980 	void *ret = NULL;
28981 	unsigned long flags;
28982 
28983 	if (!ring || ring->magic != DHD_RING_MAGIC) {
28984 		DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
28985 		return NULL;
28986 	}
28987 
28988 	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
28989 	if (ring->type == DHD_RING_TYPE_FIXED) {
28990 		ret = __dhd_fixed_ring_get_next(&ring->fixed, cur, ring->type);
28991 	}
28992 	if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
28993 		ret = __dhd_singleidx_ring_get_next(&ring->single, cur, ring->type);
28994 	}
28995 	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
28996 	return ret;
28997 }
28998 
28999 void *
29000 dhd_ring_get_prev(void *_ring, void *cur)
29001 {
29002 	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
29003 	void *ret = NULL;
29004 	unsigned long flags;
29005 
29006 	if (!ring || ring->magic != DHD_RING_MAGIC) {
29007 		DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
29008 		return NULL;
29009 	}
29010 
29011 	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
29012 	if (ring->type == DHD_RING_TYPE_FIXED) {
29013 		ret = __dhd_fixed_ring_get_prev(&ring->fixed, cur, ring->type);
29014 	}
29015 	if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
29016 		ret = __dhd_singleidx_ring_get_prev(&ring->single, cur, ring->type);
29017 	}
29018 	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
29019 	return ret;
29020 }
29021 
29022 int
29023 dhd_ring_get_cur_size(void *_ring)
29024 {
29025 	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
29026 	int cnt = 0;
29027 	unsigned long flags;
29028 
29029 	if (!ring || ring->magic != DHD_RING_MAGIC) {
29030 		DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
29031 		return cnt;
29032 	}
29033 
29034 	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
29035 	if (ring->type == DHD_RING_TYPE_FIXED) {
29036 		cnt = __dhd_fixed_ring_get_cur_size(&ring->fixed);
29037 	}
29038 	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
29039 	return cnt;
29040 }
29041 
29042 /* protect elements from first_ptr to last_ptr (NULL means read_idx / write_idx) */
29043 void
29044 dhd_ring_lock(void *_ring, void *first_ptr, void *last_ptr)
29045 {
29046 	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
29047 	unsigned long flags;
29048 
29049 	if (!ring || ring->magic != DHD_RING_MAGIC) {
29050 		DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
29051 		return;
29052 	}
29053 
29054 	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
29055 	if (ring->type == DHD_RING_TYPE_FIXED) {
29056 		__dhd_fixed_ring_lock(&ring->fixed, first_ptr, last_ptr, ring->type);
29057 	}
29058 	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
29059 }
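
/*
 * Usage sketch (illustrative): drain a consistent snapshot without letting
 * the producer reclaim it; consume() is a hypothetical callback.
 *
 *	int n;
 *	dhd_ring_lock(ring, NULL, NULL);	/* NULL, NULL locks read_idx..write_idx */
 *	for (n = dhd_ring_lock_get_count(ring); n > 0; n--) {
 *		consume(dhd_ring_lock_get_first(ring));
 *		dhd_ring_lock_free_first(ring);
 *	}
 */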
29060 
29061 /* release the lock on all elements */
29062 void
29063 dhd_ring_lock_free(void *_ring)
29064 {
29065 	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
29066 	unsigned long flags;
29067 
29068 	if (!ring || ring->magic != DHD_RING_MAGIC) {
29069 		DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
29070 		return;
29071 	}
29072 
29073 	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
29074 	if (ring->type == DHD_RING_TYPE_FIXED) {
29075 		__dhd_fixed_ring_lock_free(&ring->fixed);
29076 	}
29077 	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
29078 }
29079 
29080 void *
29081 dhd_ring_lock_get_first(void *_ring)
29082 {
29083 	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
29084 	void *ret = NULL;
29085 	unsigned long flags;
29086 
29087 	if (!ring || ring->magic != DHD_RING_MAGIC) {
29088 		DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
29089 		return NULL;
29090 	}
29091 
29092 	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
29093 	if (ring->type == DHD_RING_TYPE_FIXED) {
29094 		ret = __dhd_fixed_ring_lock_get_first(&ring->fixed);
29095 	}
29096 	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
29097 	return ret;
29098 }
29099 
29100 void *
29101 dhd_ring_lock_get_last(void *_ring)
29102 {
29103 	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
29104 	void *ret = NULL;
29105 	unsigned long flags;
29106 
29107 	if (!ring || ring->magic != DHD_RING_MAGIC) {
29108 		DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
29109 		return NULL;
29110 	}
29111 
29112 	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
29113 	if (ring->type == DHD_RING_TYPE_FIXED) {
29114 		ret = __dhd_fixed_ring_lock_get_last(&ring->fixed);
29115 	}
29116 	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
29117 	return ret;
29118 }
29119 
29120 int
29121 dhd_ring_lock_get_count(void *_ring)
29122 {
29123 	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
29124 	int ret = BCME_ERROR;
29125 	unsigned long flags;
29126 
29127 	if (!ring || ring->magic != DHD_RING_MAGIC) {
29128 		DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
29129 		return ret;
29130 	}
29131 
29132 	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
29133 	if (ring->type == DHD_RING_TYPE_FIXED) {
29134 		ret = __dhd_fixed_ring_lock_get_count(&ring->fixed);
29135 	}
29136 	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
29137 	return ret;
29138 }
29139 
29140 /* free first locked element */
29141 void
29142 dhd_ring_lock_free_first(void *_ring)
29143 {
29144 	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
29145 	unsigned long flags;
29146 
29147 	if (!ring || ring->magic != DHD_RING_MAGIC) {
29148 		DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
29149 		return;
29150 	}
29151 
29152 	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
29153 	if (ring->type == DHD_RING_TYPE_FIXED) {
29154 		__dhd_fixed_ring_lock_free_first(&ring->fixed);
29155 	}
29156 	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
29157 }
29158 
29159 void
29160 dhd_ring_whole_lock(void *_ring)
29161 {
29162 	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
29163 	unsigned long flags;
29164 
29165 	if (!ring || ring->magic != DHD_RING_MAGIC) {
29166 		DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
29167 		return;
29168 	}
29169 
29170 	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
29171 	if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
29172 		__dhd_singleidx_ring_whole_lock(&ring->single);
29173 	}
29174 	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
29175 }
29176 
29177 void
29178 dhd_ring_whole_unlock(void *_ring)
29179 {
29180 	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
29181 	unsigned long flags;
29182 
29183 	if (!ring || ring->magic != DHD_RING_MAGIC) {
29184 		DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
29185 		return;
29186 	}
29187 
29188 	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
29189 	if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
29190 		__dhd_singleidx_ring_whole_unlock(&ring->single);
29191 	}
29192 	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
29193 }
29194 /* END of DHD RING */
29195 
29196 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0))
29197 #define DHD_VFS_INODE(dir) (dir->d_inode)
29198 #else
29199 #define DHD_VFS_INODE(dir) d_inode(dir)
29200 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0) */
29201 
29202 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
29203 #define DHD_VFS_UNLINK(dir, b, c) vfs_unlink(DHD_VFS_INODE(dir), b)
29204 #else
29205 #define DHD_VFS_UNLINK(dir, b, c) vfs_unlink(DHD_VFS_INODE(dir), b, c)
29206 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0) */
29207 
29208 #if ((defined DHD_DUMP_MNGR) || (defined DNGL_AXI_ERROR_LOGGING))
29209 int
29210 dhd_file_delete(char *path)
29211 {
29212 	struct path file_path;
29213 	int err;
29214 	struct dentry *dir;
29215 
29216 	err = kern_path(path, 0, &file_path);
29217 
29218 	if (err < 0) {
29219 		DHD_ERROR(("Failed to get kern-path delete file: %s error: %d\n", path, err));
29220 		return err;
29221 	}
29222 	if (
29223 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
29224 		!d_is_file(file_path.dentry) ||
29225 #if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 1, 0))
29226 		d_really_is_negative(file_path.dentry) ||
29227 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(4, 1, 0) */
29228 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) */
29229 		FALSE)
29230 	{
29231 		err = -EINVAL;
29232 	} else {
29233 		dir = dget_parent(file_path.dentry);
29234 
29235 		if (!IS_ERR(dir)) {
29236 			err = DHD_VFS_UNLINK(dir, file_path.dentry, NULL);
29237 			dput(dir);
29238 		} else {
29239 			err = PTR_ERR(dir);
29240 		}
29241 	}
29242 
29243 	path_put(&file_path);
29244 
29245 	if (err < 0) {
29246 		DHD_ERROR(("Failed to delete file: %s error: %d\n", path, err));
29247 	}
29248 
29249 	return err;
29250 }
29251 #endif /* DHD_DUMP_MNGR || DNGL_AXI_ERROR_LOGGING */
29252 
29253 #ifdef DHD_DUMP_MNGR
29254 static int
29255 dhd_dump_file_manage_idx(dhd_dump_file_manage_t *fm_ptr, char *fname)
29256 {
29257 	int i;
29258 	int fm_idx = -1;
29259 
29260 	for (i = 0; i < DHD_DUMP_TYPE_COUNT_MAX; i++) {
29261 		/* XXX the dump file manager enqueues each type name into the
29262 		 * first empty slot, so an empty slot can never appear in the middle.
29263 		 */
29264 		if (strlen(fm_ptr->elems[i].type_name) == 0) {
29265 			fm_idx = i;
29266 			break;
29267 		}
29268 		if (!(strncmp(fname, fm_ptr->elems[i].type_name, strlen(fname)))) {
29269 			fm_idx = i;
29270 			break;
29271 		}
29272 	}
29273 
29274 	if (fm_idx == -1) {
29275 		return fm_idx;
29276 	}
29277 
29278 	if (strlen(fm_ptr->elems[fm_idx].type_name) == 0) {
29279 		strncpy(fm_ptr->elems[fm_idx].type_name, fname, DHD_DUMP_TYPE_NAME_SIZE);
29280 		fm_ptr->elems[fm_idx].type_name[DHD_DUMP_TYPE_NAME_SIZE - 1] = '\0';
29281 		fm_ptr->elems[fm_idx].file_idx = 0;
29282 	}
29283 
29284 	return fm_idx;
29285 }
29286 
29287 /*
29288  * dhd_dump_file_manage_enqueue - enqueue a dump file path
29289  * and delete the oldest file if the file count is at its max.
29290  */
29291 void
29292 dhd_dump_file_manage_enqueue(dhd_pub_t *dhd, char *dump_path, char *fname)
29293 {
29294 	int fm_idx;
29295 	int fp_idx;
29296 	dhd_dump_file_manage_t *fm_ptr;
29297 	DFM_elem_t *elem;
29298 
29299 	if (!dhd || !dhd->dump_file_manage) {
29300 		DHD_ERROR(("%s(): dhdp=%p dump_file_manage=%p\n",
29301 			__FUNCTION__, dhd, (dhd ? dhd->dump_file_manage : NULL)));
29302 		return;
29303 	}
29304 
29305 	fm_ptr = dhd->dump_file_manage;
29306 
29307 	/* find file_manage idx */
29308 	DHD_INFO(("%s(): fname: %s dump_path: %s\n", __FUNCTION__, fname, dump_path));
29309 	if ((fm_idx = dhd_dump_file_manage_idx(fm_ptr, fname)) < 0) {
29310 		DHD_ERROR(("%s(): Out of file manager entries, fname: %s\n",
29311 			__FUNCTION__, fname));
29312 		return;
29313 	}
29314 
29315 	elem = &fm_ptr->elems[fm_idx];
29316 	fp_idx = elem->file_idx;
29317 	DHD_INFO(("%s(): fm_idx: %d fp_idx: %d path: %s\n",
29318 		__FUNCTION__, fm_idx, fp_idx, elem->file_path[fp_idx]));
29319 
29320 	/* delete oldest file */
29321 	if (strlen(elem->file_path[fp_idx]) != 0) {
29322 		if (dhd_file_delete(elem->file_path[fp_idx]) < 0) {
29323 			DHD_ERROR(("%s(): Failed to delete file: %s\n",
29324 				__FUNCTION__, elem->file_path[fp_idx]));
29325 		} else {
29326 			DHD_ERROR(("%s(): Successfully deleted file: %s\n",
29327 				__FUNCTION__, elem->file_path[fp_idx]));
29328 		}
29329 	}
29330 
29331 	/* save dump file path */
29332 	strncpy(elem->file_path[fp_idx], dump_path, DHD_DUMP_FILE_PATH_SIZE);
29333 	elem->file_path[fp_idx][DHD_DUMP_FILE_PATH_SIZE - 1] = '\0';
29334 
29335 	/* change file index to next file index */
29336 	elem->file_idx = (elem->file_idx + 1) % DHD_DUMP_FILE_COUNT_MAX;
29337 }
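
/*
 * Example (illustrative; DHD_DUMP_FILE_COUNT_MAX is defined elsewhere): if the
 * max were 5, the 6th enqueue for a given type would wrap file_idx back to 0
 * and delete the oldest dump before saving the new path, so at most five
 * dumps per type remain on disk.
 */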
29338 #endif /* DHD_DUMP_MNGR */
29339 
29340 #ifdef DHD_HP2P
29341 unsigned long
29342 dhd_os_hp2plock(dhd_pub_t *pub)
29343 {
29344 	dhd_info_t *dhd;
29345 	unsigned long flags = 0;
29346 
29347 	dhd = (dhd_info_t *)(pub->info);
29348 
29349 	if (dhd) {
29350 		flags = osl_spin_lock(&dhd->hp2p_lock);
29351 	}
29352 
29353 	return flags;
29354 }
29355 
29356 void
29357 dhd_os_hp2punlock(dhd_pub_t *pub, unsigned long flags)
29358 {
29359 	dhd_info_t *dhd;
29360 
29361 	dhd = (dhd_info_t *)(pub->info);
29362 
29363 	if (dhd) {
29364 		osl_spin_unlock(&dhd->hp2p_lock, flags);
29365 	}
29366 }
29367 #endif /* DHD_HP2P */
29368 #ifdef DNGL_AXI_ERROR_LOGGING
29369 static void
29370 dhd_axi_error_dump(void *handle, void *event_info, u8 event)
29371 {
29372 	dhd_info_t *dhd = (dhd_info_t *)handle;
29373 	dhd_pub_t *dhdp = NULL;
29374 
29375 	if (!dhd) {
29376 		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
29377 		goto exit;
29378 	}
29379 
29380 	dhdp = &dhd->pub;
29381 	if (!dhdp) {
29382 		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
29383 		goto exit;
29384 	}
29385 
29386 	/*
29387 	 * First save the AXI error information to a file,
29388 	 * because a kernel panic is triggered right after this.
29389 	 * After DHD resets, it reads the file and runs hang-event
29390 	 * processing to send the stored AXI error to the Bigdata server.
29391 	 */
29392 	if (dhdp->axi_err_dump->etd_axi_error_v1.version != HND_EXT_TRAP_AXIERROR_VERSION_1) {
29393 		DHD_ERROR(("%s: Invalid AXI version: 0x%x\n",
29394 			__FUNCTION__, dhdp->axi_err_dump->etd_axi_error_v1.version));
29395 	}
29396 
29397 	DHD_OS_WAKE_LOCK(dhdp);
29398 #ifdef DHD_FW_COREDUMP
29399 #ifdef DHD_SSSR_DUMP
29400 	DHD_ERROR(("%s : Set collect_sssr as TRUE\n", __FUNCTION__));
29401 	dhdp->collect_sssr = TRUE;
29402 #endif /* DHD_SSSR_DUMP */
29403 	DHD_ERROR(("%s: scheduling mem dump.. \n", __FUNCTION__));
29404 	dhd_schedule_memdump(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
29405 #endif /* DHD_FW_COREDUMP */
29406 	DHD_OS_WAKE_UNLOCK(dhdp);
29407 
29408 exit:
29409 	/* Trigger kernel panic after taking necessary dumps */
29410 	BUG_ON(1);
29411 }
29412 
29413 void dhd_schedule_axi_error_dump(dhd_pub_t *dhdp, void *type)
29414 {
29415 	DHD_ERROR(("%s: scheduling axi_error_dump.. \n", __FUNCTION__));
29416 	dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
29417 		type, DHD_WQ_WORK_AXI_ERROR_DUMP,
29418 		dhd_axi_error_dump, DHD_WQ_WORK_PRIORITY_HIGH);
29419 }
29420 #endif /* DNGL_AXI_ERROR_LOGGING */
29421 
29422 #ifdef SUPPORT_SET_TID
29423 /*
29424  * Set a custom TID value for UDP frames based on UID.
29425  * Triggered by the Android private command below:
29426  * DRIVER SET_TID <Mode:uint8> <Target UID:uint32> <Custom TID:uint8>
29427  * Mode 0 (SET_TID_OFF)          : Disable changing TID
29428  * Mode 1 (SET_TID_ALL_UDP)      : Change TID for all UDP frames
29429  * Mode 2 (SET_TID_BASED_ON_UID) : Change TID for UDP frames based on target UID
29430  */
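/* Example (hypothetical values): "DRIVER SET_TID 2 10010 5" retags UDP frames
 * from UID 10010 to priority 5, while "DRIVER SET_TID 0 0 0" disables retagging.
 */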
29431 void
29432 dhd_set_tid_based_on_uid(dhd_pub_t *dhdp, void *pkt)
29433 {
29434 	struct ether_header *eh = NULL;
29435 	struct sock *sk = NULL;
29436 	uint8 *pktdata = NULL;
29437 	uint8 *ip_hdr = NULL;
29438 	uint8 cur_prio;
29439 	uint8 prio;
29440 	uint32 uid;
29441 
29442 	if (dhdp->tid_mode == SET_TID_OFF) {
29443 		return;
29444 	}
29445 
29446 	pktdata = (uint8 *)PKTDATA(dhdp->osh, pkt);
29447 	eh = (struct ether_header *) pktdata;
29448 	ip_hdr = (uint8 *)eh + ETHER_HDR_LEN;
29449 
29450 	if (IPV4_PROT(ip_hdr) != IP_PROT_UDP) {
29451 		return;
29452 	}
29453 
29454 	cur_prio = PKTPRIO(pkt);
29455 	prio = dhdp->target_tid;
29456 	uid = dhdp->target_uid;
29457 
29458 	if ((cur_prio == prio) ||
29459 		(cur_prio != PRIO_8021D_BE)) {
29460 		return;
29461 	}
29462 
29463 	sk = ((struct sk_buff*)(pkt))->sk;
29464 
29465 	if ((dhdp->tid_mode == SET_TID_ALL_UDP) ||
29466 		(sk && (uid == __kuid_val(sock_i_uid(sk))))) {
29467 		PKTSETPRIO(pkt, prio);
29468 	}
29469 }
29470 #endif /* SUPPORT_SET_TID */
29471 
29472 #ifdef BCMPCIE
29473 static void
29474 dhd_cto_recovery_handler(void *handle, void *event_info, u8 event)
29475 {
29476 	dhd_info_t *dhd = handle;
29477 	dhd_pub_t *dhdp = NULL;
29478 
29479 	if (!dhd) {
29480 		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
29481 		BUG_ON(1);
29482 		return;
29483 	}
29484 
29485 	dhdp = &dhd->pub;
29486 	if (dhdp->dhd_induce_error == DHD_INDUCE_BH_CBP_HANG) {
29487 		DHD_ERROR(("%s: skip cto recovery for DHD_INDUCE_BH_CBP_HANG\n",
29488 			__FUNCTION__));
29489 		return;
29490 	}
29491 	dhdpcie_cto_recovery_handler(dhdp);
29492 }
29493 
29494 void
29495 dhd_schedule_cto_recovery(dhd_pub_t *dhdp)
29496 {
29497 	DHD_ERROR(("%s: scheduling cto recovery.. \n", __FUNCTION__));
29498 	dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
29499 		NULL, DHD_WQ_WORK_CTO_RECOVERY,
29500 		dhd_cto_recovery_handler, DHD_WQ_WORK_PRIORITY_HIGH);
29501 }
29502 #endif /* BCMPCIE */
29503 
29504 #ifdef DHD_WIFI_SHUTDOWN
29505 void wifi_plat_dev_drv_shutdown(struct platform_device *pdev)
29506 {
29507 	dhd_pub_t *dhd_pub = NULL;
29508 	dhd_info_t *dhd_info = NULL;
29509 	dhd_if_t *dhd_if = NULL;
29510 
29511 	DHD_ERROR(("%s enter\n", __FUNCTION__));
29512 	dhd_pub = g_dhd_pub;
29513 
29514 	if (dhd_os_check_if_up(dhd_pub)) {
29515 		dhd_info = (dhd_info_t *)dhd_pub->info;
29516 		dhd_if = dhd_info->iflist[0];
29517 		ASSERT(dhd_if);
29518 		ASSERT(dhd_if->net);
29519 		if (dhd_if && dhd_if->net) {
29520 			dhd_stop(dhd_if->net);
29521 		}
29522 	}
29523 }
29524 #endif /* DHD_WIFI_SHUTDOWN */
29525 #ifdef WL_AUTO_QOS
29526 void
29527 dhd_wl_sock_qos_set_status(dhd_pub_t *dhdp, unsigned long on_off)
29528 {
29529 	dhd_sock_qos_set_status(dhdp->info, on_off);
29530 }
29531 #endif /* WL_AUTO_QOS */
29532 
29533 #ifdef DHD_CFG80211_SUSPEND_RESUME
29534 void
29535 dhd_cfg80211_suspend(dhd_pub_t *dhdp)
29536 {
29537 	struct net_device *net = dhd_idx2net((dhd_pub_t *)dhdp, 0);
29538 	struct bcm_cfg80211 *cfg = wl_get_cfg(net);
29539 	wl_cfg80211_suspend(cfg);
29540 }
29541 
29542 void
29543 dhd_cfg80211_resume(dhd_pub_t *dhdp)
29544 {
29545 	struct net_device * net = dhd_idx2net((dhd_pub_t *)dhdp, 0);
29546 	struct bcm_cfg80211 *cfg = wl_get_cfg(net);
29547 	wl_cfg80211_resume(cfg);
29548 }
29549 #endif /* DHD_CFG80211_SUSPEND_RESUME */
29550 
29551 void
29552 dhd_generate_rand_mac_addr(struct ether_addr *ea_addr)
29553 {
29554 	RANDOM_BYTES(ea_addr->octet, ETHER_ADDR_LEN);
29555 	/* restore mcast and local admin bits to 0 and 1 */
29556 	ETHER_SET_UNICAST(ea_addr->octet);
29557 	ETHER_SET_LOCALADDR(ea_addr->octet);
29558 	DHD_ERROR(("%s: generated new MAC="MACDBG"\n",
29559 		__FUNCTION__, MAC2STRDBG(ea_addr->octet)));
29560 	return;
29561 }
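
/*
 * Example (illustrative): a random first octet of 0x03 becomes 0x02 here,
 * since ETHER_SET_UNICAST clears the multicast bit (bit 0) and
 * ETHER_SET_LOCALADDR sets the locally-administered bit (bit 1).
 */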
29562 
29563 void *
29564 dhd_get_roam_evt(dhd_pub_t *dhdp)
29565 {
29566 #if defined(DHD_PUB_ROAM_EVT)
29567 	return (void *)&(dhdp->roam_evt);
29568 #else
29569 	return NULL;
29570 #endif /* DHD_PUB_ROAM_EVT */
29571 }
29572 
29573 /* BANDLOCK_FILE is for Hikey only; BANDLOCK takes priority over BANDLOCK_FILE */
29574 static void dhd_set_bandlock(dhd_pub_t * dhd)
29575 {
29576 #if defined(BANDLOCK)
29577 	int band = BANDLOCK;
29578 	if (dhd_wl_ioctl_cmd(dhd, WLC_SET_BAND, &band, sizeof(band), TRUE, 0) < 0) {
29579 		DHD_ERROR(("%s: set band(%d) error\n", __FUNCTION__, band));
29580 	}
29581 #elif defined(BANDLOCK_FILE)
29582 	int band;
29583 	char val[2] = {0, 0};
29584 	if (dhd_read_file(PATH_BANDLOCK_INFO, (char *)val, sizeof(char)) == BCME_OK) {
29585 		band = bcm_atoi(val);
29586 		if (dhd_wl_ioctl_cmd(dhd, WLC_SET_BAND, &band, sizeof(band), TRUE, 0) < 0) {
29587 			DHD_ERROR(("%s: set band(%d) error\n", __FUNCTION__, band));
29588 		}
29589 	}
29590 #endif /* BANDLOCK || BANDLOCK_FILE */
29591 }
29592 
29593 #ifdef PCIE_FULL_DONGLE
29594 /* API to delete flowrings and stations
29595 * corresponding to the interface (ndev)
29596 */
29597 void
29598 dhd_net_del_flowrings_sta(dhd_pub_t *dhd, struct net_device *ndev)
29599 {
29600 	dhd_if_t *ifp = NULL;
29601 
29602 	ifp = dhd_get_ifp_by_ndev(dhd, ndev);
29603 	if (ifp == NULL) {
29604 		DHD_ERROR(("DHD Iface Info corresponding to %s not found\n", ndev->name));
29605 		return;
29606 	}
29607 
29608 	/* For now this is called only in the iface delete path.
29609 	* Add reason codes if this API needs to be reused in other paths.
29610 	*/
29611 	DHD_ERROR(("%s: Clean up IFACE idx %d due to interface delete\n",
29612 		__FUNCTION__, ifp->idx));
29613 
29614 	dhd_del_all_sta(dhd, ifp->idx);
29615 	dhd_flow_rings_delete(dhd, ifp->idx);
29616 }
29617 #endif /* PCIE_FULL_DONGLE */
29618 
29619 #ifndef BCMDBUS
29620 static void
29621 dhd_deferred_socram_dump(void *handle, void *event_info, u8 event)
29622 {
29623 	dhd_pub_t *dhdp = (dhd_pub_t *)event_info;
29624 	DHD_ERROR(("%s ... scheduled to collect memdump over bus\n", __FUNCTION__));
29625 	dhd_socram_dump(dhdp->bus);
29626 }
29627 
29628 int
29629 dhd_schedule_socram_dump(dhd_pub_t *dhdp)
29630 {
29631 	int ret = 0;
29632 	ret = dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dhdp,
29633 		DHD_WQ_WORK_SOC_RAM_COLLECT, dhd_deferred_socram_dump, DHD_WQ_WORK_PRIORITY_HIGH);
29634 	return ret;
29635 }
29636 #endif /* !BCMDBUS */
29637 
29638 void *dhd_get_pub(struct net_device *dev)
29639 {
29640 	dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
29641 	if (dhdinfo)
29642 		return (void *)&dhdinfo->pub;
29643 	else {
29644 		printf("%s: null dhdinfo\n", __FUNCTION__);
29645 		return NULL;
29646 	}
29647 }
29648 
29649 void *dhd_get_conf(struct net_device *dev)
29650 {
29651 	dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
29652 	if (dhdinfo)
29653 		return (void *)dhdinfo->pub.conf;
29654 	else {
29655 		printf("%s: null dhdinfo\n", __FUNCTION__);
29656 		return NULL;
29657 	}
29658 }
29659 
29660 bool dhd_os_wd_timer_enabled(void *bus)
29661 {
29662 	dhd_pub_t *pub = bus;
29663 	dhd_info_t *dhd = (dhd_info_t *)pub->info;
29664 
29665 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
29666 	if (!dhd) {
29667 		DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
29668 		return FALSE;
29669 	}
29670 	return dhd->wd_timer_valid;
29671 }
29672 
29673 #if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG)
29674 /* Automatically add/del an interface to/from the bridge dev that the primary dev is in */
29675 static void dhd_bridge_dev_set(dhd_info_t *dhd, int ifidx, struct net_device *dev)
29676 {
29677 	struct net_device *primary_ndev = NULL, *br_dev = NULL;
29678 	int cmd;
29679 	struct ifreq ifr;
29680 
29681 	/* add new interface to bridge dev */
29682 	if (dev) {
29683 		int found = 0, i;
29684 		DHD_ERROR(("bssidx %d\n", dhd->pub.info->iflist[ifidx]->bssidx));
29685 		for (i = 0; i < ifidx; i++) {
29686 			DHD_ERROR(("bssidx %d %d\n", i, dhd->pub.info->iflist[i]->bssidx));
29687 			/* search the primary interface */
29688 			if (dhd->pub.info->iflist[i]->bssidx == dhd->pub.info->iflist[ifidx]->bssidx) {
29689 				primary_ndev = dhd->pub.info->iflist[i]->net;
29690 				DHD_ERROR(("iflist[%d] is the primary dev %s\n", i, primary_ndev->name));
29691 				found = 1;
29692 				break;
29693 			}
29694 		}
29695 		if (found == 0) {
29696 			DHD_ERROR(("Cannot find primary dev %s\n", dev->name));
29697 			return;
29698 		}
29699 		cmd = SIOCBRADDIF;
29700 		ifr.ifr_ifindex = dev->ifindex;
29701 	} else { /* del interface from bridge dev */
29702 		primary_ndev = dhd->pub.info->iflist[ifidx]->net;
29703 		cmd = SIOCBRDELIF;
29704 		ifr.ifr_ifindex = primary_ndev->ifindex;
29705 	}
29706 	/* if primary net device is bridged */
29707 	if (primary_ndev->priv_flags & IFF_BRIDGE_PORT) {
29708 		rtnl_lock();
29709 		/* get bridge device */
29710 		br_dev = netdev_master_upper_dev_get(primary_ndev);
29711 		if (br_dev) {
29712 			const struct net_device_ops *ops = br_dev->netdev_ops;
29713 			DHD_ERROR(("br %s pri %s\n", br_dev->name, primary_ndev->name));
29714 			if (ops) {
29715 				if (cmd == SIOCBRADDIF) {
29716 					DHD_ERROR(("br call ndo_add_slave\n"));
29717 					ops->ndo_add_slave(br_dev, dev);
29718 					/* Also bring wds0.x interface up automatically */
29719 					dev_change_flags(dev, dev->flags | IFF_UP);
29720 				}
29721 				else {
29722 					DHD_ERROR(("br call ndo_del_slave\n"));
29723 					ops->ndo_del_slave(br_dev, primary_ndev);
29724 				}
29725 			}
29726 		}
29727 		else {
29728 			DHD_ERROR(("no br dev\n"));
29729 		}
29730 		rtnl_unlock();
29731 	}
29732 	else {
29733 		DHD_ERROR(("device %s is not bridged\n", primary_ndev->name));
29734 	}
29735 }
29736 #endif /* defined(WLDWDS) && defined(FOURADDR_AUTO_BRG) */
29737