/*
 * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
 * Basically selected code segments from usb-cdc.c and usb-rndis.c
 *
 * Copyright (C) 1999-2017, Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: dhd_linux.c 710862 2017-07-14 07:43:59Z $
 */

#include <typedefs.h>
#include <linuxver.h>
#include <osl.h>
#ifdef SHOW_LOGTRACE
#include <linux/syscalls.h>
#include <event_log.h>
#endif /* SHOW_LOGTRACE */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/etherdevice.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/fcntl.h>
#include <linux/fs.h>
#include <linux/ip.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <net/addrconf.h>
#include <uapi/linux/sched/types.h>
#include <linux/netdevice.h>
#ifdef ENABLE_ADAPTIVE_SCHED
#include <linux/cpufreq.h>
#endif /* ENABLE_ADAPTIVE_SCHED */

#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <epivers.h>
#include <bcmutils.h>
#include <bcmendian.h>
#include <bcmdevs.h>


#include <ethernet.h>
#include <bcmevent.h>
#include <vlan.h>
#include <802.3.h>

#include <dngl_stats.h>
#include <dhd_linux_wq.h>
#include <dhd.h>
#include <dhd_linux.h>
#ifdef DHD_WET
#include <dhd_wet.h>
#endif /* DHD_WET */
#ifdef PCIE_FULL_DONGLE
#include <dhd_flowring.h>
#endif
#include <dhd_bus.h>
#include <dhd_proto.h>
#include <dhd_config.h>
#include <dhd_dbg.h>
#include <dhd_debug.h>
#ifdef CONFIG_HAS_WAKELOCK
#include <linux/wakelock.h>
#endif
#ifdef WL_CFG80211
#include <wl_cfg80211.h>
#endif
#ifdef PNO_SUPPORT
#include <dhd_pno.h>
#endif
#ifdef RTT_SUPPORT
#include <dhd_rtt.h>
#endif
#ifdef DHD_TIMESYNC
#include <dhd_timesync.h>
#endif /* DHD_TIMESYNC */

#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif

#if defined(CONFIG_SOC_EXYNOS8895)
#include <linux/exynos-pci-ctrl.h>
#endif /* CONFIG_SOC_EXYNOS8895 */

#ifdef DHD_WMF
#include <dhd_wmf_linux.h>
#endif /* DHD_WMF */

#ifdef DHD_L2_FILTER
#include <bcmicmp.h>
#include <bcm_l2_filter.h>
#include <dhd_l2_filter.h>
#endif /* DHD_L2_FILTER */

#ifdef DHD_PSTA
#include <dhd_psta.h>
#endif /* DHD_PSTA */


#ifdef DHDTCPACK_SUPPRESS
#include <dhd_ip.h>
#endif /* DHDTCPACK_SUPPRESS */
#include <dhd_daemon.h>
#ifdef DHD_PKT_LOGGING
#include <dhd_pktlog.h>
#endif /* DHD_PKT_LOGGING */
#if defined(STAT_REPORT)
#include <wl_statreport.h>
#endif /* STAT_REPORT */
#ifdef DHD_DEBUG_PAGEALLOC
typedef void (*page_corrupt_cb_t)(void *handle, void *addr_corrupt, size_t len);
void dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len);
extern void register_page_corrupt_cb(page_corrupt_cb_t cb, void* handle);
#endif /* DHD_DEBUG_PAGEALLOC */


#if defined(DHD_LB)
#if !defined(PCIE_FULL_DONGLE)
#error "DHD Loadbalancing only supported on PCIE_FULL_DONGLE"
#endif /* !PCIE_FULL_DONGLE */
#endif /* DHD_LB */

#if defined(DHD_LB_RXP) || defined(DHD_LB_RXC) || defined(DHD_LB_TXC) || \
	defined(DHD_LB_STATS)
#if !defined(DHD_LB)
#error "DHD loadbalance derivatives are supported only if DHD_LB is defined"
#endif /* !DHD_LB */
#endif /* DHD_LB_RXP || DHD_LB_RXC || DHD_LB_TXC || DHD_LB_STATS */
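
/* For reference, a typical way to enable this feature set from a vendor
 * build (illustrative only; nothing beyond the flags checked above is taken
 * from this file):
 *   DHDCFLAGS += -DPCIE_FULL_DONGLE -DDHD_LB -DDHD_LB_RXP -DDHD_LB_STATS
 */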

#if defined(DHD_LB)
/* Dynamic CPU selection for load balancing */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/notifier.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>

#if !defined(DHD_LB_PRIMARY_CPUS)
#define DHD_LB_PRIMARY_CPUS	0x0 /* Big CPU coreids mask */
#endif
#if !defined(DHD_LB_SECONDARY_CPUS)
#define DHD_LB_SECONDARY_CPUS	0xFE /* Little CPU coreids mask */
#endif

#define HIST_BIN_SIZE	9

static void dhd_rx_napi_dispatcher_fn(struct work_struct * work);

#if defined(DHD_LB_TXP)
static void dhd_lb_tx_handler(unsigned long data);
static void dhd_tx_dispatcher_work(struct work_struct * work);
static void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp);
static void dhd_lb_tx_dispatch(dhd_pub_t *dhdp);

/* Pkttag not compatible with PROP_TXSTATUS or WLFC */
typedef struct dhd_tx_lb_pkttag_fr {
	struct net_device *net;
	int ifidx;
} dhd_tx_lb_pkttag_fr_t;

#define DHD_LB_TX_PKTTAG_SET_NETDEV(tag, netdevp)	((tag)->net = netdevp)
#define DHD_LB_TX_PKTTAG_NETDEV(tag)	((tag)->net)

#define DHD_LB_TX_PKTTAG_SET_IFIDX(tag, ifidx)	((tag)->ifidx = ifidx)
#define DHD_LB_TX_PKTTAG_IFIDX(tag)	((tag)->ifidx)
#endif /* DHD_LB_TXP */
#endif /* DHD_LB */

#ifdef HOFFLOAD_MODULES
#include <linux/firmware.h>
#endif

#ifdef WLMEDIA_HTSF
#include <linux/time.h>
#include <htsf.h>

#define HTSF_MINLEN 200    /* min. packet length to timestamp */
#define HTSF_BUS_DELAY 150 /* assume a fixed propagation delay in us */
#define TSMAX  1000        /* max no. of timing records kept */
#define NUMBIN 34

static uint32 tsidx = 0;
static uint32 htsf_seqnum = 0;
uint32 tsfsync;
struct timeval tsync;
static uint32 tsport = 5010;

typedef struct histo_ {
	uint32 bin[NUMBIN];
} histo_t;

#if !ISPOWEROF2(DHD_SDALIGN)
#error DHD_SDALIGN is not a power of 2!
#endif

static histo_t vi_d1, vi_d2, vi_d3, vi_d4;
#endif /* WLMEDIA_HTSF */

#ifdef WL_MONITOR
#include <bcmmsgbuf.h>
#include <bcmwifi_monitor.h>
#endif

#define htod32(i) (i)
#define htod16(i) (i)
#define dtoh32(i) (i)
#define dtoh16(i) (i)
#define htodchanspec(i) (i)
#define dtohchanspec(i) (i)

#ifdef STBLINUX
#ifdef quote_str
#undef quote_str
#endif /* quote_str */
#ifdef to_str
#undef to_str
#endif /* to_str */
#define to_str(s) #s
#define quote_str(s) to_str(s)

static char *driver_target = "driver_target: "quote_str(BRCM_DRIVER_TARGET);
#endif /* STBLINUX */



#if defined(SOFTAP)
extern bool ap_cfg_running;
extern bool ap_fw_loaded;
#endif

extern void dhd_dump_eapol_4way_message(dhd_pub_t *dhd, char *ifname,
	char *dump_data, bool direction);

#ifdef FIX_CPU_MIN_CLOCK
#include <linux/pm_qos.h>
#endif /* FIX_CPU_MIN_CLOCK */

#ifdef SET_RANDOM_MAC_SOFTAP
#ifndef CONFIG_DHD_SET_RANDOM_MAC_VAL
#define CONFIG_DHD_SET_RANDOM_MAC_VAL	0x001A11
#endif
static u32 vendor_oui = CONFIG_DHD_SET_RANDOM_MAC_VAL;
#endif /* SET_RANDOM_MAC_SOFTAP */

#ifdef ENABLE_ADAPTIVE_SCHED
#define DEFAULT_CPUFREQ_THRESH	1000000	/* threshold frequency : 1000000 = 1GHz */
#ifndef CUSTOM_CPUFREQ_THRESH
#define CUSTOM_CPUFREQ_THRESH	DEFAULT_CPUFREQ_THRESH
#endif /* CUSTOM_CPUFREQ_THRESH */
#endif /* ENABLE_ADAPTIVE_SCHED */

/* enable HOSTIP cache update from the host side when an eth0:N is up */
#define AOE_IP_ALIAS_SUPPORT 1

#ifdef BCM_FD_AGGR
#include <bcm_rpc.h>
#include <bcm_rpc_tp.h>
#endif
#ifdef PROP_TXSTATUS
#include <wlfc_proto.h>
#include <dhd_wlfc.h>
#endif

#include <wl_android.h>
#ifdef WL_ESCAN
#include <wl_escan.h>
#endif

/* Maximum STA per radio */
#define DHD_MAX_STA	32



const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
#define WME_PRIO2AC(prio)	wme_fifo2ac[prio2fifo[(prio)]]
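/* For illustration (assuming the usual AC_BK=0..AC_VO=3 enumeration, which is
 * not spelled out in this file): 802.1D priority 6 or 7 maps via prio2fifo to
 * fifo 3 and then via wme_fifo2ac to AC 3 (voice), while priority 1 or 2 maps
 * to fifo 0 and AC 0 (background).
 */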

#ifdef ARP_OFFLOAD_SUPPORT
void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx);
static int dhd_inetaddr_notifier_call(struct notifier_block *this,
	unsigned long event, void *ptr);
static struct notifier_block dhd_inetaddr_notifier = {
	.notifier_call = dhd_inetaddr_notifier_call
};
/* Make sure we don't register the same notifier twice; otherwise a loop is likely to be
 * created in the kernel notifier linked list (with 'next' pointing to itself)
 */
static bool dhd_inetaddr_notifier_registered = FALSE;
#endif /* ARP_OFFLOAD_SUPPORT */

#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
int dhd_inet6addr_notifier_call(struct notifier_block *this,
	unsigned long event, void *ptr);
static struct notifier_block dhd_inet6addr_notifier = {
	.notifier_call = dhd_inet6addr_notifier_call
};
/* Make sure we don't register the same notifier twice; otherwise a loop is likely to be
 * created in the kernel notifier linked list (with 'next' pointing to itself)
 */
static bool dhd_inet6addr_notifier_registered = FALSE;
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
#include <linux/suspend.h>
volatile bool dhd_mmc_suspend = FALSE;
DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */

#if defined(OOB_INTR_ONLY) || defined(FORCE_WOWLAN)
extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
static void dhd_hang_process(void *dhd_info, void *event_data, u8 event);
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
MODULE_LICENSE("GPL and additional rights");
#endif /* LinuxVer */

#if defined(MULTIPLE_SUPPLICANT)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
DEFINE_MUTEX(_dhd_mutex_lock_);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
#endif

#ifdef CONFIG_BCM_DETECT_CONSECUTIVE_HANG
#define MAX_CONSECUTIVE_HANG_COUNTS 5
#endif /* CONFIG_BCM_DETECT_CONSECUTIVE_HANG */

#include <dhd_bus.h>

#ifdef DHD_ULP
#include <dhd_ulp.h>
#endif /* DHD_ULP */

#ifdef BCM_FD_AGGR
#define DBUS_RX_BUFFER_SIZE_DHD(net)	(BCM_RPC_TP_DNGL_AGG_MAX_BYTE)
#else
#ifndef PROP_TXSTATUS
#define DBUS_RX_BUFFER_SIZE_DHD(net)	(net->mtu + net->hard_header_len + dhd->pub.hdrlen)
#else
#define DBUS_RX_BUFFER_SIZE_DHD(net)	(net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
#endif
#endif /* BCM_FD_AGGR */

#ifdef PROP_TXSTATUS
extern bool dhd_wlfc_skip_fc(void * dhdp, uint8 idx);
extern void dhd_wlfc_plat_init(void *dhd);
extern void dhd_wlfc_plat_deinit(void *dhd);
#endif /* PROP_TXSTATUS */
extern uint sd_f2_blocksize;
extern int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size);

#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
const char *
print_tainted()
{
	return "";
}
#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */

/* Linux wireless extension support */
#if defined(WL_WIRELESS_EXT)
#include <wl_iw.h>
extern wl_iw_extra_params_t g_wl_iw_params;
#endif /* defined(WL_WIRELESS_EXT) */

#ifdef CONFIG_PARTIALSUSPEND_SLP
#include <linux/partialsuspend_slp.h>
#define CONFIG_HAS_EARLYSUSPEND
#define DHD_USE_EARLYSUSPEND
#define register_early_suspend		register_pre_suspend
#define unregister_early_suspend	unregister_pre_suspend
#define early_suspend			pre_suspend
#define EARLY_SUSPEND_LEVEL_BLANK_SCREEN	50
#else
#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
#include <linux/earlysuspend.h>
#endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
#endif /* CONFIG_PARTIALSUSPEND_SLP */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
#include <linux/nl80211.h>
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */

#if defined(BCMPCIE)
extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd, int *dtim_period, int *bcn_interval);
#else
extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd);
#endif /* BCMPCIE */

#ifdef PKT_FILTER_SUPPORT
extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id);
#endif

#if defined(PKT_FILTER_SUPPORT) && defined(APF)
static int __dhd_apf_add_filter(struct net_device *ndev, uint32 filter_id,
	u8* program, uint32 program_len);
static int __dhd_apf_config_filter(struct net_device *ndev, uint32 filter_id,
	uint32 mode, uint32 enable);
static int __dhd_apf_delete_filter(struct net_device *ndev, uint32 filter_id);
#endif /* PKT_FILTER_SUPPORT && APF */



static INLINE int argos_register_notifier_init(struct net_device *net) { return 0; }
static INLINE int argos_register_notifier_deinit(void) { return 0; }

#if defined(BT_OVER_SDIO)
extern void wl_android_set_wifi_on_flag(bool enable);
#endif /* BT_OVER_SDIO */


#if defined(TRAFFIC_MGMT_DWM)
void traffic_mgmt_pkt_set_prio(dhd_pub_t *dhdp, void * pktbuf);
#endif

#ifdef DHD_FW_COREDUMP
static void dhd_mem_dump(void *dhd_info, void *event_info, u8 event);
#endif /* DHD_FW_COREDUMP */
#ifdef DHD_LOG_DUMP
#define DLD_BUFFER_NUM 2
/* [0]: General, [1]: Special */
struct dhd_log_dump_buf g_dld_buf[DLD_BUFFER_NUM];
static const int dld_buf_size[] = {
	(1024 * 1024),	/* DHD_LOG_DUMP_BUFFER_SIZE */
	(8 * 1024)	/* DHD_LOG_DUMP_BUFFER_EX_SIZE */
};
static void dhd_log_dump_init(dhd_pub_t *dhd);
static void dhd_log_dump_deinit(dhd_pub_t *dhd);
static void dhd_log_dump(void *handle, void *event_info, u8 event);
void dhd_schedule_log_dump(dhd_pub_t *dhdp);
static int do_dhd_log_dump(dhd_pub_t *dhdp);
#endif /* DHD_LOG_DUMP */

#ifdef DHD_DEBUG_UART
#include <linux/kmod.h>
#define DHD_DEBUG_UART_EXEC_PATH	"/system/bin/wldu"
static void dhd_debug_uart_exec_rd(void *handle, void *event_info, u8 event);
static void dhd_debug_uart_exec(dhd_pub_t *dhdp, char *cmd);
#endif /* DHD_DEBUG_UART */

static int dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused);
static struct notifier_block dhd_reboot_notifier = {
	.notifier_call = dhd_reboot_callback,
	.priority = 1,
};

#ifdef BCMPCIE
static int is_reboot = 0;
#endif /* BCMPCIE */

#if defined(BT_OVER_SDIO)
#include "dhd_bt_interface.h"
dhd_pub_t *g_dhd_pub = NULL;
#endif /* defined (BT_OVER_SDIO) */

atomic_t exit_in_progress = ATOMIC_INIT(0);

typedef struct dhd_if_event {
	struct list_head	list;
	wl_event_data_if_t	event;
	char			name[IFNAMSIZ+1];
	uint8			mac[ETHER_ADDR_LEN];
} dhd_if_event_t;

/* Interface control information */
typedef struct dhd_if {
	struct dhd_info *info;	/* back pointer to dhd_info */
	/* OS/stack specifics */
	struct net_device *net;
	int		idx;		/* iface idx in dongle */
	uint		subunit;	/* subunit */
	uint8		mac_addr[ETHER_ADDR_LEN];	/* assigned MAC address */
	bool		set_macaddress;
	bool		set_multicast;
	uint8		bssidx;		/* bsscfg index for the interface */
	bool		attached;	/* Delayed attachment when unset */
	bool		txflowcontrol;	/* Per interface flow control indicator */
	char		name[IFNAMSIZ+1];	/* linux interface name */
	char		dngl_name[IFNAMSIZ+1];	/* corresponding dongle interface name */
	struct net_device_stats stats;
#ifdef DHD_WMF
	dhd_wmf_t	wmf;		/* per bsscfg wmf setting */
	bool		wmf_psta_disable;	/* enable/disable MC pkt to each mac
						 * of MC group behind PSTA
						 */
#endif /* DHD_WMF */
#ifdef PCIE_FULL_DONGLE
	struct list_head sta_list;	/* sll of associated stations */
#if !defined(BCM_GMAC3)
	spinlock_t	sta_list_lock;	/* lock for manipulating sll */
#endif /* ! BCM_GMAC3 */
#endif /* PCIE_FULL_DONGLE */
	uint32		ap_isolate;	/* ap-isolation settings */
#ifdef DHD_L2_FILTER
	bool parp_enable;
	bool parp_discard;
	bool parp_allnode;
	arp_table_t *phnd_arp_table;
	/* for Per BSS modification */
	bool dhcp_unicast;
	bool block_ping;
	bool grat_arp;
#endif /* DHD_L2_FILTER */
#ifdef DHD_MCAST_REGEN
	bool mcast_regen_bss_enable;
#endif
	bool rx_pkt_chainable;	/* set all rx packets to chainable config by default */
	cumm_ctr_t cumm_ctr;	/* cumulative queue length of child flowrings */
} dhd_if_t;

#ifdef WLMEDIA_HTSF
typedef struct {
	uint32 low;
	uint32 high;
} tsf_t;

typedef struct {
	uint32 last_cycle;
	uint32 last_sec;
	uint32 last_tsf;
	uint32 coef;     /* scaling factor */
	uint32 coefdec1; /* first decimal  */
	uint32 coefdec2; /* second decimal */
} htsf_t;

typedef struct {
	uint32 t1;
	uint32 t2;
	uint32 t3;
	uint32 t4;
} tstamp_t;

static tstamp_t ts[TSMAX];
static tstamp_t maxdelayts;
static uint32 maxdelay = 0, tspktcnt = 0, maxdelaypktno = 0;

#endif /* WLMEDIA_HTSF */

struct ipv6_work_info_t {
	uint8	if_idx;
	char	ipv6_addr[IPV6_ADDR_LEN];
	unsigned long event;
};
static void dhd_process_daemon_msg(struct sk_buff *skb);
static void dhd_destroy_to_notifier_skt(void);
static int dhd_create_to_notifier_skt(void);
static struct sock *nl_to_event_sk = NULL;
int sender_pid = 0;

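/* Netlink socket config for the dhd user-space daemon notifier; presumably
 * handed to netlink_kernel_create() by dhd_create_to_notifier_skt() declared
 * above (3.6+ kernels take a netlink_kernel_cfg instead of separate args).
 */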
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
struct netlink_kernel_cfg g_cfg = {
	.groups = 1,
	.input = dhd_process_daemon_msg,
};
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) */

typedef struct dhd_dump {
	uint8 *buf;
	int bufsize;
} dhd_dump_t;


/* When Perimeter locks are deployed, any blocking calls must be preceded
 * by a PERIM UNLOCK and followed by a PERIM LOCK.
 * Examples of blocking calls are: schedule_timeout(), down_interruptible(),
 * wait_event_timeout().
 */
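/* Illustrative pattern only (macro names per dhd.h in this driver tree):
 *	DHD_PERIM_UNLOCK(&dhd->pub);
 *	timeout = wait_event_timeout(dhd->ioctl_resp_wait, condition, timeout);
 *	DHD_PERIM_LOCK(&dhd->pub);
 */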

/* Local private structure (extension of pub) */
typedef struct dhd_info {
#if defined(WL_WIRELESS_EXT)
	wl_iw_t		iw;		/* wireless extensions state (must be first) */
#endif /* defined(WL_WIRELESS_EXT) */
	dhd_pub_t	pub;
	dhd_if_t	*iflist[DHD_MAX_IFS];	/* for supporting multiple interfaces */

	wifi_adapter_info_t *adapter;	/* adapter information, interrupt, fw path etc. */
	char		fw_path[PATH_MAX];	/* path to firmware image */
	char		nv_path[PATH_MAX];	/* path to nvram vars file */
	char		clm_path[PATH_MAX];	/* path to clm vars file */
	char		conf_path[PATH_MAX];	/* path to config vars file */
#ifdef DHD_UCODE_DOWNLOAD
	char		uc_path[PATH_MAX];	/* path to ucode image */
#endif /* DHD_UCODE_DOWNLOAD */

	/* serialize dhd iovars */
	struct mutex dhd_iovar_mutex;

	struct semaphore proto_sem;
#ifdef PROP_TXSTATUS
	spinlock_t	wlfc_spinlock;

#ifdef BCMDBUS
	ulong		wlfc_lock_flags;
	ulong		wlfc_pub_lock_flags;
#endif /* BCMDBUS */
#endif /* PROP_TXSTATUS */
#ifdef WLMEDIA_HTSF
	htsf_t		htsf;
#endif
	wait_queue_head_t ioctl_resp_wait;
	wait_queue_head_t d3ack_wait;
	wait_queue_head_t dhd_bus_busy_state_wait;
	uint32		default_wd_interval;

	struct timer_list timer;
	bool		wd_timer_valid;
#ifdef DHD_PCIE_RUNTIMEPM
	struct timer_list rpm_timer;
	bool		rpm_timer_valid;
	tsk_ctl_t	thr_rpm_ctl;
#endif /* DHD_PCIE_RUNTIMEPM */
	struct tasklet_struct tasklet;
	spinlock_t	sdlock;
	spinlock_t	txqlock;
	spinlock_t	rxqlock;
	spinlock_t	dhd_lock;
#ifdef BCMDBUS
	ulong		txqlock_flags;
#else

	struct semaphore sdsem;
	tsk_ctl_t	thr_dpc_ctl;
	tsk_ctl_t	thr_wdt_ctl;
#endif /* BCMDBUS */

	tsk_ctl_t	thr_rxf_ctl;
	spinlock_t	rxf_lock;
	bool		rxthread_enabled;

	/* Wakelocks */
#if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	struct wake_lock wl_wifi;	/* Wifi wakelock */
	struct wake_lock wl_rxwake;	/* Wifi rx wakelock */
	struct wake_lock wl_ctrlwake;	/* Wifi ctrl wakelock */
	struct wake_lock wl_wdwake;	/* Wifi wd wakelock */
	struct wake_lock wl_evtwake;	/* Wifi event wakelock */
	struct wake_lock wl_pmwake;	/* Wifi pm handler wakelock */
	struct wake_lock wl_txflwake;	/* Wifi tx flow wakelock */
#ifdef BCMPCIE_OOB_HOST_WAKE
	struct wake_lock wl_intrwake;	/* Host wakeup wakelock */
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
	struct wake_lock wl_scanwake;	/* Wifi scan wakelock */
#endif /* DHD_USE_SCAN_WAKELOCK */
#endif /* CONFIG_HAS_WAKELOCK && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
	/* net_device interface lock, prevent race conditions among net_dev interface
	 * calls and wifi_on or wifi_off
	 */
	struct mutex dhd_net_if_mutex;
	struct mutex dhd_suspend_mutex;
#if defined(PKT_FILTER_SUPPORT) && defined(APF)
	struct mutex dhd_apf_mutex;
#endif /* PKT_FILTER_SUPPORT && APF */
#endif
	spinlock_t wakelock_spinlock;
	spinlock_t wakelock_evt_spinlock;
	uint32 wakelock_counter;
	int wakelock_wd_counter;
	int wakelock_rx_timeout_enable;
	int wakelock_ctrl_timeout_enable;
	bool waive_wakelock;
	uint32 wakelock_before_waive;

	/* Thread to issue ioctl for multicast */
	wait_queue_head_t ctrl_wait;
	atomic_t pend_8021x_cnt;
	dhd_attach_states_t dhd_state;
#ifdef SHOW_LOGTRACE
	dhd_event_log_t event_data;
#endif /* SHOW_LOGTRACE */

#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
	struct early_suspend early_suspend;
#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */

#ifdef ARP_OFFLOAD_SUPPORT
	u32 pend_ipaddr;
#endif /* ARP_OFFLOAD_SUPPORT */
#ifdef BCM_FD_AGGR
	void *rpc_th;
	void *rpc_osh;
	struct timer_list rpcth_timer;
	bool rpcth_timer_active;
	uint8 fdaggr;
#endif
#ifdef DHDTCPACK_SUPPRESS
	spinlock_t	tcpack_lock;
#endif /* DHDTCPACK_SUPPRESS */
#ifdef FIX_CPU_MIN_CLOCK
	bool cpufreq_fix_status;
	struct mutex cpufreq_fix;
	struct pm_qos_request dhd_cpu_qos;
#ifdef FIX_BUS_MIN_CLOCK
	struct pm_qos_request dhd_bus_qos;
#endif /* FIX_BUS_MIN_CLOCK */
#endif /* FIX_CPU_MIN_CLOCK */
	void *dhd_deferred_wq;
#ifdef DEBUG_CPU_FREQ
	struct notifier_block freq_trans;
	int __percpu *new_freq;
#endif
	unsigned int unit;
	struct notifier_block pm_notifier;
#ifdef DHD_PSTA
	uint32	psta_mode;	/* PSTA or PSR */
#endif /* DHD_PSTA */
#ifdef DHD_WET
	uint32	wet_mode;
#endif /* DHD_WET */
#ifdef DHD_DEBUG
	dhd_dump_t *dump;
	struct timer_list join_timer;
	u32 join_timeout_val;
	bool join_timer_active;
	uint scan_time_count;
	struct timer_list scan_timer;
	bool scan_timer_active;
#endif
#if defined(DHD_LB)
	/* CPU Load Balance dynamic CPU selection */

	/* Variable that tracks the current CPUs available for candidacy */
	cpumask_var_t cpumask_curr_avail;

	/* Primary and secondary CPU mask */
	cpumask_var_t cpumask_primary, cpumask_secondary; /* configuration */
	cpumask_var_t cpumask_primary_new, cpumask_secondary_new; /* temp */

	struct notifier_block cpu_notifier;

	/* Tasklet to handle Tx Completion packet freeing */
	struct tasklet_struct tx_compl_tasklet;
	atomic_t	tx_compl_cpu;

	/* Tasklet to handle RxBuf Post during Rx completion */
	struct tasklet_struct rx_compl_tasklet;
	atomic_t	rx_compl_cpu;

	/* Napi struct for handling rx packet sendup. Packets are removed from
	 * the H2D RxCompl ring and placed into rx_pend_queue. rx_pend_queue is
	 * then appended to rx_napi_queue (with the lock held) and the
	 * rx_napi_struct is scheduled to run on rx_napi_cpu.
	 */
	struct sk_buff_head   rx_pend_queue  ____cacheline_aligned;
	struct sk_buff_head   rx_napi_queue  ____cacheline_aligned;
	struct napi_struct    rx_napi_struct ____cacheline_aligned;
	atomic_t	rx_napi_cpu;	/* cpu on which the napi is dispatched */
	struct net_device    *rx_napi_netdev;	/* netdev of primary interface */

	struct work_struct    rx_napi_dispatcher_work;
	struct work_struct    tx_compl_dispatcher_work;
	struct work_struct    tx_dispatcher_work;

	/* Number of times DPC Tasklet ran */
	uint32	dhd_dpc_cnt;
	/* Number of times NAPI processing got scheduled */
	uint32	napi_sched_cnt;
	/* Number of times NAPI processing ran on each available core */
	uint32	*napi_percpu_run_cnt;
	/* Number of times RX Completions got scheduled */
	uint32	rxc_sched_cnt;
	/* Number of times RX Completion ran on each available core */
	uint32	*rxc_percpu_run_cnt;
	/* Number of times TX Completions got scheduled */
	uint32	txc_sched_cnt;
	/* Number of times TX Completions ran on each available core */
	uint32	*txc_percpu_run_cnt;
	/* CPU status */
	/* Number of times each CPU came online */
	uint32	*cpu_online_cnt;
	/* Number of times each CPU went offline */
	uint32	*cpu_offline_cnt;

	/* Number of times TX processing ran on each core */
	uint32	*txp_percpu_run_cnt;
	/* Number of times TX start ran on each core */
	uint32	*tx_start_percpu_run_cnt;

	/* Tx load balancing */

	/* TODO: Need to see if batch processing is really required for TX
	 * processing. For RX the dongle can send a bunch of rx completions,
	 * hence we took a 3-queue approach:
	 *	enqueue - adds the skbs to rx_pend_queue
	 *	dispatch - uses a lock and adds the list of skbs from the pend
	 *	queue to the napi queue
	 *	napi processing - copies the pend_queue into a local queue and
	 *	works on it.
	 * But TX is going to be one skb at a time, so we are just thinking
	 * of using only one queue and using the lock-supported skb queue
	 * functions to add and process it. If it's inefficient we'll revisit
	 * the queue design.
	 */

	/* When the NET_TX tries to send a TX packet put it into tx_pend_queue */
	/* struct sk_buff_head		tx_pend_queue      ____cacheline_aligned; */
	/*
	 * From the tasklet that actually sends out data,
	 * copy the list tx_pend_queue into tx_active_queue. That way we only
	 * need the spinlock to perform the copy; the rest of the code, i.e. the
	 * code that constructs tx_pend_queue and the code that processes
	 * tx_active_queue, can be lockless. The concept is borrowed as-is from
	 * RX processing.
	 */
	/* struct sk_buff_head		tx_active_queue    ____cacheline_aligned; */

	/* Control TXP in runtime, enabled by default */
	atomic_t	lb_txp_active;

	/*
	 * When the NET_TX tries to send a TX packet put it into tx_pend_queue.
	 * For now, the processing tasklet will also directly operate on this
	 * queue.
	 */
	struct sk_buff_head	tx_pend_queue	____cacheline_aligned;

	/* cpu on which the DHD Tx is happening */
	atomic_t	tx_cpu;

	/* CPU on which the Network stack is calling the DHD's xmit function */
	atomic_t	net_tx_cpu;

	/* Tasklet context from which the DHD's TX processing happens */
	struct tasklet_struct tx_tasklet;

	/*
	 * Consumer Histogram - NAPI RX Packet processing
	 * -----------------------------------------------
	 * On each CPU, when the NAPI RX packet-processing callback is invoked,
	 * how many packets were processed is captured in this data structure.
	 * It is difficult to capture the "exact" number of packets processed,
	 * so with the packet counter being a 32-bit one, we have a bucket with
	 * HIST_BIN_SIZE bins (2^0, 2^1 ... 2^8). The number of packets
	 * processed is rounded up to the next power of 2 and the value in the
	 * appropriate bin gets incremented.
	 * For example, assume that on CPU 1 NAPI Rx runs 3 times and the
	 * packet counts processed are as follows (assume the bin counters are 0):
	 *	iteration 1 - 10 (the bin counter 2^4 increments to 1)
	 *	iteration 2 - 30 (the bin counter 2^5 increments to 1)
	 *	iteration 3 - 15 (the bin counter 2^4 increments by 1 to become 2)
	 */
	uint32 *napi_rx_hist[HIST_BIN_SIZE];
	uint32 *txc_hist[HIST_BIN_SIZE];
	uint32 *rxc_hist[HIST_BIN_SIZE];
#endif /* DHD_LB */

#ifdef SHOW_LOGTRACE
	struct work_struct	event_log_dispatcher_work;
#endif /* SHOW_LOGTRACE */

#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
	struct kobject dhd_kobj;
#ifdef SHOW_LOGTRACE
	struct sk_buff_head   evt_trace_queue     ____cacheline_aligned;
#endif
	struct timer_list timesync_timer;
#if defined(BT_OVER_SDIO)
	char btfw_path[PATH_MAX];
#endif /* defined (BT_OVER_SDIO) */

#ifdef WL_MONITOR
	struct net_device *monitor_dev;	/* monitor pseudo device */
	struct sk_buff *monitor_skb;
	uint	monitor_len;
	uint	monitor_type;	/* monitor pseudo device */
	monitor_info_t *monitor_info;
#endif /* WL_MONITOR */
	uint32 shub_enable;
#if defined(BT_OVER_SDIO)
	struct mutex bus_user_lock;	/* lock for sdio bus apis shared between WLAN & BT */
	int	bus_user_count;	/* User counts of sdio bus shared between WLAN & BT */
#endif /* BT_OVER_SDIO */
#ifdef DHD_DEBUG_UART
	bool duart_execute;
#endif
#ifdef PCIE_INB_DW
	wait_queue_head_t ds_exit_wait;
#endif /* PCIE_INB_DW */
} dhd_info_t;

#ifdef WL_MONITOR
#define MONPKT_EXTRA_LEN	48
#endif

#define DHDIF_FWDER(dhdif)	FALSE

#if defined(BT_OVER_SDIO)
/* Flag to indicate if driver is initialized */
uint dhd_driver_init_done = TRUE;
#else
/* Flag to indicate if driver is initialized */
uint dhd_driver_init_done = FALSE;
#endif
/* Flag to indicate if we should download firmware on driver load */
uint dhd_download_fw_on_driverload = TRUE;

/* Definitions to provide path to the firmware and nvram
 * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
 */
char firmware_path[MOD_PARAM_PATHLEN];
char nvram_path[MOD_PARAM_PATHLEN];
char clm_path[MOD_PARAM_PATHLEN];
char config_path[MOD_PARAM_PATHLEN];
#ifdef DHD_UCODE_DOWNLOAD
char ucode_path[MOD_PARAM_PATHLEN];
#endif /* DHD_UCODE_DOWNLOAD */

module_param_string(clm_path, clm_path, MOD_PARAM_PATHLEN, 0660);


/* backup buffer for firmware and nvram path */
char fw_bak_path[MOD_PARAM_PATHLEN];
char nv_bak_path[MOD_PARAM_PATHLEN];

/* information string to keep firmware, chip and chip-revision version info visible in the log */
char info_string[MOD_PARAM_INFOLEN];
module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444);
int op_mode = 0;
int disable_proptx = 0;
module_param(op_mode, int, 0644);
extern int wl_control_wl_start(struct net_device *dev);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (defined(BCMLXSDMMC) || defined(BCMDBUS))
struct semaphore dhd_registration_sem;
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */

/* deferred handlers */
static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event);
static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event);
static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event);
static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event);

#ifdef DHD_UPDATE_INTF_MAC
static void dhd_ifupdate_event_handler(void *handle, void *event_info, u8 event);
#endif /* DHD_UPDATE_INTF_MAC */
#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event);
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
#ifdef WL_CFG80211
extern void dhd_netdev_free(struct net_device *ndev);
#endif /* WL_CFG80211 */

#if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER))
/* update rx_pkt_chainable state of dhd interface */
static void dhd_update_rx_pkt_chainable_state(dhd_pub_t* dhdp, uint32 idx);
#endif /* DHD_WET || DHD_MCAST_REGEN || DHD_L2_FILTER */

#ifdef HOFFLOAD_MODULES
char dhd_hmem_module_string[MOD_PARAM_SRLEN];
module_param_string(dhd_hmem_module_string, dhd_hmem_module_string, MOD_PARAM_SRLEN, 0660);
#endif
/* Error bits */
module_param(dhd_msg_level, int, 0);
#if defined(WL_WIRELESS_EXT)
module_param(iw_msg_level, int, 0);
#endif
#ifdef WL_CFG80211
module_param(wl_dbg_level, int, 0);
#endif
module_param(android_msg_level, int, 0);
module_param(config_msg_level, int, 0);

#ifdef ARP_OFFLOAD_SUPPORT
/* ARP offload enable */
uint dhd_arp_enable = TRUE;
module_param(dhd_arp_enable, uint, 0);

/* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */

#ifdef ENABLE_ARP_SNOOP_MODE
uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_SNOOP | ARP_OL_HOST_AUTO_REPLY;
#else
uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY;
#endif /* ENABLE_ARP_SNOOP_MODE */

module_param(dhd_arp_mode, uint, 0);
#endif /* ARP_OFFLOAD_SUPPORT */

/* Disable Prop tx */
module_param(disable_proptx, int, 0644);
/* load firmware and/or nvram values from the filesystem */
module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660);
module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0660);
module_param_string(config_path, config_path, MOD_PARAM_PATHLEN, 0);
#ifdef DHD_UCODE_DOWNLOAD
module_param_string(ucode_path, ucode_path, MOD_PARAM_PATHLEN, 0660);
#endif /* DHD_UCODE_DOWNLOAD */

/* Watchdog interval */

/* extend watchdog expiration to 2 seconds when DPC is running */
#define WATCHDOG_EXTEND_INTERVAL (2000)

uint dhd_watchdog_ms = CUSTOM_DHD_WATCHDOG_MS;
module_param(dhd_watchdog_ms, uint, 0);

#ifdef DHD_PCIE_RUNTIMEPM
uint dhd_runtimepm_ms = CUSTOM_DHD_RUNTIME_MS;
#endif /* DHD_PCIE_RUNTIMEPM */
#if defined(DHD_DEBUG)
/* Console poll interval */
uint dhd_console_ms = 0;
module_param(dhd_console_ms, uint, 0644);
#else
uint dhd_console_ms = 0;
#endif /* DHD_DEBUG */

uint dhd_slpauto = TRUE;
module_param(dhd_slpauto, uint, 0);

#ifdef PKT_FILTER_SUPPORT
/* Global Pkt filter enable control */
uint dhd_pkt_filter_enable = TRUE;
module_param(dhd_pkt_filter_enable, uint, 0);
#endif

/* Pkt filter init setup */
uint dhd_pkt_filter_init = 0;
module_param(dhd_pkt_filter_init, uint, 0);

/* Pkt filter mode control */
#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
uint dhd_master_mode = FALSE;
#else
uint dhd_master_mode = FALSE;
#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
module_param(dhd_master_mode, uint, 0);

int dhd_watchdog_prio = 0;
module_param(dhd_watchdog_prio, int, 0);

/* DPC thread priority */
int dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING;
module_param(dhd_dpc_prio, int, 0);

/* RX frame thread priority */
int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING;
module_param(dhd_rxf_prio, int, 0);

#if !defined(BCMDBUS)
extern int dhd_dongle_ramsize;
module_param(dhd_dongle_ramsize, int, 0);
#endif /* !BCMDBUS */

#ifdef WL_CFG80211
int passive_channel_skip = 0;
module_param(passive_channel_skip, int, (S_IRUSR|S_IWUSR));
#endif /* WL_CFG80211 */

/* Keep track of number of instances */
static int dhd_found = 0;
static int instance_base = 0; /* Starting instance number */
module_param(instance_base, int, 0644);

#if defined(DHD_LB_RXP) && defined(PCIE_FULL_DONGLE)
static int dhd_napi_weight = 32;
module_param(dhd_napi_weight, int, 0644);
#endif /* DHD_LB_RXP && PCIE_FULL_DONGLE */

#ifdef PCIE_FULL_DONGLE
extern int h2d_max_txpost;
module_param(h2d_max_txpost, int, 0644);
#endif /* PCIE_FULL_DONGLE */

#ifdef DHD_DHCP_DUMP
struct bootp_fmt {
	struct iphdr ip_header;
	struct udphdr udp_header;
	uint8 op;
	uint8 htype;
	uint8 hlen;
	uint8 hops;
	uint32 transaction_id;
	uint16 secs;
	uint16 flags;
	uint32 client_ip;
	uint32 assigned_ip;
	uint32 server_ip;
	uint32 relay_ip;
	uint8 hw_address[16];
	uint8 server_name[64];
	uint8 file_name[128];
	uint8 options[312];
};

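/* The options[] field of bootp_fmt begins with the 4-byte DHCP magic cookie
 * 99.130.83.99 (RFC 2131), which is what the table below encodes.
 */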
static const uint8 bootp_magic_cookie[4] = { 99, 130, 83, 99 };
static const char dhcp_ops[][10] = {
	"NA", "REQUEST", "REPLY"
};
static const char dhcp_types[][10] = {
	"NA", "DISCOVER", "OFFER", "REQUEST", "DECLINE", "ACK", "NAK", "RELEASE", "INFORM"
};
static void dhd_dhcp_dump(char *ifname, uint8 *pktdata, bool tx);
#endif /* DHD_DHCP_DUMP */

#ifdef DHD_ICMP_DUMP
#include <net/icmp.h>
static void dhd_icmp_dump(char *ifname, uint8 *pktdata, bool tx);
#endif /* DHD_ICMP_DUMP */

/* Functions to manage sysfs interface for dhd */
static int dhd_sysfs_init(dhd_info_t *dhd);
static void dhd_sysfs_exit(dhd_info_t *dhd);

#ifdef SHOW_LOGTRACE
#if defined(CUSTOMER_HW4_DEBUG)
static char *logstrs_path = PLATFORM_PATH"logstrs.bin";
static char *st_str_file_path = PLATFORM_PATH"rtecdc.bin";
static char *map_file_path = PLATFORM_PATH"rtecdc.map";
static char *rom_st_str_file_path = PLATFORM_PATH"roml.bin";
static char *rom_map_file_path = PLATFORM_PATH"roml.map";
#elif defined(CUSTOMER_HW2)
static char *logstrs_path = "/data/misc/wifi/logstrs.bin";
static char *st_str_file_path = "/data/misc/wifi/rtecdc.bin";
static char *map_file_path = "/data/misc/wifi/rtecdc.map";
static char *rom_st_str_file_path = "/data/misc/wifi/roml.bin";
static char *rom_map_file_path = "/data/misc/wifi/roml.map";
#else
static char *logstrs_path = "/installmedia/logstrs.bin";
static char *st_str_file_path = "/installmedia/rtecdc.bin";
static char *map_file_path = "/installmedia/rtecdc.map";
static char *rom_st_str_file_path = "/installmedia/roml.bin";
static char *rom_map_file_path = "/installmedia/roml.map";
#endif /* CUSTOMER_HW4_DEBUG || CUSTOMER_HW2 */
static char *ram_file_str = "rtecdc";
static char *rom_file_str = "roml";

module_param(logstrs_path, charp, S_IRUGO);
module_param(st_str_file_path, charp, S_IRUGO);
module_param(map_file_path, charp, S_IRUGO);
module_param(rom_st_str_file_path, charp, S_IRUGO);
module_param(rom_map_file_path, charp, S_IRUGO);

static int dhd_init_logstrs_array(osl_t *osh, dhd_event_log_t *temp);
static int dhd_read_map(osl_t *osh, char *fname, uint32 *ramstart, uint32 *rodata_start,
	uint32 *rodata_end);
static int dhd_init_static_strs_array(osl_t *osh, dhd_event_log_t *temp, char *str_file,
	char *map_file);
#endif /* SHOW_LOGTRACE */

#if defined(DHD_LB)

static void
dhd_lb_set_default_cpus(dhd_info_t *dhd)
{
	/* Default CPU allocation for the jobs */
	atomic_set(&dhd->rx_napi_cpu, 1);
	atomic_set(&dhd->rx_compl_cpu, 2);
	atomic_set(&dhd->tx_compl_cpu, 2);
	atomic_set(&dhd->tx_cpu, 2);
	atomic_set(&dhd->net_tx_cpu, 0);
}

static void
dhd_cpumasks_deinit(dhd_info_t *dhd)
{
	free_cpumask_var(dhd->cpumask_curr_avail);
	free_cpumask_var(dhd->cpumask_primary);
	free_cpumask_var(dhd->cpumask_primary_new);
	free_cpumask_var(dhd->cpumask_secondary);
	free_cpumask_var(dhd->cpumask_secondary_new);
}

static int
dhd_cpumasks_init(dhd_info_t *dhd)
{
	int id;
	uint32 cpus, num_cpus = num_possible_cpus();
	int ret = 0;

	DHD_ERROR(("%s CPU masks primary(big)=0x%x secondary(little)=0x%x\n", __FUNCTION__,
		DHD_LB_PRIMARY_CPUS, DHD_LB_SECONDARY_CPUS));

	if (!alloc_cpumask_var(&dhd->cpumask_curr_avail, GFP_KERNEL) ||
	    !alloc_cpumask_var(&dhd->cpumask_primary, GFP_KERNEL) ||
	    !alloc_cpumask_var(&dhd->cpumask_primary_new, GFP_KERNEL) ||
	    !alloc_cpumask_var(&dhd->cpumask_secondary, GFP_KERNEL) ||
	    !alloc_cpumask_var(&dhd->cpumask_secondary_new, GFP_KERNEL)) {
		DHD_ERROR(("%s Failed to init cpumasks\n", __FUNCTION__));
		ret = -ENOMEM;
		goto fail;
	}

	cpumask_copy(dhd->cpumask_curr_avail, cpu_online_mask);
	cpumask_clear(dhd->cpumask_primary);
	cpumask_clear(dhd->cpumask_secondary);

	if (num_cpus > 32) {
		DHD_ERROR(("%s max cpus must be 32, %d too big\n", __FUNCTION__, num_cpus));
		ASSERT(0);
	}

	cpus = DHD_LB_PRIMARY_CPUS;
	for (id = 0; id < num_cpus; id++) {
		if (isset(&cpus, id))
			cpumask_set_cpu(id, dhd->cpumask_primary);
	}

	cpus = DHD_LB_SECONDARY_CPUS;
	for (id = 0; id < num_cpus; id++) {
		if (isset(&cpus, id))
			cpumask_set_cpu(id, dhd->cpumask_secondary);
	}

	return ret;
fail:
	dhd_cpumasks_deinit(dhd);
	return ret;
}

/*
 * The CPU Candidacy Algorithm
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * The available CPUs for selection are divided into two groups:
 *	Primary Set   - A CPU mask that carries the First Choice CPUs
 *	Secondary Set - A CPU mask that carries the Second Choice CPUs.
 *
 * There are two types of jobs that need to be assigned to
 * the CPUs, from one of the above mentioned CPU groups. The jobs are
 * 1) Rx Packet Processing - napi_cpu
 * 2) Completion Processing (Tx, RX) - compl_cpu
 *
 * To begin with, both napi_cpu and compl_cpu are on CPU0. Whenever a CPU goes
 * on-line/off-line the CPU candidacy algorithm is triggered. The candidacy
 * algo tries to pick up the first available non-boot CPU (CPU0) for napi_cpu.
 * If there are more processors free, it assigns one to compl_cpu.
 * It also tries to ensure that napi_cpu and compl_cpu are not on the same
 * CPU, as much as possible.
 *
 * By design, both Tx and Rx completion jobs run on the same CPU core, as that
 * allows Tx completion skb's to be released into a local free pool from
 * which the rx buffer posts can be serviced. It is important to note
 * that a Tx packet may not have a large enough buffer for rx posting.
 */
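/* Worked example (hypothetical masks, not taken from any shipping config):
 * with DHD_LB_PRIMARY_CPUS = 0xF0 (CPUs 4-7) and DHD_LB_SECONDARY_CPUS = 0x0E
 * (CPUs 1-3), if only CPUs 4 and 5 of the primary set are online, the routine
 * below picks napi_cpu = 4 and tx_cpu = 5, and with no third primary CPU free
 * it folds compl_cpu back onto tx_cpu (5). If the whole primary set goes
 * offline, the jobs fall back to the first CPUs of the secondary set instead.
 */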
void dhd_select_cpu_candidacy(dhd_info_t *dhd)
{
	uint32 primary_available_cpus;   /* count of primary available cpus */
	uint32 secondary_available_cpus; /* count of secondary available cpus */
	uint32 napi_cpu = 0;  /* cpu selected for napi rx processing */
	uint32 compl_cpu = 0; /* cpu selected for completion jobs */
	uint32 tx_cpu = 0;    /* cpu selected for tx processing job */

	cpumask_clear(dhd->cpumask_primary_new);
	cpumask_clear(dhd->cpumask_secondary_new);

	/*
	 * Now select from the primary mask. Even if a Job is
	 * already running on a CPU in secondary group, we still move
	 * to primary CPU. So no conditional checks.
	 */
	cpumask_and(dhd->cpumask_primary_new, dhd->cpumask_primary,
		dhd->cpumask_curr_avail);

	cpumask_and(dhd->cpumask_secondary_new, dhd->cpumask_secondary,
		dhd->cpumask_curr_avail);

	primary_available_cpus = cpumask_weight(dhd->cpumask_primary_new);

	if (primary_available_cpus > 0) {
		napi_cpu = cpumask_first(dhd->cpumask_primary_new);

		/* If no further CPU is available,
		 * cpumask_next returns >= nr_cpu_ids
		 */
		tx_cpu = cpumask_next(napi_cpu, dhd->cpumask_primary_new);
		if (tx_cpu >= nr_cpu_ids)
			tx_cpu = 0;

		/* In case there are no more CPUs, do completions & Tx in same CPU */
		compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_primary_new);
		if (compl_cpu >= nr_cpu_ids)
			compl_cpu = tx_cpu;
	}

	DHD_INFO(("%s After primary CPU check napi_cpu %d compl_cpu %d tx_cpu %d\n",
		__FUNCTION__, napi_cpu, compl_cpu, tx_cpu));

	/* -- Now check for the CPUs from the secondary mask -- */
	secondary_available_cpus = cpumask_weight(dhd->cpumask_secondary_new);

	DHD_INFO(("%s Available secondary cpus %d nr_cpu_ids %d\n",
		__FUNCTION__, secondary_available_cpus, nr_cpu_ids));

	if (secondary_available_cpus > 0) {
		/* At this point if napi_cpu is unassigned it means no CPU
		 * is online from Primary Group
		 */
		if (napi_cpu == 0) {
			napi_cpu = cpumask_first(dhd->cpumask_secondary_new);
			tx_cpu = cpumask_next(napi_cpu, dhd->cpumask_secondary_new);
			compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_secondary_new);
		} else if (tx_cpu == 0) {
			tx_cpu = cpumask_first(dhd->cpumask_secondary_new);
			compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_secondary_new);
		} else if (compl_cpu == 0) {
			compl_cpu = cpumask_first(dhd->cpumask_secondary_new);
		}

		/* If no CPU was available for tx processing, choose CPU 0 */
		if (tx_cpu >= nr_cpu_ids)
			tx_cpu = 0;

		/* If no CPU was available for completion, choose CPU 0 */
		if (compl_cpu >= nr_cpu_ids)
			compl_cpu = 0;
	}
	if ((primary_available_cpus == 0) &&
		(secondary_available_cpus == 0)) {
		/* No CPUs available from primary or secondary mask */
		napi_cpu = 1;
		compl_cpu = 0;
		tx_cpu = 2;
	}

	DHD_INFO(("%s After secondary CPU check napi_cpu %d compl_cpu %d tx_cpu %d\n",
		__FUNCTION__, napi_cpu, compl_cpu, tx_cpu));

	ASSERT(napi_cpu < nr_cpu_ids);
	ASSERT(compl_cpu < nr_cpu_ids);
	ASSERT(tx_cpu < nr_cpu_ids);

	atomic_set(&dhd->rx_napi_cpu, napi_cpu);
	atomic_set(&dhd->tx_compl_cpu, compl_cpu);
	atomic_set(&dhd->rx_compl_cpu, compl_cpu);
	atomic_set(&dhd->tx_cpu, tx_cpu);

	return;
}

/*
 * Function to handle CPU hotplug notifications.
 * One of the tasks it does is to trigger the CPU candidacy algorithm
 * for load balancing.
 */
int
dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	unsigned long int cpu = (unsigned long int)hcpu;

#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
	dhd_info_t *dhd = container_of(nfb, dhd_info_t, cpu_notifier);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif

	if (!dhd || !(dhd->dhd_state & DHD_ATTACH_STATE_LB_ATTACH_DONE)) {
		DHD_INFO(("%s(): LB data is not initialized yet.\n",
			__FUNCTION__));
		return NOTIFY_BAD;
	}

	switch (action)
	{
		case CPU_ONLINE:
		case CPU_ONLINE_FROZEN:
			DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]);
			cpumask_set_cpu(cpu, dhd->cpumask_curr_avail);
			dhd_select_cpu_candidacy(dhd);
			break;

		case CPU_DOWN_PREPARE:
		case CPU_DOWN_PREPARE_FROZEN:
			DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]);
			cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail);
			dhd_select_cpu_candidacy(dhd);
			break;
		default:
			break;
	}

	return NOTIFY_OK;
}

#if defined(DHD_LB_STATS)
void dhd_lb_stats_init(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd;
	int i, j, num_cpus = num_possible_cpus();
	int alloc_size = sizeof(uint32) * num_cpus;

	if (dhdp == NULL) {
		DHD_ERROR(("%s(): Invalid argument dhd pub pointer is NULL \n",
			__FUNCTION__));
		return;
	}

	dhd = dhdp->info;
	if (dhd == NULL) {
		DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
		return;
	}

	DHD_LB_STATS_CLR(dhd->dhd_dpc_cnt);
	DHD_LB_STATS_CLR(dhd->napi_sched_cnt);

	dhd->napi_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
	if (!dhd->napi_percpu_run_cnt) {
		DHD_ERROR(("%s(): napi_percpu_run_cnt malloc failed \n",
			__FUNCTION__));
		return;
	}
	for (i = 0; i < num_cpus; i++)
		DHD_LB_STATS_CLR(dhd->napi_percpu_run_cnt[i]);

	DHD_LB_STATS_CLR(dhd->rxc_sched_cnt);

	dhd->rxc_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
	if (!dhd->rxc_percpu_run_cnt) {
		DHD_ERROR(("%s(): rxc_percpu_run_cnt malloc failed \n",
			__FUNCTION__));
		return;
	}
	for (i = 0; i < num_cpus; i++)
		DHD_LB_STATS_CLR(dhd->rxc_percpu_run_cnt[i]);

	DHD_LB_STATS_CLR(dhd->txc_sched_cnt);

	dhd->txc_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
	if (!dhd->txc_percpu_run_cnt) {
		DHD_ERROR(("%s(): txc_percpu_run_cnt malloc failed \n",
			__FUNCTION__));
		return;
	}
	for (i = 0; i < num_cpus; i++)
		DHD_LB_STATS_CLR(dhd->txc_percpu_run_cnt[i]);

	dhd->cpu_online_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
	if (!dhd->cpu_online_cnt) {
		DHD_ERROR(("%s(): cpu_online_cnt malloc failed \n",
			__FUNCTION__));
		return;
	}
	for (i = 0; i < num_cpus; i++)
		DHD_LB_STATS_CLR(dhd->cpu_online_cnt[i]);

	dhd->cpu_offline_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
	if (!dhd->cpu_offline_cnt) {
		DHD_ERROR(("%s(): cpu_offline_cnt malloc failed \n",
			__FUNCTION__));
		return;
	}
	for (i = 0; i < num_cpus; i++)
		DHD_LB_STATS_CLR(dhd->cpu_offline_cnt[i]);

	dhd->txp_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
	if (!dhd->txp_percpu_run_cnt) {
		DHD_ERROR(("%s(): txp_percpu_run_cnt malloc failed \n",
			__FUNCTION__));
		return;
	}
	for (i = 0; i < num_cpus; i++)
		DHD_LB_STATS_CLR(dhd->txp_percpu_run_cnt[i]);

	dhd->tx_start_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
	if (!dhd->tx_start_percpu_run_cnt) {
		DHD_ERROR(("%s(): tx_start_percpu_run_cnt malloc failed \n",
			__FUNCTION__));
		return;
	}
	for (i = 0; i < num_cpus; i++)
		DHD_LB_STATS_CLR(dhd->tx_start_percpu_run_cnt[i]);

	for (j = 0; j < HIST_BIN_SIZE; j++) {
		dhd->napi_rx_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size);
		if (!dhd->napi_rx_hist[j]) {
			DHD_ERROR(("%s(): dhd->napi_rx_hist[%d] malloc failed \n",
				__FUNCTION__, j));
			return;
		}
		for (i = 0; i < num_cpus; i++) {
			DHD_LB_STATS_CLR(dhd->napi_rx_hist[j][i]);
		}
	}
#ifdef DHD_LB_TXC
	for (j = 0; j < HIST_BIN_SIZE; j++) {
		dhd->txc_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size);
		if (!dhd->txc_hist[j]) {
			DHD_ERROR(("%s(): dhd->txc_hist[%d] malloc failed \n",
				__FUNCTION__, j));
			return;
		}
		for (i = 0; i < num_cpus; i++) {
			DHD_LB_STATS_CLR(dhd->txc_hist[j][i]);
		}
	}
#endif /* DHD_LB_TXC */
#ifdef DHD_LB_RXC
	for (j = 0; j < HIST_BIN_SIZE; j++) {
		dhd->rxc_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size);
		if (!dhd->rxc_hist[j]) {
			DHD_ERROR(("%s(): dhd->rxc_hist[%d] malloc failed \n",
				__FUNCTION__, j));
			return;
		}
		for (i = 0; i < num_cpus; i++) {
			DHD_LB_STATS_CLR(dhd->rxc_hist[j][i]);
		}
	}
#endif /* DHD_LB_RXC */
	return;
}

void dhd_lb_stats_deinit(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd;
	int j, num_cpus = num_possible_cpus();
	int alloc_size = sizeof(uint32) * num_cpus;

	if (dhdp == NULL) {
		DHD_ERROR(("%s(): Invalid argument dhd pub pointer is NULL \n",
			__FUNCTION__));
		return;
	}

	dhd = dhdp->info;
	if (dhd == NULL) {
		DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
		return;
	}

	if (dhd->napi_percpu_run_cnt) {
		MFREE(dhdp->osh, dhd->napi_percpu_run_cnt, alloc_size);
		dhd->napi_percpu_run_cnt = NULL;
	}
	if (dhd->rxc_percpu_run_cnt) {
		MFREE(dhdp->osh, dhd->rxc_percpu_run_cnt, alloc_size);
		dhd->rxc_percpu_run_cnt = NULL;
	}
	if (dhd->txc_percpu_run_cnt) {
		MFREE(dhdp->osh, dhd->txc_percpu_run_cnt, alloc_size);
		dhd->txc_percpu_run_cnt = NULL;
	}
	if (dhd->cpu_online_cnt) {
		MFREE(dhdp->osh, dhd->cpu_online_cnt, alloc_size);
		dhd->cpu_online_cnt = NULL;
	}
	if (dhd->cpu_offline_cnt) {
		MFREE(dhdp->osh, dhd->cpu_offline_cnt, alloc_size);
		dhd->cpu_offline_cnt = NULL;
	}

	if (dhd->txp_percpu_run_cnt) {
		MFREE(dhdp->osh, dhd->txp_percpu_run_cnt, alloc_size);
		dhd->txp_percpu_run_cnt = NULL;
	}
	if (dhd->tx_start_percpu_run_cnt) {
		MFREE(dhdp->osh, dhd->tx_start_percpu_run_cnt, alloc_size);
		dhd->tx_start_percpu_run_cnt = NULL;
	}

	for (j = 0; j < HIST_BIN_SIZE; j++) {
		if (dhd->napi_rx_hist[j]) {
			MFREE(dhdp->osh, dhd->napi_rx_hist[j], alloc_size);
			dhd->napi_rx_hist[j] = NULL;
		}
#ifdef DHD_LB_TXC
		if (dhd->txc_hist[j]) {
			MFREE(dhdp->osh, dhd->txc_hist[j], alloc_size);
			dhd->txc_hist[j] = NULL;
		}
#endif /* DHD_LB_TXC */
#ifdef DHD_LB_RXC
		if (dhd->rxc_hist[j]) {
			MFREE(dhdp->osh, dhd->rxc_hist[j], alloc_size);
			dhd->rxc_hist[j] = NULL;
		}
#endif /* DHD_LB_RXC */
	}

	return;
}

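/* Note: the per-CPU totals printed below are reconstructed as
 * sum(bin_count * 2^bin), so they are round-up estimates of the true packet
 * counts, since every sample was rounded up to a power of 2 when binned.
 */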
static void dhd_lb_stats_dump_histo(
	struct bcmstrbuf *strbuf, uint32 **hist)
{
	int i, j;
	uint32 *per_cpu_total;
	uint32 total = 0;
	uint32 num_cpus = num_possible_cpus();

	per_cpu_total = (uint32 *)kmalloc(sizeof(uint32) * num_cpus, GFP_ATOMIC);
	if (!per_cpu_total) {
		DHD_ERROR(("%s(): dhd->per_cpu_total malloc failed \n", __FUNCTION__));
		return;
	}
	bzero(per_cpu_total, sizeof(uint32) * num_cpus);

	bcm_bprintf(strbuf, "CPU: \t\t");
	for (i = 0; i < num_cpus; i++)
		bcm_bprintf(strbuf, "%d\t", i);
	bcm_bprintf(strbuf, "\nBin\n");

	for (i = 0; i < HIST_BIN_SIZE; i++) {
		bcm_bprintf(strbuf, "%d:\t\t", 1<<i);
		for (j = 0; j < num_cpus; j++) {
			bcm_bprintf(strbuf, "%d\t", hist[i][j]);
		}
		bcm_bprintf(strbuf, "\n");
	}
	bcm_bprintf(strbuf, "Per CPU Total \t");
	total = 0;
	for (i = 0; i < num_cpus; i++) {
		for (j = 0; j < HIST_BIN_SIZE; j++) {
			per_cpu_total[i] += (hist[j][i] * (1<<j));
		}
		bcm_bprintf(strbuf, "%d\t", per_cpu_total[i]);
		total += per_cpu_total[i];
	}
	bcm_bprintf(strbuf, "\nTotal\t\t%d \n", total);

	kfree(per_cpu_total);
	return;
}

static inline void dhd_lb_stats_dump_cpu_array(struct bcmstrbuf *strbuf, uint32 *p)
{
	int i, num_cpus = num_possible_cpus();

	bcm_bprintf(strbuf, "CPU: \t");
	for (i = 0; i < num_cpus; i++)
		bcm_bprintf(strbuf, "%d\t", i);
	bcm_bprintf(strbuf, "\n");

	bcm_bprintf(strbuf, "Val: \t");
	for (i = 0; i < num_cpus; i++)
		bcm_bprintf(strbuf, "%u\t", *(p+i));
	bcm_bprintf(strbuf, "\n");
	return;
}

dhd_lb_stats_dump(dhd_pub_t * dhdp,struct bcmstrbuf * strbuf)1669 void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
1670 {
1671 dhd_info_t *dhd;
1672
1673 if (dhdp == NULL || strbuf == NULL) {
1674 DHD_ERROR(("%s(): Invalid argument dhdp %p strbuf %p \n",
1675 __FUNCTION__, dhdp, strbuf));
1676 return;
1677 }
1678
1679 dhd = dhdp->info;
1680 if (dhd == NULL) {
1681 DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
1682 return;
1683 }
1684
1685 bcm_bprintf(strbuf, "\ncpu_online_cnt:\n");
1686 dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_online_cnt);
1687
1688 bcm_bprintf(strbuf, "\ncpu_offline_cnt:\n");
1689 dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_offline_cnt);
1690
1691 bcm_bprintf(strbuf, "\nsched_cnt: dhd_dpc %u napi %u rxc %u txc %u\n",
1692 dhd->dhd_dpc_cnt, dhd->napi_sched_cnt, dhd->rxc_sched_cnt,
1693 dhd->txc_sched_cnt);
1694
1695 #ifdef DHD_LB_RXP
1696 bcm_bprintf(strbuf, "\nnapi_percpu_run_cnt:\n");
1697 dhd_lb_stats_dump_cpu_array(strbuf, dhd->napi_percpu_run_cnt);
1698 bcm_bprintf(strbuf, "\nNAPI Packets Received Histogram:\n");
1699 dhd_lb_stats_dump_histo(strbuf, dhd->napi_rx_hist);
1700 #endif /* DHD_LB_RXP */
1701
1702 #ifdef DHD_LB_RXC
1703 bcm_bprintf(strbuf, "\nrxc_percpu_run_cnt:\n");
1704 dhd_lb_stats_dump_cpu_array(strbuf, dhd->rxc_percpu_run_cnt);
1705 bcm_bprintf(strbuf, "\nRX Completions (Buffer Post) Histogram:\n");
1706 dhd_lb_stats_dump_histo(strbuf, dhd->rxc_hist);
1707 #endif /* DHD_LB_RXC */
1708
1709 #ifdef DHD_LB_TXC
1710 bcm_bprintf(strbuf, "\ntxc_percpu_run_cnt:\n");
1711 dhd_lb_stats_dump_cpu_array(strbuf, dhd->txc_percpu_run_cnt);
1712 bcm_bprintf(strbuf, "\nTX Completions (Buffer Free) Histogram:\n");
1713 dhd_lb_stats_dump_histo(strbuf, dhd->txc_hist);
1714 #endif /* DHD_LB_TXC */
1715
1716 #ifdef DHD_LB_TXP
1717 bcm_bprintf(strbuf, "\ntxp_percpu_run_cnt:\n");
1718 dhd_lb_stats_dump_cpu_array(strbuf, dhd->txp_percpu_run_cnt);
1719
1720 bcm_bprintf(strbuf, "\ntx_start_percpu_run_cnt:\n");
1721 dhd_lb_stats_dump_cpu_array(strbuf, dhd->tx_start_percpu_run_cnt);
1722 #endif /* DHD_LB_TXP */
1723
1724 bcm_bprintf(strbuf, "\nCPU masks primary(big)=0x%x secondary(little)=0x%x\n",
1725 DHD_LB_PRIMARY_CPUS, DHD_LB_SECONDARY_CPUS);
1726
1727 bcm_bprintf(strbuf, "napi_cpu %x tx_cpu %x\n",
1728 atomic_read(&dhd->rx_napi_cpu), atomic_read(&dhd->tx_cpu));
1729 }
1730
1731 /* Given a number 'n' returns 'm' that is next larger power of 2 after n */
next_larger_power2(uint32 num)1732 static inline uint32 next_larger_power2(uint32 num)
1733 {
1734 num--;
1735 num |= (num >> 1);
1736 num |= (num >> 2);
1737 num |= (num >> 4);
1738 num |= (num >> 8);
1739 num |= (num >> 16);
1740
1741 return (num + 1);
1742 }
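
/*
 * Worked example: next_larger_power2(33).
 * 33 - 1 = 32 = 0b100000; the shift/OR cascade smears the top set bit into
 * every lower bit, giving 0b111111 = 63; 63 + 1 = 64. An exact power of two
 * maps to itself (next_larger_power2(32) == 32), which is why the function
 * decrements num first.
 */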

static void dhd_lb_stats_update_histo(uint32 **bin, uint32 count, uint32 cpu)
{
	uint32 bin_power;
	uint32 *p;
	bin_power = next_larger_power2(count);

	switch (bin_power) {
		case 1:   p = bin[0] + cpu; break;
		case 2:   p = bin[1] + cpu; break;
		case 4:   p = bin[2] + cpu; break;
		case 8:   p = bin[3] + cpu; break;
		case 16:  p = bin[4] + cpu; break;
		case 32:  p = bin[5] + cpu; break;
		case 64:  p = bin[6] + cpu; break;
		case 128: p = bin[7] + cpu; break;
		default : p = bin[8] + cpu; break;
	}

	*p = *p + 1;
	return;
}
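
/*
 * Example of the bin mapping above: a batch of count = 5 rounds up to
 * bin_power = 8, so bin[3][cpu] is incremented; any batch larger than 128
 * falls through to the overflow slot bin[8][cpu]. Note this assumes
 * HIST_BIN_SIZE leaves room for all nine bins used here.
 */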

extern void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count)
{
	int cpu;
	dhd_info_t *dhd = dhdp->info;

	cpu = get_cpu();
	put_cpu();
	dhd_lb_stats_update_histo(dhd->napi_rx_hist, count, cpu);

	return;
}

extern void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count)
{
	int cpu;
	dhd_info_t *dhd = dhdp->info;

	cpu = get_cpu();
	put_cpu();
	dhd_lb_stats_update_histo(dhd->txc_hist, count, cpu);

	return;
}

extern void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count)
{
	int cpu;
	dhd_info_t *dhd = dhdp->info;

	cpu = get_cpu();
	put_cpu();
	dhd_lb_stats_update_histo(dhd->rxc_hist, count, cpu);

	return;
}

extern void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = dhdp->info;
	DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txc_percpu_run_cnt);
}

extern void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = dhdp->info;
	DHD_LB_STATS_PERCPU_ARR_INCR(dhd->rxc_percpu_run_cnt);
}
#endif /* DHD_LB_STATS */

#endif /* DHD_LB */

#if defined(DISABLE_FRAMEBURST_VSDB) && defined(USE_WFA_CERT_CONF)
int g_frameburst = 1;
#endif /* DISABLE_FRAMEBURST_VSDB && USE_WFA_CERT_CONF */

static int dhd_get_pend_8021x_cnt(dhd_info_t *dhd);

/* The DHD perimeter lock is only used in routers with bypass forwarding. */
#define DHD_PERIM_RADIO_INIT() do { } while (0)
#define DHD_PERIM_LOCK_TRY(unit, flag) do { } while (0)
#define DHD_PERIM_UNLOCK_TRY(unit, flag) do { } while (0)

#ifdef PCIE_FULL_DONGLE
#if defined(BCM_GMAC3)
#define DHD_IF_STA_LIST_LOCK_INIT(ifp) do { } while (0)
#define DHD_IF_STA_LIST_LOCK(ifp, flags) ({ BCM_REFERENCE(flags); })
#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) ({ BCM_REFERENCE(flags); })

#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ BCM_REFERENCE(slist); &(ifp)->sta_list; })
#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ BCM_REFERENCE(slist); })
#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */

#else /* ! BCM_GMAC3 */
#define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
#define DHD_IF_STA_LIST_LOCK(ifp, flags) \
	spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \
	spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))

#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
static struct list_head * dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp,
	struct list_head *snapshot_list);
static void dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list);
#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ dhd_sta_list_snapshot(dhd, ifp, slist); })
#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ dhd_sta_list_snapshot_free(dhd, slist); })
#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */

#endif /* ! BCM_GMAC3 */
#endif /* PCIE_FULL_DONGLE */

/* Control fw roaming */
uint dhd_roam_disable = 0;

#ifdef BCMDBGFS
extern void dhd_dbgfs_init(dhd_pub_t *dhdp);
extern void dhd_dbgfs_remove(void);
#endif


/* Control radio state */
uint dhd_radio_up = 1;

/* Network interface name */
char iface_name[IFNAMSIZ] = {'\0'};
module_param_string(iface_name, iface_name, IFNAMSIZ, 0);

/* The following are specific to the SDIO dongle */

/* IOCTL response timeout */
int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;

/* DS Exit response timeout */
int ds_exit_timeout_msec = DS_EXIT_TIMEOUT;

/* Idle timeout for backplane clock */
int dhd_idletime = DHD_IDLETIME_TICKS;
module_param(dhd_idletime, int, 0);

/* Use polling */
uint dhd_poll = FALSE;
module_param(dhd_poll, uint, 0);

/* Use interrupts */
uint dhd_intr = TRUE;
module_param(dhd_intr, uint, 0);

/* SDIO Drive Strength (in milliamps) */
uint dhd_sdiod_drive_strength = 6;
module_param(dhd_sdiod_drive_strength, uint, 0);

#ifdef BCMSDIO
/* Tx/Rx bounds */
extern uint dhd_txbound;
extern uint dhd_rxbound;
module_param(dhd_txbound, uint, 0);
module_param(dhd_rxbound, uint, 0);

/* Deferred transmits */
extern uint dhd_deferred_tx;
module_param(dhd_deferred_tx, uint, 0);

#endif /* BCMSDIO */


#ifdef SDTEST
/* Echo packet generator (pkts/s) */
uint dhd_pktgen = 0;
module_param(dhd_pktgen, uint, 0);

/* Echo packet len (0 => sawtooth, max 2040) */
uint dhd_pktgen_len = 0;
module_param(dhd_pktgen_len, uint, 0);
#endif /* SDTEST */



#ifndef BCMDBUS
/* Allow delayed firmware download for debug purpose */
int allow_delay_fwdl = FALSE;
module_param(allow_delay_fwdl, int, 0);
#endif /* !BCMDBUS */

extern char dhd_version[];
extern char fw_version[];
extern char clm_version[];

int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
static void dhd_net_if_lock_local(dhd_info_t *dhd);
static void dhd_net_if_unlock_local(dhd_info_t *dhd);
static void dhd_suspend_lock(dhd_pub_t *dhdp);
static void dhd_suspend_unlock(dhd_pub_t *dhdp);

#ifdef WLMEDIA_HTSF
void htsf_update(dhd_info_t *dhd, void *data);
tsf_t prev_tsf, cur_tsf;

uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx);
static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx);
static void dhd_dump_latency(void);
static void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf);
static void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf);
static void dhd_dump_htsfhisto(histo_t *his, char *s);
#endif /* WLMEDIA_HTSF */

/* Monitor interface */
int dhd_monitor_init(void *dhd_pub);
int dhd_monitor_uninit(void);


#if defined(WL_WIRELESS_EXT)
struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
#endif /* defined(WL_WIRELESS_EXT) */

#ifndef BCMDBUS
static void dhd_dpc(ulong data);
#endif /* !BCMDBUS */
/* forward decl */
extern int dhd_wait_pend8021x(struct net_device *dev);
void dhd_os_wd_timer_extend(void *bus, bool extend);

#ifdef TOE
#ifndef BDC
#error TOE requires BDC
#endif /* !BDC */
static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
#endif /* TOE */

static int dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen,
	wl_event_msg_t *event_ptr, void **data_ptr);

#if defined(CONFIG_PM_SLEEP)
static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
{
	int ret = NOTIFY_DONE;
	bool suspend = FALSE;

#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
	dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif

	BCM_REFERENCE(dhdinfo);
	BCM_REFERENCE(suspend);

	switch (action) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		suspend = TRUE;
		break;

	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		suspend = FALSE;
		break;
	}

#if defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS)
	if (suspend) {
		DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub);
		dhd_wlfc_suspend(&dhdinfo->pub);
		DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub);
	} else {
		dhd_wlfc_resume(&dhdinfo->pub);
	}
#endif /* defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
	KERNEL_VERSION(2, 6, 39))
	dhd_mmc_suspend = suspend;
	smp_mb();
#endif

	return ret;
}

/* Make sure we never register the same notifier twice; doing so would link
 * the kernel notifier list into a loop (an entry's 'next' pointing to itself).
 */
static bool dhd_pm_notifier_registered = FALSE;

extern int register_pm_notifier(struct notifier_block *nb);
extern int unregister_pm_notifier(struct notifier_block *nb);
#endif /* CONFIG_PM_SLEEP */

/* Request scheduling of the bus rx frame */
static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb);
static void dhd_os_rxflock(dhd_pub_t *pub);
static void dhd_os_rxfunlock(dhd_pub_t *pub);

/** priv_link is the link between netdev and the dhdif and dhd_info structs. */
typedef struct dhd_dev_priv {
	dhd_info_t * dhd; /* cached pointer to dhd_info in netdevice priv */
	dhd_if_t * ifp; /* cached pointer to dhd_if in netdevice priv */
	int ifidx; /* interface index */
	void * lkup;
} dhd_dev_priv_t;

#define DHD_DEV_PRIV_SIZE (sizeof(dhd_dev_priv_t))
#define DHD_DEV_PRIV(dev) ((dhd_dev_priv_t *)DEV_PRIV(dev))
#define DHD_DEV_INFO(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
#define DHD_DEV_IFP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
#define DHD_DEV_IFIDX(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)
#define DHD_DEV_LKUP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->lkup)
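
/*
 * Usage sketch (illustrative, not a new API): inside a netdev op the driver
 * recovers its context from the net_device private area, e.g.
 *
 *   dhd_info_t *dhd = DHD_DEV_INFO(net);
 *   int ifidx = DHD_DEV_IFIDX(net);
 *
 * Caching these pointers in the priv area avoids a table search on every
 * packet in the hot path.
 */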

#if defined(DHD_OF_SUPPORT)
extern int dhd_wlan_init(void);
#endif /* defined(DHD_OF_SUPPORT) */
/** Clear the dhd net_device's private structure. */
static inline void
dhd_dev_priv_clear(struct net_device * dev)
{
	dhd_dev_priv_t * dev_priv;
	ASSERT(dev != (struct net_device *)NULL);
	dev_priv = DHD_DEV_PRIV(dev);
	dev_priv->dhd = (dhd_info_t *)NULL;
	dev_priv->ifp = (dhd_if_t *)NULL;
	dev_priv->ifidx = DHD_BAD_IF;
	dev_priv->lkup = (void *)NULL;
}

/** Setup the dhd net_device's private structure. */
static inline void
dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp,
	int ifidx)
{
	dhd_dev_priv_t * dev_priv;
	ASSERT(dev != (struct net_device *)NULL);
	dev_priv = DHD_DEV_PRIV(dev);
	dev_priv->dhd = dhd;
	dev_priv->ifp = ifp;
	dev_priv->ifidx = ifidx;
}

#ifdef PCIE_FULL_DONGLE

/** Dummy objects are defined with state representing bad|down.
 * The performance gains come from reducing branch conditionals, improving
 * instruction parallelism and dual issue, reducing load shadows, and making
 * larger pipelines available.
 * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL, whenever an object pointer
 * is accessed via the dhd_sta_t.
 */

/* Dummy dhd_info object */
dhd_info_t dhd_info_null = {
#if defined(BCM_GMAC3)
	.fwdh = FWDER_NULL,
#endif
	.pub = {
		.info = &dhd_info_null,
#ifdef DHDTCPACK_SUPPRESS
		.tcpack_sup_mode = TCPACK_SUP_REPLACE,
#endif /* DHDTCPACK_SUPPRESS */
#if defined(TRAFFIC_MGMT_DWM)
		.dhd_tm_dwm_tbl = { .dhd_dwm_enabled = TRUE },
#endif
		.up = FALSE,
		.busstate = DHD_BUS_DOWN
	}
};
#define DHD_INFO_NULL (&dhd_info_null)
#define DHD_PUB_NULL (&dhd_info_null.pub)

/* Dummy netdevice object */
struct net_device dhd_net_dev_null = {
	.reg_state = NETREG_UNREGISTERED
};
#define DHD_NET_DEV_NULL (&dhd_net_dev_null)

/* Dummy dhd_if object */
dhd_if_t dhd_if_null = {
#if defined(BCM_GMAC3)
	.fwdh = FWDER_NULL,
#endif
#ifdef WMF
	.wmf = { .wmf_enable = TRUE },
#endif
	.info = DHD_INFO_NULL,
	.net = DHD_NET_DEV_NULL,
	.idx = DHD_BAD_IF
};
#define DHD_IF_NULL (&dhd_if_null)

#define DHD_STA_NULL ((dhd_sta_t *)NULL)

/** Interface STA list management. */

/** Fetch the dhd_if object, given the interface index in the dhd. */
static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx);

/** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */
static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta);
static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp);

/* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */
static void dhd_if_del_sta_list(dhd_if_t * ifp);
static void dhd_if_flush_sta(dhd_if_t * ifp);

/* Construct/Destruct a sta pool. */
static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta);
static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta);
/* Clear the pool of dhd_sta_t objects for built-in type driver */
static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta);


/* Return interface pointer */
static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx)
{
	ASSERT(ifidx < DHD_MAX_IFS);

	if (ifidx >= DHD_MAX_IFS)
		return NULL;

	return dhdp->info->iflist[ifidx];
}

/** Reset a dhd_sta object and free into the dhd pool. */
static void
dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta)
{
	int prio;

	ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID));

	ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));

	/*
	 * Flush and free all packets in all flowring's queues belonging to sta.
	 * Packets in flow ring will be flushed later.
	 */
	for (prio = 0; prio < (int)NUMPRIO; prio++) {
		uint16 flowid = sta->flowid[prio];

		if (flowid != FLOWID_INVALID) {
			unsigned long flags;
			flow_queue_t * queue = dhd_flow_queue(dhdp, flowid);
			flow_ring_node_t * flow_ring_node;

#ifdef DHDTCPACK_SUPPRESS
			/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
			 * when there is a newly coming packet from network stack.
			 */
			dhd_tcpack_info_tbl_clean(dhdp);
#endif /* DHDTCPACK_SUPPRESS */

			flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
			DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
			flow_ring_node->status = FLOW_RING_STATUS_STA_FREEING;

			if (!DHD_FLOW_QUEUE_EMPTY(queue)) {
				void * pkt;
				while ((pkt = dhd_flow_queue_dequeue(dhdp, queue)) != NULL) {
					PKTFREE(dhdp->osh, pkt, TRUE);
				}
			}

			DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
			ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
		}

		sta->flowid[prio] = FLOWID_INVALID;
	}

	id16_map_free(dhdp->staid_allocator, sta->idx);
	DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
	sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */
	sta->ifidx = DHD_BAD_IF;
	bzero(sta->ea.octet, ETHER_ADDR_LEN);
	INIT_LIST_HEAD(&sta->list);
	sta->idx = ID16_INVALID; /* implying free */
}

/** Allocate a dhd_sta object from the dhd pool. */
static dhd_sta_t *
dhd_sta_alloc(dhd_pub_t * dhdp)
{
	uint16 idx;
	dhd_sta_t * sta;
	dhd_sta_pool_t * sta_pool;

	ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));

	idx = id16_map_alloc(dhdp->staid_allocator);
	if (idx == ID16_INVALID) {
		DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__));
		return DHD_STA_NULL;
	}

	sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool);
	sta = &sta_pool[idx];

	ASSERT((sta->idx == ID16_INVALID) &&
		(sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF));

	DHD_CUMM_CTR_INIT(&sta->cumm_ctr);

	sta->idx = idx; /* implying allocated */

	return sta;
}

/** Delete all STAs in an interface's STA list. */
static void
dhd_if_del_sta_list(dhd_if_t *ifp)
{
	dhd_sta_t *sta, *next;
	unsigned long flags;

	DHD_IF_STA_LIST_LOCK(ifp, flags);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
	list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
#if defined(BCM_GMAC3)
		if (ifp->fwdh) {
			/* Remove sta from WOFA forwarder. */
			fwder_deassoc(ifp->fwdh, (uint16 *)(sta->ea.octet), (uintptr_t)sta);
		}
#endif /* BCM_GMAC3 */
		list_del(&sta->list);
		dhd_sta_free(&ifp->info->pub, sta);
	}
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
	DHD_IF_STA_LIST_UNLOCK(ifp, flags);

	return;
}

/** Router/GMAC3: Flush all station entries in the forwarder's WOFA database. */
static void
dhd_if_flush_sta(dhd_if_t * ifp)
{
#if defined(BCM_GMAC3)

	if (ifp && (ifp->fwdh != FWDER_NULL)) {
		dhd_sta_t *sta, *next;
		unsigned long flags;

		DHD_IF_STA_LIST_LOCK(ifp, flags);

		list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
			/* Remove any sta entry from WOFA forwarder. */
			fwder_flush(ifp->fwdh, (uintptr_t)sta);
		}

		DHD_IF_STA_LIST_UNLOCK(ifp, flags);
	}
#endif /* BCM_GMAC3 */
}

/** Construct a pool of dhd_sta_t objects to be used by interfaces. */
static int
dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta)
{
	int idx, prio, sta_pool_memsz;
	dhd_sta_t * sta;
	dhd_sta_pool_t * sta_pool;
	void * staid_allocator;

	ASSERT(dhdp != (dhd_pub_t *)NULL);
	ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL));

	/* dhd_sta objects per radio are managed in a table. id#0 reserved. */
	staid_allocator = id16_map_init(dhdp->osh, max_sta, 1);
	if (staid_allocator == NULL) {
		DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__));
		return BCME_ERROR;
	}

	/* Pre allocate a pool of dhd_sta objects (one extra). */
	sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */
	sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz);
	if (sta_pool == NULL) {
		DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__));
		id16_map_fini(dhdp->osh, staid_allocator);
		return BCME_ERROR;
	}

	dhdp->sta_pool = sta_pool;
	dhdp->staid_allocator = staid_allocator;

	/* Initialize all sta(s) for the pre-allocated free pool. */
	bzero((uchar *)sta_pool, sta_pool_memsz);
	for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
		sta = &sta_pool[idx];
		sta->idx = id16_map_alloc(staid_allocator);
		ASSERT(sta->idx <= max_sta);
	}
	/* Now place them into the pre-allocated free pool. */
	for (idx = 1; idx <= max_sta; idx++) {
		sta = &sta_pool[idx];
		for (prio = 0; prio < (int)NUMPRIO; prio++) {
			sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
		}
		dhd_sta_free(dhdp, sta);
	}

	return BCME_OK;
}
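
/*
 * Note on the two init passes above: the first pass drains the id16
 * allocator so that every pool entry owns a valid id; the second pass then
 * releases each entry through dhd_sta_free(), which both resets its fields
 * (flowids invalidated, ifp pointed at the dummy DHD_IF_NULL) and returns
 * its id to the allocator. After both passes the pool is fully initialized
 * and entirely free.
 */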

/** Destruct the pool of dhd_sta_t objects.
 * Caller must ensure that no STA objects are currently associated with an if.
 */
static void
dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta)
{
	dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;

	if (sta_pool) {
		int idx;
		int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
		for (idx = 1; idx <= max_sta; idx++) {
			ASSERT(sta_pool[idx].ifp == DHD_IF_NULL);
			ASSERT(sta_pool[idx].idx == ID16_INVALID);
		}
		MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz);
		dhdp->sta_pool = NULL;
	}

	id16_map_fini(dhdp->osh, dhdp->staid_allocator);
	dhdp->staid_allocator = NULL;
}

/* Clear the pool of dhd_sta_t objects for built-in type driver */
static void
dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta)
{
	int idx, prio, sta_pool_memsz;
	dhd_sta_t * sta;
	dhd_sta_pool_t * sta_pool;
	void *staid_allocator;

	if (!dhdp) {
		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
		return;
	}

	sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
	staid_allocator = dhdp->staid_allocator;

	if (!sta_pool) {
		DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__));
		return;
	}

	if (!staid_allocator) {
		DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__));
		return;
	}

	/* clear free pool */
	sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
	bzero((uchar *)sta_pool, sta_pool_memsz);

	/* dhd_sta objects per radio are managed in a table. id#0 reserved. */
	id16_map_clear(staid_allocator, max_sta, 1);

	/* Initialize all sta(s) for the pre-allocated free pool. */
	for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
		sta = &sta_pool[idx];
		sta->idx = id16_map_alloc(staid_allocator);
		ASSERT(sta->idx <= max_sta);
	}
	/* Now place them into the pre-allocated free pool. */
	for (idx = 1; idx <= max_sta; idx++) {
		sta = &sta_pool[idx];
		for (prio = 0; prio < (int)NUMPRIO; prio++) {
			sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
		}
		dhd_sta_free(dhdp, sta);
	}
}

/** Find STA with MAC address ea in an interface's STA list. */
dhd_sta_t *
dhd_find_sta(void *pub, int ifidx, void *ea)
{
	dhd_sta_t *sta;
	dhd_if_t *ifp;
	unsigned long flags;

	ASSERT(ea != NULL);
	ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
	if (ifp == NULL)
		return DHD_STA_NULL;

	DHD_IF_STA_LIST_LOCK(ifp, flags);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
	list_for_each_entry(sta, &ifp->sta_list, list) {
		if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
			DHD_INFO(("%s: found STA " MACDBG "\n",
				__FUNCTION__, MAC2STRDBG((char *)ea)));
			DHD_IF_STA_LIST_UNLOCK(ifp, flags);
			return sta;
		}
	}
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
	DHD_IF_STA_LIST_UNLOCK(ifp, flags);

	return DHD_STA_NULL;
}

/** Add STA into the interface's STA list. */
dhd_sta_t *
dhd_add_sta(void *pub, int ifidx, void *ea)
{
	dhd_sta_t *sta;
	dhd_if_t *ifp;
	unsigned long flags;

	ASSERT(ea != NULL);
	ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
	if (ifp == NULL)
		return DHD_STA_NULL;

	sta = dhd_sta_alloc((dhd_pub_t *)pub);
	if (sta == DHD_STA_NULL) {
		DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__));
		return DHD_STA_NULL;
	}

	memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN);

	/* link the sta and the dhd interface */
	sta->ifp = ifp;
	sta->ifidx = ifidx;
#ifdef DHD_WMF
	sta->psta_prim = NULL;
#endif
	INIT_LIST_HEAD(&sta->list);

	DHD_IF_STA_LIST_LOCK(ifp, flags);

	list_add_tail(&sta->list, &ifp->sta_list);

#if defined(BCM_GMAC3)
	if (ifp->fwdh) {
		ASSERT(ISALIGNED(ea, 2));
		/* Add sta to WOFA forwarder. */
		fwder_reassoc(ifp->fwdh, (uint16 *)ea, (uintptr_t)sta);
	}
#endif /* BCM_GMAC3 */

	DHD_IF_STA_LIST_UNLOCK(ifp, flags);

	return sta;
}

/** Delete all STAs from the interface's STA list. */
void
dhd_del_all_sta(void *pub, int ifidx)
{
	dhd_sta_t *sta, *next;
	dhd_if_t *ifp;
	unsigned long flags;

	ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
	if (ifp == NULL)
		return;

	DHD_IF_STA_LIST_LOCK(ifp, flags);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
	list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
#if defined(BCM_GMAC3)
		if (ifp->fwdh) { /* Found a sta, remove from WOFA forwarder. */
			ASSERT(ISALIGNED(sta->ea.octet, 2));
			fwder_deassoc(ifp->fwdh, (uint16 *)sta->ea.octet, (uintptr_t)sta);
		}
#endif /* BCM_GMAC3 */

		list_del(&sta->list);
		dhd_sta_free(&ifp->info->pub, sta);
#ifdef DHD_L2_FILTER
		if (ifp->parp_enable) {
			/* clear Proxy ARP cache of specific Ethernet Address */
			bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh,
				ifp->phnd_arp_table, FALSE,
				sta->ea.octet, FALSE, ((dhd_pub_t*)pub)->tickcnt);
		}
#endif /* DHD_L2_FILTER */
	}
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
	DHD_IF_STA_LIST_UNLOCK(ifp, flags);

	return;
}

/** Delete STA from the interface's STA list. */
void
dhd_del_sta(void *pub, int ifidx, void *ea)
{
	dhd_sta_t *sta, *next;
	dhd_if_t *ifp;
	unsigned long flags;
	char macstr[ETHER_ADDR_STR_LEN];

	ASSERT(ea != NULL);
	ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
	if (ifp == NULL)
		return;

	DHD_IF_STA_LIST_LOCK(ifp, flags);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
	list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
		if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
#if defined(BCM_GMAC3)
			if (ifp->fwdh) { /* Found a sta, remove from WOFA forwarder. */
				ASSERT(ISALIGNED(ea, 2));
				fwder_deassoc(ifp->fwdh, (uint16 *)ea, (uintptr_t)sta);
			}
#endif /* BCM_GMAC3 */
			DHD_MAC_TO_STR(((char *)ea), macstr);
			DHD_ERROR(("%s: Deleting STA %s\n", __FUNCTION__, macstr));
			list_del(&sta->list);
			dhd_sta_free(&ifp->info->pub, sta);
		}
	}
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
	DHD_IF_STA_LIST_UNLOCK(ifp, flags);
#ifdef DHD_L2_FILTER
	if (ifp->parp_enable) {
		/* clear Proxy ARP cache of specific Ethernet Address */
		bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, ifp->phnd_arp_table, FALSE,
			ea, FALSE, ((dhd_pub_t*)pub)->tickcnt);
	}
#endif /* DHD_L2_FILTER */
	return;
}

/** Add STA if it doesn't exist. Not reentrant. */
dhd_sta_t*
dhd_findadd_sta(void *pub, int ifidx, void *ea)
{
	dhd_sta_t *sta;

	sta = dhd_find_sta(pub, ifidx, ea);

	if (!sta) {
		/* Add entry */
		sta = dhd_add_sta(pub, ifidx, ea);
	}

	return sta;
}

#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
#if !defined(BCM_GMAC3)
static struct list_head *
dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, struct list_head *snapshot_list)
{
	unsigned long flags;
	dhd_sta_t *sta, *snapshot;

	INIT_LIST_HEAD(snapshot_list);

	DHD_IF_STA_LIST_LOCK(ifp, flags);

	list_for_each_entry(sta, &ifp->sta_list, list) {
		/* allocate one and add to snapshot */
		snapshot = (dhd_sta_t *)MALLOC(dhd->pub.osh, sizeof(dhd_sta_t));
		if (snapshot == NULL) {
			DHD_ERROR(("%s: Cannot allocate memory\n", __FUNCTION__));
			continue;
		}

		memcpy(snapshot->ea.octet, sta->ea.octet, ETHER_ADDR_LEN);

		INIT_LIST_HEAD(&snapshot->list);
		list_add_tail(&snapshot->list, snapshot_list);
	}

	DHD_IF_STA_LIST_UNLOCK(ifp, flags);

	return snapshot_list;
}

static void
dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list)
{
	dhd_sta_t *sta, *next;

	list_for_each_entry_safe(sta, next, snapshot_list, list) {
		list_del(&sta->list);
		MFREE(dhd->pub.osh, sta, sizeof(dhd_sta_t));
	}
}
#endif /* !BCM_GMAC3 */
#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */

#else
static inline void dhd_if_flush_sta(dhd_if_t * ifp) { }
static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {}
static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; }
static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {}
static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {}
dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; }
dhd_sta_t *dhd_find_sta(void *pub, int ifidx, void *ea) { return NULL; }
void dhd_del_sta(void *pub, int ifidx, void *ea) {}
#endif /* PCIE_FULL_DONGLE */



#if defined(DHD_LB)

#if defined(DHD_LB_TXC) || defined(DHD_LB_RXC) || defined(DHD_LB_TXP)
/**
 * dhd_tasklet_schedule - Function that runs in IPI context of the destination
 * CPU and schedules a tasklet.
 * @tasklet: opaque pointer to the tasklet
 */
INLINE void
dhd_tasklet_schedule(void *tasklet)
{
	tasklet_schedule((struct tasklet_struct *)tasklet);
}
/**
 * dhd_tasklet_schedule_on - Schedules the passed tasklet on a given CPU
 * @tasklet: tasklet to be scheduled
 * @on_cpu: cpu core id
 *
 * If the requested cpu is online, an IPI is sent to it via
 * smp_call_function_single (without waiting), and dhd_tasklet_schedule is
 * invoked there to schedule the specified tasklet on that CPU.
 */
INLINE void
dhd_tasklet_schedule_on(struct tasklet_struct *tasklet, int on_cpu)
{
	const int wait = 0;
	smp_call_function_single(on_cpu,
		dhd_tasklet_schedule, (void *)tasklet, wait);
}

/**
 * dhd_work_schedule_on - Schedules the passed work item on a given CPU
 * @work: work to be scheduled
 * @on_cpu: cpu core id
 *
 * The work item is queued on the requested CPU's workqueue via
 * schedule_work_on, and its work function is invoked on that CPU.
 */

INLINE void
dhd_work_schedule_on(struct work_struct *work, int on_cpu)
{
	schedule_work_on(on_cpu, work);
}
#endif /* DHD_LB_TXC || DHD_LB_RXC || DHD_LB_TXP */

#if defined(DHD_LB_TXC)
/**
 * dhd_lb_tx_compl_dispatch - load balance by dispatching the tx_compl_tasklet
 * on another cpu. The tx_compl_tasklet will take care of DMA unmapping and
 * freeing the packets placed in the tx_compl workq
 */
void
dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = dhdp->info;
	int curr_cpu, on_cpu;

	if (dhd->rx_napi_netdev == NULL) {
		DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
		return;
	}

	DHD_LB_STATS_INCR(dhd->txc_sched_cnt);
	/*
	 * If the destination CPU is NOT online or is the same as the current
	 * CPU, there is no need to schedule the work.
	 */
	curr_cpu = get_cpu();
	put_cpu();

	on_cpu = atomic_read(&dhd->tx_compl_cpu);

	if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
		dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
	} else {
		schedule_work(&dhd->tx_compl_dispatcher_work);
	}
}

static void dhd_tx_compl_dispatcher_fn(struct work_struct * work)
{
	struct dhd_info *dhd =
		container_of(work, struct dhd_info, tx_compl_dispatcher_work);
	int cpu;

	get_online_cpus();
	cpu = atomic_read(&dhd->tx_compl_cpu);
	if (!cpu_online(cpu))
		dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
	else
		dhd_tasklet_schedule_on(&dhd->tx_compl_tasklet, cpu);
	put_online_cpus();
}
#endif /* DHD_LB_TXC */

#if defined(DHD_LB_RXC)
/**
 * dhd_lb_rx_compl_dispatch - load balance by dispatching the rx_compl_tasklet
 * on another cpu. The rx_compl_tasklet will take care of reposting rx buffers
 * in the H2D RxBuffer Post common ring, by using the recycled pktids that were
 * placed in the rx_compl workq.
 *
 * @dhdp: pointer to dhd_pub object
 */
void
dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = dhdp->info;
	int curr_cpu, on_cpu;

	if (dhd->rx_napi_netdev == NULL) {
		DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
		return;
	}

	DHD_LB_STATS_INCR(dhd->rxc_sched_cnt);
	/*
	 * If the destination CPU is NOT online or is the same as the current
	 * CPU, there is no need to schedule the work.
	 */
	curr_cpu = get_cpu();
	put_cpu();
	on_cpu = atomic_read(&dhd->rx_compl_cpu);

	if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
		dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
	} else {
		dhd_rx_compl_dispatcher_fn(dhdp);
	}
}

static void dhd_rx_compl_dispatcher_fn(dhd_pub_t *dhdp)
{
	struct dhd_info *dhd = dhdp->info;
	int cpu;

	preempt_disable();
	cpu = atomic_read(&dhd->rx_compl_cpu);
	if (!cpu_online(cpu))
		dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
	else {
		dhd_tasklet_schedule_on(&dhd->rx_compl_tasklet, cpu);
	}
	preempt_enable();
}
#endif /* DHD_LB_RXC */

#if defined(DHD_LB_TXP)
static void dhd_tx_dispatcher_work(struct work_struct * work)
{
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
	struct dhd_info *dhd =
		container_of(work, struct dhd_info, tx_dispatcher_work);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
	dhd_tasklet_schedule(&dhd->tx_tasklet);
}

static void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp)
{
	int cpu;
	int net_tx_cpu;
	dhd_info_t *dhd = dhdp->info;

	preempt_disable();
	cpu = atomic_read(&dhd->tx_cpu);
	net_tx_cpu = atomic_read(&dhd->net_tx_cpu);

	/*
	 * If NET_TX pushed the packet on the same CPU that is chosen for Tx
	 * processing, separate the two, i.e. run the TX processing tasklet
	 * on the completion CPU instead.
	 */
	if (net_tx_cpu == cpu)
		cpu = atomic_read(&dhd->tx_compl_cpu);

	if (!cpu_online(cpu)) {
		/*
		 * The chosen CPU is not online, so do the job on the
		 * current CPU itself.
		 */
		dhd_tasklet_schedule(&dhd->tx_tasklet);
	} else {
		/*
		 * Schedule tx_dispatcher_work on that cpu, which in turn
		 * will schedule tx_tasklet.
		 */
		dhd_work_schedule_on(&dhd->tx_dispatcher_work, cpu);
	}
	preempt_enable();
}

/**
 * dhd_lb_tx_dispatch - load balance by dispatching the tx_tasklet
 * on another cpu. The tx_tasklet will take care of actually putting
 * the skbs into appropriate flow ring and ringing H2D interrupt
 *
 * @dhdp: pointer to dhd_pub object
 */
static void
dhd_lb_tx_dispatch(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = dhdp->info;
	int curr_cpu;

	curr_cpu = get_cpu();
	put_cpu();

	/* Record the CPU in which the TX request from Network stack came */
	atomic_set(&dhd->net_tx_cpu, curr_cpu);

	/* Schedule the work to dispatch ... */
	dhd_tx_dispatcher_fn(dhdp);
}
#endif /* DHD_LB_TXP */

#if defined(DHD_LB_RXP)
/**
 * dhd_napi_poll - Load balance napi poll function to process received
 * packets and send up the network stack using netif_receive_skb()
 *
 * @napi: napi object in which context this poll function is invoked
 * @budget: number of packets to be processed.
 *
 * Fetch the dhd_info given the rx_napi_struct. Move all packets from the
 * rx_napi_queue into a local rx_process_queue (lock and queue move and unlock).
 * Dequeue each packet from head of rx_process_queue, fetch the ifid from the
 * packet tag and sendup.
 */
static int
dhd_napi_poll(struct napi_struct *napi, int budget)
{
	int ifid;
	const int pkt_count = 1;
	const int chan = 0;
	struct sk_buff * skb;
	unsigned long flags;
	struct dhd_info *dhd;
	int processed = 0;
	struct sk_buff_head rx_process_queue;

#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
	dhd = container_of(napi, struct dhd_info, rx_napi_struct);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif

	DHD_INFO(("%s napi_queue<%d> budget<%d>\n",
		__FUNCTION__, skb_queue_len(&dhd->rx_napi_queue), budget));
	__skb_queue_head_init(&rx_process_queue);

	/* extract the entire rx_napi_queue into local rx_process_queue */
	spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
	skb_queue_splice_tail_init(&dhd->rx_napi_queue, &rx_process_queue);
	spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);

	while ((skb = __skb_dequeue(&rx_process_queue)) != NULL) {
		OSL_PREFETCH(skb->data);

		ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb));

		DHD_INFO(("%s dhd_rx_frame pkt<%p> ifid<%d>\n",
			__FUNCTION__, skb, ifid));

		dhd_rx_frame(&dhd->pub, ifid, skb, pkt_count, chan);
		processed++;
	}

	DHD_LB_STATS_UPDATE_NAPI_HISTO(&dhd->pub, processed);

	DHD_INFO(("%s processed %d\n", __FUNCTION__, processed));
	napi_complete(napi);

	return budget - 1;
}
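
/*
 * Note on the return value above: returning budget - 1 (strictly less than
 * budget) after napi_complete() tells the NAPI core that this poll consumed
 * less than its budget, so the instance is taken off the softnet poll list
 * until dhd_napi_schedule() re-arms it. The queue splice under the
 * rx_napi_queue lock is what lets producers keep enqueuing while this poll
 * drains a private snapshot of the queue.
 */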

/**
 * dhd_napi_schedule - Place the napi struct into the current cpus softnet napi
 * poll list. This function may be invoked via the smp_call_function_single
 * from a remote CPU.
 *
 * This function will essentially invoke __raise_softirq_irqoff(NET_RX_SOFTIRQ)
 * after the napi_struct is added to the softnet data's poll_list
 *
 * @info: pointer to a dhd_info struct
 */
static void
dhd_napi_schedule(void *info)
{
	dhd_info_t *dhd = (dhd_info_t *)info;

	DHD_INFO(("%s rx_napi_struct<%p> on cpu<%d>\n",
		__FUNCTION__, &dhd->rx_napi_struct, atomic_read(&dhd->rx_napi_cpu)));

	/* add napi_struct to softnet data poll list and raise NET_RX_SOFTIRQ */
	if (napi_schedule_prep(&dhd->rx_napi_struct)) {
		__napi_schedule(&dhd->rx_napi_struct);
		DHD_LB_STATS_PERCPU_ARR_INCR(dhd->napi_percpu_run_cnt);
	}

	/*
	 * If the rx_napi_struct was already running, then we let it complete
	 * processing all its packets. The rx_napi_struct may only run on one
	 * core at a time, to avoid out-of-order handling.
	 */
}

/**
 * dhd_napi_schedule_on - API to schedule on a desired CPU core a NET_RX_SOFTIRQ
 * action after placing the dhd's rx_process napi object in the remote CPU's
 * softnet data's poll_list.
 *
 * @dhd: dhd_info which has the rx_process napi object
 * @on_cpu: desired remote CPU id
 */
static INLINE int
dhd_napi_schedule_on(dhd_info_t *dhd, int on_cpu)
{
	int wait = 0; /* asynchronous IPI */
	DHD_INFO(("%s dhd<%p> napi<%p> on_cpu<%d>\n",
		__FUNCTION__, dhd, &dhd->rx_napi_struct, on_cpu));

	if (smp_call_function_single(on_cpu, dhd_napi_schedule, dhd, wait)) {
		DHD_ERROR(("%s smp_call_function_single on_cpu<%d> failed\n",
			__FUNCTION__, on_cpu));
	}

	DHD_LB_STATS_INCR(dhd->napi_sched_cnt);

	return 0;
}

/*
 * Call get_online_cpus/put_online_cpus around dhd_napi_schedule_on.
 * Why should we do this?
 * The candidacy algorithm runs from the callback registered with the CPU
 * hotplug notifier, and that callback runs from worker context.
 * dhd_napi_schedule_on also runs from worker context. Both of these can run
 * on two different CPUs at the same time, so there is a window where a given
 * CPUn could be brought down from CPUm while we try to run a function on
 * CPUn. To prevent this it is better to execute the whole SMP function call
 * under get_online_cpus, which ensures that the hotplug mechanism does not
 * kick in until we are done dealing with online CPUs. If the hotplug worker
 * is already running there is no problem, because the candidacy algorithm
 * will then reflect the change in dhd->rx_napi_cpu.
 *
 * The code structure below is proposed in
 * https://www.kernel.org/doc/Documentation/cpu-hotplug.txt
 * for the question
 * Q: I need to ensure that a particular cpu is not removed when there is some
 *    work specific to this cpu in progress.
 *
 * According to that documentation, calling get_online_cpus is NOT required
 * when running from tasklet context. Since dhd_rx_napi_dispatcher_fn can run
 * from workqueue context, we have to call these functions.
 */
static void dhd_rx_napi_dispatcher_fn(struct work_struct * work)
{
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
	struct dhd_info *dhd =
		container_of(work, struct dhd_info, rx_napi_dispatcher_work);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
	int cpu;

	get_online_cpus();
	cpu = atomic_read(&dhd->rx_napi_cpu);

	if (!cpu_online(cpu))
		dhd_napi_schedule(dhd);
	else
		dhd_napi_schedule_on(dhd, cpu);

	put_online_cpus();
}

/**
 * dhd_lb_rx_napi_dispatch - load balance by dispatching the rx_napi_struct
 * to run on another CPU. The rx_napi_struct's poll function will retrieve all
 * the packets enqueued into the rx_napi_queue and sendup.
 * The producer's rx packet queue is appended to the rx_napi_queue before
 * dispatching the rx_napi_struct.
 */
void
dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp)
{
	unsigned long flags;
	dhd_info_t *dhd = dhdp->info;
	int curr_cpu;
	int on_cpu;

	if (dhd->rx_napi_netdev == NULL) {
		DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
		return;
	}

	DHD_INFO(("%s append napi_queue<%d> pend_queue<%d>\n", __FUNCTION__,
		skb_queue_len(&dhd->rx_napi_queue), skb_queue_len(&dhd->rx_pend_queue)));

	/* append the producer's queue of packets to the napi's rx process queue */
	spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
	skb_queue_splice_tail_init(&dhd->rx_pend_queue, &dhd->rx_napi_queue);
	spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);

	/*
	 * If the destination CPU is NOT online or is the same as the current
	 * CPU, there is no need to schedule the work.
	 */
	curr_cpu = get_cpu();
	put_cpu();

	on_cpu = atomic_read(&dhd->rx_napi_cpu);
	if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
		dhd_napi_schedule(dhd);
	} else {
		schedule_work(&dhd->rx_napi_dispatcher_work);
	}
}

/**
 * dhd_lb_rx_pkt_enqueue - Enqueue the packet into the producer's queue
 */
void
dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx)
{
	dhd_info_t *dhd = dhdp->info;

	DHD_INFO(("%s enqueue pkt<%p> ifidx<%d> pend_queue<%d>\n", __FUNCTION__,
		pkt, ifidx, skb_queue_len(&dhd->rx_pend_queue)));
	DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pkt), ifidx);
	__skb_queue_tail(&dhd->rx_pend_queue, pkt);
}
#endif /* DHD_LB_RXP */

#endif /* DHD_LB */


/** Returns the dhd iflist index corresponding to the bssidx provided by apps
 * (DHD_MAX_IFS if no interface matches).
 */
int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx)
{
	dhd_if_t *ifp;
	dhd_info_t *dhd = dhdp->info;
	int i;

	ASSERT(bssidx < DHD_MAX_IFS);
	ASSERT(dhdp);

	for (i = 0; i < DHD_MAX_IFS; i++) {
		ifp = dhd->iflist[i];
		if (ifp && (ifp->bssidx == bssidx)) {
			DHD_TRACE(("Index manipulated for %s from %d to %d\n",
				ifp->name, bssidx, i));
			break;
		}
	}
	return i;
}

static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb)
{
	uint32 store_idx;
	uint32 sent_idx;

	if (!skb) {
		DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
		return BCME_ERROR;
	}

	dhd_os_rxflock(dhdp);
	store_idx = dhdp->store_idx;
	sent_idx = dhdp->sent_idx;
	if (dhdp->skbbuf[store_idx] != NULL) {
		/* Make sure the previous packets are processed */
		dhd_os_rxfunlock(dhdp);
#ifdef RXF_DEQUEUE_ON_BUSY
		DHD_TRACE(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
			skb, store_idx, sent_idx));
		return BCME_BUSY;
#else /* RXF_DEQUEUE_ON_BUSY */
		DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
			skb, store_idx, sent_idx));
		/* msleep was removed here; wait_event_timeout should be used
		 * if we want to give the rx frame thread a chance to run
		 */
#if defined(WAIT_DEQUEUE)
		OSL_SLEEP(1);
#endif
		return BCME_ERROR;
#endif /* RXF_DEQUEUE_ON_BUSY */
	}
	DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
		skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1)));
	dhdp->skbbuf[store_idx] = skb;
	dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1);
	dhd_os_rxfunlock(dhdp);

	return BCME_OK;
}
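
/*
 * The skbbuf ring above relies on MAXSKBPEND being a power of two: the
 * index arithmetic (idx + 1) & (MAXSKBPEND - 1) wraps without a modulo.
 * For illustration, if MAXSKBPEND were 32, index 31 would advance to
 * (31 + 1) & 31 == 0. A slot is "free" when it holds NULL, which is why
 * the enqueue refuses to overwrite a slot the rxf thread (the dequeue
 * below) has not consumed yet.
 */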

static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp)
{
	uint32 store_idx;
	uint32 sent_idx;
	void *skb;

	dhd_os_rxflock(dhdp);

	store_idx = dhdp->store_idx;
	sent_idx = dhdp->sent_idx;
	skb = dhdp->skbbuf[sent_idx];

	if (skb == NULL) {
		dhd_os_rxfunlock(dhdp);
		DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
			store_idx, sent_idx));
		return NULL;
	}

	dhdp->skbbuf[sent_idx] = NULL;
	dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1);

	DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
		skb, sent_idx));

	dhd_os_rxfunlock(dhdp);

	return skb;
}

int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
{
	if (prepost) { /* pre process */
		dhd_read_cis(dhdp);
		dhd_check_module_cid(dhdp);
		dhd_check_module_mac(dhdp);
		dhd_set_macaddr_from_file(dhdp);
	} else { /* post process */
		dhd_write_macaddr(&dhdp->mac);
		dhd_clear_cis(dhdp);
	}

	return 0;
}

// terence 20160615: fix building error if ARP_OFFLOAD_SUPPORT removed
#if defined(PKT_FILTER_SUPPORT)
#if defined(ARP_OFFLOAD_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
static bool
_turn_on_arp_filter(dhd_pub_t *dhd, int op_mode_param)
{
	bool _apply = FALSE;
	/* In case of IBSS mode, apply arp pkt filter */
	if (op_mode_param & DHD_FLAG_IBSS_MODE) {
		_apply = TRUE;
		goto exit;
	}
	/* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
	if (op_mode_param & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE)) {
		_apply = TRUE;
		goto exit;
	}

exit:
	return _apply;
}
#endif /* ARP_OFFLOAD_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */

void
dhd_set_packet_filter(dhd_pub_t *dhd)
{
	int i;

	DHD_TRACE(("%s: enter\n", __FUNCTION__));
	if (dhd_pkt_filter_enable) {
		for (i = 0; i < dhd->pktfilter_count; i++) {
			dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
		}
	}
}

void
dhd_enable_packet_filter(int value, dhd_pub_t *dhd)
{
	int i;

	DHD_ERROR(("%s: enter, value = %d\n", __FUNCTION__, value));
	if ((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) && value) {
		DHD_ERROR(("%s: DHD_FLAG_HOSTAP_MODE\n", __FUNCTION__));
		return;
	}
	/* 1 - Enable packet filter, only allow unicast packet to send up */
	/* 0 - Disable packet filter */
	if (dhd_pkt_filter_enable && (!value ||
	    (dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress)))
	{
		for (i = 0; i < dhd->pktfilter_count; i++) {
// terence 20160615: fix building error if ARP_OFFLOAD_SUPPORT removed
#if defined(ARP_OFFLOAD_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
			if (value && (i == DHD_ARP_FILTER_NUM) &&
			    !_turn_on_arp_filter(dhd, dhd->op_mode)) {
				DHD_TRACE(("Do not turn on ARP white list pkt filter:"
					"val %d, cnt %d, op_mode 0x%x\n",
					value, i, dhd->op_mode));
				continue;
			}
#endif /* ARP_OFFLOAD_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */
			dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
				value, dhd_master_mode);
		}
	}
}

int
dhd_packet_filter_add_remove(dhd_pub_t *dhdp, int add_remove, int num)
{
	char *filterp = NULL;
	int filter_id = 0;

	switch (num) {
		case DHD_BROADCAST_FILTER_NUM:
			filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
			filter_id = 101;
			break;
		case DHD_MULTICAST4_FILTER_NUM:
			filter_id = 102;
			if (FW_SUPPORTED((dhdp), pf6)) {
				if (dhdp->pktfilter[num] != NULL) {
					dhd_pktfilter_offload_delete(dhdp, filter_id);
					dhdp->pktfilter[num] = NULL;
				}
				if (!add_remove) {
					filterp = DISCARD_IPV4_MCAST;
					add_remove = 1;
					break;
				}
			}
			filterp = "102 0 0 0 0xFFFFFF 0x01005E";
			break;
		case DHD_MULTICAST6_FILTER_NUM:
			filter_id = 103;
			if (FW_SUPPORTED((dhdp), pf6)) {
				if (dhdp->pktfilter[num] != NULL) {
					dhd_pktfilter_offload_delete(dhdp, filter_id);
					dhdp->pktfilter[num] = NULL;
				}
				if (!add_remove) {
					filterp = DISCARD_IPV6_MCAST;
					add_remove = 1;
					break;
				}
			}
			filterp = "103 0 0 0 0xFFFF 0x3333";
			break;
		case DHD_MDNS_FILTER_NUM:
			filterp = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB";
			filter_id = 104;
			break;
		case DHD_ARP_FILTER_NUM:
			filterp = "105 0 0 12 0xFFFF 0x0806";
			filter_id = 105;
			break;
		case DHD_BROADCAST_ARP_FILTER_NUM:
			filterp = "106 0 0 0 0xFFFFFFFFFFFF0000000000000806"
				" 0xFFFFFFFFFFFF0000000000000806";
			filter_id = 106;
			break;
		default:
			return -EINVAL;
	}

	/* Add filter */
	if (add_remove) {
		dhdp->pktfilter[num] = filterp;
		dhd_pktfilter_offload_set(dhdp, dhdp->pktfilter[num]);
	} else { /* Delete filter */
		if (dhdp->pktfilter[num]) {
			dhd_pktfilter_offload_delete(dhdp, filter_id);
			dhdp->pktfilter[num] = NULL;
		}
	}

	return 0;
}
3369 #endif /* PKT_FILTER_SUPPORT */
3370
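/*
 * Apply (value != 0) or undo (value == 0) the extra power-save settings
 * around kernel suspend: PM mode, packet filters, allmulti, DTIM skipping
 * (bcn_li_dtim), roam/beacon-timeout tuning, ND offload and IPv6 RA
 * filtering, and log suppression. Each knob is wrapped in its own
 * compile-time option, so the exact set applied is build-dependent.
 */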
3371 static int dhd_set_suspend(int value, dhd_pub_t *dhd)
3372 {
3373 int power_mode = PM_MAX;
3374 #ifdef SUPPORT_SENSORHUB
3375 shub_control_t shub_ctl;
3376 #endif /* SUPPORT_SENSORHUB */
3377 int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */
3378 int ret = 0;
3379 #ifdef DHD_USE_EARLYSUSPEND
3380 #ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
3381 int bcn_timeout = 0;
3382 #endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
3383 #ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
3384 int roam_time_thresh = 0; /* (ms) */
3385 #endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
3386 #ifndef ENABLE_FW_ROAM_SUSPEND
3387 uint roamvar = dhd->conf->roam_off_suspend;
3388 #endif /* ENABLE_FW_ROAM_SUSPEND */
3389 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
3390 int bcn_li_bcn;
3391 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
3392 uint nd_ra_filter = 0;
3393 #endif /* DHD_USE_EARLYSUSPEND */
3394 #ifdef PASS_ALL_MCAST_PKTS
3395 struct dhd_info *dhdinfo;
3396 uint32 allmulti;
3397 uint i;
3398 #endif /* PASS_ALL_MCAST_PKTS */
3399 #ifdef ENABLE_IPMCAST_FILTER
3400 int ipmcast_l2filter;
3401 #endif /* ENABLE_IPMCAST_FILTER */
3402 #ifdef DYNAMIC_SWOOB_DURATION
3403 #ifndef CUSTOM_INTR_WIDTH
3404 #define CUSTOM_INTR_WIDTH 100
3405 int intr_width = 0;
3406 #endif /* CUSTOM_INTR_WIDTH */
3407 #endif /* DYNAMIC_SWOOB_DURATION */
3408
3409 #if defined(BCMPCIE)
3410 int lpas = 0;
3411 int dtim_period = 0;
3412 int bcn_interval = 0;
3413 int bcn_to_dly = 0;
3414 #ifndef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
3415 int bcn_timeout = CUSTOM_BCN_TIMEOUT_SETTING;
3416 #else
3417 bcn_timeout = CUSTOM_BCN_TIMEOUT_SETTING;
3418 #endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
3419 #endif /* BCMPCIE */
3420
3421 if (!dhd)
3422 return -ENODEV;
3423
3424 #ifdef PASS_ALL_MCAST_PKTS
3425 dhdinfo = dhd->info;
3426 #endif /* PASS_ALL_MCAST_PKTS */
3427
3428 DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
3429 __FUNCTION__, value, dhd->in_suspend));
3430
3431 dhd_suspend_lock(dhd);
3432
3433 #ifdef CUSTOM_SET_CPUCORE
3434 DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__, value));
3435 /* set specific cpucore */
3436 dhd_set_cpucore(dhd, TRUE);
3437 #endif /* CUSTOM_SET_CPUCORE */
3438
3439 if (dhd->conf->pm >= 0)
3440 power_mode = dhd->conf->pm;
3441 else
3442 power_mode = PM_FAST;
3443
3444 if (dhd->up) {
3445 if (value && dhd->in_suspend) {
3446 #ifdef PKT_FILTER_SUPPORT
3447 dhd->early_suspended = 1;
3448 #endif
3449 /* Kernel suspended */
3450 DHD_ERROR(("%s: force extra suspend setting\n", __FUNCTION__));
3451
3452 if (dhd->conf->pm_in_suspend >= 0)
3453 power_mode = dhd->conf->pm_in_suspend;
3454 dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
3455 sizeof(power_mode), TRUE, 0);
3456
3457 #ifdef PKT_FILTER_SUPPORT
3458 /* Enable packet filter,
3459 * only allow unicast packet to send up
3460 */
3461 dhd_enable_packet_filter(1, dhd);
3462 #ifdef APF
3463 dhd_dev_apf_enable_filter(dhd_linux_get_primary_netdev(dhd));
3464 #endif /* APF */
3465 #endif /* PKT_FILTER_SUPPORT */
3466
3467 #ifdef SUPPORT_SENSORHUB
3468 shub_ctl.enable = 1;
3469 shub_ctl.cmd = 0x000;
3470 shub_ctl.op_mode = 1;
3471 shub_ctl.interval = 0;
3472 if (dhd->info->shub_enable == 1) {
3473 ret = dhd_iovar(dhd, 0, "shub_msreq",
3474 (char *)&shub_ctl, sizeof(shub_ctl), NULL, 0, TRUE);
3475 if (ret < 0) {
3476 DHD_ERROR(("%s SensorHub MS start: failed %d\n",
3477 __FUNCTION__, ret));
3478 }
3479 }
3480 #endif /* SUPPORT_SENSORHUB */
3481
3482
3483 #ifdef PASS_ALL_MCAST_PKTS
3484 allmulti = 0;
3485 for (i = 0; i < DHD_MAX_IFS; i++) {
3486 if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net)
3487 dhd_iovar(dhd, i, "allmulti", (char *)&allmulti,
3488 sizeof(allmulti), NULL, 0, TRUE);
3489 }
3490 #endif /* PASS_ALL_MCAST_PKTS */
3491
3492 /* If DTIM skip is set up as default, force it to wake on
3493  * every third DTIM for better power savings. Note that
3494  * one side effect is a chance of missing BC/MC packets.
3495  */
3496 #ifdef WLTDLS
3497 /* Do not set bcn_li_dtim in WFD mode */
3498 if (dhd->tdls_mode) {
3499 bcn_li_dtim = 0;
3500 } else
3501 #endif /* WLTDLS */
3502 #if defined(BCMPCIE)
3503 bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd, &dtim_period,
3504 &bcn_interval);
3505 dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
3506 sizeof(bcn_li_dtim), NULL, 0, TRUE);
3507
3508 if ((bcn_li_dtim * dtim_period * bcn_interval) >=
3509 MIN_DTIM_FOR_ROAM_THRES_EXTEND) {
3510 /*
3511  * Increase the max roaming threshold from 2 s to 8 s;
3512  * the real roam threshold is MIN(max_roam_threshold,
3513  * bcn_timeout/2).
3514  */
3515 lpas = 1;
3516 dhd_iovar(dhd, 0, "lpas", (char *)&lpas, sizeof(lpas), NULL,
3517 0, TRUE);
3518
3519 bcn_to_dly = 1;
3520 /*
3521  * If bcn_to_dly is 1, the real roam threshold is
3522  * MIN(max_roam_threshold, bcn_timeout - 1); the link-down
3523  * event is notified only after the roaming procedure
3524  * completes, if bcn_timeout hits while roaming is in progress.
3525  */
3526 dhd_iovar(dhd, 0, "bcn_to_dly", (char *)&bcn_to_dly,
3527 sizeof(bcn_to_dly), NULL, 0, TRUE);
3528 /* Increase beacon timeout to 6 secs or use bigger one */
3529 bcn_timeout = max(bcn_timeout, BCN_TIMEOUT_IN_SUSPEND);
3530 dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
3531 sizeof(bcn_timeout), NULL, 0, TRUE);
3532 }
3533 #else
3534 bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd);
3535 if (dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
3536 sizeof(bcn_li_dtim), NULL, 0, TRUE) < 0)
3537 DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__));
3538 #endif /* BCMPCIE */
3539
3540 #ifdef DHD_USE_EARLYSUSPEND
3541 #ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
3542 bcn_timeout = CUSTOM_BCN_TIMEOUT_IN_SUSPEND;
3543 dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
3544 sizeof(bcn_timeout), NULL, 0, TRUE);
3545 #endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
3546 #ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
3547 roam_time_thresh = CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND;
3548 dhd_iovar(dhd, 0, "roam_time_thresh", (char *)&roam_time_thresh,
3549 sizeof(roam_time_thresh), NULL, 0, TRUE);
3550 #endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
3551 #ifndef ENABLE_FW_ROAM_SUSPEND
3552 /* Disable firmware roaming during suspend */
3553 dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar, sizeof(roamvar),
3554 NULL, 0, TRUE);
3555 #endif /* ENABLE_FW_ROAM_SUSPEND */
3556 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
3557 bcn_li_bcn = 0;
3558 dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn,
3559 sizeof(bcn_li_bcn), NULL, 0, TRUE);
3560 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
3561 #ifdef NDO_CONFIG_SUPPORT
3562 if (dhd->ndo_enable) {
3563 if (!dhd->ndo_host_ip_overflow) {
3564 /* enable ND offload on suspend */
3565 ret = dhd_ndo_enable(dhd, 1);
3566 if (ret < 0) {
3567 DHD_ERROR(("%s: failed to enable NDO\n",
3568 __FUNCTION__));
3569 }
3570 } else {
3571 DHD_INFO(("%s: NDO disabled on suspend due to"
3572 "HW capacity\n", __FUNCTION__));
3573 }
3574 }
3575 #endif /* NDO_CONFIG_SUPPORT */
3576 #ifndef APF
3577 if (FW_SUPPORTED(dhd, ndoe))
3578 #else
3579 if (FW_SUPPORTED(dhd, ndoe) && !FW_SUPPORTED(dhd, apf))
3580 #endif /* APF */
3581 {
3582 /* enable IPv6 RA filter in firmware during suspend */
3583 nd_ra_filter = 1;
3584 ret = dhd_iovar(dhd, 0, "nd_ra_filter_enable",
3585 (char *)&nd_ra_filter, sizeof(nd_ra_filter),
3586 NULL, 0, TRUE);
3587 if (ret < 0)
3588 DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
3589 ret));
3590 }
3591 dhd_os_suppress_logging(dhd, TRUE);
3592 #ifdef ENABLE_IPMCAST_FILTER
3593 ipmcast_l2filter = 1;
3594 ret = dhd_iovar(dhd, 0, "ipmcast_l2filter",
3595 (char *)&ipmcast_l2filter, sizeof(ipmcast_l2filter),
3596 NULL, 0, TRUE);
3597 #endif /* ENABLE_IPMCAST_FILTER */
3598 #ifdef DYNAMIC_SWOOB_DURATION
3599 intr_width = CUSTOM_INTR_WIDTH;
3600 ret = dhd_iovar(dhd, 0, "bus:intr_width", (char *)&intr_width,
3601 sizeof(intr_width), NULL, 0, TRUE);
3602 if (ret < 0) {
3603 DHD_ERROR(("failed to set intr_width (%d)\n", ret));
3604 }
3605 #endif /* DYNAMIC_SWOOB_DURATION */
3606 #endif /* DHD_USE_EARLYSUSPEND */
3607 dhd_conf_set_suspend_resume(dhd, value);
3608 } else {
3609 dhd_conf_set_suspend_resume(dhd, value);
3610 #ifdef PKT_FILTER_SUPPORT
3611 dhd->early_suspended = 0;
3612 #endif
3613 /* Kernel resumed */
3614 DHD_ERROR(("%s: Remove extra suspend setting \n", __FUNCTION__));
3615
3616 #ifdef SUPPORT_SENSORHUB
3617 shub_ctl.enable = 1;
3618 shub_ctl.cmd = 0x000;
3619 shub_ctl.op_mode = 0;
3620 shub_ctl.interval = 0;
3621 if (dhd->info->shub_enable == 1) {
3622 ret = dhd_iovar(dhd, 0, "shub_msreq",
3623 (char *)&shub_ctl, sizeof(shub_ctl),
3624 NULL, 0, TRUE);
3625 if (ret < 0) {
3626 DHD_ERROR(("%s SensorHub MS stop: failed %d\n",
3627 __FUNCTION__, ret));
3628 }
3629 }
3630 #endif /* SUPPORT_SENSORHUB */
3631
3632 #ifdef DYNAMIC_SWOOB_DURATION
3633 intr_width = 0;
3634 ret = dhd_iovar(dhd, 0, "bus:intr_width", (char *)&intr_width,
3635 sizeof(intr_width), NULL, 0, TRUE);
3636 if (ret < 0) {
3637 DHD_ERROR(("failed to set intr_width (%d)\n", ret));
3638 }
3639 #endif /* DYNAMIC_SWOOB_DURATION */
3640 dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
3641 sizeof(power_mode), TRUE, 0);
3642 #ifdef PKT_FILTER_SUPPORT
3643 /* disable pkt filter */
3644 dhd_enable_packet_filter(0, dhd);
3645 #ifdef APF
3646 dhd_dev_apf_disable_filter(dhd_linux_get_primary_netdev(dhd));
3647 #endif /* APF */
3648 #endif /* PKT_FILTER_SUPPORT */
3649 #ifdef PASS_ALL_MCAST_PKTS
3650 allmulti = 1;
3651 for (i = 0; i < DHD_MAX_IFS; i++) {
3652 if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net)
3653 dhd_iovar(dhd, i, "allmulti", (char *)&allmulti,
3654 sizeof(allmulti), NULL, 0, TRUE);
3655 }
3656 #endif /* PASS_ALL_MCAST_PKTS */
3657 #if defined(BCMPCIE)
3658 /* restore pre-suspend setting */
3659 ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
3660 sizeof(bcn_li_dtim), NULL, 0, TRUE);
3661 if (ret < 0) {
3662 DHD_ERROR(("%s:bcn_li_ditm fail:%d\n", __FUNCTION__, ret));
3663 }
3664
3665 dhd_iovar(dhd, 0, "lpas", (char *)&lpas, sizeof(lpas), NULL, 0,
3666 TRUE);
3667
3668 dhd_iovar(dhd, 0, "bcn_to_dly", (char *)&bcn_to_dly,
3669 sizeof(bcn_to_dly), NULL, 0, TRUE);
3670
3671 dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
3672 sizeof(bcn_timeout), NULL, 0, TRUE);
3673 #else
3674 /* restore pre-suspend setting for dtim_skip */
3675 ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
3676 sizeof(bcn_li_dtim), NULL, 0, TRUE);
3677 if (ret < 0) {
3678 DHD_ERROR(("%s:bcn_li_ditm fail:%d\n", __FUNCTION__, ret));
3679 }
3680 #endif /* BCMPCIE */
3681 #ifdef DHD_USE_EARLYSUSPEND
3682 #ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
3683 bcn_timeout = CUSTOM_BCN_TIMEOUT;
3684 dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
3685 sizeof(bcn_timeout), NULL, 0, TRUE);
3686 #endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
3687 #ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
3688 roam_time_thresh = 2000;
3689 dhd_iovar(dhd, 0, "roam_time_thresh", (char *)&roam_time_thresh,
3690 sizeof(roam_time_thresh), NULL, 0, TRUE);
3691
3692 #endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
3693 #ifndef ENABLE_FW_ROAM_SUSPEND
3694 roamvar = dhd_roam_disable;
3695 dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar, sizeof(roamvar),
3696 NULL, 0, TRUE);
3697 #endif /* ENABLE_FW_ROAM_SUSPEND */
3698 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
3699 bcn_li_bcn = 1;
3700 dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn,
3701 sizeof(bcn_li_bcn), NULL, 0, TRUE);
3702 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
3703 #ifdef NDO_CONFIG_SUPPORT
3704 if (dhd->ndo_enable) {
3705 /* Disable ND offload on resume */
3706 ret = dhd_ndo_enable(dhd, 0);
3707 if (ret < 0) {
3708 DHD_ERROR(("%s: failed to disable NDO\n",
3709 __FUNCTION__));
3710 }
3711 }
3712 #endif /* NDO_CONFIG_SUPPORT */
3713 #ifndef APF
3714 if (FW_SUPPORTED(dhd, ndoe))
3715 #else
3716 if (FW_SUPPORTED(dhd, ndoe) && !FW_SUPPORTED(dhd, apf))
3717 #endif /* APF */
3718 {
3719 /* disable IPv6 RA filter in firmware during suspend */
3720 nd_ra_filter = 0;
3721 ret = dhd_iovar(dhd, 0, "nd_ra_filter_enable",
3722 (char *)&nd_ra_filter, sizeof(nd_ra_filter),
3723 NULL, 0, TRUE);
3724 if (ret < 0) {
3725 DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
3726 ret));
3727 }
3728 }
3729 dhd_os_suppress_logging(dhd, FALSE);
3730 #ifdef ENABLE_IPMCAST_FILTER
3731 ipmcast_l2filter = 0;
3732 ret = dhd_iovar(dhd, 0, "ipmcast_l2filter",
3733 (char *)&ipmcast_l2filter, sizeof(ipmcast_l2filter),
3734 NULL, 0, TRUE);
3735 #endif /* ENABLE_IPMCAST_FILTER */
3736 #endif /* DHD_USE_EARLYSUSPEND */
3737
3738 /* terence 2017029: Reject in early suspend */
3739 if (dhd->conf->insuspend & NO_TXDATA_IN_SUSPEND) {
3740 dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF);
3741 }
3742 }
3743 }
3744 dhd_suspend_unlock(dhd);
3745
3746 return 0;
3747 }
3748
3749 static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force)
3750 {
3751 dhd_pub_t *dhdp = &dhd->pub;
3752 int ret = 0;
3753
3754 DHD_OS_WAKE_LOCK(dhdp);
3755 DHD_PERIM_LOCK(dhdp);
3756
3757 /* Set flag when early suspend was called */
3758 dhdp->in_suspend = val;
3759 if ((force || !dhdp->suspend_disable_flag) &&
3760 (dhd_support_sta_mode(dhdp) || dhd_conf_get_insuspend(dhdp)))
3761 {
3762 ret = dhd_set_suspend(val, dhdp);
3763 }
3764
3765 DHD_PERIM_UNLOCK(dhdp);
3766 DHD_OS_WAKE_UNLOCK(dhdp);
3767 return ret;
3768 }
3769
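/*
 * Android early-suspend glue: these callbacks simply forward to
 * dhd_suspend_resume_helper() with force == 0, so the
 * suspend_disable_flag can still veto the extra power-save settings.
 */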
3770 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
3771 static void dhd_early_suspend(struct early_suspend *h)
3772 {
3773 struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
3774 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
3775
3776 if (dhd)
3777 dhd_suspend_resume_helper(dhd, 1, 0);
3778 }
3779
3780 static void dhd_late_resume(struct early_suspend *h)
3781 {
3782 struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
3783 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
3784
3785 if (dhd)
3786 dhd_suspend_resume_helper(dhd, 0, 0);
3787 }
3788 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
3789
3790 /*
3791 * Generalized timeout mechanism. Uses spin sleep with exponential back-off until
3792 * the sleep time reaches one jiffy, then switches over to task delay. Usage:
3793 *
3794 * dhd_timeout_start(&tmo, usec);
3795 * while (!dhd_timeout_expired(&tmo))
3796 * if (poll_something())
3797 * break;
3798 * if (dhd_timeout_expired(&tmo))
3799 * fatal();
3800 */
3801
3802 void
3803 dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
3804 {
3805 tmo->limit = usec;
3806 tmo->increment = 0;
3807 tmo->elapsed = 0;
3808 tmo->tick = jiffies_to_usecs(1);
3809 }
3810
3811 int
3812 dhd_timeout_expired(dhd_timeout_t *tmo)
3813 {
3814 /* Do nothing on the first call */
3815 if (tmo->increment == 0) {
3816 tmo->increment = 1;
3817 return 0;
3818 }
3819
3820 if (tmo->elapsed >= tmo->limit)
3821 return 1;
3822
3823 /* Add the delay that's about to take place */
3824 tmo->elapsed += tmo->increment;
3825
3826 if ((!CAN_SLEEP()) || tmo->increment < tmo->tick) {
3827 OSL_DELAY(tmo->increment);
3828 tmo->increment *= 2;
3829 if (tmo->increment > tmo->tick)
3830 tmo->increment = tmo->tick;
3831 } else {
3832 wait_queue_head_t delay_wait;
3833 DECLARE_WAITQUEUE(wait, current);
3834 init_waitqueue_head(&delay_wait);
3835 add_wait_queue(&delay_wait, &wait);
3836 set_current_state(TASK_INTERRUPTIBLE);
3837 (void)schedule_timeout(1);
3838 remove_wait_queue(&delay_wait, &wait);
3839 set_current_state(TASK_RUNNING);
3840 }
3841
3842 return 0;
3843 }
3844
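/*
 * Reverse lookups between net_device pointers, interface names, bss
 * indexes and DHD interface indexes. All of them walk the fixed
 * dhd->iflist[] array of DHD_MAX_IFS entries.
 */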
3845 int
3846 dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
3847 {
3848 int i = 0;
3849
3850 if (!dhd) {
3851 DHD_ERROR(("%s : DHD_BAD_IF return\n", __FUNCTION__));
3852 return DHD_BAD_IF;
3853 }
3854
3855 while (i < DHD_MAX_IFS) {
3856 if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net))
3857 return i;
3858 i++;
3859 }
3860
3861 return DHD_BAD_IF;
3862 }
3863
3864 struct net_device * dhd_idx2net(void *pub, int ifidx)
3865 {
3866 struct dhd_pub *dhd_pub = (struct dhd_pub *)pub;
3867 struct dhd_info *dhd_info;
3868
3869 if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS)
3870 return NULL;
3871 dhd_info = dhd_pub->info;
3872 if (dhd_info && dhd_info->iflist[ifidx])
3873 return dhd_info->iflist[ifidx]->net;
3874 return NULL;
3875 }
3876
3877 int
3878 dhd_ifname2idx(dhd_info_t *dhd, char *name)
3879 {
3880 int i = DHD_MAX_IFS;
3881
3882 ASSERT(dhd);
3883
3884 if (name == NULL || *name == '\0')
3885 return 0;
3886
3887 while (--i > 0)
3888 if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->dngl_name, name, IFNAMSIZ))
3889 break;
3890
3891 DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));
3892
3893 return i; /* default - the primary interface */
3894 }
3895
3896 char *
3897 dhd_ifname(dhd_pub_t *dhdp, int ifidx)
3898 {
3899 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
3900
3901 ASSERT(dhd);
3902
3903 if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
3904 DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
3905 return "<if_bad>";
3906 }
3907
3908 if (dhd->iflist[ifidx] == NULL) {
3909 DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
3910 return "<if_null>";
3911 }
3912
3913 if (dhd->iflist[ifidx]->net)
3914 return dhd->iflist[ifidx]->net->name;
3915
3916 return "<if_none>";
3917 }
3918
3919 uint8 *
3920 dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx)
3921 {
3922 int i;
3923 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
3924
3925 ASSERT(dhd);
3926 for (i = 0; i < DHD_MAX_IFS; i++)
3927 if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx)
3928 return dhd->iflist[i]->mac_addr;
3929
3930 return NULL;
3931 }
3932
3933
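/*
 * Program the dongle multicast state for one interface in three steps:
 * (1) the "mcast_list" iovar, whose buffer is laid out as
 *     "mcast_list\0" | uint32 count (LE) | count 6-byte MAC addresses,
 * (2) the "allmulti" iovar, forced on if the address list was rejected,
 * (3) WLC_SET_PROMISC, mirroring IFF_PROMISC like a NIC driver would.
 */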
3934 static void
3935 _dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
3936 {
3937 struct net_device *dev;
3938 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
3939 struct netdev_hw_addr *ha;
3940 #else
3941 struct dev_mc_list *mclist;
3942 #endif
3943 uint32 allmulti, cnt;
3944
3945 wl_ioctl_t ioc;
3946 char *buf, *bufp;
3947 uint buflen;
3948 int ret;
3949
3950 if (!dhd->iflist[ifidx]) {
3951 DHD_ERROR(("%s : dhd->iflist[%d] was NULL\n", __FUNCTION__, ifidx));
3952 return;
3953 }
3954 dev = dhd->iflist[ifidx]->net;
3955 if (!dev)
3956 return;
3957 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3958 netif_addr_lock_bh(dev);
3959 #endif /* LINUX >= 2.6.27 */
3960 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
3961 cnt = netdev_mc_count(dev);
3962 #else
3963 cnt = dev->mc_count;
3964 #endif /* LINUX >= 2.6.35 */
3965 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3966 netif_addr_unlock_bh(dev);
3967 #endif /* LINUX >= 2.6.27 */
3968
3969 /* Determine initial value of allmulti flag */
3970 allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
3971
3972 #ifdef PASS_ALL_MCAST_PKTS
3973 #ifdef PKT_FILTER_SUPPORT
3974 if (!dhd->pub.early_suspended)
3975 #endif /* PKT_FILTER_SUPPORT */
3976 allmulti = TRUE;
3977 #endif /* PASS_ALL_MCAST_PKTS */
3978
3979 /* Send down the multicast list first. */
3980
3981
3982 buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
3983 if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
3984 DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
3985 dhd_ifname(&dhd->pub, ifidx), cnt));
3986 return;
3987 }
3988
3989 strncpy(bufp, "mcast_list", buflen - 1);
3990 bufp[buflen - 1] = '\0';
3991 bufp += strlen("mcast_list") + 1;
3992
3993 cnt = htol32(cnt);
3994 memcpy(bufp, &cnt, sizeof(cnt));
3995 bufp += sizeof(cnt);
3996
3997 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3998 netif_addr_lock_bh(dev);
3999 #endif /* LINUX >= 2.6.27 */
4000 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
4001 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
4002 #pragma GCC diagnostic push
4003 #pragma GCC diagnostic ignored "-Wcast-qual"
4004 #endif
4005 netdev_for_each_mc_addr(ha, dev) {
4006 if (!cnt)
4007 break;
4008 memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
4009 bufp += ETHER_ADDR_LEN;
4010 cnt--;
4011 }
4012 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
4013 #pragma GCC diagnostic pop
4014 #endif
4015 #else /* LINUX < 2.6.35 */
4016 for (mclist = dev->mc_list; (mclist && (cnt > 0));
4017 cnt--, mclist = mclist->next) {
4018 memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
4019 bufp += ETHER_ADDR_LEN;
4020 }
4021 #endif /* LINUX >= 2.6.35 */
4022 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
4023 netif_addr_unlock_bh(dev);
4024 #endif /* LINUX >= 2.6.27 */
4025
4026 memset(&ioc, 0, sizeof(ioc));
4027 ioc.cmd = WLC_SET_VAR;
4028 ioc.buf = buf;
4029 ioc.len = buflen;
4030 ioc.set = TRUE;
4031
4032 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
4033 if (ret < 0) {
4034 DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
4035 dhd_ifname(&dhd->pub, ifidx), cnt));
4036 allmulti = cnt ? TRUE : allmulti;
4037 }
4038
4039 MFREE(dhd->pub.osh, buf, buflen);
4040
4041 /* Now send the allmulti setting. This is based on the setting in the
4042 * net_device flags, but might be modified above to be turned on if we
4043 * were trying to set some addresses and dongle rejected it...
4044 */
4045
4046 allmulti = htol32(allmulti);
4047 ret = dhd_iovar(&dhd->pub, ifidx, "allmulti", (char *)&allmulti,
4048 sizeof(allmulti), NULL, 0, TRUE);
4049 if (ret < 0) {
4050 DHD_ERROR(("%s: set allmulti %d failed\n",
4051 dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
4052 }
4053
4054 /* Finally, pick up the PROMISC flag as well, like the NIC driver does */
4055
4056 allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
4057
4058 allmulti = htol32(allmulti);
4059
4060 memset(&ioc, 0, sizeof(ioc));
4061 ioc.cmd = WLC_SET_PROMISC;
4062 ioc.buf = &allmulti;
4063 ioc.len = sizeof(allmulti);
4064 ioc.set = TRUE;
4065
4066 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
4067 if (ret < 0) {
4068 DHD_ERROR(("%s: set promisc %d failed\n",
4069 dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
4070 }
4071 }
4072
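/*
 * Write a new MAC address through the "cur_etheraddr" iovar and, on
 * success, mirror it into the net_device (and dhd->pub.mac for ifidx 0)
 * so that host and dongle state stay consistent.
 */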
4073 int
4074 _dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr)
4075 {
4076 int ret;
4077
4078 ret = dhd_iovar(&dhd->pub, ifidx, "cur_etheraddr", (char *)addr,
4079 ETHER_ADDR_LEN, NULL, 0, TRUE);
4080 if (ret < 0) {
4081 DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
4082 } else {
4083 memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
4084 if (ifidx == 0)
4085 memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
4086 }
4087
4088 return ret;
4089 }
4090
4091 #ifdef DHD_WMF
4092 void dhd_update_psta_interface_for_sta(dhd_pub_t* dhdp, char* ifname, void* ea,
4093 void* event_data)
4094 {
4095 struct wl_psta_primary_intf_event *psta_prim_event =
4096 (struct wl_psta_primary_intf_event*)event_data;
4097 dhd_sta_t *psta_interface = NULL;
4098 dhd_sta_t *sta = NULL;
4099 uint8 ifindex;
4100 ASSERT(ifname);
4101 ASSERT(psta_prim_event);
4102 ASSERT(ea);
4103
4104 ifindex = (uint8)dhd_ifname2idx(dhdp->info, ifname);
4105 sta = dhd_find_sta(dhdp, ifindex, ea);
4106 if (sta != NULL) {
4107 psta_interface = dhd_find_sta(dhdp, ifindex,
4108 (void *)(psta_prim_event->prim_ea.octet));
4109 if (psta_interface != NULL) {
4110 sta->psta_prim = psta_interface;
4111 }
4112 }
4113 }
4114
4115 /* Get wmf_psta_disable configuration */
4116 int dhd_get_wmf_psta_disable(dhd_pub_t *dhdp, uint32 idx)
4117 {
4118 dhd_info_t *dhd = dhdp->info;
4119 dhd_if_t *ifp;
4120 ASSERT(idx < DHD_MAX_IFS);
4121 ifp = dhd->iflist[idx];
4122 return ifp->wmf_psta_disable;
4123 }
4124
4125 /* Set wmf_psta_disable configuration */
4126 int dhd_set_wmf_psta_disable(dhd_pub_t *dhdp, uint32 idx, int val)
4127 {
4128 dhd_info_t *dhd = dhdp->info;
4129 dhd_if_t *ifp;
4130 ASSERT(idx < DHD_MAX_IFS);
4131 ifp = dhd->iflist[idx];
4132 ifp->wmf_psta_disable = val;
4133 return 0;
4134 }
4135 #endif /* DHD_WMF */
4136
4137 #ifdef DHD_PSTA
4138 /* Get psta/psr configuration */
4139 int dhd_get_psta_mode(dhd_pub_t *dhdp)
4140 {
4141 dhd_info_t *dhd = dhdp->info;
4142 return (int)dhd->psta_mode;
4143 }
4144 /* Set psta/psr configuration */
4145 int dhd_set_psta_mode(dhd_pub_t *dhdp, uint32 val)
4146 {
4147 dhd_info_t *dhd = dhdp->info;
4148 dhd->psta_mode = val;
4149 return 0;
4150 }
4151 #endif /* DHD_PSTA */
4152
4153 #if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER))
4154 static void
4155 dhd_update_rx_pkt_chainable_state(dhd_pub_t* dhdp, uint32 idx)
4156 {
4157 dhd_info_t *dhd = dhdp->info;
4158 dhd_if_t *ifp;
4159
4160 ASSERT(idx < DHD_MAX_IFS);
4161
4162 ifp = dhd->iflist[idx];
4163
4164 if (
4165 #ifdef DHD_L2_FILTER
4166 (ifp->block_ping) ||
4167 #endif
4168 #ifdef DHD_WET
4169 (dhd->wet_mode) ||
4170 #endif
4171 #ifdef DHD_MCAST_REGEN
4172 (ifp->mcast_regen_bss_enable) ||
4173 #endif
4174 FALSE) {
4175 ifp->rx_pkt_chainable = FALSE;
4176 }
4177 }
4178 #endif /* DHD_WET || DHD_MCAST_REGEN || DHD_L2_FILTER */
4179
4180 #ifdef DHD_WET
4181 /* Get wet configuration */
4182 int dhd_get_wet_mode(dhd_pub_t *dhdp)
4183 {
4184 dhd_info_t *dhd = dhdp->info;
4185 return (int)dhd->wet_mode;
4186 }
4187
4188 /* Set wet configuration */
4189 int dhd_set_wet_mode(dhd_pub_t *dhdp, uint32 val)
4190 {
4191 dhd_info_t *dhd = dhdp->info;
4192 dhd->wet_mode = val;
4193 dhd_update_rx_pkt_chainable_state(dhdp, 0);
4194 return 0;
4195 }
4196 #endif /* DHD_WET */
4197
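/*
 * Map firmware interface roles (WLC_E_IF_ROLE_*) onto nl80211 interface
 * types for cfg80211; note that IBSS and NAN are both folded into
 * NL80211_IFTYPE_ADHOC here.
 */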
4198 #if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
4199 int32 dhd_role_to_nl80211_iftype(int32 role)
4200 {
4201 switch (role) {
4202 case WLC_E_IF_ROLE_STA:
4203 return NL80211_IFTYPE_STATION;
4204 case WLC_E_IF_ROLE_AP:
4205 return NL80211_IFTYPE_AP;
4206 case WLC_E_IF_ROLE_WDS:
4207 return NL80211_IFTYPE_WDS;
4208 case WLC_E_IF_ROLE_P2P_GO:
4209 return NL80211_IFTYPE_P2P_GO;
4210 case WLC_E_IF_ROLE_P2P_CLIENT:
4211 return NL80211_IFTYPE_P2P_CLIENT;
4212 case WLC_E_IF_ROLE_IBSS:
4213 case WLC_E_IF_ROLE_NAN:
4214 return NL80211_IFTYPE_ADHOC;
4215 default:
4216 return NL80211_IFTYPE_UNSPECIFIED;
4217 }
4218 }
4219 #endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
4220
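/*
 * The if-add/if-del(/if-update) handlers below run from the DHD deferred
 * work queue rather than from the event path itself, since registering or
 * unregistering a net_device may sleep. Each handler validates the work
 * type, takes the net_if/wake/perimeter locks, performs the operation,
 * and frees the dhd_if_event_t it was handed.
 */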
4221 static void
4222 dhd_ifadd_event_handler(void *handle, void *event_info, u8 event)
4223 {
4224 dhd_info_t *dhd = handle;
4225 dhd_if_event_t *if_event = event_info;
4226 struct net_device *ndev;
4227 int ifidx, bssidx;
4228 int ret;
4229 #if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
4230 struct wl_if_event_info info;
4231 #endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
4232
4233 if (event != DHD_WQ_WORK_IF_ADD) {
4234 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
4235 return;
4236 }
4237
4238 if (!dhd) {
4239 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
4240 return;
4241 }
4242
4243 if (!if_event) {
4244 DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
4245 return;
4246 }
4247
4248 dhd_net_if_lock_local(dhd);
4249 DHD_OS_WAKE_LOCK(&dhd->pub);
4250 DHD_PERIM_LOCK(&dhd->pub);
4251
4252 ifidx = if_event->event.ifidx;
4253 bssidx = if_event->event.bssidx;
4254 DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx));
4255
4256
4257 #if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
4258 if (if_event->event.ifidx > 0) {
4259 bzero(&info, sizeof(info));
4260 info.ifidx = if_event->event.ifidx;
4261 info.bssidx = if_event->event.bssidx;
4262 info.role = if_event->event.role;
4263 strncpy(info.name, if_event->name, IFNAMSIZ);
4264 if (wl_cfg80211_post_ifcreate(dhd->pub.info->iflist[0]->net,
4265 &info, if_event->mac, NULL, true) != NULL) {
4266 /* Do the post interface create ops */
4267 DHD_ERROR(("Post ifcreate ops done. Returning \n"));
4268 goto done;
4269 }
4270 }
4271 #endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
4272
4273 /* This path is for the non-Android case: the interface name on the host */
4274 /* and in the event message are the same, and that name is used to create */
4275 /* the dongle interface list on the host. */
4276 ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name,
4277 if_event->mac, bssidx, TRUE, if_event->name);
4278 if (!ndev) {
4279 DHD_ERROR(("%s: net device alloc failed \n", __FUNCTION__));
4280 goto done;
4281 }
4282
4283 DHD_PERIM_UNLOCK(&dhd->pub);
4284 ret = dhd_register_if(&dhd->pub, ifidx, TRUE);
4285 DHD_PERIM_LOCK(&dhd->pub);
4286 if (ret != BCME_OK) {
4287 DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__));
4288 dhd_remove_if(&dhd->pub, ifidx, TRUE);
4289 goto done;
4290 }
4291 #ifndef PCIE_FULL_DONGLE
4292 /* Turn on AP isolation in the firmware for interfaces operating in AP mode */
4293 if (FW_SUPPORTED((&dhd->pub), ap) && (if_event->event.role != WLC_E_IF_ROLE_STA)) {
4294 uint32 var_int = 1;
4295 ret = dhd_iovar(&dhd->pub, ifidx, "ap_isolate", (char *)&var_int, sizeof(var_int),
4296 NULL, 0, TRUE);
4297 if (ret != BCME_OK) {
4298 DHD_ERROR(("%s: Failed to set ap_isolate to dongle\n", __FUNCTION__));
4299 dhd_remove_if(&dhd->pub, ifidx, TRUE);
4300 }
4301 }
4302 #endif /* PCIE_FULL_DONGLE */
4303
4304 done:
4305 MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
4306
4307 DHD_PERIM_UNLOCK(&dhd->pub);
4308 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4309 dhd_net_if_unlock_local(dhd);
4310 }
4311
4312 static void
4313 dhd_ifdel_event_handler(void *handle, void *event_info, u8 event)
4314 {
4315 dhd_info_t *dhd = handle;
4316 int ifidx;
4317 dhd_if_event_t *if_event = event_info;
4318
4319
4320 if (event != DHD_WQ_WORK_IF_DEL) {
4321 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
4322 return;
4323 }
4324
4325 if (!dhd) {
4326 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
4327 return;
4328 }
4329
4330 if (!if_event) {
4331 DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
4332 return;
4333 }
4334
4335 dhd_net_if_lock_local(dhd);
4336 DHD_OS_WAKE_LOCK(&dhd->pub);
4337 DHD_PERIM_LOCK(&dhd->pub);
4338
4339 ifidx = if_event->event.ifidx;
4340 DHD_TRACE(("Removing interface with idx %d\n", ifidx));
4341
4342 DHD_PERIM_UNLOCK(&dhd->pub);
4343 #if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
4344 if (if_event->event.ifidx > 0) {
4345 /* Do the post interface del ops */
4346 if (wl_cfg80211_post_ifdel(dhd->pub.info->iflist[ifidx]->net, true) == 0) {
4347 DHD_TRACE(("Post ifdel ops done. Returning \n"));
4348 DHD_PERIM_LOCK(&dhd->pub);
4349 goto done;
4350 }
4351 }
4352 #endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
4353
4354 dhd_remove_if(&dhd->pub, ifidx, TRUE);
4355 DHD_PERIM_LOCK(&dhd->pub);
4356
4357 #if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
4358 done:
4359 #endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
4360 MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
4361
4362 DHD_PERIM_UNLOCK(&dhd->pub);
4363 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4364 dhd_net_if_unlock_local(dhd);
4365 }
4366
4367 #ifdef DHD_UPDATE_INTF_MAC
4368 static void
4369 dhd_ifupdate_event_handler(void *handle, void *event_info, u8 event)
4370 {
4371 dhd_info_t *dhd = handle;
4372 int ifidx;
4373 dhd_if_event_t *if_event = event_info;
4374
4375 if (event != DHD_WQ_WORK_IF_UPDATE) {
4376 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
4377 return;
4378 }
4379
4380 if (!dhd) {
4381 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
4382 return;
4383 }
4384
4385 if (!if_event) {
4386 DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
4387 return;
4388 }
4389
4390 dhd_net_if_lock_local(dhd);
4391 DHD_OS_WAKE_LOCK(&dhd->pub);
4392
4393 ifidx = if_event->event.ifidx;
4394 DHD_TRACE(("%s: Update interface with idx %d\n", __FUNCTION__, ifidx));
4395
4396 dhd_op_if_update(&dhd->pub, ifidx);
4397
4398 MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
4399
4400 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4401 dhd_net_if_unlock_local(dhd);
4402 }
4403
4404 int dhd_op_if_update(dhd_pub_t *dhdpub, int ifidx)
4405 {
4406 dhd_info_t *dhdinfo = NULL;
4407 dhd_if_t *ifp = NULL;
4408 int ret = 0;
4409 char buf[128];
4410
4411 if ((NULL == dhdpub) || (NULL == dhdpub->info)) {
4412 DHD_ERROR(("%s: *** DHD handler is NULL!\n", __FUNCTION__));
4413 return -1;
4414 } else {
4415 dhdinfo = (dhd_info_t *)dhdpub->info;
4416 ifp = dhdinfo->iflist[ifidx];
4417 if (NULL == ifp) {
4418 DHD_ERROR(("%s: *** ifp handler is NULL!\n", __FUNCTION__));
4419 return -2;
4420 }
4421 }
4422
4423 DHD_TRACE(("%s: idx %d\n", __FUNCTION__, ifidx));
4424 // Get MAC address
4425 strcpy(buf, "cur_etheraddr");
4426 ret = dhd_wl_ioctl_cmd(&dhdinfo->pub, WLC_GET_VAR, buf, sizeof(buf), FALSE, ifp->idx);
4427 if (0 > ret) {
4428 DHD_ERROR(("Failed to upudate the MAC address for itf=%s, ret=%d\n", ifp->name, ret));
4429 // avoid collision
4430 dhdinfo->iflist[ifp->idx]->mac_addr[5] += 1;
4431 // force locally administered address
4432 ETHER_SET_LOCALADDR(&dhdinfo->iflist[ifp->idx]->mac_addr);
4433 } else {
4434 DHD_EVENT(("Got mac for itf %s, idx %d, MAC=%02X:%02X:%02X:%02X:%02X:%02X\n",
4435 ifp->name, ifp->idx,
4436 (unsigned char)buf[0], (unsigned char)buf[1], (unsigned char)buf[2],
4437 (unsigned char)buf[3], (unsigned char)buf[4], (unsigned char)buf[5]));
4438 memcpy(dhdinfo->iflist[ifp->idx]->mac_addr, buf, ETHER_ADDR_LEN);
4439 if (dhdinfo->iflist[ifp->idx]->net) {
4440 memcpy(dhdinfo->iflist[ifp->idx]->net->dev_addr, buf, ETHER_ADDR_LEN);
4441 }
4442 }
4443
4444 return ret;
4445 }
4446 #endif /* DHD_UPDATE_INTF_MAC */
4447
4448 static void
4449 dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event)
4450 {
4451 dhd_info_t *dhd = handle;
4452 dhd_if_t *ifp = event_info;
4453
4454 if (event != DHD_WQ_WORK_SET_MAC) {
4455 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
4456 }
4457
4458 if (!dhd) {
4459 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
4460 return;
4461 }
4462
4463 dhd_net_if_lock_local(dhd);
4464 DHD_OS_WAKE_LOCK(&dhd->pub);
4465 DHD_PERIM_LOCK(&dhd->pub);
4466
4467 // terence 20160907: fix for being unable to set MAC while wlan0 is down
4468 if (ifp == NULL || !ifp->set_macaddress) {
4469 goto done;
4470 }
4471 if (ifp == NULL || !dhd->pub.up) {
4472 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
4473 goto done;
4474 }
4475
4476 DHD_ERROR(("%s: MACID is overwritten\n", __FUNCTION__));
4477 ifp->set_macaddress = FALSE;
4478 if (_dhd_set_mac_address(dhd, ifp->idx, ifp->mac_addr) == 0)
4479 DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
4480 else
4481 DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
4482
4483 done:
4484 DHD_PERIM_UNLOCK(&dhd->pub);
4485 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4486 dhd_net_if_unlock_local(dhd);
4487 }
4488
4489 static void
4490 dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event)
4491 {
4492 dhd_info_t *dhd = handle;
4493 int ifidx = (int)((long int)event_info);
4494 dhd_if_t *ifp = NULL;
4495
4496 if (event != DHD_WQ_WORK_SET_MCAST_LIST) {
4497 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
4498 return;
4499 }
4500
4501 if (!dhd) {
4502 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
4503 return;
4504 }
4505
4506 dhd_net_if_lock_local(dhd);
4507 DHD_OS_WAKE_LOCK(&dhd->pub);
4508 DHD_PERIM_LOCK(&dhd->pub);
4509
4510 ifp = dhd->iflist[ifidx];
4511
4512 if (ifp == NULL || !dhd->pub.up) {
4513 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
4514 goto done;
4515 }
4521
4522 ifidx = ifp->idx;
4523
4524
4525 _dhd_set_multicast_list(dhd, ifidx);
4526 DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__, ifidx));
4527
4528 done:
4529 DHD_PERIM_UNLOCK(&dhd->pub);
4530 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4531 dhd_net_if_unlock_local(dhd);
4532 }
4533
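/*
 * net_device_ops entry points. Both may be invoked from contexts where a
 * (possibly sleeping) firmware ioctl is unsafe, so they only record the
 * request in the dhd_if_t and defer the real work to the handlers above.
 */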
4534 static int
4535 dhd_set_mac_address(struct net_device *dev, void *addr)
4536 {
4537 int ret = 0;
4538
4539 dhd_info_t *dhd = DHD_DEV_INFO(dev);
4540 struct sockaddr *sa = (struct sockaddr *)addr;
4541 int ifidx;
4542 dhd_if_t *dhdif;
4543
4544 ifidx = dhd_net2idx(dhd, dev);
4545 if (ifidx == DHD_BAD_IF)
4546 return -1;
4547
4548 dhdif = dhd->iflist[ifidx];
4549
4550 dhd_net_if_lock_local(dhd);
4551 memcpy(dhdif->mac_addr, sa->sa_data, ETHER_ADDR_LEN);
4552 dhdif->set_macaddress = TRUE;
4553 dhd_net_if_unlock_local(dhd);
4554 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhdif, DHD_WQ_WORK_SET_MAC,
4555 dhd_set_mac_addr_handler, DHD_WQ_WORK_PRIORITY_LOW);
4556 return ret;
4557 }
4558
4559 static void
4560 dhd_set_multicast_list(struct net_device *dev)
4561 {
4562 dhd_info_t *dhd = DHD_DEV_INFO(dev);
4563 int ifidx;
4564
4565 ifidx = dhd_net2idx(dhd, dev);
4566 if (ifidx == DHD_BAD_IF)
4567 return;
4568
4569 dhd->iflist[ifidx]->set_multicast = TRUE;
4570 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)((long int)ifidx),
4571 DHD_WQ_WORK_SET_MCAST_LIST, dhd_set_mcast_list_handler, DHD_WQ_WORK_PRIORITY_LOW);
4572
4573 // terence 20160907: fix for being unable to set MAC while wlan0 is down
4574 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhd->iflist[ifidx],
4575 DHD_WQ_WORK_SET_MAC, dhd_set_mac_addr_handler, DHD_WQ_WORK_PRIORITY_LOW);
4576 }
4577
4578 #ifdef DHD_UCODE_DOWNLOAD
4579 /* Get ucode path */
4580 char *
4581 dhd_get_ucode_path(dhd_pub_t *dhdp)
4582 {
4583 dhd_info_t *dhd = dhdp->info;
4584 return dhd->uc_path;
4585 }
4586 #endif /* DHD_UCODE_DOWNLOAD */
4587
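/*
 * Serialization for proptxstatus (wlfc) state. BCMDBUS builds take the
 * IRQ-safe form of the lock, presumably because callers can run in
 * hard-IRQ context on that bus; otherwise a bottom-half spinlock
 * suffices. Both helpers return 1 so callers can use them as booleans.
 */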
4588 #ifdef PROP_TXSTATUS
4589 int
4590 dhd_os_wlfc_block(dhd_pub_t *pub)
4591 {
4592 dhd_info_t *di = (dhd_info_t *)(pub->info);
4593 ASSERT(di != NULL);
4594 /* terence 20161229: don't do spin lock if proptx not enabled */
4595 if (disable_proptx)
4596 return 1;
4597 #ifdef BCMDBUS
4598 spin_lock_irqsave(&di->wlfc_spinlock, di->wlfc_lock_flags);
4599 #else
4600 spin_lock_bh(&di->wlfc_spinlock);
4601 #endif /* BCMDBUS */
4602 return 1;
4603 }
4604
4605 int
4606 dhd_os_wlfc_unblock(dhd_pub_t *pub)
4607 {
4608 dhd_info_t *di = (dhd_info_t *)(pub->info);
4609
4610 ASSERT(di != NULL);
4611 /* terence 20161229: don't do spin lock if proptx not enabled */
4612 if (disable_proptx)
4613 return 1;
4614 #ifdef BCMDBUS
4615 spin_unlock_irqrestore(&di->wlfc_spinlock, di->wlfc_lock_flags);
4616 #else
4617 spin_unlock_bh(&di->wlfc_spinlock);
4618 #endif /* BCMDBUS */
4619 return 1;
4620 }
4621
4622 #endif /* PROP_TXSTATUS */
4623
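/*
 * Human-readable packet-type tagging for the TX/RX dump hooks below;
 * unknown ethertypes fall through to the empty-string sentinel at the
 * end of the table.
 */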
4624 #if defined(DHD_RX_DUMP) || defined(DHD_TX_DUMP)
4625 typedef struct {
4626 uint16 type;
4627 const char *str;
4628 } PKTTYPE_INFO;
4629
4630 static const PKTTYPE_INFO packet_type_info[] =
4631 {
4632 { ETHER_TYPE_IP, "IP" },
4633 { ETHER_TYPE_ARP, "ARP" },
4634 { ETHER_TYPE_BRCM, "BRCM" },
4635 { ETHER_TYPE_802_1X, "802.1X" },
4636 { ETHER_TYPE_WAI, "WAPI" },
4637 { 0, ""}
4638 };
4639
4640 static const char *_get_packet_type_str(uint16 type)
4641 {
4642 int i;
4643 int n = sizeof(packet_type_info)/sizeof(packet_type_info[0]) - 1;
4644
4645 for (i = 0; i < n; i++) {
4646 if (packet_type_info[i].type == type)
4647 return packet_type_info[i].str;
4648 }
4649
4650 return packet_type_info[n].str;
4651 }
4652
4653 void
4654 dhd_trx_dump(struct net_device *ndev, uint8 *dump_data, uint datalen, bool tx)
4655 {
4656 uint16 protocol;
4657 char *ifname;
4658
4659 protocol = (dump_data[12] << 8) | dump_data[13];
4660 ifname = ndev ? ndev->name : "N/A";
4661
4662 if (protocol != ETHER_TYPE_BRCM) {
4663 DHD_ERROR(("%s DUMP[%s] - %s\n", tx?"Tx":"Rx", ifname,
4664 _get_packet_type_str(protocol)));
4665 #if defined(DHD_TX_FULL_DUMP) || defined(DHD_RX_FULL_DUMP)
4666 prhex("Data", dump_data, datalen);
4667 #endif /* DHD_TX_FULL_DUMP || DHD_RX_FULL_DUMP */
4668 }
4669 }
4670 #endif /* DHD_TX_DUMP || DHD_RX_DUMP */
4671
4672 /* This routine does not support the packet chain feature; it is currently
4673  * tested only with the proxy ARP feature.
4674  */
4675 int dhd_sendup(dhd_pub_t *dhdp, int ifidx, void *p)
4676 {
4677 struct sk_buff *skb;
4678 void *skbhead = NULL;
4679 void *skbprev = NULL;
4680 dhd_if_t *ifp;
4681 ASSERT(!PKTISCHAINED(p));
4682 skb = PKTTONATIVE(dhdp->osh, p);
4683
4684 ifp = dhdp->info->iflist[ifidx];
4685 skb->dev = ifp->net;
4686 #if defined(BCM_GMAC3)
4687 /* Forwarder capable interfaces use WOFA based forwarding */
4688 if (ifp->fwdh) {
4689 struct ether_header *eh = (struct ether_header *)PKTDATA(dhdp->osh, p);
4690 uint16 * da = (uint16 *)(eh->ether_dhost);
4691 uintptr_t wofa_data;
4692 ASSERT(ISALIGNED(da, 2));
4693
4694 wofa_data = fwder_lookup(ifp->fwdh->mate, da, ifp->idx);
4695 if (wofa_data == WOFA_DATA_INVALID) { /* Unknown MAC address */
4696 if (fwder_transmit(ifp->fwdh, skb, 1, skb->dev) == FWDER_SUCCESS) {
4697 return BCME_OK;
4698 }
4699 }
4700 PKTFRMNATIVE(dhdp->osh, p);
4701 PKTFREE(dhdp->osh, p, FALSE);
4702 return BCME_OK;
4703 }
4704 #endif /* BCM_GMAC3 */
4705
4706 skb->protocol = eth_type_trans(skb, skb->dev);
4707
4708 if (in_interrupt()) {
4709 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
4710 __FUNCTION__, __LINE__);
4711 netif_rx(skb);
4712 } else {
4713 if (dhdp->info->rxthread_enabled) {
4714 if (!skbhead) {
4715 skbhead = skb;
4716 } else {
4717 PKTSETNEXT(dhdp->osh, skbprev, skb);
4718 }
4719 skbprev = skb;
4720 } else {
4721 /* If the receive is not processed inside an ISR,
4722 * the softirqd must be woken explicitly to service
4723 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
4724 * by netif_rx_ni(), but in earlier kernels, we need
4725 * to do it manually.
4726 */
4727 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
4728 __FUNCTION__, __LINE__);
4729 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
4730 netif_rx_ni(skb);
4731 #else
4732 ulong flags;
4733 netif_rx(skb);
4734 local_irq_save(flags);
4735 RAISE_RX_SOFTIRQ();
4736 local_irq_restore(flags);
4737 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
4738 }
4739 }
4740
4741 if (dhdp->info->rxthread_enabled && skbhead)
4742 dhd_sched_rxf(dhdp, skbhead);
4743
4744 return BCME_OK;
4745 }
4746
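/*
 * Core transmit path. In order: reject if the bus is down or suspended,
 * optionally rewrite DHCP/gratuitous-ARP frames (DHD_L2_FILTER), update
 * multicast and 802.1X accounting, assign a packet priority, map the
 * packet to a flowring (PCIE_FULL_DONGLE), then hand it to proptxstatus
 * or directly to dhd_bus_txdata().
 */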
4747 int BCMFASTPATH
4748 __dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
4749 {
4750 int ret = BCME_OK;
4751 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
4752 struct ether_header *eh = NULL;
4753 #if defined(DHD_L2_FILTER)
4754 dhd_if_t *ifp = dhd_get_ifp(dhdp, ifidx);
4755 #endif
4756
4757 /* Reject if down */
4758 if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
4759 /* free the packet here since the caller won't */
4760 PKTCFREE(dhdp->osh, pktbuf, TRUE);
4761 return -ENODEV;
4762 }
4763
4764 #ifdef PCIE_FULL_DONGLE
4765 if (dhdp->busstate == DHD_BUS_SUSPEND) {
4766 DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
4767 PKTCFREE(dhdp->osh, pktbuf, TRUE);
4768 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
4769 return -ENODEV;
4770 #else
4771 return NETDEV_TX_BUSY;
4772 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) */
4773 }
4774 #endif /* PCIE_FULL_DONGLE */
4775
4776 #ifdef DHD_L2_FILTER
4777 /* if dhcp_unicast is enabled, we need to convert the */
4778 /* broadcast DHCP ACK/REPLY packets to Unicast. */
4779 if (ifp->dhcp_unicast) {
4780 uint8* mac_addr;
4781 uint8* ehptr = NULL;
4782 int ret;
4783 ret = bcm_l2_filter_get_mac_addr_dhcp_pkt(dhdp->osh, pktbuf, ifidx, &mac_addr);
4784 if (ret == BCME_OK) {
4785 /* if the given MAC address has a valid entry in the STA list,
4786  * copy it into the destination field and continue with BCME_OK
4787  */
4788 if (dhd_find_sta(dhdp, ifidx, mac_addr)) {
4789 ehptr = PKTDATA(dhdp->osh, pktbuf);
4790 bcopy(mac_addr, ehptr + ETHER_DEST_OFFSET, ETHER_ADDR_LEN);
4791 }
4792 }
4793 }
4794
4795 if (ifp->grat_arp && DHD_IF_ROLE_AP(dhdp, ifidx)) {
4796 if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
4797 PKTCFREE(dhdp->osh, pktbuf, TRUE);
4798 return BCME_ERROR;
4799 }
4800 }
4801
4802 if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
4803 ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, TRUE);
4804
4805 /* Drop the packets if l2 filter has processed it already
4806 * otherwise continue with the normal path
4807 */
4808 if (ret == BCME_OK) {
4809 PKTCFREE(dhdp->osh, pktbuf, TRUE);
4810 return BCME_ERROR;
4811 }
4812 }
4813 #endif /* DHD_L2_FILTER */
4814 /* Update multicast statistic */
4815 if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) {
4816 uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
4817 eh = (struct ether_header *)pktdata;
4818
4819 if (ETHER_ISMULTI(eh->ether_dhost))
4820 dhdp->tx_multicast++;
4821 if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X) {
4822 #ifdef DHD_LOSSLESS_ROAMING
4823 uint8 prio = (uint8)PKTPRIO(pktbuf);
4824
4825 /* back up 802.1x's priority */
4826 dhdp->prio_8021x = prio;
4827 #endif /* DHD_LOSSLESS_ROAMING */
4828 DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED);
4829 atomic_inc(&dhd->pend_8021x_cnt);
4830 dhd_dump_eapol_4way_message(dhdp, dhd_ifname(dhdp, ifidx), pktdata, TRUE);
4831 }
4832
4833 if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) {
4834 #ifdef DHD_DHCP_DUMP
4835 dhd_dhcp_dump(dhd_ifname(dhdp, ifidx), pktdata, TRUE);
4836 #endif /* DHD_DHCP_DUMP */
4837 #ifdef DHD_ICMP_DUMP
4838 dhd_icmp_dump(dhd_ifname(dhdp, ifidx), pktdata, TRUE);
4839 #endif /* DHD_ICMP_DUMP */
4840 }
4841 } else {
4842 PKTCFREE(dhdp->osh, pktbuf, TRUE);
4843 return BCME_ERROR;
4844 }
4845
4846 {
4847 /* Look into the packet and update the packet priority */
4848 #ifndef PKTPRIO_OVERRIDE
4849 if (PKTPRIO(pktbuf) == 0)
4850 #endif /* !PKTPRIO_OVERRIDE */
4851 {
4852 #if defined(QOS_MAP_SET)
4853 pktsetprio_qms(pktbuf, wl_get_up_table(dhdp, ifidx), FALSE);
4854 #else
4855 pktsetprio(pktbuf, FALSE);
4856 #endif /* QOS_MAP_SET */
4857 }
4858 }
4859
4860
4861 #if defined(TRAFFIC_MGMT_DWM)
4862 traffic_mgmt_pkt_set_prio(dhdp, pktbuf);
4863
4864 #ifdef BCM_GMAC3
4865 DHD_PKT_SET_DATAOFF(pktbuf, 0);
4866 #endif /* BCM_GMAC3 */
4867 #endif
4868
4869 #ifdef PCIE_FULL_DONGLE
4870 /*
4871 * Look up the per-interface hash table for a matching flowring. If one is not
4872 * available, allocate a unique flowid and add a flowring entry.
4873 * The found or newly created flowid is placed into the pktbuf's tag.
4874 */
4875 ret = dhd_flowid_update(dhdp, ifidx, dhdp->flow_prio_map[(PKTPRIO(pktbuf))], pktbuf);
4876 if (ret != BCME_OK) {
4877 PKTCFREE(dhd->pub.osh, pktbuf, TRUE);
4878 return ret;
4879 }
4880 #endif
4881
4882 #if defined(DHD_TX_DUMP)
4883 dhd_trx_dump(dhd_idx2net(dhdp, ifidx), PKTDATA(dhdp->osh, pktbuf),
4884 PKTLEN(dhdp->osh, pktbuf), TRUE);
4885 #endif
4886 /* terence 20150901: Micky's change to adjust the 802.1X priority */
4887 /* Set the 802.1X packet with the highest priority 7 */
4888 if (dhdp->conf->pktprio8021x >= 0)
4889 pktset8021xprio(pktbuf, dhdp->conf->pktprio8021x);
4890
4891 #ifdef PROP_TXSTATUS
4892 if (dhd_wlfc_is_supported(dhdp)) {
4893 /* store the interface ID */
4894 DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx);
4895
4896 /* store destination MAC in the tag as well */
4897 DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost);
4898
4899 /* decide which FIFO this packet belongs to */
4900 if (ETHER_ISMULTI(eh->ether_dhost))
4901 /* one additional queue index (highest AC + 1) is used for bc/mc queue */
4902 DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT);
4903 else
4904 DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf)));
4905 } else
4906 #endif /* PROP_TXSTATUS */
4907 {
4908 /* If the protocol uses a data header, apply it */
4909 dhd_prot_hdrpush(dhdp, ifidx, pktbuf);
4910 }
4911
4912 /* Use bus module to send data frame */
4913 #ifdef WLMEDIA_HTSF
4914 dhd_htsf_addtxts(dhdp, pktbuf);
4915 #endif
4916 #ifdef PROP_TXSTATUS
4917 {
4918 if (dhd_wlfc_commit_packets(dhdp, (f_commitpkt_t)dhd_bus_txdata,
4919 dhdp->bus, pktbuf, TRUE) == WLFC_UNSUPPORTED) {
4920 /* non-proptxstatus way */
4921 #ifdef BCMPCIE
4922 ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
4923 #else
4924 ret = dhd_bus_txdata(dhdp->bus, pktbuf);
4925 #endif /* BCMPCIE */
4926 }
4927 }
4928 #else
4929 #ifdef BCMPCIE
4930 ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
4931 #else
4932 ret = dhd_bus_txdata(dhdp->bus, pktbuf);
4933 #endif /* BCMPCIE */
4934 #endif /* PROP_TXSTATUS */
4935 #ifdef BCMDBUS
4936 if (ret)
4937 PKTCFREE(dhdp->osh, pktbuf, TRUE);
4938 #endif /* BCMDBUS */
4939
4940 return ret;
4941 }
4942
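/*
 * Locked wrapper around __dhd_sendpkt(): marks the bus busy-in-send-pkt,
 * optionally wakes a runtime-suspended PCIe bus, and re-checks the bus
 * state before committing the packet. Every error path frees pktbuf
 * itself, since the caller will not.
 */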
4943 int BCMFASTPATH
4944 dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
4945 {
4946 int ret = 0;
4947 unsigned long flags;
4948
4949 DHD_GENERAL_LOCK(dhdp, flags);
4950 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
4951 DHD_ERROR(("%s: returning as busstate=%d\n",
4952 __FUNCTION__, dhdp->busstate));
4953 DHD_GENERAL_UNLOCK(dhdp, flags);
4954 PKTCFREE(dhdp->osh, pktbuf, TRUE);
4955 return -ENODEV;
4956 }
4957 DHD_BUS_BUSY_SET_IN_SEND_PKT(dhdp);
4958 DHD_GENERAL_UNLOCK(dhdp, flags);
4959
4960 #ifdef DHD_PCIE_RUNTIMEPM
4961 if (dhdpcie_runtime_bus_wake(dhdp, FALSE, __builtin_return_address(0))) {
4962 DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
4963 PKTCFREE(dhdp->osh, pktbuf, TRUE);
4964 ret = -EBUSY;
4965 goto exit;
4966 }
4967 #endif /* DHD_PCIE_RUNTIMEPM */
4968
4969 DHD_GENERAL_LOCK(dhdp, flags);
4970 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) {
4971 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
4972 __FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
4973 DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp);
4974 dhd_os_busbusy_wake(dhdp);
4975 DHD_GENERAL_UNLOCK(dhdp, flags);
4976 PKTCFREE(dhdp->osh, pktbuf, TRUE);
4977 return -ENODEV;
4978 }
4979 DHD_GENERAL_UNLOCK(dhdp, flags);
4980
4981 ret = __dhd_sendpkt(dhdp, ifidx, pktbuf);
4982
4983 #ifdef DHD_PCIE_RUNTIMEPM
4984 exit:
4985 #endif
4986 DHD_GENERAL_LOCK(dhdp, flags);
4987 DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp);
4988 dhd_os_busbusy_wake(dhdp);
4989 DHD_GENERAL_UNLOCK(dhdp, flags);
4990 return ret;
4991 }
4992
4993 #if defined(DHD_LB_TXP)
4994
4995 int BCMFASTPATH
4996 dhd_lb_sendpkt(dhd_info_t *dhd, struct net_device *net,
4997 int ifidx, void *skb)
4998 {
4999 DHD_LB_STATS_PERCPU_ARR_INCR(dhd->tx_start_percpu_run_cnt);
5000
5001 /* If the feature is disabled at run-time, do TX from here */
5002 if (atomic_read(&dhd->lb_txp_active) == 0) {
5003 DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txp_percpu_run_cnt);
5004 return __dhd_sendpkt(&dhd->pub, ifidx, skb);
5005 }
5006
5007 /* Store the address of net device and interface index in the Packet tag */
5008 DHD_LB_TX_PKTTAG_SET_NETDEV((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb), net);
5009 DHD_LB_TX_PKTTAG_SET_IFIDX((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb), ifidx);
5010
5011 /* Enqueue the skb into tx_pend_queue */
5012 skb_queue_tail(&dhd->tx_pend_queue, skb);
5013
5014 DHD_TRACE(("%s(): Added skb %p for netdev %p \r\n", __FUNCTION__, skb, net));
5015
5016 /* Dispatch the Tx job to be processed by the tx_tasklet */
5017 dhd_lb_tx_dispatch(&dhd->pub);
5018
5019 return NETDEV_TX_OK;
5020 }
5021 #endif /* DHD_LB_TXP */
5022
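/*
 * dhd_start_xmit() is the ndo_start_xmit hook. The return type and the
 * busy/error codes differ by kernel version (see the #if below):
 * NETDEV_TX_BUSY asks the core to requeue the skb, while the other
 * branch returns plain negative errno values.
 */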
5023 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
5024 netdev_tx_t BCMFASTPATH
5025 #else
5026 int BCMFASTPATH
5027 #endif
5028 dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
5029 {
5030 int ret;
5031 uint datalen;
5032 void *pktbuf;
5033 dhd_info_t *dhd = DHD_DEV_INFO(net);
5034 dhd_if_t *ifp = NULL;
5035 int ifidx;
5036 unsigned long flags;
5037 #ifdef WLMEDIA_HTSF
5038 uint8 htsfdlystat_sz = dhd->pub.htsfdlystat_sz;
5039 #else
5040 uint8 htsfdlystat_sz = 0;
5041 #endif
5042 #ifdef DHD_WMF
5043 struct ether_header *eh;
5044 uint8 *iph;
5045 #endif /* DHD_WMF */
5046
5047 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
5048
5049 if (dhd_query_bus_erros(&dhd->pub)) {
5050 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
5051 return -ENODEV;
5052 #else
5053 return NETDEV_TX_BUSY;
5054 #endif
5055 }
5056
5057 /* terence 2017029: Reject in early suspend */
5058 if ((dhd->pub.conf->insuspend & NO_TXDATA_IN_SUSPEND) && dhd->pub.early_suspended) {
5059 dhd_txflowcontrol(&dhd->pub, ALL_INTERFACES, ON);
5060 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
5061 return -ENODEV;
5062 #else
5063 return NETDEV_TX_BUSY;
5064 #endif
5065 }
5066
5067 DHD_GENERAL_LOCK(&dhd->pub, flags);
5068 DHD_BUS_BUSY_SET_IN_TX(&dhd->pub);
5069 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
5070
5071 #ifdef DHD_PCIE_RUNTIMEPM
5072 if (dhdpcie_runtime_bus_wake(&dhd->pub, FALSE, dhd_start_xmit)) {
5073 /* In order to avoid pkt loss. Return NETDEV_TX_BUSY until run-time resumed. */
5074 /* stop the network queue temporarily until resume done */
5075 DHD_GENERAL_LOCK(&dhd->pub, flags);
5076 if (!dhdpcie_is_resume_done(&dhd->pub)) {
5077 dhd_bus_stop_queue(dhd->pub.bus);
5078 }
5079 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
5080 dhd_os_busbusy_wake(&dhd->pub);
5081 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
5082 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
5083 return -ENODEV;
5084 #else
5085 return NETDEV_TX_BUSY;
5086 #endif
5087 }
5088 #endif /* DHD_PCIE_RUNTIMEPM */
5089
5090 DHD_GENERAL_LOCK(&dhd->pub, flags);
5091 #ifdef BCMPCIE
5092 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(&dhd->pub)) {
5093 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
5094 __FUNCTION__, dhd->pub.busstate, dhd->pub.dhd_bus_busy_state));
5095 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
5096 #ifdef PCIE_FULL_DONGLE
5097 /* Stop tx queues if suspend is in progress */
5098 if (DHD_BUS_CHECK_ANY_SUSPEND_IN_PROGRESS(&dhd->pub)) {
5099 dhd_bus_stop_queue(dhd->pub.bus);
5100 }
5101 #endif /* PCIE_FULL_DONGLE */
5102 dhd_os_busbusy_wake(&dhd->pub);
5103 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
5104 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
5105 return -ENODEV;
5106 #else
5107 return NETDEV_TX_BUSY;
5108 #endif
5109 }
5110 #else
5111 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(&dhd->pub)) {
5112 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
5113 __FUNCTION__, dhd->pub.busstate, dhd->pub.dhd_bus_busy_state));
5114 }
5115 #endif
5116
5117 DHD_OS_WAKE_LOCK(&dhd->pub);
5118 DHD_PERIM_LOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
5119
5120
5121 #if defined(DHD_HANG_SEND_UP_TEST)
5122 if (dhd->pub.req_hang_type == HANG_REASON_BUS_DOWN) {
5123 dhd->pub.busstate = DHD_BUS_DOWN;
5124 }
5125 #endif /* DHD_HANG_SEND_UP_TEST */
5126
5127 /* Reject if down */
5128 if (dhd->pub.hang_was_sent || DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(&dhd->pub)) {
5129 DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
5130 __FUNCTION__, dhd->pub.up, dhd->pub.busstate));
5131 netif_stop_queue(net);
5132 /* Send Event when bus down detected during data session */
5133 if (dhd->pub.up && !dhd->pub.hang_was_sent && !DHD_BUS_CHECK_REMOVE(&dhd->pub)) {
5134 DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__));
5135 dhd->pub.hang_reason = HANG_REASON_BUS_DOWN;
5136 net_os_send_hang_message(net);
5137 }
5138 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
5139 dhd_os_busbusy_wake(&dhd->pub);
5140 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
5141 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
5142 DHD_OS_WAKE_UNLOCK(&dhd->pub);
5143 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
5144 return -ENODEV;
5145 #else
5146 return NETDEV_TX_BUSY;
5147 #endif
5148 }
5149
5150 ifp = DHD_DEV_IFP(net);
5151 ifidx = DHD_DEV_IFIDX(net);
5152 if (ifidx == DHD_BAD_IF) {
5153 DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx));
5154 netif_stop_queue(net);
5155 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
5156 dhd_os_busbusy_wake(&dhd->pub);
5157 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
5158 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
5159 DHD_OS_WAKE_UNLOCK(&dhd->pub);
5160 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
5161 return -ENODEV;
5162 #else
5163 return NETDEV_TX_BUSY;
5164 #endif
5165 }
5166
5167 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
5168
5169 ASSERT(ifidx == dhd_net2idx(dhd, net));
5170 ASSERT((ifp != NULL) && ((ifidx < DHD_MAX_IFS) && (ifp == dhd->iflist[ifidx])));
5171
5172 bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
5173
5174 /* re-align socket buffer if "skb->data" is odd address */
5175 if (((unsigned long)(skb->data)) & 0x1) {
5176 unsigned char *data = skb->data;
5177 uint32 length = skb->len;
5178 PKTPUSH(dhd->pub.osh, skb, 1);
5179 memmove(skb->data, data, length);
5180 PKTSETLEN(dhd->pub.osh, skb, length);
5181 }
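/* Note (added for clarity, not in the original comment): some DMA engines
 * and bus protocols require at least 16-bit-aligned buffers, so pushing one
 * byte of headroom and memmove()ing the payload restores even alignment
 * without reallocating the skb.
 */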
5182
5183 datalen = PKTLEN(dhd->pub.osh, skb);
5184
5185 /* Make sure there's enough room for any header */
5186 if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) {
5187 struct sk_buff *skb2;
5188
5189 DHD_INFO(("%s: insufficient headroom\n",
5190 dhd_ifname(&dhd->pub, ifidx)));
5191 dhd->pub.tx_realloc++;
5192
5193 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
5194 skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz);
5195
5196 dev_kfree_skb(skb);
5197 if ((skb = skb2) == NULL) {
5198 DHD_ERROR(("%s: skb_realloc_headroom failed\n",
5199 dhd_ifname(&dhd->pub, ifidx)));
5200 ret = -ENOMEM;
5201 goto done;
5202 }
5203 bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
5204 }
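/* Note: skb_realloc_headroom() always returns a private copy with the extra
 * headroom, which is why the original skb is freed above unconditionally.
 */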
5205
5206 /* Convert to packet */
5207 if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
5208 DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
5209 dhd_ifname(&dhd->pub, ifidx)));
5210 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
5211 dev_kfree_skb_any(skb);
5212 ret = -ENOMEM;
5213 goto done;
5214 }
5215
5216 #if defined(WLMEDIA_HTSF)
5217 if (htsfdlystat_sz && PKTLEN(dhd->pub.osh, pktbuf) >= ETHER_ADDR_LEN) {
5218 uint8 *pktdata = (uint8 *)PKTDATA(dhd->pub.osh, pktbuf);
5219 struct ether_header *eh = (struct ether_header *)pktdata;
5220
5221 if (!ETHER_ISMULTI(eh->ether_dhost) &&
5222 (ntoh16(eh->ether_type) == ETHER_TYPE_IP)) {
5223 eh->ether_type = hton16(ETHER_TYPE_BRCM_PKTDLYSTATS);
5224 }
5225 }
5226 #endif
5227 #ifdef DHD_WET
5228 /* WET-related packet proto manipulation should be done in DHD,
5229  * since the dongle doesn't have the complete payload.
5230  */
5231 if (WET_ENABLED(&dhd->pub) &&
5232 (dhd_wet_send_proc(dhd->pub.wet_info, pktbuf, &pktbuf) < 0)) {
5233 DHD_INFO(("%s:%s: wet send proc failed\n",
5234 __FUNCTION__, dhd_ifname(&dhd->pub, ifidx)));
5235 PKTFREE(dhd->pub.osh, pktbuf, FALSE);
5236 ret = -EFAULT;
5237 goto done;
5238 }
5239 #endif /* DHD_WET */
5240
5241 #ifdef DHD_WMF
5242 eh = (struct ether_header *)PKTDATA(dhd->pub.osh, pktbuf);
5243 iph = (uint8 *)eh + ETHER_HDR_LEN;
5244
5245 /* WMF processing for multicast packets
5246 * Only IPv4 packets are handled
5247 */
5248 if (ifp->wmf.wmf_enable && (ntoh16(eh->ether_type) == ETHER_TYPE_IP) &&
5249 (IP_VER(iph) == IP_VER_4) && (ETHER_ISMULTI(eh->ether_dhost) ||
5250 ((IPV4_PROT(iph) == IP_PROT_IGMP) && dhd->pub.wmf_ucast_igmp))) {
5251 #if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
5252 void *sdu_clone;
5253 bool ucast_convert = FALSE;
5254 #ifdef DHD_UCAST_UPNP
5255 uint32 dest_ip;
5256
5257 dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET)));
5258 ucast_convert = dhd->pub.wmf_ucast_upnp && MCAST_ADDR_UPNP_SSDP(dest_ip);
5259 #endif /* DHD_UCAST_UPNP */
5260 #ifdef DHD_IGMP_UCQUERY
5261 ucast_convert |= dhd->pub.wmf_ucast_igmp_query &&
5262 (IPV4_PROT(iph) == IP_PROT_IGMP) &&
5263 (*(iph + IPV4_HLEN(iph)) == IGMPV2_HOST_MEMBERSHIP_QUERY);
5264 #endif /* DHD_IGMP_UCQUERY */
5265 if (ucast_convert) {
5266 dhd_sta_t *sta;
5267 unsigned long flags;
5268 struct list_head snapshot_list;
5269 struct list_head *wmf_ucforward_list;
5270
5271 ret = NETDEV_TX_OK;
5272
5273 /* On non-BCM_GMAC3 platforms we need a snapshot sta_list to
5274 * avoid the deadlock caused by a nested DHD_IF_STA_LIST_LOCK call.
5275 */
5276 wmf_ucforward_list = DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, &snapshot_list);
5277
5278 /* Convert upnp/igmp query to unicast for each assoc STA */
5279 list_for_each_entry(sta, wmf_ucforward_list, list) {
5280 /* Skip sending to proxy interfaces of proxySTA */
5281 if (sta->psta_prim != NULL && !ifp->wmf_psta_disable) {
5282 continue;
5283 }
5284 if ((sdu_clone = PKTDUP(dhd->pub.osh, pktbuf)) == NULL) {
5285 ret = WMF_NOP;
5286 break;
5287 }
5288 dhd_wmf_forward(ifp->wmf.wmfh, sdu_clone, 0, sta, 1);
5289 }
5290 DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, wmf_ucforward_list);
5291
5292 DHD_GENERAL_LOCK(&dhd->pub, flags);
5293 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
5294 dhd_os_busbusy_wake(&dhd->pub);
5295 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
5296 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
5297 DHD_OS_WAKE_UNLOCK(&dhd->pub);
5298
5299 if (ret == NETDEV_TX_OK)
5300 PKTFREE(dhd->pub.osh, pktbuf, TRUE);
5301
5302 return ret;
5303 } else
5304 #endif /* defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) */
5305 {
5306 /* There is no STA info if the packet is coming from a LAN host,
5307 * so pass NULL.
5308 */
5309 ret = dhd_wmf_packets_handle(&dhd->pub, pktbuf, NULL, ifidx, 0);
5310 switch (ret) {
5311 case WMF_TAKEN:
5312 case WMF_DROP:
5313 /* Either taken by WMF or we should drop it.
5314 * Exiting send path
5315 */
5316
5317 DHD_GENERAL_LOCK(&dhd->pub, flags);
5318 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
5319 dhd_os_busbusy_wake(&dhd->pub);
5320 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
5321 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
5322 DHD_OS_WAKE_UNLOCK(&dhd->pub);
5323 return NETDEV_TX_OK;
5324 default:
5325 /* Continue the transmit path */
5326 break;
5327 }
5328 }
5329 }
5330 #endif /* DHD_WMF */
5331 #ifdef DHD_PSTA
5332 /* PSR related packet proto manipulation should be done in DHD
5333 * since dongle doesn't have complete payload
5334 */
5335 if (PSR_ENABLED(&dhd->pub) && (dhd_psta_proc(&dhd->pub,
5336 ifidx, &pktbuf, TRUE) < 0)) {
5337 DHD_ERROR(("%s:%s: psta send proc failed\n", __FUNCTION__,
5338 dhd_ifname(&dhd->pub, ifidx)));
5339 }
5340 #endif /* DHD_PSTA */
5341
5342 #ifdef DHDTCPACK_SUPPRESS
5343 if (dhd->pub.tcpack_sup_mode == TCPACK_SUP_HOLD) {
5344 /* If this packet has been held or was freed, just return */
5345 if (dhd_tcpack_hold(&dhd->pub, pktbuf, ifidx)) {
5346 ret = 0;
5347 goto done;
5348 }
5349 } else {
5350 /* If this packet has replaced another packet and got freed, just return */
5351 if (dhd_tcpack_suppress(&dhd->pub, pktbuf)) {
5352 ret = 0;
5353 goto done;
5354 }
5355 }
5356 #endif /* DHDTCPACK_SUPPRESS */
5357
5358 /*
5359 * If Load Balance is enabled queue the packet
5360 * else send directly from here.
5361 */
5362 #if defined(DHD_LB_TXP)
5363 ret = dhd_lb_sendpkt(dhd, net, ifidx, pktbuf);
5364 #else
5365 ret = __dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
5366 #endif
5367
5368 done:
5369 if (ret) {
5370 ifp->stats.tx_dropped++;
5371 dhd->pub.tx_dropped++;
5372 } else {
5373 #ifdef PROP_TXSTATUS
5374 /* the tx_packets counter can be counted only when wlfc is disabled */
5375 if (!dhd_wlfc_is_supported(&dhd->pub))
5376 #endif
5377 {
5378 dhd->pub.tx_packets++;
5379 ifp->stats.tx_packets++;
5380 ifp->stats.tx_bytes += datalen;
5381 }
5382 }
5383
5384
5385 DHD_GENERAL_LOCK(&dhd->pub, flags);
5386 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
5387 dhd_os_busbusy_wake(&dhd->pub);
5388 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
5389 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
5390 DHD_OS_WAKE_UNLOCK(&dhd->pub);
5391 /* Return ok: we always eat the packet */
5392 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
5393 return 0;
5394 #else
5395 return NETDEV_TX_OK;
5396 #endif
5397 }
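#if 0
/* Illustrative sketch (not built): the ndo_start_xmit contract followed by
 * dhd_start_xmit() above. NETDEV_TX_OK means the skb was consumed (even if
 * dropped); NETDEV_TX_BUSY means "not consumed, requeue and retry", so the
 * skb must be left untouched. The example_* names are hypothetical.
 */
static netdev_tx_t
example_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	if (example_queue_full(net)) {
		netif_stop_queue(net);	/* back-pressure the stack */
		return NETDEV_TX_BUSY;	/* skb not consumed; stack retries */
	}
	example_hw_send(net, skb);	/* ownership of skb passes to us */
	return NETDEV_TX_OK;
}
#endif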
5398
5399
5400 void
5401 dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
5402 {
5403 struct net_device *net;
5404 dhd_info_t *dhd = dhdp->info;
5405 int i;
5406
5407 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
5408
5409 ASSERT(dhd);
5410
5411 #ifdef DHD_LOSSLESS_ROAMING
5412 /* block flow control during roaming: only the 802.1D NC (EAPOL) precedence is being dequeued, so do not stop the queues */
5413 if ((dhdp->dequeue_prec_map == 1 << PRIO_8021D_NC) && state == ON) {
5414 return;
5415 }
5416 #endif
5417
5418 if (ifidx == ALL_INTERFACES) {
5419 /* Flow control on all active interfaces */
5420 dhdp->txoff = state;
5421 for (i = 0; i < DHD_MAX_IFS; i++) {
5422 if (dhd->iflist[i]) {
5423 net = dhd->iflist[i]->net;
5424 if (state == ON)
5425 netif_stop_queue(net);
5426 else
5427 netif_wake_queue(net);
5428 }
5429 }
5430 } else {
5431 if (dhd->iflist[ifidx]) {
5432 net = dhd->iflist[ifidx]->net;
5433 if (state == ON)
5434 netif_stop_queue(net);
5435 else
5436 netif_wake_queue(net);
5437 }
5438 }
5439 }
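/* Usage note: the suspend path above calls
 * dhd_txflowcontrol(&dhd->pub, ALL_INTERFACES, ON) to park every active
 * interface's queue; the OFF case wakes the queues again once the bus can
 * accept traffic.
 */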
5440
5441
5442 #ifdef DHD_WMF
5443 bool
5444 dhd_is_rxthread_enabled(dhd_pub_t *dhdp)
5445 {
5446 dhd_info_t *dhd = dhdp->info;
5447
5448 return dhd->rxthread_enabled;
5449 }
5450 #endif /* DHD_WMF */
5451
5452 #ifdef DHD_MCAST_REGEN
5453 /*
5454 * Description: This function is called to do the reverse translation
5455 *
5456 * Input eh - pointer to the ethernet header
5457 */
5458 int32
5459 dhd_mcast_reverse_translation(struct ether_header *eh)
5460 {
5461 uint8 *iph;
5462 uint32 dest_ip;
5463
5464 iph = (uint8 *)eh + ETHER_HDR_LEN;
5465 dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET)));
5466
5467 /* Only IP packets are handled */
5468 if (eh->ether_type != hton16(ETHER_TYPE_IP))
5469 return BCME_ERROR;
5470
5471 /* Non-IPv4 multicast packets are not handled */
5472 if (IP_VER(iph) != IP_VER_4)
5473 return BCME_ERROR;
5474
5475 /*
5476 * The packet has a multicast IP and unicast MAC. That means
5477 * we have to do the reverse translation
5478 */
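/* Example (added for clarity): group 239.1.1.1 regenerates dhost
 * 01:00:5e:01:01:01 -- the fixed 01:00:5e prefix followed by the low
 * 23 bits of the IPv4 group address.
 */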
5479 if (IPV4_ISMULTI(dest_ip) && !ETHER_ISMULTI(&eh->ether_dhost)) {
5480 ETHER_FILL_MCAST_ADDR_FROM_IP(eh->ether_dhost, dest_ip);
5481 return BCME_OK;
5482 }
5483
5484 return BCME_ERROR;
5485 }
5486 #endif /* DHD_MCAST_REGEN */
5487
5488 #ifdef SHOW_LOGTRACE
5489 static int
5490 dhd_event_logtrace_pkt_process(dhd_pub_t *dhdp, struct sk_buff * skb)
5491 {
5492 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5493 int ret = BCME_OK;
5494 uint datalen;
5495 bcm_event_msg_u_t evu;
5496 void *data = NULL;
5497 void *pktdata = NULL;
5498 bcm_event_t *pvt_data;
5499 uint pktlen;
5500
5501 DHD_TRACE(("%s:Enter\n", __FUNCTION__));
5502
5503 /* In dhd_rx_frame, header is stripped using skb_pull
5504 * of size ETH_HLEN, so adjust pktlen accordingly
5505 */
5506 pktlen = skb->len + ETH_HLEN;
5507
5508 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
5509 pktdata = (void *)skb_mac_header(skb);
5510 #else
5511 pktdata = (void *)skb->mac.raw;
5512 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
5513
5514 ret = wl_host_event_get_data(pktdata, pktlen, &evu);
5515
5516 if (ret != BCME_OK) {
5517 DHD_ERROR(("%s: wl_host_event_get_data err = %d\n",
5518 __FUNCTION__, ret));
5519 goto exit;
5520 }
5521
5522 datalen = ntoh32(evu.event.datalen);
5523
5524 pvt_data = (bcm_event_t *)pktdata;
5525 data = &pvt_data[1];
5526
5527 dhd_dbg_trace_evnt_handler(dhdp, data, &dhd->event_data, datalen);
5528
5529 exit:
5530 return ret;
5531 }
5532
5533 static void
5534 dhd_event_logtrace_process(struct work_struct * work)
5535 {
5536 /* Ignore compiler warnings due to -Werror=cast-qual */
5537 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
5538 #pragma GCC diagnostic push
5539 #pragma GCC diagnostic ignored "-Wcast-qual"
5540 #endif
5541 struct dhd_info *dhd =
5542 container_of(work, struct dhd_info, event_log_dispatcher_work);
5543 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
5544 #pragma GCC diagnostic pop
5545 #endif
5546
5547 dhd_pub_t *dhdp;
5548 struct sk_buff *skb;
5549
5550 if (!dhd) {
5551 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
5552 return;
5553 }
5554
5555 dhdp = &dhd->pub;
5556
5557 if (!dhdp) {
5558 DHD_ERROR(("%s: dhd pub is null \n", __FUNCTION__));
5559 return;
5560 }
5561
5562 DHD_TRACE(("%s:Enter\n", __FUNCTION__));
5563
5564 /* Run while(1) loop till all skbs are dequeued */
5565 while ((skb = skb_dequeue(&dhd->evt_trace_queue)) != NULL) {
5566 #ifdef PCIE_FULL_DONGLE
5567 int ifid;
5568 ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb));
5569 if (ifid == DHD_EVENT_IF) {
5570 dhd_event_logtrace_infobuf_pkt_process(dhdp, skb, &dhd->event_data);
5571 /* To send the skb to the network layer it would be converted to a
5572 * native PKT and skb->dev set to the primary interface's net device,
5573 * since infobuf events arrive on the special DHD_EVENT_IF
5574 */
5575 #ifdef DHD_USE_STATIC_CTRLBUF
5576 PKTFREE_STATIC(dhdp->osh, skb, FALSE);
5577 #else
5578 PKTFREE(dhdp->osh, skb, FALSE);
5579 #endif /* DHD_USE_STATIC_CTRLBUF */
5580 continue;
5581 }
5582 else {
5583 dhd_event_logtrace_pkt_process(dhdp, skb);
5584 }
5585 #else
5586 dhd_event_logtrace_pkt_process(dhdp, skb);
5587 #endif /* PCIE_FULL_DONGLE */
5588
5589 /* Free the skb buffer here: if the DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT
5590 * macro is defined, Info Ring events and WLC_E_TRACE events are freed in DHD;
5591 * otherwise they are always sent up to the network layers.
5592 */
5593 #ifdef DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT
5594 #ifdef DHD_USE_STATIC_CTRLBUF
5595 PKTFREE_STATIC(dhdp->osh, skb, FALSE);
5596 #else
5597 PKTFREE(dhdp->osh, skb, FALSE);
5598 #endif /* DHD_USE_STATIC_CTRLBUF */
5599 #else /* !DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */
5600 /* Do not call netif_receive_skb, as this workqueue scheduler is not from NAPI.
5601 * Also, as we are not in INTR context, do not call netif_rx; instead call
5602 * netif_rx_ni (for kernel >= 2.6), which does netif_rx, disables irq, raises
5603 * the NET_RX softirq and enables interrupts back
5604 */
5605 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
5606 netif_rx_ni(skb);
5607 #else
5608 {
5609 ulong flags;
5610 netif_rx(skb);
5611 local_irq_save(flags);
5612 RAISE_RX_SOFTIRQ();
5613 local_irq_restore(flags);
5614 }
5615 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
5616 #endif /* DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */
5617 }
5618 }
5619
5620 void
5621 dhd_event_logtrace_enqueue(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
5622 {
5623 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5624
5625 #ifdef PCIE_FULL_DONGLE
5626 /* Add ifidx in the PKTTAG */
5627 DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pktbuf), ifidx);
5628 #endif /* PCIE_FULL_DONGLE */
5629 skb_queue_tail(&dhd->evt_trace_queue, pktbuf);
5630
5631 schedule_work(&dhd->event_log_dispatcher_work);
5632 }
5633
5634 void
5635 dhd_event_logtrace_flush_queue(dhd_pub_t *dhdp)
5636 {
5637 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5638 struct sk_buff *skb;
5639
5640 while ((skb = skb_dequeue(&dhd->evt_trace_queue)) != NULL) {
5641 #ifdef DHD_USE_STATIC_CTRLBUF
5642 PKTFREE_STATIC(dhdp->osh, skb, FALSE);
5643 #else
5644 PKTFREE(dhdp->osh, skb, FALSE);
5645 #endif /* DHD_USE_STATIC_CTRLBUF */
5646 }
5647 }
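#if 0
/* Illustrative sketch (not built) of the producer/consumer pattern used by
 * dhd_event_logtrace_enqueue()/dhd_event_logtrace_process() above: an skb
 * queue feeding a workqueue item. struct example_ctx and the example_*
 * names are hypothetical; setup would do skb_queue_head_init(&ctx->q) and
 * INIT_WORK(&ctx->wk, example_consume).
 */
struct example_ctx {
	struct sk_buff_head q;
	struct work_struct wk;
};

static void example_consume(struct work_struct *wk)
{
	struct example_ctx *ctx = container_of(wk, struct example_ctx, wk);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&ctx->q)) != NULL)
		dev_kfree_skb(skb);	/* a real consumer would process first */
}

static void example_produce(struct example_ctx *ctx, struct sk_buff *skb)
{
	skb_queue_tail(&ctx->q, skb);	/* skb_queue_* take the queue lock */
	schedule_work(&ctx->wk);	/* no-op if the work is already queued */
}
#endif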
5648 #endif /* SHOW_LOGTRACE */
5649
5650 /** Called when a frame is received by the dongle on interface 'ifidx' */
5651 void
5652 dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan)
5653 {
5654 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5655 struct sk_buff *skb;
5656 uchar *eth;
5657 uint len;
5658 void *data, *pnext = NULL;
5659 int i;
5660 dhd_if_t *ifp;
5661 wl_event_msg_t event;
5662 int tout_rx = 0;
5663 int tout_ctrl = 0;
5664 void *skbhead = NULL;
5665 void *skbprev = NULL;
5666 uint16 protocol;
5667 unsigned char *dump_data;
5668 #ifdef DHD_MCAST_REGEN
5669 uint8 interface_role;
5670 if_flow_lkup_t *if_flow_lkup;
5671 unsigned long flags;
5672 #endif
5673 #ifdef DHD_WAKE_STATUS
5674 int pkt_wake = 0;
5675 wake_counts_t *wcp = NULL;
5676 #endif /* DHD_WAKE_STATUS */
5677
5678 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
5679
5680 for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
5681 struct ether_header *eh;
5682
5683 pnext = PKTNEXT(dhdp->osh, pktbuf);
5684 PKTSETNEXT(dhdp->osh, pktbuf, NULL);
5685
5686 /* info ring "debug" data, which is not a 802.3 frame, is sent/hacked with a
5687 * special ifidx of DHD_EVENT_IF. This is just internal to dhd to get the data from
5688 * dhd_msgbuf.c:dhd_prot_infobuf_cmplt_process() to here (dhd_rx_frame).
5689 */
5690 if (ifidx == DHD_EVENT_IF) {
5691 /* Event msg printing is called from dhd_rx_frame, which runs in tasklet
5692 * context in case of PCIe FD; for other buses this runs from DPC context.
5693 * If we get a bunch of events from the dongle, printing all of them from
5694 * tasklet/DPC context, in the data path no less, is costly.
5695 * Also, in newer dongle SW (4359, 4355 onwards) console prints come as
5696 * events with type WLC_E_TRACE.
5697 * We therefore print these console logs from workqueue context by enqueuing
5698 * the SKB here; dequeuing is done in the workqueue, and the SKB is freed
5699 * only if DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT is defined
5700 */
5701 #ifdef SHOW_LOGTRACE
5702 dhd_event_logtrace_enqueue(dhdp, ifidx, pktbuf);
5703 #else /* !SHOW_LOGTRACE */
5704 /* If SHOW_LOGTRACE not defined and ifidx is DHD_EVENT_IF,
5705 * free the PKT here itself
5706 */
5707 #ifdef DHD_USE_STATIC_CTRLBUF
5708 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
5709 #else
5710 PKTFREE(dhdp->osh, pktbuf, FALSE);
5711 #endif /* DHD_USE_STATIC_CTRLBUF */
5712 #endif /* SHOW_LOGTRACE */
5713 continue;
5714 }
5715 #ifdef DHD_WAKE_STATUS
5716 #ifdef BCMDBUS
5717 wcp = NULL;
5718 #else
5719 pkt_wake = dhd_bus_get_bus_wake(dhdp);
5720 wcp = dhd_bus_get_wakecount(dhdp);
5721 #endif /* BCMDBUS */
5722 if (wcp == NULL) {
5723 /* If wakeinfo count buffer is null do not update wake count values */
5724 pkt_wake = 0;
5725 }
5726 #endif /* DHD_WAKE_STATUS */
5727
5728 ifp = dhd->iflist[ifidx];
5729 if (ifp == NULL) {
5730 DHD_ERROR(("%s: ifp is NULL. drop packet\n",
5731 __FUNCTION__));
5732 PKTCFREE(dhdp->osh, pktbuf, FALSE);
5733 continue;
5734 }
5735
5736 eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
5737
5738 /* Dropping only data packets before registering net device to avoid kernel panic */
5739 #ifndef PROP_TXSTATUS_VSDB
5740 if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED) &&
5741 (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
5742 #else
5743 if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || !dhd->pub.up) &&
5744 (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
5745 #endif /* PROP_TXSTATUS_VSDB */
5746 {
5747 DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
5748 __FUNCTION__));
5749 PKTCFREE(dhdp->osh, pktbuf, FALSE);
5750 continue;
5751 }
5752
5753 #ifdef PROP_TXSTATUS
5754 if (dhd_wlfc_is_header_only_pkt(dhdp, pktbuf)) {
5755 /* WLFC may send header only packet when
5756 there is an urgent message but no packet to
5757 piggy-back on
5758 */
5759 PKTCFREE(dhdp->osh, pktbuf, FALSE);
5760 continue;
5761 }
5762 #endif
5763 #ifdef DHD_L2_FILTER
5764 /* If block_ping is enabled drop the ping packet */
5765 if (ifp->block_ping) {
5766 if (bcm_l2_filter_block_ping(dhdp->osh, pktbuf) == BCME_OK) {
5767 PKTCFREE(dhdp->osh, pktbuf, FALSE);
5768 continue;
5769 }
5770 }
5771 if (ifp->grat_arp && DHD_IF_ROLE_STA(dhdp, ifidx)) {
5772 if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
5773 PKTCFREE(dhdp->osh, pktbuf, FALSE);
5774 continue;
5775 }
5776 }
5777 if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
5778 int ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, FALSE);
5779
5780 /* Drop the packets if l2 filter has processed it already
5781 * otherwise continue with the normal path
5782 */
5783 if (ret == BCME_OK) {
5784 PKTCFREE(dhdp->osh, pktbuf, TRUE);
5785 continue;
5786 }
5787 }
5788 #endif /* DHD_L2_FILTER */
5789
5790 #ifdef DHD_MCAST_REGEN
5791 DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
5792 if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
5793 ASSERT(if_flow_lkup);
5794
5795 interface_role = if_flow_lkup[ifidx].role;
5796 DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
5797
5798 if (ifp->mcast_regen_bss_enable && (interface_role != WLC_E_IF_ROLE_WDS) &&
5799 !DHD_IF_ROLE_AP(dhdp, ifidx) &&
5800 ETHER_ISUCAST(eh->ether_dhost)) {
5801 if (dhd_mcast_reverse_translation(eh) == BCME_OK) {
5802 #ifdef DHD_PSTA
5803 /* Change bsscfg to primary bsscfg for unicast-multicast packets */
5804 if ((dhd_get_psta_mode(dhdp) == DHD_MODE_PSTA) ||
5805 (dhd_get_psta_mode(dhdp) == DHD_MODE_PSR)) {
5806 if (ifidx != 0) {
5807 /* Let the primary in PSTA interface handle this
5808 * frame after unicast to Multicast conversion
5809 */
5810 ifp = dhd_get_ifp(dhdp, 0);
5811 ASSERT(ifp);
5812 }
5813 }
5814 #endif /* DHD_PSTA */
5815 }
5816 }
5817 #endif /* DHD_MCAST_REGEN */
5818
5819 #ifdef DHD_WMF
5820 /* WMF processing for multicast packets */
5821 if (ifp->wmf.wmf_enable && (ETHER_ISMULTI(eh->ether_dhost))) {
5822 dhd_sta_t *sta;
5823 int ret;
5824
5825 sta = dhd_find_sta(dhdp, ifidx, (void *)eh->ether_shost);
5826 ret = dhd_wmf_packets_handle(dhdp, pktbuf, sta, ifidx, 1);
5827 switch (ret) {
5828 case WMF_TAKEN:
5829 /* The packet is taken by WMF. Continue to next iteration */
5830 continue;
5831 case WMF_DROP:
5832 /* Packet DROP decision by WMF. Toss it */
5833 DHD_ERROR(("%s: WMF decides to drop packet\n",
5834 __FUNCTION__));
5835 PKTCFREE(dhdp->osh, pktbuf, FALSE);
5836 continue;
5837 default:
5838 /* Continue the transmit path */
5839 break;
5840 }
5841 }
5842 #endif /* DHD_WMF */
5843
5844 #ifdef DHDTCPACK_SUPPRESS
5845 dhd_tcpdata_info_get(dhdp, pktbuf);
5846 #endif
5847 skb = PKTTONATIVE(dhdp->osh, pktbuf);
5848
5849 ASSERT(ifp);
5850 skb->dev = ifp->net;
5851 #ifdef DHD_WET
5852 /* wet related packet proto manipulation should be done in DHD
5853 * since dongle doesn't have complete payload
5854 */
5855 if (WET_ENABLED(&dhd->pub) && (dhd_wet_recv_proc(dhd->pub.wet_info,
5856 pktbuf) < 0)) {
5857 DHD_INFO(("%s:%s: wet recv proc failed\n",
5858 __FUNCTION__, dhd_ifname(dhdp, ifidx)));
5859 }
5860 #endif /* DHD_WET */
5861
5862 #ifdef DHD_PSTA
5863 if (PSR_ENABLED(dhdp) && (dhd_psta_proc(dhdp, ifidx, &pktbuf, FALSE) < 0)) {
5864 DHD_ERROR(("%s:%s: psta recv proc failed\n", __FUNCTION__,
5865 dhd_ifname(dhdp, ifidx)));
5866 }
5867 #endif /* DHD_PSTA */
5868
5869 #ifdef PCIE_FULL_DONGLE
5870 if ((DHD_IF_ROLE_AP(dhdp, ifidx) || DHD_IF_ROLE_P2PGO(dhdp, ifidx)) &&
5871 (!ifp->ap_isolate)) {
5872 eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
5873 if (ETHER_ISUCAST(eh->ether_dhost)) {
5874 if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) {
5875 dhd_sendpkt(dhdp, ifidx, pktbuf);
5876 continue;
5877 }
5878 } else {
5879 void *npktbuf = PKTDUP(dhdp->osh, pktbuf);
5880 if (npktbuf)
5881 dhd_sendpkt(dhdp, ifidx, npktbuf);
5882 }
5883 }
5884 #endif /* PCIE_FULL_DONGLE */
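/* Intra-BSS forwarding (above): with ap_isolate off, a unicast frame whose
 * destination is a known associated STA is turned around with dhd_sendpkt()
 * and never goes up the stack, while a multicast frame is duplicated so one
 * copy is re-transmitted over the air and the original still reaches the
 * host.
 */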
5885
5886 /* Get the protocol; keep the skb intact around eth_type_trans().
5887 * The main reason for this hack is a limitation of Linux 2.4, where
5888 * 'eth_type_trans' uses 'net->hard_header_len' rather than ETH_HLEN
5889 * to perform the skb_pull inside. To avoid copying the packet
5890 * coming from the network stack when adding the BDC, hardware
5891 * header etc., during network interface registration we set
5892 * 'net->hard_header_len' to ETH_HLEN plus the extra space required
5893 * for BDC, hardware header etc., and not just ETH_HLEN
5894 */
5895 eth = skb->data;
5896 len = skb->len;
5897
5898 dump_data = skb->data;
5899
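/* bytes 12-13 of the Ethernet header carry the EtherType, big-endian on the wire */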
5900 protocol = (skb->data[12] << 8) | skb->data[13];
5901 if (protocol == ETHER_TYPE_802_1X) {
5902 DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED);
5903 dhd_dump_eapol_4way_message(dhdp, dhd_ifname(dhdp, ifidx), dump_data, FALSE);
5904 }
5905
5906 if (protocol != ETHER_TYPE_BRCM && protocol == ETHER_TYPE_IP) {
5907 #ifdef DHD_DHCP_DUMP
5908 dhd_dhcp_dump(dhd_ifname(dhdp, ifidx), dump_data, FALSE);
5909 #endif /* DHD_DHCP_DUMP */
5910 #ifdef DHD_ICMP_DUMP
5911 dhd_icmp_dump(dhd_ifname(dhdp, ifidx), dump_data, FALSE);
5912 #endif /* DHD_ICMP_DUMP */
5913 }
5914 #ifdef DHD_RX_DUMP
5915 dhd_trx_dump(dhd_idx2net(dhdp, ifidx), dump_data, skb->len, FALSE);
5916 #endif /* DHD_RX_DUMP */
5917 #if defined(DHD_WAKE_STATUS) && defined(DHD_WAKEPKT_DUMP)
5918 if (pkt_wake) {
5919 prhex("[wakepkt_dump]", (char*)dump_data, MIN(len, 32));
5920 }
5921 #endif /* DHD_WAKE_STATUS && DHD_WAKEPKT_DUMP */
5922
5923 skb->protocol = eth_type_trans(skb, skb->dev);
5924
5925 if (skb->pkt_type == PACKET_MULTICAST) {
5926 dhd->pub.rx_multicast++;
5927 ifp->stats.multicast++;
5928 }
5929
5930 skb->data = eth;
5931 skb->len = len;
5932
5933 #ifdef WLMEDIA_HTSF
5934 dhd_htsf_addrxts(dhdp, pktbuf);
5935 #endif
5936 #ifdef DBG_PKT_MON
5937 DHD_DBG_PKT_MON_RX(dhdp, skb);
5938 #endif /* DBG_PKT_MON */
5939 #ifdef DHD_PKT_LOGGING
5940 DHD_PKTLOG_RX(dhdp, skb);
5941 #endif /* DHD_PKT_LOGGING */
5942 /* Strip header, count, deliver upward */
5943 skb_pull(skb, ETH_HLEN);
5944
5945 /* Process special event packets and then discard them */
5946 memset(&event, 0, sizeof(event));
5947
5948 if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
5949 bcm_event_msg_u_t evu;
5950 int ret_event;
5951 int event_type;
5952
5953 ret_event = wl_host_event_get_data(
5954 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
5955 skb_mac_header(skb),
5956 #else
5957 skb->mac.raw,
5958 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
5959 len, &evu);
5960
5961 if (ret_event != BCME_OK) {
5962 DHD_ERROR(("%s: wl_host_event_get_data err = %d\n",
5963 __FUNCTION__, ret_event));
5964 #ifdef DHD_USE_STATIC_CTRLBUF
5965 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
5966 #else
5967 PKTFREE(dhdp->osh, pktbuf, FALSE);
5968 #endif
5969 continue;
5970 }
5971
5972 memcpy(&event, &evu.event, sizeof(wl_event_msg_t));
5973 event_type = ntoh32_ua((void *)&event.event_type);
5974 #ifdef SHOW_LOGTRACE
5975 /* Event msg printing is called from dhd_rx_frame, which runs in tasklet
5976 * context in case of PCIe FD; for other buses this runs from DPC context.
5977 * If we get a bunch of events from the dongle, printing all of them from
5978 * tasklet/DPC context, in the data path no less, is costly.
5979 * Also, in newer dongle SW (4359, 4355 onwards) console prints come as
5980 * events with type WLC_E_TRACE.
5981 * We therefore print these console logs from workqueue context by enqueuing
5982 * the SKB here; dequeuing is done in the workqueue, and the SKB is freed
5983 * only if DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT is defined
5984 */
5985 if (event_type == WLC_E_TRACE) {
5986 DHD_TRACE(("%s: WLC_E_TRACE\n", __FUNCTION__));
5987 dhd_event_logtrace_enqueue(dhdp, ifidx, pktbuf);
5988 continue;
5989 }
5990 #endif /* SHOW_LOGTRACE */
5991
5992 ret_event = dhd_wl_host_event(dhd, ifidx,
5993 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
5994 skb_mac_header(skb),
5995 #else
5996 skb->mac.raw,
5997 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
5998 len, &event, &data);
5999
6000 wl_event_to_host_order(&event);
6001 if (!tout_ctrl)
6002 tout_ctrl = DHD_PACKET_TIMEOUT_MS;
6003
6004 #if defined(PNO_SUPPORT)
6005 if (event_type == WLC_E_PFN_NET_FOUND) {
6006 /* enforce custom wake lock to guarantee that the kernel is not suspended */
6007 tout_ctrl = CUSTOM_PNO_EVENT_LOCK_xTIME * DHD_PACKET_TIMEOUT_MS;
6008 }
6009 #endif /* PNO_SUPPORT */
6010 if (numpkt != 1) {
6011 DHD_TRACE(("%s: Got BRCM event packet in a chained packet.\n",
6012 __FUNCTION__));
6013 }
6014
6015 #ifdef DHD_WAKE_STATUS
6016 if (unlikely(pkt_wake)) {
6017 #ifdef DHD_WAKE_EVENT_STATUS
6018 if (event.event_type < WLC_E_LAST) {
6019 wcp->rc_event[event.event_type]++;
6020 wcp->rcwake++;
6021 pkt_wake = 0;
6022 }
6023 #endif /* DHD_WAKE_EVENT_STATUS */
6024 }
6025 #endif /* DHD_WAKE_STATUS */
6026
6027 /* For a delete-virtual-interface event, wl_host_event returns a positive
6028 * i/f index; do not proceed, just free the pkt.
6029 */
6030 if ((event_type == WLC_E_IF) && (ret_event > 0)) {
6031 DHD_ERROR(("%s: interface is deleted. Free event packet\n",
6032 __FUNCTION__));
6033 #ifdef DHD_USE_STATIC_CTRLBUF
6034 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
6035 #else
6036 PKTFREE(dhdp->osh, pktbuf, FALSE);
6037 #endif
6038 continue;
6039 }
6040
6041 #ifdef DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT
6042 #ifdef DHD_USE_STATIC_CTRLBUF
6043 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
6044 #else
6045 PKTFREE(dhdp->osh, pktbuf, FALSE);
6046 #endif /* DHD_USE_STATIC_CTRLBUF */
6047 continue;
6048 #else
6049 /*
6050 * For the event packets, there is a possibility
6051 * of the ifidx getting modified. Thus update the ifp
6052 * once again.
6053 */
6054 ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
6055 ifp = dhd->iflist[ifidx];
6056 #ifndef PROP_TXSTATUS_VSDB
6057 if (!(ifp && ifp->net && (ifp->net->reg_state == NETREG_REGISTERED)))
6058 #else
6059 if (!(ifp && ifp->net && (ifp->net->reg_state == NETREG_REGISTERED) &&
6060 dhd->pub.up))
6061 #endif /* PROP_TXSTATUS_VSDB */
6062 {
6063 DHD_ERROR(("%s: net device is NOT registered. drop event packet\n",
6064 __FUNCTION__));
6065 #ifdef DHD_USE_STATIC_CTRLBUF
6066 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
6067 #else
6068 PKTFREE(dhdp->osh, pktbuf, FALSE);
6069 #endif
6070 continue;
6071 }
6072 #endif /* DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */
6073 } else {
6074 tout_rx = DHD_PACKET_TIMEOUT_MS;
6075
6076 #ifdef PROP_TXSTATUS
6077 dhd_wlfc_save_rxpath_ac_time(dhdp, (uint8)PKTPRIO(skb));
6078 #endif /* PROP_TXSTATUS */
6079
6080 #ifdef DHD_WAKE_STATUS
6081 if (unlikely(pkt_wake)) {
6082 wcp->rxwake++;
6083 #ifdef DHD_WAKE_RX_STATUS
6084 #define ETHER_ICMP6_HEADER 20
6085 #define ETHER_IPV6_SADDR (ETHER_ICMP6_HEADER + 2)
6086 #define ETHER_IPV6_DAADR (ETHER_IPV6_SADDR + IPV6_ADDR_LEN)
6087 #define ETHER_ICMPV6_TYPE (ETHER_IPV6_DAADR + IPV6_ADDR_LEN)
6088
6089 if (ntoh16(skb->protocol) == ETHER_TYPE_ARP) /* ARP */
6090 wcp->rx_arp++;
6091 if (dump_data[0] == 0xFF) { /* Broadcast */
6092 wcp->rx_bcast++;
6093 } else if (dump_data[0] & 0x01) { /* Multicast */
6094 wcp->rx_mcast++;
6095 if (ntoh16(skb->protocol) == ETHER_TYPE_IPV6) {
6096 wcp->rx_multi_ipv6++;
6097 if ((skb->len > ETHER_ICMP6_HEADER) &&
6098 (dump_data[ETHER_ICMP6_HEADER] == IPPROTO_ICMPV6)) {
6099 wcp->rx_icmpv6++;
6100 if (skb->len > ETHER_ICMPV6_TYPE) {
6101 switch (dump_data[ETHER_ICMPV6_TYPE]) {
6102 case NDISC_ROUTER_ADVERTISEMENT:
6103 wcp->rx_icmpv6_ra++;
6104 break;
6105 case NDISC_NEIGHBOUR_ADVERTISEMENT:
6106 wcp->rx_icmpv6_na++;
6107 break;
6108 case NDISC_NEIGHBOUR_SOLICITATION:
6109 wcp->rx_icmpv6_ns++;
6110 break;
6111 }
6112 }
6113 }
6114 } else if (dump_data[2] == 0x5E) {
6115 wcp->rx_multi_ipv4++;
6116 } else {
6117 wcp->rx_multi_other++;
6118 }
6119 } else { /* Unicast */
6120 wcp->rx_ucast++;
6121 }
6122 #undef ETHER_ICMP6_HEADER
6123 #undef ETHER_IPV6_SADDR
6124 #undef ETHER_IPV6_DAADR
6125 #undef ETHER_ICMPV6_TYPE
6126 #endif /* DHD_WAKE_RX_STATUS */
6127 pkt_wake = 0;
6128 }
6129 #endif /* DHD_WAKE_STATUS */
6130 }
6131
6132 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
6133 if (ifp->net)
6134 ifp->net->last_rx = jiffies;
6135 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) */
6136
6137 if (ntoh16(skb->protocol) != ETHER_TYPE_BRCM) {
6138 dhdp->dstats.rx_bytes += skb->len;
6139 dhdp->rx_packets++; /* Local count */
6140 ifp->stats.rx_bytes += skb->len;
6141 ifp->stats.rx_packets++;
6142 }
6143
6144 if (in_interrupt()) {
6145 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
6146 __FUNCTION__, __LINE__);
6147 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
6148 #if defined(DHD_LB_RXP)
6149 netif_receive_skb(skb);
6150 #else /* !defined(DHD_LB_RXP) */
6151 netif_rx(skb);
6152 #endif /* !defined(DHD_LB_RXP) */
6153 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
6154 } else {
6155 if (dhd->rxthread_enabled) {
6156 if (!skbhead)
6157 skbhead = skb;
6158 else
6159 PKTSETNEXT(dhdp->osh, skbprev, skb);
6160 skbprev = skb;
6161 } else {
6162 /* If the receive is not processed inside an ISR,
6163 * the softirqd must be woken explicitly to service
6164 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
6165 * by netif_rx_ni(), but in earlier kernels, we need
6166 * to do it manually.
6167 */
6168 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
6169 __FUNCTION__, __LINE__);
6170
6171 #if defined(DHD_LB_RXP)
6172 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
6173 netif_receive_skb(skb);
6174 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
6175 #else /* !defined(DHD_LB_RXP) */
6176 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
6177 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
6178 netif_rx_ni(skb);
6179 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
6180 #else
6181 ulong flags;
6182 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
6183 netif_rx(skb);
6184 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
6185 local_irq_save(flags);
6186 RAISE_RX_SOFTIRQ();
6187 local_irq_restore(flags);
6188 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
6189 #endif /* !defined(DHD_LB_RXP) */
6190 }
6191 }
6192 }
6193
6194 if (dhd->rxthread_enabled && skbhead)
6195 dhd_sched_rxf(dhdp, skbhead);
6196
6197 DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx);
6198 DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl);
6199 }
6200
6201 void
6202 dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
6203 {
6204 /* Linux version has nothing to do */
6205 return;
6206 }
6207
6208 void
6209 dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
6210 {
6211 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
6212 struct ether_header *eh;
6213 uint16 type;
6214
6215 dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);
6216
6217
6218 eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
6219 type = ntoh16(eh->ether_type);
6220
6221 if ((type == ETHER_TYPE_802_1X) && (dhd_get_pend_8021x_cnt(dhd) > 0)) {
6222 atomic_dec(&dhd->pend_8021x_cnt);
6223 }
6224
6225 #ifdef PROP_TXSTATUS
6226 if (dhdp->wlfc_state && (dhdp->proptxstatus_mode != WLFC_FCMODE_NONE)) {
6227 dhd_if_t *ifp = dhd->iflist[DHD_PKTTAG_IF(PKTTAG(txp))];
6228 uint datalen = PKTLEN(dhd->pub.osh, txp);
6229 if (ifp != NULL) {
6230 if (success) {
6231 dhd->pub.tx_packets++;
6232 ifp->stats.tx_packets++;
6233 ifp->stats.tx_bytes += datalen;
6234 } else {
6235 ifp->stats.tx_dropped++;
6236 }
6237 }
6238 }
6239 #endif
6240 }
6241
6242 static struct net_device_stats *
6243 dhd_get_stats(struct net_device *net)
6244 {
6245 dhd_info_t *dhd = DHD_DEV_INFO(net);
6246 dhd_if_t *ifp;
6247 int ifidx;
6248
6249 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
6250
6251 if (!dhd) {
6252 DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
6253 goto error;
6254 }
6255
6256 ifidx = dhd_net2idx(dhd, net);
6257 if (ifidx == DHD_BAD_IF) {
6258 DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__));
6259 goto error;
6260 }
6261
6262 ifp = dhd->iflist[ifidx];
6263
6264 if (!ifp) {
6265 ASSERT(ifp);
6266 DHD_ERROR(("%s: ifp is NULL\n", __FUNCTION__));
6267 goto error;
6268 }
6269
6270 if (dhd->pub.up) {
6271 /* Use the protocol to get dongle stats */
6272 dhd_prot_dstats(&dhd->pub);
6273 }
6274 return &ifp->stats;
6275
6276 error:
6277 memset(&net->stats, 0, sizeof(net->stats));
6278 return &net->stats;
6279 }
6280
6281 #ifndef BCMDBUS
6282 static int
6283 dhd_watchdog_thread(void *data)
6284 {
6285 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
6286 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
6287 /* This thread doesn't need any user-level access,
6288 * so get rid of all our resources
6289 */
6290 if (dhd_watchdog_prio > 0) {
6291 struct sched_param param;
6292 param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)?
6293 dhd_watchdog_prio:(MAX_RT_PRIO-1);
6294 setScheduler(current, SCHED_FIFO, &param);
6295 }
6296
6297 while (1) {
6298 if (down_interruptible (&tsk->sema) == 0) {
6299 unsigned long flags;
6300 unsigned long jiffies_at_start = jiffies;
6301 unsigned long time_lapse;
6302 DHD_OS_WD_WAKE_LOCK(&dhd->pub);
6303
6304 SMP_RD_BARRIER_DEPENDS();
6305 if (tsk->terminated) {
6306 break;
6307 }
6308
6309 if (dhd->pub.dongle_reset == FALSE) {
6310 DHD_TIMER(("%s:\n", __FUNCTION__));
6311 dhd_bus_watchdog(&dhd->pub);
6312
6313 #ifdef DHD_TIMESYNC
6314 /* Call the timesync module watchdog */
6315 dhd_timesync_watchdog(&dhd->pub);
6316 #endif /* DHD_TIMESYNC */
6317
6318 DHD_GENERAL_LOCK(&dhd->pub, flags);
6319 /* Count the tick for reference */
6320 dhd->pub.tickcnt++;
6321 #ifdef DHD_L2_FILTER
6322 dhd_l2_filter_watchdog(&dhd->pub);
6323 #endif /* DHD_L2_FILTER */
6324 time_lapse = jiffies - jiffies_at_start;
6325
6326 /* Reschedule the watchdog */
6327 if (dhd->wd_timer_valid) {
6328 mod_timer(&dhd->timer,
6329 jiffies +
6330 msecs_to_jiffies(dhd_watchdog_ms) -
6331 min(msecs_to_jiffies(dhd_watchdog_ms), time_lapse));
6332 }
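/* The min() clamp above keeps the mod_timer() expiry from landing before
 * 'jiffies' when the watchdog pass itself took longer than one period, so
 * the effective period stays close to dhd_watchdog_ms without underflow.
 */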
6333 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
6334 }
6335 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
6336 } else {
6337 break;
6338 }
6339 }
6340
6341 complete_and_exit(&tsk->completed, 0);
6342 }
6343
6344 static void dhd_watchdog(
6345 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
6346 struct timer_list *t
6347 #else
6348 ulong data
6349 #endif
6350 )
6351 {
6352 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
6353 dhd_info_t *dhd = from_timer(dhd, t, timer);
6354 #else
6355 dhd_info_t *dhd = (dhd_info_t *)data;
6356 #endif
6357 unsigned long flags;
6358
6359 if (dhd->pub.dongle_reset) {
6360 return;
6361 }
6362
6363 if (dhd->thr_wdt_ctl.thr_pid >= 0) {
6364 up(&dhd->thr_wdt_ctl.sema);
6365 return;
6366 }
6367
6368 DHD_OS_WD_WAKE_LOCK(&dhd->pub);
6369 /* Call the bus module watchdog */
6370 dhd_bus_watchdog(&dhd->pub);
6371
6372 #ifdef DHD_TIMESYNC
6373 /* Call the timesync module watchdog */
6374 dhd_timesync_watchdog(&dhd->pub);
6375 #endif /* DHD_TIMESYNC */
6376
6377 DHD_GENERAL_LOCK(&dhd->pub, flags);
6378 /* Count the tick for reference */
6379 dhd->pub.tickcnt++;
6380
6381 #ifdef DHD_L2_FILTER
6382 dhd_l2_filter_watchdog(&dhd->pub);
6383 #endif /* DHD_L2_FILTER */
6384 /* Reschedule the watchdog */
6385 if (dhd->wd_timer_valid)
6386 mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
6387 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
6388 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
6389 }
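#if 0
/* Illustrative sketch (not built): the kernel >= 4.15 timer API bridged by
 * dhd_watchdog() above. timer_setup() replaces the old init_timer()/data
 * pairing, and the callback recovers its container with from_timer().
 * struct example_state and the example_* names are hypothetical.
 */
struct example_state {
	struct timer_list timer;
	unsigned int period_ms;
};

static void example_timeout(struct timer_list *t)
{
	struct example_state *st = from_timer(st, t, timer);

	/* do periodic work, then re-arm for the next period */
	mod_timer(&st->timer, jiffies + msecs_to_jiffies(st->period_ms));
}

static void example_arm(struct example_state *st, unsigned int period_ms)
{
	st->period_ms = period_ms;
	timer_setup(&st->timer, example_timeout, 0);
	mod_timer(&st->timer, jiffies + msecs_to_jiffies(period_ms));
}
#endif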
6390
6391 #ifdef DHD_PCIE_RUNTIMEPM
6392 static int
6393 dhd_rpm_state_thread(void *data)
6394 {
6395 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
6396 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
6397
6398 while (1) {
6399 if (down_interruptible (&tsk->sema) == 0) {
6400 unsigned long flags;
6401 unsigned long jiffies_at_start = jiffies;
6402 unsigned long time_lapse;
6403
6404 SMP_RD_BARRIER_DEPENDS();
6405 if (tsk->terminated) {
6406 break;
6407 }
6408
6409 if (dhd->pub.dongle_reset == FALSE) {
6410 DHD_TIMER(("%s:\n", __FUNCTION__));
6411 if (dhd->pub.up) {
6412 dhd_runtimepm_state(&dhd->pub);
6413 }
6414
6415 DHD_GENERAL_LOCK(&dhd->pub, flags);
6416 time_lapse = jiffies - jiffies_at_start;
6417
6418 /* Reschedule the watchdog */
6419 if (dhd->rpm_timer_valid) {
6420 mod_timer(&dhd->rpm_timer,
6421 jiffies +
6422 msecs_to_jiffies(dhd_runtimepm_ms) -
6423 min(msecs_to_jiffies(dhd_runtimepm_ms),
6424 time_lapse));
6425 }
6426 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
6427 }
6428 } else {
6429 break;
6430 }
6431 }
6432
6433 complete_and_exit(&tsk->completed, 0);
6434 }
6435
6436 static void dhd_runtimepm(
6437 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
6438 struct timer_list *t
6439 #else
6440 ulong data
6441 #endif
6442 )
6443 {
6444 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
6445 dhd_info_t *dhd = from_timer(dhd, t, rpm_timer);
6446 #else
6447 dhd_info_t *dhd = (dhd_info_t *)data;
6448 #endif
6449
6450 if (dhd->pub.dongle_reset) {
6451 return;
6452 }
6453
6454 if (dhd->thr_rpm_ctl.thr_pid >= 0) {
6455 up(&dhd->thr_rpm_ctl.sema);
6456 return;
6457 }
6458 }
6459
6460 void dhd_runtime_pm_disable(dhd_pub_t *dhdp)
6461 {
6462 dhd_os_runtimepm_timer(dhdp, 0);
6463 dhdpcie_runtime_bus_wake(dhdp, TRUE, __builtin_return_address(0));
6464 DHD_ERROR(("DHD Runtime PM Disabled \n"));
6465 }
6466
6467 void dhd_runtime_pm_enable(dhd_pub_t *dhdp)
6468 {
6469 if (dhd_get_idletime(dhdp)) {
6470 dhd_os_runtimepm_timer(dhdp, dhd_runtimepm_ms);
6471 DHD_ERROR(("DHD Runtime PM Enabled \n"));
6472 }
6473 }
6474
6475 #endif /* DHD_PCIE_RUNTIMEPM */
6476
6477
6478 #ifdef ENABLE_ADAPTIVE_SCHED
6479 static void
6480 dhd_sched_policy(int prio)
6481 {
6482 struct sched_param param;
6483 if (cpufreq_quick_get(0) <= CUSTOM_CPUFREQ_THRESH) {
6484 param.sched_priority = 0;
6485 setScheduler(current, SCHED_NORMAL, &param);
6486 } else {
6487 if (get_scheduler_policy(current) != SCHED_FIFO) {
6488 param.sched_priority = (prio < MAX_RT_PRIO)? prio : (MAX_RT_PRIO-1);
6489 setScheduler(current, SCHED_FIFO, &param);
6490 }
6491 }
6492 }
6493 #endif /* ENABLE_ADAPTIVE_SCHED */
6494 #ifdef DEBUG_CPU_FREQ
6495 static int dhd_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
6496 {
6497 dhd_info_t *dhd = container_of(nb, struct dhd_info, freq_trans);
6498 struct cpufreq_freqs *freq = data;
6499 if (dhd) {
6500 if (!dhd->new_freq)
6501 goto exit;
6502 if (val == CPUFREQ_POSTCHANGE) {
6503 DHD_ERROR(("cpu freq is changed to %u kHZ on CPU %d\n",
6504 freq->new, freq->cpu));
6505 *per_cpu_ptr(dhd->new_freq, freq->cpu) = freq->new;
6506 }
6507 }
6508 exit:
6509 return 0;
6510 }
6511 #endif /* DEBUG_CPU_FREQ */
6512
6513 static int
6514 dhd_dpc_thread(void *data)
6515 {
6516 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
6517 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
6518
6519 /* This thread doesn't need any user-level access,
6520 * so get rid of all our resources
6521 */
6522 if (dhd_dpc_prio > 0)
6523 {
6524 struct sched_param param;
6525 param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
6526 setScheduler(current, SCHED_FIFO, &param);
6527 }
6528
6529 #ifdef CUSTOM_DPC_CPUCORE
6530 set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_DPC_CPUCORE));
6531 #endif
6532 #ifdef CUSTOM_SET_CPUCORE
6533 dhd->pub.current_dpc = current;
6534 #endif /* CUSTOM_SET_CPUCORE */
6535 /* Run until signal received */
6536 while (1) {
6537 if (dhd->pub.conf->dpc_cpucore >= 0) {
6538 printf("%s: set dpc_cpucore %d\n", __FUNCTION__, dhd->pub.conf->dpc_cpucore);
6539 set_cpus_allowed_ptr(current, cpumask_of(dhd->pub.conf->dpc_cpucore));
6540 dhd->pub.conf->dpc_cpucore = -1;
6541 }
6542 if (!binary_sema_down(tsk)) {
6543 #ifdef ENABLE_ADAPTIVE_SCHED
6544 dhd_sched_policy(dhd_dpc_prio);
6545 #endif /* ENABLE_ADAPTIVE_SCHED */
6546 SMP_RD_BARRIER_DEPENDS();
6547 if (tsk->terminated) {
6548 break;
6549 }
6550
6551 /* Call bus dpc unless it indicated down (then clean stop) */
6552 if (dhd->pub.busstate != DHD_BUS_DOWN) {
6553 #ifdef DEBUG_DPC_THREAD_WATCHDOG
6554 int resched_cnt = 0;
6555 #endif /* DEBUG_DPC_THREAD_WATCHDOG */
6556 dhd_os_wd_timer_extend(&dhd->pub, TRUE);
6557 while (dhd_bus_dpc(dhd->pub.bus)) {
6558 /* process all data */
6559 #ifdef DEBUG_DPC_THREAD_WATCHDOG
6560 resched_cnt++;
6561 if (resched_cnt > MAX_RESCHED_CNT) {
6562 DHD_INFO(("%s Calling msleep to"
6563 "let other processes run. \n",
6564 __FUNCTION__));
6565 dhd->pub.dhd_bug_on = true;
6566 resched_cnt = 0;
6567 OSL_SLEEP(1);
6568 }
6569 #endif /* DEBUG_DPC_THREAD_WATCHDOG */
6570 }
6571 dhd_os_wd_timer_extend(&dhd->pub, FALSE);
6572 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6573 } else {
6574 if (dhd->pub.up)
6575 dhd_bus_stop(dhd->pub.bus, TRUE);
6576 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6577 }
6578 } else {
6579 break;
6580 }
6581 }
6582 complete_and_exit(&tsk->completed, 0);
6583 }
6584
6585 static int
6586 dhd_rxf_thread(void *data)
6587 {
6588 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
6589 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
6590 #if defined(WAIT_DEQUEUE)
6591 #define RXF_WATCHDOG_TIME 250 /* BARK_TIME(1000) / */
6592 ulong watchdogTime = OSL_SYSUPTIME(); /* msec */
6593 #endif
6594 dhd_pub_t *pub = &dhd->pub;
6595
6596 /* This thread doesn't need any user-level access,
6597 * so get rid of all our resources
6598 */
6599 if (dhd_rxf_prio > 0)
6600 {
6601 struct sched_param param;
6602 param.sched_priority = (dhd_rxf_prio < MAX_RT_PRIO)?dhd_rxf_prio:(MAX_RT_PRIO-1);
6603 setScheduler(current, SCHED_FIFO, &param);
6604 }
6605
6606 #ifdef CUSTOM_SET_CPUCORE
6607 dhd->pub.current_rxf = current;
6608 #endif /* CUSTOM_SET_CPUCORE */
6609 /* Run until signal received */
6610 while (1) {
6611 if (dhd->pub.conf->rxf_cpucore >= 0) {
6612 printf("%s: set rxf_cpucore %d\n", __FUNCTION__, dhd->pub.conf->rxf_cpucore);
6613 set_cpus_allowed_ptr(current, cpumask_of(dhd->pub.conf->rxf_cpucore));
6614 dhd->pub.conf->rxf_cpucore = -1;
6615 }
6616 if (down_interruptible(&tsk->sema) == 0) {
6617 void *skb;
6618 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
6619 ulong flags;
6620 #endif
6621 #ifdef ENABLE_ADAPTIVE_SCHED
6622 dhd_sched_policy(dhd_rxf_prio);
6623 #endif /* ENABLE_ADAPTIVE_SCHED */
6624
6625 SMP_RD_BARRIER_DEPENDS();
6626
6627 if (tsk->terminated) {
6628 break;
6629 }
6630 skb = dhd_rxf_dequeue(pub);
6631
6632 if (skb == NULL) {
6633 continue;
6634 }
6635 while (skb) {
6636 void *skbnext = PKTNEXT(pub->osh, skb);
6637 PKTSETNEXT(pub->osh, skb, NULL);
6638 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
6639 __FUNCTION__, __LINE__);
6640 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
6641 netif_rx_ni(skb);
6642 #else
6643 netif_rx(skb);
6644 local_irq_save(flags);
6645 RAISE_RX_SOFTIRQ();
6646 local_irq_restore(flags);
6647
6648 #endif
6649 skb = skbnext;
6650 }
6651 #if defined(WAIT_DEQUEUE)
6652 if (OSL_SYSUPTIME() - watchdogTime > RXF_WATCHDOG_TIME) {
6653 OSL_SLEEP(1);
6654 watchdogTime = OSL_SYSUPTIME();
6655 }
6656 #endif
6657
6658 DHD_OS_WAKE_UNLOCK(pub);
6659 } else {
6660 break;
6661 }
6662 }
6663 complete_and_exit(&tsk->completed, 0);
6664 }
6665
6666 #ifdef BCMPCIE
6667 void dhd_dpc_enable(dhd_pub_t *dhdp)
6668 {
6669 #if defined(DHD_LB_RXP) || defined(DHD_LB_TXP)
6670 dhd_info_t *dhd;
6671
6672 if (!dhdp || !dhdp->info)
6673 return;
6674 dhd = dhdp->info;
6675 #endif /* DHD_LB_RXP || DHD_LB_TXP */
6676
6677 #ifdef DHD_LB_RXP
6678 __skb_queue_head_init(&dhd->rx_pend_queue);
6679 #endif /* DHD_LB_RXP */
6680
6681 #ifdef DHD_LB_TXP
6682 skb_queue_head_init(&dhd->tx_pend_queue);
6683 #endif /* DHD_LB_TXP */
6684 }
6685 #endif /* BCMPCIE */
6686
6687 #ifdef BCMPCIE
6688 void
6689 dhd_dpc_kill(dhd_pub_t *dhdp)
6690 {
6691 dhd_info_t *dhd;
6692
6693 if (!dhdp) {
6694 return;
6695 }
6696
6697 dhd = dhdp->info;
6698
6699 if (!dhd) {
6700 return;
6701 }
6702
6703 if (dhd->thr_dpc_ctl.thr_pid < 0) {
6704 tasklet_kill(&dhd->tasklet);
6705 DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__));
6706 }
6707
6708 #ifdef DHD_LB
6709 #ifdef DHD_LB_RXP
6710 cancel_work_sync(&dhd->rx_napi_dispatcher_work);
6711 __skb_queue_purge(&dhd->rx_pend_queue);
6712 #endif /* DHD_LB_RXP */
6713 #ifdef DHD_LB_TXP
6714 cancel_work_sync(&dhd->tx_dispatcher_work);
6715 skb_queue_purge(&dhd->tx_pend_queue);
6716 #endif /* DHD_LB_TXP */
6717
6718 /* Kill the Load Balancing Tasklets */
6719 #if defined(DHD_LB_TXC)
6720 tasklet_kill(&dhd->tx_compl_tasklet);
6721 #endif /* DHD_LB_TXC */
6722 #if defined(DHD_LB_RXC)
6723 tasklet_kill(&dhd->rx_compl_tasklet);
6724 #endif /* DHD_LB_RXC */
6725 #if defined(DHD_LB_TXP)
6726 tasklet_kill(&dhd->tx_tasklet);
6727 #endif /* DHD_LB_TXP */
6728 #endif /* DHD_LB */
6729 }
6730
6731 void
6732 dhd_dpc_tasklet_kill(dhd_pub_t *dhdp)
6733 {
6734 dhd_info_t *dhd;
6735
6736 if (!dhdp) {
6737 return;
6738 }
6739
6740 dhd = dhdp->info;
6741
6742 if (!dhd) {
6743 return;
6744 }
6745
6746 if (dhd->thr_dpc_ctl.thr_pid < 0) {
6747 tasklet_kill(&dhd->tasklet);
6748 }
6749 }
6750 #endif /* BCMPCIE */
6751
6752 static void
6753 dhd_dpc(ulong data)
6754 {
6755 dhd_info_t *dhd;
6756
6757 dhd = (dhd_info_t *)data;
6758
6759 /* This tasklet can be scheduled in dhd_sched_dpc [dhd_linux.c]
6760 * down below, where the wake lock is taken;
6761 * the tasklet itself is initialized in dhd_attach()
6762 */
6763 /* Call bus dpc unless it indicated down (then clean stop) */
6764 if (dhd->pub.busstate != DHD_BUS_DOWN) {
6765 #if defined(DHD_LB_STATS) && defined(PCIE_FULL_DONGLE)
6766 DHD_LB_STATS_INCR(dhd->dhd_dpc_cnt);
6767 #endif /* DHD_LB_STATS && PCIE_FULL_DONGLE */
6768 if (dhd_bus_dpc(dhd->pub.bus)) {
6769 tasklet_schedule(&dhd->tasklet);
6770 }
6771 } else {
6772 dhd_bus_stop(dhd->pub.bus, TRUE);
6773 }
6774 }
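/* Note: when dhd_bus_dpc() reports more pending work, the tasklet above
 * reschedules itself instead of looping, yielding the CPU between passes;
 * the threaded variant (dhd_dpc_thread) instead loops with the watchdog
 * timer extended.
 */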
6775
6776 void
6777 dhd_sched_dpc(dhd_pub_t *dhdp)
6778 {
6779 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
6780
6781 if (dhd->thr_dpc_ctl.thr_pid >= 0) {
6782 DHD_OS_WAKE_LOCK(dhdp);
6783 /* If the semaphore does not get raised,
6784 * the wake lock must be released here
6785 */
6786 if (!binary_sema_up(&dhd->thr_dpc_ctl)) {
6787 DHD_OS_WAKE_UNLOCK(dhdp);
6788 }
6789 return;
6790 } else {
6791 tasklet_schedule(&dhd->tasklet);
6792 }
6793 }
6794 #endif /* BCMDBUS */
6795
6796 static void
6797 dhd_sched_rxf(dhd_pub_t *dhdp, void *skb)
6798 {
6799 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
6800 #ifdef RXF_DEQUEUE_ON_BUSY
6801 int ret = BCME_OK;
6802 int retry = 2;
6803 #endif /* RXF_DEQUEUE_ON_BUSY */
6804
6805 DHD_OS_WAKE_LOCK(dhdp);
6806
6807 DHD_TRACE(("dhd_sched_rxf: Enter\n"));
6808 #ifdef RXF_DEQUEUE_ON_BUSY
6809 do {
6810 ret = dhd_rxf_enqueue(dhdp, skb);
6811 if (ret == BCME_OK || ret == BCME_ERROR)
6812 break;
6813 else
6814 OSL_SLEEP(50); /* waiting for dequeueing */
6815 } while (retry-- > 0);
6816
6817 if (retry <= 0 && ret == BCME_BUSY) {
6818 void *skbp = skb;
6819
6820 while (skbp) {
6821 void *skbnext = PKTNEXT(dhdp->osh, skbp);
6822 PKTSETNEXT(dhdp->osh, skbp, NULL);
6823 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
6824 __FUNCTION__, __LINE__);
6825 netif_rx_ni(skbp);
6826 skbp = skbnext;
6827 }
6828 DHD_ERROR(("send skb to kernel backlog without rxf_thread\n"));
6829 } else {
6830 if (dhd->thr_rxf_ctl.thr_pid >= 0) {
6831 up(&dhd->thr_rxf_ctl.sema);
6832 }
6833 }
6834 #else /* RXF_DEQUEUE_ON_BUSY */
6835 do {
6836 if (dhd_rxf_enqueue(dhdp, skb) == BCME_OK)
6837 break;
6838 } while (1);
6839 if (dhd->thr_rxf_ctl.thr_pid >= 0) {
6840 up(&dhd->thr_rxf_ctl.sema);
6841 }
6842 return;
6843 #endif /* RXF_DEQUEUE_ON_BUSY */
6844 }
6845
6846 #if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
6847 #endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
6848
6849 #ifdef TOE
6850 /* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
6851 static int
6852 dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol)
6853 {
6854 char buf[32];
6855 int ret;
6856
6857 ret = dhd_iovar(&dhd->pub, ifidx, "toe_ol", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
6858
6859 if (ret < 0) {
6860 if (ret == -EIO) {
6861 DHD_ERROR(("%s: toe not supported by device\n", dhd_ifname(&dhd->pub,
6862 ifidx)));
6863 return -EOPNOTSUPP;
6864 }
6865
6866 DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
6867 return ret;
6868 }
6869
6870 memcpy(toe_ol, buf, sizeof(uint32));
6871 return 0;
6872 }
6873
6874 /* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
6875 static int
6876 dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol)
6877 {
6878 int toe, ret;
6879
6880 /* Set toe_ol as requested */
6881 ret = dhd_iovar(&dhd->pub, ifidx, "toe_ol", (char *)&toe_ol, sizeof(toe_ol), NULL, 0, TRUE);
6882 if (ret < 0) {
6883 DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
6884 dhd_ifname(&dhd->pub, ifidx), ret));
6885 return ret;
6886 }
6887
6888 /* Enable toe globally only if any components are enabled. */
6889 toe = (toe_ol != 0);
6890 ret = dhd_iovar(&dhd->pub, ifidx, "toe", (char *)&toe, sizeof(toe), NULL, 0, TRUE);
6891 if (ret < 0) {
6892 DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
6893 return ret;
6894 }
6895
6896 return 0;
6897 }
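#if 0
/* Illustrative sketch (not built): how the ETHTOOL_STXCSUM handler below
 * composes dhd_toe_get()/dhd_toe_set() -- read the current component
 * bitmap, flip one direction's bit, write it back (which also toggles the
 * global "toe" enable). example_enable_tx_csum is a hypothetical name.
 */
static int example_enable_tx_csum(dhd_info_t *dhd, bool enable)
{
	uint32 toe_cmpnt;
	int ret;

	if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
		return ret;
	if (enable)
		toe_cmpnt |= TOE_TX_CSUM_OL;
	else
		toe_cmpnt &= ~TOE_TX_CSUM_OL;
	return dhd_toe_set(dhd, 0, toe_cmpnt);
}
#endif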
6898 #endif /* TOE */
6899
6900 #if defined(WL_CFG80211) && defined(NUM_SCB_MAX_PROBE)
6901 void dhd_set_scb_probe(dhd_pub_t *dhd)
6902 {
6903 wl_scb_probe_t scb_probe;
6904 int ret;
6905
6906 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
6907 return;
6908 }
6909
6910 ret = dhd_iovar(dhd, 0, "scb_probe", NULL, 0,
6911 (char *)&scb_probe, sizeof(scb_probe), FALSE);
6912 if (ret < 0) {
6913 DHD_ERROR(("%s: GET max_scb_probe failed\n", __FUNCTION__));
6914 }
6915
6916 scb_probe.scb_max_probe = NUM_SCB_MAX_PROBE;
6917
6918 ret = dhd_iovar(dhd, 0, "scb_probe", (char *)&scb_probe, sizeof(scb_probe),
6919 NULL, 0, TRUE);
6920 if (ret < 0) {
6921 DHD_ERROR(("%s: max_scb_probe setting failed\n", __FUNCTION__));
6922 return;
6923 }
6924 }
6925 #endif /* WL_CFG80211 && NUM_SCB_MAX_PROBE */
6926
6927 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
6928 static void
6929 dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
6930 {
6931 dhd_info_t *dhd = DHD_DEV_INFO(net);
6932
6933 snprintf(info->driver, sizeof(info->driver), "wl");
6934 snprintf(info->version, sizeof(info->version), "%lu", dhd->pub.drv_version);
6935 }
6936
6937 struct ethtool_ops dhd_ethtool_ops = {
6938 .get_drvinfo = dhd_ethtool_get_drvinfo
6939 };
6940 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
6941
6942
6943 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
6944 static int
6945 dhd_ethtool(dhd_info_t *dhd, void *uaddr)
6946 {
6947 struct ethtool_drvinfo info;
6948 char drvname[sizeof(info.driver)];
6949 uint32 cmd;
6950 #ifdef TOE
6951 struct ethtool_value edata;
6952 uint32 toe_cmpnt, csum_dir;
6953 int ret;
6954 #endif
6955
6956 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
6957
6958 /* all ethtool calls start with a cmd word */
6959 if (copy_from_user(&cmd, uaddr, sizeof (uint32)))
6960 return -EFAULT;
6961
6962 switch (cmd) {
6963 case ETHTOOL_GDRVINFO:
6964 /* Copy out any requested driver name */
6965 if (copy_from_user(&info, uaddr, sizeof(info)))
6966 return -EFAULT;
6967 strncpy(drvname, info.driver, sizeof(info.driver));
6968 drvname[sizeof(info.driver)-1] = '\0';
6969
6970 /* clear struct for return */
6971 memset(&info, 0, sizeof(info));
6972 info.cmd = cmd;
6973
6974 /* if dhd requested, identify ourselves */
6975 if (strcmp(drvname, "?dhd") == 0) {
6976 snprintf(info.driver, sizeof(info.driver), "dhd");
6977 strncpy(info.version, EPI_VERSION_STR, sizeof(info.version) - 1);
6978 info.version[sizeof(info.version) - 1] = '\0';
6979 }
6980
6981 /* otherwise, require dongle to be up */
6982 else if (!dhd->pub.up) {
6983 DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__));
6984 return -ENODEV;
6985 }
6986
6987 /* finally, report dongle driver type */
6988 else if (dhd->pub.iswl)
6989 snprintf(info.driver, sizeof(info.driver), "wl");
6990 else
6991 snprintf(info.driver, sizeof(info.driver), "xx");
6992
6993 snprintf(info.version, sizeof(info.version), "%lu", dhd->pub.drv_version);
6994 if (copy_to_user(uaddr, &info, sizeof(info)))
6995 return -EFAULT;
6996 DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__,
6997 (int)sizeof(drvname), drvname, info.driver));
6998 break;
6999
7000 #ifdef TOE
7001 /* Get toe offload components from dongle */
7002 case ETHTOOL_GRXCSUM:
7003 case ETHTOOL_GTXCSUM:
7004 if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
7005 return ret;
7006
7007 csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
7008
7009 edata.cmd = cmd;
7010 edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;
7011
7012 if (copy_to_user(uaddr, &edata, sizeof(edata)))
7013 return -EFAULT;
7014 break;
7015
7016 /* Set toe offload components in dongle */
7017 case ETHTOOL_SRXCSUM:
7018 case ETHTOOL_STXCSUM:
7019 if (copy_from_user(&edata, uaddr, sizeof(edata)))
7020 return -EFAULT;
7021
7022 /* Read the current settings, update and write back */
7023 if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
7024 return ret;
7025
7026 csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
7027
7028 if (edata.data != 0)
7029 toe_cmpnt |= csum_dir;
7030 else
7031 toe_cmpnt &= ~csum_dir;
7032
7033 if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0)
7034 return ret;
7035
7036 /* If setting TX checksum mode, tell Linux the new mode */
7037 if (cmd == ETHTOOL_STXCSUM) {
7038 if (edata.data)
7039 dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
7040 else
7041 dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM;
7042 }
7043
7044 break;
7045 #endif /* TOE */
7046
7047 default:
7048 return -EOPNOTSUPP;
7049 }
7050
7051 return 0;
7052 }
7053 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
7054
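/*
 * Check whether an error code or bus state amounts to a fatal hang.
 * On -ETIMEDOUT/-EREMOTEIO, or when the bus is down without a dongle
 * reset in progress, record a hang reason and send a HANG event up via
 * net_os_send_hang_message(). Returns TRUE when a hang was reported.
 */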
7055 static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error)
7056 {
7057 if (!dhdp) {
7058 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
7059 return FALSE;
7060 }
7061
7062 if (!dhdp->up)
7063 return FALSE;
7064
7065 #if !defined(BCMPCIE) && !defined(BCMDBUS)
7066 if (dhdp->info->thr_dpc_ctl.thr_pid < 0) {
7067 DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__));
7068 return FALSE;
7069 }
7070 #endif /* !BCMPCIE && !BCMDBUS */
7071
7072 if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) ||
7073 ((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) {
7074 #ifdef BCMPCIE
7075 DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d d3acke=%d e=%d s=%d\n",
7076 __FUNCTION__, dhdp->rxcnt_timeout, dhdp->txcnt_timeout,
7077 dhdp->d3ackcnt_timeout, error, dhdp->busstate));
7078 #else
7079 DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d e=%d s=%d\n", __FUNCTION__,
7080 dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate));
7081 #endif /* BCMPCIE */
7082 if (dhdp->hang_reason == 0) {
7083 if (dhdp->dongle_trap_occured) {
7084 dhdp->hang_reason = HANG_REASON_DONGLE_TRAP;
7085 #ifdef BCMPCIE
7086 } else if (dhdp->d3ackcnt_timeout) {
7087 dhdp->hang_reason = HANG_REASON_D3_ACK_TIMEOUT;
7088 #endif /* BCMPCIE */
7089 } else {
7090 dhdp->hang_reason = HANG_REASON_IOCTL_RESP_TIMEOUT;
7091 }
7092 }
7093 printf("%s\n", info_string);
7094 net_os_send_hang_message(net);
7095 return TRUE;
7096 }
7097 return FALSE;
7098 }
7099
7100 #ifdef WL_MONITOR
7101 bool
7102 dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx)
7103 {
7104 return (dhd->info->monitor_type != 0);
7105 }
7106
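/*
 * Monitor-mode receive path. Without HOST_RADIOTAP_CONV, the completion
 * flags describe A-MSDU handling: FIRST/INTER fragments are accumulated
 * into dhd->monitor_skb and only the LAST (or a standalone) packet is
 * pushed to the monitor netdev through netif_rx()/netif_rx_ni().
 */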
7107 void
7108 dhd_rx_mon_pkt(dhd_pub_t *dhdp, host_rxbuf_cmpl_t *msg, void *pkt, int ifidx)
7109 {
7110 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
7111 #ifdef HOST_RADIOTAP_CONV
7112 uint16 len = 0, offset = 0;
7113 monitor_pkt_info_t pkt_info;
7114 memcpy(&pkt_info.marker, &msg->marker, sizeof(msg->marker));
7115 memcpy(&pkt_info.ts, &msg->ts, sizeof(monitor_pkt_ts_t));
7116
7117 if (!dhd->monitor_skb) {
7118 if ((dhd->monitor_skb = dev_alloc_skb(MAX_MON_PKT_SIZE)) == NULL)
7119 return;
7120 }
7121
7122 len = bcmwifi_monitor(dhd->monitor_info, &pkt_info, PKTDATA(dhdp->osh, pkt),
7123 PKTLEN(dhdp->osh, pkt), PKTDATA(dhdp->osh, dhd->monitor_skb), &offset);
7124
7125 if (dhd->monitor_type && dhd->monitor_dev)
7126 dhd->monitor_skb->dev = dhd->monitor_dev;
7127 else {
7128 PKTFREE(dhdp->osh, pkt, FALSE);
7129 		dev_kfree_skb(dhd->monitor_skb);
		dhd->monitor_skb = NULL;	/* avoid reusing a freed skb on the next call */
7130 		return;
7131 }
7132
7133 PKTFREE(dhdp->osh, pkt, FALSE);
7134
7135 if (!len) {
7136 return;
7137 }
7138
7139 skb_put(dhd->monitor_skb, len);
7140 skb_pull(dhd->monitor_skb, offset);
7141
7142 dhd->monitor_skb->protocol = eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev);
7143 #else
7144 uint8 amsdu_flag = (msg->flags & BCMPCIE_PKT_FLAGS_MONITOR_MASK) >>
7145 BCMPCIE_PKT_FLAGS_MONITOR_SHIFT;
7146 switch (amsdu_flag) {
7147 case BCMPCIE_PKT_FLAGS_MONITOR_NO_AMSDU:
7148 default:
7149 if (!dhd->monitor_skb) {
7150 if ((dhd->monitor_skb = PKTTONATIVE(dhdp->osh, pkt)) == NULL)
7151 return;
7152 }
7153
7154 if (dhd->monitor_type && dhd->monitor_dev)
7155 dhd->monitor_skb->dev = dhd->monitor_dev;
7156 else {
7157 PKTFREE(dhdp->osh, pkt, FALSE);
7158 dhd->monitor_skb = NULL;
7159 return;
7160 }
7161
7162 dhd->monitor_skb->protocol =
7163 eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev);
7164 dhd->monitor_len = 0;
7165 break;
7166 case BCMPCIE_PKT_FLAGS_MONITOR_FIRST_PKT:
7167 if (!dhd->monitor_skb) {
7168 if ((dhd->monitor_skb = dev_alloc_skb(MAX_MON_PKT_SIZE)) == NULL)
7169 return;
7170 dhd->monitor_len = 0;
7171 }
7172 if (dhd->monitor_type && dhd->monitor_dev)
7173 dhd->monitor_skb->dev = dhd->monitor_dev;
7174 else {
7175 PKTFREE(dhdp->osh, pkt, FALSE);
7176 			dev_kfree_skb(dhd->monitor_skb);
			dhd->monitor_skb = NULL;	/* avoid reusing a freed skb on the next call */
7177 			return;
7178 }
7179 memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb),
7180 PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
7181
7182 dhd->monitor_len = PKTLEN(dhdp->osh, pkt);
7183 PKTFREE(dhdp->osh, pkt, FALSE);
7184 return;
7185 case BCMPCIE_PKT_FLAGS_MONITOR_INTER_PKT:
7186 memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb) + dhd->monitor_len,
7187 PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
7188 dhd->monitor_len += PKTLEN(dhdp->osh, pkt);
7189
7190 PKTFREE(dhdp->osh, pkt, FALSE);
7191 return;
7192 case BCMPCIE_PKT_FLAGS_MONITOR_LAST_PKT:
7193 memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb) + dhd->monitor_len,
7194 PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
7195 dhd->monitor_len += PKTLEN(dhdp->osh, pkt);
7196
7197 PKTFREE(dhdp->osh, pkt, FALSE);
7198 skb_put(dhd->monitor_skb, dhd->monitor_len);
7199 dhd->monitor_skb->protocol =
7200 eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev);
7201 dhd->monitor_len = 0;
7202 break;
7203 }
7204
7205 #endif /* HOST_RADIOTAP_CONV */
7206 if (in_interrupt()) {
7207 		bcm_object_trace_opr(dhd->monitor_skb, BCM_OBJDBG_REMOVE,
7208 __FUNCTION__, __LINE__);
7209 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
7210 netif_rx(dhd->monitor_skb);
7211 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
7212 } else {
7213 /* If the receive is not processed inside an ISR,
7214 * the softirqd must be woken explicitly to service
7215 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
7216 * by netif_rx_ni(), but in earlier kernels, we need
7217 * to do it manually.
7218 */
7219 bcm_object_trace_opr(dhd->monitor_skb, BCM_OBJDBG_REMOVE,
7220 __FUNCTION__, __LINE__);
7221
7222 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
7223 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
7224 netif_rx_ni(dhd->monitor_skb);
7225 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
7226 #else
7227 ulong flags;
7228 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
7229 netif_rx(dhd->monitor_skb);
7230 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
7231 local_irq_save(flags);
7232 RAISE_RX_SOFTIRQ();
7233 local_irq_restore(flags);
7234 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
7235 }
7236
7237 dhd->monitor_skb = NULL;
7238 }
7239
7240 typedef struct dhd_mon_dev_priv {
7241 struct net_device_stats stats;
7242 } dhd_mon_dev_priv_t;
7243
7244 #define DHD_MON_DEV_PRIV_SIZE (sizeof(dhd_mon_dev_priv_t))
7245 #define DHD_MON_DEV_PRIV(dev) ((dhd_mon_dev_priv_t *)DEV_PRIV(dev))
7246 #define DHD_MON_DEV_STATS(dev) (((dhd_mon_dev_priv_t *)DEV_PRIV(dev))->stats)
7247
7248 static int
7249 dhd_monitor_start(struct sk_buff *skb, struct net_device *dev)
7250 {
7251 PKTFREE(NULL, skb, FALSE);
7252 return 0;
7253 }
7254
7255 static int
7256 dhd_monitor_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7257 {
7258 return 0;
7259 }
7260
7261 static struct net_device_stats*
7262 dhd_monitor_get_stats(struct net_device *dev)
7263 {
7264 return &DHD_MON_DEV_STATS(dev);
7265 }
7266
7267 static const struct net_device_ops netdev_monitor_ops =
7268 {
7269 .ndo_start_xmit = dhd_monitor_start,
7270 .ndo_get_stats = dhd_monitor_get_stats,
7271 .ndo_do_ioctl = dhd_monitor_ioctl
7272 };
7273
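/* Deferred-work handler: allocate and register the "radiotap%u" monitor netdev. */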
7274 static void
7275 dhd_add_monitor_if(void *handle, void *event_info, u8 event)
7276 {
7277 dhd_info_t *dhd = handle;
7278 struct net_device *dev;
7279 char *devname;
7280
7281 if (event != DHD_WQ_WORK_IF_ADD) {
7282 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
7283 return;
7284 }
7285
7286 if (!dhd) {
7287 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
7288 return;
7289 }
7290
7291 dev = alloc_etherdev(DHD_MON_DEV_PRIV_SIZE);
7292 if (!dev) {
7293 DHD_ERROR(("%s: alloc wlif failed\n", __FUNCTION__));
7294 return;
7295 }
7296
7297 devname = "radiotap";
7298
7299 snprintf(dev->name, sizeof(dev->name), "%s%u", devname, dhd->unit);
7300
7301 #ifndef ARPHRD_IEEE80211_PRISM /* From Linux 2.4.18 */
7302 #define ARPHRD_IEEE80211_PRISM 802
7303 #endif
7304
7305 #ifndef ARPHRD_IEEE80211_RADIOTAP
7306 #define ARPHRD_IEEE80211_RADIOTAP 803 /* IEEE 802.11 + radiotap header */
7307 #endif /* ARPHRD_IEEE80211_RADIOTAP */
7308
7309 dev->type = ARPHRD_IEEE80211_RADIOTAP;
7310
7311 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
7312 dev->hard_start_xmit = dhd_monitor_start;
7313 dev->do_ioctl = dhd_monitor_ioctl;
7314 dev->get_stats = dhd_monitor_get_stats;
7315 #else
7316 dev->netdev_ops = &netdev_monitor_ops;
7317 #endif
7318
7319 	if (register_netdev(dev)) {
7320 		DHD_ERROR(("%s: register_netdev failed for %s\n",
7321 			__FUNCTION__, dev->name));
7322 		free_netdev(dev);
		return;	/* do not publish a freed netdev below */
7323 	}
7324
7325 bcmwifi_monitor_create(&dhd->monitor_info);
7326 dhd->monitor_dev = dev;
7327 }
7328
7329 static void
7330 dhd_del_monitor_if(void *handle, void *event_info, u8 event)
7331 {
7332 dhd_info_t *dhd = handle;
7333
7334 if (event != DHD_WQ_WORK_IF_DEL) {
7335 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
7336 return;
7337 }
7338
7339 if (!dhd) {
7340 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
7341 return;
7342 }
7343
7344 if (dhd->monitor_dev) {
7345 unregister_netdev(dhd->monitor_dev);
7346
7347 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
7348 MFREE(dhd->osh, dhd->monitor_dev->priv, DHD_MON_DEV_PRIV_SIZE);
7349 MFREE(dhd->osh, dhd->monitor_dev, sizeof(struct net_device));
7350 #else
7351 free_netdev(dhd->monitor_dev);
7352 #endif /* 2.6.24 */
7353
7354 dhd->monitor_dev = NULL;
7355 }
7356
7357 if (dhd->monitor_info) {
7358 bcmwifi_monitor_delete(dhd->monitor_info);
7359 dhd->monitor_info = NULL;
7360 }
7361 }
7362
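/* Toggle monitor mode; netdev add/delete is deferred to the DHD workqueue. */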
7363 static void
7364 dhd_set_monitor(dhd_pub_t *dhd, int ifidx, int val)
7365 {
7366 dhd_info_t *info = dhd->info;
7367
7368 DHD_TRACE(("%s: val %d\n", __FUNCTION__, val));
7369 if ((val && info->monitor_dev) || (!val && !info->monitor_dev)) {
7370 DHD_ERROR(("%s: Mismatched params, return\n", __FUNCTION__));
7371 return;
7372 }
7373
7374 /* Delete monitor */
7375 if (!val) {
7376 info->monitor_type = val;
7377 dhd_deferred_schedule_work(info->dhd_deferred_wq, NULL, DHD_WQ_WORK_IF_DEL,
7378 dhd_del_monitor_if, DHD_WQ_WORK_PRIORITY_LOW);
7379 return;
7380 }
7381
7382 /* Add monitor */
7383 info->monitor_type = val;
7384 dhd_deferred_schedule_work(info->dhd_deferred_wq, NULL, DHD_WQ_WORK_IF_ADD,
7385 dhd_add_monitor_if, DHD_WQ_WORK_PRIORITY_LOW);
7386 }
7387 #endif /* WL_MONITOR */
7388
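/*
 * Common ioctl dispatcher: requests tagged DHD_IOCTL_MAGIC are handled
 * locally by dhd_ioctl(), everything else is forwarded to the dongle via
 * dhd_wl_ioctl(), with TX-flush serialization for key/disassoc commands.
 */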
7389 int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf)
7390 {
7391 int bcmerror = BCME_OK;
7392 int buflen = 0;
7393 struct net_device *net;
7394
7395 #ifdef REPORT_FATAL_TIMEOUTS
7396 if (ioc->cmd == WLC_SET_WPA_AUTH) {
7397 int wpa_auth;
7398
7399 wpa_auth = *((int *)ioc->buf);
7400 DHD_INFO(("wpa_auth:%d\n", wpa_auth));
7401 if (wpa_auth != WPA_AUTH_DISABLED) {
7402 			/* If the AP uses security, enable WLC_E_PSK_SUP event checking */
7403 dhd_set_join_error(pub, WLC_WPA_MASK);
7404 } else {
7405 			/* If the AP is open, disable WLC_E_PSK_SUP event checking */
7406 dhd_clear_join_error(pub, WLC_WPA_MASK);
7407 }
7408 }
7409
7410 if (ioc->cmd == WLC_SET_AUTH) {
7411 int auth;
7412 auth = *((int *)ioc->buf);
7413 DHD_INFO(("Auth:%d\n", auth));
7414
7415 if (auth != WL_AUTH_OPEN_SYSTEM) {
7416 			/* If the AP uses security, enable WLC_E_PSK_SUP event checking */
7417 dhd_set_join_error(pub, WLC_WPA_MASK);
7418 } else {
7419 			/* If the AP is open, disable WLC_E_PSK_SUP event checking */
7420 dhd_clear_join_error(pub, WLC_WPA_MASK);
7421 }
7422 }
7423 #endif /* REPORT_FATAL_TIMEOUTS */
7424 net = dhd_idx2net(pub, ifidx);
7425 if (!net) {
7426 bcmerror = BCME_BADARG;
7427 goto done;
7428 }
7429
7430 /* check for local dhd ioctl and handle it */
7431 if (ioc->driver == DHD_IOCTL_MAGIC) {
7432 /* This is a DHD IOVAR, truncate buflen to DHD_IOCTL_MAXLEN */
7433 if (data_buf)
7434 buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN);
7435 bcmerror = dhd_ioctl((void *)pub, ioc, data_buf, buflen);
7436 if (bcmerror)
7437 pub->bcmerror = bcmerror;
7438 goto done;
7439 }
7440
7441 /* This is a WL IOVAR, truncate buflen to WLC_IOCTL_MAXLEN */
7442 if (data_buf)
7443 buflen = MIN(ioc->len, WLC_IOCTL_MAXLEN);
7444
7445 #ifndef BCMDBUS
7446 /* send to dongle (must be up, and wl). */
7447 if (pub->busstate == DHD_BUS_DOWN || pub->busstate == DHD_BUS_LOAD) {
7448 if ((!pub->dongle_trap_occured) && allow_delay_fwdl) {
7449 int ret;
7450 if (atomic_read(&exit_in_progress)) {
7451 DHD_ERROR(("%s module exit in progress\n", __func__));
7452 bcmerror = BCME_DONGLE_DOWN;
7453 goto done;
7454 }
7455 ret = dhd_bus_start(pub);
7456 if (ret != 0) {
7457 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
7458 bcmerror = BCME_DONGLE_DOWN;
7459 goto done;
7460 }
7461 } else {
7462 bcmerror = BCME_DONGLE_DOWN;
7463 goto done;
7464 }
7465 }
7466
7467 if (!pub->iswl) {
7468 bcmerror = BCME_DONGLE_DOWN;
7469 goto done;
7470 }
7471 #endif /* !BCMDBUS */
7472
7473 /*
7474 * Flush the TX queue if required for proper message serialization:
7475 * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
7476 * prevent M4 encryption and
7477 * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to
7478 * prevent disassoc frame being sent before WPS-DONE frame.
7479 */
7480 if (ioc->cmd == WLC_SET_KEY ||
7481 (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
7482 strncmp("wsec_key", data_buf, 9) == 0) ||
7483 (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
7484 strncmp("bsscfg:wsec_key", data_buf, 15) == 0) ||
7485 ioc->cmd == WLC_DISASSOC)
7486 dhd_wait_pend8021x(net);
7487
7488 #ifdef WLMEDIA_HTSF
7489 if (data_buf) {
7490 /* short cut wl ioctl calls here */
7491 if (strcmp("htsf", data_buf) == 0) {
7492 dhd_ioctl_htsf_get(dhd, 0);
7493 return BCME_OK;
7494 }
7495
7496 if (strcmp("htsflate", data_buf) == 0) {
7497 if (ioc->set) {
7498 memset(ts, 0, sizeof(tstamp_t)*TSMAX);
7499 memset(&maxdelayts, 0, sizeof(tstamp_t));
7500 maxdelay = 0;
7501 tspktcnt = 0;
7502 maxdelaypktno = 0;
7503 memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
7504 memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
7505 memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
7506 memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
7507 } else {
7508 dhd_dump_latency();
7509 }
7510 return BCME_OK;
7511 }
7512 if (strcmp("htsfclear", data_buf) == 0) {
7513 memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
7514 memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
7515 memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
7516 memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
7517 htsf_seqnum = 0;
7518 return BCME_OK;
7519 }
7520 if (strcmp("htsfhis", data_buf) == 0) {
7521 dhd_dump_htsfhisto(&vi_d1, "H to D");
7522 dhd_dump_htsfhisto(&vi_d2, "D to D");
7523 dhd_dump_htsfhisto(&vi_d3, "D to H");
7524 dhd_dump_htsfhisto(&vi_d4, "H to H");
7525 return BCME_OK;
7526 }
7527 if (strcmp("tsport", data_buf) == 0) {
7528 if (ioc->set) {
7529 memcpy(&tsport, data_buf + 7, 4);
7530 } else {
7531 DHD_ERROR(("current timestamp port: %d \n", tsport));
7532 }
7533 return BCME_OK;
7534 }
7535 }
7536 #endif /* WLMEDIA_HTSF */
7537
7538 if ((ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) &&
7539 data_buf != NULL && strncmp("rpc_", data_buf, 4) == 0) {
7540 #ifdef BCM_FD_AGGR
7541 bcmerror = dhd_fdaggr_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
7542 #else
7543 bcmerror = BCME_UNSUPPORTED;
7544 #endif
7545 goto done;
7546 }
7547 bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
7548
7549 #ifdef WL_MONITOR
7550 /* Intercept monitor ioctl here, add/del monitor if */
7551 if (bcmerror == BCME_OK && ioc->cmd == WLC_SET_MONITOR) {
7552 dhd_set_monitor(pub, ifidx, *(int32*)data_buf);
7553 }
7554 #endif
7555
7556 #ifdef REPORT_FATAL_TIMEOUTS
7557 if (ioc->cmd == WLC_SCAN && bcmerror == 0) {
7558 dhd_start_scan_timer(pub);
7559 }
7560 if (ioc->cmd == WLC_SET_SSID && bcmerror == 0) {
7561 dhd_start_join_timer(pub);
7562 }
7563 #endif /* REPORT_FATAL_TIMEOUTS */
7564
7565 done:
7566 dhd_check_hang(net, pub, bcmerror);
7567
7568 return bcmerror;
7569 }
7570
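/*
 * net_device ioctl entry point: validates the interface, copies the
 * wl_ioctl_t header (with 32-bit compat handling) and payload into a
 * kernel bounce buffer, then hands the request to dhd_ioctl_process().
 */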
7571 static int
7572 dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
7573 {
7574 dhd_info_t *dhd = DHD_DEV_INFO(net);
7575 dhd_ioctl_t ioc;
7576 int bcmerror = 0;
7577 int ifidx;
7578 int ret;
7579 void *local_buf = NULL;
7580 void __user *ioc_buf_user = NULL;
7581 u16 buflen = 0;
7582
7583 if (atomic_read(&exit_in_progress)) {
7584 DHD_ERROR(("%s module exit in progress\n", __func__));
7585 bcmerror = BCME_DONGLE_DOWN;
7586 return OSL_ERROR(bcmerror);
7587 }
7588
7589 DHD_OS_WAKE_LOCK(&dhd->pub);
7590 DHD_PERIM_LOCK(&dhd->pub);
7591
7592 /* Interface up check for built-in type */
7593 if (!dhd_download_fw_on_driverload && dhd->pub.up == FALSE) {
7594 DHD_ERROR(("%s: Interface is down \n", __FUNCTION__));
7595 DHD_PERIM_UNLOCK(&dhd->pub);
7596 DHD_OS_WAKE_UNLOCK(&dhd->pub);
7597 return OSL_ERROR(BCME_NOTUP);
7598 }
7599
7600 ifidx = dhd_net2idx(dhd, net);
7601 DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd));
7602
7603 if (ifidx == DHD_BAD_IF) {
7604 DHD_ERROR(("%s: BAD IF\n", __FUNCTION__));
7605 DHD_PERIM_UNLOCK(&dhd->pub);
7606 DHD_OS_WAKE_UNLOCK(&dhd->pub);
7607 return -1;
7608 }
7609
7610 #if defined(WL_WIRELESS_EXT)
7611 /* linux wireless extensions */
7612 if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
7613 /* may recurse, do NOT lock */
7614 ret = wl_iw_ioctl(net, ifr, cmd);
7615 DHD_PERIM_UNLOCK(&dhd->pub);
7616 DHD_OS_WAKE_UNLOCK(&dhd->pub);
7617 return ret;
7618 }
7619 #endif /* defined(WL_WIRELESS_EXT) */
7620
7621 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
7622 if (cmd == SIOCETHTOOL) {
7623 ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
7624 DHD_PERIM_UNLOCK(&dhd->pub);
7625 DHD_OS_WAKE_UNLOCK(&dhd->pub);
7626 return ret;
7627 }
7628 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
7629
7630 if (cmd == SIOCDEVPRIVATE+1) {
7631 ret = wl_android_priv_cmd(net, ifr, cmd);
7632 dhd_check_hang(net, &dhd->pub, ret);
7633 DHD_PERIM_UNLOCK(&dhd->pub);
7634 DHD_OS_WAKE_UNLOCK(&dhd->pub);
7635 return ret;
7636 }
7637
7638 if (cmd != SIOCDEVPRIVATE) {
7639 DHD_PERIM_UNLOCK(&dhd->pub);
7640 DHD_OS_WAKE_UNLOCK(&dhd->pub);
7641 return -EOPNOTSUPP;
7642 }
7643
7644 memset(&ioc, 0, sizeof(ioc));
7645
7646 #ifdef CONFIG_COMPAT
7647 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
7648 if (in_compat_syscall())
7649 #else
7650 if (is_compat_task())
7651 #endif
7652 {
7653 compat_wl_ioctl_t compat_ioc;
7654 if (copy_from_user(&compat_ioc, ifr->ifr_data, sizeof(compat_wl_ioctl_t))) {
7655 bcmerror = BCME_BADADDR;
7656 goto done;
7657 }
7658 ioc.cmd = compat_ioc.cmd;
7659 if (ioc.cmd & WLC_SPEC_FLAG) {
7660 memset(&ioc, 0, sizeof(ioc));
7661 /* Copy the ioc control structure part of ioctl request */
7662 if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
7663 bcmerror = BCME_BADADDR;
7664 goto done;
7665 }
7666 ioc.cmd &= ~WLC_SPEC_FLAG; /* Clear the FLAG */
7667
7668 			/* To differentiate between wl and dhd read 4 more bytes */
7669 if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
7670 sizeof(uint)) != 0)) {
7671 bcmerror = BCME_BADADDR;
7672 goto done;
7673 }
7674 } else { /* ioc.cmd & WLC_SPEC_FLAG */
7675 ioc.buf = compat_ptr(compat_ioc.buf);
7676 ioc.len = compat_ioc.len;
7677 ioc.set = compat_ioc.set;
7678 ioc.used = compat_ioc.used;
7679 ioc.needed = compat_ioc.needed;
7680 			/* To differentiate between wl and dhd read 4 more bytes */
7681 if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(compat_wl_ioctl_t),
7682 sizeof(uint)) != 0)) {
7683 bcmerror = BCME_BADADDR;
7684 goto done;
7685 }
7686 } /* ioc.cmd & WLC_SPEC_FLAG */
7687 } else
7688 #endif /* CONFIG_COMPAT */
7689 {
7690 /* Copy the ioc control structure part of ioctl request */
7691 if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
7692 bcmerror = BCME_BADADDR;
7693 goto done;
7694 }
7695 #ifdef CONFIG_COMPAT
7696 		ioc.cmd &= ~WLC_SPEC_FLAG; /* make sure the flag is cleared when this isn't a compat task */
7697 #endif
7698
7699 		/* To differentiate between wl and dhd read 4 more bytes */
7700 if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
7701 sizeof(uint)) != 0)) {
7702 bcmerror = BCME_BADADDR;
7703 goto done;
7704 }
7705 }
7706
7707 if (!capable(CAP_NET_ADMIN)) {
7708 bcmerror = BCME_EPERM;
7709 goto done;
7710 }
7711
7712 /* Take backup of ioc.buf and restore later */
7713 ioc_buf_user = ioc.buf;
7714
7715 if (ioc.len > 0) {
7716 buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN);
7717 if (!(local_buf = MALLOC(dhd->pub.osh, buflen+1))) {
7718 bcmerror = BCME_NOMEM;
7719 goto done;
7720 }
7721
7722 DHD_PERIM_UNLOCK(&dhd->pub);
7723 if (copy_from_user(local_buf, ioc.buf, buflen)) {
7724 DHD_PERIM_LOCK(&dhd->pub);
7725 bcmerror = BCME_BADADDR;
7726 goto done;
7727 }
7728 DHD_PERIM_LOCK(&dhd->pub);
7729
7730 *((char *)local_buf + buflen) = '\0';
7731
7732 		/* On some platforms, accessing the userspace memory behind
7733 		 * ioc.buf causes a kernel panic, so point ioc.buf at the
7734 		 * kernel-space bounce buffer local_buf instead.
7735 		 */
7736 ioc.buf = local_buf;
7737 }
7738
7739 /* Skip all the non DHD iovars (wl iovars) after f/w hang */
7740 if (ioc.driver != DHD_IOCTL_MAGIC && dhd->pub.hang_was_sent) {
7741 DHD_TRACE(("%s: HANG was sent up earlier\n", __FUNCTION__));
7742 DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS);
7743 bcmerror = BCME_DONGLE_DOWN;
7744 goto done;
7745 }
7746
7747 bcmerror = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf);
7748
7749 /* Restore back userspace pointer to ioc.buf */
7750 ioc.buf = ioc_buf_user;
7751
7752 if (!bcmerror && buflen && local_buf && ioc.buf) {
7753 DHD_PERIM_UNLOCK(&dhd->pub);
7754 if (copy_to_user(ioc.buf, local_buf, buflen))
7755 bcmerror = -EFAULT;
7756 DHD_PERIM_LOCK(&dhd->pub);
7757 }
7758
7759 done:
7760 if (local_buf)
7761 MFREE(dhd->pub.osh, local_buf, buflen+1);
7762
7763 DHD_PERIM_UNLOCK(&dhd->pub);
7764 DHD_OS_WAKE_UNLOCK(&dhd->pub);
7765
7766 return OSL_ERROR(bcmerror);
7767 }
7768
7769
7770 #ifdef FIX_CPU_MIN_CLOCK
7771 static int dhd_init_cpufreq_fix(dhd_info_t *dhd)
7772 {
7773 if (dhd) {
7774 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7775 mutex_init(&dhd->cpufreq_fix);
7776 #endif
7777 dhd->cpufreq_fix_status = FALSE;
7778 }
7779 return 0;
7780 }
7781
7782 static void dhd_fix_cpu_freq(dhd_info_t *dhd)
7783 {
7784 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7785 mutex_lock(&dhd->cpufreq_fix);
7786 #endif
7787 if (dhd && !dhd->cpufreq_fix_status) {
7788 pm_qos_add_request(&dhd->dhd_cpu_qos, PM_QOS_CPU_FREQ_MIN, 300000);
7789 #ifdef FIX_BUS_MIN_CLOCK
7790 pm_qos_add_request(&dhd->dhd_bus_qos, PM_QOS_BUS_THROUGHPUT, 400000);
7791 #endif /* FIX_BUS_MIN_CLOCK */
7792 DHD_ERROR(("pm_qos_add_requests called\n"));
7793
7794 dhd->cpufreq_fix_status = TRUE;
7795 }
7796 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7797 mutex_unlock(&dhd->cpufreq_fix);
7798 #endif
7799 }
7800
7801 static void dhd_rollback_cpu_freq(dhd_info_t *dhd)
7802 {
7803 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7804 	mutex_lock(&dhd->cpufreq_fix);
7805 #endif
7806 if (dhd && dhd->cpufreq_fix_status != TRUE) {
7807 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7808 mutex_unlock(&dhd->cpufreq_fix);
7809 #endif
7810 return;
7811 }
7812
7813 pm_qos_remove_request(&dhd->dhd_cpu_qos);
7814 #ifdef FIX_BUS_MIN_CLOCK
7815 pm_qos_remove_request(&dhd->dhd_bus_qos);
7816 #endif /* FIX_BUS_MIN_CLOCK */
7817 	DHD_ERROR(("pm_qos_remove_requests called\n"));
7818
7819 dhd->cpufreq_fix_status = FALSE;
7820 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7821 mutex_unlock(&dhd->cpufreq_fix);
7822 #endif
7823 }
7824 #endif /* FIX_CPU_MIN_CLOCK */
7825
7826 #if defined(BT_OVER_SDIO)
7827
7828 void
7829 dhdsdio_bus_usr_cnt_inc(dhd_pub_t *dhdp)
7830 {
7831 dhdp->info->bus_user_count++;
7832 }
7833
7834 void
7835 dhdsdio_bus_usr_cnt_dec(dhd_pub_t *dhdp)
7836 {
7837 dhdp->info->bus_user_count--;
7838 }
7839
7840 /* Return values:
7841 * Success: Returns 0
7842  * Failure: Returns -1 or errno code
7843 */
7844 int
7845 dhd_bus_get(wlan_bt_handle_t handle, bus_owner_t owner)
7846 {
7847 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
7848 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
7849 int ret = 0;
7850
7851 mutex_lock(&dhd->bus_user_lock);
7852 ++dhd->bus_user_count;
7853 if (dhd->bus_user_count < 0) {
7854 DHD_ERROR(("%s(): bus_user_count is negative, which is invalid\n", __FUNCTION__));
7855 ret = -1;
7856 goto exit;
7857 }
7858
7859 if (dhd->bus_user_count == 1) {
7860 dhd->pub.hang_was_sent = 0;
7861
7862 /* First user, turn on WL_REG, start the bus */
7863 		DHD_ERROR(("%s(): first user, turn on WL_REG and start the bus\n", __FUNCTION__));
7864
7865 if (!wifi_platform_set_power(dhd->adapter, TRUE, WIFI_TURNON_DELAY)) {
7866 /* Enable F1 */
7867 ret = dhd_bus_resume(dhdp, 0);
7868 if (ret) {
7869 DHD_ERROR(("%s(): Failed to enable F1, err=%d\n",
7870 __FUNCTION__, ret));
7871 goto exit;
7872 }
7873 }
7874
7875 dhd_update_fw_nv_path(dhd);
7876 /* update firmware and nvram path to sdio bus */
7877 dhd_bus_update_fw_nv_path(dhd->pub.bus,
7878 dhd->fw_path, dhd->nv_path);
7879 /* download the firmware, Enable F2 */
7880 /* TODO: Should be done only in case of FW switch */
7881 ret = dhd_bus_devreset(dhdp, FALSE);
7882 dhd_bus_resume(dhdp, 1);
7883 if (!ret) {
7884 if (dhd_sync_with_dongle(&dhd->pub) < 0) {
7885 DHD_ERROR(("%s(): Sync with dongle failed!!\n", __FUNCTION__));
7886 ret = -EFAULT;
7887 }
7888 } else {
7889 DHD_ERROR(("%s(): Failed to download, err=%d\n", __FUNCTION__, ret));
7890 }
7891 } else {
7892 		DHD_ERROR(("%s(): bus already acquired, just increase the count %d\n",
7893 			__FUNCTION__, dhd->bus_user_count));
7894 }
7895 exit:
7896 mutex_unlock(&dhd->bus_user_lock);
7897 return ret;
7898 }
7899 EXPORT_SYMBOL(dhd_bus_get);
7900
7901 /* Return values:
7902 * Success: Returns 0
7903  * Failure: Returns -1 or errno code
7904 */
7905 int
7906 dhd_bus_put(wlan_bt_handle_t handle, bus_owner_t owner)
7907 {
7908 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
7909 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
7910 int ret = 0;
7911 BCM_REFERENCE(owner);
7912
7913 mutex_lock(&dhd->bus_user_lock);
7914 --dhd->bus_user_count;
7915 if (dhd->bus_user_count < 0) {
7916 DHD_ERROR(("%s(): bus_user_count is negative, which is invalid\n", __FUNCTION__));
7917 dhd->bus_user_count = 0;
7918 ret = -1;
7919 goto exit;
7920 }
7921
7922 if (dhd->bus_user_count == 0) {
7923 /* Last user, stop the bus and turn Off WL_REG */
7924 		DHD_ERROR(("%s(): no owners left, turn off WL_REG and stop the bus\n",
7925 			__FUNCTION__));
7926 #ifdef PROP_TXSTATUS
7927 if (dhd->pub.wlfc_enabled) {
7928 dhd_wlfc_deinit(&dhd->pub);
7929 }
7930 #endif /* PROP_TXSTATUS */
7931 #ifdef PNO_SUPPORT
7932 if (dhd->pub.pno_state) {
7933 dhd_pno_deinit(&dhd->pub);
7934 }
7935 #endif /* PNO_SUPPORT */
7936 #ifdef RTT_SUPPORT
7937 if (dhd->pub.rtt_state) {
7938 dhd_rtt_deinit(&dhd->pub);
7939 }
7940 #endif /* RTT_SUPPORT */
7941 ret = dhd_bus_devreset(dhdp, TRUE);
7942 if (!ret) {
7943 dhd_bus_suspend(dhdp);
7944 wifi_platform_set_power(dhd->adapter, FALSE, WIFI_TURNOFF_DELAY);
7945 }
7946 } else {
7947 		DHD_ERROR(("%s(): other owners using bus, decrease the count %d\n",
7948 			__FUNCTION__, dhd->bus_user_count));
7949 }
7950 exit:
7951 mutex_unlock(&dhd->bus_user_lock);
7952 return ret;
7953 }
7954 EXPORT_SYMBOL(dhd_bus_put);
7955
7956 int
7957 dhd_net_bus_get(struct net_device *dev)
7958 {
7959 dhd_info_t *dhd = DHD_DEV_INFO(dev);
7960 return dhd_bus_get(&dhd->pub, WLAN_MODULE);
7961 }
7962
7963 int
7964 dhd_net_bus_put(struct net_device *dev)
7965 {
7966 dhd_info_t *dhd = DHD_DEV_INFO(dev);
7967 return dhd_bus_put(&dhd->pub, WLAN_MODULE);
7968 }
7969
7970 /*
7971 * Function to enable the Bus Clock
7972 * Returns BCME_OK on success and BCME_xxx on failure
7973 *
7974 * This function is not callable from non-sleepable context
7975 */
7976 int dhd_bus_clk_enable(wlan_bt_handle_t handle, bus_owner_t owner)
7977 {
7978 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
7979
7980 int ret;
7981
7982 dhd_os_sdlock(dhdp);
7983 	/*
7984 	 * Pass TRUE as the second argument so the function waits
7985 	 * until the clocks are actually available.
7986 	 */
7988 ret = __dhdsdio_clk_enable(dhdp->bus, owner, TRUE);
7989 dhd_os_sdunlock(dhdp);
7990
7991 return ret;
7992 }
7993 EXPORT_SYMBOL(dhd_bus_clk_enable);
7994
7995 /*
7996 * Function to disable the Bus Clock
7997 * Returns BCME_OK on success and BCME_xxx on failure
7998 *
7999 * This function is not callable from non-sleepable context
8000 */
8001 int dhd_bus_clk_disable(wlan_bt_handle_t handle, bus_owner_t owner)
8002 {
8003 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
8004
8005 int ret;
8006
8007 dhd_os_sdlock(dhdp);
8008 	/*
8009 	 * Pass TRUE as the second argument so the function waits
8010 	 * until the clocks are actually disabled.
8011 	 */
8013 ret = __dhdsdio_clk_disable(dhdp->bus, owner, TRUE);
8014 dhd_os_sdunlock(dhdp);
8015
8016 return ret;
8017 }
8018 EXPORT_SYMBOL(dhd_bus_clk_disable);
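/*
 * Usage sketch (illustrative, not taken from a caller in this file): a
 * module holding a wlan_bt_handle_t would bracket its bus traffic with
 * the clock calls, where owner is a bus_owner_t value such as WLAN_MODULE:
 *
 *	if (dhd_bus_clk_enable(handle, owner) == BCME_OK) {
 *		... perform SDIO transfers ...
 *		dhd_bus_clk_disable(handle, owner);
 *	}
 */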
8019
8020 /*
8021 * Function to reset bt_use_count counter to zero.
8022 *
8023 * This function is not callable from non-sleepable context
8024 */
8025 void dhd_bus_reset_bt_use_count(wlan_bt_handle_t handle)
8026 {
8027 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
8028
8029 /* take the lock and reset bt use count */
8030 dhd_os_sdlock(dhdp);
8031 dhdsdio_reset_bt_use_count(dhdp->bus);
8032 dhd_os_sdunlock(dhdp);
8033 }
8034 EXPORT_SYMBOL(dhd_bus_reset_bt_use_count);
8035
8036 #endif /* BT_OVER_SDIO */
8037
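/*
 * Enter (flag=1) or leave (flag=0) firmware deepsleep. MPC is disabled
 * before sleeping and re-enabled on wake; the "deepsleep 0" write is
 * verified and retried up to MAX_TRY_CNT times.
 */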
8038 #define MAX_TRY_CNT 5 /* Number of tries to disable deepsleep */
8039 int dhd_deepsleep(dhd_info_t *dhd, int flag)
8040 {
8041 char iovbuf[20];
8042 uint powervar = 0;
8043 dhd_pub_t *dhdp;
8044 int cnt = 0;
8045 int ret = 0;
8046
8047 dhdp = &dhd->pub;
8048
8049 switch (flag) {
8050 case 1 : /* Deepsleep on */
8051 DHD_ERROR(("dhd_deepsleep: ON\n"));
8052 /* give some time to sysioc_work before deepsleep */
8053 OSL_SLEEP(200);
8054 #ifdef PKT_FILTER_SUPPORT
8055 /* disable pkt filter */
8056 dhd_enable_packet_filter(0, dhdp);
8057 #endif /* PKT_FILTER_SUPPORT */
8058 /* Disable MPC */
8059 powervar = 0;
8060 memset(iovbuf, 0, sizeof(iovbuf));
8061 bcm_mkiovar("mpc", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
8062 dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8063
8064 /* Enable Deepsleep */
8065 powervar = 1;
8066 memset(iovbuf, 0, sizeof(iovbuf));
8067 bcm_mkiovar("deepsleep", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
8068 dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8069 break;
8070
8071 case 0: /* Deepsleep Off */
8072 DHD_ERROR(("dhd_deepsleep: OFF\n"));
8073
8074 /* Disable Deepsleep */
8075 for (cnt = 0; cnt < MAX_TRY_CNT; cnt++) {
8076 powervar = 0;
8077 memset(iovbuf, 0, sizeof(iovbuf));
8078 bcm_mkiovar("deepsleep", (char *)&powervar, 4,
8079 iovbuf, sizeof(iovbuf));
8080 dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf,
8081 sizeof(iovbuf), TRUE, 0);
8082
8083 memset(iovbuf, 0, sizeof(iovbuf));
8084 bcm_mkiovar("deepsleep", (char *)&powervar, 4,
8085 iovbuf, sizeof(iovbuf));
8086 if ((ret = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, iovbuf,
8087 sizeof(iovbuf), FALSE, 0)) < 0) {
8088 					DHD_ERROR(("failed to read deepsleep status,"
8089 						" ret: %d\n", ret));
8090 } else {
8091 if (!(*(int *)iovbuf)) {
8092 DHD_ERROR(("deepsleep mode is 0,"
8093 " count: %d\n", cnt));
8094 break;
8095 }
8096 }
8097 }
8098
8099 /* Enable MPC */
8100 powervar = 1;
8101 memset(iovbuf, 0, sizeof(iovbuf));
8102 bcm_mkiovar("mpc", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
8103 dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8104 break;
8105 }
8106
8107 return 0;
8108 }
8109
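/*
 * ndo_stop handler: stops the TX queue, brings down cfg80211 state and
 * cleans up virtual interfaces, flushes deferred work and queues, and
 * powers the chip off when firmware is loaded per-open
 * (dhd_download_fw_on_driverload == FALSE).
 */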
8110 static int
8111 dhd_stop(struct net_device *net)
8112 {
8113 int ifidx = 0;
8114 #ifdef WL_CFG80211
8115 unsigned long flags = 0;
8116 #endif /* WL_CFG80211 */
8117 dhd_info_t *dhd = DHD_DEV_INFO(net);
8118 DHD_OS_WAKE_LOCK(&dhd->pub);
8119 DHD_PERIM_LOCK(&dhd->pub);
8120 printf("%s: Enter %p\n", __FUNCTION__, net);
8121 dhd->pub.rxcnt_timeout = 0;
8122 dhd->pub.txcnt_timeout = 0;
8123
8124 #ifdef BCMPCIE
8125 dhd->pub.d3ackcnt_timeout = 0;
8126 #endif /* BCMPCIE */
8127
8128 if (dhd->pub.up == 0) {
8129 goto exit;
8130 }
8131 #if defined(DHD_HANG_SEND_UP_TEST)
8132 if (dhd->pub.req_hang_type) {
8133 DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
8134 __FUNCTION__, dhd->pub.req_hang_type));
8135 dhd->pub.req_hang_type = 0;
8136 }
8137 #endif /* DHD_HANG_SEND_UP_TEST */
8138
8139 dhd_if_flush_sta(DHD_DEV_IFP(net));
8140
8141 /* Disable Runtime PM before interface down */
8142 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
8143
8144 #ifdef FIX_CPU_MIN_CLOCK
8145 if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE)
8146 dhd_rollback_cpu_freq(dhd);
8147 #endif /* FIX_CPU_MIN_CLOCK */
8148
8149 ifidx = dhd_net2idx(dhd, net);
8150 BCM_REFERENCE(ifidx);
8151
8152 /* Set state and stop OS transmissions */
8153 netif_stop_queue(net);
8154 #ifdef WL_CFG80211
8155 spin_lock_irqsave(&dhd->pub.up_lock, flags);
8156 dhd->pub.up = 0;
8157 spin_unlock_irqrestore(&dhd->pub.up_lock, flags);
8158 #else
8159 dhd->pub.up = 0;
8160 #endif /* WL_CFG80211 */
8161
8162 #ifdef WL_CFG80211
8163 if (ifidx == 0) {
8164 dhd_if_t *ifp;
8165 wl_cfg80211_down(net);
8166
8167 ifp = dhd->iflist[0];
8168 ASSERT(ifp && ifp->net);
8169 /*
8170 * For CFG80211: Clean up all the left over virtual interfaces
8171 * when the primary Interface is brought down. [ifconfig wlan0 down]
8172 */
8173 if (!dhd_download_fw_on_driverload) {
8174 if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) &&
8175 (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
8176 int i;
8177 #ifdef WL_CFG80211_P2P_DEV_IF
8178 wl_cfg80211_del_p2p_wdev(net);
8179 #endif /* WL_CFG80211_P2P_DEV_IF */
8180
8181 dhd_net_if_lock_local(dhd);
8182 for (i = 1; i < DHD_MAX_IFS; i++)
8183 dhd_remove_if(&dhd->pub, i, FALSE);
8184
8185 if (ifp && ifp->net) {
8186 dhd_if_del_sta_list(ifp);
8187 }
8188 #ifdef ARP_OFFLOAD_SUPPORT
8189 if (dhd_inetaddr_notifier_registered) {
8190 dhd_inetaddr_notifier_registered = FALSE;
8191 unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
8192 }
8193 #endif /* ARP_OFFLOAD_SUPPORT */
8194 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
8195 if (dhd_inet6addr_notifier_registered) {
8196 dhd_inet6addr_notifier_registered = FALSE;
8197 unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
8198 }
8199 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
8200 dhd_net_if_unlock_local(dhd);
8201 }
8202 #if 0
8203 // terence 20161024: removed to prevent dev_close() getting stuck in dhd_hang_process
8204 cancel_work_sync(dhd->dhd_deferred_wq);
8205 #endif
8206
8207 #ifdef SHOW_LOGTRACE
8208 /* Wait till event_log_dispatcher_work finishes */
8209 cancel_work_sync(&dhd->event_log_dispatcher_work);
8210 #endif /* SHOW_LOGTRACE */
8211
8212 #if defined(DHD_LB_RXP)
8213 __skb_queue_purge(&dhd->rx_pend_queue);
8214 #endif /* DHD_LB_RXP */
8215
8216 #if defined(DHD_LB_TXP)
8217 skb_queue_purge(&dhd->tx_pend_queue);
8218 #endif /* DHD_LB_TXP */
8219 }
8220
8221 argos_register_notifier_deinit();
8222 #ifdef DHDTCPACK_SUPPRESS
8223 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
8224 #endif /* DHDTCPACK_SUPPRESS */
8225 #if defined(DHD_LB_RXP)
8226 if (ifp->net == dhd->rx_napi_netdev) {
8227 DHD_INFO(("%s napi<%p> disabled ifp->net<%p,%s>\n",
8228 __FUNCTION__, &dhd->rx_napi_struct, net, net->name));
8229 skb_queue_purge(&dhd->rx_napi_queue);
8230 napi_disable(&dhd->rx_napi_struct);
8231 netif_napi_del(&dhd->rx_napi_struct);
8232 dhd->rx_napi_netdev = NULL;
8233 }
8234 #endif /* DHD_LB_RXP */
8235 }
8236 #endif /* WL_CFG80211 */
8237
8238 DHD_SSSR_DUMP_DEINIT(&dhd->pub);
8239
8240 #ifdef PROP_TXSTATUS
8241 dhd_wlfc_cleanup(&dhd->pub, NULL, 0);
8242 #endif
8243 #ifdef SHOW_LOGTRACE
8244 if (!dhd_download_fw_on_driverload) {
8245 /* Release the skbs from queue for WLC_E_TRACE event */
8246 dhd_event_logtrace_flush_queue(&dhd->pub);
8247 if (dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT) {
8248 if (dhd->event_data.fmts) {
8249 MFREE(dhd->pub.osh, dhd->event_data.fmts,
8250 dhd->event_data.fmts_size);
8251 dhd->event_data.fmts = NULL;
8252 }
8253 if (dhd->event_data.raw_fmts) {
8254 MFREE(dhd->pub.osh, dhd->event_data.raw_fmts,
8255 dhd->event_data.raw_fmts_size);
8256 dhd->event_data.raw_fmts = NULL;
8257 }
8258 if (dhd->event_data.raw_sstr) {
8259 MFREE(dhd->pub.osh, dhd->event_data.raw_sstr,
8260 dhd->event_data.raw_sstr_size);
8261 dhd->event_data.raw_sstr = NULL;
8262 }
8263 if (dhd->event_data.rom_raw_sstr) {
8264 MFREE(dhd->pub.osh, dhd->event_data.rom_raw_sstr,
8265 dhd->event_data.rom_raw_sstr_size);
8266 dhd->event_data.rom_raw_sstr = NULL;
8267 }
8268 dhd->dhd_state &= ~DHD_ATTACH_LOGTRACE_INIT;
8269 }
8270 }
8271 #endif /* SHOW_LOGTRACE */
8272 #ifdef APF
8273 dhd_dev_apf_delete_filter(net);
8274 #endif /* APF */
8275
8276 /* Stop the protocol module */
8277 dhd_prot_stop(&dhd->pub);
8278
8279 OLD_MOD_DEC_USE_COUNT;
8280 exit:
8281 #if defined(WL_WIRELESS_EXT)
8282 if (ifidx == 0) {
8283 #ifdef WL_ESCAN
8284 wl_escan_down(&dhd->pub);
8285 #else
8286 wl_iw_down(&dhd->pub);
8287 #endif /* WL_ESCAN */
8288 }
8289 #endif /* defined(WL_WIRELESS_EXT) */
8290 if (ifidx == 0 && !dhd_download_fw_on_driverload) {
8291 #if defined(BT_OVER_SDIO)
8292 dhd_bus_put(&dhd->pub, WLAN_MODULE);
8293 wl_android_set_wifi_on_flag(FALSE);
8294 #else
8295 wl_android_wifi_off(net, TRUE);
8296 #ifdef WL_EXT_IAPSTA
8297 wl_ext_iapsta_dettach_netdev(net, ifidx);
8298 #endif
8299 } else {
8300 if (dhd->pub.conf->deepsleep)
8301 dhd_deepsleep(dhd, 1);
8302 #endif /* BT_OVER_SDIO */
8303 }
8304 dhd->pub.hang_was_sent = 0;
8305
8306 	/* Clear country spec for built-in type driver */
8307 if (!dhd_download_fw_on_driverload) {
8308 dhd->pub.dhd_cspec.country_abbrev[0] = 0x00;
8309 dhd->pub.dhd_cspec.rev = 0;
8310 dhd->pub.dhd_cspec.ccode[0] = 0x00;
8311 }
8312
8313 #ifdef BCMDBGFS
8314 dhd_dbgfs_remove();
8315 #endif
8316
8317 DHD_PERIM_UNLOCK(&dhd->pub);
8318 DHD_OS_WAKE_UNLOCK(&dhd->pub);
8319
8320 /* Destroy wakelock */
8321 if (!dhd_download_fw_on_driverload &&
8322 (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
8323 DHD_OS_WAKE_LOCK_DESTROY(dhd);
8324 dhd->dhd_state &= ~DHD_ATTACH_STATE_WAKELOCKS_INIT;
8325 }
8326 printf("%s: Exit\n", __FUNCTION__);
8327
8328 return 0;
8329 }
8330
8331 #if defined(WL_CFG80211) && defined(USE_INITIAL_SHORT_DWELL_TIME)
8332 extern bool g_first_broadcast_scan;
8333 #endif
8334
8335 #ifdef WL11U
8336 static int dhd_interworking_enable(dhd_pub_t *dhd)
8337 {
8338 uint32 enable = true;
8339 int ret = BCME_OK;
8340
8341 ret = dhd_iovar(dhd, 0, "interworking", (char *)&enable, sizeof(enable), NULL, 0, TRUE);
8342 if (ret < 0) {
8343 		DHD_ERROR(("%s: enabling interworking failed, ret=%d\n", __FUNCTION__, ret));
8344 }
8345
8346 return ret;
8347 }
8348 #endif /* WL11U */
8349
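/*
 * ndo_open handler: powers the chip on if needed, brings the bus up to
 * DHD_BUS_DATA, syncs with the dongle, and initializes cfg80211/NAPI
 * paths before enabling transmit with netif_start_queue().
 */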
8350 static int
8351 dhd_open(struct net_device *net)
8352 {
8353 dhd_info_t *dhd = DHD_DEV_INFO(net);
8354 #ifdef TOE
8355 uint32 toe_ol;
8356 #endif
8357 #ifdef BCM_FD_AGGR
8358 char iovbuf[WLC_IOCTL_SMLEN];
8359 dbus_config_t config;
8360 uint32 agglimit = 0;
8361 uint32 rpc_agg = BCM_RPC_TP_DNGL_AGG_DPC; /* host aggr not enabled yet */
8362 #endif /* BCM_FD_AGGR */
8363 int ifidx;
8364 int32 ret = 0;
8365 #if defined(OOB_INTR_ONLY)
8366 uint32 bus_type = -1;
8367 uint32 bus_num = -1;
8368 uint32 slot_num = -1;
8369 wifi_adapter_info_t *adapter = NULL;
8370 #endif
8371 #if defined(WL_EXT_IAPSTA) && defined(ISAM_PREINIT)
8372 int bytes_written = 0;
8373 struct dhd_conf *conf;
8374 #endif
8375
8376 if (!dhd_download_fw_on_driverload) {
8377 if (!dhd_driver_init_done) {
8378 DHD_ERROR(("%s: WLAN driver is not initialized\n", __FUNCTION__));
8379 return -1;
8380 }
8381 }
8382
8383 printf("%s: Enter %p\n", __FUNCTION__, net);
8384 DHD_MUTEX_LOCK();
8385 /* Init wakelock */
8386 if (!dhd_download_fw_on_driverload) {
8387 if (!(dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
8388 DHD_OS_WAKE_LOCK_INIT(dhd);
8389 dhd->dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
8390 }
8391 #ifdef SHOW_LOGTRACE
8392 skb_queue_head_init(&dhd->evt_trace_queue);
8393
8394 if (!(dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT)) {
8395 ret = dhd_init_logstrs_array(dhd->pub.osh, &dhd->event_data);
8396 if (ret == BCME_OK) {
8397 dhd_init_static_strs_array(dhd->pub.osh, &dhd->event_data,
8398 st_str_file_path, map_file_path);
8399 dhd_init_static_strs_array(dhd->pub.osh, &dhd->event_data,
8400 rom_st_str_file_path, rom_map_file_path);
8401 dhd->dhd_state |= DHD_ATTACH_LOGTRACE_INIT;
8402 }
8403 }
8404 #endif /* SHOW_LOGTRACE */
8405 }
8406
8407 #if defined(PREVENT_REOPEN_DURING_HANG)
8408 /* WAR : to prevent calling dhd_open abnormally in quick succession after hang event */
8409 if (dhd->pub.hang_was_sent == 1) {
8410 DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
8411 /* Force to bring down WLAN interface in case dhd_stop() is not called
8412 * from the upper layer when HANG event is triggered.
8413 */
8414 if (!dhd_download_fw_on_driverload && dhd->pub.up == 1) {
8415 DHD_ERROR(("%s: WLAN interface is not brought down\n", __FUNCTION__));
8416 dhd_stop(net);
8417 } else {
8418 return -1;
8419 }
8420 }
8421 #endif /* PREVENT_REOPEN_DURING_HANG */
8422
8423
8424 DHD_OS_WAKE_LOCK(&dhd->pub);
8425 DHD_PERIM_LOCK(&dhd->pub);
8426 dhd->pub.dongle_trap_occured = 0;
8427 dhd->pub.hang_was_sent = 0;
8428 dhd->pub.hang_reason = 0;
8429 dhd->pub.iovar_timeout_occured = 0;
8430 #ifdef PCIE_FULL_DONGLE
8431 dhd->pub.d3ack_timeout_occured = 0;
8432 #endif /* PCIE_FULL_DONGLE */
8433
8434 #ifdef DHD_LOSSLESS_ROAMING
8435 dhd->pub.dequeue_prec_map = ALLPRIO;
8436 #endif
8437 #if 0
8438 /*
8439 * Force start if ifconfig_up gets called before START command
8440 * We keep WEXT's wl_control_wl_start to provide backward compatibility
8441 * This should be removed in the future
8442 */
8443 ret = wl_control_wl_start(net);
8444 if (ret != 0) {
8445 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
8446 ret = -1;
8447 goto exit;
8448 }
8449 #endif
8450
8451 ifidx = dhd_net2idx(dhd, net);
8452 DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
8453
8454 if (ifidx < 0) {
8455 DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__));
8456 ret = -1;
8457 goto exit;
8458 }
8459
8460 if (!dhd->iflist[ifidx]) {
8461 DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__));
8462 ret = -1;
8463 goto exit;
8464 }
8465
8466 if (ifidx == 0) {
8467 atomic_set(&dhd->pend_8021x_cnt, 0);
8468 if (!dhd_download_fw_on_driverload) {
8469 DHD_ERROR(("\n%s\n", dhd_version));
8470 #ifdef WL_EXT_IAPSTA
8471 wl_ext_iapsta_attach_netdev(net, ifidx, dhd->iflist[ifidx]->bssidx);
8472 #endif
8473 #if defined(USE_INITIAL_SHORT_DWELL_TIME)
8474 g_first_broadcast_scan = TRUE;
8475 #endif
8476 #if defined(BT_OVER_SDIO)
8477 ret = dhd_bus_get(&dhd->pub, WLAN_MODULE);
8478 wl_android_set_wifi_on_flag(TRUE);
8479 #else
8480 ret = wl_android_wifi_on(net);
8481 #endif /* BT_OVER_SDIO */
8482 if (ret != 0) {
8483 DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n",
8484 __FUNCTION__, ret));
8485 ret = -1;
8486 goto exit;
8487 }
8488 }
8489 #ifdef FIX_CPU_MIN_CLOCK
8490 if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE) {
8491 dhd_init_cpufreq_fix(dhd);
8492 dhd_fix_cpu_freq(dhd);
8493 }
8494 #endif /* FIX_CPU_MIN_CLOCK */
8495 #if defined(OOB_INTR_ONLY)
8496 if (dhd->pub.conf->dpc_cpucore >= 0) {
8497 dhd_bus_get_ids(dhd->pub.bus, &bus_type, &bus_num, &slot_num);
8498 adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
8499 if (adapter) {
8500 printf("%s: set irq affinity hit %d\n", __FUNCTION__, dhd->pub.conf->dpc_cpucore);
8501 irq_set_affinity_hint(adapter->irq_num, cpumask_of(dhd->pub.conf->dpc_cpucore));
8502 }
8503 }
8504 #endif
8505
8506 if (dhd->pub.busstate != DHD_BUS_DATA) {
8507 #ifdef BCMDBUS
8508 dhd_set_path(&dhd->pub);
8509 DHD_MUTEX_UNLOCK();
8510 wait_event_interruptible_timeout(dhd->adapter->status_event,
8511 wifi_get_adapter_status(dhd->adapter, WIFI_STATUS_FW_READY),
8512 msecs_to_jiffies(DHD_FW_READY_TIMEOUT));
8513 DHD_MUTEX_LOCK();
8514 ret = dbus_up(dhd->pub.bus);
8515 if (ret != 0) {
8516 DHD_ERROR(("%s: failed to dbus_up with code %d\n", __FUNCTION__, ret));
8517 goto exit;
8518 } else {
8519 dhd->pub.busstate = DHD_BUS_DATA;
8520 }
8521 ret = dhd_sync_with_dongle(&dhd->pub);
8522 if (ret < 0) {
8523 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
8524 goto exit;
8525 }
8526 #else
8527 /* try to bring up bus */
8528 DHD_PERIM_UNLOCK(&dhd->pub);
8529 ret = dhd_bus_start(&dhd->pub);
8530 DHD_PERIM_LOCK(&dhd->pub);
8531 if (ret) {
8532 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
8533 ret = -1;
8534 goto exit;
8535 }
8536 #endif /* !BCMDBUS */
8537 }
8538 #ifdef WL_EXT_IAPSTA
8539 wl_ext_iapsta_attach_name(net, ifidx);
8540 #endif
8541 if (dhd_download_fw_on_driverload) {
8542 if (dhd->pub.conf->deepsleep)
8543 dhd_deepsleep(dhd, 0);
8544 }
8545
8546 #ifdef BCM_FD_AGGR
8547 config.config_id = DBUS_CONFIG_ID_AGGR_LIMIT;
8548
8549
8550 memset(iovbuf, 0, sizeof(iovbuf));
8551 bcm_mkiovar("rpc_dngl_agglimit", (char *)&agglimit, 4,
8552 iovbuf, sizeof(iovbuf));
8553
8554 if (!dhd_wl_ioctl_cmd(&dhd->pub, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) {
8555 agglimit = *(uint32 *)iovbuf;
8556 config.aggr_param.maxrxsf = agglimit >> BCM_RPC_TP_AGG_SF_SHIFT;
8557 config.aggr_param.maxrxsize = agglimit & BCM_RPC_TP_AGG_BYTES_MASK;
8558 DHD_ERROR(("rpc_dngl_agglimit %x : sf_limit %d bytes_limit %d\n",
8559 agglimit, config.aggr_param.maxrxsf, config.aggr_param.maxrxsize));
8560 if (bcm_rpc_tp_set_config(dhd->pub.info->rpc_th, &config)) {
8561 DHD_ERROR(("set tx/rx queue size and buffersize failed\n"));
8562 }
8563 } else {
8564 DHD_ERROR(("get rpc_dngl_agglimit failed\n"));
8565 rpc_agg &= ~BCM_RPC_TP_DNGL_AGG_DPC;
8566 }
8567
8568 /* Set aggregation for TX */
8569 bcm_rpc_tp_agg_set(dhd->pub.info->rpc_th, BCM_RPC_TP_HOST_AGG_MASK,
8570 rpc_agg & BCM_RPC_TP_HOST_AGG_MASK);
8571
8572 /* Set aggregation for RX */
8573 memset(iovbuf, 0, sizeof(iovbuf));
8574 bcm_mkiovar("rpc_agg", (char *)&rpc_agg, sizeof(rpc_agg), iovbuf, sizeof(iovbuf));
8575 if (!dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) {
8576 dhd->pub.info->fdaggr = 0;
8577 if (rpc_agg & BCM_RPC_TP_HOST_AGG_MASK)
8578 dhd->pub.info->fdaggr |= BCM_FDAGGR_H2D_ENABLED;
8579 if (rpc_agg & BCM_RPC_TP_DNGL_AGG_MASK)
8580 dhd->pub.info->fdaggr |= BCM_FDAGGR_D2H_ENABLED;
8581 } else {
8582 DHD_ERROR(("%s(): Setting RX aggregation failed %d\n", __FUNCTION__, ret));
8583 }
8584 #endif /* BCM_FD_AGGR */
8585
8586 #ifdef BT_OVER_SDIO
8587 if (dhd->pub.is_bt_recovery_required) {
8588 DHD_ERROR(("%s: Send Hang Notification 2 to BT\n", __FUNCTION__));
8589 bcmsdh_btsdio_process_dhd_hang_notification(TRUE);
8590 }
8591 dhd->pub.is_bt_recovery_required = FALSE;
8592 #endif
8593
8594 /* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */
8595 memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
8596
8597 #ifdef TOE
8598 /* Get current TOE mode from dongle */
8599 if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0) {
8600 dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
8601 } else {
8602 dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
8603 }
8604 #endif /* TOE */
8605
8606 #if defined(DHD_LB_RXP)
8607 __skb_queue_head_init(&dhd->rx_pend_queue);
8608 if (dhd->rx_napi_netdev == NULL) {
8609 dhd->rx_napi_netdev = dhd->iflist[ifidx]->net;
8610 memset(&dhd->rx_napi_struct, 0, sizeof(struct napi_struct));
8611 netif_napi_add(dhd->rx_napi_netdev, &dhd->rx_napi_struct,
8612 dhd_napi_poll, dhd_napi_weight);
8613 DHD_INFO(("%s napi<%p> enabled ifp->net<%p,%s>\n",
8614 __FUNCTION__, &dhd->rx_napi_struct, net, net->name));
8615 napi_enable(&dhd->rx_napi_struct);
8616 DHD_INFO(("%s load balance init rx_napi_struct\n", __FUNCTION__));
8617 skb_queue_head_init(&dhd->rx_napi_queue);
8618 } /* rx_napi_netdev == NULL */
8619 #endif /* DHD_LB_RXP */
8620
8621 #if defined(DHD_LB_TXP)
8622 /* Use the variant that uses locks */
8623 skb_queue_head_init(&dhd->tx_pend_queue);
8624 #endif /* DHD_LB_TXP */
8625
8626 #if defined(WL_CFG80211)
8627 if (unlikely(wl_cfg80211_up(net))) {
8628 DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__));
8629 ret = -1;
8630 goto exit;
8631 }
8632 if (!dhd_download_fw_on_driverload) {
8633 #ifdef ARP_OFFLOAD_SUPPORT
8634 dhd->pend_ipaddr = 0;
8635 if (!dhd_inetaddr_notifier_registered) {
8636 dhd_inetaddr_notifier_registered = TRUE;
8637 register_inetaddr_notifier(&dhd_inetaddr_notifier);
8638 }
8639 #endif /* ARP_OFFLOAD_SUPPORT */
8640 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
8641 if (!dhd_inet6addr_notifier_registered) {
8642 dhd_inet6addr_notifier_registered = TRUE;
8643 register_inet6addr_notifier(&dhd_inet6addr_notifier);
8644 }
8645 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
8646 }
8647
8648 argos_register_notifier_init(net);
8649 #if defined(NUM_SCB_MAX_PROBE)
8650 dhd_set_scb_probe(&dhd->pub);
8651 #endif /* NUM_SCB_MAX_PROBE */
8652 #endif /* WL_CFG80211 */
8653 #if defined(WL_WIRELESS_EXT)
8654 #ifdef WL_ESCAN
8655 if (unlikely(wl_escan_up(net, &dhd->pub))) {
8656 DHD_ERROR(("%s: failed to bring up escan\n", __FUNCTION__));
8657 ret = -1;
8658 goto exit;
8659 }
8660 #endif
8661 #endif
8662 #if defined(WL_EXT_IAPSTA) && defined(ISAM_PREINIT)
8663 if (!dhd_download_fw_on_driverload) {
8664 conf = dhd_get_conf(net);
8665 if (conf) {
8666 wl_android_ext_priv_cmd(net, conf->isam_init, 0, &bytes_written);
8667 wl_android_ext_priv_cmd(net, conf->isam_config, 0, &bytes_written);
8668 wl_android_ext_priv_cmd(net, conf->isam_enable, 0, &bytes_written);
8669 }
8670 }
8671 #endif
8672 }
8673
8674 /* Allow transmit calls */
8675 netif_start_queue(net);
8676 dhd->pub.up = 1;
8677
8678 OLD_MOD_INC_USE_COUNT;
8679
8680 #ifdef BCMDBGFS
8681 dhd_dbgfs_init(&dhd->pub);
8682 #endif
8683
8684 exit:
8685 if (ret) {
8686 dhd_stop(net);
8687 }
8688
8689 DHD_PERIM_UNLOCK(&dhd->pub);
8690 DHD_OS_WAKE_UNLOCK(&dhd->pub);
8691 DHD_MUTEX_UNLOCK();
8692
8693 printf("%s: Exit ret=%d\n", __FUNCTION__, ret);
8694 return ret;
8695 }
8696
8697 int dhd_do_driver_init(struct net_device *net)
8698 {
8699 dhd_info_t *dhd = NULL;
8700
8701 if (!net) {
8702 DHD_ERROR(("Primary Interface not initialized \n"));
8703 return -EINVAL;
8704 }
8705
8706 DHD_MUTEX_IS_LOCK_RETURN();
8707
8709 dhd = DHD_DEV_INFO(net);
8710
8711 	/* If driver is already initialized, do nothing */
8713 	if (dhd->pub.busstate == DHD_BUS_DATA) {
8714 		DHD_TRACE(("Driver already initialized, nothing to do\n"));
8715 return 0;
8716 }
8717
8718 if (dhd_open(net) < 0) {
8719 DHD_ERROR(("Driver Init Failed \n"));
8720 return -1;
8721 }
8722
8723 return 0;
8724 }
8725
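/*
 * WLC_E_IF add/del/change handlers. Netdev (un)registration is pushed to
 * the deferred workqueue so the DPC thread is never blocked waiting on
 * rtnl operations (see the comment below about iovar timeouts).
 */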
8726 int
8727 dhd_event_ifadd(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
8728 {
8729 #ifdef WL_CFG80211
8730 if (wl_cfg80211_notify_ifadd(dhd_linux_get_primary_netdev(&dhdinfo->pub),
8731 ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
8732 return BCME_OK;
8733 #endif
8734
8735 /* handle IF event caused by wl commands, SoftAP, WEXT and
8736 * anything else. This has to be done asynchronously otherwise
8737  * DPC will be blocked (and iovars will time out as DPC has no chance
8738 * to read the response back)
8739 */
8740 if (ifevent->ifidx > 0) {
8741 dhd_if_event_t *if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
8742 if (if_event == NULL) {
8743 DHD_ERROR(("dhd_event_ifadd: Failed MALLOC, malloced %d bytes",
8744 MALLOCED(dhdinfo->pub.osh)));
8745 return BCME_NOMEM;
8746 }
8747
8748 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
8749 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
8750 strncpy(if_event->name, name, IFNAMSIZ);
8751 if_event->name[IFNAMSIZ - 1] = '\0';
8752 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event,
8753 DHD_WQ_WORK_IF_ADD, dhd_ifadd_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
8754 }
8755
8756 return BCME_OK;
8757 }
8758
8759 int
8760 dhd_event_ifdel(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
8761 {
8762 dhd_if_event_t *if_event;
8763
8764 #ifdef WL_CFG80211
8765 if (wl_cfg80211_notify_ifdel(dhd_linux_get_primary_netdev(&dhdinfo->pub),
8766 ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
8767 return BCME_OK;
8768 #endif /* WL_CFG80211 */
8769
8770 /* handle IF event caused by wl commands, SoftAP, WEXT and
8771 * anything else
8772 */
8773 if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
8774 if (if_event == NULL) {
8775 DHD_ERROR(("dhd_event_ifdel: malloc failed for if_event, malloced %d bytes",
8776 MALLOCED(dhdinfo->pub.osh)));
8777 return BCME_NOMEM;
8778 }
8779 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
8780 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
8781 strncpy(if_event->name, name, IFNAMSIZ);
8782 if_event->name[IFNAMSIZ - 1] = '\0';
8783 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_DEL,
8784 dhd_ifdel_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
8785
8786 return BCME_OK;
8787 }
8788
8789 int
8790 dhd_event_ifchange(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
8791 {
8792 #ifdef DHD_UPDATE_INTF_MAC
8793 dhd_if_event_t *if_event;
8794 #endif /* DHD_UPDATE_INTF_MAC */
8795
8796 #ifdef WL_CFG80211
8797 wl_cfg80211_notify_ifchange(dhd_linux_get_primary_netdev(&dhdinfo->pub),
8798 ifevent->ifidx, name, mac, ifevent->bssidx);
8799 #endif /* WL_CFG80211 */
8800
8801 #ifdef DHD_UPDATE_INTF_MAC
8802 /* handle IF event caused by wl commands, SoftAP, WEXT, MBSS and
8803 * anything else
8804 */
8805 if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
8806 if (if_event == NULL) {
8807 		DHD_ERROR(("dhd_event_ifchange: malloc failed for if_event, malloced %d bytes",
8808 MALLOCED(dhdinfo->pub.osh)));
8809 return BCME_NOMEM;
8810 }
8811 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
8812 // construct a change event
8813 if_event->event.ifidx = dhd_ifname2idx(dhdinfo, name);
8814 if_event->event.opcode = WLC_E_IF_CHANGE;
8815 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
8816 strncpy(if_event->name, name, IFNAMSIZ);
8817 if_event->name[IFNAMSIZ - 1] = '\0';
8818 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_UPDATE,
8819 dhd_ifupdate_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
8820 #endif /* DHD_UPDATE_INTF_MAC */
8821
8822 return BCME_OK;
8823 }
8824
8825 /* Unregister and free the existing net_device interface (if any) in iflist and
8826 * allocate a new one. The slot is reused. This function does NOT register the
8827 * new interface with the Linux kernel; dhd_register_if does that job.
8828 */
8829 struct net_device*
8830 dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, const char *name,
8831 uint8 *mac, uint8 bssidx, bool need_rtnl_lock, const char *dngl_name)
8832 {
8833 dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
8834 dhd_if_t *ifp;
8835
8836 ASSERT(dhdinfo && (ifidx < DHD_MAX_IFS));
8837 ifp = dhdinfo->iflist[ifidx];
8838
8839 if (ifp != NULL) {
8840 if (ifp->net != NULL) {
8841 DHD_ERROR(("%s: free existing IF %s ifidx:%d \n",
8842 __FUNCTION__, ifp->net->name, ifidx));
8843
8844 if (ifidx == 0) {
8845 /* For primary ifidx (0), there shouldn't be
8846 * any netdev present already.
8847 */
8848 DHD_ERROR(("Primary ifidx populated already\n"));
8849 ASSERT(0);
8850 return NULL;
8851 }
8852
8853 dhd_dev_priv_clear(ifp->net); /* clear net_device private */
8854
8855 /* in unregister_netdev case, the interface gets freed by net->destructor
8856 * (which is set to free_netdev)
8857 */
8858 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
8859 free_netdev(ifp->net);
8860 } else {
8861 netif_stop_queue(ifp->net);
8862 if (need_rtnl_lock)
8863 unregister_netdev(ifp->net);
8864 else
8865 unregister_netdevice(ifp->net);
8866 }
8867 ifp->net = NULL;
8868 }
8869 } else {
8870 ifp = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_t));
8871 if (ifp == NULL) {
8872 DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__, sizeof(dhd_if_t)));
8873 return NULL;
8874 }
8875 }
8876
8877 memset(ifp, 0, sizeof(dhd_if_t));
8878 ifp->info = dhdinfo;
8879 ifp->idx = ifidx;
8880 ifp->bssidx = bssidx;
8881 #ifdef DHD_MCAST_REGEN
8882 ifp->mcast_regen_bss_enable = FALSE;
8883 #endif
8884 /* rx_pkt_chainable defaults to TRUE at alloc time */
8885 ifp->rx_pkt_chainable = TRUE;
8886
8887 if (mac != NULL)
8888 memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN);
8889
8890 /* Allocate etherdev, including space for private structure */
8891 ifp->net = alloc_etherdev(DHD_DEV_PRIV_SIZE);
8892 if (ifp->net == NULL) {
8893 DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__, sizeof(dhdinfo)));
8894 goto fail;
8895 }
8896
8897 /* Setup the dhd interface's netdevice private structure. */
8898 dhd_dev_priv_save(ifp->net, dhdinfo, ifp, ifidx);
8899
8900 if (name && name[0]) {
8901 strncpy(ifp->net->name, name, IFNAMSIZ);
8902 ifp->net->name[IFNAMSIZ - 1] = '\0';
8903 }
8904
8905 #ifdef WL_CFG80211
8906 if (ifidx == 0) {
8907 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
8908 ifp->net->destructor = free_netdev;
8909 #else
8910 ifp->net->needs_free_netdev = true;
8911 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) */
8912 } else {
8913 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
8914 ifp->net->destructor = dhd_netdev_free;
8915 #else
8916 ifp->net->needs_free_netdev = true;
8917 ifp->net->priv_destructor = dhd_netdev_free;
8918 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) */
8919 }
8920 #else
8921 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
8922 ifp->net->destructor = free_netdev;
8923 #else
8924 ifp->net->needs_free_netdev = true;
8925 #endif
8926 #endif /* WL_CFG80211 */
8927 strncpy(ifp->name, ifp->net->name, IFNAMSIZ);
8928 ifp->name[IFNAMSIZ - 1] = '\0';
8929 dhdinfo->iflist[ifidx] = ifp;
8930
8931 /* initialize the dongle provided if name */
8932 if (dngl_name)
8933 strncpy(ifp->dngl_name, dngl_name, IFNAMSIZ);
8934 else if (name)
8935 strncpy(ifp->dngl_name, name, IFNAMSIZ);
8936
8937 #ifdef PCIE_FULL_DONGLE
8938 /* Initialize STA info list */
8939 INIT_LIST_HEAD(&ifp->sta_list);
8940 DHD_IF_STA_LIST_LOCK_INIT(ifp);
8941 #endif /* PCIE_FULL_DONGLE */
8942
8943 #ifdef DHD_L2_FILTER
8944 ifp->phnd_arp_table = init_l2_filter_arp_table(dhdpub->osh);
8945 ifp->parp_allnode = TRUE;
8946 #endif /* DHD_L2_FILTER */
8947
8948
8949 DHD_CUMM_CTR_INIT(&ifp->cumm_ctr);
8950
8951 return ifp->net;
8952
8953 fail:
8954 if (ifp != NULL) {
8955 if (ifp->net != NULL) {
8956 #if defined(DHD_LB_RXP) && defined(PCIE_FULL_DONGLE)
8957 if (ifp->net == dhdinfo->rx_napi_netdev) {
8958 napi_disable(&dhdinfo->rx_napi_struct);
8959 netif_napi_del(&dhdinfo->rx_napi_struct);
8960 skb_queue_purge(&dhdinfo->rx_napi_queue);
8961 dhdinfo->rx_napi_netdev = NULL;
8962 }
8963 #endif /* DHD_LB_RXP && PCIE_FULL_DONGLE */
8964 dhd_dev_priv_clear(ifp->net);
8965 free_netdev(ifp->net);
8966 ifp->net = NULL;
8967 }
8968 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
8969 ifp = NULL;
8970 }
8971 dhdinfo->iflist[ifidx] = NULL;
8972 return NULL;
8973 }
8974
8975 /* unregister and free the net_device interface associated with the indexed
8976 * slot, also free the slot memory and set the slot pointer to NULL
8977 */
8978 int
8979 dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock)
8980 {
8981 dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
8982 dhd_if_t *ifp;
8983 #ifdef PCIE_FULL_DONGLE
8984 if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t *)dhdpub->if_flow_lkup;
8985 #endif /* PCIE_FULL_DONGLE */
8986
8987 ifp = dhdinfo->iflist[ifidx];
8988
8989 if (ifp != NULL) {
8990 if (ifp->net != NULL) {
8991 DHD_ERROR(("deleting interface '%s' idx %d\n", ifp->net->name, ifp->idx));
8992
8993 dhdinfo->iflist[ifidx] = NULL;
8994 /* in unregister_netdev case, the interface gets freed by net->destructor
8995 * (which is set to free_netdev)
8996 */
8997 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
8998 free_netdev(ifp->net);
8999 } else {
9000 netif_tx_disable(ifp->net);
9001
9002
9003
9004 #if defined(SET_RPS_CPUS)
9005 custom_rps_map_clear(ifp->net->_rx);
9006 #endif /* SET_RPS_CPUS */
9007 if (need_rtnl_lock)
9008 unregister_netdev(ifp->net);
9009 else
9010 unregister_netdevice(ifp->net);
9011 #ifdef WL_EXT_IAPSTA
9012 wl_ext_iapsta_dettach_netdev(ifp->net, ifidx);
9013 #endif
9014 }
9015 ifp->net = NULL;
9016 }
9017 #ifdef DHD_WMF
9018 dhd_wmf_cleanup(dhdpub, ifidx);
9019 #endif /* DHD_WMF */
9020 #ifdef DHD_L2_FILTER
9021 bcm_l2_filter_arp_table_update(dhdpub->osh, ifp->phnd_arp_table, TRUE,
9022 NULL, FALSE, dhdpub->tickcnt);
9023 deinit_l2_filter_arp_table(dhdpub->osh, ifp->phnd_arp_table);
9024 ifp->phnd_arp_table = NULL;
9025 #endif /* DHD_L2_FILTER */
9026
9027
9028 dhd_if_del_sta_list(ifp);
9029 #ifdef PCIE_FULL_DONGLE
9030 /* Delete flowrings of WDS interface */
9031 if (if_flow_lkup[ifidx].role == WLC_E_IF_ROLE_WDS) {
9032 dhd_flow_rings_delete(dhdpub, ifidx);
9033 }
9034 #endif /* PCIE_FULL_DONGLE */
9035 DHD_CUMM_CTR_INIT(&ifp->cumm_ctr);
9036
9037 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
9038 ifp = NULL;
9039 }
9040
9041 return BCME_OK;
9042 }
9043
9044
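/* net_device_ops tables: the primary interface additionally implements
 * ndo_open/ndo_stop; virtual interfaces share the remaining handlers.
 */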
9045 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
9046 static struct net_device_ops dhd_ops_pri = {
9047 .ndo_open = dhd_open,
9048 .ndo_stop = dhd_stop,
9049 .ndo_get_stats = dhd_get_stats,
9050 .ndo_do_ioctl = dhd_ioctl_entry,
9051 .ndo_start_xmit = dhd_start_xmit,
9052 .ndo_set_mac_address = dhd_set_mac_address,
9053 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
9054 .ndo_set_rx_mode = dhd_set_multicast_list,
9055 #else
9056 .ndo_set_multicast_list = dhd_set_multicast_list,
9057 #endif
9058 };
9059
9060 static struct net_device_ops dhd_ops_virt = {
9061 .ndo_get_stats = dhd_get_stats,
9062 .ndo_do_ioctl = dhd_ioctl_entry,
9063 .ndo_start_xmit = dhd_start_xmit,
9064 .ndo_set_mac_address = dhd_set_mac_address,
9065 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
9066 .ndo_set_rx_mode = dhd_set_multicast_list,
9067 #else
9068 .ndo_set_multicast_list = dhd_set_multicast_list,
9069 #endif
9070 };
9071 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */
9072
9073 #ifdef DEBUGGER
9074 extern void debugger_init(void *bus_handle);
9075 #endif
9076
9077
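/* Kernel-space file helpers for event-log string parsing. Note these rely on
 * get_fs()/set_fs(KERNEL_DS) and vfs_read(), which only exist on older
 * kernels (set_fs() was removed around v5.10).
 */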
9078 #ifdef SHOW_LOGTRACE
9079 int
9080 dhd_os_read_file(void *file, char *buf, uint32 size)
9081 {
9082 struct file *filep = (struct file *)file;
9083
9084 if (!file || !buf)
9085 return -1;
9086
9087 return vfs_read(filep, buf, size, &filep->f_pos);
9088 }
9089
9090 int
9091 dhd_os_seek_file(void *file, int64 offset)
9092 {
9093 struct file *filep = (struct file *)file;
9094 if (!file)
9095 return -1;
9096
9097 /* offset can be negative */
9098 filep->f_pos = filep->f_pos + offset;
9099
9100 return 0;
9101 }
9102
9103 static int
9104 dhd_init_logstrs_array(osl_t *osh, dhd_event_log_t *temp)
9105 {
9106 struct file *filep = NULL;
9107 struct kstat stat;
9108 mm_segment_t fs;
9109 char *raw_fmts = NULL;
9110 int logstrs_size = 0;
9111 int error = 0;
9112
9113 fs = get_fs();
9114 set_fs(KERNEL_DS);
9115
9116 filep = filp_open(logstrs_path, O_RDONLY, 0);
9117
9118 if (IS_ERR(filep)) {
9119 DHD_ERROR(("%s: Failed to open the file %s \n", __FUNCTION__, logstrs_path));
9120 goto fail;
9121 }
9122 error = vfs_stat(logstrs_path, &stat);
9123 if (error) {
9124 DHD_ERROR(("%s: Failed to stat file %s \n", __FUNCTION__, logstrs_path));
9125 goto fail;
9126 }
9127 logstrs_size = (int) stat.size;
9128
9129 if (logstrs_size == 0) {
9130 DHD_ERROR(("%s: return as logstrs_size is 0\n", __FUNCTION__));
9131 goto fail1;
9132 }
9133
9134 raw_fmts = MALLOC(osh, logstrs_size);
9135 if (raw_fmts == NULL) {
9136 DHD_ERROR(("%s: Failed to allocate memory \n", __FUNCTION__));
9137 goto fail;
9138 }
9139 if (vfs_read(filep, raw_fmts, logstrs_size, &filep->f_pos) != logstrs_size) {
9140 DHD_ERROR(("%s: Failed to read file %s\n", __FUNCTION__, logstrs_path));
9141 goto fail;
9142 }
9143
9144 if (dhd_parse_logstrs_file(osh, raw_fmts, logstrs_size, temp)
9145 == BCME_OK) {
9146 filp_close(filep, NULL);
9147 set_fs(fs);
9148 return BCME_OK;
9149 }
9150
9151 fail:
9152 if (raw_fmts) {
9153 MFREE(osh, raw_fmts, logstrs_size);
9154 raw_fmts = NULL;
9155 }
9156
9157 fail1:
9158 if (!IS_ERR(filep))
9159 filp_close(filep, NULL);
9160
9161 set_fs(fs);
9162 temp->fmts = NULL;
9163 return BCME_ERROR;
9164 }
9165
9166 static int
9167 dhd_read_map(osl_t *osh, char *fname, uint32 *ramstart, uint32 *rodata_start,
9168 uint32 *rodata_end)
9169 {
9170 struct file *filep = NULL;
9171 mm_segment_t fs;
9172 int err = BCME_ERROR;
9173
9174 if (fname == NULL) {
9175 DHD_ERROR(("%s: ERROR fname is NULL \n", __FUNCTION__));
9176 return BCME_ERROR;
9177 }
9178
9179 fs = get_fs();
9180 set_fs(KERNEL_DS);
9181
9182 filep = filp_open(fname, O_RDONLY, 0);
9183 if (IS_ERR(filep)) {
9184 DHD_ERROR(("%s: Failed to open %s \n", __FUNCTION__, fname));
9185 goto fail;
9186 }
9187
9188 if ((err = dhd_parse_map_file(osh, filep, ramstart,
9189 rodata_start, rodata_end)) < 0)
9190 goto fail;
9191
9192 fail:
9193 if (!IS_ERR(filep))
9194 filp_close(filep, NULL);
9195
9196 set_fs(fs);
9197
9198 return err;
9199 }
9200
9201 static int
9202 dhd_init_static_strs_array(osl_t *osh, dhd_event_log_t *temp, char *str_file, char *map_file)
9203 {
9204 struct file *filep = NULL;
9205 mm_segment_t fs;
9206 char *raw_fmts = NULL;
9207 uint32 logstrs_size = 0;
9208
9209 int error = 0;
9210 uint32 ramstart = 0;
9211 uint32 rodata_start = 0;
9212 uint32 rodata_end = 0;
9213 uint32 logfilebase = 0;
9214
9215 error = dhd_read_map(osh, map_file, &ramstart, &rodata_start, &rodata_end);
9216 if (error != BCME_OK) {
9217 DHD_ERROR(("readmap Error!! \n"));
9218 /* don't do event log parsing in actual case */
9219 if (strstr(str_file, ram_file_str) != NULL) {
9220 temp->raw_sstr = NULL;
9221 } else if (strstr(str_file, rom_file_str) != NULL) {
9222 temp->rom_raw_sstr = NULL;
9223 }
9224 return error;
9225 }
9226 DHD_ERROR(("ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n",
9227 ramstart, rodata_start, rodata_end));
9228
9229 fs = get_fs();
9230 set_fs(KERNEL_DS);
9231
9232 filep = filp_open(str_file, O_RDONLY, 0);
9233 if (IS_ERR(filep)) {
9234 DHD_ERROR(("%s: Failed to open the file %s \n", __FUNCTION__, str_file));
9235 goto fail;
9236 }
9237
9238 /* The full file is huge; read only the required part */
9239 logstrs_size = rodata_end - rodata_start;
9240
9241 if (logstrs_size == 0) {
9242 DHD_ERROR(("%s: return as logstrs_size is 0\n", __FUNCTION__));
9243 goto fail1;
9244 }
9245
9246 raw_fmts = MALLOC(osh, logstrs_size);
9247 if (raw_fmts == NULL) {
9248 DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
9249 goto fail;
9250 }
9251
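/* Offset of the rodata (log strings) section within the image file:
 * virtual address of rodata minus the RAM base taken from the map file.
 */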
9252 logfilebase = rodata_start - ramstart;
9253
9254 error = generic_file_llseek(filep, logfilebase, SEEK_SET);
9255 if (error < 0) {
9256 DHD_ERROR(("%s: %s llseek failed %d \n", __FUNCTION__, str_file, error));
9257 goto fail;
9258 }
9259
9260 error = vfs_read(filep, raw_fmts, logstrs_size, (&filep->f_pos));
9261 if (error != logstrs_size) {
9262 DHD_ERROR(("%s: %s read failed %d \n", __FUNCTION__, str_file, error));
9263 goto fail;
9264 }
9265
9266 if (strstr(str_file, ram_file_str) != NULL) {
9267 temp->raw_sstr = raw_fmts;
9268 temp->raw_sstr_size = logstrs_size;
9269 temp->ramstart = ramstart;
9270 temp->rodata_start = rodata_start;
9271 temp->rodata_end = rodata_end;
9272 } else if (strstr(str_file, rom_file_str) != NULL) {
9273 temp->rom_raw_sstr = raw_fmts;
9274 temp->rom_raw_sstr_size = logstrs_size;
9275 temp->rom_ramstart = ramstart;
9276 temp->rom_rodata_start = rodata_start;
9277 temp->rom_rodata_end = rodata_end;
9278 }
9279
9280 filp_close(filep, NULL);
9281 set_fs(fs);
9282
9283 return BCME_OK;
9284
9285 fail:
9286 if (raw_fmts) {
9287 MFREE(osh, raw_fmts, logstrs_size);
9288 raw_fmts = NULL;
9289 }
9290
9291 fail1:
9292 if (!IS_ERR(filep))
9293 filp_close(filep, NULL);
9294
9295 set_fs(fs);
9296
9297 if (strstr(str_file, ram_file_str) != NULL) {
9298 temp->raw_sstr = NULL;
9299 } else if (strstr(str_file, rom_file_str) != NULL) {
9300 temp->rom_raw_sstr = NULL;
9301 }
9302
9303 return error;
9304 }
9305
9306 #endif /* SHOW_LOGTRACE */
9307
9308 #ifdef BCMDBUS
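/* Report the rx buffer size for dbus_attach, computed from the primary
 * netdev after refreshing hard_header_len to ETH_HLEN plus the bus header.
 */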
9309 uint
9310 dhd_get_rxsz(dhd_pub_t *pub)
9311 {
9312 struct net_device *net = NULL;
9313 dhd_info_t *dhd = NULL;
9314 uint rxsz;
9315
9316 /* Assign rxsz for dbus_attach */
9317 dhd = pub->info;
9318 net = dhd->iflist[0]->net;
9319 net->hard_header_len = ETH_HLEN + pub->hdrlen;
9320 rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);
9321
9322 return rxsz;
9323 }
9324
9325 void
9326 dhd_set_path(dhd_pub_t *pub)
9327 {
9328 dhd_info_t *dhd = NULL;
9329
9330 dhd = pub->info;
9331
9332 /* try to download image and nvram to the dongle */
9333 if (dhd_update_fw_nv_path(dhd) && dhd->pub.bus) {
9334 DHD_INFO(("%s: fw %s, nv %s, conf %s\n",
9335 __FUNCTION__, dhd->fw_path, dhd->nv_path, dhd->conf_path));
9336 dhd_bus_update_fw_nv_path(dhd->pub.bus,
9337 dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path);
9338 }
9339 }
9340 #endif
9341
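/* Allocate and initialize the per-device dhd_info: primary interface,
 * protocol layer, cfg80211/wext attach, watchdog/DPC/RXF threads and
 * notifier registration. On failure, partially attached state is torn down
 * via dhd_detach()/dhd_free() using the recorded dhd_state flags.
 */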
9342 dhd_pub_t *
9343 dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen
9344 #ifdef BCMDBUS
9345 , void *data
9346 #endif
9347 )
9348 {
9349 dhd_info_t *dhd = NULL;
9350 struct net_device *net = NULL;
9351 char if_name[IFNAMSIZ] = {'\0'};
9352 #ifdef SHOW_LOGTRACE
9353 int ret;
9354 #endif /* SHOW_LOGTRACE */
9355 #if defined(BCMSDIO) || defined(BCMPCIE)
9356 uint32 bus_type = -1;
9357 uint32 bus_num = -1;
9358 uint32 slot_num = -1;
9359 wifi_adapter_info_t *adapter = NULL;
9360 #elif defined(BCMDBUS)
9361 wifi_adapter_info_t *adapter = data;
9362 #endif
9363 #ifdef GET_CUSTOM_MAC_ENABLE
9364 char hw_ether[62];
9365 #endif /* GET_CUSTOM_MAC_ENABLE */
9366
9367 dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT;
9368 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
9369
9370 #ifdef STBLINUX
9371 DHD_ERROR(("%s\n", driver_target));
9372 #endif /* STBLINUX */
9373 /* will implement get_ids for DBUS later */
9374 #if defined(BCMSDIO)
9375 dhd_bus_get_ids(bus, &bus_type, &bus_num, &slot_num);
9376 #endif
9377 #if defined(BCMSDIO) || defined(BCMPCIE)
9378 adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
9379 #endif
9380
9381 /* Allocate primary dhd_info */
9382 dhd = wifi_platform_prealloc(adapter, DHD_PREALLOC_DHD_INFO, sizeof(dhd_info_t));
9383 if (dhd == NULL) {
9384 dhd = MALLOC(osh, sizeof(dhd_info_t));
9385 if (dhd == NULL) {
9386 DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
9387 goto dhd_null_flag;
9388 }
9389 }
9390 memset(dhd, 0, sizeof(dhd_info_t));
9391 dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC;
9392
9393 dhd->unit = dhd_found + instance_base; /* do not increment dhd_found, yet */
9394
9395 dhd->pub.osh = osh;
9396 #ifdef DUMP_IOCTL_IOV_LIST
9397 dll_init(&(dhd->pub.dump_iovlist_head));
9398 #endif /* DUMP_IOCTL_IOV_LIST */
9399 dhd->adapter = adapter;
9400 dhd->pub.adapter = (void *)adapter;
9401 #ifdef DHD_DEBUG
9402 dll_init(&(dhd->pub.mw_list_head));
9403 #endif /* DHD_DEBUG */
9404 #ifdef BT_OVER_SDIO
9405 dhd->pub.is_bt_recovery_required = FALSE;
9406 mutex_init(&dhd->bus_user_lock);
9407 #endif /* BT_OVER_SDIO */
9408
9409 #ifdef GET_CUSTOM_MAC_ENABLE
9410 wifi_platform_get_mac_addr(dhd->adapter, hw_ether);
9411 bcopy(hw_ether, dhd->pub.mac.octet, sizeof(struct ether_addr));
9412 #endif /* GET_CUSTOM_MAC_ENABLE */
9413 #ifdef CUSTOM_FORCE_NODFS_FLAG
9414 dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
9415 dhd->pub.force_country_change = TRUE;
9416 #endif /* CUSTOM_FORCE_NODFS_FLAG */
9417 #ifdef CUSTOM_COUNTRY_CODE
9418 get_customized_country_code(dhd->adapter,
9419 dhd->pub.dhd_cspec.country_abbrev, &dhd->pub.dhd_cspec,
9420 dhd->pub.dhd_cflags);
9421 #endif /* CUSTOM_COUNTRY_CODE */
9422 #ifndef BCMDBUS
9423 dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID;
9424 dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID;
9425 #ifdef DHD_WET
9426 dhd->pub.wet_info = dhd_get_wet_info(&dhd->pub);
9427 #endif /* DHD_WET */
9428 /* Initialize thread based operation and lock */
9429 sema_init(&dhd->sdsem, 1);
9430 #endif /* !BCMDBUS */
9431
9432 /* Link to info module */
9433 dhd->pub.info = dhd;
9434
9435
9436 /* Link to bus module */
9437 dhd->pub.bus = bus;
9438 dhd->pub.hdrlen = bus_hdrlen;
9439
9440 /* dhd_conf must be attached after linking dhd to dhd->pub.info,
9441 * because dhd_detach checks whether .info is NULL.
9442 */
9443 if (dhd_conf_attach(&dhd->pub) != 0) {
9444 DHD_ERROR(("dhd_conf_attach failed\n"));
9445 goto fail;
9446 }
9447 #ifndef BCMDBUS
9448 dhd_conf_reset(&dhd->pub);
9449 dhd_conf_set_chiprev(&dhd->pub, dhd_bus_chip(bus), dhd_bus_chiprev(bus));
9450 dhd_conf_preinit(&dhd->pub);
9451 #endif /* !BCMDBUS */
9452
9453 /* Some DHD modules (e.g. cfg80211) configure the operation mode based on the firmware name.
9454 * This is admittedly a hack, but we have to make it work properly until we have a better
9455 * solution
9456 */
9457 dhd_update_fw_nv_path(dhd);
9458
9459 /* Set network interface name if it was provided as module parameter */
9460 if (iface_name[0]) {
9461 int len;
9462 char ch;
9463 strncpy(if_name, iface_name, IFNAMSIZ);
9464 if_name[IFNAMSIZ - 1] = 0;
9465 len = strlen(if_name);
9466 ch = if_name[len - 1];
9467 if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
9468 strncat(if_name, "%d", 2);
9469 }
9470
9471 /* Pass NULL as dngl_name so the host copies if_name into the dngl_name member */
9472 net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE, NULL);
9473 if (net == NULL) {
9474 goto fail;
9475 }
9476
9477
9478 dhd_state |= DHD_ATTACH_STATE_ADD_IF;
9479 #ifdef DHD_L2_FILTER
9480 /* initialize the l2_filter_cnt */
9481 dhd->pub.l2_filter_cnt = 0;
9482 #endif
9483 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
9484 net->open = NULL;
9485 #else
9486 net->netdev_ops = NULL;
9487 #endif
9488
9489 mutex_init(&dhd->dhd_iovar_mutex);
9490 sema_init(&dhd->proto_sem, 1);
9491 #ifdef DHD_ULP
9492 if (!(dhd_ulp_init(osh, &dhd->pub)))
9493 goto fail;
9494 #endif /* DHD_ULP */
9495
9496 #if defined(DHD_HANG_SEND_UP_TEST)
9497 dhd->pub.req_hang_type = 0;
9498 #endif /* DHD_HANG_SEND_UP_TEST */
9499
9500 #ifdef PROP_TXSTATUS
9501 spin_lock_init(&dhd->wlfc_spinlock);
9502
9503 dhd->pub.skip_fc = dhd_wlfc_skip_fc;
9504 dhd->pub.plat_init = dhd_wlfc_plat_init;
9505 dhd->pub.plat_deinit = dhd_wlfc_plat_deinit;
9506
9507 #ifdef DHD_WLFC_THREAD
9508 init_waitqueue_head(&dhd->pub.wlfc_wqhead);
9509 dhd->pub.wlfc_thread = kthread_create(dhd_wlfc_transfer_packets, &dhd->pub, "wlfc-thread");
9510 if (IS_ERR(dhd->pub.wlfc_thread)) {
9511 DHD_ERROR(("create wlfc thread failed\n"));
9512 goto fail;
9513 } else {
9514 wake_up_process(dhd->pub.wlfc_thread);
9515 }
9516 #endif /* DHD_WLFC_THREAD */
9517 #endif /* PROP_TXSTATUS */
9518
9519 /* Initialize other structure content */
9520 init_waitqueue_head(&dhd->ioctl_resp_wait);
9521 init_waitqueue_head(&dhd->d3ack_wait);
9522 #ifdef PCIE_INB_DW
9523 init_waitqueue_head(&dhd->ds_exit_wait);
9524 #endif /* PCIE_INB_DW */
9525 init_waitqueue_head(&dhd->ctrl_wait);
9526 init_waitqueue_head(&dhd->dhd_bus_busy_state_wait);
9527 dhd->pub.dhd_bus_busy_state = 0;
9528
9529 /* Initialize the spinlocks */
9530 spin_lock_init(&dhd->sdlock);
9531 spin_lock_init(&dhd->txqlock);
9532 spin_lock_init(&dhd->rxqlock);
9533 spin_lock_init(&dhd->dhd_lock);
9534 spin_lock_init(&dhd->rxf_lock);
9535 #ifdef WLTDLS
9536 spin_lock_init(&dhd->pub.tdls_lock);
9537 #endif /* WLTDLS */
9538 #if defined(RXFRAME_THREAD)
9539 dhd->rxthread_enabled = TRUE;
9540 #endif /* defined(RXFRAME_THREAD) */
9541
9542 #ifdef DHDTCPACK_SUPPRESS
9543 spin_lock_init(&dhd->tcpack_lock);
9544 #endif /* DHDTCPACK_SUPPRESS */
9545
9546 /* Initialize Wakelock stuff */
9547 spin_lock_init(&dhd->wakelock_spinlock);
9548 spin_lock_init(&dhd->wakelock_evt_spinlock);
9549 DHD_OS_WAKE_LOCK_INIT(dhd);
9550 dhd->wakelock_counter = 0;
9551 #ifdef CONFIG_HAS_WAKELOCK
9552 // terence 20161023: do not destroy wl_wifi on wlan down, otherwise a NULL pointer dereference occurs in dhd_ioctl_entry
9553 wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
9554 wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake");
9555 #endif /* CONFIG_HAS_WAKELOCK */
9556
9557 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
9558 mutex_init(&dhd->dhd_net_if_mutex);
9559 mutex_init(&dhd->dhd_suspend_mutex);
9560 #if defined(PKT_FILTER_SUPPORT) && defined(APF)
9561 mutex_init(&dhd->dhd_apf_mutex);
9562 #endif /* PKT_FILTER_SUPPORT && APF */
9563 #endif
9564 dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
9565
9566 /* Attach and link in the protocol */
9567 if (dhd_prot_attach(&dhd->pub) != 0) {
9568 DHD_ERROR(("dhd_prot_attach failed\n"));
9569 goto fail;
9570 }
9571 dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH;
9572
9573 #ifdef DHD_TIMESYNC
9574 /* attach the timesync module */
9575 if (dhd_timesync_attach(&dhd->pub) != 0) {
9576 DHD_ERROR(("dhd_timesync_attach failed\n"));
9577 goto fail;
9578 }
9579 dhd_state |= DHD_ATTACH_TIMESYNC_ATTACH_DONE;
9580 #endif /* DHD_TIMESYNC */
9581
9582 #ifdef WL_CFG80211
9583 spin_lock_init(&dhd->pub.up_lock);
9584 /* Attach and link in the cfg80211 */
9585 if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) {
9586 DHD_ERROR(("wl_cfg80211_attach failed\n"));
9587 goto fail;
9588 }
9589
9590 dhd_monitor_init(&dhd->pub);
9591 dhd_state |= DHD_ATTACH_STATE_CFG80211;
9592 #endif
9593 #ifdef DHD_LOG_DUMP
9594 dhd_log_dump_init(&dhd->pub);
9595 #endif /* DHD_LOG_DUMP */
9596 #if defined(WL_WIRELESS_EXT)
9597 #ifdef WL_ESCAN
9598 if (wl_escan_attach(net, &dhd->pub) != 0) {
9599 DHD_ERROR(("wl_escan_attach failed\n"));
9600 goto fail;
9601 }
9602 #else
9603 /* Attach and link in the iw */
9604 if (wl_iw_attach(net, &dhd->pub) != 0) {
9605 DHD_ERROR(("wl_iw_attach failed\n"));
9606 goto fail;
9607 }
9608 dhd_state |= DHD_ATTACH_STATE_WL_ATTACH;
9609 #endif /* WL_ESCAN */
9610 #endif /* defined(WL_WIRELESS_EXT) */
9611 #ifdef WL_EXT_IAPSTA
9612 if (wl_ext_iapsta_attach(&dhd->pub) != 0) {
9613 DHD_ERROR(("wl_ext_iapsta_attach failed\n"));
9614 goto fail;
9615 }
9616 #endif
9617
9618 #ifdef SHOW_LOGTRACE
9619 ret = dhd_init_logstrs_array(osh, &dhd->event_data);
9620 if (ret == BCME_OK) {
9621 dhd_init_static_strs_array(osh, &dhd->event_data, st_str_file_path, map_file_path);
9622 dhd_init_static_strs_array(osh, &dhd->event_data, rom_st_str_file_path,
9623 rom_map_file_path);
9624 dhd_state |= DHD_ATTACH_LOGTRACE_INIT;
9625 }
9626 #endif /* SHOW_LOGTRACE */
9627
9628 #ifdef DEBUGABILITY
9629 /* attach the debug module if supported */
9630 if (dhd_os_dbg_attach(&dhd->pub)) {
9631 DHD_ERROR(("%s debug module attach failed\n", __FUNCTION__));
9632 goto fail;
9633 }
9634
9635 #ifdef DBG_PKT_MON
9636 dhd->pub.dbg->pkt_mon_lock = dhd_os_spin_lock_init(dhd->pub.osh);
9637 #ifdef DBG_PKT_MON_INIT_DEFAULT
9638 dhd_os_dbg_attach_pkt_monitor(&dhd->pub);
9639 #endif /* DBG_PKT_MON_INIT_DEFAULT */
9640 #endif /* DBG_PKT_MON */
9641 #endif /* DEBUGABILITY */
9642 #ifdef DHD_PKT_LOGGING
9643 dhd_os_attach_pktlog(&dhd->pub);
9644 #endif /* DHD_PKT_LOGGING */
9645
9646 if (dhd_sta_pool_init(&dhd->pub, DHD_MAX_STA) != BCME_OK) {
9647 DHD_ERROR(("%s: Initializing %u sta\n", __FUNCTION__, DHD_MAX_STA));
9648 goto fail;
9649 }
9650
9651
9652
9653 #ifndef BCMDBUS
9654 /* Set up the watchdog timer */
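/* timer_setup() replaced the init_timer()/.data/.function idiom in kernel 4.15 */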
9655 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
9656 timer_setup(&dhd->timer, dhd_watchdog, 0);
9657 #else
9658 init_timer(&dhd->timer);
9659 dhd->timer.data = (ulong)dhd;
9660 dhd->timer.function = dhd_watchdog;
9661 #endif
9662 dhd->default_wd_interval = dhd_watchdog_ms;
9663
9664 if (dhd_watchdog_prio >= 0) {
9665 /* Initialize watchdog thread */
9666 PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0, "dhd_watchdog_thread");
9667 if (dhd->thr_wdt_ctl.thr_pid < 0) {
9668 goto fail;
9669 }
9670 } else {
9671 dhd->thr_wdt_ctl.thr_pid = -1;
9672 }
9673
9674 #ifdef DHD_PCIE_RUNTIMEPM
9675 /* Set up the runtime PM idle-count timer */
9676 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
9677 timer_setup(&dhd->rpm_timer, dhd_runtimepm, 0);
9678 #else
9679 init_timer(&dhd->rpm_timer);
9680 dhd->rpm_timer.data = (ulong)dhd;
9681 dhd->rpm_timer.function = dhd_runtimepm;
9682 #endif
9683 dhd->rpm_timer_valid = FALSE;
9684
9685 dhd->thr_rpm_ctl.thr_pid = DHD_PID_KT_INVALID;
9686 PROC_START(dhd_rpm_state_thread, dhd, &dhd->thr_rpm_ctl, 0, "dhd_rpm_state_thread");
9687 if (dhd->thr_rpm_ctl.thr_pid < 0) {
9688 goto fail;
9689 }
9690 #endif /* DHD_PCIE_RUNTIMEPM */
9691
9692 #ifdef DEBUGGER
9693 debugger_init((void *) bus);
9694 #endif
9695
9696 /* Set up the bottom half handler */
9697 if (dhd_dpc_prio >= 0) {
9698 /* Initialize DPC thread */
9699 PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0, "dhd_dpc");
9700 if (dhd->thr_dpc_ctl.thr_pid < 0) {
9701 goto fail;
9702 }
9703 } else {
9704 /* use tasklet for dpc */
9705 tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
9706 dhd->thr_dpc_ctl.thr_pid = -1;
9707 }
9708
9709 if (dhd->rxthread_enabled) {
9710 bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND);
9711 /* Initialize RXF thread */
9712 PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf");
9713 if (dhd->thr_rxf_ctl.thr_pid < 0) {
9714 goto fail;
9715 }
9716 }
9717 #endif /* !BCMDBUS */
9718 #ifdef SHOW_LOGTRACE
9719 skb_queue_head_init(&dhd->evt_trace_queue);
9720 #endif /* SHOW_LOGTRACE */
9721
9722 dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED;
9723
9724 #if defined(CONFIG_PM_SLEEP)
9725 if (!dhd_pm_notifier_registered) {
9726 dhd_pm_notifier_registered = TRUE;
9727 dhd->pm_notifier.notifier_call = dhd_pm_callback;
9728 dhd->pm_notifier.priority = 10;
9729 register_pm_notifier(&dhd->pm_notifier);
9730 }
9731
9732 #endif /* CONFIG_PM_SLEEP */
9733
9734 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
9735 dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
9736 dhd->early_suspend.suspend = dhd_early_suspend;
9737 dhd->early_suspend.resume = dhd_late_resume;
9738 register_early_suspend(&dhd->early_suspend);
9739 dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE;
9740 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
9741
9742 #ifdef ARP_OFFLOAD_SUPPORT
9743 dhd->pend_ipaddr = 0;
9744 if (!dhd_inetaddr_notifier_registered) {
9745 dhd_inetaddr_notifier_registered = TRUE;
9746 register_inetaddr_notifier(&dhd_inetaddr_notifier);
9747 }
9748 #endif /* ARP_OFFLOAD_SUPPORT */
9749
9750 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
9751 if (!dhd_inet6addr_notifier_registered) {
9752 dhd_inet6addr_notifier_registered = TRUE;
9753 register_inet6addr_notifier(&dhd_inet6addr_notifier);
9754 }
9755 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
9756 dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd);
9757 #ifdef DEBUG_CPU_FREQ
9758 dhd->new_freq = alloc_percpu(int);
9759 dhd->freq_trans.notifier_call = dhd_cpufreq_notifier;
9760 cpufreq_register_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
9761 #endif
9762 #ifdef DHDTCPACK_SUPPRESS
9763 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_DEFAULT);
9764 #endif /* DHDTCPACK_SUPPRESS */
9765
9766 #if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
9767 #endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
9768
9769
9770 #ifdef DHD_DEBUG_PAGEALLOC
9771 register_page_corrupt_cb(dhd_page_corrupt_cb, &dhd->pub);
9772 #endif /* DHD_DEBUG_PAGEALLOC */
9773
9774 #if defined(DHD_LB)
9775
9776 dhd_lb_set_default_cpus(dhd);
9777
9778 /* Initialize the CPU Masks */
9779 if (dhd_cpumasks_init(dhd) == 0) {
9780 /* Now we have the current CPU maps, run through candidacy */
9781 dhd_select_cpu_candidacy(dhd);
9782 /*
9783 * If we are able to initialize CPU masks, let's register with the
9784 * CPU hotplug framework to change the CPU for each job dynamically
9785 * using the candidacy algorithm.
9786 */
9787 dhd->cpu_notifier.notifier_call = dhd_cpu_callback;
9788 register_hotcpu_notifier(&dhd->cpu_notifier); /* Register a callback */
9789 } else {
9790 /*
9791 * We are unable to initialize CPU masks, so the candidacy algorithm
9792 * won't run; Load Balancing will still be honoured based
9793 * on the CPUs statically allocated for a given job during init
9794 */
9795 dhd->cpu_notifier.notifier_call = NULL;
9796 DHD_ERROR(("%s():dhd_cpumasks_init failed CPUs for JOB would be static\n",
9797 __FUNCTION__));
9798 }
9799
9800 #ifdef DHD_LB_TXP
9801 #ifdef DHD_LB_TXP_DEFAULT_ENAB
9802 /* Turn ON the feature by default */
9803 atomic_set(&dhd->lb_txp_active, 1);
9804 #else
9805 /* Turn OFF the feature by default */
9806 atomic_set(&dhd->lb_txp_active, 0);
9807 #endif /* DHD_LB_TXP_DEFAULT_ENAB */
9808 #endif /* DHD_LB_TXP */
9809
9810 DHD_LB_STATS_INIT(&dhd->pub);
9811
9812 /* Initialize the Load Balancing Tasklets and Napi object */
9813 #if defined(DHD_LB_TXC)
9814 tasklet_init(&dhd->tx_compl_tasklet,
9815 dhd_lb_tx_compl_handler, (ulong)(&dhd->pub));
9816 INIT_WORK(&dhd->tx_compl_dispatcher_work, dhd_tx_compl_dispatcher_fn);
9817 DHD_INFO(("%s load balance init tx_compl_tasklet\n", __FUNCTION__));
9818 #endif /* DHD_LB_TXC */
9819
9820 #if defined(DHD_LB_RXC)
9821 tasklet_init(&dhd->rx_compl_tasklet,
9822 dhd_lb_rx_compl_handler, (ulong)(&dhd->pub));
9823 DHD_INFO(("%s load balance init rx_compl_tasklet\n", __FUNCTION__));
9824 #endif /* DHD_LB_RXC */
9825
9826 #if defined(DHD_LB_RXP)
9827 __skb_queue_head_init(&dhd->rx_pend_queue);
9828 skb_queue_head_init(&dhd->rx_napi_queue);
9829 /* Initialize the work that dispatches NAPI job to a given core */
9830 INIT_WORK(&dhd->rx_napi_dispatcher_work, dhd_rx_napi_dispatcher_fn);
9831 DHD_INFO(("%s load balance init rx_napi_queue\n", __FUNCTION__));
9832 #endif /* DHD_LB_RXP */
9833
9834 #if defined(DHD_LB_TXP)
9835 INIT_WORK(&dhd->tx_dispatcher_work, dhd_tx_dispatcher_work);
9836 skb_queue_head_init(&dhd->tx_pend_queue);
9837 /* Initialize the work that dispatches TX job to a given core */
9838 tasklet_init(&dhd->tx_tasklet,
9839 dhd_lb_tx_handler, (ulong)(dhd));
9840 DHD_INFO(("%s load balance init tx_pend_queue\n", __FUNCTION__));
9841 #endif /* DHD_LB_TXP */
9842
9843 dhd_state |= DHD_ATTACH_STATE_LB_ATTACH_DONE;
9844 #endif /* DHD_LB */
9845
9846 #ifdef SHOW_LOGTRACE
9847 INIT_WORK(&dhd->event_log_dispatcher_work, dhd_event_logtrace_process);
9848 #endif /* SHOW_LOGTRACE */
9849
9850 DHD_SSSR_MEMPOOL_INIT(&dhd->pub);
9851
9852 #ifdef REPORT_FATAL_TIMEOUTS
9853 init_dhd_timeouts(&dhd->pub);
9854 #endif /* REPORT_FATAL_TIMEOUTS */
9855 #ifdef BCMPCIE
9856 dhd->pub.extended_trap_data = MALLOCZ(osh, BCMPCIE_EXT_TRAP_DATA_MAXLEN);
9857 if (dhd->pub.extended_trap_data == NULL) {
9858 DHD_ERROR(("%s: Failed to alloc extended_trap_data\n", __FUNCTION__));
9859 }
9860 #endif /* BCMPCIE */
9861
9862 (void)dhd_sysfs_init(dhd);
9863
9864 dhd_state |= DHD_ATTACH_STATE_DONE;
9865 dhd->dhd_state = dhd_state;
9866
9867 dhd_found++;
9868
9869 return &dhd->pub;
9870
9871 fail:
9872 if (dhd_state >= DHD_ATTACH_STATE_DHD_ALLOC) {
9873 DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
9874 __FUNCTION__, dhd_state, &dhd->pub));
9875 dhd->dhd_state = dhd_state;
9876 dhd_detach(&dhd->pub);
9877 dhd_free(&dhd->pub);
9878 }
9879 dhd_null_flag:
9880 return NULL;
9881 }
9882
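/* Infer the firmware operating mode from a tag embedded in the firmware
 * file name (e.g. "_apsta" -> HOSTAP, "_p2p" -> P2P); untagged names
 * default to STA mode.
 */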
9883 int dhd_get_fw_mode(dhd_info_t *dhdinfo)
9884 {
9885 if (strstr(dhdinfo->fw_path, "_apsta") != NULL)
9886 return DHD_FLAG_HOSTAP_MODE;
9887 if (strstr(dhdinfo->fw_path, "_p2p") != NULL)
9888 return DHD_FLAG_P2P_MODE;
9889 if (strstr(dhdinfo->fw_path, "_ibss") != NULL)
9890 return DHD_FLAG_IBSS_MODE;
9891 if (strstr(dhdinfo->fw_path, "_mfg") != NULL)
9892 return DHD_FLAG_MFG_MODE;
9893
9894 return DHD_FLAG_STA_MODE;
9895 }
9896
9897 int dhd_bus_get_fw_mode(dhd_pub_t *dhdp)
9898 {
9899 return dhd_get_fw_mode(dhdp->info);
9900 }
9901
9902 bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo)
9903 {
9904 int fw_len;
9905 int nv_len;
9906 int clm_len;
9907 int conf_len;
9908 const char *fw = NULL;
9909 const char *nv = NULL;
9910 const char *clm = NULL;
9911 const char *conf = NULL;
9912 #ifdef DHD_UCODE_DOWNLOAD
9913 int uc_len;
9914 const char *uc = NULL;
9915 #endif /* DHD_UCODE_DOWNLOAD */
9916 wifi_adapter_info_t *adapter = dhdinfo->adapter;
9917 int fw_path_len = sizeof(dhdinfo->fw_path);
9918 int nv_path_len = sizeof(dhdinfo->nv_path);
9919
9920
9921 /* Update the firmware and nvram paths. A path may come from adapter info or a module
9922 * parameter. The path from adapter info is used for initialization only (as it won't change).
9923 *
9924 * The firmware_path/nvram_path module parameters may be changed by the system at run
9925 * time. When one changes we need to copy it to dhdinfo->fw_path. An Android private
9926 * command may also change dhdinfo->fw_path. As such we need to clear the path info in the
9927 * module parameter after it is copied. We won't update the path until the module parameter
9928 * is changed again (i.e. its first character is not '\0')
9929 */
9930
9931 /* set default firmware and nvram path for built-in type driver */
9932 #ifdef CONFIG_BCMDHD_FW_PATH
9933 fw = CONFIG_BCMDHD_FW_PATH;
9934 #endif /* CONFIG_BCMDHD_FW_PATH */
9935 #ifdef CONFIG_BCMDHD_NVRAM_PATH
9936 nv = CONFIG_BCMDHD_NVRAM_PATH;
9937 #endif /* CONFIG_BCMDHD_NVRAM_PATH */
9939
9940 /* check if we need to initialize the path */
9941 if (dhdinfo->fw_path[0] == '\0') {
9942 if (adapter && adapter->fw_path && adapter->fw_path[0] != '\0')
9943 fw = adapter->fw_path;
9944 }
9945 if (dhdinfo->nv_path[0] == '\0') {
9946 if (adapter && adapter->nv_path && adapter->nv_path[0] != '\0')
9947 nv = adapter->nv_path;
9948 }
9949 if (dhdinfo->clm_path[0] == '\0') {
9950 if (adapter && adapter->clm_path && adapter->clm_path[0] != '\0')
9951 clm = adapter->clm_path;
9952 }
9953 if (dhdinfo->conf_path[0] == '\0') {
9954 if (adapter && adapter->conf_path && adapter->conf_path[0] != '\0')
9955 conf = adapter->conf_path;
9956 }
9957
9958 /* Use module parameter if it is valid, EVEN IF the path has not been initialized
9959 *
9960 * TODO: need a solution for multi-chip, can't use the same firmware for all chips
9961 */
9962 if (firmware_path[0] != '\0')
9963 fw = firmware_path;
9964 if (nvram_path[0] != '\0')
9965 nv = nvram_path;
9966 if (clm_path[0] != '\0')
9967 clm = clm_path;
9968 if (config_path[0] != '\0')
9969 conf = config_path;
9970 #ifdef DHD_UCODE_DOWNLOAD
9971 if (ucode_path[0] != '\0')
9972 uc = ucode_path;
9973 #endif /* DHD_UCODE_DOWNLOAD */
9974
9975 if (fw && fw[0] != '\0') {
9976 fw_len = strlen(fw);
9977 if (fw_len >= fw_path_len) {
9978 DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n"));
9979 return FALSE;
9980 }
9981 strncpy(dhdinfo->fw_path, fw, fw_path_len);
9982 if (dhdinfo->fw_path[fw_len-1] == '\n')
9983 dhdinfo->fw_path[fw_len-1] = '\0';
9984 }
9985 if (nv && nv[0] != '\0') {
9986 nv_len = strlen(nv);
9987 if (nv_len >= nv_path_len) {
9988 DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n"));
9989 return FALSE;
9990 }
9991 memset(dhdinfo->nv_path, 0, nv_path_len);
9992 strncpy(dhdinfo->nv_path, nv, nv_path_len);
9993 #ifdef DHD_USE_SINGLE_NVRAM_FILE
9994 /* Remove "_net" or "_mfg" tag from current nvram path */
9995 {
9996 char *nvram_tag = "nvram_";
9997 char *ext_tag = ".txt";
9998 char *sp_nvram = strnstr(dhdinfo->nv_path, nvram_tag, nv_path_len);
9999 bool valid_buf = sp_nvram && ((uint32)(sp_nvram + strlen(nvram_tag) +
10000 strlen(ext_tag) - dhdinfo->nv_path) <= nv_path_len);
10001 if (valid_buf) {
10002 char *sp = sp_nvram + strlen(nvram_tag) - 1;
10003 uint32 padding_size = (uint32)(dhdinfo->nv_path +
10004 nv_path_len - sp);
10005 memset(sp, 0, padding_size);
10006 strncat(dhdinfo->nv_path, ext_tag, strlen(ext_tag));
10007 nv_len = strlen(dhdinfo->nv_path);
10008 DHD_INFO(("%s: new nvram path = %s\n",
10009 __FUNCTION__, dhdinfo->nv_path));
10010 } else if (sp_nvram) {
10011 DHD_ERROR(("%s: buffer space for nvram path is not enough\n",
10012 __FUNCTION__));
10013 return FALSE;
10014 } else {
10015 DHD_ERROR(("%s: Couldn't find the nvram tag. current"
10016 " nvram path = %s\n", __FUNCTION__, dhdinfo->nv_path));
10017 }
10018 }
10019 #endif /* DHD_USE_SINGLE_NVRAM_FILE */
10020 if (dhdinfo->nv_path[nv_len-1] == '\n')
10021 dhdinfo->nv_path[nv_len-1] = '\0';
10022 }
10023 if (clm && clm[0] != '\0') {
10024 clm_len = strlen(clm);
10025 if (clm_len >= sizeof(dhdinfo->clm_path)) {
10026 DHD_ERROR(("clm path len exceeds max len of dhdinfo->clm_path\n"));
10027 return FALSE;
10028 }
10029 strncpy(dhdinfo->clm_path, clm, sizeof(dhdinfo->clm_path));
10030 if (dhdinfo->clm_path[clm_len-1] == '\n')
10031 dhdinfo->clm_path[clm_len-1] = '\0';
10032 }
10033 if (conf && conf[0] != '\0') {
10034 conf_len = strlen(conf);
10035 if (conf_len >= sizeof(dhdinfo->conf_path)) {
10036 DHD_ERROR(("config path len exceeds max len of dhdinfo->conf_path\n"));
10037 return FALSE;
10038 }
10039 strncpy(dhdinfo->conf_path, conf, sizeof(dhdinfo->conf_path));
10040 if (dhdinfo->conf_path[conf_len-1] == '\n')
10041 dhdinfo->conf_path[conf_len-1] = '\0';
10042 }
10043 #ifdef DHD_UCODE_DOWNLOAD
10044 if (uc && uc[0] != '\0') {
10045 uc_len = strlen(uc);
10046 if (uc_len >= sizeof(dhdinfo->uc_path)) {
10047 DHD_ERROR(("uc path len exceeds max len of dhdinfo->uc_path\n"));
10048 return FALSE;
10049 }
10050 strncpy(dhdinfo->uc_path, uc, sizeof(dhdinfo->uc_path));
10051 if (dhdinfo->uc_path[uc_len-1] == '\n')
10052 dhdinfo->uc_path[uc_len-1] = '\0';
10053 }
10054 #endif /* DHD_UCODE_DOWNLOAD */
10055
10056 #if 0
10057 /* clear the path in module parameter */
10058 if (dhd_download_fw_on_driverload) {
10059 firmware_path[0] = '\0';
10060 nvram_path[0] = '\0';
10061 clm_path[0] = '\0';
10062 config_path[0] = '\0';
10063 }
10064 #endif
10065 #ifdef DHD_UCODE_DOWNLOAD
10066 ucode_path[0] = '\0';
10067 DHD_ERROR(("ucode path: %s\n", dhdinfo->uc_path));
10068 #endif /* DHD_UCODE_DOWNLOAD */
10069
10070 #ifndef BCMEMBEDIMAGE
10071 /* fw_path and nv_path are not mandatory for BCMEMBEDIMAGE */
10072 if (dhdinfo->fw_path[0] == '\0') {
10073 DHD_ERROR(("firmware path not found\n"));
10074 return FALSE;
10075 }
10076 if (dhdinfo->nv_path[0] == '\0') {
10077 DHD_ERROR(("nvram path not found\n"));
10078 return FALSE;
10079 }
10080 #endif /* BCMEMBEDIMAGE */
10081
10082 return TRUE;
10083 }
10084
10085 #if defined(BT_OVER_SDIO)
10086 extern bool dhd_update_btfw_path(dhd_info_t *dhdinfo, char* btfw_path)
10087 {
10088 int fw_len;
10089 const char *fw = NULL;
10090 wifi_adapter_info_t *adapter = dhdinfo->adapter;
10091
10092
10093 /* Update the BT firmware path. The path may come from adapter info or a module parameter.
10094 * The path from adapter info is used for initialization only (as it won't change).
10095 *
10096 * The btfw_path module parameter may be changed by the system at run
10097 * time. When it changes we need to copy it to dhdinfo->btfw_path. An Android private
10098 * command may also change dhdinfo->btfw_path. As such we need to clear the path info in the
10099 * module parameter after it is copied. We won't update the path until the module parameter
10100 * is changed again (i.e. its first character is not '\0')
10101 */
10102
10103 /* set default BT firmware path for built-in type driver */
10104 if (!dhd_download_fw_on_driverload) {
10105 #ifdef CONFIG_BCMDHD_BTFW_PATH
10106 fw = CONFIG_BCMDHD_BTFW_PATH;
10107 #endif /* CONFIG_BCMDHD_BTFW_PATH */
10108 }
10109
10110 /* check if we need to initialize the path */
10111 if (dhdinfo->btfw_path[0] == '\0') {
10112 if (adapter && adapter->btfw_path && adapter->btfw_path[0] != '\0')
10113 fw = adapter->btfw_path;
10114 }
10115
10116 /* Use module parameter if it is valid, EVEN IF the path has not been initialized
10117 */
10118 if (btfw_path[0] != '\0')
10119 fw = btfw_path;
10120
10121 if (fw && fw[0] != '\0') {
10122 fw_len = strlen(fw);
10123 if (fw_len >= sizeof(dhdinfo->btfw_path)) {
10124 DHD_ERROR(("fw path len exceeds max len of dhdinfo->btfw_path\n"));
10125 return FALSE;
10126 }
10127 strncpy(dhdinfo->btfw_path, fw, sizeof(dhdinfo->btfw_path));
10128 if (dhdinfo->btfw_path[fw_len-1] == '\n')
10129 dhdinfo->btfw_path[fw_len-1] = '\0';
10130 }
10131
10132 /* clear the path in module parameter */
10133 btfw_path[0] = '\0';
10134
10135 if (dhdinfo->btfw_path[0] == '\0') {
10136 DHD_ERROR(("bt firmware path not found\n"));
10137 return FALSE;
10138 }
10139
10140 return TRUE;
10141 }
10142 #endif /* defined (BT_OVER_SDIO) */
10143
10144
10145 #ifdef CUSTOMER_HW4_DEBUG
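/* Compare the chip id probed from the bus against the one the driver was
 * built for; a few multi-chip families (4345x, 4355/4359, 4347/4361) are
 * accepted explicitly below.
 */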
10146 bool dhd_validate_chipid(dhd_pub_t *dhdp)
10147 {
10148 uint chipid = dhd_bus_chip_id(dhdp);
10149 uint config_chipid;
10150
10151 #ifdef BCM4361_CHIP
10152 config_chipid = BCM4361_CHIP_ID;
10153 #elif defined(BCM4359_CHIP)
10154 config_chipid = BCM4359_CHIP_ID;
10155 #elif defined(BCM4358_CHIP)
10156 config_chipid = BCM4358_CHIP_ID;
10157 #elif defined(BCM4354_CHIP)
10158 config_chipid = BCM4354_CHIP_ID;
10159 #elif defined(BCM4339_CHIP)
10160 config_chipid = BCM4339_CHIP_ID;
10161 #elif defined(BCM43349_CHIP)
10162 config_chipid = BCM43349_CHIP_ID;
10163 #elif defined(BCM4335_CHIP)
10164 config_chipid = BCM4335_CHIP_ID;
10165 #elif defined(BCM43241_CHIP)
10166 config_chipid = BCM4324_CHIP_ID;
10167 #elif defined(BCM4330_CHIP)
10168 config_chipid = BCM4330_CHIP_ID;
10169 #elif defined(BCM43430_CHIP)
10170 config_chipid = BCM43430_CHIP_ID;
10171 #elif defined(BCM43018_CHIP)
10172 config_chipid = BCM43018_CHIP_ID;
10173 #elif defined(BCM43455_CHIP)
10174 config_chipid = BCM4345_CHIP_ID;
10175 #elif defined(BCM4334W_CHIP)
10176 config_chipid = BCM43342_CHIP_ID;
10177 #elif defined(BCM43454_CHIP)
10178 config_chipid = BCM43454_CHIP_ID;
10179 #elif defined(BCM43012_CHIP_)
10180 config_chipid = BCM43012_CHIP_ID;
10181 #else
10182 DHD_ERROR(("%s: Unknown chip id, if you use new chipset,"
10183 " please add CONFIG_BCMXXXX into the Kernel and"
10184 " BCMXXXX_CHIP definition into the DHD driver\n",
10185 __FUNCTION__));
10186 config_chipid = 0;
10187
10188 return FALSE;
10189 #endif /* BCM4361_CHIP */
10190
10191 #ifdef SUPPORT_MULTIPLE_CHIP_4345X
10192 if (config_chipid == BCM43454_CHIP_ID || config_chipid == BCM4345_CHIP_ID) {
10193 return TRUE;
10194 }
10195 #endif /* SUPPORT_MULTIPLE_CHIP_4345X */
10196 #if defined(BCM4359_CHIP)
10197 if (chipid == BCM4355_CHIP_ID && config_chipid == BCM4359_CHIP_ID) {
10198 return TRUE;
10199 }
10200 #endif /* BCM4359_CHIP */
10201 #if defined(BCM4361_CHIP)
10202 if (chipid == BCM4347_CHIP_ID && config_chipid == BCM4361_CHIP_ID) {
10203 return TRUE;
10204 }
10205 #endif /* BCM4361_CHIP */
10206
10207 return config_chipid == chipid;
10208 }
10209 #endif /* CUSTOMER_HW4_DEBUG */
10210
10211 #if defined(BT_OVER_SDIO)
10212 wlan_bt_handle_t dhd_bt_get_pub_hndl(void)
10213 {
10214 DHD_ERROR(("%s: g_dhd_pub %p\n", __FUNCTION__, g_dhd_pub));
10215 /* assuming that dhd_pub_t type pointer is available from a global variable */
10216 return (wlan_bt_handle_t) g_dhd_pub;
10217 } EXPORT_SYMBOL(dhd_bt_get_pub_hndl);
10218
10219 int dhd_download_btfw(wlan_bt_handle_t handle, char* btfw_path)
10220 {
10221 int ret = -1;
10222 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
10223 dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
10224
10225
10226 /* Download BT firmware image to the dongle */
10227 if (dhd->pub.busstate == DHD_BUS_DATA && dhd_update_btfw_path(dhd, btfw_path)) {
10228 DHD_INFO(("%s: download btfw from: %s\n", __FUNCTION__, dhd->btfw_path));
10229 ret = dhd_bus_download_btfw(dhd->pub.bus, dhd->pub.osh, dhd->btfw_path);
10230 if (ret < 0) {
10231 DHD_ERROR(("%s: failed to download btfw from: %s\n",
10232 __FUNCTION__, dhd->btfw_path));
10233 return ret;
10234 }
10235 }
10236 return ret;
10237 } EXPORT_SYMBOL(dhd_download_btfw);
10238 #endif /* defined (BT_OVER_SDIO) */
10239
10240 #ifndef BCMDBUS
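/* Bring up the bus: download firmware/nvram if needed, start the watchdog,
 * register OOB interrupts where configured, initialize the protocol layer
 * and (for PCIe full dongle) the H2D flow rings, then sync with the dongle.
 */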
10241 int
10242 dhd_bus_start(dhd_pub_t *dhdp)
10243 {
10244 int ret = -1;
10245 dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
10246 unsigned long flags;
10247
10248 #if defined(DHD_DEBUG) && defined(BCMSDIO)
10249 int fw_download_start = 0, fw_download_end = 0, f2_sync_start = 0, f2_sync_end = 0;
10250 #endif /* DHD_DEBUG && BCMSDIO */
10251 ASSERT(dhd);
10252
10253 DHD_TRACE(("Enter %s:\n", __FUNCTION__));
10254
10255 DHD_PERIM_LOCK(dhdp);
10256 #ifdef HOFFLOAD_MODULES
10257 dhd_linux_get_modfw_address(dhdp);
10258 #endif
10259 /* try to download image and nvram to the dongle */
10260 if (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) {
10261 /* Indicate FW download has not yet completed */
10262 dhd->pub.fw_download_done = FALSE;
10263 DHD_INFO(("%s download fw %s, nv %s, conf %s\n",
10264 __FUNCTION__, dhd->fw_path, dhd->nv_path, dhd->conf_path));
10265 #if defined(DHD_DEBUG) && defined(BCMSDIO)
10266 fw_download_start = OSL_SYSUPTIME();
10267 #endif /* DHD_DEBUG && BCMSDIO */
10268 ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
10269 dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path);
10270 #if defined(DHD_DEBUG) && defined(BCMSDIO)
10271 fw_download_end = OSL_SYSUPTIME();
10272 #endif /* DHD_DEBUG && BCMSDIO */
10273 if (ret < 0) {
10274 DHD_ERROR(("%s: failed to download firmware %s\n",
10275 __FUNCTION__, dhd->fw_path));
10276 DHD_PERIM_UNLOCK(dhdp);
10277 return ret;
10278 }
10279 /* Indicate FW Download has succeeded */
10280 dhd->pub.fw_download_done = TRUE;
10281 }
10282 if (dhd->pub.busstate != DHD_BUS_LOAD) {
10283 DHD_PERIM_UNLOCK(dhdp);
10284 return -ENETDOWN;
10285 }
10286
10287 #ifdef BCMSDIO
10288 dhd_os_sdlock(dhdp);
10289 #endif /* BCMSDIO */
10290
10291 /* Start the watchdog timer */
10292 dhd->pub.tickcnt = 0;
10293 dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
10294
10295 /* Bring up the bus */
10296 if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {
10297 DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
10298 #ifdef BCMSDIO
10299 dhd_os_sdunlock(dhdp);
10300 #endif /* BCMSDIO */
10301 DHD_PERIM_UNLOCK(dhdp);
10302 return ret;
10303 }
10304
10305 DHD_ENABLE_RUNTIME_PM(&dhd->pub);
10306
10307 #ifdef DHD_ULP
10308 dhd_ulp_set_ulp_state(dhdp, DHD_ULP_DISABLED);
10309 #endif /* DHD_ULP */
10310 #if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE)
10311 /* Host registration for OOB interrupt */
10312 if (dhd_bus_oob_intr_register(dhdp)) {
10313 /* deactivate timer and wait for the handler to finish */
10314 #if !defined(BCMPCIE_OOB_HOST_WAKE)
10315 DHD_GENERAL_LOCK(&dhd->pub, flags);
10316 dhd->wd_timer_valid = FALSE;
10317 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
10318 del_timer_sync(&dhd->timer);
10319
10320 #endif /* !BCMPCIE_OOB_HOST_WAKE */
10321 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
10322 DHD_PERIM_UNLOCK(dhdp);
10323 DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
10324 return -ENODEV;
10325 }
10326
10327 #if defined(BCMPCIE_OOB_HOST_WAKE)
10328 dhd_bus_oob_intr_set(dhdp, TRUE);
10329 #else
10330 /* Enable oob at firmware */
10331 dhd_enable_oob_intr(dhd->pub.bus, TRUE);
10332 #endif /* BCMPCIE_OOB_HOST_WAKE */
10333 #elif defined(FORCE_WOWLAN)
10334 /* Enable oob at firmware */
10335 dhd_enable_oob_intr(dhd->pub.bus, TRUE);
10336 #endif
10337 #ifdef PCIE_FULL_DONGLE
10338 {
10339 /* max_h2d_rings includes H2D common rings */
10340 uint32 max_h2d_rings = dhd_bus_max_h2d_queues(dhd->pub.bus);
10341
10342 DHD_ERROR(("%s: Initializing %u h2drings\n", __FUNCTION__,
10343 max_h2d_rings));
10344 if ((ret = dhd_flow_rings_init(&dhd->pub, max_h2d_rings)) != BCME_OK) {
10345 #ifdef BCMSDIO
10346 dhd_os_sdunlock(dhdp);
10347 #endif /* BCMSDIO */
10348 DHD_PERIM_UNLOCK(dhdp);
10349 return ret;
10350 }
10351 }
10352 #endif /* PCIE_FULL_DONGLE */
10353
10354 /* Do protocol initialization necessary for IOCTL/IOVAR */
10355 ret = dhd_prot_init(&dhd->pub);
10356 if (unlikely(ret != BCME_OK)) {
10357 DHD_PERIM_UNLOCK(dhdp);
10358 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
10359 return ret;
10360 }
10361
10362 /* If bus is not ready, can't come up */
10363 if (dhd->pub.busstate != DHD_BUS_DATA) {
10364 DHD_GENERAL_LOCK(&dhd->pub, flags);
10365 dhd->wd_timer_valid = FALSE;
10366 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
10367 del_timer_sync(&dhd->timer);
10368 DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__));
10369 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
10370 #ifdef BCMSDIO
10371 dhd_os_sdunlock(dhdp);
10372 #endif /* BCMSDIO */
10373 DHD_PERIM_UNLOCK(dhdp);
10374 return -ENODEV;
10375 }
10376
10377 #ifdef BCMSDIO
10378 dhd_os_sdunlock(dhdp);
10379 #endif /* BCMSDIO */
10380
10381 /* Bus is ready, query any dongle information */
10382 #if defined(DHD_DEBUG) && defined(BCMSDIO)
10383 f2_sync_start = OSL_SYSUPTIME();
10384 #endif /* DHD_DEBUG && BCMSDIO */
10385 if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
10386 DHD_GENERAL_LOCK(&dhd->pub, flags);
10387 dhd->wd_timer_valid = FALSE;
10388 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
10389 del_timer_sync(&dhd->timer);
10390 DHD_ERROR(("%s failed to sync with dongle\n", __FUNCTION__));
10391 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
10392 DHD_PERIM_UNLOCK(dhdp);
10393 return ret;
10394 }
10395 #if defined(CONFIG_SOC_EXYNOS8895)
10396 DHD_ERROR(("%s: Enable L1ss EP side\n", __FUNCTION__));
10397 exynos_pcie_l1ss_ctrl(1, PCIE_L1SS_CTRL_WIFI);
10398 #endif /* CONFIG_SOC_EXYNOS8895 */
10399
10400 #if defined(DHD_DEBUG) && defined(BCMSDIO)
10401 f2_sync_end = OSL_SYSUPTIME();
10402 DHD_ERROR(("Time taken for FW download and F2 ready is: %d msec\n",
10403 (fw_download_end - fw_download_start) + (f2_sync_end - f2_sync_start)));
10404 #endif /* DHD_DEBUG && BCMSDIO */
10405
10406 #ifdef ARP_OFFLOAD_SUPPORT
10407 if (dhd->pend_ipaddr) {
10408 #ifdef AOE_IP_ALIAS_SUPPORT
10409 aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE, 0);
10410 #endif /* AOE_IP_ALIAS_SUPPORT */
10411 dhd->pend_ipaddr = 0;
10412 }
10413 #endif /* ARP_OFFLOAD_SUPPORT */
10414
10415 #if defined(TRAFFIC_MGMT_DWM)
10416 bzero(&dhd->pub.dhd_tm_dwm_tbl, sizeof(dhd_trf_mgmt_dwm_tbl_t));
10417 #endif
10418 DHD_PERIM_UNLOCK(dhdp);
10419 return 0;
10420 }
10421 #endif /* !BCMDBUS */
10422
10423 #ifdef WLTDLS
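/* Enable/disable TDLS in the firmware and optionally its auto-operation
 * mode; in auto mode the idle time and RSSI high/low thresholds are also
 * programmed via iovars.
 */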
10424 int _dhd_tdls_enable(dhd_pub_t *dhd, bool tdls_on, bool auto_on, struct ether_addr *mac)
10425 {
10426 uint32 tdls = tdls_on;
10427 int ret = 0;
10428 uint32 tdls_auto_op = 0;
10429 uint32 tdls_idle_time = CUSTOM_TDLS_IDLE_MODE_SETTING;
10430 int32 tdls_rssi_high = CUSTOM_TDLS_RSSI_THRESHOLD_HIGH;
10431 int32 tdls_rssi_low = CUSTOM_TDLS_RSSI_THRESHOLD_LOW;
10432 BCM_REFERENCE(mac);
10433 if (!FW_SUPPORTED(dhd, tdls))
10434 return BCME_ERROR;
10435
10436 if (dhd->tdls_enable == tdls_on)
10437 goto auto_mode;
10438 ret = dhd_iovar(dhd, 0, "tdls_enable", (char *)&tdls, sizeof(tdls), NULL, 0, TRUE);
10439 if (ret < 0) {
10440 DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__, tdls, ret));
10441 goto exit;
10442 }
10443 dhd->tdls_enable = tdls_on;
10444 auto_mode:
10445
10446 tdls_auto_op = auto_on;
10447 ret = dhd_iovar(dhd, 0, "tdls_auto_op", (char *)&tdls_auto_op, sizeof(tdls_auto_op), NULL,
10448 0, TRUE);
10449 if (ret < 0) {
10450 DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__, ret));
10451 goto exit;
10452 }
10453
10454 if (tdls_auto_op) {
10455 ret = dhd_iovar(dhd, 0, "tdls_idle_time", (char *)&tdls_idle_time,
10456 sizeof(tdls_idle_time), NULL, 0, TRUE);
10457 if (ret < 0) {
10458 DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__, ret));
10459 goto exit;
10460 }
10461 ret = dhd_iovar(dhd, 0, "tdls_rssi_high", (char *)&tdls_rssi_high,
10462 sizeof(tdls_rssi_high), NULL, 0, TRUE);
10463 if (ret < 0) {
10464 DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__, ret));
10465 goto exit;
10466 }
10467 ret = dhd_iovar(dhd, 0, "tdls_rssi_low", (char *)&tdls_rssi_low,
10468 sizeof(tdls_rssi_low), NULL, 0, TRUE);
10469 if (ret < 0) {
10470 DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__, ret));
10471 goto exit;
10472 }
10473 }
10474
10475 exit:
10476 return ret;
10477 }
10478
10479 int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac)
10480 {
10481 dhd_info_t *dhd = DHD_DEV_INFO(dev);
10482 int ret = 0;
10483 if (dhd)
10484 ret = _dhd_tdls_enable(&dhd->pub, tdls_on, auto_on, mac);
10485 else
10486 ret = BCME_ERROR;
10487 return ret;
10488 }
10489
10490 int
10491 dhd_tdls_set_mode(dhd_pub_t *dhd, bool wfd_mode)
10492 {
10493 int ret = 0;
10494 bool auto_on = false;
10495 uint32 mode = wfd_mode;
10496
10497 #ifdef ENABLE_TDLS_AUTO_MODE
10498 if (wfd_mode) {
10499 auto_on = false;
10500 } else {
10501 auto_on = true;
10502 }
10503 #else
10504 auto_on = false;
10505 #endif /* ENABLE_TDLS_AUTO_MODE */
10506 ret = _dhd_tdls_enable(dhd, false, auto_on, NULL);
10507 if (ret < 0) {
10508 DHD_ERROR(("Disable tdls_auto_op failed. %d\n", ret));
10509 return ret;
10510 }
10511
10512 ret = dhd_iovar(dhd, 0, "tdls_wfd_mode", (char *)&mode, sizeof(mode), NULL, 0, TRUE);
10513 if ((ret < 0) && (ret != BCME_UNSUPPORTED)) {
10514 DHD_ERROR(("%s: tdls_wfd_mode faile_wfd_mode %d\n", __FUNCTION__, ret));
10515 return ret;
10516 }
10517
10518 ret = _dhd_tdls_enable(dhd, true, auto_on, NULL);
10519 if (ret < 0) {
10520 DHD_ERROR(("enable tdls_auto_op failed. %d\n", ret));
10521 return ret;
10522 }
10523
10524 dhd->tdls_mode = mode;
10525 return ret;
10526 }
10527 #ifdef PCIE_FULL_DONGLE
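/* Maintain the host-side TDLS peer list from TDLS peer events: peers are
 * added on WLC_E_TDLS_PEER_CONNECTED and, on DISCONNECTED, their flow rings
 * are deleted before the node is unlinked under tdls_lock.
 */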
10528 int dhd_tdls_update_peer_info(dhd_pub_t *dhdp, wl_event_msg_t *event)
10529 {
10530 dhd_pub_t *dhd_pub = dhdp;
10531 tdls_peer_node_t *cur = dhd_pub->peer_tbl.node;
10532 tdls_peer_node_t *new = NULL, *prev = NULL;
10533 int ifindex = dhd_ifname2idx(dhd_pub->info, event->ifname);
10534 uint8 *da = (uint8 *)&event->addr.octet[0];
10535 bool connect = FALSE;
10536 uint32 reason = ntoh32(event->reason);
10537 unsigned long flags;
10538
10539 if (reason == WLC_E_TDLS_PEER_CONNECTED)
10540 connect = TRUE;
10541 else if (reason == WLC_E_TDLS_PEER_DISCONNECTED)
10542 connect = FALSE;
10543 else
10544 {
10545 DHD_ERROR(("%s: TDLS Event reason is unknown\n", __FUNCTION__));
10546 return BCME_ERROR;
10547 }
10548 if (ifindex == DHD_BAD_IF)
10549 return BCME_ERROR;
10550
10551 if (connect) {
10552 while (cur != NULL) {
10553 if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
10554 DHD_ERROR(("%s: TDLS Peer exist already %d\n",
10555 __FUNCTION__, __LINE__));
10556 return BCME_ERROR;
10557 }
10558 cur = cur->next;
10559 }
10560
10561 new = MALLOC(dhd_pub->osh, sizeof(tdls_peer_node_t));
10562 if (new == NULL) {
10563 DHD_ERROR(("%s: Failed to allocate memory\n", __FUNCTION__));
10564 return BCME_ERROR;
10565 }
10566 memcpy(new->addr, da, ETHER_ADDR_LEN);
10567 DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
10568 new->next = dhd_pub->peer_tbl.node;
10569 dhd_pub->peer_tbl.node = new;
10570 dhd_pub->peer_tbl.tdls_peer_count++;
10571 DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
10572 } else {
10573 while (cur != NULL) {
10574 if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
10575 dhd_flow_rings_delete_for_peer(dhd_pub, (uint8)ifindex, da);
10576 DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
10577 if (prev)
10578 prev->next = cur->next;
10579 else
10580 dhd_pub->peer_tbl.node = cur->next;
10581 MFREE(dhd_pub->osh, cur, sizeof(tdls_peer_node_t));
10582 dhd_pub->peer_tbl.tdls_peer_count--;
10583 DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
10584 return BCME_OK;
10585 }
10586 prev = cur;
10587 cur = cur->next;
10588 }
10589 DHD_ERROR(("%s: TDLS Peer Entry Not found\n", __FUNCTION__));
10590 }
10591 return BCME_OK;
10592 }
10593 #endif /* PCIE_FULL_DONGLE */
10594 #endif
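/*
 * The peer table updated above is a singly linked list guarded by
 * tdls_lock. A minimal lookup sketch (assuming the caller serializes
 * against the update path) would be:
 *
 *	tdls_peer_node_t *node;
 *	for (node = dhdp->peer_tbl.node; node != NULL; node = node->next) {
 *		if (!memcmp(peer_mac, node->addr, ETHER_ADDR_LEN))
 *			break;	// found the peer entry
 *	}
 */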
10595
10596 bool dhd_is_concurrent_mode(dhd_pub_t *dhd)
10597 {
10598 if (!dhd)
10599 return FALSE;
10600
10601 if (dhd->op_mode & DHD_FLAG_CONCURR_MULTI_CHAN_MODE)
10602 return TRUE;
10603 else if ((dhd->op_mode & DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) ==
10604 DHD_FLAG_CONCURR_SINGLE_CHAN_MODE)
10605 return TRUE;
10606 else
10607 return FALSE;
10608 }
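/*
 * Example: op_mode is a bit vector, so the concurrency check composes with
 * other mode tests. A hypothetical caller gating P2P-specific tuning:
 *
 *	if (dhd_is_concurrent_mode(dhdp) &&
 *	    (dhdp->op_mode & DHD_FLAG_P2P_MODE)) {
 *		// multi-interface P2P setup goes here
 *	}
 */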
10609 #if !defined(AP) && defined(WLP2P)
10610 /* From the Android Jelly Bean release, concurrent mode is enabled by default and the firmware
10611  * name would be fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the STA
10612  * firmware and accordingly enable concurrent mode (apply P2P settings). SoftAP firmware
10613  * would still be named fw_bcmdhd_apsta.
10614  */
10615 uint32
10616 dhd_get_concurrent_capabilites(dhd_pub_t *dhd)
10617 {
10618 int32 ret = 0;
10619 char buf[WLC_IOCTL_SMLEN];
10620 bool mchan_supported = FALSE;
10621 /* if dhd->op_mode is already set for HOSTAP and Manufacturing
10622 * test mode, that means we only will use the mode as it is
10623 */
10624 if (dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))
10625 return 0;
10626 if (FW_SUPPORTED(dhd, vsdb)) {
10627 mchan_supported = TRUE;
10628 }
10629 if (!FW_SUPPORTED(dhd, p2p)) {
10630 DHD_TRACE(("Chip does not support p2p\n"));
10631 return 0;
10632 } else {
10633 /* Chip supports p2p, but verify whether it is really implemented in the firmware */
10634 memset(buf, 0, sizeof(buf));
10635 ret = dhd_iovar(dhd, 0, "p2p", NULL, 0, (char *)&buf,
10636 sizeof(buf), FALSE);
10637 if (ret < 0) {
10638 DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret));
10639 return 0;
10640 } else {
10641 if (buf[0] == 1) {
10642 /* By default, chip supports single chan concurrency,
10643 * now lets check for mchan
10644 */
10645 ret = DHD_FLAG_CONCURR_SINGLE_CHAN_MODE;
10646 if (mchan_supported)
10647 ret |= DHD_FLAG_CONCURR_MULTI_CHAN_MODE;
10648 if (FW_SUPPORTED(dhd, rsdb)) {
10649 ret |= DHD_FLAG_RSDB_MODE;
10650 }
10651 #ifdef WL_SUPPORT_MULTIP2P
10652 if (FW_SUPPORTED(dhd, mp2p)) {
10653 ret |= DHD_FLAG_MP2P_MODE;
10654 }
10655 #endif /* WL_SUPPORT_MULTIP2P */
10656 #if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
10657 return ret;
10658 #else
10659 return 0;
10660 #endif /* WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF */
10661 }
10662 }
10663 }
10664 return 0;
10665 }
10666 #endif
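/*
 * Usage sketch: the returned capability bits are OR'ed into op_mode by the
 * caller (see dhd_preinit_ioctls below):
 *
 *	uint32 cmode = dhd_get_concurrent_capabilites(dhdp);
 *	if (cmode)
 *		dhdp->op_mode |= cmode;	// SINGLE/MULTI chan, RSDB, MP2P bits
 */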
10667
10668 #ifdef SUPPORT_AP_POWERSAVE
10669 #define RXCHAIN_PWRSAVE_PPS 10
10670 #define RXCHAIN_PWRSAVE_QUIET_TIME 10
10671 #define RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK 0
10672 int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable)
10673 {
10674 int32 pps = RXCHAIN_PWRSAVE_PPS;
10675 int32 quiet_time = RXCHAIN_PWRSAVE_QUIET_TIME;
10676 int32 stas_assoc_check = RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK;
10677 int ret;
10678
10679 if (enable) {
10680 ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_enable", (char *)&enable, sizeof(enable),
10681 NULL, 0, TRUE);
10682 if (ret != BCME_OK) {
10683 DHD_ERROR(("Failed to enable AP power save\n"));
10684 }
10685 ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_pps", (char *)&pps, sizeof(pps), NULL, 0,
10686 TRUE);
10687 if (ret != BCME_OK) {
10688 DHD_ERROR(("Failed to set pps\n"));
10689 }
10690 ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_quiet_time", (char *)&quiet_time,
10691 sizeof(quiet_time), NULL, 0, TRUE);
10692 if (ret != BCME_OK) {
10693 DHD_ERROR(("Failed to set quiet time\n"));
10694 }
10695 ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_stas_assoc_check",
10696 (char *)&stas_assoc_check, sizeof(stas_assoc_check), NULL, 0, TRUE);
10697 if (ret != BCME_OK) {
10698 DHD_ERROR(("Failed to set stas assoc check\n"));
10699 }
10700 } else {
10701 ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_enable", (char *)&enable, sizeof(enable),
10702 NULL, 0, TRUE);
10703 if (ret != BCME_OK) {
10704 DHD_ERROR(("Failed to disable AP power save\n"));
10705 }
10706 }
10707
10708 return 0;
10709 }
10710 #endif /* SUPPORT_AP_POWERSAVE */
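/*
 * Usage sketch: SoftAP bring-up enables rxchain power save on the primary
 * interface and teardown disables it through the same entry point:
 *
 *	dhd_set_ap_powersave(dhdp, 0, TRUE);	// enable, pps=10, quiet_time=10
 *	dhd_set_ap_powersave(dhdp, 0, FALSE);	// disable
 */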
10711
10712
10713
10714
10715 #if defined(WLADPS) || defined(WLADPS_PRIVATE_CMD)
10716 int
10717 dhd_enable_adps(dhd_pub_t *dhd, uint8 on)
10718 {
10719 int i;
10720 int len;
10721 int ret = BCME_OK;
10722
10723 bcm_iov_buf_t *iov_buf = NULL;
10724 wl_adps_params_v1_t *data = NULL;
10725 char buf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */
10726
10727 len = OFFSETOF(bcm_iov_buf_t, data) + sizeof(*data);
10728 iov_buf = kmalloc(len, GFP_KERNEL);
10729 if (iov_buf == NULL) {
10730 DHD_ERROR(("%s - failed to allocate %d bytes for iov_buf\n", __FUNCTION__, len));
10731 ret = BCME_NOMEM;
10732 goto exit;
10733 }
10734
10735 iov_buf->version = WL_ADPS_IOV_VER;
10736 iov_buf->len = sizeof(*data);
10737 iov_buf->id = WL_ADPS_IOV_MODE;
10738
10739 data = (wl_adps_params_v1_t *)iov_buf->data;
10740 data->version = ADPS_SUB_IOV_VERSION_1;
10741 data->length = sizeof(*data);
10742 data->mode = on;
10743
10744 for (i = 1; i <= MAX_BANDS; i++) {
10745 data->band = i;
10746 bcm_mkiovar("adps", (char *)iov_buf, len, buf, sizeof(buf));
10747 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0)) < 0) {
10748 if (ret == BCME_UNSUPPORTED) {
10749 DHD_ERROR(("%s adps is not supported\n", __FUNCTION__));
10750 ret = BCME_OK;
10751 goto exit;
10752 }
10753 else {
10754 DHD_ERROR(("%s fail to set adps %s for band %d (%d)\n",
10755 __FUNCTION__, on ? "On" : "Off", i, ret));
10756 goto exit;
10757 }
10758 }
10759 }
10760
10761 exit:
10762 if (iov_buf) {
10763 kfree(iov_buf);
10764 }
10765 return ret;
10766 }
10767 #endif /* WLADPS || WLADPS_PRIVATE_CMD */
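/*
 * Wire-layout sketch of the "adps" iovar assembled above: a bcm_iov_buf_t
 * header followed by a single wl_adps_params_v1_t record, sent once per
 * band index (1..MAX_BANDS):
 *
 *	| version | len | id |              <- bcm_iov_buf_t
 *	| version | length | mode | band |  <- wl_adps_params_v1_t
 */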
10768
10769 int
10770 dhd_preinit_ioctls(dhd_pub_t *dhd)
10771 {
10772 int ret = 0;
10773 char eventmask[WL_EVENTING_MASK_LEN];
10774 char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */
10775 uint32 buf_key_b4_m4 = 1;
10776 uint8 msglen;
10777 eventmsgs_ext_t *eventmask_msg = NULL;
10778 char* iov_buf = NULL;
10779 int ret2 = 0;
10780 uint32 wnm_cap = 0;
10781 #if defined(CUSTOM_AMPDU_BA_WSIZE)
10782 uint32 ampdu_ba_wsize = 0;
10783 #endif
10784 #if defined(CUSTOM_AMPDU_MPDU)
10785 int32 ampdu_mpdu = 0;
10786 #endif
10787 #if defined(CUSTOM_AMPDU_RELEASE)
10788 int32 ampdu_release = 0;
10789 #endif
10790 #if defined(CUSTOM_AMSDU_AGGSF)
10791 int32 amsdu_aggsf = 0;
10792 #endif
10793 shub_control_t shub_ctl;
10794
10795 #if defined(BCMSDIO) || defined(BCMDBUS)
10796 #ifdef PROP_TXSTATUS
10797 int wlfc_enable = TRUE;
10798 #ifndef DISABLE_11N
10799 uint32 hostreorder = 1;
10800 uint wl_down = 1;
10801 #endif /* DISABLE_11N */
10802 #endif /* PROP_TXSTATUS */
10803 #endif /* BCMSDIO || BCMDBUS */
10804 #ifndef PCIE_FULL_DONGLE
10805 uint32 wl_ap_isolate;
10806 #endif /* PCIE_FULL_DONGLE */
10807 uint32 frameburst = CUSTOM_FRAMEBURST_SET;
10808 uint wnm_bsstrans_resp = 0;
10809 #ifdef SUPPORT_SET_CAC
10810 uint32 cac = 1;
10811 #endif /* SUPPORT_SET_CAC */
10812 #ifdef DHD_ENABLE_LPC
10813 uint32 lpc = 1;
10814 #endif /* DHD_ENABLE_LPC */
10815 uint power_mode = PM_FAST;
10816 #if defined(BCMSDIO)
10817 uint32 dongle_align = DHD_SDALIGN;
10818 uint32 glom = CUSTOM_GLOM_SETTING;
10819 #endif /* defined(BCMSDIO) */
10820 #if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
10821 uint32 credall = 1;
10822 #endif
10823 uint bcn_timeout = CUSTOM_BCN_TIMEOUT;
10824 uint scancache_enab = TRUE;
10825 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
10826 uint32 bcn_li_bcn = 1;
10827 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
10828 uint retry_max = CUSTOM_ASSOC_RETRY_MAX;
10829 #if defined(ARP_OFFLOAD_SUPPORT)
10830 int arpoe = 1;
10831 #endif
10832 int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME;
10833 int scan_unassoc_time = DHD_SCAN_UNASSOC_ACTIVE_TIME;
10834 int scan_passive_time = DHD_SCAN_PASSIVE_TIME;
10835 char buf[WLC_IOCTL_SMLEN];
10836 char *ptr;
10837 uint32 listen_interval = CUSTOM_LISTEN_INTERVAL; /* Default Listen Interval in Beacons */
10838 #if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
10839 wl_el_tag_params_t *el_tag = NULL;
10840 #endif /* DHD_8021X_DUMP */
10841 #ifdef ROAM_ENABLE
10842 uint roamvar = 0;
10843 int roam_trigger[2] = {CUSTOM_ROAM_TRIGGER_SETTING, WLC_BAND_ALL};
10844 int roam_scan_period[2] = {10, WLC_BAND_ALL};
10845 int roam_delta[2] = {CUSTOM_ROAM_DELTA_SETTING, WLC_BAND_ALL};
10846 #ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC
10847 int roam_fullscan_period = 60;
10848 #else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
10849 int roam_fullscan_period = 120;
10850 #endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
10851 #ifdef DISABLE_BCNLOSS_ROAM
10852 uint roam_bcnloss_off = 1;
10853 #endif /* DISABLE_BCNLOSS_ROAM */
10854 #else
10855 #ifdef DISABLE_BUILTIN_ROAM
10856 uint roamvar = 1;
10857 #endif /* DISABLE_BUILTIN_ROAM */
10858 #endif /* ROAM_ENABLE */
10859
10860 #if defined(SOFTAP)
10861 uint dtim = 1;
10862 #endif
10863 #if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211))
10864 struct ether_addr p2p_ea;
10865 #endif
10866 #ifdef SOFTAP_UAPSD_OFF
10867 uint32 wme_apsd = 0;
10868 #endif /* SOFTAP_UAPSD_OFF */
10869 #if (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC)
10870 uint32 apsta = 1; /* Enable APSTA mode */
10871 #elif defined(SOFTAP_AND_GC)
10872 uint32 apsta = 0;
10873 int ap_mode = 1;
10874 #endif /* (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) */
10875 #ifdef GET_CUSTOM_MAC_ENABLE
10876 struct ether_addr ea_addr;
10877 char hw_ether[62];
10878 #endif /* GET_CUSTOM_MAC_ENABLE */
10879
10880 #ifdef DISABLE_11N
10881 uint32 nmode = 0;
10882 #endif /* DISABLE_11N */
10883
10884 #ifdef USE_WL_TXBF
10885 uint32 txbf = 1;
10886 #endif /* USE_WL_TXBF */
10887 #ifdef DISABLE_TXBFR
10888 uint32 txbf_bfr_cap = 0;
10889 #endif /* DISABLE_TXBFR */
10890 #if defined(PROP_TXSTATUS)
10891 #ifdef USE_WFA_CERT_CONF
10892 uint32 proptx = 0;
10893 #endif /* USE_WFA_CERT_CONF */
10894 #endif /* PROP_TXSTATUS */
10895 #if defined(SUPPORT_5G_1024QAM_VHT)
10896 uint32 vht_features = 0; /* init to 0, will be set based on each support */
10897 #endif
10898 #ifdef DISABLE_11N_PROPRIETARY_RATES
10899 uint32 ht_features = 0;
10900 #endif /* DISABLE_11N_PROPRIETARY_RATES */
10901 #ifdef CUSTOM_PSPRETEND_THR
10902 uint32 pspretend_thr = CUSTOM_PSPRETEND_THR;
10903 #endif
10904 #ifdef CUSTOM_EVENT_PM_WAKE
10905 uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE;
10906 #endif /* CUSTOM_EVENT_PM_WAKE */
10907 uint32 rsdb_mode = 0;
10908 #ifdef ENABLE_TEMP_THROTTLING
10909 wl_temp_control_t temp_control;
10910 #endif /* ENABLE_TEMP_THROTTLING */
10911 #ifdef DISABLE_PRUNED_SCAN
10912 uint32 scan_features = 0;
10913 #endif /* DISABLE_PRUNED_SCAN */
10914 #ifdef PKT_FILTER_SUPPORT
10915 dhd_pkt_filter_enable = TRUE;
10916 #ifdef APF
10917 dhd->apf_set = FALSE;
10918 #endif /* APF */
10919 #endif /* PKT_FILTER_SUPPORT */
10920 #ifdef WLTDLS
10921 dhd->tdls_enable = FALSE;
10922 dhd_tdls_set_mode(dhd, false);
10923 #endif /* WLTDLS */
10924 dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
10925 #ifdef ENABLE_MAX_DTIM_IN_SUSPEND
10926 dhd->max_dtim_enable = TRUE;
10927 #else
10928 dhd->max_dtim_enable = FALSE;
10929 #endif /* ENABLE_MAX_DTIM_IN_SUSPEND */
10930 #ifdef CUSTOM_SET_OCLOFF
10931 dhd->ocl_off = FALSE;
10932 #endif /* CUSTOM_SET_OCLOFF */
10933 DHD_TRACE(("Enter %s\n", __FUNCTION__));
10934
10935 #ifdef DHDTCPACK_SUPPRESS
10936 dhd_tcpack_suppress_set(dhd, dhd->conf->tcpack_sup_mode);
10937 #endif
10938 dhd->op_mode = 0;
10939
10940 #if defined(CUSTOM_COUNTRY_CODE) && defined(CUSTOMER_HW2)
10941 /* clear AP flags */
10942 dhd->dhd_cflags &= ~WLAN_PLAT_AP_FLAG;
10943 #endif /* CUSTOM_COUNTRY_CODE && CUSTOMER_HW2 */
10944
10945 #ifdef CUSTOMER_HW4_DEBUG
10946 if (!dhd_validate_chipid(dhd)) {
10947 DHD_ERROR(("%s: CONFIG_BCMXXX and CHIP ID(%x) is mismatched\n",
10948 __FUNCTION__, dhd_bus_chip_id(dhd)));
10949 #ifndef SUPPORT_MULTIPLE_CHIPS
10950 ret = BCME_BADARG;
10951 goto done;
10952 #endif /* !SUPPORT_MULTIPLE_CHIPS */
10953 }
10954 #endif /* CUSTOMER_HW4_DEBUG */
10955 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
10956 (op_mode == DHD_FLAG_MFG_MODE)) {
10957 dhd->op_mode = DHD_FLAG_MFG_MODE;
10958 #ifdef DHD_PCIE_RUNTIMEPM
10959 /* Disable RuntimePM in mfg mode */
10960 DHD_DISABLE_RUNTIME_PM(dhd);
10961 DHD_ERROR(("%s : Disable RuntimePM in Manufactring Firmware\n", __FUNCTION__));
10962 #endif /* DHD_PCIE_RUNTIME_PM */
10963 /* Check and adjust IOCTL response timeout for Manufactring firmware */
10964 dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT);
10965 DHD_ERROR(("%s : Set IOCTL response time for Manufactring Firmware\n",
10966 __FUNCTION__));
10967 } else {
10968 dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
10969 DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__));
10970 }
10971 #ifdef GET_CUSTOM_MAC_ENABLE
10972 ret = wifi_platform_get_mac_addr(dhd->info->adapter, hw_ether);
10973 if (!ret) {
10974 memset(buf, 0, sizeof(buf));
10975 bcopy(hw_ether, ea_addr.octet, sizeof(struct ether_addr));
10976 bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf));
10977 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
10978 if (ret < 0) {
10979 memset(buf, 0, sizeof(buf));
10980 bcm_mkiovar("hw_ether", hw_ether, sizeof(hw_ether), buf, sizeof(buf));
10981 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
10982 if (ret) {
10983 int i;
10984 DHD_ERROR(("%s: can't set MAC address MAC="MACDBG", error=%d\n",
10985 __FUNCTION__, MAC2STRDBG(hw_ether), ret));
10986 for (i=0; i<sizeof(hw_ether)-ETHER_ADDR_LEN; i++) {
10987 printf("0x%02x,", hw_ether[i+ETHER_ADDR_LEN]);
10988 if ((i+1)%8 == 0)
10989 printf("\n");
10990 }
10991 ret = BCME_NOTUP;
10992 goto done;
10993 }
10994 }
10995 } else {
10996 DHD_ERROR(("%s: can't get custom MAC address, ret=%d\n", __FUNCTION__, ret));
10997 ret = BCME_NOTUP;
10998 goto done;
10999 }
11000 #endif /* GET_CUSTOM_MAC_ENABLE */
11001 /* Get the default device MAC address directly from firmware */
11002 memset(buf, 0, sizeof(buf));
11003 bcm_mkiovar("cur_etheraddr", 0, 0, buf, sizeof(buf));
11004 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
11005 FALSE, 0)) < 0) {
11006 DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__, ret));
11007 ret = BCME_NOTUP;
11008 goto done;
11009 }
11010 /* Update public MAC address after reading from Firmware */
11011 memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
11012
11013 if ((ret = dhd_apply_default_clm(dhd, dhd->clm_path)) < 0) {
11014 DHD_ERROR(("%s: CLM set failed. Abort initialization.\n", __FUNCTION__));
11015 goto done;
11016 }
11017
11018 /* get capabilities from the firmware */
11019 {
11020 uint32 cap_buf_size = sizeof(dhd->fw_capabilities);
11021 memset(dhd->fw_capabilities, 0, cap_buf_size);
11022 ret = dhd_iovar(dhd, 0, "cap", NULL, 0, dhd->fw_capabilities, (cap_buf_size - 1),
11023 FALSE);
11024 if (ret < 0) {
11025 DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
11026 __FUNCTION__, ret));
11027 return 0;
11028 }
11029
11030 memmove(&dhd->fw_capabilities[1], dhd->fw_capabilities, (cap_buf_size - 1));
11031 dhd->fw_capabilities[0] = ' ';
11032 dhd->fw_capabilities[cap_buf_size - 2] = ' ';
11033 dhd->fw_capabilities[cap_buf_size - 1] = '\0';
11034 }
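/*
 * The single-space padding added above lets capability lookups match whole
 * words: FW_SUPPORTED() can then (conceptually) search for " p2p " or
 * " rsdb " without a longer token such as "p2ps" matching by prefix, e.g.:
 *
 *	if (FW_SUPPORTED(dhd, p2p))
 *		DHD_INFO(("firmware advertises p2p\n"));
 */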
11035
11036 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) ||
11037 (op_mode == DHD_FLAG_HOSTAP_MODE)) {
11038 #ifdef SET_RANDOM_MAC_SOFTAP
11039 uint rand_mac;
11040 #endif /* SET_RANDOM_MAC_SOFTAP */
11041 dhd->op_mode = DHD_FLAG_HOSTAP_MODE;
11042 #if defined(ARP_OFFLOAD_SUPPORT)
11043 arpoe = 0;
11044 #endif
11045 #ifdef PKT_FILTER_SUPPORT
11046 dhd_pkt_filter_enable = FALSE;
11047 #endif
11048 #ifdef SET_RANDOM_MAC_SOFTAP
11049 SRANDOM32((uint)jiffies);
11050 rand_mac = RANDOM32();
11051 iovbuf[0] = (unsigned char)(vendor_oui >> 16) | 0x02; /* local admin bit */
11052 iovbuf[1] = (unsigned char)(vendor_oui >> 8);
11053 iovbuf[2] = (unsigned char)vendor_oui;
11054 iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0;
11055 iovbuf[4] = (unsigned char)(rand_mac >> 8);
11056 iovbuf[5] = (unsigned char)(rand_mac >> 16);
11057
11058 ret = dhd_iovar(dhd, 0, "cur_etheraddr", (char *)&iovbuf, ETHER_ADDR_LEN, NULL, 0,
11059 TRUE);
11060 if (ret < 0) {
11061 DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
11062 } else
11063 memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN);
11064 #endif /* SET_RANDOM_MAC_SOFTAP */
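/*
 * Resulting SoftAP MAC layout from the block above: bytes 0-2 carry the
 * vendor OUI with the locally-administered bit forced on, bytes 3-5 come
 * from the PRNG, with the high nibble of byte 3 forced to 0xF:
 *
 *	addr[0] = (vendor_oui >> 16) | 0x02;	// OUI MSB + local-admin bit
 *	addr[3] = (rand_mac & 0x0F) | 0xF0;
 */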
11065 #ifdef USE_DYNAMIC_F2_BLKSIZE
11066 dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
11067 #endif /* USE_DYNAMIC_F2_BLKSIZE */
11068 #ifdef SUPPORT_AP_POWERSAVE
11069 dhd_set_ap_powersave(dhd, 0, TRUE);
11070 #endif /* SUPPORT_AP_POWERSAVE */
11071 #ifdef SOFTAP_UAPSD_OFF
11072 ret = dhd_iovar(dhd, 0, "wme_apsd", (char *)&wme_apsd, sizeof(wme_apsd), NULL, 0,
11073 TRUE);
11074 if (ret < 0) {
11075 DHD_ERROR(("%s: set wme_apsd 0 fail (error=%d)\n",
11076 __FUNCTION__, ret));
11077 }
11078 #endif /* SOFTAP_UAPSD_OFF */
11079 #if defined(CUSTOM_COUNTRY_CODE) && defined(CUSTOMER_HW2)
11080 /* set AP flag for specific country code of SOFTAP */
11081 dhd->dhd_cflags |= WLAN_PLAT_AP_FLAG | WLAN_PLAT_NODFS_FLAG;
11082 #endif /* CUSTOM_COUNTRY_CODE && CUSTOMER_HW2 */
11083 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
11084 (op_mode == DHD_FLAG_MFG_MODE)) {
11085 #if defined(ARP_OFFLOAD_SUPPORT)
11086 arpoe = 0;
11087 #endif /* ARP_OFFLOAD_SUPPORT */
11088 #ifdef PKT_FILTER_SUPPORT
11089 dhd_pkt_filter_enable = FALSE;
11090 #endif /* PKT_FILTER_SUPPORT */
11091 dhd->op_mode = DHD_FLAG_MFG_MODE;
11092 #ifdef USE_DYNAMIC_F2_BLKSIZE
11093 dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
11094 #endif /* USE_DYNAMIC_F2_BLKSIZE */
11095 if (FW_SUPPORTED(dhd, rsdb)) {
11096 rsdb_mode = 0;
11097 ret = dhd_iovar(dhd, 0, "rsdb_mode", (char *)&rsdb_mode, sizeof(rsdb_mode),
11098 NULL, 0, TRUE);
11099 if (ret < 0) {
11100 DHD_ERROR(("%s Disable rsdb_mode is failed ret= %d\n",
11101 __FUNCTION__, ret));
11102 }
11103 }
11104 } else {
11105 uint32 concurrent_mode = 0;
11106 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_P2P_MODE) ||
11107 (op_mode == DHD_FLAG_P2P_MODE)) {
11108 #if defined(ARP_OFFLOAD_SUPPORT)
11109 arpoe = 0;
11110 #endif
11111 #ifdef PKT_FILTER_SUPPORT
11112 dhd_pkt_filter_enable = FALSE;
11113 #endif
11114 dhd->op_mode = DHD_FLAG_P2P_MODE;
11115 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_IBSS_MODE) ||
11116 (op_mode == DHD_FLAG_IBSS_MODE)) {
11117 dhd->op_mode = DHD_FLAG_IBSS_MODE;
11118 } else
11119 dhd->op_mode = DHD_FLAG_STA_MODE;
11120 #if !defined(AP) && defined(WLP2P)
11121 if (dhd->op_mode != DHD_FLAG_IBSS_MODE &&
11122 (concurrent_mode = dhd_get_concurrent_capabilites(dhd))) {
11123 #if defined(ARP_OFFLOAD_SUPPORT)
11124 arpoe = 1;
11125 #endif
11126 dhd->op_mode |= concurrent_mode;
11127 }
11128
11129 /* Check if we are enabling p2p */
11130 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
11131 ret = dhd_iovar(dhd, 0, "apsta", (char *)&apsta, sizeof(apsta), NULL, 0,
11132 TRUE);
11133 if (ret < 0)
11134 DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__, ret));
11135
11136 #if defined(SOFTAP_AND_GC)
11137 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_AP,
11138 (char *)&ap_mode, sizeof(ap_mode), TRUE, 0)) < 0) {
11139 DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__, ret));
11140 }
11141 #endif
11142 memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN);
11143 ETHER_SET_LOCALADDR(&p2p_ea);
11144 ret = dhd_iovar(dhd, 0, "p2p_da_override", (char *)&p2p_ea, sizeof(p2p_ea),
11145 NULL, 0, TRUE);
11146 if (ret < 0)
11147 DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__, ret));
11148 else
11149 DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n"));
11150 }
11151 #else
11152 (void)concurrent_mode;
11153 #endif
11154 }
11155 #ifdef BCMSDIO
11156 if (dhd->conf->sd_f2_blocksize)
11157 dhdsdio_func_blocksize(dhd, 2, dhd->conf->sd_f2_blocksize);
11158 #endif
11159
11160 #if defined(RSDB_MODE_FROM_FILE)
11161 (void)dhd_rsdb_mode_from_file(dhd);
11162 #endif
11163
11164 #ifdef DISABLE_PRUNED_SCAN
11165 if (FW_SUPPORTED(dhd, rsdb)) {
11166 ret = dhd_iovar(dhd, 0, "scan_features", (char *)&scan_features,
11167 sizeof(scan_features), iovbuf, sizeof(iovbuf), FALSE);
11168 if (ret < 0) {
11169 DHD_ERROR(("%s get scan_features is failed ret=%d\n",
11170 __FUNCTION__, ret));
11171 } else {
11172 memcpy(&scan_features, iovbuf, 4);
11173 scan_features &= ~RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM;
11174 ret = dhd_iovar(dhd, 0, "scan_features", (char *)&scan_features,
11175 sizeof(scan_features), NULL, 0, TRUE);
11176 if (ret < 0) {
11177 DHD_ERROR(("%s set scan_features is failed ret=%d\n",
11178 __FUNCTION__, ret));
11179 }
11180 }
11181 }
11182 #endif /* DISABLE_PRUNED_SCAN */
11183
11184 DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n",
11185 dhd->op_mode, MAC2STRDBG(dhd->mac.octet)));
11186 #ifdef CUSTOMER_HW2
11187 #if defined(DHD_BLOB_EXISTENCE_CHECK)
11188 if (!dhd->pub.is_blob)
11189 #endif /* DHD_BLOB_EXISTENCE_CHECK */
11190 {
11191 /* get a ccode and revision for the country code */
11192 #if defined(CUSTOM_COUNTRY_CODE)
11193 get_customized_country_code(dhd->info->adapter, dhd->dhd_cspec.country_abbrev,
11194 &dhd->dhd_cspec, dhd->dhd_cflags);
11195 #else
11196 get_customized_country_code(dhd->info->adapter, dhd->dhd_cspec.country_abbrev,
11197 &dhd->dhd_cspec);
11198 #endif /* CUSTOM_COUNTRY_CODE */
11199 }
11200 #endif /* CUSTOMER_HW2 */
11201
11202 #if defined(RXFRAME_THREAD) && defined(RXTHREAD_ONLYSTA)
11203 if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE)
11204 dhd->info->rxthread_enabled = FALSE;
11205 else
11206 dhd->info->rxthread_enabled = TRUE;
11207 #endif
11208 /* Set Country code */
11209 if (dhd->dhd_cspec.ccode[0] != 0) {
11210 ret = dhd_iovar(dhd, 0, "country", (char *)&dhd->dhd_cspec, sizeof(wl_country_t),
11211 NULL, 0, TRUE);
11212 if (ret < 0)
11213 DHD_ERROR(("%s: country code setting failed\n", __FUNCTION__));
11214 }
11215
11216
11217 /* Set Listen Interval */
11218 ret = dhd_iovar(dhd, 0, "assoc_listen", (char *)&listen_interval, sizeof(listen_interval),
11219 NULL, 0, TRUE);
11220 if (ret < 0)
11221 DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret));
11222
11223 #if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
11224 #ifdef USE_WFA_CERT_CONF
11225 if (sec_get_param_wfa_cert(dhd, SET_PARAM_ROAMOFF, &roamvar) == BCME_OK) {
11226 DHD_ERROR(("%s: read roam_off param =%d\n", __FUNCTION__, roamvar));
11227 }
11228 #endif /* USE_WFA_CERT_CONF */
11229 /* Disable built-in roaming to allow the external supplicant to take care of roaming */
11230 dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar, sizeof(roamvar), NULL, 0, TRUE);
11231 #endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
11232 #if defined(ROAM_ENABLE)
11233 #ifdef DISABLE_BCNLOSS_ROAM
11234 dhd_iovar(dhd, 0, "roam_bcnloss_off", (char *)&roam_bcnloss_off, sizeof(roam_bcnloss_off),
11235 NULL, 0, TRUE);
11236 #endif /* DISABLE_BCNLOSS_ROAM */
11237 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, roam_trigger,
11238 sizeof(roam_trigger), TRUE, 0)) < 0)
11239 DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__, ret));
11240 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD, roam_scan_period,
11241 sizeof(roam_scan_period), TRUE, 0)) < 0)
11242 DHD_ERROR(("%s: roam scan period set failed %d\n", __FUNCTION__, ret));
11243 if ((dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, roam_delta,
11244 sizeof(roam_delta), TRUE, 0)) < 0)
11245 DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__, ret));
11246 ret = dhd_iovar(dhd, 0, "fullroamperiod", (char *)&roam_fullscan_period,
11247 sizeof(roam_fullscan_period), NULL, 0, TRUE);
11248 if (ret < 0)
11249 DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__, ret));
11250 #endif /* ROAM_ENABLE */
11251
11252 #ifdef CUSTOM_EVENT_PM_WAKE
11253 ret = dhd_iovar(dhd, 0, "const_awake_thresh", (char *)&pm_awake_thresh,
11254 sizeof(pm_awake_thresh), NULL, 0, TRUE);
11255 if (ret < 0) {
11256 DHD_ERROR(("%s set const_awake_thresh failed %d\n", __FUNCTION__, ret));
11257 }
11258 #endif /* CUSTOM_EVENT_PM_WAKE */
11259 #ifdef WLTDLS
11260 #ifdef ENABLE_TDLS_AUTO_MODE
11261 /* by default TDLS on and auto mode on */
11262 _dhd_tdls_enable(dhd, true, true, NULL);
11263 #else
11264 /* by default TDLS on and auto mode off */
11265 _dhd_tdls_enable(dhd, true, false, NULL);
11266 #endif /* ENABLE_TDLS_AUTO_MODE */
11267 #endif /* WLTDLS */
11268
11269 #ifdef DHD_ENABLE_LPC
11270 /* Set lpc 1 */
11271 ret = dhd_iovar(dhd, 0, "lpc", (char *)&lpc, sizeof(lpc), NULL, 0, TRUE);
11272 if (ret < 0) {
11273 DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__, ret));
11274
11275 if (ret == BCME_NOTDOWN) {
11276 uint wl_down = 1;
11277 ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN,
11278 (char *)&wl_down, sizeof(wl_down), TRUE, 0);
11279 DHD_ERROR(("%s lpc fail WL_DOWN : %d, lpc = %d\n", __FUNCTION__, ret, lpc));
11280
11281 ret = dhd_iovar(dhd, 0, "lpc", (char *)&lpc, sizeof(lpc), NULL, 0, TRUE);
11282 DHD_ERROR(("%s Set lpc ret --> %d\n", __FUNCTION__, ret));
11283 }
11284 }
11285 #endif /* DHD_ENABLE_LPC */
11286
11287 #ifdef WLADPS
11288 #ifdef WLADPS_SEAK_AP_WAR
11289 dhd->disabled_adps = FALSE;
11290 #endif /* WLADPS_SEAK_AP_WAR */
11291 if (dhd->op_mode & DHD_FLAG_STA_MODE) {
11292 #ifdef ADPS_MODE_FROM_FILE
11293 dhd_adps_mode_from_file(dhd);
11294 #else
11295 if ((ret = dhd_enable_adps(dhd, ADPS_ENABLE)) != BCME_OK) {
11296 DHD_ERROR(("%s dhd_enable_adps failed %d\n",
11297 __FUNCTION__, ret));
11298 }
11299 #endif /* ADPS_MODE_FROM_FILE */
11300 }
11301 #endif /* WLADPS */
11302
11303 /* Set PowerSave mode */
11304 (void) dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0);
11305
11306 #if defined(BCMSDIO)
11307 /* Match Host and Dongle rx alignment */
11308 dhd_iovar(dhd, 0, "bus:txglomalign", (char *)&dongle_align, sizeof(dongle_align),
11309 NULL, 0, TRUE);
11310
11311 #if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
11312 /* enable credall to reduce the chance of running out of bus credit. */
11313 dhd_iovar(dhd, 0, "bus:credall", (char *)&credall, sizeof(credall), NULL, 0, TRUE);
11314 #endif
11315
11316 #ifdef USE_WFA_CERT_CONF
11317 if (sec_get_param_wfa_cert(dhd, SET_PARAM_BUS_TXGLOM_MODE, &glom) == BCME_OK) {
11318 DHD_ERROR(("%s, read txglom param =%d\n", __FUNCTION__, glom));
11319 }
11320 #endif /* USE_WFA_CERT_CONF */
11321 if (glom != DEFAULT_GLOM_VALUE) {
11322 DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__, glom));
11323 dhd_iovar(dhd, 0, "bus:txglom", (char *)&glom, sizeof(glom), NULL, 0, TRUE);
11324 }
11325 #endif /* defined(BCMSDIO) */
11326
11327 /* Setup timeout if Beacons are lost and roam is off to report link down */
11328 dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout, sizeof(bcn_timeout), NULL, 0, TRUE);
11329
11330 /* Setup assoc_retry_max count to reconnect target AP in dongle */
11331 dhd_iovar(dhd, 0, "assoc_retry_max", (char *)&retry_max, sizeof(retry_max), NULL, 0, TRUE);
11332
11333 #if defined(AP) && !defined(WLP2P)
11334 dhd_iovar(dhd, 0, "apsta", (char *)&apsta, sizeof(apsta), NULL, 0, TRUE);
11335
11336 #endif /* defined(AP) && !defined(WLP2P) */
11337
11338 #ifdef MIMO_ANT_SETTING
11339 dhd_sel_ant_from_file(dhd);
11340 #endif /* MIMO_ANT_SETTING */
11341
11342 #if defined(SOFTAP)
11343 if (ap_fw_loaded == TRUE) {
11344 dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0);
11345 }
11346 #endif
11347
11348 #if defined(KEEP_ALIVE)
11349 {
11350 /* Set Keep Alive : be sure to use FW with -keepalive */
11351 int res;
11352
11353 #if defined(SOFTAP)
11354 if (ap_fw_loaded == FALSE)
11355 #endif
11356 if (!(dhd->op_mode &
11357 (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
11358 if ((res = dhd_keep_alive_onoff(dhd)) < 0)
11359 DHD_ERROR(("%s set keeplive failed %d\n",
11360 __FUNCTION__, res));
11361 }
11362 }
11363 #endif /* defined(KEEP_ALIVE) */
11364
11365 #ifdef USE_WL_TXBF
11366 ret = dhd_iovar(dhd, 0, "txbf", (char *)&txbf, sizeof(txbf), NULL, 0, TRUE);
11367 if (ret < 0)
11368 DHD_ERROR(("%s Set txbf failed %d\n", __FUNCTION__, ret));
11369
11370 #endif /* USE_WL_TXBF */
11371
11372 ret = dhd_iovar(dhd, 0, "scancache", (char *)&scancache_enab, sizeof(scancache_enab), NULL,
11373 0, TRUE);
11374 if (ret < 0) {
11375 DHD_ERROR(("%s Set scancache failed %d\n", __FUNCTION__, ret));
11376 }
11377
11378 #ifdef DISABLE_TXBFR
11379 ret = dhd_iovar(dhd, 0, "txbf_bfr_cap", (char *)&txbf_bfr_cap, sizeof(txbf_bfr_cap), NULL,
11380 0, TRUE);
11381 if (ret < 0) {
11382 DHD_ERROR(("%s Clear txbf_bfr_cap failed %d\n", __FUNCTION__, ret));
11383 }
11384 #endif /* DISABLE_TXBFR */
11385
11386 #ifdef USE_WFA_CERT_CONF
11387 #ifdef USE_WL_FRAMEBURST
11388 if (sec_get_param_wfa_cert(dhd, SET_PARAM_FRAMEBURST, &frameburst) == BCME_OK) {
11389 DHD_ERROR(("%s, read frameburst param=%d\n", __FUNCTION__, frameburst));
11390 }
11391 #endif /* USE_WL_FRAMEBURST */
11392 #ifdef DISABLE_FRAMEBURST_VSDB
11393 g_frameburst = frameburst;
11394 #endif /* DISABLE_FRAMEBURST_VSDB */
11395 #endif /* USE_WFA_CERT_CONF */
11396 #ifdef DISABLE_WL_FRAMEBURST_SOFTAP
11397 /* Disable Framebursting for SoftAP */
11398 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
11399 frameburst = 0;
11400 }
11401 #endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
11402 /* Set frameburst to value */
11403 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst,
11404 sizeof(frameburst), TRUE, 0)) < 0) {
11405 DHD_INFO(("%s frameburst not supported %d\n", __FUNCTION__, ret));
11406 }
11407
11408 iov_buf = (char*)kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL);
11409 if (iov_buf == NULL) {
11410 DHD_ERROR(("failed to allocate %d bytes for iov_buf\n", WLC_IOCTL_SMLEN));
11411 ret = BCME_NOMEM;
11412 goto done;
11413 }
11414
11415
11416 #if defined(CUSTOM_AMPDU_BA_WSIZE)
11417 /* Set ampdu ba wsize to 64 or 16 */
11418 #ifdef CUSTOM_AMPDU_BA_WSIZE
11419 ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE;
11420 #endif
11421 if (ampdu_ba_wsize != 0) {
11422 ret = dhd_iovar(dhd, 0, "ampdu_ba_wsize", (char *)&du_ba_wsize,
11423 sizeof(ampdu_ba_wsize), NULL, 0, TRUE);
11424 if (ret < 0) {
11425 DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed %d\n",
11426 __FUNCTION__, ampdu_ba_wsize, ret));
11427 }
11428 }
11429 #endif
11430
11431 #ifdef ENABLE_TEMP_THROTTLING
11432 if (dhd->op_mode & DHD_FLAG_STA_MODE) {
11433 memset(&temp_control, 0, sizeof(temp_control));
11434 temp_control.enable = 1;
11435 temp_control.control_bit = TEMP_THROTTLE_CONTROL_BIT;
11436 ret = dhd_iovar(dhd, 0, "temp_throttle_control", (char *)&temp_control,
11437 sizeof(temp_control), NULL, 0, TRUE);
11438 if (ret < 0) {
11439 DHD_ERROR(("%s Set temp_throttle_control to %d failed \n",
11440 __FUNCTION__, ret));
11441 }
11442 }
11443 #endif /* ENABLE_TEMP_THROTTLING */
11444
11445 #if defined(CUSTOM_AMPDU_MPDU)
11446 ampdu_mpdu = CUSTOM_AMPDU_MPDU;
11447 if (ampdu_mpdu != 0 && (ampdu_mpdu <= ampdu_ba_wsize)) {
11448 ret = dhd_iovar(dhd, 0, "ampdu_mpdu", (char *)&du_mpdu, sizeof(ampdu_mpdu),
11449 NULL, 0, TRUE);
11450 if (ret < 0) {
11451 DHD_ERROR(("%s Set ampdu_mpdu to %d failed %d\n",
11452 __FUNCTION__, CUSTOM_AMPDU_MPDU, ret));
11453 }
11454 }
11455 #endif /* CUSTOM_AMPDU_MPDU */
11456
11457 #if defined(CUSTOM_AMPDU_RELEASE)
11458 ampdu_release = CUSTOM_AMPDU_RELEASE;
11459 if (ampdu_release != 0 && (ampdu_release <= ampdu_ba_wsize)) {
11460 ret = dhd_iovar(dhd, 0, "ampdu_release", (char *)&du_release,
11461 sizeof(ampdu_release), NULL, 0, TRUE);
11462 if (ret < 0) {
11463 DHD_ERROR(("%s Set ampdu_release to %d failed %d\n",
11464 __FUNCTION__, CUSTOM_AMPDU_RELEASE, ret));
11465 }
11466 }
11467 #endif /* CUSTOM_AMPDU_RELEASE */
11468
11469 #if defined(CUSTOM_AMSDU_AGGSF)
11470 amsdu_aggsf = CUSTOM_AMSDU_AGGSF;
11471 if (amsdu_aggsf != 0) {
11472 ret = dhd_iovar(dhd, 0, "amsdu_aggsf", (char *)&amsdu_aggsf, sizeof(amsdu_aggsf),
11473 NULL, 0, TRUE);
11474 if (ret < 0) {
11475 DHD_ERROR(("%s Set amsdu_aggsf to %d failed %d\n",
11476 __FUNCTION__, CUSTOM_AMSDU_AGGSF, ret));
11477 }
11478 }
11479 #endif /* CUSTOM_AMSDU_AGGSF */
11480
11481 #if defined(SUPPORT_5G_1024QAM_VHT)
11482 #ifdef SUPPORT_5G_1024QAM_VHT
11483 if (dhd_get_chipid(dhd) == BCM4361_CHIP_ID) {
11484 vht_features |= 0x6; /* 5G 1024 QAM support */
11485 }
11486 #endif /* SUPPORT_5G_1024QAM_VHT */
11487 if (vht_features) {
11488 ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features, sizeof(vht_features),
11489 NULL, 0, TRUE);
11490 if (ret < 0) {
11491 DHD_ERROR(("%s vht_features set failed %d\n", __FUNCTION__, ret));
11492
11493 if (ret == BCME_NOTDOWN) {
11494 uint wl_down = 1;
11495 ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN,
11496 (char *)&wl_down, sizeof(wl_down), TRUE, 0);
11497 DHD_ERROR(("%s vht_features fail WL_DOWN : %d,"
11498 " vht_features = 0x%x\n",
11499 __FUNCTION__, ret, vht_features));
11500
11501 ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features,
11502 sizeof(vht_features), NULL, 0, TRUE);
11503 DHD_ERROR(("%s vht_features set. ret --> %d\n", __FUNCTION__, ret));
11504 }
11505 }
11506 }
11507 #endif
11508 #ifdef DISABLE_11N_PROPRIETARY_RATES
11509 ret = dhd_iovar(dhd, 0, "ht_features", (char *)&ht_features, sizeof(ht_features), NULL, 0,
11510 TRUE);
11511 if (ret < 0) {
11512 DHD_ERROR(("%s ht_features set failed %d\n", __FUNCTION__, ret));
11513 }
11514 #endif /* DISABLE_11N_PROPRIETARY_RATES */
11515 #ifdef CUSTOM_PSPRETEND_THR
11516 /* Set ps-pretend threshold for HostAPD */
11517 ret = dhd_iovar(dhd, 0, "pspretend_threshold", (char *)&pspretend_thr,
11518 sizeof(pspretend_thr), NULL, 0, TRUE);
11519 if (ret < 0) {
11520 DHD_ERROR(("%s pspretend_threshold for HostAPD failed %d\n",
11521 __FUNCTION__, ret));
11522 }
11523 #endif
11524
11525 ret = dhd_iovar(dhd, 0, "buf_key_b4_m4", (char *)&buf_key_b4_m4, sizeof(buf_key_b4_m4),
11526 NULL, 0, TRUE);
11527 if (ret < 0) {
11528 DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__, ret));
11529 }
11530 #ifdef SUPPORT_SET_CAC
11531 bcm_mkiovar("cac", (char *)&cac, sizeof(cac), iovbuf, sizeof(iovbuf));
11532 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
11533 DHD_ERROR(("%s Failed to set cac to %d, %d\n", __FUNCTION__, cac, ret));
11534 }
11535 #endif /* SUPPORT_SET_CAC */
11536 #ifdef DHD_ULP
11537 /* Get the required details from dongle during preinit ioctl */
11538 dhd_ulp_preinit(dhd);
11539 #endif /* DHD_ULP */
11540
11541 /* Read event_msgs mask */
11542 ret = dhd_iovar(dhd, 0, "event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf,
11543 sizeof(iovbuf), FALSE);
11544 if (ret < 0) {
11545 DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__, ret));
11546 goto done;
11547 }
11548 bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
11549
11550 /* Setup event_msgs */
11551 setbit(eventmask, WLC_E_SET_SSID);
11552 setbit(eventmask, WLC_E_PRUNE);
11553 setbit(eventmask, WLC_E_AUTH);
11554 setbit(eventmask, WLC_E_AUTH_IND);
11555 setbit(eventmask, WLC_E_ASSOC);
11556 setbit(eventmask, WLC_E_REASSOC);
11557 setbit(eventmask, WLC_E_REASSOC_IND);
11558 if (!(dhd->op_mode & DHD_FLAG_IBSS_MODE))
11559 setbit(eventmask, WLC_E_DEAUTH);
11560 setbit(eventmask, WLC_E_DEAUTH_IND);
11561 setbit(eventmask, WLC_E_DISASSOC_IND);
11562 setbit(eventmask, WLC_E_DISASSOC);
11563 setbit(eventmask, WLC_E_JOIN);
11564 setbit(eventmask, WLC_E_BSSID);
11565 setbit(eventmask, WLC_E_START);
11566 setbit(eventmask, WLC_E_ASSOC_IND);
11567 setbit(eventmask, WLC_E_PSK_SUP);
11568 setbit(eventmask, WLC_E_LINK);
11569 setbit(eventmask, WLC_E_MIC_ERROR);
11570 setbit(eventmask, WLC_E_ASSOC_REQ_IE);
11571 setbit(eventmask, WLC_E_ASSOC_RESP_IE);
11572 #ifdef LIMIT_BORROW
11573 setbit(eventmask, WLC_E_ALLOW_CREDIT_BORROW);
11574 #endif
11575 #ifndef WL_CFG80211
11576 setbit(eventmask, WLC_E_PMKID_CACHE);
11577 // setbit(eventmask, WLC_E_TXFAIL); // terence 20181106: remove unnecessary event
11578 #endif
11579 setbit(eventmask, WLC_E_JOIN_START);
11580 // setbit(eventmask, WLC_E_SCAN_COMPLETE); // terence 20150628: remove redundant event
11581 #ifdef DHD_DEBUG
11582 setbit(eventmask, WLC_E_SCAN_CONFIRM_IND);
11583 #endif
11584 #ifdef WLMEDIA_HTSF
11585 setbit(eventmask, WLC_E_HTSFSYNC);
11586 #endif /* WLMEDIA_HTSF */
11587 #ifdef PNO_SUPPORT
11588 setbit(eventmask, WLC_E_PFN_NET_FOUND);
11589 setbit(eventmask, WLC_E_PFN_BEST_BATCHING);
11590 setbit(eventmask, WLC_E_PFN_BSSID_NET_FOUND);
11591 setbit(eventmask, WLC_E_PFN_BSSID_NET_LOST);
11592 #endif /* PNO_SUPPORT */
11593 /* enable dongle roaming event */
11594 setbit(eventmask, WLC_E_ROAM);
11595 #ifdef WLTDLS
11596 setbit(eventmask, WLC_E_TDLS_PEER_EVENT);
11597 #endif /* WLTDLS */
11598 #ifdef WL_ESCAN
11599 setbit(eventmask, WLC_E_ESCAN_RESULT);
11600 #endif /* WL_ESCAN */
11601 #ifdef RTT_SUPPORT
11602 setbit(eventmask, WLC_E_PROXD);
11603 #endif /* RTT_SUPPORT */
11604 #ifdef WL_CFG80211
11605 setbit(eventmask, WLC_E_ESCAN_RESULT);
11606 setbit(eventmask, WLC_E_AP_STARTED);
11607 setbit(eventmask, WLC_E_ACTION_FRAME_RX);
11608 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
11609 setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE);
11610 }
11611 #endif /* WL_CFG80211 */
11612
11613 #if defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE)
11614 if (dhd_logtrace_from_file(dhd)) {
11615 setbit(eventmask, WLC_E_TRACE);
11616 } else {
11617 clrbit(eventmask, WLC_E_TRACE);
11618 }
11619 #elif defined(SHOW_LOGTRACE)
11620 setbit(eventmask, WLC_E_TRACE);
11621 #else
11622 clrbit(eventmask, WLC_E_TRACE);
11623 #endif /* defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE) */
11624
11625 setbit(eventmask, WLC_E_CSA_COMPLETE_IND);
11626 #ifdef DHD_WMF
11627 setbit(eventmask, WLC_E_PSTA_PRIMARY_INTF_IND);
11628 #endif
11629 #ifdef CUSTOM_EVENT_PM_WAKE
11630 setbit(eventmask, WLC_E_EXCESS_PM_WAKE_EVENT);
11631 #endif /* CUSTOM_EVENT_PM_WAKE */
11632 #ifdef DHD_LOSSLESS_ROAMING
11633 setbit(eventmask, WLC_E_ROAM_PREP);
11634 #endif
11635 #if defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING)
11636 dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
11637 #endif /* defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING) */
11638
11639 #if defined(BCMPCIE) && defined(EAPOL_PKT_PRIO)
11640 dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
11641 #endif /* defined(BCMPCIE) && defined(EAPOL_PKT_PRIO) */
11642
11643 /* Write updated Event mask */
11644 ret = dhd_iovar(dhd, 0, "event_msgs", eventmask, WL_EVENTING_MASK_LEN, NULL, 0, TRUE);
11645 if (ret < 0) {
11646 DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__, ret));
11647 goto done;
11648 }
11649
11650 /* make up event mask ext message iovar for events larger than 128 */
11651 msglen = ROUNDUP(WLC_E_LAST, NBBY)/NBBY + EVENTMSGS_EXT_STRUCT_SIZE;
11652 eventmask_msg = (eventmsgs_ext_t*)kmalloc(msglen, GFP_KERNEL);
11653 if (eventmask_msg == NULL) {
11654 DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", msglen));
11655 ret = BCME_NOMEM;
11656 goto done;
11657 }
11658 bzero(eventmask_msg, msglen);
11659 eventmask_msg->ver = EVENTMSGS_VER;
11660 eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
11661
11662 /* Read event_msgs_ext mask */
11663 ret2 = dhd_iovar(dhd, 0, "event_msgs_ext", (char *)eventmask_msg, msglen, iov_buf,
11664 WLC_IOCTL_SMLEN, FALSE);
11665
11666 if (ret2 == 0) { /* event_msgs_ext must be supported */
11667 bcopy(iov_buf, eventmask_msg, msglen);
11668 #ifdef RSSI_MONITOR_SUPPORT
11669 setbit(eventmask_msg->mask, WLC_E_RSSI_LQM);
11670 #endif /* RSSI_MONITOR_SUPPORT */
11671 #ifdef GSCAN_SUPPORT
11672 setbit(eventmask_msg->mask, WLC_E_PFN_GSCAN_FULL_RESULT);
11673 setbit(eventmask_msg->mask, WLC_E_PFN_SCAN_COMPLETE);
11674 setbit(eventmask_msg->mask, WLC_E_PFN_SSID_EXT);
11675 setbit(eventmask_msg->mask, WLC_E_ROAM_EXP_EVENT);
11676 #endif /* GSCAN_SUPPORT */
11677 setbit(eventmask_msg->mask, WLC_E_RSSI_LQM);
11678 #ifdef BT_WIFI_HANDOVER
11679 setbit(eventmask_msg->mask, WLC_E_BT_WIFI_HANDOVER_REQ);
11680 #endif /* BT_WIFI_HANDOVER */
11681 #ifdef DBG_PKT_MON
11682 setbit(eventmask_msg->mask, WLC_E_ROAM_PREP);
11683 #endif /* DBG_PKT_MON */
11684 #ifdef DHD_ULP
11685 setbit(eventmask_msg->mask, WLC_E_ULP);
11686 #endif
11687 #ifdef ENABLE_TEMP_THROTTLING
11688 setbit(eventmask_msg->mask, WLC_E_TEMP_THROTTLE);
11689 #endif /* ENABLE_TEMP_THROTTLING */
11690
11691 /* Write updated Event mask */
11692 eventmask_msg->ver = EVENTMSGS_VER;
11693 eventmask_msg->command = EVENTMSGS_SET_MASK;
11694 eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
11695 ret = dhd_iovar(dhd, 0, "event_msgs_ext", (char *)eventmask_msg, msglen, NULL, 0,
11696 TRUE);
11697 if (ret < 0) {
11698 DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__, ret));
11699 goto done;
11700 }
11701 } else if (ret2 == BCME_UNSUPPORTED || ret2 == BCME_VERSION) {
11702 /* Skip for BCME_UNSUPPORTED or BCME_VERSION */
11703 DHD_ERROR(("%s event_msgs_ext not support or version mismatch %d\n",
11704 __FUNCTION__, ret2));
11705 } else {
11706 DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__, ret2));
11707 ret = ret2;
11708 goto done;
11709 }
11710
11711 #if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
11712 /* Enabling event log trace for EAP events */
11713 el_tag = (wl_el_tag_params_t *)kmalloc(sizeof(wl_el_tag_params_t), GFP_KERNEL);
11714 if (el_tag == NULL) {
11715 DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n",
11716 (int)sizeof(wl_el_tag_params_t)));
11717 ret = BCME_NOMEM;
11718 goto done;
11719 }
11720 el_tag->tag = EVENT_LOG_TAG_4WAYHANDSHAKE;
11721 el_tag->set = 1;
11722 el_tag->flags = EVENT_LOG_TAG_FLAG_LOG;
11723 bcm_mkiovar("event_log_tag_control", (char *)el_tag,
11724 sizeof(*el_tag), iovbuf, sizeof(iovbuf));
11725 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
11726 #endif /* DHD_8021X_DUMP */
11727
11728 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time,
11729 sizeof(scan_assoc_time), TRUE, 0);
11730 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time,
11731 sizeof(scan_unassoc_time), TRUE, 0);
11732 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME, (char *)&scan_passive_time,
11733 sizeof(scan_passive_time), TRUE, 0);
11734
11735 #ifdef ARP_OFFLOAD_SUPPORT
11736 /* Set and enable ARP offload feature for STA only */
11737 #if defined(SOFTAP)
11738 if (arpoe && !ap_fw_loaded)
11739 #else
11740 if (arpoe)
11741 #endif
11742 {
11743 dhd_arp_offload_enable(dhd, TRUE);
11744 dhd_arp_offload_set(dhd, dhd_arp_mode);
11745 } else {
11746 dhd_arp_offload_enable(dhd, FALSE);
11747 dhd_arp_offload_set(dhd, 0);
11748 }
11749 dhd_arp_enable = arpoe;
11750 #endif /* ARP_OFFLOAD_SUPPORT */
11751
11752 #ifdef PKT_FILTER_SUPPORT
11753 /* Setup default definitions for pktfilter, enable in suspend */
11754 if (dhd_master_mode) {
11755 dhd->pktfilter_count = 6;
11756 dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL;
11757 if (!FW_SUPPORTED(dhd, pf6)) {
11758 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL;
11759 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
11760 } else {
11761 /* Immediate pkt filter TYPE 6: discard IPv4/IPv6 multicast packets */
11762 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = DISCARD_IPV4_MCAST;
11763 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = DISCARD_IPV6_MCAST;
11764 }
11765 /* apply APP pktfilter */
11766 dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806";
11767
11768 /* Setup filter to allow only unicast */
11769 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00";
11770
11771 /* Add filter to pass multicast DNS packets and NOT filter them out as broadcast */
11772 dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL;
11773
11774 dhd->pktfilter[DHD_BROADCAST_ARP_FILTER_NUM] = NULL;
11775 if (FW_SUPPORTED(dhd, pf6)) {
11776 /* Immediate pkt filter TYPE 6: discard broadcast IP packets */
11777 dhd->pktfilter[DHD_IP4BCAST_DROP_FILTER_NUM] =
11778 "107 1 6 IP4_H:16 0xf0 !0xe0 IP4_H:19 0xff 0xff";
11779 dhd->pktfilter_count = 8;
11780 }
11781
11782 #ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
11783 dhd->pktfilter_count = 4;
11784 /* Setup filter to block broadcast and NAT Keepalive packets */
11785 /* discard all broadcast packets */
11786 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0xffffff 0xffffff";
11787 /* discard NAT Keepalive packets */
11788 dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "102 0 0 36 0xffffffff 0x11940009";
11789 /* discard NAT Keepalive packets */
11790 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "104 0 0 38 0xffffffff 0x11940009";
11791 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
11792 #endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
11793 } else
11794 dhd_conf_discard_pkt_filter(dhd);
11795 dhd_conf_add_pkt_filter(dhd);
11796
11797 #if defined(SOFTAP)
11798 if (ap_fw_loaded) {
11799 dhd_enable_packet_filter(0, dhd);
11800 }
11801 #endif /* defined(SOFTAP) */
11802 dhd_set_packet_filter(dhd);
11803 #endif /* PKT_FILTER_SUPPORT */
11804 #ifdef DISABLE_11N
11805 ret = dhd_iovar(dhd, 0, "nmode", (char *)&nmode, sizeof(nmode), NULL, 0, TRUE);
11806 if (ret < 0)
11807 DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__, ret));
11808 #endif /* DISABLE_11N */
11809
11810 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
11811 dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn, sizeof(bcn_li_bcn), NULL, 0, TRUE);
11812 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
11813 /* query for 'clmver' to get clm version info from firmware */
11814 memset(buf, 0, sizeof(buf));
11815 ret = dhd_iovar(dhd, 0, "clmver", NULL, 0, buf, sizeof(buf), FALSE);
11816 if (ret < 0)
11817 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
11818 else {
11819 char *clmver_temp_buf = NULL;
11820
11821 if ((clmver_temp_buf = bcmstrstr(buf, "Data:")) == NULL) {
11822 DHD_ERROR(("Couldn't find \"Data:\"\n"));
11823 } else {
11824 ptr = (clmver_temp_buf + strlen("Data:"));
11825 if ((clmver_temp_buf = bcmstrtok(&ptr, "\n", 0)) == NULL) {
11826 DHD_ERROR(("Couldn't find New line character\n"));
11827 } else {
11828 memset(clm_version, 0, CLM_VER_STR_LEN);
11829 strncpy(clm_version, clmver_temp_buf,
11830 MIN(strlen(clmver_temp_buf), CLM_VER_STR_LEN - 1));
11831 }
11832 }
11833 }
11834
11835 /* query for 'ver' to get version info from firmware */
11836 memset(buf, 0, sizeof(buf));
11837 ptr = buf;
11838 ret = dhd_iovar(dhd, 0, "ver", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
11839 if (ret < 0)
11840 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
11841 else {
11842 bcmstrtok(&ptr, "\n", 0);
11843 strncpy(fw_version, buf, FW_VER_STR_LEN);
11844 fw_version[FW_VER_STR_LEN-1] = '\0';
11845 dhd_set_version_info(dhd, buf);
11846 #ifdef WRITE_WLANINFO
11847 sec_save_wlinfo(buf, EPI_VERSION_STR, dhd->info->nv_path, clm_version);
11848 #endif /* WRITE_WLANINFO */
11849 }
11850 #ifdef GEN_SOFTAP_INFO_FILE
11851 sec_save_softap_info();
11852 #endif /* GEN_SOFTAP_INFO_FILE */
11853
11854 #if defined(BCMSDIO)
11855 dhd_txglom_enable(dhd, dhd->conf->bus_rxglom);
11856 #endif /* defined(BCMSDIO) */
11857
11858 #if defined(BCMSDIO) || defined(BCMDBUS)
11859 #ifdef PROP_TXSTATUS
11860 if (disable_proptx ||
11861 #ifdef PROP_TXSTATUS_VSDB
11862 /* enable WLFC only if the firmware is VSDB when it is in STA mode */
11863 (dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
11864 dhd->op_mode != DHD_FLAG_IBSS_MODE) ||
11865 #endif /* PROP_TXSTATUS_VSDB */
11866 FALSE) {
11867 wlfc_enable = FALSE;
11868 }
11869 ret = dhd_conf_get_disable_proptx(dhd);
11870 if (ret == 0) {
11871 disable_proptx = 0;
11872 wlfc_enable = TRUE;
11873 } else if (ret >= 1) {
11874 disable_proptx = 1;
11875 wlfc_enable = FALSE;
11876 /* terence 20161229: we should set ampdu_hostreorder=0 when disable_proptx=1 */
11877 hostreorder = 0;
11878 }
11879
11880 #if defined(PROP_TXSTATUS)
11881 #ifdef USE_WFA_CERT_CONF
11882 if (sec_get_param_wfa_cert(dhd, SET_PARAM_PROPTX, &proptx) == BCME_OK) {
11883 DHD_ERROR(("%s , read proptx param=%d\n", __FUNCTION__, proptx));
11884 wlfc_enable = proptx;
11885 }
11886 #endif /* USE_WFA_CERT_CONF */
11887 #endif /* PROP_TXSTATUS */
11888
11889 #ifndef DISABLE_11N
11890 ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down, sizeof(wl_down), TRUE, 0);
11891 ret2 = dhd_iovar(dhd, 0, "ampdu_hostreorder", (char *)&hostreorder, sizeof(hostreorder),
11892 NULL, 0, TRUE);
11893 if (ret2 < 0) {
11894 DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__, ret2));
11895 if (ret2 != BCME_UNSUPPORTED)
11896 ret = ret2;
11897
11898 if (ret == BCME_NOTDOWN) {
11899 uint wl_down = 1;
11900 ret2 = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down,
11901 sizeof(wl_down), TRUE, 0);
11902 DHD_ERROR(("%s ampdu_hostreorder fail WL_DOWN : %d, hostreorder :%d\n",
11903 __FUNCTION__, ret2, hostreorder));
11904
11905 ret2 = dhd_iovar(dhd, 0, "ampdu_hostreorder", (char *)&hostreorder,
11906 sizeof(hostreorder), NULL, 0, TRUE);
11907 DHD_ERROR(("%s wl ampdu_hostreorder. ret --> %d\n", __FUNCTION__, ret2));
11908 if (ret2 != BCME_UNSUPPORTED)
11909 ret = ret2;
11910 }
11911 if (ret2 != BCME_OK)
11912 hostreorder = 0;
11913 }
11914 #endif /* DISABLE_11N */
11915
11916
11917 if (wlfc_enable) {
11918 dhd_wlfc_init(dhd);
11919 /* terence 20161229: enable ampdu_hostreorder if tlv enabled */
11920 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "ampdu_hostreorder", 1, 0, TRUE);
11921 }
11922 #ifndef DISABLE_11N
11923 else if (hostreorder)
11924 dhd_wlfc_hostreorder_init(dhd);
11925 #endif /* DISABLE_11N */
11926 #else
11927 /* terence 20161229: disable ampdu_hostreorder if PROP_TXSTATUS not defined */
11928 printf("%s: not define PROP_TXSTATUS\n", __FUNCTION__);
11929 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "ampdu_hostreorder", 0, 0, TRUE);
11930 #endif /* PROP_TXSTATUS */
11931 #endif /* BCMSDIO || BCMDBUS */
11932 #ifndef PCIE_FULL_DONGLE
11933 /* For FD we need all the packets at DHD to handle intra-BSS forwarding */
11934 if (FW_SUPPORTED(dhd, ap)) {
11935 wl_ap_isolate = AP_ISOLATE_SENDUP_ALL;
11936 ret = dhd_iovar(dhd, 0, "ap_isolate", (char *)&wl_ap_isolate, sizeof(wl_ap_isolate),
11937 NULL, 0, TRUE);
11938 if (ret < 0)
11939 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
11940 }
11941 #endif /* PCIE_FULL_DONGLE */
11942 #ifdef PNO_SUPPORT
11943 if (!dhd->pno_state) {
11944 dhd_pno_init(dhd);
11945 }
11946 #endif
11947 #ifdef RTT_SUPPORT
11948 if (!dhd->rtt_state) {
11949 ret = dhd_rtt_init(dhd);
11950 if (ret < 0) {
11951 DHD_ERROR(("%s failed to initialize RTT\n", __FUNCTION__));
11952 }
11953 }
11954 #endif
11955 #ifdef WL11U
11956 dhd_interworking_enable(dhd);
11957 #endif /* WL11U */
11958
11959 #ifdef SUPPORT_SENSORHUB
11960 DHD_ERROR(("%s: SensorHub enabled %d\n",
11961 __FUNCTION__, dhd->info->shub_enable));
11962 ret2 = dhd_iovar(dhd, 0, "shub", NULL, 0,
11963 (char *)&shub_ctl, sizeof(shub_ctl), FALSE);
11964 if (ret2 < 0) {
11965 DHD_ERROR(("%s failed to get shub hub enable information %d\n",
11966 __FUNCTION__, ret2));
11967 dhd->info->shub_enable = 0;
11968 } else {
11969 dhd->info->shub_enable = shub_ctl.enable;
11970 DHD_ERROR(("%s: checking sensorhub enable %d\n",
11971 __FUNCTION__, dhd->info->shub_enable));
11972 }
11973 #else
11974 DHD_ERROR(("%s: SensorHub diabled %d\n",
11975 __FUNCTION__, dhd->info->shub_enable));
11976 dhd->info->shub_enable = FALSE;
11977 shub_ctl.enable = FALSE;
11978 ret2 = dhd_iovar(dhd, 0, "shub", (char *)&shub_ctl, sizeof(shub_ctl),
11979 NULL, 0, TRUE);
11980 if (ret2 < 0) {
11981 DHD_ERROR(("%s failed to set ShubHub disable\n",
11982 __FUNCTION__));
11983 }
11984 #endif /* SUPPORT_SENSORHUB */
11985
11986
11987 #ifdef NDO_CONFIG_SUPPORT
11988 dhd->ndo_enable = FALSE;
11989 dhd->ndo_host_ip_overflow = FALSE;
11990 dhd->ndo_max_host_ip = NDO_MAX_HOST_IP_ENTRIES;
11991 #endif /* NDO_CONFIG_SUPPORT */
11992
11993 /* ND offload version supported */
11994 dhd->ndo_version = dhd_ndo_get_version(dhd);
11995 if (dhd->ndo_version > 0) {
11996 DHD_INFO(("%s: ndo version %d\n", __FUNCTION__, dhd->ndo_version));
11997
11998 #ifdef NDO_CONFIG_SUPPORT
11999 /* enable Unsolicited NA filter */
12000 ret = dhd_ndo_unsolicited_na_filter_enable(dhd, 1);
12001 if (ret < 0) {
12002 DHD_ERROR(("%s failed to enable Unsolicited NA filter\n", __FUNCTION__));
12003 }
12004 #endif /* NDO_CONFIG_SUPPORT */
12005 }
12006
12007 /* check dongle supports wbtext or not */
12008 dhd->wbtext_support = FALSE;
12009 if (dhd_wl_ioctl_get_intiovar(dhd, "wnm_bsstrans_resp", &wnm_bsstrans_resp,
12010 WLC_GET_VAR, FALSE, 0) != BCME_OK) {
12011 DHD_ERROR(("failed to get wnm_bsstrans_resp\n"));
12012 }
12013 if (wnm_bsstrans_resp == WL_BSSTRANS_POLICY_PRODUCT_WBTEXT) {
12014 dhd->wbtext_support = TRUE;
12015 }
12016 #ifndef WBTEXT
12017 /* driver can turn off wbtext feature through makefile */
12018 if (dhd->wbtext_support) {
12019 if (dhd_wl_ioctl_set_intiovar(dhd, "wnm_bsstrans_resp",
12020 WL_BSSTRANS_POLICY_ROAM_ALWAYS,
12021 WLC_SET_VAR, FALSE, 0) != BCME_OK) {
12022 DHD_ERROR(("failed to disable WBTEXT\n"));
12023 }
12024 }
12025 #endif /* !WBTEXT */
12026
12027 /* WNM capabilities */
12028 wnm_cap = 0
12029 #ifdef WL11U
12030 | WL_WNM_BSSTRANS | WL_WNM_NOTIF
12031 #endif
12032 #ifdef WBTEXT
12033 | WL_WNM_BSSTRANS | WL_WNM_MAXIDLE
12034 #endif
12035 ;
12036 if (dhd_iovar(dhd, 0, "wnm", (char *)&wnm_cap, sizeof(wnm_cap), NULL, 0, TRUE) < 0) {
12037 DHD_ERROR(("failed to set WNM capabilities\n"));
12038 }
12039
12040 dhd_conf_postinit_ioctls(dhd);
12041 done:
12042
12043 if (eventmask_msg)
12044 kfree(eventmask_msg);
12045 if (iov_buf)
12046 kfree(iov_buf);
12047 #if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
12048 if (el_tag)
12049 kfree(el_tag);
12050 #endif /* DHD_8021X_DUMP */
12051 return ret;
12052 }
12053
12054
12055 int
12056 dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *param_buf, uint param_len, char *res_buf,
12057 uint res_len, int set)
12058 {
12059 char *buf = NULL;
12060 int input_len;
12061 wl_ioctl_t ioc;
12062 int ret;
12063
12064 if (res_len > WLC_IOCTL_MAXLEN || param_len > WLC_IOCTL_MAXLEN)
12065 return BCME_BADARG;
12066
12067 input_len = strlen(name) + 1 + param_len;
12068 if (input_len > WLC_IOCTL_MAXLEN)
12069 return BCME_BADARG;
12070
12071 buf = NULL;
12072 if (set) {
12073 if (res_buf || res_len != 0) {
12074 DHD_ERROR(("%s: SET wrong arguemnet\n", __FUNCTION__));
12075 ret = BCME_BADARG;
12076 goto exit;
12077 }
12078 buf = kzalloc(input_len, GFP_KERNEL);
12079 if (!buf) {
12080 DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__));
12081 ret = BCME_NOMEM;
12082 goto exit;
12083 }
12084 ret = bcm_mkiovar(name, param_buf, param_len, buf, input_len);
12085 if (!ret) {
12086 ret = BCME_NOMEM;
12087 goto exit;
12088 }
12089
12090 ioc.cmd = WLC_SET_VAR;
12091 ioc.buf = buf;
12092 ioc.len = input_len;
12093 ioc.set = set;
12094
12095 ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
12096 } else {
12097 if (!res_buf || !res_len) {
12098 DHD_ERROR(("%s: GET failed. resp_buf NULL or length 0.\n", __FUNCTION__));
12099 ret = BCME_BADARG;
12100 goto exit;
12101 }
12102
12103 if (res_len < input_len) {
12104 DHD_INFO(("%s: res_len(%d) < input_len(%d)\n", __FUNCTION__,
12105 res_len, input_len));
12106 buf = kzalloc(input_len, GFP_KERNEL);
12107 if (!buf) {
12108 DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__));
12109 ret = BCME_NOMEM;
12110 goto exit;
12111 }
12112 ret = bcm_mkiovar(name, param_buf, param_len, buf, input_len);
12113 if (!ret) {
12114 ret = BCME_NOMEM;
12115 goto exit;
12116 }
12117
12118 ioc.cmd = WLC_GET_VAR;
12119 ioc.buf = buf;
12120 ioc.len = input_len;
12121 ioc.set = set;
12122
12123 ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
12124
12125 if (ret == BCME_OK) {
12126 memcpy(res_buf, buf, res_len);
12127 }
12128 } else {
12129 memset(res_buf, 0, res_len);
12130 ret = bcm_mkiovar(name, param_buf, param_len, res_buf, res_len);
12131 if (!ret) {
12132 ret = BCME_NOMEM;
12133 goto exit;
12134 }
12135
12136 ioc.cmd = WLC_GET_VAR;
12137 ioc.buf = res_buf;
12138 ioc.len = res_len;
12139 ioc.set = set;
12140
12141 ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
12142 }
12143 }
12144 exit:
12145 kfree(buf);
12146 return ret;
12147 }
12148
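/*
 * Illustrative sketch (not part of the driver): how a caller might use
 * dhd_iovar() above. A SET passes the parameter in param_buf with no result
 * buffer; a GET supplies a result buffer and its length. The iovar name
 * "ampdu_ba_wsize" is only an example; any firmware int iovar works the same.
 */
static int example_dhd_iovar_usage(dhd_pub_t *pub)
{
	int val = 64;
	int out = 0;
	int ret;

	/* SET: res_buf must be NULL and res_len 0, or BCME_BADARG is returned */
	ret = dhd_iovar(pub, 0, "ampdu_ba_wsize", (char *)&val, sizeof(val),
		NULL, 0, TRUE);
	if (ret != BCME_OK)
		return ret;

	/* GET: param_buf may be NULL when the iovar takes no parameter */
	return dhd_iovar(pub, 0, "ampdu_ba_wsize", NULL, 0,
		(char *)&out, sizeof(out), FALSE);
}
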
12149 int
12150 dhd_getiovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf,
12151 uint cmd_len, char **resptr, uint resp_len)
12152 {
12153 int len = resp_len;
12154 int ret;
12155 char *buf = *resptr;
12156 wl_ioctl_t ioc;
12157 if (resp_len > WLC_IOCTL_MAXLEN)
12158 return BCME_BADARG;
12159
12160 memset(buf, 0, resp_len);
12161
12162 ret = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len);
12163 if (ret == 0) {
12164 return BCME_BUFTOOSHORT;
12165 }
12166
12167 memset(&ioc, 0, sizeof(ioc));
12168
12169 ioc.cmd = WLC_GET_VAR;
12170 ioc.buf = buf;
12171 ioc.len = len;
12172 ioc.set = 0;
12173
12174 ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
12175
12176 return ret;
12177 }
12178
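/*
 * Illustrative sketch: unlike dhd_iovar(), dhd_getiovar() above reuses a
 * buffer the caller already owns (*resptr) for both the request and the
 * response. "ver" is a common firmware iovar, used here only as an example.
 */
static int example_dhd_getiovar_usage(dhd_pub_t *pub, char *buf, uint buf_len)
{
	char *resp = buf;

	return dhd_getiovar(pub, 0, "ver", NULL, 0, &resp, buf_len);
}
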
12179
12180 int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx)
12181 {
12182 struct dhd_info *dhd = dhdp->info;
12183 struct net_device *dev = NULL;
12184
12185 ASSERT(dhd && dhd->iflist[ifidx]);
12186 dev = dhd->iflist[ifidx]->net;
12187 ASSERT(dev);
12188
12189 if (netif_running(dev)) {
12190 DHD_ERROR(("%s: Must be down to change its MTU\n", dev->name));
12191 return BCME_NOTDOWN;
12192 }
12193
12194 #define DHD_MIN_MTU 1500
12195 #define DHD_MAX_MTU 1752
12196
12197 if ((new_mtu < DHD_MIN_MTU) || (new_mtu > DHD_MAX_MTU)) {
12198 DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu));
12199 return BCME_BADARG;
12200 }
12201
12202 dev->mtu = new_mtu;
12203 return 0;
12204 }
12205
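/*
 * Illustrative sketch (hypothetical helper): dhd_change_mtu() above refuses
 * to touch a running interface, so a caller would bring the netdev down
 * first, mirroring the dev_close() sequence used in dhd_detach() below.
 */
static int example_set_mtu_while_down(dhd_pub_t *dhdp, struct net_device *dev,
	int new_mtu)
{
	rtnl_lock();
	if (dev->flags & IFF_UP)
		dev_close(dev);
	rtnl_unlock();

	/* valid range is DHD_MIN_MTU (1500) to DHD_MAX_MTU (1752) inclusive */
	return dhd_change_mtu(dhdp, new_mtu, 0);
}
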
12206 #ifdef ARP_OFFLOAD_SUPPORT
12207 /* add or remove AOE host ip(s) (up to 8 IPs on the interface) */
12208 void
12209 aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx)
12210 {
12211 u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */
12212 int i;
12213 int ret;
12214
12215 bzero(ipv4_buf, sizeof(ipv4_buf));
12216
12217 /* display what we've got */
12218 ret = dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
12219 DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__));
12220 #ifdef AOE_DBG
12221 dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
12222 #endif
12223 /* now that we have saved the host_ip table, clear it in the dongle AOE */
12224 dhd_aoe_hostip_clr(dhd_pub, idx);
12225
12226 if (ret) {
12227 DHD_ERROR(("%s failed\n", __FUNCTION__));
12228 return;
12229 }
12230
12231 for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
12232 if (add && (ipv4_buf[i] == 0)) {
12233 ipv4_buf[i] = ipa;
12234 add = FALSE; /* added ipa to local table */
12235 DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
12236 __FUNCTION__, i));
12237 } else if (ipv4_buf[i] == ipa) {
12238 ipv4_buf[i] = 0;
12239 DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
12240 __FUNCTION__, ipa, i));
12241 }
12242
12243 if (ipv4_buf[i] != 0) {
12244 /* add back host_ip entries from our local cache */
12245 dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i], idx);
12246 DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
12247 __FUNCTION__, ipv4_buf[i], i));
12248 }
12249 }
12250 #ifdef AOE_DBG
12251 /* see the resulting hostip table */
12252 dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
12253 DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__));
12254 dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
12255 #endif
12256 }
12257
12258 /*
12259 * Notification mechanism from kernel to our driver. This function is called by the Linux kernel
12260 * whenever there is an event related to an IP address.
12261 * ptr : kernel provided pointer to IP address that has changed
12262 */
12263 static int dhd_inetaddr_notifier_call(struct notifier_block *this,
12264 unsigned long event,
12265 void *ptr)
12266 {
12267 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
12268
12269 dhd_info_t *dhd;
12270 dhd_pub_t *dhd_pub;
12271 int idx;
12272
12273 if (!dhd_arp_enable)
12274 return NOTIFY_DONE;
12275 if (!ifa || !(ifa->ifa_dev->dev))
12276 return NOTIFY_DONE;
12277
12278 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
12279 /* Filter notifications meant for non-Broadcom devices */
12280 if ((ifa->ifa_dev->dev->netdev_ops != &dhd_ops_pri) &&
12281 (ifa->ifa_dev->dev->netdev_ops != &dhd_ops_virt)) {
12282 #if defined(WL_ENABLE_P2P_IF)
12283 if (!wl_cfgp2p_is_ifops(ifa->ifa_dev->dev->netdev_ops))
12284 #endif /* WL_ENABLE_P2P_IF */
12285 return NOTIFY_DONE;
12286 }
12287 #endif /* LINUX_VERSION_CODE */
12288
12289 dhd = DHD_DEV_INFO(ifa->ifa_dev->dev);
12290 if (!dhd)
12291 return NOTIFY_DONE;
12292
12293 dhd_pub = &dhd->pub;
12294
12295 if (dhd_pub->arp_version == 1) {
12296 idx = 0;
12297 } else {
12298 for (idx = 0; idx < DHD_MAX_IFS; idx++) {
12299 if (dhd->iflist[idx] && dhd->iflist[idx]->net == ifa->ifa_dev->dev)
12300 break;
12301 }
12302 if (idx < DHD_MAX_IFS)
12303 DHD_TRACE(("ifidx : %p %s %d\n", dhd->iflist[idx]->net,
12304 dhd->iflist[idx]->name, dhd->iflist[idx]->idx));
12305 else {
12306 DHD_ERROR(("Cannot find ifidx for(%s) set to 0\n", ifa->ifa_label));
12307 idx = 0;
12308 }
12309 }
12310
12311 switch (event) {
12312 case NETDEV_UP:
12313 DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
12314 __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
12315
12316 if (dhd->pub.busstate != DHD_BUS_DATA) {
12317 DHD_ERROR(("%s: bus not ready, exit\n", __FUNCTION__));
12318 if (dhd->pend_ipaddr) {
12319 DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
12320 __FUNCTION__, dhd->pend_ipaddr));
12321 }
12322 dhd->pend_ipaddr = ifa->ifa_address;
12323 break;
12324 }
12325
12326 #ifdef AOE_IP_ALIAS_SUPPORT
12327 DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n",
12328 __FUNCTION__));
12329 aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE, idx);
12330 #endif /* AOE_IP_ALIAS_SUPPORT */
12331 break;
12332
12333 case NETDEV_DOWN:
12334 DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
12335 __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
12336 dhd->pend_ipaddr = 0;
12337 #ifdef AOE_IP_ALIAS_SUPPORT
12338 DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n",
12339 __FUNCTION__));
12340 if ((dhd_pub->op_mode & DHD_FLAG_HOSTAP_MODE) ||
12341 (ifa->ifa_dev->dev != dhd_linux_get_primary_netdev(dhd_pub))) {
12342 aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE, idx);
12343 } else
12344 #endif /* AOE_IP_ALIAS_SUPPORT */
12345 {
12346 dhd_aoe_hostip_clr(&dhd->pub, idx);
12347 dhd_aoe_arp_clr(&dhd->pub, idx);
12348 }
12349 break;
12350
12351 default:
12352 DHD_ARPOE(("%s: do noting for [%s] Event: %lu\n",
12353 __func__, ifa->ifa_label, event));
12354 break;
12355 }
12356 return NOTIFY_DONE;
12357 }
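
/*
 * Illustrative sketch (hypothetical names): how the callback above is wired
 * into the kernel. The driver's real notifier_block, dhd_inetaddr_notifier,
 * is declared elsewhere in this file and registered once at attach time;
 * dhd_detach() below performs the matching unregister_inetaddr_notifier().
 */
static struct notifier_block example_inetaddr_notifier = {
	.notifier_call = dhd_inetaddr_notifier_call,
};

static void example_register_inetaddr_notifier(void)
{
	register_inetaddr_notifier(&example_inetaddr_notifier);
}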
12358 #endif /* ARP_OFFLOAD_SUPPORT */
12359
12360 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
12361 /* Neighbor Discovery Offload: deferred handler */
12362 static void
12363 dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event)
12364 {
12365 struct ipv6_work_info_t *ndo_work = (struct ipv6_work_info_t *)event_data;
12366 dhd_info_t *dhd = (dhd_info_t *)dhd_info;
12367 dhd_pub_t *dhdp;
12368 int ret;
12369
12370 if (!dhd) {
12371 DHD_ERROR(("%s: invalid dhd_info\n", __FUNCTION__));
12372 goto done;
12373 }
12374 dhdp = &dhd->pub;
12375
12376 if (event != DHD_WQ_WORK_IPV6_NDO) {
12377 DHD_ERROR(("%s: unexpected event\n", __FUNCTION__));
12378 goto done;
12379 }
12380
12381 if (!ndo_work) {
12382 DHD_ERROR(("%s: ipv6 work info is not initialized\n", __FUNCTION__));
12383 return;
12384 }
12385
12386 switch (ndo_work->event) {
12387 case NETDEV_UP:
12388 #ifndef NDO_CONFIG_SUPPORT
12389 DHD_TRACE(("%s: Enable NDO \n ", __FUNCTION__));
12390 ret = dhd_ndo_enable(dhdp, TRUE);
12391 if (ret < 0) {
12392 DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, ret));
12393 }
12394 #endif /* !NDO_CONFIG_SUPPORT */
12395 DHD_TRACE(("%s: Add a host ip for NDO\n", __FUNCTION__));
12396 if (dhdp->ndo_version > 0) {
12397 /* inet6 addr notifier called only for unicast address */
12398 ret = dhd_ndo_add_ip_with_type(dhdp, &ndo_work->ipv6_addr[0],
12399 WL_ND_IPV6_ADDR_TYPE_UNICAST, ndo_work->if_idx);
12400 } else {
12401 ret = dhd_ndo_add_ip(dhdp, &ndo_work->ipv6_addr[0],
12402 ndo_work->if_idx);
12403 }
12404 if (ret < 0) {
12405 DHD_ERROR(("%s: Adding a host ip for NDO failed %d\n",
12406 __FUNCTION__, ret));
12407 }
12408 break;
12409 case NETDEV_DOWN:
12410 if (dhdp->ndo_version > 0) {
12411 DHD_TRACE(("%s: Remove a host ip for NDO\n", __FUNCTION__));
12412 ret = dhd_ndo_remove_ip_by_addr(dhdp,
12413 &ndo_work->ipv6_addr[0], ndo_work->if_idx);
12414 } else {
12415 DHD_TRACE(("%s: Clear host ip table for NDO \n", __FUNCTION__));
12416 ret = dhd_ndo_remove_ip(dhdp, ndo_work->if_idx);
12417 }
12418 if (ret < 0) {
12419 DHD_ERROR(("%s: Removing host ip for NDO failed %d\n",
12420 __FUNCTION__, ret));
12421 goto done;
12422 }
12423 #ifdef NDO_CONFIG_SUPPORT
12424 if (dhdp->ndo_host_ip_overflow) {
12425 ret = dhd_dev_ndo_update_inet6addr(
12426 dhd_idx2net(dhdp, ndo_work->if_idx));
12427 if ((ret < 0) && (ret != BCME_NORESOURCE)) {
12428 DHD_ERROR(("%s: Updating host ip for NDO failed %d\n",
12429 __FUNCTION__, ret));
12430 goto done;
12431 }
12432 }
12433 #else /* !NDO_CONFIG_SUPPORT */
12434 DHD_TRACE(("%s: Disable NDO\n ", __FUNCTION__));
12435 ret = dhd_ndo_enable(dhdp, FALSE);
12436 if (ret < 0) {
12437 DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__, ret));
12438 goto done;
12439 }
12440 #endif /* NDO_CONFIG_SUPPORT */
12441 break;
12442
12443 default:
12444 DHD_ERROR(("%s: unknown notifier event \n", __FUNCTION__));
12445 break;
12446 }
12447 done:
12448 /* free ndo_work; allocated while scheduling the work */
12449 if (ndo_work) {
12450 kfree(ndo_work);
12451 }
12452
12453 return;
12454 }
12455
12456 /*
12457 * Neighbor Discovery Offload: Called when an interface
12458 * is assigned with ipv6 address.
12459 * Handles only primary interface
12460 */
12461 int dhd_inet6addr_notifier_call(struct notifier_block *this, unsigned long event, void *ptr)
12462 {
12463 dhd_info_t *dhd;
12464 dhd_pub_t *dhdp;
12465 struct inet6_ifaddr *inet6_ifa = ptr;
12466 struct ipv6_work_info_t *ndo_info;
12467 int idx;
12468
12469 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
12470 /* Filter notifications meant for non-Broadcom devices */
12471 if (inet6_ifa->idev->dev->netdev_ops != &dhd_ops_pri) {
12472 return NOTIFY_DONE;
12473 }
12474 #endif /* LINUX_VERSION_CODE */
12475
12476 dhd = DHD_DEV_INFO(inet6_ifa->idev->dev);
12477 if (!dhd) {
12478 return NOTIFY_DONE;
12479 }
12480 dhdp = &dhd->pub;
12481
12482 /* Supports only primary interface */
12483 idx = dhd_net2idx(dhd, inet6_ifa->idev->dev);
12484 if (idx != 0) {
12485 return NOTIFY_DONE;
12486 }
12487
12488 /* FW capability */
12489 if (!FW_SUPPORTED(dhdp, ndoe)) {
12490 return NOTIFY_DONE;
12491 }
12492
12493 ndo_info = (struct ipv6_work_info_t *)kzalloc(sizeof(struct ipv6_work_info_t), GFP_ATOMIC);
12494 if (!ndo_info) {
12495 DHD_ERROR(("%s: ipv6 work alloc failed\n", __FUNCTION__));
12496 return NOTIFY_DONE;
12497 }
12498
12499 /* fill up ndo_info */
12500 ndo_info->event = event;
12501 ndo_info->if_idx = idx;
12502 memcpy(ndo_info->ipv6_addr, &inet6_ifa->addr, IPV6_ADDR_LEN);
12503
12504 /* defer the work to a thread as it may block the kernel */
12505 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)ndo_info, DHD_WQ_WORK_IPV6_NDO,
12506 dhd_inet6_work_handler, DHD_WQ_WORK_PRIORITY_LOW);
12507 return NOTIFY_DONE;
12508 }
12509 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
12510
12511 int
12512 dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock)
12513 {
12514 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
12515 dhd_if_t *ifp;
12516 struct net_device *net = NULL;
12517 int err = 0;
12518 uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };
12519
12520 DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
12521
12522 if (dhd == NULL || dhd->iflist[ifidx] == NULL) {
12523 DHD_ERROR(("%s: Invalid Interface\n", __FUNCTION__));
12524 return BCME_ERROR;
12525 }
12526
12527 ASSERT(dhd && dhd->iflist[ifidx]);
12528 ifp = dhd->iflist[ifidx];
12529 net = ifp->net;
12530 ASSERT(net && (ifp->idx == ifidx));
12531
12532 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
12533 ASSERT(!net->open);
12534 net->get_stats = dhd_get_stats;
12535 net->do_ioctl = dhd_ioctl_entry;
12536 net->hard_start_xmit = dhd_start_xmit;
12537 net->set_mac_address = dhd_set_mac_address;
12538 net->set_multicast_list = dhd_set_multicast_list;
12539 net->open = net->stop = NULL;
12540 #else
12541 ASSERT(!net->netdev_ops);
12542 net->netdev_ops = &dhd_ops_virt;
12543 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
12544
12545 /* Ok, link into the network layer... */
12546 if (ifidx == 0) {
12547 /*
12548 * device functions for the primary interface only
12549 */
12550 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
12551 net->open = dhd_open;
12552 net->stop = dhd_stop;
12553 #else
12554 net->netdev_ops = &dhd_ops_pri;
12555 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
12556 if (!ETHER_ISNULLADDR(dhd->pub.mac.octet))
12557 memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
12558 } else {
12559 /*
12560 * We have to use the primary MAC for virtual interfaces
12561 */
12562 memcpy(temp_addr, ifp->mac_addr, ETHER_ADDR_LEN);
12563 /*
12564 * Android sets the locally administered bit to indicate that this is a
12565 * portable hotspot. This will not work in simultaneous AP/STA mode,
12566 * nor with P2P. Need to set the Dongle's MAC address, and then use that.
12567 */
12568 if (!memcmp(temp_addr, dhd->iflist[0]->mac_addr,
12569 ETHER_ADDR_LEN)) {
12570 DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
12571 __func__, net->name));
12572 temp_addr[0] |= 0x02;
12573 }
12574 }
12575
12576 net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
12577 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
12578 net->ethtool_ops = &dhd_ethtool_ops;
12579 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
12580
12581 #if defined(WL_WIRELESS_EXT)
12582 #if WIRELESS_EXT < 19
12583 net->get_wireless_stats = dhd_get_wireless_stats;
12584 #endif /* WIRELESS_EXT < 19 */
12585 #if WIRELESS_EXT > 12
12586 net->wireless_handlers = &wl_iw_handler_def;
12587 #endif /* WIRELESS_EXT > 12 */
12588 #endif /* defined(WL_WIRELESS_EXT) */
12589
12590 dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);
12591
12592 #ifdef WLMESH
12593 if (ifidx >= 2 && dhdp->conf->fw_type == FW_TYPE_MESH) {
12594 temp_addr[4] ^= 0x80;
12595 temp_addr[4] += ifidx;
12596 temp_addr[5] += ifidx;
12597 }
12598 #endif
12599 memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
12600
12601 if (ifidx == 0)
12602 printf("%s\n", dhd_version);
12603 #ifdef WL_EXT_IAPSTA
12604 else
12605 wl_ext_iapsta_attach_netdev(net, ifidx, ifp->bssidx);
12606 #endif
12607 if (ifidx != 0) {
12608 if (_dhd_set_mac_address(dhd, ifidx, net->dev_addr) == 0)
12609 DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
12610 else
12611 DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
12612 }
12613
12614 if (need_rtnl_lock)
12615 err = register_netdev(net);
12616 else
12617 err = register_netdevice(net);
12618
12619 if (err != 0) {
12620 DHD_ERROR(("couldn't register the net device [%s], err %d\n", net->name, err));
12621 goto fail;
12622 }
12623 #ifdef WL_EXT_IAPSTA
12624 if (ifidx == 0)
12625 wl_ext_iapsta_attach_netdev(net, ifidx, ifp->bssidx);
12626 wl_ext_iapsta_attach_name(net, ifidx);
12627 #endif
12628
12629
12630
12631 printf("Register interface [%s] MAC: "MACDBG"\n\n", net->name,
12632 #if defined(CUSTOMER_HW4_DEBUG)
12633 MAC2STRDBG(dhd->pub.mac.octet));
12634 #else
12635 MAC2STRDBG(net->dev_addr));
12636 #endif /* CUSTOMER_HW4_DEBUG */
12637
12638 #if defined(SOFTAP) && defined(WL_WIRELESS_EXT) && !defined(WL_CFG80211)
12639 #endif
12640
12641 #if (defined(BCMPCIE) || (defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= \
12642 KERNEL_VERSION(2, 6, 27))) || defined(BCMDBUS))
12643 if (ifidx == 0) {
12644 #if defined(BCMLXSDMMC) && !defined(DHD_PRELOAD)
12645 up(&dhd_registration_sem);
12646 #endif /* BCMLXSDMMC */
12647 if (!dhd_download_fw_on_driverload) {
12648 #ifdef WL_CFG80211
12649 wl_terminate_event_handler(net);
12650 #endif /* WL_CFG80211 */
12651 #if defined(DHD_LB_RXP)
12652 __skb_queue_purge(&dhd->rx_pend_queue);
12653 #endif /* DHD_LB_RXP */
12654
12655 #if defined(DHD_LB_TXP)
12656 skb_queue_purge(&dhd->tx_pend_queue);
12657 #endif /* DHD_LB_TXP */
12658
12659 #ifdef SHOW_LOGTRACE
12660 /* Release the skbs from queue for WLC_E_TRACE event */
12661 dhd_event_logtrace_flush_queue(dhdp);
12662 #endif /* SHOW_LOGTRACE */
12663
12664 #ifdef DHDTCPACK_SUPPRESS
12665 dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
12666 #endif /* DHDTCPACK_SUPPRESS */
12667 dhd_net_bus_devreset(net, TRUE);
12668 #ifdef BCMLXSDMMC
12669 dhd_net_bus_suspend(net);
12670 #endif /* BCMLXSDMMC */
12671 wifi_platform_set_power(dhdp->info->adapter, FALSE, WIFI_TURNOFF_DELAY);
12672 #if defined(BT_OVER_SDIO)
12673 dhd->bus_user_count--;
12674 #endif /* BT_OVER_SDIO */
12675 }
12676 #if defined(WL_WIRELESS_EXT)
12677 #ifdef WL_ESCAN
12678 wl_escan_down(&dhd->pub);
12679 #endif /* WL_ESCAN */
12680 #endif /* defined(WL_WIRELESS_EXT) */
12681 }
12682 #endif /* BCMPCIE || (BCMLXSDMMC && KERNEL_VERSION >= 2.6.27) || BCMDBUS */
12683 return 0;
12684
12685 fail:
12686 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
12687 net->open = NULL;
12688 #else
12689 net->netdev_ops = NULL;
12690 #endif
12691 return err;
12692 }
12693
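/*
 * Illustrative sketch: need_rtnl_lock selects between register_netdev(),
 * which takes the rtnl lock itself, and register_netdevice(), which assumes
 * the caller already holds it. A hypothetical caller inside an rtnl section:
 */
static int example_register_if_under_rtnl(dhd_pub_t *dhdp, int ifidx)
{
	ASSERT(rtnl_is_locked());
	return dhd_register_if(dhdp, ifidx, FALSE /* need_rtnl_lock */);
}
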
12694 void
12695 dhd_bus_detach(dhd_pub_t *dhdp)
12696 {
12697 dhd_info_t *dhd;
12698
12699 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
12700
12701 if (dhdp) {
12702 dhd = (dhd_info_t *)dhdp->info;
12703 if (dhd) {
12704 /*
12705 * In case of Android cfg80211 driver, the bus is down in dhd_stop,
12706 * calling stop again will cause SD read/write errors.
12707 */
12708 if (dhd->pub.busstate != DHD_BUS_DOWN && dhd_download_fw_on_driverload) {
12709 /* Stop the protocol module */
12710 dhd_prot_stop(&dhd->pub);
12711
12712 /* Stop the bus module */
12713 #ifdef BCMDBUS
12714 /* Force Dongle terminated */
12715 if (dhd_wl_ioctl_cmd(dhdp, WLC_TERMINATED, NULL, 0, TRUE, 0) < 0)
12716 DHD_ERROR(("%s Setting WLC_TERMINATED failed\n",
12717 __FUNCTION__));
12718 dbus_stop(dhd->pub.bus);
12719 dhd->pub.busstate = DHD_BUS_DOWN;
12720 #else
12721 dhd_bus_stop(dhd->pub.bus, TRUE);
12722 #endif /* BCMDBUS */
12723 }
12724
12725 #if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE)
12726 dhd_bus_oob_intr_unregister(dhdp);
12727 #endif
12728 }
12729 }
12730 }
12731
12732
12733 void dhd_detach(dhd_pub_t *dhdp)
12734 {
12735 dhd_info_t *dhd;
12736 unsigned long flags;
12737 int timer_valid = FALSE;
12738 struct net_device *dev;
12739 #ifdef WL_CFG80211
12740 struct bcm_cfg80211 *cfg = NULL;
12741 #endif
12742 #ifdef HOFFLOAD_MODULES
12743 struct module_metadata *hmem = NULL;
12744 #endif
12745 if (!dhdp)
12746 return;
12747
12748 dhd = (dhd_info_t *)dhdp->info;
12749 if (!dhd)
12750 return;
12751
12752 dev = dhd->iflist[0]->net;
12753
12754 if (dev) {
12755 rtnl_lock();
12756 if (dev->flags & IFF_UP) {
12757 /* If IFF_UP is still up, it indicates that
12758 * "ifconfig wlan0 down" hasn't been called.
12759 * So invoke dev_close explicitly here to
12760 * bring down the interface.
12761 */
12762 DHD_TRACE(("IFF_UP flag is up. Enforcing dev_close from detach \n"));
12763 dev_close(dev);
12764 }
12765 rtnl_unlock();
12766 }
12767
12768 DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));
12769
12770 dhd->pub.up = 0;
12771 if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
12772 /* Give sufficient time for threads to start running in case
12773 * dhd_attach() has failed
12774 */
12775 OSL_SLEEP(100);
12776 }
12777 #ifdef DHD_WET
12778 dhd_free_wet_info(&dhd->pub, dhd->pub.wet_info);
12779 #endif /* DHD_WET */
12780 #if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
12781 #endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
12782
12783 #ifdef PROP_TXSTATUS
12784 #ifdef DHD_WLFC_THREAD
12785 if (dhd->pub.wlfc_thread) {
12786 kthread_stop(dhd->pub.wlfc_thread);
12787 dhdp->wlfc_thread_go = TRUE;
12788 wake_up_interruptible(&dhdp->wlfc_wqhead);
12789 }
12790 dhd->pub.wlfc_thread = NULL;
12791 #endif /* DHD_WLFC_THREAD */
12792 #endif /* PROP_TXSTATUS */
12793
12794 #ifdef DHD_TIMESYNC
12795 if (dhd->dhd_state & DHD_ATTACH_TIMESYNC_ATTACH_DONE) {
12796 dhd_timesync_detach(dhdp);
12797 }
12798 #endif /* DHD_TIMESYNC */
12799 #ifdef WL_CFG80211
12800 if (dev) {
12801 wl_cfg80211_down(dev);
12802 }
12803 #endif /* WL_CFG80211 */
12804
12805 if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {
12806 dhd_bus_detach(dhdp);
12807 #ifdef BCMPCIE
12808 if (is_reboot == SYS_RESTART) {
12809 extern bcmdhd_wifi_platdata_t *dhd_wifi_platdata;
12810 if (dhd_wifi_platdata && !dhdp->dongle_reset) {
12811 dhdpcie_bus_clock_stop(dhdp->bus);
12812 wifi_platform_set_power(dhd_wifi_platdata->adapters,
12813 FALSE, WIFI_TURNOFF_DELAY);
12814 }
12815 }
12816 #endif /* BCMPCIE */
12817 #ifndef PCIE_FULL_DONGLE
12818 if (dhdp->prot)
12819 dhd_prot_detach(dhdp);
12820 #endif /* !PCIE_FULL_DONGLE */
12821 }
12822
12823 #ifdef ARP_OFFLOAD_SUPPORT
12824 if (dhd_inetaddr_notifier_registered) {
12825 dhd_inetaddr_notifier_registered = FALSE;
12826 unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
12827 }
12828 #endif /* ARP_OFFLOAD_SUPPORT */
12829 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
12830 if (dhd_inet6addr_notifier_registered) {
12831 dhd_inet6addr_notifier_registered = FALSE;
12832 unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
12833 }
12834 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
12835 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
12836 if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) {
12837 if (dhd->early_suspend.suspend)
12838 unregister_early_suspend(&dhd->early_suspend);
12839 }
12840 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
12841
12842 #if defined(WL_WIRELESS_EXT)
12843 #ifdef WL_ESCAN
12844 wl_escan_detach(dhdp);
12845 #else
12846 if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) {
12847 /* Detach and unlink in the iw */
12848 wl_iw_detach(dhdp);
12849 }
12850 #endif /* WL_ESCAN */
12851 #endif /* defined(WL_WIRELESS_EXT) */
12852 #ifdef WL_EXT_IAPSTA
12853 wl_ext_iapsta_dettach(dhdp);
12854 #endif
12855
12856 #ifdef DHD_ULP
12857 dhd_ulp_deinit(dhd->pub.osh, dhdp);
12858 #endif /* DHD_ULP */
12859
12860 /* delete all interfaces, start with virtual */
12861 if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) {
12862 int i = 1;
12863 dhd_if_t *ifp;
12864
12865 /* Cleanup virtual interfaces */
12866 dhd_net_if_lock_local(dhd);
12867 for (i = 1; i < DHD_MAX_IFS; i++) {
12868 if (dhd->iflist[i]) {
12869 dhd_remove_if(&dhd->pub, i, TRUE);
12870 }
12871 }
12872 dhd_net_if_unlock_local(dhd);
12873
12874 /* delete primary interface 0 */
12875 ifp = dhd->iflist[0];
12876 ASSERT(ifp);
12877 ASSERT(ifp->net);
12878 if (ifp && ifp->net) {
12879 #ifdef WL_CFG80211
12880 cfg = wl_get_cfg(ifp->net);
12881 #endif
12882 /* in unregister_netdev case, the interface gets freed by net->destructor
12883 * (which is set to free_netdev)
12884 */
12885 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
12886 free_netdev(ifp->net);
12887 } else {
12888 argos_register_notifier_deinit();
12889 #ifdef SET_RPS_CPUS
12890 custom_rps_map_clear(ifp->net->_rx);
12891 #endif /* SET_RPS_CPUS */
12892 netif_tx_disable(ifp->net);
12893 unregister_netdev(ifp->net);
12894 }
12895 #ifdef PCIE_FULL_DONGLE
12896 ifp->net = DHD_NET_DEV_NULL;
12897 #else
12898 ifp->net = NULL;
12899 #endif /* PCIE_FULL_DONGLE */
12900
12901 #ifdef DHD_WMF
12902 dhd_wmf_cleanup(dhdp, 0);
12903 #endif /* DHD_WMF */
12904 #ifdef DHD_L2_FILTER
12905 bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE,
12906 NULL, FALSE, dhdp->tickcnt);
12907 deinit_l2_filter_arp_table(dhdp->osh, ifp->phnd_arp_table);
12908 ifp->phnd_arp_table = NULL;
12909 #endif /* DHD_L2_FILTER */
12910
12911
12912 dhd_if_del_sta_list(ifp);
12913
12914 MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
12915 dhd->iflist[0] = NULL;
12916 }
12917 }
12918
12919 /* Clear the watchdog timer */
12920 DHD_GENERAL_LOCK(&dhd->pub, flags);
12921 timer_valid = dhd->wd_timer_valid;
12922 dhd->wd_timer_valid = FALSE;
12923 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
12924 if (timer_valid)
12925 del_timer_sync(&dhd->timer);
12926 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
12927
12928 #ifdef BCMDBUS
12929 tasklet_kill(&dhd->tasklet);
12930 #else
12931 if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) {
12932 #ifdef DHD_PCIE_RUNTIMEPM
12933 if (dhd->thr_rpm_ctl.thr_pid >= 0) {
12934 PROC_STOP(&dhd->thr_rpm_ctl);
12935 }
12936 #endif /* DHD_PCIE_RUNTIMEPM */
12937 if (dhd->thr_wdt_ctl.thr_pid >= 0) {
12938 PROC_STOP(&dhd->thr_wdt_ctl);
12939 }
12940
12941 if (dhd->rxthread_enabled && dhd->thr_rxf_ctl.thr_pid >= 0) {
12942 PROC_STOP(&dhd->thr_rxf_ctl);
12943 }
12944
12945 if (dhd->thr_dpc_ctl.thr_pid >= 0) {
12946 PROC_STOP(&dhd->thr_dpc_ctl);
12947 } else
12948 {
12949 tasklet_kill(&dhd->tasklet);
12950 }
12951 }
12952 #endif /* BCMDBUS */
12953
12954 #ifdef DHD_LB
12955 if (dhd->dhd_state & DHD_ATTACH_STATE_LB_ATTACH_DONE) {
12956 /* Clear the flag first to avoid calling the cpu notifier */
12957 dhd->dhd_state &= ~DHD_ATTACH_STATE_LB_ATTACH_DONE;
12958
12959 /* Kill the Load Balancing Tasklets */
12960 #ifdef DHD_LB_RXP
12961 cancel_work_sync(&dhd->rx_napi_dispatcher_work);
12962 __skb_queue_purge(&dhd->rx_pend_queue);
12963 #endif /* DHD_LB_RXP */
12964 #ifdef DHD_LB_TXP
12965 cancel_work_sync(&dhd->tx_dispatcher_work);
12966 tasklet_kill(&dhd->tx_tasklet);
12967 __skb_queue_purge(&dhd->tx_pend_queue);
12968 #endif /* DHD_LB_TXP */
12969 #ifdef DHD_LB_TXC
12970 cancel_work_sync(&dhd->tx_compl_dispatcher_work);
12971 tasklet_kill(&dhd->tx_compl_tasklet);
12972 #endif /* DHD_LB_TXC */
12973 #ifdef DHD_LB_RXC
12974 tasklet_kill(&dhd->rx_compl_tasklet);
12975 #endif /* DHD_LB_RXC */
12976
12977 if (dhd->cpu_notifier.notifier_call != NULL) {
12978 unregister_cpu_notifier(&dhd->cpu_notifier);
12979 }
12980 dhd_cpumasks_deinit(dhd);
12981 DHD_LB_STATS_DEINIT(&dhd->pub);
12982 }
12983 #endif /* DHD_LB */
12984
12985 DHD_SSSR_MEMPOOL_DEINIT(&dhd->pub);
12986
12987 #ifdef DHD_LOG_DUMP
12988 dhd_log_dump_deinit(&dhd->pub);
12989 #endif /* DHD_LOG_DUMP */
12990 #ifdef WL_CFG80211
12991 if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
12992 if (!cfg) {
12993 DHD_ERROR(("cfg NULL!\n"));
12994 ASSERT(0);
12995 } else {
12996 wl_cfg80211_detach(cfg);
12997 dhd_monitor_uninit();
12998 }
12999 }
13000 #endif
13001
13002 #ifdef DEBUGABILITY
13003 if (dhdp->dbg) {
13004 #ifdef DBG_PKT_MON
13005 dhd_os_dbg_detach_pkt_monitor(dhdp);
13006 dhd_os_spin_lock_deinit(dhd->pub.osh, dhd->pub.dbg->pkt_mon_lock);
13007 #endif /* DBG_PKT_MON */
13008 dhd_os_dbg_detach(dhdp);
13009 }
13010 #endif /* DEBUGABILITY */
13011 #ifdef SHOW_LOGTRACE
13012 #ifdef DHD_PKT_LOGGING
13013 dhd_os_detach_pktlog(dhdp);
13014 #endif /* DHD_PKT_LOGGING */
13015 /* Release the skbs from queue for WLC_E_TRACE event */
13016 dhd_event_logtrace_flush_queue(dhdp);
13017
13018 if (dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT) {
13019 if (dhd->event_data.fmts) {
13020 MFREE(dhd->pub.osh, dhd->event_data.fmts,
13021 dhd->event_data.fmts_size);
13022 dhd->event_data.fmts = NULL;
13023 }
13024 if (dhd->event_data.raw_fmts) {
13025 MFREE(dhd->pub.osh, dhd->event_data.raw_fmts,
13026 dhd->event_data.raw_fmts_size);
13027 dhd->event_data.raw_fmts = NULL;
13028 }
13029 if (dhd->event_data.raw_sstr) {
13030 MFREE(dhd->pub.osh, dhd->event_data.raw_sstr,
13031 dhd->event_data.raw_sstr_size);
13032 dhd->event_data.raw_sstr = NULL;
13033 }
13034 if (dhd->event_data.rom_raw_sstr) {
13035 MFREE(dhd->pub.osh, dhd->event_data.rom_raw_sstr,
13036 dhd->event_data.rom_raw_sstr_size);
13037 dhd->event_data.rom_raw_sstr = NULL;
13038 }
13039 dhd->dhd_state &= ~DHD_ATTACH_LOGTRACE_INIT;
13040 }
13041 #endif /* SHOW_LOGTRACE */
13042 #ifdef BCMPCIE
13043 if (dhdp->extended_trap_data)
13044 {
13045 MFREE(dhdp->osh, dhdp->extended_trap_data, BCMPCIE_EXT_TRAP_DATA_MAXLEN);
13046 dhdp->extended_trap_data = NULL;
13047 }
13048 #endif /* BCMPCIE */
13049 #ifdef PNO_SUPPORT
13050 if (dhdp->pno_state)
13051 dhd_pno_deinit(dhdp);
13052 #endif
13053 #ifdef RTT_SUPPORT
13054 if (dhdp->rtt_state) {
13055 dhd_rtt_deinit(dhdp);
13056 }
13057 #endif
13058 #if defined(CONFIG_PM_SLEEP)
13059 if (dhd_pm_notifier_registered) {
13060 unregister_pm_notifier(&dhd->pm_notifier);
13061 dhd_pm_notifier_registered = FALSE;
13062 }
13063 #endif /* CONFIG_PM_SLEEP */
13064
13065 #ifdef DEBUG_CPU_FREQ
13066 if (dhd->new_freq)
13067 free_percpu(dhd->new_freq);
13068 dhd->new_freq = NULL;
13069 cpufreq_unregister_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
13070 #endif
13071 DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter));
13072 #ifdef CONFIG_HAS_WAKELOCK
13073 dhd->wakelock_wd_counter = 0;
13074 wake_lock_destroy(&dhd->wl_wdwake);
13075 // terence 20161023: do not destroy wl_wifi while wlan is down; it leads to a NULL pointer dereference in dhd_ioctl_entry
13076 wake_lock_destroy(&dhd->wl_wifi);
13077 #endif /* CONFIG_HAS_WAKELOCK */
13078 if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) {
13079 DHD_OS_WAKE_LOCK_DESTROY(dhd);
13080 }
13081
13082
13083
13084 #ifdef DHDTCPACK_SUPPRESS
13085 /* This will free all MEM allocated for TCPACK SUPPRESS */
13086 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
13087 #endif /* DHDTCPACK_SUPPRESS */
13088
13089 #ifdef PCIE_FULL_DONGLE
13090 dhd_flow_rings_deinit(dhdp);
13091 if (dhdp->prot)
13092 dhd_prot_detach(dhdp);
13093 #endif
13094
13095 #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
13096 dhd_free_tdls_peer_list(dhdp);
13097 #endif
13098
13099 #ifdef HOFFLOAD_MODULES
13100 hmem = &dhdp->hmem;
13101 dhd_free_module_memory(dhdp->bus, hmem);
13102 #endif /* HOFFLOAD_MODULES */
13103 #if defined(BT_OVER_SDIO)
13104 mutex_destroy(&dhd->bus_user_lock);
13105 #endif /* BT_OVER_SDIO */
13106 #ifdef DUMP_IOCTL_IOV_LIST
13107 dhd_iov_li_delete(dhdp, &(dhdp->dump_iovlist_head));
13108 #endif /* DUMP_IOCTL_IOV_LIST */
13109 #ifdef DHD_DEBUG
13110 /* memory waste feature list cleanup */
13111 dhd_mw_list_delete(dhdp, &(dhdp->mw_list_head));
13112 #endif /* DHD_DEBUG */
13113 #ifdef WL_MONITOR
13114 dhd_del_monitor_if(dhd, NULL, DHD_WQ_WORK_IF_DEL);
13115 #endif /* WL_MONITOR */
13116
13117 /* Prefer adding de-init code above this comment unless necessary.
13118 * The idea is to cancel work queue, sysfs and flags at the end.
13119 */
13120 dhd_deferred_work_deinit(dhd->dhd_deferred_wq);
13121 dhd->dhd_deferred_wq = NULL;
13122
13123 #ifdef SHOW_LOGTRACE
13124 /* Wait till event_log_dispatcher_work finishes */
13125 cancel_work_sync(&dhd->event_log_dispatcher_work);
13126 #endif /* SHOW_LOGTRACE */
13127
13128 dhd_sysfs_exit(dhd);
13129 dhd->pub.fw_download_done = FALSE;
13130 dhd_conf_detach(dhdp);
13131 }
13132
13133
13134 void
13135 dhd_free(dhd_pub_t *dhdp)
13136 {
13137 dhd_info_t *dhd;
13138 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
13139
13140 if (dhdp) {
13141 int i;
13142 for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
13143 if (dhdp->reorder_bufs[i]) {
13144 reorder_info_t *ptr;
13145 uint32 buf_size = sizeof(struct reorder_info);
13146
13147 ptr = dhdp->reorder_bufs[i];
13148
13149 buf_size += ((ptr->max_idx + 1) * sizeof(void*));
13150 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
13151 i, ptr->max_idx, buf_size));
13152
13153 MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
13154 dhdp->reorder_bufs[i] = NULL;
13155 }
13156 }
13157
13158 dhd_sta_pool_fini(dhdp, DHD_MAX_STA);
13159
13160 dhd = (dhd_info_t *)dhdp->info;
13161 if (dhdp->soc_ram) {
13162 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
13163 DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
13164 #else
13165 MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
13166 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
13167 dhdp->soc_ram = NULL;
13168 }
13169 #ifdef CACHE_FW_IMAGES
13170 if (dhdp->cached_fw) {
13171 MFREE(dhdp->osh, dhdp->cached_fw, dhdp->bus->ramsize);
13172 dhdp->cached_fw = NULL;
13173 }
13174
13175 if (dhdp->cached_nvram) {
13176 MFREE(dhdp->osh, dhdp->cached_nvram, MAX_NVRAMBUF_SIZE);
13177 dhdp->cached_nvram = NULL;
13178 }
13179 #endif
13180 if (dhd) {
13181 #ifdef REPORT_FATAL_TIMEOUTS
13182 deinit_dhd_timeouts(&dhd->pub);
13183 #endif /* REPORT_FATAL_TIMEOUTS */
13184
13185 /* If pointer is allocated by dhd_os_prealloc then avoid MFREE */
13186 if (dhd != (dhd_info_t *)dhd_os_prealloc(dhdp,
13187 DHD_PREALLOC_DHD_INFO, 0, FALSE))
13188 MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
13189 dhd = NULL;
13190 }
13191 }
13192 }
13193
13194 void
13195 dhd_clear(dhd_pub_t *dhdp)
13196 {
13197 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
13198
13199 if (dhdp) {
13200 int i;
13201 #ifdef DHDTCPACK_SUPPRESS
13202 /* Clean up timer/data structure for any remaining/pending packet or timer. */
13203 dhd_tcpack_info_tbl_clean(dhdp);
13204 #endif /* DHDTCPACK_SUPPRESS */
13205 for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
13206 if (dhdp->reorder_bufs[i]) {
13207 reorder_info_t *ptr;
13208 uint32 buf_size = sizeof(struct reorder_info);
13209
13210 ptr = dhdp->reorder_bufs[i];
13211
13212 buf_size += ((ptr->max_idx + 1) * sizeof(void*));
13213 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
13214 i, ptr->max_idx, buf_size));
13215
13216 MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
13217 dhdp->reorder_bufs[i] = NULL;
13218 }
13219 }
13220
13221 dhd_sta_pool_clear(dhdp, DHD_MAX_STA);
13222
13223 if (dhdp->soc_ram) {
13224 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
13225 DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
13226 #else
13227 MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
13228 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
13229 dhdp->soc_ram = NULL;
13230 }
13231 }
13232 }
13233
13234 static void
13235 dhd_module_cleanup(void)
13236 {
13237 printf("%s: Enter\n", __FUNCTION__);
13238
13239 dhd_bus_unregister();
13240
13241 wl_android_exit();
13242
13243 dhd_wifi_platform_unregister_drv();
13244 printf("%s: Exit\n", __FUNCTION__);
13245 }
13246
13247 static void __exit
13248 dhd_module_exit(void)
13249 {
13250 atomic_set(&exit_in_progress, 1);
13251 dhd_module_cleanup();
13252 unregister_reboot_notifier(&dhd_reboot_notifier);
13253 dhd_destroy_to_notifier_skt();
13254 }
13255
13256 static int __init
13257 dhd_module_init(void)
13258 {
13259 int err;
13260 int retry = POWERUP_MAX_RETRY;
13261
13262 printf("%s: in %s\n", __FUNCTION__, dhd_version);
13263
13264 DHD_PERIM_RADIO_INIT();
13265
13266
13267 if (firmware_path[0] != '\0') {
13268 strncpy(fw_bak_path, firmware_path, MOD_PARAM_PATHLEN);
13269 fw_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
13270 }
13271
13272 if (nvram_path[0] != '\0') {
13273 strncpy(nv_bak_path, nvram_path, MOD_PARAM_PATHLEN);
13274 nv_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
13275 }
13276
13277 do {
13278 err = dhd_wifi_platform_register_drv();
13279 if (!err) {
13280 register_reboot_notifier(&dhd_reboot_notifier);
13281 break;
13282 } else {
13283 DHD_ERROR(("%s: Failed to load the driver, try cnt %d\n",
13284 __FUNCTION__, retry));
13285 strncpy(firmware_path, fw_bak_path, MOD_PARAM_PATHLEN);
13286 firmware_path[MOD_PARAM_PATHLEN-1] = '\0';
13287 strncpy(nvram_path, nv_bak_path, MOD_PARAM_PATHLEN);
13288 nvram_path[MOD_PARAM_PATHLEN-1] = '\0';
13289 }
13290 } while (retry--);
13291
13292 dhd_create_to_notifier_skt();
13293
13294 if (err) {
13295 DHD_ERROR(("%s: Failed to load driver max retry reached**\n", __FUNCTION__));
13296 } else {
13297 if (!dhd_download_fw_on_driverload) {
13298 dhd_driver_init_done = TRUE;
13299 }
13300 }
13301
13302 printf("%s: Exit err=%d\n", __FUNCTION__, err);
13303 return err;
13304 }
13305
13306 static int
13307 dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused)
13308 {
13309 DHD_TRACE(("%s: code = %ld\n", __FUNCTION__, code));
13310 if (code == SYS_RESTART) {
13311 #ifdef BCMPCIE
13312 is_reboot = code;
13313 #endif /* BCMPCIE */
13314 }
13315 return NOTIFY_DONE;
13316 }
13317
13318
13319 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
13320 #if defined(CONFIG_DEFERRED_INITCALLS) && !defined(EXYNOS_PCIE_MODULE_PATCH)
13321 #if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS8890) || \
13322 defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8895) || \
13323 defined(CONFIG_ARCH_MSM8998)
13324 deferred_module_init_sync(dhd_module_init);
13325 #else
13326 deferred_module_init(dhd_module_init);
13327 #endif /* CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 ||
13328 * CONFIG_ARCH_MSM8996 || CONFIG_SOC_EXYNOS8895 || CONFIG_ARCH_MSM8998
13329 */
13330 #elif defined(USE_LATE_INITCALL_SYNC)
13331 late_initcall_sync(dhd_module_init);
13332 #else
13333 late_initcall(dhd_module_init);
13334 #endif /* USE_LATE_INITCALL_SYNC */
13335 #else
13336 module_init(dhd_module_init);
13337 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
13338
13339 module_exit(dhd_module_exit);
13340
13341 /*
13342 * OS-specific functions required to implement the DHD driver in an OS-independent way
13343 */
13344 int
13345 dhd_os_proto_block(dhd_pub_t *pub)
13346 {
13347 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13348
13349 if (dhd) {
13350 DHD_PERIM_UNLOCK(pub);
13351
13352 down(&dhd->proto_sem);
13353
13354 DHD_PERIM_LOCK(pub);
13355 return 1;
13356 }
13357
13358 return 0;
13359 }
13360
13361 int
13362 dhd_os_proto_unblock(dhd_pub_t *pub)
13363 {
13364 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13365
13366 if (dhd) {
13367 up(&dhd->proto_sem);
13368 return 1;
13369 }
13370
13371 return 0;
13372 }
13373
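/*
 * Illustrative sketch: dhd_os_proto_block()/dhd_os_proto_unblock() above
 * bracket a protocol transaction so only one control message is in flight.
 * The body of the critical section here is a placeholder.
 */
static int example_proto_transaction(dhd_pub_t *pub)
{
	int err = BCME_ERROR;

	if (dhd_os_proto_block(pub)) {
		/* ... issue a control message to the dongle here ... */
		err = BCME_OK;
		dhd_os_proto_unblock(pub);
	}
	return err;
}
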
13374 void
13375 dhd_os_dhdiovar_lock(dhd_pub_t *pub)
13376 {
13377 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13378
13379 if (dhd) {
13380 mutex_lock(&dhd->dhd_iovar_mutex);
13381 }
13382 }
13383
13384 void
13385 dhd_os_dhdiovar_unlock(dhd_pub_t *pub)
13386 {
13387 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13388
13389 if (dhd) {
13390 mutex_unlock(&dhd->dhd_iovar_mutex);
13391 }
13392 }
13393
13394 unsigned int
13395 dhd_os_get_ioctl_resp_timeout(void)
13396 {
13397 return ((unsigned int)dhd_ioctl_timeout_msec);
13398 }
13399
13400 void
13401 dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
13402 {
13403 dhd_ioctl_timeout_msec = (int)timeout_msec;
13404 }
13405
13406 int
13407 dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition, bool resched)
13408 {
13409 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13410 int timeout, timeout_tmp = dhd_ioctl_timeout_msec;
13411
13412 if (!resched && pub->conf->ctrl_resched>0 && pub->conf->dhd_ioctl_timeout_msec>0) {
13413 timeout_tmp = dhd_ioctl_timeout_msec;
13414 dhd_ioctl_timeout_msec = pub->conf->dhd_ioctl_timeout_msec;
13415 }
13416
13417 /* Convert timeout in milliseconds to jiffies */
13418 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
13419 timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
13420 #else
13421 timeout = dhd_ioctl_timeout_msec * HZ / 1000;
13422 #endif
13423
13424 DHD_PERIM_UNLOCK(pub);
13425
13426 timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout);
13427
13428 if (!resched && pub->conf->ctrl_resched>0 && pub->conf->dhd_ioctl_timeout_msec>0) {
13429 dhd_ioctl_timeout_msec = timeout_tmp;
13430 }
13431
13432 DHD_PERIM_LOCK(pub);
13433
13434 return timeout;
13435 }
13436
13437 int
13438 dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
13439 {
13440 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
13441
13442 wake_up(&dhd->ioctl_resp_wait);
13443 return 0;
13444 }
13445
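/*
 * Illustrative sketch of the wait/wake pairing above: the ioctl path sleeps
 * on a condition word while the bus completion path sets it and calls
 * dhd_os_ioctl_resp_wake(). 'resp_received' is a hypothetical flag.
 */
static int example_wait_ioctl_resp(dhd_pub_t *pub, uint *resp_received)
{
	/* wait_event_timeout() returns 0 on timeout with the condition false */
	if (dhd_os_ioctl_resp_wait(pub, resp_received, FALSE) == 0 &&
		!(*resp_received)) {
		return BCME_ERROR;
	}
	return BCME_OK;
}
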
13446 int
13447 dhd_os_d3ack_wait(dhd_pub_t *pub, uint *condition)
13448 {
13449 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13450 int timeout;
13451
13452 /* Convert timeout in milliseconds to jiffies */
13453 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
13454 timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
13455 #else
13456 timeout = dhd_ioctl_timeout_msec * HZ / 1000;
13457 #endif
13458
13459 DHD_PERIM_UNLOCK(pub);
13460
13461 timeout = wait_event_timeout(dhd->d3ack_wait, (*condition), timeout);
13462
13463 DHD_PERIM_LOCK(pub);
13464
13465 return timeout;
13466 }
13467
13468 #ifdef PCIE_INB_DW
13469 int
13470 dhd_os_ds_exit_wait(dhd_pub_t *pub, uint *condition)
13471 {
13472 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13473 int timeout;
13474
13475 /* Convert timeout in milliseconds to jiffies */
13476 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
13477 timeout = msecs_to_jiffies(ds_exit_timeout_msec);
13478 #else
13479 timeout = ds_exit_timeout_msec * HZ / 1000;
13480 #endif
13481
13482 DHD_PERIM_UNLOCK(pub);
13483
13484 timeout = wait_event_timeout(dhd->ds_exit_wait, (*condition), timeout);
13485
13486 DHD_PERIM_LOCK(pub);
13487
13488 return timeout;
13489 }
13490
13491 int
13492 dhd_os_ds_exit_wake(dhd_pub_t *pub)
13493 {
13494 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
13495
13496 wake_up(&dhd->ds_exit_wait);
13497 return 0;
13498 }
13499
13500 #endif /* PCIE_INB_DW */
13501
13502 int
13503 dhd_os_d3ack_wake(dhd_pub_t *pub)
13504 {
13505 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
13506
13507 wake_up(&dhd->d3ack_wait);
13508 return 0;
13509 }
13510
13511 int
13512 dhd_os_busbusy_wait_negation(dhd_pub_t *pub, uint *condition)
13513 {
13514 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13515 int timeout;
13516
13517 /* Wait for bus usage contexts to gracefully exit within some timeout value.
13518 * Set the timeout a little higher than dhd_ioctl_timeout_msec,
13519 * so that the IOCTL timeout is not affected.
13520 */
13521 /* Convert timeout in milliseconds to jiffies */
13522 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
13523 timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
13524 #else
13525 timeout = DHD_BUS_BUSY_TIMEOUT * HZ / 1000;
13526 #endif
13527
13528 timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, !(*condition), timeout);
13529
13530 return timeout;
13531 }
13532
13533 /*
13534 * Wait until the condition *var == condition is met.
13535 * Returns 0 if the condition was still false when the timeout elapsed.
13536 * Returns non-zero (the remaining time, at least 1) if the condition was met.
13537 */
13538 int
13539 dhd_os_busbusy_wait_condition(dhd_pub_t *pub, uint *var, uint condition)
13540 {
13541 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13542 int timeout;
13543
13544 /* Convert timeout in milliseconds to jiffies */
13545 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
13546 timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
13547 #else
13548 timeout = DHD_BUS_BUSY_TIMEOUT * HZ / 1000;
13549 #endif
13550
13551 timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, (*var == condition), timeout);
13552
13553 return timeout;
13554 }
13555
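/*
 * Illustrative sketch: a suspend or reset path can use the helper above to
 * wait for all in-flight bus users to drain before proceeding. The busy-state
 * word passed in stands in for pub->dhd_bus_busy_state.
 */
static int example_wait_bus_idle(dhd_pub_t *pub, uint *busy_state)
{
	/* wait_event_timeout() semantics: 0 means timed out, condition false */
	if (dhd_os_busbusy_wait_negation(pub, busy_state) == 0)
		return BCME_BUSY;
	return BCME_OK;
}
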
13556
13557 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36))
13558 /* Fix compilation error for FC11 */
13559 INLINE
13560 #endif
13561 int
13562 dhd_os_busbusy_wake(dhd_pub_t *pub)
13563 {
13564 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
13565 /* Call wmb() to ensure the condition value is updated before the wakeup */
13566 OSL_SMP_WMB();
13567 wake_up(&dhd->dhd_bus_busy_state_wait);
13568 return 0;
13569 }
13570
13571 void
13572 dhd_os_wd_timer_extend(void *bus, bool extend)
13573 {
13574 #ifndef BCMDBUS
13575 dhd_pub_t *pub = bus;
13576 dhd_info_t *dhd = (dhd_info_t *)pub->info;
13577
13578 if (extend)
13579 dhd_os_wd_timer(bus, WATCHDOG_EXTEND_INTERVAL);
13580 else
13581 dhd_os_wd_timer(bus, dhd->default_wd_interval);
13582 #endif /* !BCMDBUS */
13583 }
13584
13585
13586 void
13587 dhd_os_wd_timer(void *bus, uint wdtick)
13588 {
13589 #ifndef BCMDBUS
13590 dhd_pub_t *pub = bus;
13591 dhd_info_t *dhd = (dhd_info_t *)pub->info;
13592 unsigned long flags;
13593
13594 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
13595
13596 if (!dhd) {
13597 DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
13598 return;
13599 }
13600
13601 DHD_GENERAL_LOCK(pub, flags);
13602
13603 /* don't start the wd until fw is loaded */
13604 if (pub->busstate == DHD_BUS_DOWN) {
13605 DHD_GENERAL_UNLOCK(pub, flags);
13606 return;
13607 }
13608
13609 /* Totally stop the timer */
13610 if (!wdtick && dhd->wd_timer_valid == TRUE) {
13611 dhd->wd_timer_valid = FALSE;
13612 DHD_GENERAL_UNLOCK(pub, flags);
13613 del_timer_sync(&dhd->timer);
13614 return;
13615 }
13616
13617 if (wdtick) {
13618 dhd_watchdog_ms = (uint)wdtick;
13619 /* Re-arm the timer with the last watchdog period */
13620 mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
13621 dhd->wd_timer_valid = TRUE;
13622 }
13623 DHD_GENERAL_UNLOCK(pub, flags);
13624 #endif /* !BCMDBUS */
13625 }
13626
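/*
 * Illustrative sketch: passing wdtick == 0 to dhd_os_wd_timer() above stops
 * the watchdog with del_timer_sync(); any non-zero tick re-arms it with that
 * period in milliseconds. A hypothetical pause/resume sequence:
 */
static void example_pause_watchdog(dhd_pub_t *pub, uint saved_ms)
{
	dhd_os_wd_timer(pub, 0);        /* stop the watchdog */
	/* ... work that must run with the watchdog quiesced ... */
	dhd_os_wd_timer(pub, saved_ms); /* restart with the previous period */
}
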
13627 #ifdef DHD_PCIE_RUNTIMEPM
13628 void
13629 dhd_os_runtimepm_timer(void *bus, uint tick)
13630 {
13631 dhd_pub_t *pub = bus;
13632 dhd_info_t *dhd = (dhd_info_t *)pub->info;
13633 unsigned long flags;
13634
13635 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
13636
13637 if (!dhd) {
13638 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
13639 return;
13640 }
13641
13642 DHD_GENERAL_LOCK(pub, flags);
13643
13644 /* don't start the RPM until fw is loaded */
13645 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(pub)) {
13646 DHD_GENERAL_UNLOCK(pub, flags);
13647 return;
13648 }
13649
13650 /* If tick is non-zero, the request is to start the timer */
13651 if (tick) {
13652 /* Start the timer only if its not already running */
13653 if (dhd->rpm_timer_valid == FALSE) {
13654 mod_timer(&dhd->rpm_timer, jiffies + msecs_to_jiffies(dhd_runtimepm_ms));
13655 dhd->rpm_timer_valid = TRUE;
13656 }
13657 } else {
13658 /* tick is zero, we have to stop the timer */
13659 /* Stop the timer only if its running, otherwise we don't have to do anything */
13660 if (dhd->rpm_timer_valid == TRUE) {
13661 dhd->rpm_timer_valid = FALSE;
13662 DHD_GENERAL_UNLOCK(pub, flags);
13663 del_timer_sync(&dhd->rpm_timer);
13664 /* we have already released the lock, so just go to exit */
13665 goto exit;
13666 }
13667 }
13668
13669 DHD_GENERAL_UNLOCK(pub, flags);
13670 exit:
13671 return;
13672 }
13673
13674 #endif /* DHD_PCIE_RUNTIMEPM */
13675
13676 void *
13677 dhd_os_open_image(char *filename)
13678 {
13679 struct file *fp;
13680 int size;
13681
13682 fp = filp_open(filename, O_RDONLY, 0);
13683 /*
13684 * 2.6.11 (FC4) supports filp_open() but later revs don't?
13685 * Alternative:
13686 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
13687 * ???
13688 */
13689 if (IS_ERR(fp)) {
13690 fp = NULL;
13691 goto err;
13692 }
13693
13694 if (!S_ISREG(file_inode(fp)->i_mode)) {
13695 DHD_ERROR(("%s: %s is not regular file\n", __FUNCTION__, filename));
13696 fp = NULL;
13697 goto err;
13698 }
13699
13700 size = i_size_read(file_inode(fp));
13701 if (size <= 0) {
13702 DHD_ERROR(("%s: %s file size invalid %d\n", __FUNCTION__, filename, size));
13703 fp = NULL;
13704 goto err;
13705 }
13706
13707 DHD_ERROR(("%s: %s (%d bytes) open success\n", __FUNCTION__, filename, size));
13708
13709 err:
13710 return fp;
13711 }
13712
13713 int
13714 dhd_os_get_image_block(char *buf, int len, void *image)
13715 {
13716 struct file *fp = (struct file *)image;
13717 int rdlen;
13718 int size;
13719
13720 if (!image) {
13721 return 0;
13722 }
13723
13724 size = i_size_read(file_inode(fp));
13725 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
13726 rdlen = kernel_read(fp, buf, MIN(len, size), &fp->f_pos);
13727 #else
13728 rdlen = kernel_read(fp, fp->f_pos, buf, MIN(len, size));
13729 #endif
13730
13731 if (len >= size && size != rdlen) {
13732 return -EIO;
13733 }
13734
13735 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)
13736 if (rdlen > 0) {
13737 fp->f_pos += rdlen;
13738 }
13739 #endif
13740
13741 return rdlen;
13742 }
13743
13744 int
13745 dhd_os_get_image_size(void *image)
13746 {
13747 struct file *fp = (struct file *)image;
13748 int size;
13749 if (!image) {
13750 return 0;
13751 }
13752
13753 size = i_size_read(file_inode(fp));
13754
13755 return size;
13756 }
13757
13758 #if defined(BT_OVER_SDIO)
13759 int
13760 dhd_os_gets_image(dhd_pub_t *pub, char *str, int len, void *image)
13761 {
13762 struct file *fp = (struct file *)image;
13763 int rd_len;
13764 uint str_len = 0;
13765 char *str_end = NULL;
13766
13767 if (!image)
13768 return 0;
13769
13770 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
13771 rd_len = kernel_read(fp, str, len, &fp->f_pos);
13772 #else
13773 rd_len = kernel_read(fp, fp->f_pos, str, len);
13774 #endif
13775 str_end = strnchr(str, len, '\n');
13776 if (str_end == NULL) {
13777 goto err;
13778 }
13779 str_len = (uint)(str_end - str);
13780
13781 /* Advance file pointer past the string length */
13782 fp->f_pos += str_len + 1;
13783 bzero(str_end, rd_len - str_len);
13784
13785 err:
13786 return str_len;
13787 }
13788 #endif /* defined (BT_OVER_SDIO) */
13789
13790
13791 void
13792 dhd_os_close_image(void *image)
13793 {
13794 if (image)
13795 filp_close((struct file *)image, NULL);
13796 }
13797
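/*
 * Illustrative sketch: the open/read/close helpers above are used together
 * to stream a firmware or NVRAM image in chunks. The 2048-byte chunk size
 * is an arbitrary example value.
 */
static int example_stream_image(char *path)
{
	char buf[2048];
	int len, total = 0;
	void *fp = dhd_os_open_image(path);

	if (!fp)
		return BCME_ERROR;

	while ((len = dhd_os_get_image_block(buf, sizeof(buf), fp)) > 0) {
		/* ... hand 'len' bytes from buf to the download path here ... */
		total += len;
	}
	dhd_os_close_image(fp);

	return (len < 0) ? BCME_ERROR : total;
}
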
13798 void
13799 dhd_os_sdlock(dhd_pub_t *pub)
13800 {
13801 dhd_info_t *dhd;
13802
13803 dhd = (dhd_info_t *)(pub->info);
13804
13805 #ifdef BCMDBUS
13806 spin_lock_bh(&dhd->sdlock);
13807 #else
13808 if (dhd_dpc_prio >= 0)
13809 down(&dhd->sdsem);
13810 else
13811 spin_lock_bh(&dhd->sdlock);
13812 #endif /* !BCMDBUS */
13813 }
13814
13815 void
13816 dhd_os_sdunlock(dhd_pub_t *pub)
13817 {
13818 dhd_info_t *dhd;
13819
13820 dhd = (dhd_info_t *)(pub->info);
13821
13822 #ifdef BCMDBUS
13823 spin_unlock_bh(&dhd->sdlock);
13824 #else
13825 if (dhd_dpc_prio >= 0)
13826 up(&dhd->sdsem);
13827 else
13828 spin_unlock_bh(&dhd->sdlock);
13829 #endif /* !BCMDBUS */
13830 }
13831
13832 void
13833 dhd_os_sdlock_txq(dhd_pub_t *pub)
13834 {
13835 dhd_info_t *dhd;
13836
13837 dhd = (dhd_info_t *)(pub->info);
13838 #ifdef BCMDBUS
13839 spin_lock_irqsave(&dhd->txqlock, dhd->txqlock_flags);
13840 #else
13841 spin_lock_bh(&dhd->txqlock);
13842 #endif /* BCMDBUS */
13843 }
13844
13845 void
13846 dhd_os_sdunlock_txq(dhd_pub_t *pub)
13847 {
13848 dhd_info_t *dhd;
13849
13850 dhd = (dhd_info_t *)(pub->info);
13851 #ifdef BCMDBUS
13852 spin_unlock_irqrestore(&dhd->txqlock, dhd->txqlock_flags);
13853 #else
13854 spin_unlock_bh(&dhd->txqlock);
13855 #endif /* BCMDBUS */
13856 }
13857
13858 void
13859 dhd_os_sdlock_rxq(dhd_pub_t *pub)
13860 {
13861 #if 0
13862 dhd_info_t *dhd;
13863
13864 dhd = (dhd_info_t *)(pub->info);
13865 spin_lock_bh(&dhd->rxqlock);
13866 #endif
13867 }
13868
13869 void
13870 dhd_os_sdunlock_rxq(dhd_pub_t *pub)
13871 {
13872 #if 0
13873 dhd_info_t *dhd;
13874
13875 dhd = (dhd_info_t *)(pub->info);
13876 spin_unlock_bh(&dhd->rxqlock);
13877 #endif
13878 }
13879
13880 static void
13881 dhd_os_rxflock(dhd_pub_t *pub)
13882 {
13883 dhd_info_t *dhd;
13884
13885 dhd = (dhd_info_t *)(pub->info);
13886 spin_lock_bh(&dhd->rxf_lock);
13887 }
13888
13889 static void
13890 dhd_os_rxfunlock(dhd_pub_t *pub)
13891 {
13892 dhd_info_t *dhd;
13893
13894 dhd = (dhd_info_t *)(pub->info);
13895 spin_unlock_bh(&dhd->rxf_lock);
13896 }
13897
13898 #ifdef DHDTCPACK_SUPPRESS
13899 unsigned long
13900 dhd_os_tcpacklock(dhd_pub_t *pub)
13901 {
13902 dhd_info_t *dhd;
13903 unsigned long flags = 0;
13904
13905 dhd = (dhd_info_t *)(pub->info);
13906
13907 if (dhd) {
13908 #ifdef BCMSDIO
13909 spin_lock_bh(&dhd->tcpack_lock);
13910 #else
13911 spin_lock_irqsave(&dhd->tcpack_lock, flags);
13912 #endif /* BCMSDIO */
13913 }
13914
13915 return flags;
13916 }
13917
13918 void
13919 dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags)
13920 {
13921 dhd_info_t *dhd;
13922
13923 #ifdef BCMSDIO
13924 BCM_REFERENCE(flags);
13925 #endif /* BCMSDIO */
13926
13927 dhd = (dhd_info_t *)(pub->info);
13928
13929 if (dhd) {
13930 #ifdef BCMSDIO
13931 spin_unlock_bh(&dhd->tcpack_lock);
13932 #else
13933 spin_unlock_irqrestore(&dhd->tcpack_lock, flags);
13934 #endif /* BCMSDIO */
13935 }
13936 }
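
/*
 * Illustrative sketch: the lock above returns IRQ flags on non-BCMSDIO
 * builds and must receive them back in the unlock. A hypothetical walker
 * over the TCP ACK suppress table:
 */
static void example_walk_tcpack_table(dhd_pub_t *pub)
{
	unsigned long flags = dhd_os_tcpacklock(pub);

	/* ... inspect or modify the tcpack_info table here ... */

	dhd_os_tcpackunlock(pub, flags);
}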
13937 #endif /* DHDTCPACK_SUPPRESS */
13938
13939 uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail)
13940 {
13941 uint8* buf;
13942 gfp_t flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
13943
13944 buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size);
13945 if (buf == NULL && kmalloc_if_fail)
13946 buf = kmalloc(size, flags);
13947
13948 return buf;
13949 }
13950
13951 void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size)
13952 {
13953 }
13954
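/*
 * Illustrative sketch: preallocated sections come from the platform layer,
 * with kmalloc() as an optional fallback. DHD_PREALLOC_DHD_INFO is one of
 * the section IDs used elsewhere in this file.
 */
static void *example_get_prealloc_buf(dhd_pub_t *dhdp, uint size)
{
	return (void *)dhd_os_prealloc(dhdp, DHD_PREALLOC_DHD_INFO, size, TRUE);
}
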
13955 #if defined(WL_WIRELESS_EXT)
13956 struct iw_statistics *
13957 dhd_get_wireless_stats(struct net_device *dev)
13958 {
13959 int res = 0;
13960 dhd_info_t *dhd = DHD_DEV_INFO(dev);
13961
13962 if (!dhd->pub.up) {
13963 return NULL;
13964 }
13965
13966 res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);
13967
13968 if (res == 0)
13969 return &dhd->iw.wstats;
13970 else
13971 return NULL;
13972 }
13973 #endif /* defined(WL_WIRELESS_EXT) */
13974
13975 static int
13976 dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen,
13977 wl_event_msg_t *event, void **data)
13978 {
13979 int bcmerror = 0;
13980 #ifdef WL_CFG80211
13981 unsigned long flags = 0;
13982 #endif /* WL_CFG80211 */
13983 ASSERT(dhd != NULL);
13984
13985 #ifdef SHOW_LOGTRACE
13986 bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data,
13987 &dhd->event_data);
13988 #else
13989 bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data,
13990 NULL);
13991 #endif /* SHOW_LOGTRACE */
13992
13993 if (bcmerror != BCME_OK)
13994 return (bcmerror);
13995
13996 #if defined(WL_EXT_IAPSTA)
13997 wl_ext_iapsta_event(dhd->iflist[ifidx]->net, event, *data);
13998 #endif /* defined(WL_EXT_IAPSTA) */
13999 #if defined(WL_WIRELESS_EXT)
14000 if (event->bsscfgidx == 0) {
14001 /*
14002 * Wireless ext is on primary interface only
14003 */
14004
14005 ASSERT(dhd->iflist[ifidx] != NULL);
14006 ASSERT(dhd->iflist[ifidx]->net != NULL);
14007
14008 if (dhd->iflist[ifidx]->net) {
14009 wl_iw_event(dhd->iflist[ifidx]->net, event, *data);
14010 }
14011 }
14012 #endif /* defined(WL_WIRELESS_EXT) */
14013
14014 #ifdef WL_CFG80211
14015 ASSERT(dhd->iflist[ifidx] != NULL);
14016 ASSERT(dhd->iflist[ifidx]->net != NULL);
14017 if (dhd->iflist[ifidx]->net) {
14018 spin_lock_irqsave(&dhd->pub.up_lock, flags);
14019 if (dhd->pub.up) {
14020 wl_cfg80211_event(dhd->iflist[ifidx]->net, event, *data);
14021 }
14022 spin_unlock_irqrestore(&dhd->pub.up_lock, flags);
14023 }
14024 #endif /* defined(WL_CFG80211) */
14025
14026 return (bcmerror);
14027 }
14028
14029 /* send up locally generated event */
14030 void
14031 dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
14032 {
14033 /* Just return from here */
14034 return;
14035 }
14036
14037 #ifdef LOG_INTO_TCPDUMP
14038 void
14039 dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len)
14040 {
14041 struct sk_buff *p, *skb;
14042 uint32 pktlen;
14043 int len;
14044 dhd_if_t *ifp;
14045 dhd_info_t *dhd;
14046 uchar *skb_data;
14047 int ifidx = 0;
14048 struct ether_header eth;
14049
14050 pktlen = sizeof(eth) + data_len;
14051 dhd = dhdp->info;
14052
14053 if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
14054 ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));
14055
14056 bcopy(&dhdp->mac, &eth.ether_dhost, ETHER_ADDR_LEN);
14057 bcopy(&dhdp->mac, &eth.ether_shost, ETHER_ADDR_LEN);
14058 ETHER_TOGGLE_LOCALADDR(&eth.ether_shost);
14059 eth.ether_type = hton16(ETHER_TYPE_BRCM);
14060
14061 bcopy((void *)&eth, PKTDATA(dhdp->osh, p), sizeof(eth));
14062 bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len);
14063 skb = PKTTONATIVE(dhdp->osh, p);
14064 skb_data = skb->data;
14065 len = skb->len;
14066
14067 ifidx = dhd_ifname2idx(dhd, "wlan0");
14068 ifp = dhd->iflist[ifidx];
14069 if (ifp == NULL)
14070 ifp = dhd->iflist[0];
14071
14072 ASSERT(ifp);
14073 skb->dev = ifp->net;
14074 skb->protocol = eth_type_trans(skb, skb->dev);
14075 skb->data = skb_data;
14076 skb->len = len;
14077
14078 /* Strip header, count, deliver upward */
14079 skb_pull(skb, ETH_HLEN);
14080
14081 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
14082 __FUNCTION__, __LINE__);
14083 /* Send the packet */
14084 if (in_interrupt()) {
14085 netif_rx(skb);
14086 } else {
14087 netif_rx_ni(skb);
14088 }
14089 } else {
14090 /* Could not allocate a sk_buf */
14091 DHD_ERROR(("%s: unable to alloc sk_buf\n", __FUNCTION__));
14092 }
14093 }
14094 #endif /* LOG_INTO_TCPDUMP */
14095
14096 void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
14097 {
14098 #if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
14099 struct dhd_info *dhdinfo = dhd->info;
14100
14101 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
14102 int timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT);
14103 #else
14104 int timeout = (IOCTL_RESP_TIMEOUT / 1000) * HZ;
14105 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
14106
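	/* Drop the SDIO bus lock while sleeping so the bus/DPC thread can run
	 * and clear *lockvar, then reacquire it before returning (sketch of
	 * the apparent intent of the unlock/wait/lock sequence below).
	 */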
14107 dhd_os_sdunlock(dhd);
14108 wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout);
14109 dhd_os_sdlock(dhd);
14110 #endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
14111 return;
14112 }
14113
14114 void dhd_wait_event_wakeup(dhd_pub_t *dhd)
14115 {
14116 #if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
14117 struct dhd_info *dhdinfo = dhd->info;
14118 if (waitqueue_active(&dhdinfo->ctrl_wait))
14119 wake_up(&dhdinfo->ctrl_wait);
14120 #endif
14121 return;
14122 }
14123
14124 #if defined(BCMSDIO) || defined(BCMPCIE) || defined(BCMDBUS)
14125 int
14126 dhd_net_bus_devreset(struct net_device *dev, uint8 flag)
14127 {
14128 int ret;
14129
14130 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14131
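	/* flag == TRUE: bring the dongle down (wl down plus per-feature
	 * deinit); flag == FALSE: refresh firmware/NVRAM paths first, then
	 * let dhd_bus_devreset() bring the device back up. (Summary derived
	 * from the logic below.)
	 */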
14132 if (flag == TRUE) {
14133 /* Issue wl down command before resetting the chip */
14134 if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
14135 DHD_TRACE(("%s: wl down failed\n", __FUNCTION__));
14136 }
14137 #ifdef PROP_TXSTATUS
14138 if (dhd->pub.wlfc_enabled) {
14139 dhd_wlfc_deinit(&dhd->pub);
14140 }
14141 #endif /* PROP_TXSTATUS */
14142 #ifdef PNO_SUPPORT
14143 if (dhd->pub.pno_state) {
14144 dhd_pno_deinit(&dhd->pub);
14145 }
14146 #endif
14147 #ifdef RTT_SUPPORT
14148 if (dhd->pub.rtt_state) {
14149 dhd_rtt_deinit(&dhd->pub);
14150 }
14151 #endif /* RTT_SUPPORT */
14152
14153 #if defined(DBG_PKT_MON) && !defined(DBG_PKT_MON_INIT_DEFAULT)
14154 dhd_os_dbg_detach_pkt_monitor(&dhd->pub);
14155 #endif /* DBG_PKT_MON */
14156 }
14157
14158 #ifdef BCMSDIO
14159 if (!flag) {
14160 dhd_update_fw_nv_path(dhd);
14161 /* update firmware and nvram path to sdio bus */
14162 dhd_bus_update_fw_nv_path(dhd->pub.bus,
14163 dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path);
14164 }
14165 #endif /* BCMSDIO */
14166
14167 ret = dhd_bus_devreset(&dhd->pub, flag);
14168 if (ret) {
14169 DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret));
14170 return ret;
14171 }
14172
14173 return ret;
14174 }
14175
14176 #ifdef BCMSDIO
14177 int
14178 dhd_net_bus_suspend(struct net_device *dev)
14179 {
14180 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14181 return dhd_bus_suspend(&dhd->pub);
14182 }
14183
14184 int
14185 dhd_net_bus_resume(struct net_device *dev, uint8 stage)
14186 {
14187 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14188 return dhd_bus_resume(&dhd->pub, stage);
14189 }
14190
14191 #endif /* BCMSDIO */
14192 #endif /* BCMSDIO || BCMPCIE || BCMDBUS */
14193
14194 int net_os_set_suspend_disable(struct net_device *dev, int val)
14195 {
14196 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14197 int ret = 0;
14198
14199 if (dhd) {
14200 ret = dhd->pub.suspend_disable_flag;
14201 dhd->pub.suspend_disable_flag = val;
14202 }
14203 return ret;
14204 }
14205
14206 int net_os_set_suspend(struct net_device *dev, int val, int force)
14207 {
14208 int ret = 0;
14209 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14210
14211 if (dhd) {
14212 #ifdef CONFIG_MACH_UNIVERSAL7420
14213 #endif /* CONFIG_MACH_UNIVERSAL7420 */
14214 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
14215 ret = dhd_set_suspend(val, &dhd->pub);
14216 #else
14217 ret = dhd_suspend_resume_helper(dhd, val, force);
14218 #endif
14219 #ifdef WL_CFG80211
14220 wl_cfg80211_update_power_mode(dev);
14221 #endif
14222 }
14223 return ret;
14224 }
14225
14226 int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val)
14227 {
14228 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14229
14230 if (dhd)
14231 dhd->pub.suspend_bcn_li_dtim = val;
14232
14233 return 0;
14234 }
14235
14236 int net_os_set_max_dtim_enable(struct net_device *dev, int val)
14237 {
14238 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14239
14240 if (dhd) {
14241 DHD_ERROR(("%s: use MAX bcn_li_dtim in suspend %s\n",
14242 __FUNCTION__, (val ? "Enable" : "Disable")));
14243 if (val) {
14244 dhd->pub.max_dtim_enable = TRUE;
14245 } else {
14246 dhd->pub.max_dtim_enable = FALSE;
14247 }
14248 } else {
14249 return -1;
14250 }
14251
14252 return 0;
14253 }
14254
14255 #ifdef PKT_FILTER_SUPPORT
14256 int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
14257 {
14258 int ret = 0;
14259
14260 #ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
14261 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14262
14263 if (!dhd_master_mode)
14264 add_remove = !add_remove;
14265 DHD_ERROR(("%s: add_remove = %d, num = %d\n", __FUNCTION__, add_remove, num));
14266 if (!dhd || (num == DHD_UNICAST_FILTER_NUM)) {
14267 return 0;
14268 }
14269
14270
14271 if (num >= dhd->pub.pktfilter_count) {
14272 return -EINVAL;
14273 }
14274
14275 ret = dhd_packet_filter_add_remove(&dhd->pub, add_remove, num);
14276 #endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
14277
14278 return ret;
14279 }
14280
14281 int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val)
14282
14283 {
14284 int ret = 0;
14285
14286 /* Packet filtering is set only if we are still in early-suspend and
14287 * we need either to turn it ON or turn it OFF
14288 * We can always turn it OFF in case of early-suspend, but we turn it
14289 * back ON only if suspend_disable_flag was not set
14290 */
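	/*
	 * Decision sketch of the nested conditions below:
	 *   up && in_suspend && val == 0                          -> disable filtering
	 *   up && in_suspend && val == 1 && !suspend_disable_flag -> enable filtering
	 *   otherwise                                             -> no change
	 */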
14291 if (dhdp && dhdp->up) {
14292 if (dhdp->in_suspend) {
14293 if (!val || (val && !dhdp->suspend_disable_flag))
14294 dhd_enable_packet_filter(val, dhdp);
14295 }
14296 }
14297 return ret;
14298 }
14299
14300 /* function to enable/disable packet filtering for the network device */
14301 int net_os_enable_packet_filter(struct net_device *dev, int val)
14302 {
14303 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14304
14305 DHD_ERROR(("%s: val = %d\n", __FUNCTION__, val));
14306 return dhd_os_enable_packet_filter(&dhd->pub, val);
14307 }
14308 #endif /* PKT_FILTER_SUPPORT */
14309
14310 int
14311 dhd_dev_init_ioctl(struct net_device *dev)
14312 {
14313 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14314 int ret;
14315
14316 if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0)
14317 goto done;
14318
14319 done:
14320 return ret;
14321 }
14322
14323 int
14324 dhd_dev_get_feature_set(struct net_device *dev)
14325 {
14326 dhd_info_t *ptr = *(dhd_info_t **)netdev_priv(dev);
14327 dhd_pub_t *dhd = (&ptr->pub);
14328 int feature_set = 0;
14329
14330 if (FW_SUPPORTED(dhd, sta))
14331 feature_set |= WIFI_FEATURE_INFRA;
14332 if (FW_SUPPORTED(dhd, dualband))
14333 feature_set |= WIFI_FEATURE_INFRA_5G;
14334 if (FW_SUPPORTED(dhd, p2p))
14335 feature_set |= WIFI_FEATURE_P2P;
14336 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)
14337 feature_set |= WIFI_FEATURE_SOFT_AP;
14338 if (FW_SUPPORTED(dhd, tdls))
14339 feature_set |= WIFI_FEATURE_TDLS;
14340 if (FW_SUPPORTED(dhd, vsdb))
14341 feature_set |= WIFI_FEATURE_TDLS_OFFCHANNEL;
14342 if (FW_SUPPORTED(dhd, nan)) {
14343 feature_set |= WIFI_FEATURE_NAN;
14344 /* NAN is essential for d2d rtt */
14345 if (FW_SUPPORTED(dhd, rttd2d))
14346 feature_set |= WIFI_FEATURE_D2D_RTT;
14347 }
14348 #ifdef RTT_SUPPORT
14349 if (dhd->rtt_supported) {
14350 feature_set |= WIFI_FEATURE_D2D_RTT;
14351 feature_set |= WIFI_FEATURE_D2AP_RTT;
14352 }
14353 #endif /* RTT_SUPPORT */
14354 #ifdef LINKSTAT_SUPPORT
14355 feature_set |= WIFI_FEATURE_LINKSTAT;
14356 #endif /* LINKSTAT_SUPPORT */
14357
14358 #ifdef PNO_SUPPORT
14359 if (dhd_is_pno_supported(dhd)) {
14360 feature_set |= WIFI_FEATURE_PNO;
14361 #ifdef GSCAN_SUPPORT
14362 /* terence 20171115: remove to get GTS PASS
14363 * com.google.android.gts.wifi.WifiHostTest#testWifiScannerBatchTimestamp
14364 */
14365 // feature_set |= WIFI_FEATURE_GSCAN;
14366 // feature_set |= WIFI_FEATURE_HAL_EPNO;
14367 #endif /* GSCAN_SUPPORT */
14368 }
14369 #endif /* PNO_SUPPORT */
14370 #ifdef RSSI_MONITOR_SUPPORT
14371 if (FW_SUPPORTED(dhd, rssi_mon)) {
14372 feature_set |= WIFI_FEATURE_RSSI_MONITOR;
14373 }
14374 #endif /* RSSI_MONITOR_SUPPORT */
14375 #ifdef WL11U
14376 feature_set |= WIFI_FEATURE_HOTSPOT;
14377 #endif /* WL11U */
14378 #ifdef NDO_CONFIG_SUPPORT
14379 feature_set |= WIFI_FEATURE_CONFIG_NDO;
14380 #endif /* NDO_CONFIG_SUPPORT */
14381 #ifdef KEEP_ALIVE
14382 feature_set |= WIFI_FEATURE_MKEEP_ALIVE;
14383 #endif /* KEEP_ALIVE */
14384
14385 return feature_set;
14386 }
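/*
 * Usage sketch (hypothetical caller, not from this file): the return
 * value is a bitmask, so individual capabilities are tested with a
 * bitwise AND, e.g.:
 *
 *   int fs = dhd_dev_get_feature_set(ndev);
 *   if (fs & WIFI_FEATURE_P2P) {
 *           // expose P2P support to the HAL
 *   }
 */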
14387
14388 int
14389 dhd_dev_get_feature_set_matrix(struct net_device *dev, int num)
14390 {
14391 int feature_set_full;
14392 int ret = 0;
14393
14394 feature_set_full = dhd_dev_get_feature_set(dev);
14395
14396 /* Common feature set for all interfaces */
14397 ret = (feature_set_full & WIFI_FEATURE_INFRA) |
14398 (feature_set_full & WIFI_FEATURE_INFRA_5G) |
14399 (feature_set_full & WIFI_FEATURE_D2D_RTT) |
14400 (feature_set_full & WIFI_FEATURE_D2AP_RTT) |
14401 (feature_set_full & WIFI_FEATURE_RSSI_MONITOR) |
14402 (feature_set_full & WIFI_FEATURE_EPR);
14403
14404 /* Specific feature group for each interface */
14405 switch (num) {
14406 case 0:
14407 ret |= (feature_set_full & WIFI_FEATURE_P2P) |
14408 /* Not supported yet */
14409 /* (feature_set_full & WIFI_FEATURE_NAN) | */
14410 (feature_set_full & WIFI_FEATURE_TDLS) |
14411 (feature_set_full & WIFI_FEATURE_PNO) |
14412 (feature_set_full & WIFI_FEATURE_HAL_EPNO) |
14413 (feature_set_full & WIFI_FEATURE_BATCH_SCAN) |
14414 (feature_set_full & WIFI_FEATURE_GSCAN) |
14415 (feature_set_full & WIFI_FEATURE_HOTSPOT) |
14416 (feature_set_full & WIFI_FEATURE_ADDITIONAL_STA);
14417 break;
14418
14419 case 1:
14420 ret |= (feature_set_full & WIFI_FEATURE_P2P);
14421 /* Not yet verified NAN with P2P */
14422 /* (feature_set_full & WIFI_FEATURE_NAN) | */
14423 break;
14424
14425 case 2:
14426 ret |= (feature_set_full & WIFI_FEATURE_NAN) |
14427 (feature_set_full & WIFI_FEATURE_TDLS) |
14428 (feature_set_full & WIFI_FEATURE_TDLS_OFFCHANNEL);
14429 break;
14430
14431 default:
14432 ret = WIFI_FEATURE_INVALID;
14433 DHD_ERROR(("%s: Out of index(%d) for get feature set\n", __FUNCTION__, num));
14434 break;
14435 }
14436
14437 return ret;
14438 }
14439
14440 #ifdef CUSTOM_FORCE_NODFS_FLAG
14441 int
14442 dhd_dev_set_nodfs(struct net_device *dev, u32 nodfs)
14443 {
14444 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14445
14446 if (nodfs)
14447 dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
14448 else
14449 dhd->pub.dhd_cflags &= ~WLAN_PLAT_NODFS_FLAG;
14450 dhd->pub.force_country_change = TRUE;
14451 return 0;
14452 }
14453 #endif /* CUSTOM_FORCE_NODFS_FLAG */
14454
14455 #ifdef NDO_CONFIG_SUPPORT
14456 int
14457 dhd_dev_ndo_cfg(struct net_device *dev, u8 enable)
14458 {
14459 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14460 dhd_pub_t *dhdp = &dhd->pub;
14461 int ret = 0;
14462
14463 if (enable) {
14464 /* enable ND offload feature (will be enabled in FW on suspend) */
14465 dhdp->ndo_enable = TRUE;
14466
14467 /* Update changes of anycast address & DAD failed address */
14468 ret = dhd_dev_ndo_update_inet6addr(dev);
14469 if ((ret < 0) && (ret != BCME_NORESOURCE)) {
14470 DHD_ERROR(("%s: failed to update host ip addr: %d\n", __FUNCTION__, ret));
14471 return ret;
14472 }
14473 } else {
14474 /* disable ND offload feature */
14475 dhdp->ndo_enable = FALSE;
14476
14477 /* disable ND offload in FW */
14478 ret = dhd_ndo_enable(dhdp, 0);
14479 if (ret < 0) {
14480 DHD_ERROR(("%s: failed to disable NDO: %d\n", __FUNCTION__, ret));
14481 }
14482 }
14483 return ret;
14484 }
14485
14486 /* #pragma used as a WAR to fix build failure,
14487 * ignore dropping of 'const' qualifier in 'list_entry' macro
14488 * this pragma disables the warning only for the following function
14489 */
14490 #pragma GCC diagnostic push
14491 #pragma GCC diagnostic ignored "-Wcast-qual"
14492
14493 static int
14494 dhd_dev_ndo_get_valid_inet6addr_count(struct inet6_dev *inet6)
14495 {
14496 struct inet6_ifaddr *ifa;
14497 struct ifacaddr6 *acaddr = NULL;
14498 int addr_count = 0;
14499
14500 /* lock */
14501 read_lock_bh(&inet6->lock);
14502
14503 /* Count valid unicast address */
14504 list_for_each_entry(ifa, &inet6->addr_list, if_list) {
14505 if ((ifa->flags & IFA_F_DADFAILED) == 0) {
14506 addr_count++;
14507 }
14508 }
14509
14510 /* Count anycast address */
14511 acaddr = inet6->ac_list;
14512 while (acaddr) {
14513 addr_count++;
14514 acaddr = acaddr->aca_next;
14515 }
14516
14517 /* unlock */
14518 read_unlock_bh(&inet6->lock);
14519
14520 return addr_count;
14521 }
14522
14523 int
14524 dhd_dev_ndo_update_inet6addr(struct net_device *dev)
14525 {
14526 dhd_info_t *dhd;
14527 dhd_pub_t *dhdp;
14528 struct inet6_dev *inet6;
14529 struct inet6_ifaddr *ifa;
14530 struct ifacaddr6 *acaddr = NULL;
14531 struct in6_addr *ipv6_addr = NULL;
14532 int cnt, i;
14533 int ret = BCME_OK;
14534
14535 /*
14536 * this function evaluates host ip addresses in struct inet6_dev
14537 * unicast addr in inet6_dev->addr_list
14538 * anycast addr in inet6_dev->ac_list
14539 * while evaluating inet6_dev, read_lock_bh() is required to prevent
14540 * access to a null (freed) pointer.
14541 */
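	/*
	 * Sequence performed below (summary of this function):
	 *   1. bail out and disable NDO if the valid-address count overflows
	 *      ndo_max_host_ip;
	 *   2. remove DAD-failed unicast addresses from the dongle;
	 *   3. flush all anycast addresses;
	 *   4. if a previous overflow has cleared, re-add the valid unicast
	 *      addresses;
	 *   5. re-add anycast addresses and, when suspended, re-enable NDO.
	 */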
14542
14543 if (dev) {
14544 inet6 = dev->ip6_ptr;
14545 if (!inet6) {
14546 DHD_ERROR(("%s: Invalid inet6_dev\n", __FUNCTION__));
14547 return BCME_ERROR;
14548 }
14549
14550 dhd = DHD_DEV_INFO(dev);
14551 if (!dhd) {
14552 DHD_ERROR(("%s: Invalid dhd_info\n", __FUNCTION__));
14553 return BCME_ERROR;
14554 }
14555 dhdp = &dhd->pub;
14556
14557 if (dhd_net2idx(dhd, dev) != 0) {
14558 DHD_ERROR(("%s: Not primary interface\n", __FUNCTION__));
14559 return BCME_ERROR;
14560 }
14561 } else {
14562 DHD_ERROR(("%s: Invalid net_device\n", __FUNCTION__));
14563 return BCME_ERROR;
14564 }
14565
14566 /* Check host IP overflow */
14567 cnt = dhd_dev_ndo_get_valid_inet6addr_count(inet6);
14568 if (cnt > dhdp->ndo_max_host_ip) {
14569 if (!dhdp->ndo_host_ip_overflow) {
14570 dhdp->ndo_host_ip_overflow = TRUE;
14571 /* Disable ND offload in FW */
14572 DHD_INFO(("%s: Host IP overflow, disable NDO\n", __FUNCTION__));
14573 ret = dhd_ndo_enable(dhdp, 0);
14574 }
14575
14576 return ret;
14577 }
14578
14579 /*
14580 * Allocate an ipv6 addr buffer to store the addresses to be added/removed.
14581 * The driver needs to lock inet6_dev while accessing the structure, but it
14582 * cannot issue an ioctl while inet6_dev is locked since the ioctl may sleep;
14583 * hence, copy the addresses into the buffer and do the ioctl after unlocking.
14584 */
14585 ipv6_addr = (struct in6_addr *)MALLOC(dhdp->osh,
14586 sizeof(struct in6_addr) * dhdp->ndo_max_host_ip);
14587 if (!ipv6_addr) {
14588 DHD_ERROR(("%s: failed to alloc ipv6 addr buffer\n", __FUNCTION__));
14589 return BCME_NOMEM;
14590 }
14591
14592 /* Find DAD failed unicast address to be removed */
14593 cnt = 0;
14594 read_lock_bh(&inet6->lock);
14595 list_for_each_entry(ifa, &inet6->addr_list, if_list) {
14596 /* DAD failed unicast address */
14597 if ((ifa->flags & IFA_F_DADFAILED) &&
14598 (cnt < dhdp->ndo_max_host_ip)) {
14599 memcpy(&ipv6_addr[cnt], &ifa->addr, sizeof(struct in6_addr));
14600 cnt++;
14601 }
14602 }
14603 read_unlock_bh(&inet6->lock);
14604
14605 /* Remove DAD failed unicast address */
14606 for (i = 0; i < cnt; i++) {
14607 DHD_INFO(("%s: Remove DAD failed addr\n", __FUNCTION__));
14608 ret = dhd_ndo_remove_ip_by_addr(dhdp, (char *)&ipv6_addr[i], 0);
14609 if (ret < 0) {
14610 goto done;
14611 }
14612 }
14613
14614 /* Remove all anycast address */
14615 ret = dhd_ndo_remove_ip_by_type(dhdp, WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0);
14616 if (ret < 0) {
14617 goto done;
14618 }
14619
14620 /*
14621 * if ND offload was disabled due to host ip overflow,
14622 * attempt to add valid unicast address.
14623 */
14624 if (dhdp->ndo_host_ip_overflow) {
14625 /* Find valid unicast address */
14626 cnt = 0;
14627 read_lock_bh(&inet6->lock);
14628 list_for_each_entry(ifa, &inet6->addr_list, if_list) {
14629 /* valid unicast address */
14630 if (!(ifa->flags & IFA_F_DADFAILED) &&
14631 (cnt < dhdp->ndo_max_host_ip)) {
14632 memcpy(&ipv6_addr[cnt], &ifa->addr,
14633 sizeof(struct in6_addr));
14634 cnt++;
14635 }
14636 }
14637 read_unlock_bh(&inet6->lock);
14638
14639 /* Add valid unicast address */
14640 for (i = 0; i < cnt; i++) {
14641 ret = dhd_ndo_add_ip_with_type(dhdp,
14642 (char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_UNICAST, 0);
14643 if (ret < 0) {
14644 goto done;
14645 }
14646 }
14647 }
14648
14649 /* Find anycast address */
14650 cnt = 0;
14651 read_lock_bh(&inet6->lock);
14652 acaddr = inet6->ac_list;
14653 while (acaddr) {
14654 if (cnt < dhdp->ndo_max_host_ip) {
14655 memcpy(&ipv6_addr[cnt], &acaddr->aca_addr, sizeof(struct in6_addr));
14656 cnt++;
14657 }
14658 acaddr = acaddr->aca_next;
14659 }
14660 read_unlock_bh(&inet6->lock);
14661
14662 /* Add anycast address */
14663 for (i = 0; i < cnt; i++) {
14664 ret = dhd_ndo_add_ip_with_type(dhdp,
14665 (char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0);
14666 if (ret < 0) {
14667 goto done;
14668 }
14669 }
14670
14671 /* Now all host IP addresses were added successfully */
14672 if (dhdp->ndo_host_ip_overflow) {
14673 dhdp->ndo_host_ip_overflow = FALSE;
14674 if (dhdp->in_suspend) {
14675 /* driver is in (early) suspend state, need to enable ND offload in FW */
14676 DHD_INFO(("%s: enable NDO\n", __FUNCTION__));
14677 ret = dhd_ndo_enable(dhdp, 1);
14678 }
14679 }
14680
14681 done:
14682 if (ipv6_addr) {
14683 MFREE(dhdp->osh, ipv6_addr, sizeof(struct in6_addr) * dhdp->ndo_max_host_ip);
14684 }
14685
14686 return ret;
14687 }
14688 #pragma GCC diagnostic pop
14689
14690 #endif /* NDO_CONFIG_SUPPORT */
14691
14692 #ifdef PNO_SUPPORT
14693 /* Linux wrapper to call common dhd_pno_stop_for_ssid */
14694 int
14695 dhd_dev_pno_stop_for_ssid(struct net_device *dev)
14696 {
14697 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14698
14699 return (dhd_pno_stop_for_ssid(&dhd->pub));
14700 }
14701
14702 /* Linux wrapper to call common dhd_pno_set_for_ssid */
14703 int
14704 dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid,
14705 uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
14706 {
14707 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14708
14709 return (dhd_pno_set_for_ssid(&dhd->pub, ssids_local, nssid, scan_fr,
14710 pno_repeat, pno_freq_expo_max, channel_list, nchan));
14711 }
14712
14713 /* Linux wrapper to call common dhd_pno_enable */
14714 int
14715 dhd_dev_pno_enable(struct net_device *dev, int enable)
14716 {
14717 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14718
14719 return (dhd_pno_enable(&dhd->pub, enable));
14720 }
14721
14722 /* Linux wrapper to call common dhd_pno_set_for_hotlist */
14723 int
14724 dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
14725 struct dhd_pno_hotlist_params *hotlist_params)
14726 {
14727 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14728 return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params));
14729 }
14730 /* Linux wrapper to call common dhd_pno_stop_for_batch */
14731 int
14732 dhd_dev_pno_stop_for_batch(struct net_device *dev)
14733 {
14734 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14735 return (dhd_pno_stop_for_batch(&dhd->pub));
14736 }
14737
14738 /* Linux wrapper to call common dhd_pno_set_for_batch */
14739 int
14740 dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params)
14741 {
14742 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14743 return (dhd_pno_set_for_batch(&dhd->pub, batch_params));
14744 }
14745
14746 /* Linux wrapper to call common dhd_pno_get_for_batch */
14747 int
14748 dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize)
14749 {
14750 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14751 return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL));
14752 }
14753 #endif /* PNO_SUPPORT */
14754
14755 #if defined(PNO_SUPPORT)
14756 #ifdef GSCAN_SUPPORT
14757 bool
14758 dhd_dev_is_legacy_pno_enabled(struct net_device *dev)
14759 {
14760 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14761
14762 return (dhd_is_legacy_pno_enabled(&dhd->pub));
14763 }
14764
14765 int
14766 dhd_dev_set_epno(struct net_device *dev)
14767 {
14768 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14769 if (!dhd) {
14770 return BCME_ERROR;
14771 }
14772 return dhd_pno_set_epno(&dhd->pub);
14773 }
14774 int
14775 dhd_dev_flush_fw_epno(struct net_device *dev)
14776 {
14777 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14778 if (!dhd) {
14779 return BCME_ERROR;
14780 }
14781 return dhd_pno_flush_fw_epno(&dhd->pub);
14782 }
14783
14784 /* Linux wrapper to call common dhd_pno_set_cfg_gscan */
14785 int
14786 dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
14787 void *buf, bool flush)
14788 {
14789 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14790
14791 return (dhd_pno_set_cfg_gscan(&dhd->pub, type, buf, flush));
14792 }
14793
14794 /* Linux wrapper to call common dhd_wait_batch_results_complete */
14795 int
14796 dhd_dev_wait_batch_results_complete(struct net_device *dev)
14797 {
14798 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14799
14800 return (dhd_wait_batch_results_complete(&dhd->pub));
14801 }
14802
14803 /* Linux wrapper to call common dhd_pno_lock_batch_results */
14804 int
14805 dhd_dev_pno_lock_access_batch_results(struct net_device *dev)
14806 {
14807 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14808
14809 return (dhd_pno_lock_batch_results(&dhd->pub));
14810 }
14811 /* Linux wrapper to call common dhd_pno_unlock_batch_results */
14812 void
14813 dhd_dev_pno_unlock_access_batch_results(struct net_device *dev)
14814 {
14815 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14816
14817 return (dhd_pno_unlock_batch_results(&dhd->pub));
14818 }
14819
14820 /* Linux wrapper to call common dhd_pno_initiate_gscan_request */
14821 int
14822 dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush)
14823 {
14824 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14825
14826 return (dhd_pno_initiate_gscan_request(&dhd->pub, run, flush));
14827 }
14828
14829 /* Linux wrapper to call common dhd_pno_enable_full_scan_result */
14830 int
14831 dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time_flag)
14832 {
14833 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14834
14835 return (dhd_pno_enable_full_scan_result(&dhd->pub, real_time_flag));
14836 }
14837
14838 /* Linux wrapper to call common dhd_handle_hotlist_scan_evt */
14839 void *
14840 dhd_dev_hotlist_scan_event(struct net_device *dev,
14841 const void *data, int *send_evt_bytes, hotlist_type_t type)
14842 {
14843 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14844
14845 return (dhd_handle_hotlist_scan_evt(&dhd->pub, data, send_evt_bytes, type));
14846 }
14847
14848 /* Linux wrapper to call common dhd_process_full_gscan_result */
14849 void *
14850 dhd_dev_process_full_gscan_result(struct net_device *dev,
14851 const void *data, uint32 len, int *send_evt_bytes)
14852 {
14853 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14854
14855 return (dhd_process_full_gscan_result(&dhd->pub, data, len, send_evt_bytes));
14856 }
14857
14858 void
14859 dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type)
14860 {
14861 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14862
14863 dhd_gscan_hotlist_cache_cleanup(&dhd->pub, type);
14864
14865 return;
14866 }
14867
14868 int
14869 dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev)
14870 {
14871 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14872
14873 return (dhd_gscan_batch_cache_cleanup(&dhd->pub));
14874 }
14875
14876 /* Linux wrapper to call common dhd_retreive_batch_scan_results */
14877 int
14878 dhd_dev_retrieve_batch_scan(struct net_device *dev)
14879 {
14880 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14881
14882 return (dhd_retreive_batch_scan_results(&dhd->pub));
14883 }
14884
14885 /* Linux wrapper to call common dhd_pno_process_epno_result */
14886 void * dhd_dev_process_epno_result(struct net_device *dev,
14887 const void *data, uint32 event, int *send_evt_bytes)
14888 {
14889 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14890
14891 return (dhd_pno_process_epno_result(&dhd->pub, data, event, send_evt_bytes));
14892 }
14893
14894 int
14895 dhd_dev_set_lazy_roam_cfg(struct net_device *dev,
14896 wlc_roam_exp_params_t *roam_param)
14897 {
14898 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14899 wl_roam_exp_cfg_t roam_exp_cfg;
14900 int err;
14901
14902 if (!roam_param) {
14903 return BCME_BADARG;
14904 }
14905
14906 DHD_ERROR(("a_band_boost_thr %d a_band_penalty_thr %d\n",
14907 roam_param->a_band_boost_threshold, roam_param->a_band_penalty_threshold));
14908 DHD_ERROR(("a_band_boost_factor %d a_band_penalty_factor %d cur_bssid_boost %d\n",
14909 roam_param->a_band_boost_factor, roam_param->a_band_penalty_factor,
14910 roam_param->cur_bssid_boost));
14911 DHD_ERROR(("alert_roam_trigger_thr %d a_band_max_boost %d\n",
14912 roam_param->alert_roam_trigger_threshold, roam_param->a_band_max_boost));
14913
14914 memcpy(&roam_exp_cfg.params, roam_param, sizeof(*roam_param));
14915 roam_exp_cfg.version = ROAM_EXP_CFG_VERSION;
14916 roam_exp_cfg.flags = ROAM_EXP_CFG_PRESENT;
14917 if (dhd->pub.lazy_roam_enable) {
14918 roam_exp_cfg.flags |= ROAM_EXP_ENABLE_FLAG;
14919 }
14920 err = dhd_iovar(&dhd->pub, 0, "roam_exp_params",
14921 (char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0,
14922 TRUE);
14923 if (err < 0) {
14924 DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__, err));
14925 }
14926 return err;
14927 }
14928
14929 int
14930 dhd_dev_lazy_roam_enable(struct net_device *dev, uint32 enable)
14931 {
14932 int err;
14933 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14934 wl_roam_exp_cfg_t roam_exp_cfg;
14935
14936 memset(&roam_exp_cfg, 0, sizeof(roam_exp_cfg));
14937 roam_exp_cfg.version = ROAM_EXP_CFG_VERSION;
14938 if (enable) {
14939 roam_exp_cfg.flags = ROAM_EXP_ENABLE_FLAG;
14940 }
14941
14942 err = dhd_iovar(&dhd->pub, 0, "roam_exp_params",
14943 (char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0,
14944 TRUE);
14945 if (err < 0) {
14946 DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__, err));
14947 } else {
14948 dhd->pub.lazy_roam_enable = (enable != 0);
14949 }
14950 return err;
14951 }
14952
14953 int
14954 dhd_dev_set_lazy_roam_bssid_pref(struct net_device *dev,
14955 wl_bssid_pref_cfg_t *bssid_pref, uint32 flush)
14956 {
14957 int err;
14958 int len;
14959 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14960
14961 bssid_pref->version = BSSID_PREF_LIST_VERSION;
14962 /* By default programming bssid pref flushes out old values */
14963 bssid_pref->flags = (flush && !bssid_pref->count) ? ROAM_EXP_CLEAR_BSSID_PREF: 0;
14964 len = sizeof(wl_bssid_pref_cfg_t);
14965 len += (bssid_pref->count - 1) * sizeof(wl_bssid_pref_list_t);
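	/* wl_bssid_pref_cfg_t already embeds one wl_bssid_pref_list_t entry,
	 * hence the (count - 1) adjustment above (assumption inferred from
	 * the sizing arithmetic, not from a struct definition in this file).
	 */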
14966 err = dhd_iovar(&(dhd->pub), 0, "roam_exp_bssid_pref", (char *)bssid_pref,
14967 len, NULL, 0, TRUE);
14968 if (err != BCME_OK) {
14969 DHD_ERROR(("%s : Failed to execute roam_exp_bssid_pref %d\n", __FUNCTION__, err));
14970 }
14971 return err;
14972 }
14973
14974 int
14975 dhd_dev_set_blacklist_bssid(struct net_device *dev, maclist_t *blacklist,
14976 uint32 len, uint32 flush)
14977 {
14978 int err;
14979 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14980 int macmode;
14981
14982 if (blacklist) {
14983 err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACLIST, (char *)blacklist,
14984 len, TRUE, 0);
14985 if (err != BCME_OK) {
14986 DHD_ERROR(("%s : WLC_SET_MACLIST failed %d\n", __FUNCTION__, err));
14987 return err;
14988 }
14989 }
14990 /* By default programming blacklist flushes out old values */
14991 macmode = (flush && !blacklist) ? WLC_MACMODE_DISABLED : WLC_MACMODE_DENY;
14992 err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACMODE, (char *)&macmode,
14993 sizeof(macmode), TRUE, 0);
14994 if (err != BCME_OK) {
14995 DHD_ERROR(("%s : WLC_SET_MACMODE failed %d\n", __FUNCTION__, err));
14996 }
14997 return err;
14998 }
14999
15000 int
15001 dhd_dev_set_whitelist_ssid(struct net_device *dev, wl_ssid_whitelist_t *ssid_whitelist,
15002 uint32 len, uint32 flush)
15003 {
15004 int err;
15005 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15006 wl_ssid_whitelist_t whitelist_ssid_flush;
15007
15008 if (!ssid_whitelist) {
15009 if (flush) {
15010 ssid_whitelist = &whitelist_ssid_flush;
15011 ssid_whitelist->ssid_count = 0;
15012 } else {
15013 DHD_ERROR(("%s : Nothing to do here\n", __FUNCTION__));
15014 return BCME_BADARG;
15015 }
15016 }
15017 ssid_whitelist->version = SSID_WHITELIST_VERSION;
15018 ssid_whitelist->flags = flush ? ROAM_EXP_CLEAR_SSID_WHITELIST : 0;
15019 err = dhd_iovar(&(dhd->pub), 0, "roam_exp_ssid_whitelist", (char *)ssid_whitelist,
15020 len, NULL, 0, TRUE);
15021 if (err != BCME_OK) {
15022 DHD_ERROR(("%s : Failed to execute roam_exp_ssid_whitelist %d\n", __FUNCTION__, err));
15023 }
15024 return err;
15025 }
15026 #endif /* GSCAN_SUPPORT */
15027
15028 #if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
15029 /* Linux wrapper to call common dhd_pno_get_gscan */
15030 void *
15031 dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
15032 void *info, uint32 *len)
15033 {
15034 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15035
15036 return (dhd_pno_get_gscan(&dhd->pub, type, info, len));
15037 }
15038 #endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
15039 #endif /* PNO_SUPPORT */
15040
15041 #ifdef RSSI_MONITOR_SUPPORT
15042 int
15043 dhd_dev_set_rssi_monitor_cfg(struct net_device *dev, int start,
15044 int8 max_rssi, int8 min_rssi)
15045 {
15046 int err;
15047 wl_rssi_monitor_cfg_t rssi_monitor;
15048 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15049
15050 rssi_monitor.version = RSSI_MONITOR_VERSION;
15051 rssi_monitor.max_rssi = max_rssi;
15052 rssi_monitor.min_rssi = min_rssi;
15053 rssi_monitor.flags = start ? 0: RSSI_MONITOR_STOP;
15054 err = dhd_iovar(&(dhd->pub), 0, "rssi_monitor", (char *)&rssi_monitor,
15055 sizeof(rssi_monitor), NULL, 0, TRUE);
15056 if (err < 0 && err != BCME_UNSUPPORTED) {
15057 DHD_ERROR(("%s : Failed to execute rssi_monitor %d\n", __FUNCTION__, err));
15058 }
15059 return err;
15060 }
15061 #endif /* RSSI_MONITOR_SUPPORT */
15062
15063 #ifdef DHDTCPACK_SUPPRESS
15064 int dhd_dev_set_tcpack_sup_mode_cfg(struct net_device *dev, uint8 enable)
15065 {
15066 int err;
15067 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15068
15069 err = dhd_tcpack_suppress_set(&(dhd->pub), enable);
15070 if (err != BCME_OK) {
15071 DHD_ERROR(("%s : Failed to set tcpack_suppress mode %d\n", __FUNCTION__, err));
15072 }
15073 return err;
15074 }
15075 #endif /* DHDTCPACK_SUPPRESS */
15076
15077 int
15078 dhd_dev_cfg_rand_mac_oui(struct net_device *dev, uint8 *oui)
15079 {
15080 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15081 dhd_pub_t *dhdp = &dhd->pub;
15082
15083 if (!dhdp || !oui) {
15084 DHD_ERROR(("NULL POINTER : %s\n",
15085 __FUNCTION__));
15086 return BCME_ERROR;
15087 }
15088 if (ETHER_ISMULTI(oui)) {
15089 DHD_ERROR(("Expected unicast OUI\n"));
15090 return BCME_ERROR;
15091 } else {
15092 uint8 *rand_mac_oui = dhdp->rand_mac_oui;
15093 memcpy(rand_mac_oui, oui, DOT11_OUI_LEN);
15094 DHD_ERROR(("Random MAC OUI to be used - %02x:%02x:%02x\n", rand_mac_oui[0],
15095 rand_mac_oui[1], rand_mac_oui[2]));
15096 }
15097 return BCME_OK;
15098 }
15099
15100 int
15101 dhd_set_rand_mac_oui(dhd_pub_t *dhd)
15102 {
15103 int err;
15104 wl_pfn_macaddr_cfg_t wl_cfg;
15105 uint8 *rand_mac_oui = dhd->rand_mac_oui;
15106
15107 memset(&wl_cfg.macaddr, 0, ETHER_ADDR_LEN);
15108 memcpy(&wl_cfg.macaddr, rand_mac_oui, DOT11_OUI_LEN);
15109 wl_cfg.version = WL_PFN_MACADDR_CFG_VER;
15110 if (ETHER_ISNULLADDR(&wl_cfg.macaddr)) {
15111 wl_cfg.flags = 0;
15112 } else {
15113 wl_cfg.flags = (WL_PFN_MAC_OUI_ONLY_MASK | WL_PFN_SET_MAC_UNASSOC_MASK);
15114 }
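	/* Flag semantics (hedged reading of the mask names): OUI_ONLY keeps
	 * the caller-supplied OUI and lets firmware randomize only the lower
	 * three (NIC-specific) bytes; SET_MAC_UNASSOC applies the randomized
	 * MAC only while unassociated, i.e. for PNO/scan traffic.
	 */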
15115
15116 DHD_ERROR(("Setting rand mac oui to FW - %02x:%02x:%02x\n", rand_mac_oui[0],
15117 rand_mac_oui[1], rand_mac_oui[2]));
15118
15119 err = dhd_iovar(dhd, 0, "pfn_macaddr", (char *)&wl_cfg, sizeof(wl_cfg), NULL, 0, TRUE);
15120 if (err < 0) {
15121 DHD_ERROR(("%s : failed to execute pfn_macaddr %d\n", __FUNCTION__, err));
15122 }
15123 return err;
15124 }
15125
15126 #ifdef RTT_SUPPORT
15127 #ifdef WL_CFG80211
15128 /* Linux wrapper to call common dhd_rtt_set_cfg */
15129 int
15130 dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf)
15131 {
15132 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15133
15134 return (dhd_rtt_set_cfg(&dhd->pub, buf));
15135 }
15136
15137 int
15138 dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt)
15139 {
15140 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15141
15142 return (dhd_rtt_stop(&dhd->pub, mac_list, mac_cnt));
15143 }
15144
15145 int
15146 dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx, dhd_rtt_compl_noti_fn noti_fn)
15147 {
15148 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15149
15150 return (dhd_rtt_register_noti_callback(&dhd->pub, ctx, noti_fn));
15151 }
15152
15153 int
15154 dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn)
15155 {
15156 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15157
15158 return (dhd_rtt_unregister_noti_callback(&dhd->pub, noti_fn));
15159 }
15160
15161 int
15162 dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa)
15163 {
15164 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15165
15166 return (dhd_rtt_capability(&dhd->pub, capa));
15167 }
15168
15169 int
15170 dhd_dev_rtt_avail_channel(struct net_device *dev, wifi_channel_info *channel_info)
15171 {
15172 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15173 return (dhd_rtt_avail_channel(&dhd->pub, channel_info));
15174 }
15175
15176 int
15177 dhd_dev_rtt_enable_responder(struct net_device *dev, wifi_channel_info *channel_info)
15178 {
15179 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15180 return (dhd_rtt_enable_responder(&dhd->pub, channel_info));
15181 }
15182
15183 int dhd_dev_rtt_cancel_responder(struct net_device *dev)
15184 {
15185 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15186 return (dhd_rtt_cancel_responder(&dhd->pub));
15187 }
15188 #endif /* WL_CFG80211 */
15189 #endif /* RTT_SUPPORT */
15190
15191 #ifdef KEEP_ALIVE
15192 #define KA_TEMP_BUF_SIZE 512
15193 #define KA_FRAME_SIZE 300
15194
15195 int
15196 dhd_dev_start_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id, uint8 *ip_pkt,
15197 uint16 ip_pkt_len, uint8* src_mac, uint8* dst_mac, uint32 period_msec)
15198 {
15199 const int ETHERTYPE_LEN = 2;
15200 char *pbuf = NULL;
15201 const char *str;
15202 wl_mkeep_alive_pkt_t mkeep_alive_pkt;
15203 wl_mkeep_alive_pkt_t *mkeep_alive_pktp = NULL;
15204 int buf_len = 0;
15205 int str_len = 0;
15206 int res = BCME_ERROR;
15207 int len_bytes = 0;
15208 int i = 0;
15209
15210 /* ether frame buffer sized to hold both the max IP pkt (256 bytes) and the ether header */
15211 char *pmac_frame = NULL;
15212 char *pmac_frame_begin = NULL;
15213
15214 /*
15215 * The mkeep_alive packet is for STA interface only; if the bss is configured as AP,
15216 * dongle shall reject a mkeep_alive request.
15217 */
15218 if (!dhd_support_sta_mode(dhd_pub))
15219 return res;
15220
15221 DHD_TRACE(("%s execution\n", __FUNCTION__));
15222
15223 if ((pbuf = kzalloc(KA_TEMP_BUF_SIZE, GFP_KERNEL)) == NULL) {
15224 DHD_ERROR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE));
15225 res = BCME_NOMEM;
15226 return res;
15227 }
15228
15229 if ((pmac_frame = kzalloc(KA_FRAME_SIZE, GFP_KERNEL)) == NULL) {
15230 DHD_ERROR(("failed to allocate mac_frame with size %d\n", KA_FRAME_SIZE));
15231 res = BCME_NOMEM;
15232 goto exit;
15233 }
15234 pmac_frame_begin = pmac_frame;
15235
15236 /*
15237 * Get current mkeep-alive status.
15238 */
15239 res = dhd_iovar(dhd_pub, 0, "mkeep_alive", &mkeep_alive_id, sizeof(mkeep_alive_id), pbuf,
15240 KA_TEMP_BUF_SIZE, FALSE);
15241 if (res < 0) {
15242 DHD_ERROR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__, res));
15243 goto exit;
15244 } else {
15245 /* Check whether the requested ID is already occupied */
15246 mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) pbuf;
15247 if (dtoh32(mkeep_alive_pktp->period_msec) != 0) {
15248 DHD_ERROR(("%s: Get mkeep_alive failed, ID %u is in use.\n",
15249 __FUNCTION__, mkeep_alive_id));
15250
15251 /* Current occupied ID info */
15252 DHD_ERROR(("%s: mkeep_alive\n", __FUNCTION__));
15253 DHD_ERROR((" Id : %d\n"
15254 " Period: %d msec\n"
15255 " Length: %d\n"
15256 " Packet: 0x",
15257 mkeep_alive_pktp->keep_alive_id,
15258 dtoh32(mkeep_alive_pktp->period_msec),
15259 dtoh16(mkeep_alive_pktp->len_bytes)));
15260
15261 for (i = 0; i < mkeep_alive_pktp->len_bytes; i++) {
15262 DHD_ERROR(("%02x", mkeep_alive_pktp->data[i]));
15263 }
15264 DHD_ERROR(("\n"));
15265
15266 res = BCME_NOTFOUND;
15267 goto exit;
15268 }
15269 }
15270
15271 /* Request the specified ID */
15272 memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t));
15273 memset(pbuf, 0, KA_TEMP_BUF_SIZE);
15274 str = "mkeep_alive";
15275 str_len = strlen(str);
15276 strncpy(pbuf, str, str_len);
15277 pbuf[str_len] = '\0';
15278
15279 mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (pbuf + str_len + 1);
15280 mkeep_alive_pkt.period_msec = htod32(period_msec);
15281 buf_len = str_len + 1;
15282 mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
15283 mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
15284
15285 /* ID assigned */
15286 mkeep_alive_pkt.keep_alive_id = mkeep_alive_id;
15287
15288 buf_len += WL_MKEEP_ALIVE_FIXED_LEN;
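	/*
	 * Resulting iovar buffer layout (sketch):
	 *
	 *   "mkeep_alive\0" | wl_mkeep_alive_pkt_t fixed header | ether frame
	 *
	 * so the final buf_len is str_len + 1 + WL_MKEEP_ALIVE_FIXED_LEN +
	 * len_bytes, with the ether frame copied in below.
	 */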
15289
15290 /*
15291 * Build up Ethernet Frame
15292 */
15293
15294 /* Mapping dest mac addr */
15295 memcpy(pmac_frame, dst_mac, ETHER_ADDR_LEN);
15296 pmac_frame += ETHER_ADDR_LEN;
15297
15298 /* Mapping src mac addr */
15299 memcpy(pmac_frame, src_mac, ETHER_ADDR_LEN);
15300 pmac_frame += ETHER_ADDR_LEN;
15301
15302 /* Mapping Ethernet type (ETHERTYPE_IP: 0x0800) */
15303 *(pmac_frame++) = 0x08;
15304 *(pmac_frame++) = 0x00;
15305
15306 /* Mapping IP pkt */
15307 memcpy(pmac_frame, ip_pkt, ip_pkt_len);
15308 pmac_frame += ip_pkt_len;
15309
15310 /*
15311 * Length of ether frame (assumed to be all hex bytes)
15312 * = src mac + dst mac + ether type + ip pkt len
15313 */
15314 len_bytes = ETHER_ADDR_LEN*2 + ETHERTYPE_LEN + ip_pkt_len;
15315 memcpy(mkeep_alive_pktp->data, pmac_frame_begin, len_bytes);
15316 buf_len += len_bytes;
15317 mkeep_alive_pkt.len_bytes = htod16(len_bytes);
15318
15319 /*
15320 * Keep-alive attributes are set in local variable (mkeep_alive_pkt), and
15321 * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no
15322 * guarantee that the buffer is properly aligned.
15323 */
15324 memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN);
15325
15326 res = dhd_wl_ioctl_cmd(dhd_pub, WLC_SET_VAR, pbuf, buf_len, TRUE, 0);
15327 exit:
15328 kfree(pmac_frame_begin);
15329 kfree(pbuf);
15330 return res;
15331 }
15332
15333 int
15334 dhd_dev_stop_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id)
15335 {
15336 char *pbuf;
15337 wl_mkeep_alive_pkt_t mkeep_alive_pkt;
15338 wl_mkeep_alive_pkt_t *mkeep_alive_pktp;
15339 int res = BCME_ERROR;
15340 int i;
15341
15342 /*
15343 * The mkeep_alive packet is for STA interface only; if the bss is configured as AP,
15344 * dongle shall reject a mkeep_alive request.
15345 */
15346 if (!dhd_support_sta_mode(dhd_pub))
15347 return res;
15348
15349 DHD_TRACE(("%s execution\n", __FUNCTION__));
15350
15351 /*
15352 * Get current mkeep-alive status. Skip ID 0 which is being used for NULL pkt.
15353 */
15354 if ((pbuf = kmalloc(KA_TEMP_BUF_SIZE, GFP_KERNEL)) == NULL) {
15355 DHD_ERROR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE));
15356 return res;
15357 }
15358
15359 res = dhd_iovar(dhd_pub, 0, "mkeep_alive", &mkeep_alive_id,
15360 sizeof(mkeep_alive_id), pbuf, KA_TEMP_BUF_SIZE, FALSE);
15361 if (res < 0) {
15362 DHD_ERROR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__, res));
15363 goto exit;
15364 } else {
15365 /* Check occupied ID */
15366 mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) pbuf;
15367 DHD_INFO(("%s: mkeep_alive\n", __FUNCTION__));
15368 DHD_INFO((" Id : %d\n"
15369 " Period: %d msec\n"
15370 " Length: %d\n"
15371 " Packet: 0x",
15372 mkeep_alive_pktp->keep_alive_id,
15373 dtoh32(mkeep_alive_pktp->period_msec),
15374 dtoh16(mkeep_alive_pktp->len_bytes)));
15375
15376 for (i = 0; i < mkeep_alive_pktp->len_bytes; i++) {
15377 DHD_INFO(("%02x", mkeep_alive_pktp->data[i]));
15378 }
15379 DHD_INFO(("\n"));
15380 }
15381
15382 /* Make it stop if available */
15383 if (dtoh32(mkeep_alive_pktp->period_msec) != 0) {
15384 DHD_INFO(("stop mkeep_alive on ID %d\n", mkeep_alive_id));
15385 memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t));
15386
15387 mkeep_alive_pkt.period_msec = 0;
15388 mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
15389 mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
15390 mkeep_alive_pkt.keep_alive_id = mkeep_alive_id;
15391
15392 res = dhd_iovar(dhd_pub, 0, "mkeep_alive",
15393 (char *)&mkeep_alive_pkt,
15394 WL_MKEEP_ALIVE_FIXED_LEN, NULL, 0, TRUE);
15395 } else {
15396 DHD_ERROR(("%s: ID %u does not exist.\n", __FUNCTION__, mkeep_alive_id));
15397 res = BCME_NOTFOUND;
15398 }
15399 exit:
15400 kfree(pbuf);
15401 return res;
15402 }
15403 #endif /* KEEP_ALIVE */
15404
15405 #if defined(PKT_FILTER_SUPPORT) && defined(APF)
15406 static void _dhd_apf_lock_local(dhd_info_t *dhd)
15407 {
15408 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
15409 if (dhd) {
15410 mutex_lock(&dhd->dhd_apf_mutex);
15411 }
15412 #endif
15413 }
15414
15415 static void _dhd_apf_unlock_local(dhd_info_t *dhd)
15416 {
15417 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
15418 if (dhd) {
15419 mutex_unlock(&dhd->dhd_apf_mutex);
15420 }
15421 #endif
15422 }
15423
15424 static int
15425 __dhd_apf_add_filter(struct net_device *ndev, uint32 filter_id,
15426 u8* program, uint32 program_len)
15427 {
15428 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
15429 dhd_pub_t *dhdp = &dhd->pub;
15430 wl_pkt_filter_t * pkt_filterp;
15431 wl_apf_program_t *apf_program;
15432 char *buf;
15433 u32 cmd_len, buf_len;
15434 int ifidx, ret;
15435 gfp_t kflags;
15436 char cmd[] = "pkt_filter_add";
15437
15438 ifidx = dhd_net2idx(dhd, ndev);
15439 if (ifidx == DHD_BAD_IF) {
15440 DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
15441 return -ENODEV;
15442 }
15443
15444 cmd_len = sizeof(cmd);
15445
15446 /* Reject the request if program_len exceeds the maximum supported
15447 * size or if the program pointer is NULL.
15448 */
15449 if ((program_len > WL_APF_PROGRAM_MAX_SIZE) || (program == NULL)) {
15450 DHD_ERROR(("%s Invalid program_len: %d, program: %pK\n",
15451 __FUNCTION__, program_len, program));
15452 return -EINVAL;
15453 }
15454 buf_len = cmd_len + WL_PKT_FILTER_FIXED_LEN +
15455 WL_APF_PROGRAM_FIXED_LEN + program_len;
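	/*
	 * Buffer layout (sketch): "pkt_filter_add\0" | wl_pkt_filter_t fixed
	 * header | wl_apf_program_t fixed header | APF instructions, which
	 * matches the buf_len computed above.
	 */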
15456
15457 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
15458 buf = kzalloc(buf_len, kflags);
15459 if (unlikely(!buf)) {
15460 DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len));
15461 return -ENOMEM;
15462 }
15463
15464 memcpy(buf, cmd, cmd_len);
15465
15466 pkt_filterp = (wl_pkt_filter_t *) (buf + cmd_len);
15467 pkt_filterp->id = htod32(filter_id);
15468 pkt_filterp->negate_match = htod32(FALSE);
15469 pkt_filterp->type = htod32(WL_PKT_FILTER_TYPE_APF_MATCH);
15470
15471 apf_program = &pkt_filterp->u.apf_program;
15472 apf_program->version = htod16(WL_APF_INTERNAL_VERSION);
15473 apf_program->instr_len = htod16(program_len);
15474 memcpy(apf_program->instrs, program, program_len);
15475
15476 ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx);
15477 if (unlikely(ret)) {
15478 DHD_ERROR(("%s: failed to add APF filter, id=%d, ret=%d\n",
15479 __FUNCTION__, filter_id, ret));
15480 }
15481
15482 if (buf) {
15483 kfree(buf);
15484 }
15485 return ret;
15486 }
15487
15488 static int
15489 __dhd_apf_config_filter(struct net_device *ndev, uint32 filter_id,
15490 uint32 mode, uint32 enable)
15491 {
15492 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
15493 dhd_pub_t *dhdp = &dhd->pub;
15494 wl_pkt_filter_enable_t * pkt_filterp;
15495 char *buf;
15496 u32 cmd_len, buf_len;
15497 int ifidx, ret;
15498 gfp_t kflags;
15499 char cmd[] = "pkt_filter_enable";
15500
15501 ifidx = dhd_net2idx(dhd, ndev);
15502 if (ifidx == DHD_BAD_IF) {
15503 DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
15504 return -ENODEV;
15505 }
15506
15507 cmd_len = sizeof(cmd);
15508 buf_len = cmd_len + sizeof(*pkt_filterp);
15509
15510 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
15511 buf = kzalloc(buf_len, kflags);
15512 if (unlikely(!buf)) {
15513 DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len));
15514 return -ENOMEM;
15515 }
15516
15517 memcpy(buf, cmd, cmd_len);
15518
15519 pkt_filterp = (wl_pkt_filter_enable_t *) (buf + cmd_len);
15520 pkt_filterp->id = htod32(filter_id);
15521 pkt_filterp->enable = htod32(enable);
15522
15523 ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx);
15524 if (unlikely(ret)) {
15525 DHD_ERROR(("%s: failed to enable APF filter, id=%d, ret=%d\n",
15526 __FUNCTION__, filter_id, ret));
15527 goto exit;
15528 }
15529
15530 ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_mode", dhd_master_mode,
15531 WLC_SET_VAR, TRUE, ifidx);
15532 if (unlikely(ret)) {
15533 DHD_ERROR(("%s: failed to set APF filter mode, id=%d, ret=%d\n",
15534 __FUNCTION__, filter_id, ret));
15535 }
15536
15537 exit:
15538 if (buf) {
15539 kfree(buf);
15540 }
15541 return ret;
15542 }
15543
15544 static int
15545 __dhd_apf_delete_filter(struct net_device *ndev, uint32 filter_id)
15546 {
15547 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
15548 dhd_pub_t *dhdp = &dhd->pub;
15549 int ifidx, ret;
15550
15551 ifidx = dhd_net2idx(dhd, ndev);
15552 if (ifidx == DHD_BAD_IF) {
15553 DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
15554 return -ENODEV;
15555 }
15556
15557 ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_delete",
15558 htod32(filter_id), WLC_SET_VAR, TRUE, ifidx);
15559 if (unlikely(ret)) {
15560 DHD_ERROR(("%s: failed to delete APF filter, id=%d, ret=%d\n",
15561 __FUNCTION__, filter_id, ret));
15562 }
15563
15564 return ret;
15565 }
15566
15567 void dhd_apf_lock(struct net_device *dev)
15568 {
15569 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15570 _dhd_apf_lock_local(dhd);
15571 }
15572
15573 void dhd_apf_unlock(struct net_device *dev)
15574 {
15575 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15576 _dhd_apf_unlock_local(dhd);
15577 }
15578
15579 int
15580 dhd_dev_apf_get_version(struct net_device *ndev, uint32 *version)
15581 {
15582 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
15583 dhd_pub_t *dhdp = &dhd->pub;
15584 int ifidx, ret;
15585
15586 if (!FW_SUPPORTED(dhdp, apf)) {
15587 DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__));
15588
15589 /*
15590 * Notify Android framework that APF is not supported by setting
15591 * version as zero.
15592 */
15593 *version = 0;
15594 return BCME_OK;
15595 }
15596
15597 ifidx = dhd_net2idx(dhd, ndev);
15598 if (ifidx == DHD_BAD_IF) {
15599 DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
15600 return -ENODEV;
15601 }
15602
15603 ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_ver", version,
15604 WLC_GET_VAR, FALSE, ifidx);
15605 if (unlikely(ret)) {
15606 DHD_ERROR(("%s: failed to get APF version, ret=%d\n",
15607 __FUNCTION__, ret));
15608 }
15609
15610 return ret;
15611 }
15612
15613 int
15614 dhd_dev_apf_get_max_len(struct net_device *ndev, uint32 *max_len)
15615 {
15616 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
15617 dhd_pub_t *dhdp = &dhd->pub;
15618 int ifidx, ret;
15619
15620 if (!FW_SUPPORTED(dhdp, apf)) {
15621 DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__));
15622 *max_len = 0;
15623 return BCME_OK;
15624 }
15625
15626 ifidx = dhd_net2idx(dhd, ndev);
15627 if (ifidx == DHD_BAD_IF) {
15628 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
15629 return -ENODEV;
15630 }
15631
15632 ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_size_limit", max_len,
15633 WLC_GET_VAR, FALSE, ifidx);
15634 if (unlikely(ret)) {
15635 DHD_ERROR(("%s: failed to get APF size limit, ret=%d\n",
15636 __FUNCTION__, ret));
15637 }
15638
15639 return ret;
15640 }
15641
15642 int
15643 dhd_dev_apf_add_filter(struct net_device *ndev, u8* program,
15644 uint32 program_len)
15645 {
15646 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
15647 dhd_pub_t *dhdp = &dhd->pub;
15648 int ret;
15649
15650 DHD_APF_LOCK(ndev);
15651
15652 /* delete, if filter already exists */
15653 if (dhdp->apf_set) {
15654 ret = __dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID);
15655 if (unlikely(ret)) {
15656 goto exit;
15657 }
15658 dhdp->apf_set = FALSE;
15659 }
15660
15661 ret = __dhd_apf_add_filter(ndev, PKT_FILTER_APF_ID, program, program_len);
15662 if (ret) {
15663 goto exit;
15664 }
15665 dhdp->apf_set = TRUE;
15666
15667 if (dhdp->in_suspend && dhdp->apf_set && !(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
15668 /* Driver is still in (early) suspend state, enable APF filter back */
15669 ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
15670 PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE);
15671 }
15672 exit:
15673 DHD_APF_UNLOCK(ndev);
15674
15675 return ret;
15676 }
15677
15678 int
15679 dhd_dev_apf_enable_filter(struct net_device *ndev)
15680 {
15681 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
15682 dhd_pub_t *dhdp = &dhd->pub;
15683 int ret = 0;
15684
15685 DHD_APF_LOCK(ndev);
15686
15687 if (dhdp->apf_set && !(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
15688 ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
15689 PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE);
15690 }
15691
15692 DHD_APF_UNLOCK(ndev);
15693
15694 return ret;
15695 }
15696
15697 int
dhd_dev_apf_disable_filter(struct net_device * ndev)15698 dhd_dev_apf_disable_filter(struct net_device *ndev)
15699 {
15700 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
15701 dhd_pub_t *dhdp = &dhd->pub;
15702 int ret = 0;
15703
15704 DHD_APF_LOCK(ndev);
15705
15706 if (dhdp->apf_set) {
15707 ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
15708 PKT_FILTER_MODE_FORWARD_ON_MATCH, FALSE);
15709 }
15710
15711 DHD_APF_UNLOCK(ndev);
15712
15713 return ret;
15714 }
15715
15716 int
dhd_dev_apf_delete_filter(struct net_device * ndev)15717 dhd_dev_apf_delete_filter(struct net_device *ndev)
15718 {
15719 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
15720 dhd_pub_t *dhdp = &dhd->pub;
15721 int ret = 0;
15722
15723 DHD_APF_LOCK(ndev);
15724
15725 if (dhdp->apf_set) {
15726 ret = __dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID);
15727 if (!ret) {
15728 dhdp->apf_set = FALSE;
15729 }
15730 }
15731
15732 DHD_APF_UNLOCK(ndev);
15733
15734 return ret;
15735 }
15736 #endif /* PKT_FILTER_SUPPORT && APF */
15737
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
static void dhd_hang_process(void *dhd_info, void *event_info, u8 event)
{
	dhd_info_t *dhd;
	struct net_device *dev;

	dhd = (dhd_info_t *)dhd_info;
	if (!dhd || !dhd->iflist[0])
		return;
	dev = dhd->iflist[0]->net;

	if (dev) {
		/*
		 * For HW2, dev_close must be done so the upper layer can
		 * recover after a hang. For Interposer, skip dev_close so
		 * that dhd iovars can still be used to take a socram dump
		 * after the crash; also skip for HW4, where hang events are
		 * handled differently.
		 */
#if !defined(CUSTOMER_HW2_INTERPOSER)
		rtnl_lock();
		dev_close(dev);
		rtnl_unlock();
#endif
#if defined(WL_WIRELESS_EXT)
		wl_iw_send_priv_event(dev, "HANG");
#endif
#if defined(WL_CFG80211)
		wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
#endif
	}
}

#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
extern dhd_pub_t *link_recovery;
void dhd_host_recover_link(void)
{
	DHD_ERROR(("****** %s ******\n", __FUNCTION__));
	link_recovery->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
	dhd_bus_set_linkdown(link_recovery, TRUE);
	dhd_os_send_hang_message(link_recovery);
}
EXPORT_SYMBOL(dhd_host_recover_link);
#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */

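/*
 * Queue a HANG notification toward user space. The notification itself
 * is delivered from the deferred work queue (dhd_hang_process) and is
 * sent at most once until hang_was_sent is reset.
 */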
int dhd_os_send_hang_message(dhd_pub_t *dhdp)
{
	int ret = 0;
	if (dhdp) {
#if defined(DHD_HANG_SEND_UP_TEST)
		if (dhdp->req_hang_type) {
			DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
				__FUNCTION__, dhdp->req_hang_type));
			dhdp->req_hang_type = 0;
		}
#endif /* DHD_HANG_SEND_UP_TEST */

		if (!dhdp->hang_was_sent) {
#if defined(CONFIG_BCM_DETECT_CONSECUTIVE_HANG)
			dhdp->hang_counts++;
			if (dhdp->hang_counts >= MAX_CONSECUTIVE_HANG_COUNTS) {
				DHD_ERROR(("%s, Consecutive hang from Dongle :%u\n",
					__func__, dhdp->hang_counts));
				BUG_ON(1);
			}
#endif /* CONFIG_BCM_DETECT_CONSECUTIVE_HANG */
#ifdef DHD_DEBUG_UART
			/* If the PCIe link is down, run the debug UART
			 * application to gather a ramdump from the dongle
			 * over UART.
			 */
			if (!dhdp->info->duart_execute) {
				dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
					(void *)dhdp, DHD_WQ_WORK_DEBUG_UART_DUMP,
					dhd_debug_uart_exec_rd, DHD_WQ_WORK_PRIORITY_HIGH);
			}
#endif /* DHD_DEBUG_UART */
			dhdp->hang_was_sent = 1;
#ifdef BT_OVER_SDIO
			dhdp->is_bt_recovery_required = TRUE;
#endif
			dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dhdp,
				DHD_WQ_WORK_HANG_MSG, dhd_hang_process, DHD_WQ_WORK_PRIORITY_HIGH);
			DHD_ERROR(("%s: Event HANG sent up due to re=%d te=%d s=%d\n", __FUNCTION__,
				dhdp->rxcnt_timeout, dhdp->txcnt_timeout, dhdp->busstate));
		}
	}
	return ret;
}

int net_os_send_hang_message(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	int ret = 0;

	if (dhd) {
		/* Report the FW problem only when hang reporting is enabled */
		if (dhd->pub.hang_report) {
#ifdef BT_OVER_SDIO
			if (netif_running(dev)) {
#endif /* BT_OVER_SDIO */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
				ret = dhd_os_send_hang_message(&dhd->pub);
#else
				ret = wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
#endif
#ifdef BT_OVER_SDIO
			}
			DHD_ERROR(("%s: HANG -> Reset BT\n", __FUNCTION__));
			bcmsdh_btsdio_process_dhd_hang_notification(!netif_running(dev));
#endif /* BT_OVER_SDIO */
		} else {
			DHD_ERROR(("%s: FW HANG ignored (for testing purposes) and not sent up\n",
				__FUNCTION__));
		}
	}
	return ret;
}

int net_os_send_hang_message_reason(struct net_device *dev, const char *string_num)
{
	dhd_info_t *dhd = NULL;
	dhd_pub_t *dhdp = NULL;
	int reason;

	dhd = DHD_DEV_INFO(dev);
	if (dhd) {
		dhdp = &dhd->pub;
	}

	if (!dhd || !dhdp) {
		return 0;
	}

	reason = bcm_strtoul(string_num, NULL, 0);
	DHD_INFO(("%s: Enter, reason=0x%x\n", __FUNCTION__, reason));

	if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) {
		reason = 0;
	}

	dhdp->hang_reason = reason;

	return net_os_send_hang_message(dev);
}
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */

int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned long delay_msec)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	return wifi_platform_set_power(dhd->adapter, on, delay_msec);
}

bool dhd_force_country_change(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	if (dhd && dhd->pub.up)
		return dhd->pub.force_country_change;
	return FALSE;
}

void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
	wl_country_t *cspec)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
#if defined(DHD_BLOB_EXISTENCE_CHECK)
	if (!dhd->pub.is_blob)
#endif /* DHD_BLOB_EXISTENCE_CHECK */
	{
#if defined(CUSTOM_COUNTRY_CODE)
		get_customized_country_code(dhd->adapter, country_iso_code, cspec,
			dhd->pub.dhd_cflags);
#else
		get_customized_country_code(dhd->adapter, country_iso_code, cspec);
#endif /* CUSTOM_COUNTRY_CODE */
	}

	BCM_REFERENCE(dhd);
}

void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
#ifdef WL_CFG80211
	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
#endif

	if (dhd && dhd->pub.up) {
		memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
#ifdef WL_CFG80211
		wl_update_wiphybands(cfg, notify);
#endif
	}
}

void dhd_bus_band_set(struct net_device *dev, uint band)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
#ifdef WL_CFG80211
	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
#endif
	if (dhd && dhd->pub.up) {
#ifdef WL_CFG80211
		wl_update_wiphybands(cfg, true);
#endif
	}
}

int dhd_net_set_fw_path(struct net_device *dev, char *fw)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	if (!fw || fw[0] == '\0')
		return -EINVAL;

	strncpy(dhd->fw_path, fw, sizeof(dhd->fw_path) - 1);
	dhd->fw_path[sizeof(dhd->fw_path)-1] = '\0';

#if defined(SOFTAP)
	if (strstr(fw, "apsta") != NULL) {
		DHD_INFO(("GOT APSTA FIRMWARE\n"));
		ap_fw_loaded = TRUE;
	} else {
		DHD_INFO(("GOT STA FIRMWARE\n"));
		ap_fw_loaded = FALSE;
	}
#endif
	return 0;
}

void dhd_net_if_lock(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	dhd_net_if_lock_local(dhd);
}

void dhd_net_if_unlock(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	dhd_net_if_unlock_local(dhd);
}

static void dhd_net_if_lock_local(dhd_info_t *dhd)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
	if (dhd)
		mutex_lock(&dhd->dhd_net_if_mutex);
#endif
}

static void dhd_net_if_unlock_local(dhd_info_t *dhd)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
	if (dhd)
		mutex_unlock(&dhd->dhd_net_if_mutex);
#endif
}

static void dhd_suspend_lock(dhd_pub_t *pub)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	if (dhd)
		mutex_lock(&dhd->dhd_suspend_mutex);
#endif
}

static void dhd_suspend_unlock(dhd_pub_t *pub)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	if (dhd)
		mutex_unlock(&dhd->dhd_suspend_mutex);
#endif
}

unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags = 0;

	if (dhd)
		spin_lock_irqsave(&dhd->dhd_lock, flags);

	return flags;
}

void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd)
		spin_unlock_irqrestore(&dhd->dhd_lock, flags);
}

/* Linux-specific multipurpose spinlock API */
void *
dhd_os_spin_lock_init(osl_t *osh)
{
	/* Allocate 4 extra bytes since sizeof(spinlock_t) can be 0 when
	 * CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are both undefined, which
	 * triggers kernel asserts in internal builds.
	 */
	spinlock_t *lock = MALLOC(osh, sizeof(spinlock_t) + 4);
	if (lock)
		spin_lock_init(lock);
	return ((void *)lock);
}

void
dhd_os_spin_lock_deinit(osl_t *osh, void *lock)
{
	if (lock)
		MFREE(osh, lock, sizeof(spinlock_t) + 4);
}

unsigned long
dhd_os_spin_lock(void *lock)
{
	unsigned long flags = 0;

	if (lock)
		spin_lock_irqsave((spinlock_t *)lock, flags);

	return flags;
}

void
dhd_os_spin_unlock(void *lock, unsigned long flags)
{
	if (lock)
		spin_unlock_irqrestore((spinlock_t *)lock, flags);
}

static int
dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
{
	return (atomic_read(&dhd->pend_8021x_cnt));
}

#define MAX_WAIT_FOR_8021X_TX 100

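/*
 * Wait for pending 802.1X (EAPOL) frames to drain, polling in 10 ms
 * slices up to MAX_WAIT_FOR_8021X_TX times; the pending counter is
 * forcibly reset if the wait times out.
 */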
int
dhd_wait_pend8021x(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	int timeout = msecs_to_jiffies(10);
	int ntimes = MAX_WAIT_FOR_8021X_TX;
	int pend = dhd_get_pend_8021x_cnt(dhd);

	while (ntimes && pend) {
		set_current_state(TASK_INTERRUPTIBLE);
		DHD_PERIM_UNLOCK(&dhd->pub);
		schedule_timeout(timeout);
		DHD_PERIM_LOCK(&dhd->pub);
		set_current_state(TASK_RUNNING);
		ntimes--;
		pend = dhd_get_pend_8021x_cnt(dhd);
	}
	if (ntimes == 0) {
		atomic_set(&dhd->pend_8021x_cnt, 0);
		DHD_ERROR(("%s: TIMEOUT\n", __FUNCTION__));
	}
	return pend;
}

#if defined(DHD_DEBUG)
int write_file(const char *file_name, uint32 flags, uint8 *buf, int size)
{
	int ret = 0;
	struct file *fp = NULL;
	mm_segment_t old_fs;
	loff_t pos = 0;

	/* Change to the KERNEL_DS address limit */
	old_fs = get_fs();
	set_fs(KERNEL_DS);

	/* Open the file for writing */
	fp = filp_open(file_name, flags, 0664);
	if (IS_ERR(fp)) {
		DHD_ERROR(("open file error, err = %ld\n", PTR_ERR(fp)));
		ret = -1;
		goto exit;
	}

	/* Write buf to the file */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
	ret = kernel_write(fp, buf, size, &pos);
#else
	ret = vfs_write(fp, buf, size, &pos);
#endif
	if (ret < 0) {
		DHD_ERROR(("write file error, err = %d\n", ret));
		goto exit;
	}

	/* Sync the file from the filesystem to physical media */
	ret = vfs_fsync(fp, 0);
	if (ret < 0) {
		DHD_ERROR(("sync file error, error = %d\n", ret));
		goto exit;
	}
	ret = BCME_OK;

exit:
	/* Close the file before returning */
	if (!IS_ERR(fp))
		filp_close(fp, current->files);

	/* Restore the previous address limit */
	set_fs(old_fs);

	return ret;
}
#endif
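/*
 * Illustrative usage (the path is hypothetical): dump a buffer to a
 * file, creating it if needed and truncating any previous contents:
 *
 *	write_file("/data/dhd_dbg.bin", O_CREAT | O_WRONLY | O_TRUNC,
 *		buf, len);
 */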

#ifdef DHD_DEBUG
static void
dhd_convert_memdump_type_to_str(uint32 type, char *buf)
{
	char *type_str = NULL;

	switch (type) {
		case DUMP_TYPE_RESUMED_ON_TIMEOUT:
			type_str = "resumed_on_timeout";
			break;
		case DUMP_TYPE_D3_ACK_TIMEOUT:
			type_str = "D3_ACK_timeout";
			break;
		case DUMP_TYPE_DONGLE_TRAP:
			type_str = "Dongle_Trap";
			break;
		case DUMP_TYPE_MEMORY_CORRUPTION:
			type_str = "Memory_Corruption";
			break;
		case DUMP_TYPE_PKTID_AUDIT_FAILURE:
			type_str = "PKTID_AUDIT_Fail";
			break;
		case DUMP_TYPE_PKTID_INVALID:
			type_str = "PKTID_INVALID";
			break;
		case DUMP_TYPE_SCAN_TIMEOUT:
			type_str = "SCAN_timeout";
			break;
		case DUMP_TYPE_JOIN_TIMEOUT:
			type_str = "JOIN_timeout";
			break;
		case DUMP_TYPE_SCAN_BUSY:
			type_str = "SCAN_Busy";
			break;
		case DUMP_TYPE_BY_SYSDUMP:
			type_str = "BY_SYSDUMP";
			break;
		case DUMP_TYPE_BY_LIVELOCK:
			type_str = "BY_LIVELOCK";
			break;
		case DUMP_TYPE_AP_LINKUP_FAILURE:
			type_str = "BY_AP_LINK_FAILURE";
			break;
		case DUMP_TYPE_AP_ABNORMAL_ACCESS:
			type_str = "INVALID_ACCESS";
			break;
		case DUMP_TYPE_CFG_VENDOR_TRIGGERED:
			type_str = "CFG_VENDOR_TRIGGERED";
			break;
		case DUMP_TYPE_RESUMED_ON_TIMEOUT_RX:
			type_str = "ERROR_RX_TIMED_OUT";
			break;
		case DUMP_TYPE_RESUMED_ON_TIMEOUT_TX:
			type_str = "ERROR_TX_TIMED_OUT";
			break;
		case DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR:
			type_str = "BY_INVALID_RING_RDWR";
			break;
		case DUMP_TYPE_DONGLE_HOST_EVENT:
			type_str = "BY_DONGLE_HOST_EVENT";
			break;
		case DUMP_TYPE_TRANS_ID_MISMATCH:
			type_str = "BY_TRANS_ID_MISMATCH";
			break;
		case DUMP_TYPE_HANG_ON_IFACE_OP_FAIL:
			type_str = "HANG_IFACE_OP_FAIL";
			break;
#ifdef SUPPORT_LINKDOWN_RECOVERY
		case DUMP_TYPE_READ_SHM_FAIL:
			type_str = "READ_SHM_FAIL";
			break;
#endif /* SUPPORT_LINKDOWN_RECOVERY */
		default:
			type_str = "Unknown_type";
			break;
	}

	strncpy(buf, type_str, strlen(type_str));
	buf[strlen(type_str)] = 0;
}

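/*
 * Write a SOCRAM dump to persistent storage. The target directory and
 * open flags are platform-specific, and the memdump type plus a
 * boot-time timestamp are encoded in the file name.
 */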
int
write_dump_to_file(dhd_pub_t *dhd, uint8 *buf, int size, char *fname)
{
	int ret = 0;
	char memdump_path[128];
	char memdump_type[32];
	struct timespec64 curtime;
	uint32 file_mode;

	/* Init the file name */
	memset(memdump_path, 0, sizeof(memdump_path));
	memset(memdump_type, 0, sizeof(memdump_type));
	curtime = ktime_to_timespec64(ktime_get_boottime());
	dhd_convert_memdump_type_to_str(dhd->memdump_type, memdump_type);
#ifdef CUSTOMER_HW4_DEBUG
	snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%ld.%ld",
		DHD_COMMON_DUMP_PATH, fname, memdump_type,
		(unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_nsec/1000);
	file_mode = O_CREAT | O_WRONLY | O_SYNC;
#elif defined(CUSTOMER_HW2)
	snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%ld.%ld",
		"/data/misc/wifi/", fname, memdump_type,
		(unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_nsec/1000);
	file_mode = O_CREAT | O_WRONLY | O_SYNC;
#elif (defined(BOARD_PANDA) || defined(__ARM_ARCH_7A__))
	snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%ld.%ld",
		"/data/misc/wifi/", fname, memdump_type,
		(unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_nsec/1000);
	file_mode = O_CREAT | O_WRONLY;
#else
	snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%ld.%ld",
		"/installmedia/", fname, memdump_type,
		(unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_nsec/1000);
	/* The O_SYNC flag is required for Brix Android, as BUG_ON is called
	 * immediately after collecting the socram dump, so the write must
	 * reach the file instead of sitting in the page cache.
	 */
	file_mode = O_CREAT | O_WRONLY | O_SYNC;
	{
		struct file *fp = filp_open(memdump_path, file_mode, 0664);
		/* Check if this is a live Brix image with /installmedia, else use /data */
		if (IS_ERR(fp)) {
			DHD_ERROR(("open file %s, try /data/\n", memdump_path));
			snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%ld.%ld",
				"/data/", fname, memdump_type,
				(unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_nsec/1000);
		} else {
			filp_close(fp, NULL);
		}
	}
#endif /* CUSTOMER_HW4_DEBUG */

	/* Print the SOCRAM dump file path */
	DHD_ERROR(("%s: file_path = %s\n", __FUNCTION__, memdump_path));

	/* Write the file */
	ret = write_file(memdump_path, file_mode, buf, size);

	return ret;
}
#endif /* DHD_DEBUG */

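/*
 * Arm any pending rx/ctrl wake-lock timeouts accumulated via
 * dhd_os_wake_lock_{rx,ctrl}_timeout_enable(), then clear them.
 * Returns the larger of the two pending timeout values (in ms).
 */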
int dhd_os_wake_lock_timeout(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
		ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ?
			dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable;
#ifdef CONFIG_HAS_WAKELOCK
		if (dhd->wakelock_rx_timeout_enable)
			wake_lock_timeout(&dhd->wl_rxwake,
				msecs_to_jiffies(dhd->wakelock_rx_timeout_enable));
		if (dhd->wakelock_ctrl_timeout_enable)
			wake_lock_timeout(&dhd->wl_ctrlwake,
				msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable));
#endif
		dhd->wakelock_rx_timeout_enable = 0;
		dhd->wakelock_ctrl_timeout_enable = 0;
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return ret;
}

int net_os_wake_lock_timeout(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	int ret = 0;

	if (dhd)
		ret = dhd_os_wake_lock_timeout(&dhd->pub);
	return ret;
}

int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;

	if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
		if (val > dhd->wakelock_rx_timeout_enable)
			dhd->wakelock_rx_timeout_enable = val;
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return 0;
}

int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;

	if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
		if (val > dhd->wakelock_ctrl_timeout_enable)
			dhd->wakelock_ctrl_timeout_enable = val;
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return 0;
}

int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;

	if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
		dhd->wakelock_ctrl_timeout_enable = 0;
#ifdef CONFIG_HAS_WAKELOCK
		if (wake_lock_active(&dhd->wl_ctrlwake))
			wake_unlock(&dhd->wl_ctrlwake);
#endif
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return 0;
}

int net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	int ret = 0;

	if (dhd)
		ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val);
	return ret;
}

int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	int ret = 0;

	if (dhd)
		ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val);
	return ret;
}

#if defined(DHD_TRACE_WAKE_LOCK)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
#include <linux/hashtable.h>
#else
#include <linux/hash.h>
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
/* Define a 2^5 = 32 bucket hash table */
DEFINE_HASHTABLE(wklock_history, 5);
#else
/* Define a 2^5 = 32 bucket hash table */
struct hlist_head wklock_history[32] = { [0 ... 31] = HLIST_HEAD_INIT };
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */

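/*
 * Wake-lock call-site statistics: each record is keyed by the caller's
 * return address and counts lock/unlock/waive/restore events so that
 * wake-lock leaks can be traced back to their origin.
 */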
int trace_wklock_onoff = 1;

typedef enum dhd_wklock_type {
	DHD_WAKE_LOCK,
	DHD_WAKE_UNLOCK,
	DHD_WAIVE_LOCK,
	DHD_RESTORE_LOCK
} dhd_wklock_t;

struct wk_trace_record {
	unsigned long addr;		/* address of the calling instruction */
	dhd_wklock_t lock_type;		/* lock type */
	unsigned long long counter;	/* event counter */
	struct hlist_node wklock_node;	/* hash node */
};

static struct wk_trace_record *find_wklock_entry(unsigned long addr)
{
	struct wk_trace_record *wklock_info;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
	hash_for_each_possible(wklock_history, wklock_info, wklock_node, addr)
#else
	struct hlist_node *entry;
	int index = hash_long(addr, ilog2(ARRAY_SIZE(wklock_history)));
	hlist_for_each_entry(wklock_info, entry, &wklock_history[index], wklock_node)
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
	{
		if (wklock_info->addr == addr) {
			return wklock_info;
		}
	}
	return NULL;
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
#define HASH_ADD(hashtable, node, key) \
	do { \
		hash_add(hashtable, node, key); \
	} while (0)
#else
#define HASH_ADD(hashtable, node, key) \
	do { \
		int index = hash_long(key, ilog2(ARRAY_SIZE(hashtable))); \
		hlist_add_head(node, &hashtable[index]); \
	} while (0)
#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */

#define STORE_WKLOCK_RECORD(wklock_type) \
	do { \
		struct wk_trace_record *wklock_info = NULL; \
		unsigned long func_addr = (unsigned long)__builtin_return_address(0); \
		wklock_info = find_wklock_entry(func_addr); \
		if (wklock_info) { \
			if (wklock_type == DHD_WAIVE_LOCK || wklock_type == DHD_RESTORE_LOCK) { \
				wklock_info->counter = dhd->wakelock_counter; \
			} else { \
				wklock_info->counter++; \
			} \
		} else { \
			wklock_info = kzalloc(sizeof(*wklock_info), GFP_ATOMIC); \
			if (!wklock_info) { \
				printk("Can't allocate wk_trace_record\n"); \
			} else { \
				wklock_info->addr = func_addr; \
				wklock_info->lock_type = wklock_type; \
				if (wklock_type == DHD_WAIVE_LOCK || \
						wklock_type == DHD_RESTORE_LOCK) { \
					wklock_info->counter = dhd->wakelock_counter; \
				} else { \
					wklock_info->counter++; \
				} \
				HASH_ADD(wklock_history, &wklock_info->wklock_node, func_addr); \
			} \
		} \
	} while (0)

static inline void dhd_wk_lock_rec_dump(void)
{
	int bkt;
	struct wk_trace_record *wklock_info;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
	hash_for_each(wklock_history, bkt, wklock_info, wklock_node)
#else
	struct hlist_node *entry = NULL;
	int max_index = ARRAY_SIZE(wklock_history);
	for (bkt = 0; bkt < max_index; bkt++)
		hlist_for_each_entry(wklock_info, entry, &wklock_history[bkt], wklock_node)
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
	{
		switch (wklock_info->lock_type) {
			case DHD_WAKE_LOCK:
				printk("wakelock lock : %pS lock_counter : %llu\n",
					(void *)wklock_info->addr, wklock_info->counter);
				break;
			case DHD_WAKE_UNLOCK:
				printk("wakelock unlock : %pS, unlock_counter : %llu\n",
					(void *)wklock_info->addr, wklock_info->counter);
				break;
			case DHD_WAIVE_LOCK:
				printk("wakelock waive : %pS before_waive : %llu\n",
					(void *)wklock_info->addr, wklock_info->counter);
				break;
			case DHD_RESTORE_LOCK:
				printk("wakelock restore : %pS, after_waive : %llu\n",
					(void *)wklock_info->addr, wklock_info->counter);
				break;
		}
	}
}

static void dhd_wk_lock_trace_init(struct dhd_info *dhd)
{
	unsigned long flags;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
	int i;
#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */

	spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
	hash_init(wklock_history);
#else
	for (i = 0; i < ARRAY_SIZE(wklock_history); i++)
		INIT_HLIST_HEAD(&wklock_history[i]);
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
	spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
}

static void dhd_wk_lock_trace_deinit(struct dhd_info *dhd)
{
	int bkt;
	struct wk_trace_record *wklock_info;
	struct hlist_node *tmp;
	unsigned long flags;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
	struct hlist_node *entry = NULL;
	int max_index = ARRAY_SIZE(wklock_history);
#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */

	spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
	hash_for_each_safe(wklock_history, bkt, tmp, wklock_info, wklock_node)
#else
	for (bkt = 0; bkt < max_index; bkt++)
		hlist_for_each_entry_safe(wklock_info, entry, tmp,
			&wklock_history[bkt], wklock_node)
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
	{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
		hash_del(&wklock_info->wklock_node);
#else
		hlist_del_init(&wklock_info->wklock_node);
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
		kfree(wklock_info);
	}
	spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
}

void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
	unsigned long flags;

	printk(KERN_ERR "DHD Printing wl_wake Lock/Unlock Record\r\n");
	spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
	dhd_wk_lock_rec_dump();
	spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
}
#else
#define STORE_WKLOCK_RECORD(wklock_type)
#endif /* DHD_TRACE_WAKE_LOCK */

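/*
 * Take the main driver wake lock. The underlying OS wake source is only
 * acquired on the 0 -> 1 counter transition, and never while wake locks
 * are waived; returns the new counter value.
 */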
int dhd_os_wake_lock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
		if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
#ifdef CONFIG_HAS_WAKELOCK
			wake_lock(&dhd->wl_wifi);
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
			dhd_bus_dev_pm_stay_awake(pub);
#endif
		}
#ifdef DHD_TRACE_WAKE_LOCK
		if (trace_wklock_onoff) {
			STORE_WKLOCK_RECORD(DHD_WAKE_LOCK);
		}
#endif /* DHD_TRACE_WAKE_LOCK */
		dhd->wakelock_counter++;
		ret = dhd->wakelock_counter;
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}

	return ret;
}

void dhd_event_wake_lock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
#ifdef CONFIG_HAS_WAKELOCK
		wake_lock(&dhd->wl_evtwake);
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
		dhd_bus_dev_pm_stay_awake(pub);
#endif
	}
}

void
dhd_pm_wake_lock_timeout(dhd_pub_t *pub, int val)
{
#ifdef CONFIG_HAS_WAKELOCK
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		wake_lock_timeout(&dhd->wl_pmwake, msecs_to_jiffies(val));
	}
#endif /* CONFIG_HAS_WAKELOCK */
}

void
dhd_txfl_wake_lock_timeout(dhd_pub_t *pub, int val)
{
#ifdef CONFIG_HAS_WAKELOCK
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		wake_lock_timeout(&dhd->wl_txflwake, msecs_to_jiffies(val));
	}
#endif /* CONFIG_HAS_WAKELOCK */
}

int net_os_wake_lock(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	int ret = 0;

	if (dhd)
		ret = dhd_os_wake_lock(&dhd->pub);
	return ret;
}

int dhd_os_wake_unlock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	dhd_os_wake_lock_timeout(pub);
	if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);

		if (dhd->wakelock_counter > 0) {
			dhd->wakelock_counter--;
#ifdef DHD_TRACE_WAKE_LOCK
			if (trace_wklock_onoff) {
				STORE_WKLOCK_RECORD(DHD_WAKE_UNLOCK);
			}
#endif /* DHD_TRACE_WAKE_LOCK */
			if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
#ifdef CONFIG_HAS_WAKELOCK
				wake_unlock(&dhd->wl_wifi);
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
				dhd_bus_dev_pm_relax(pub);
#endif
			}
			ret = dhd->wakelock_counter;
		}
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return ret;
}

void dhd_event_wake_unlock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
#ifdef CONFIG_HAS_WAKELOCK
		wake_unlock(&dhd->wl_evtwake);
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
		dhd_bus_dev_pm_relax(pub);
#endif
	}
}

void dhd_pm_wake_unlock(dhd_pub_t *pub)
{
#ifdef CONFIG_HAS_WAKELOCK
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		/* if wl_pmwake is active, release it */
		if (wake_lock_active(&dhd->wl_pmwake)) {
			wake_unlock(&dhd->wl_pmwake);
		}
	}
#endif /* CONFIG_HAS_WAKELOCK */
}

void dhd_txfl_wake_unlock(dhd_pub_t *pub)
{
#ifdef CONFIG_HAS_WAKELOCK
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		/* if wl_txflwake is active, release it */
		if (wake_lock_active(&dhd->wl_txflwake)) {
			wake_unlock(&dhd->wl_txflwake);
		}
	}
#endif /* CONFIG_HAS_WAKELOCK */
}

int dhd_os_check_wakelock(dhd_pub_t *pub)
{
#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
	KERNEL_VERSION(2, 6, 36)))
	dhd_info_t *dhd;

	if (!pub)
		return 0;
	dhd = (dhd_info_t *)(pub->info);
#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */

#ifdef CONFIG_HAS_WAKELOCK
	/* Indicate to the SD host that it should avoid suspend while internal locks are held */
	if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
		(wake_lock_active(&dhd->wl_wdwake))))
		return 1;
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
	if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
		return 1;
#endif
	return 0;
}

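/*
 * Report whether any DHD wake lock is currently held, logging which
 * ones are active; used to veto host suspend.
 */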
int
dhd_os_check_wakelock_all(dhd_pub_t *pub)
{
#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
	KERNEL_VERSION(2, 6, 36)))
#if defined(CONFIG_HAS_WAKELOCK)
	int l1, l2, l3, l4, l7, l8, l9;
	int l5 = 0, l6 = 0;
	int c, lock_active;
#endif /* CONFIG_HAS_WAKELOCK */
	dhd_info_t *dhd;

	if (!pub) {
		return 0;
	}
	dhd = (dhd_info_t *)(pub->info);
	if (!dhd) {
		return 0;
	}
#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */

#ifdef CONFIG_HAS_WAKELOCK
	c = dhd->wakelock_counter;
	l1 = wake_lock_active(&dhd->wl_wifi);
	l2 = wake_lock_active(&dhd->wl_wdwake);
	l3 = wake_lock_active(&dhd->wl_rxwake);
	l4 = wake_lock_active(&dhd->wl_ctrlwake);
	l7 = wake_lock_active(&dhd->wl_evtwake);
#ifdef BCMPCIE_OOB_HOST_WAKE
	l5 = wake_lock_active(&dhd->wl_intrwake);
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
	l6 = wake_lock_active(&dhd->wl_scanwake);
#endif /* DHD_USE_SCAN_WAKELOCK */
	l8 = wake_lock_active(&dhd->wl_pmwake);
	l9 = wake_lock_active(&dhd->wl_txflwake);
	lock_active = (l1 || l2 || l3 || l4 || l5 || l6 || l7 || l8 || l9);

	/* Indicate to the host that it should avoid suspend while internal locks are held */
	if (lock_active) {
		DHD_ERROR(("%s wakelock c-%d wl-%d wd-%d rx-%d "
			"ctl-%d intr-%d scan-%d evt-%d, pm-%d, txfl-%d\n",
			__FUNCTION__, c, l1, l2, l3, l4, l5, l6, l7, l8, l9));
		return 1;
	}
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
	if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) {
		return 1;
	}
#endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
	return 0;
}

int net_os_wake_unlock(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	int ret = 0;

	if (dhd)
		ret = dhd_os_wake_unlock(&dhd->pub);
	return ret;
}

int dhd_os_wd_wake_lock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
#ifdef CONFIG_HAS_WAKELOCK
		/* if wakelock_wd_counter was never used, lock it at once */
		if (!dhd->wakelock_wd_counter)
			wake_lock(&dhd->wl_wdwake);
#endif
		dhd->wakelock_wd_counter++;
		ret = dhd->wakelock_wd_counter;
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return ret;
}

int dhd_os_wd_wake_unlock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
		if (dhd->wakelock_wd_counter) {
			dhd->wakelock_wd_counter = 0;
#ifdef CONFIG_HAS_WAKELOCK
			wake_unlock(&dhd->wl_wdwake);
#endif
		}
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return ret;
}

#ifdef BCMPCIE_OOB_HOST_WAKE
void
dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val)
{
#ifdef CONFIG_HAS_WAKELOCK
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		wake_lock_timeout(&dhd->wl_intrwake, msecs_to_jiffies(val));
	}
#endif /* CONFIG_HAS_WAKELOCK */
}

void
dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub)
{
#ifdef CONFIG_HAS_WAKELOCK
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		/* if wl_intrwake is active, release it */
		if (wake_lock_active(&dhd->wl_intrwake)) {
			wake_unlock(&dhd->wl_intrwake);
		}
	}
#endif /* CONFIG_HAS_WAKELOCK */
}
#endif /* BCMPCIE_OOB_HOST_WAKE */

#ifdef DHD_USE_SCAN_WAKELOCK
void
dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val)
{
#ifdef CONFIG_HAS_WAKELOCK
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		wake_lock_timeout(&dhd->wl_scanwake, msecs_to_jiffies(val));
	}
#endif /* CONFIG_HAS_WAKELOCK */
}

void
dhd_os_scan_wake_unlock(dhd_pub_t *pub)
{
#ifdef CONFIG_HAS_WAKELOCK
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		/* if wl_scanwake is active, release it */
		if (wake_lock_active(&dhd->wl_scanwake)) {
			wake_unlock(&dhd->wl_scanwake);
		}
	}
#endif /* CONFIG_HAS_WAKELOCK */
}
#endif /* DHD_USE_SCAN_WAKELOCK */

/* Waive wake locks for operations such as IOVARs issued from the suspend
 * path. Must be closed by a paired call to dhd_os_wake_lock_restore().
 * Returns the current watchdog wake-lock counter.
 */
int dhd_os_wake_lock_waive(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);

		/* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
		if (dhd->waive_wakelock == FALSE) {
#ifdef DHD_TRACE_WAKE_LOCK
			if (trace_wklock_onoff) {
				STORE_WKLOCK_RECORD(DHD_WAIVE_LOCK);
			}
#endif /* DHD_TRACE_WAKE_LOCK */
			/* record the current lock status */
			dhd->wakelock_before_waive = dhd->wakelock_counter;
			dhd->waive_wakelock = TRUE;
		}
		ret = dhd->wakelock_wd_counter;
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return ret;
}

int dhd_os_wake_lock_restore(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (!dhd)
		return 0;
	if ((dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) == 0)
		return 0;

	spin_lock_irqsave(&dhd->wakelock_spinlock, flags);

	/* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
	if (!dhd->waive_wakelock)
		goto exit;

	dhd->waive_wakelock = FALSE;
	/* if somebody else acquired the wakelock between dhd_wakelock_waive and
	 * dhd_wakelock_restore, make up for it by calling wake_lock or
	 * pm_stay_awake; if somebody released the lock in between, do the same
	 * by calling wake_unlock or pm_relax
	 */
#ifdef DHD_TRACE_WAKE_LOCK
	if (trace_wklock_onoff) {
		STORE_WKLOCK_RECORD(DHD_RESTORE_LOCK);
	}
#endif /* DHD_TRACE_WAKE_LOCK */

	if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) {
#ifdef CONFIG_HAS_WAKELOCK
		wake_lock(&dhd->wl_wifi);
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
		dhd_bus_dev_pm_stay_awake(&dhd->pub);
#endif
	} else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) {
#ifdef CONFIG_HAS_WAKELOCK
		wake_unlock(&dhd->wl_wifi);
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
		dhd_bus_dev_pm_relax(&dhd->pub);
#endif
	}
	dhd->wakelock_before_waive = 0;
exit:
	ret = dhd->wakelock_wd_counter;
	spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	return ret;
}

void dhd_os_wake_lock_init(struct dhd_info *dhd)
{
	DHD_TRACE(("%s: initialize wake_lock_counters\n", __FUNCTION__));
	dhd->wakelock_counter = 0;
	dhd->wakelock_rx_timeout_enable = 0;
	dhd->wakelock_ctrl_timeout_enable = 0;
#ifdef CONFIG_HAS_WAKELOCK
	/* terence 20161023: wl_wifi is not created/destroyed here; destroying
	 * it while the WLAN interface is down leads to a NULL pointer
	 * dereference in dhd_ioctl_entry.
	 */
	wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
	wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake");
	wake_lock_init(&dhd->wl_evtwake, WAKE_LOCK_SUSPEND, "wlan_evt_wake");
	wake_lock_init(&dhd->wl_pmwake, WAKE_LOCK_SUSPEND, "wlan_pm_wake");
	wake_lock_init(&dhd->wl_txflwake, WAKE_LOCK_SUSPEND, "wlan_txfl_wake");
#ifdef BCMPCIE_OOB_HOST_WAKE
	wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake");
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
	wake_lock_init(&dhd->wl_scanwake, WAKE_LOCK_SUSPEND, "wlan_scan_wake");
#endif /* DHD_USE_SCAN_WAKELOCK */
#endif /* CONFIG_HAS_WAKELOCK */
#ifdef DHD_TRACE_WAKE_LOCK
	dhd_wk_lock_trace_init(dhd);
#endif /* DHD_TRACE_WAKE_LOCK */
}

void dhd_os_wake_lock_destroy(struct dhd_info *dhd)
{
	DHD_TRACE(("%s: deinit wake_lock_counters\n", __FUNCTION__));
#ifdef CONFIG_HAS_WAKELOCK
	dhd->wakelock_counter = 0;
	dhd->wakelock_rx_timeout_enable = 0;
	dhd->wakelock_ctrl_timeout_enable = 0;
	/* terence 20161023: wl_wifi is not destroyed here; destroying it while
	 * the WLAN interface is down leads to a NULL pointer dereference in
	 * dhd_ioctl_entry.
	 */
	wake_lock_destroy(&dhd->wl_rxwake);
	wake_lock_destroy(&dhd->wl_ctrlwake);
	wake_lock_destroy(&dhd->wl_evtwake);
	wake_lock_destroy(&dhd->wl_pmwake);
	wake_lock_destroy(&dhd->wl_txflwake);
#ifdef BCMPCIE_OOB_HOST_WAKE
	wake_lock_destroy(&dhd->wl_intrwake);
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
	wake_lock_destroy(&dhd->wl_scanwake);
#endif /* DHD_USE_SCAN_WAKELOCK */
#ifdef DHD_TRACE_WAKE_LOCK
	dhd_wk_lock_trace_deinit(dhd);
#endif /* DHD_TRACE_WAKE_LOCK */
#endif /* CONFIG_HAS_WAKELOCK */
}

bool dhd_os_check_if_up(dhd_pub_t *pub)
{
	if (!pub)
		return FALSE;
	return pub->up;
}

/* Collect firmware, chip ID and chip revision info */
void dhd_set_version_info(dhd_pub_t *dhdp, char *fw)
{
	int i;

	i = snprintf(info_string, sizeof(info_string),
		" Driver: %s\n Firmware: %s\n CLM: %s ", EPI_VERSION_STR, fw, clm_version);
	printf("%s\n", info_string);

	if (!dhdp)
		return;

	i = snprintf(&info_string[i], sizeof(info_string) - i,
		"\n Chip: %x Rev %x", dhd_conf_get_chip(dhdp),
		dhd_conf_get_chiprev(dhdp));
}

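/*
 * Issue a wl ioctl on behalf of in-driver callers: resolves the
 * interface index from the net_device, holds the wake and perimeter
 * locks around the call, and checks the result for a firmware hang.
 */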
int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd)
{
	int ifidx;
	int ret = 0;
	dhd_info_t *dhd = NULL;

	if (!net || !DEV_PRIV(net)) {
		DHD_ERROR(("%s invalid parameter\n", __FUNCTION__));
		return -EINVAL;
	}

	dhd = DHD_DEV_INFO(net);
	if (!dhd)
		return -EINVAL;

	ifidx = dhd_net2idx(dhd, net);
	if (ifidx == DHD_BAD_IF) {
		DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
		return -ENODEV;
	}

	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);

	ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len);
	dhd_check_hang(net, &dhd->pub, ret);

	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);

	return ret;
}

bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret)
{
	struct net_device *net;

	net = dhd_idx2net(dhdp, ifidx);
	if (!net) {
		DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__, ifidx));
		return -EINVAL;
	}

	return dhd_check_hang(net, dhdp, ret);
}

/* Return the driver instance number */
int dhd_get_instance(dhd_pub_t *dhdp)
{
	return dhdp->info->unit;
}

#ifdef PROP_TXSTATUS

void dhd_wlfc_plat_init(void *dhd)
{
#ifdef USE_DYNAMIC_F2_BLKSIZE
	dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
#endif /* USE_DYNAMIC_F2_BLKSIZE */
	return;
}

void dhd_wlfc_plat_deinit(void *dhd)
{
#ifdef USE_DYNAMIC_F2_BLKSIZE
	dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, sd_f2_blocksize);
#endif /* USE_DYNAMIC_F2_BLKSIZE */
	return;
}

bool dhd_wlfc_skip_fc(void *dhdp, uint8 idx)
{
#ifdef SKIP_WLFC_ON_CONCURRENT

#ifdef WL_CFG80211
	struct net_device *net = dhd_idx2net((dhd_pub_t *)dhdp, idx);
	if (net)
		/* enable flow control in VSDB mode */
		return !(wl_cfg80211_is_concurrent_mode(net));
#else
	return TRUE; /* skip flow control */
#endif /* WL_CFG80211 */

#else
	return FALSE;
#endif /* SKIP_WLFC_ON_CONCURRENT */
	return FALSE;
}
#endif /* PROP_TXSTATUS */

#ifdef BCMDBGFS
#include <linux/debugfs.h>

typedef struct dhd_dbgfs {
	struct dentry *debugfs_dir;
	struct dentry *debugfs_mem;
	dhd_pub_t *dhdp;
	uint32 size;
} dhd_dbgfs_t;

dhd_dbgfs_t g_dbgfs;

extern uint32 dhd_readregl(void *bp, uint32 addr);
extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);

static int
dhd_dbg_state_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

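/*
 * debugfs "mem" read handler: reads are served as aligned 32-bit
 * register accesses through the bus back-end; alignment details are
 * left to the caller.
 */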
static ssize_t
dhd_dbg_state_read(struct file *file, char __user *ubuf,
	size_t count, loff_t *ppos)
{
	ssize_t rval;
	uint32 tmp;
	loff_t pos = *ppos;
	size_t ret;

	if (pos < 0)
		return -EINVAL;
	if (pos >= g_dbgfs.size || !count)
		return 0;
	if (count > g_dbgfs.size - pos)
		count = g_dbgfs.size - pos;

	/* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
	tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3));

	ret = copy_to_user(ubuf, &tmp, 4);
	if (ret == count)
		return -EFAULT;

	count -= ret;
	*ppos = pos + count;
	rval = count;

	return rval;
}

static ssize_t
dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
{
	loff_t pos = *ppos;
	size_t ret;
	uint32 buf;

	if (pos < 0)
		return -EINVAL;
	if (pos >= g_dbgfs.size || !count)
		return 0;
	if (count > g_dbgfs.size - pos)
		count = g_dbgfs.size - pos;

	ret = copy_from_user(&buf, ubuf, sizeof(uint32));
	if (ret == count)
		return -EFAULT;

	/* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
	dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf);

	return count;
}

loff_t
dhd_debugfs_lseek(struct file *file, loff_t off, int whence)
{
	loff_t pos = -1;

	switch (whence) {
		case SEEK_SET:
			pos = off;
			break;
		case SEEK_CUR:
			pos = file->f_pos + off;
			break;
		case SEEK_END:
			pos = g_dbgfs.size - off;
	}
	return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos);
}

static const struct file_operations dhd_dbg_state_ops = {
	.read = dhd_dbg_state_read,
	.write = dhd_debugfs_write,
	.open = dhd_dbg_state_open,
	.llseek = dhd_debugfs_lseek
};

static void dhd_dbgfs_create(void)
{
	if (g_dbgfs.debugfs_dir) {
		g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir,
			NULL, &dhd_dbg_state_ops);
	}
}

void dhd_dbgfs_init(dhd_pub_t *dhdp)
{
	g_dbgfs.dhdp = dhdp;
	g_dbgfs.size = 0x20000000; /* allow access to various core registers */

	g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0);
	if (IS_ERR(g_dbgfs.debugfs_dir)) {
		g_dbgfs.debugfs_dir = NULL;
		return;
	}

	dhd_dbgfs_create();

	return;
}

void dhd_dbgfs_remove(void)
{
	debugfs_remove(g_dbgfs.debugfs_mem);
	debugfs_remove(g_dbgfs.debugfs_dir);

	bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs));
}
#endif /* BCMDBGFS */

#ifdef WLMEDIA_HTSF

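/*
 * WLMEDIA_HTSF: host-TSF latency instrumentation. Selected packets are
 * timestamped on tx and rx against the dongle TSF, and the observed
 * per-hop deltas are binned into histograms for later dumping.
 */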
static
void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf)
{
	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
	struct sk_buff *skb;
	uint32 htsf = 0;
	uint16 dport = 0, oldmagic = 0xACAC;
	char *p1;
	htsfts_t ts;

	/* timestamp packet */

	p1 = (char *)PKTDATA(dhdp->osh, pktbuf);

	if (PKTLEN(dhdp->osh, pktbuf) > HTSF_MINLEN) {
		memcpy(&dport, p1 + 40, 2);
		dport = ntoh16(dport);
	}

	/* timestamp only ICMP or UDP iperf traffic with a port in [tsport, tsport + 20] */
	if (dport >= tsport && dport <= tsport + 20) {
		skb = (struct sk_buff *)pktbuf;

		htsf = dhd_get_htsf(dhd, 0);
		memset(skb->data + 44, 0, 2); /* clear checksum */
		memcpy(skb->data + 82, &oldmagic, 2);
		memcpy(skb->data + 84, &htsf, 4);

		memset(&ts, 0, sizeof(htsfts_t));
		ts.magic = HTSFMAGIC;
		ts.prio = PKTPRIO(pktbuf);
		ts.seqnum = htsf_seqnum++;
		ts.c10 = get_cycles();
		ts.t10 = htsf;
		ts.endmagic = HTSFENDMAGIC;

		memcpy(skb->data + HTSF_HOSTOFFSET, &ts, sizeof(ts));
	}
}

static void dhd_dump_htsfhisto(histo_t *his, char *s)
{
	int pktcnt = 0, i;

	for (i = 0; i < (NUMBIN - 2); i++) {
		printf("%d ", his->bin[i]);
		pktcnt += his->bin[i];
	}
	printf(" max: %d TotPkt: %d neg: %d [%s]\n", his->bin[NUMBIN - 2], pktcnt,
		his->bin[NUMBIN - 1], s);
}

static
void sorttobin(int value, histo_t *histo)
{
	int i, binval = 0;

	if (value < 0) {
		histo->bin[NUMBIN - 1]++;
		return;
	}
	if (value > histo->bin[NUMBIN - 2]) /* store the max value */
		histo->bin[NUMBIN - 2] = value;

	for (i = 0; i < (NUMBIN - 2); i++) {
		binval += 500; /* 500 ms bins */
		if (value <= binval) {
			histo->bin[i]++;
			return;
		}
	}
	histo->bin[NUMBIN - 3]++;
}

static
void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf)
{
	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
	struct sk_buff *skb;
	char *p1;
	uint16 old_magic;
	int d1, d2, d3, end2end;
	htsfts_t *htsf_ts;
	uint32 htsf;

	skb = PKTTONATIVE(dhdp->osh, pktbuf);
	p1 = (char *)PKTDATA(dhdp->osh, pktbuf);

	if (PKTLEN(dhdp->osh, pktbuf) > HTSF_MINLEN) {
		memcpy(&old_magic, p1 + 78, 2);
		htsf_ts = (htsfts_t *)(p1 + HTSF_HOSTOFFSET - 4);
	} else {
		return;
	}

	if (htsf_ts->magic == HTSFMAGIC) {
		htsf_ts->tE0 = dhd_get_htsf(dhd, 0);
		htsf_ts->cE0 = get_cycles();
	}

	if (old_magic == 0xACAC) {
		tspktcnt++;
		htsf = dhd_get_htsf(dhd, 0);
		memcpy(skb->data + 92, &htsf, sizeof(uint32));

		memcpy(&ts[tsidx].t1, skb->data + 80, 16);

		d1 = ts[tsidx].t2 - ts[tsidx].t1;
		d2 = ts[tsidx].t3 - ts[tsidx].t2;
		d3 = ts[tsidx].t4 - ts[tsidx].t3;
		end2end = ts[tsidx].t4 - ts[tsidx].t1;

		sorttobin(d1, &vi_d1);
		sorttobin(d2, &vi_d2);
		sorttobin(d3, &vi_d3);
		sorttobin(end2end, &vi_d4);

		if (end2end > 0 && end2end > maxdelay) {
			maxdelay = end2end;
			maxdelaypktno = tspktcnt;
			memcpy(&maxdelayts, &ts[tsidx], 16);
		}
		if (++tsidx >= TSMAX)
			tsidx = 0;
	}
}

uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx)
{
	uint32 htsf = 0, cur_cycle, delta, delta_us;
	uint32 factor, baseval, baseval2;
	cycles_t t;

	t = get_cycles();
	cur_cycle = t;

	if (cur_cycle > dhd->htsf.last_cycle)
		delta = cur_cycle - dhd->htsf.last_cycle;
	else {
		delta = cur_cycle + (0xFFFFFFFF - dhd->htsf.last_cycle);
	}

	delta = delta >> 4;

	if (dhd->htsf.coef) {
		/* times ten to get the first decimal digit */
		factor = (dhd->htsf.coef * 10 + dhd->htsf.coefdec1);
		baseval = (delta * 10) / factor;
		baseval2 = (delta * 10) / (factor + 1);
		delta_us = (baseval - (((baseval - baseval2) * dhd->htsf.coefdec2)) / 10);
		htsf = (delta_us << 4) + dhd->htsf.last_tsf + HTSF_BUS_DELAY;
	} else {
		DHD_ERROR(("-------dhd->htsf.coef = 0 -------\n"));
	}

	return htsf;
}

17459 static void dhd_dump_latency(void)
17460 {
17461 int i, max = 0;
17462 int d1, d2, d3, d4, d5;
17463
17464 printf("T1 T2 T3 T4 d1 d2 t4-t1 i \n");
17465 for (i = 0; i < TSMAX; i++) {
17466 d1 = ts[i].t2 - ts[i].t1;
17467 d2 = ts[i].t3 - ts[i].t2;
17468 d3 = ts[i].t4 - ts[i].t3;
17469 d4 = ts[i].t4 - ts[i].t1;
17470 d5 = ts[max].t4-ts[max].t1;
17471 if (d4 > d5 && d4 > 0) {
17472 max = i;
17473 }
17474 printf("%08X %08X %08X %08X \t%d %d %d %d i=%d\n",
17475 ts[i].t1, ts[i].t2, ts[i].t3, ts[i].t4,
17476 d1, d2, d3, d4, i);
17477 }
17478
17479 printf("current idx = %d \n", tsidx);
17480
17481 printf("Highest latency %d pkt no.%d total=%d\n", maxdelay, maxdelaypktno, tspktcnt);
17482 printf("%08X %08X %08X %08X \t%d %d %d %d\n",
17483 maxdelayts.t1, maxdelayts.t2, maxdelayts.t3, maxdelayts.t4,
17484 maxdelayts.t2 - maxdelayts.t1,
17485 maxdelayts.t3 - maxdelayts.t2,
17486 maxdelayts.t4 - maxdelayts.t3,
17487 maxdelayts.t4 - maxdelayts.t1);
17488 }
17489
17490
17491 static int
17492 dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx)
17493 {
17494 char buf[32];
17495 int ret;
17496 uint32 s1, s2;
17497
17498 struct tsf {
17499 uint32 low;
17500 uint32 high;
17501 } tsf_buf;
17502
17503 memset(&tsf_buf, 0, sizeof(tsf_buf));
17504
17505 s1 = dhd_get_htsf(dhd, 0);
17506 ret = dhd_iovar(&dhd->pub, ifidx, "tsf", NULL, 0, buf, sizeof(buf), FALSE);
17507 if (ret < 0) {
17508 if (ret == -EIO) {
17509 DHD_ERROR(("%s: tsf is not supported by device\n",
17510 dhd_ifname(&dhd->pub, ifidx)));
17511 return -EOPNOTSUPP;
17512 }
17513 return ret;
17514 }
17515 s2 = dhd_get_htsf(dhd, 0);
17516
17517 memcpy(&tsf_buf, buf, sizeof(tsf_buf));
17518 printf(" TSF_h=%04X lo=%08X Calc:htsf=%08X, coef=%d.%d%d delta=%d ",
17519 tsf_buf.high, tsf_buf.low, s2, dhd->htsf.coef, dhd->htsf.coefdec1,
17520 dhd->htsf.coefdec2, s2-tsf_buf.low);
17521 printf("lasttsf=%08X lastcycle=%08X\n", dhd->htsf.last_tsf, dhd->htsf.last_cycle);
17522 return 0;
17523 }
17524
17525 void htsf_update(dhd_info_t *dhd, void *data)
17526 {
17527 static ulong cur_cycle = 0, prev_cycle = 0;
17528 uint32 htsf, tsf_delta = 0;
17529 uint32 hfactor = 0, cyc_delta, dec1 = 0, dec2, dec3, tmp;
17530 ulong b, a;
17531 cycles_t t;
17532
17533 /* cycles_t in include/mips/timex.h */
17534
17535 t = get_cycles();
17536
17537 prev_cycle = cur_cycle;
17538 cur_cycle = t;
17539
17540 if (cur_cycle > prev_cycle)
17541 cyc_delta = cur_cycle - prev_cycle;
17542 else {
17543 b = cur_cycle;
17544 a = prev_cycle;
17545 cyc_delta = cur_cycle + (0xFFFFFFFF - prev_cycle);
17546 }
17547
17548 if (data == NULL)
17549 printf(" tsf update ata point er is null \n");
17550
17551 memcpy(&prev_tsf, &cur_tsf, sizeof(tsf_t));
17552 memcpy(&cur_tsf, data, sizeof(tsf_t));
17553
17554 if (cur_tsf.low == 0) {
17555 DHD_INFO((" ---- 0 TSF, do not update, return\n"));
17556 return;
17557 }
17558
17559 if (cur_tsf.low > prev_tsf.low)
17560 tsf_delta = (cur_tsf.low - prev_tsf.low);
17561 else {
17562 DHD_INFO((" ---- tsf low is smaller cur_tsf= %08X, prev_tsf=%08X, \n",
17563 cur_tsf.low, prev_tsf.low));
17564 if (cur_tsf.high > prev_tsf.high) {
17565 tsf_delta = cur_tsf.low + (0xFFFFFFFF - prev_tsf.low);
17566 DHD_INFO((" ---- Wrap around tsf coutner adjusted TSF=%08X\n", tsf_delta));
17567 } else {
17568 return; /* do not update */
17569 }
17570 }
17571
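/*
 * Calibration note: hfactor is the integer part of the host-cycles per TSF
 * microsecond ratio; dec1/dec2/dec3 extract the first three decimal digits of
 * the remainder by repeated scaled division, and dec3 is only used to round
 * dec2 (carrying into dec1 and hfactor when a 9 rolls over).
 */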
17572 if (tsf_delta) {
17573 hfactor = cyc_delta / tsf_delta;
17574 tmp = (cyc_delta - (hfactor * tsf_delta))*10;
17575 dec1 = tmp/tsf_delta;
17576 dec2 = ((tmp - dec1*tsf_delta)*10) / tsf_delta;
17577 tmp = (tmp - (dec1*tsf_delta))*10;
17578 dec3 = ((tmp - dec2*tsf_delta)*10) / tsf_delta;
17579
17580 if (dec3 > 4) {
17581 if (dec2 == 9) {
17582 dec2 = 0;
17583 if (dec1 == 9) {
17584 dec1 = 0;
17585 hfactor++;
17586 } else {
17587 dec1++;
17588 }
17589 } else {
17590 dec2++;
17591 }
17592 }
17593 }
17594
17595 if (hfactor) {
17596 htsf = ((cyc_delta * 10) / (hfactor*10+dec1)) + prev_tsf.low;
17597 dhd->htsf.coef = hfactor;
17598 dhd->htsf.last_cycle = cur_cycle;
17599 dhd->htsf.last_tsf = cur_tsf.low;
17600 dhd->htsf.coefdec1 = dec1;
17601 dhd->htsf.coefdec2 = dec2;
17602 } else {
17603 htsf = prev_tsf.low;
17604 }
17605 }
17606
17607 #endif /* WLMEDIA_HTSF */
17608
17609 #ifdef CUSTOM_SET_CPUCORE
17610 void dhd_set_cpucore(dhd_pub_t *dhd, int set)
17611 {
17612 int e_dpc = 0, e_rxf = 0, retry_set = 0;
17613
17614 if (!(dhd->chan_isvht80)) {
17615 DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__, dhd->chan_isvht80));
17616 return;
17617 }
17618
17619 if (DPC_CPUCORE) {
17620 do {
17621 if (set == TRUE) {
17622 e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
17623 cpumask_of(DPC_CPUCORE));
17624 } else {
17625 e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
17626 cpumask_of(PRIMARY_CPUCORE));
17627 }
17628 if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
17629 DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__, e_dpc));
17630 return;
17631 }
17632 if (e_dpc < 0)
17633 OSL_SLEEP(1);
17634 } while (e_dpc < 0);
17635 }
17636 if (RXF_CPUCORE) {
17637 do {
17638 if (set == TRUE) {
17639 e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
17640 cpumask_of(RXF_CPUCORE));
17641 } else {
17642 e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
17643 cpumask_of(PRIMARY_CPUCORE));
17644 }
17645 if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
17646 DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf));
17647 return;
17648 }
17649 if (e_rxf < 0)
17650 OSL_SLEEP(1);
17651 } while (e_rxf < 0);
17652 }
17653 #ifdef DHD_OF_SUPPORT
17654 interrupt_set_cpucore(set, DPC_CPUCORE, PRIMARY_CPUCORE);
17655 #endif /* DHD_OF_SUPPORT */
17656 DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set));
17657
17658 return;
17659 }
17660 #endif /* CUSTOM_SET_CPUCORE */
17661
17662 #ifdef DHD_MCAST_REGEN
17663 /* Get interface specific ap_isolate configuration */
17664 int dhd_get_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx)
17665 {
17666 dhd_info_t *dhd = dhdp->info;
17667 dhd_if_t *ifp;
17668
17669 ASSERT(idx < DHD_MAX_IFS);
17670
17671 ifp = dhd->iflist[idx];
17672
17673 return ifp->mcast_regen_bss_enable;
17674 }
17675
17676 /* Set interface specific mcast_regen configuration */
17677 int dhd_set_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx, int val)
17678 {
17679 dhd_info_t *dhd = dhdp->info;
17680 dhd_if_t *ifp;
17681
17682 ASSERT(idx < DHD_MAX_IFS);
17683
17684 ifp = dhd->iflist[idx];
17685
17686 ifp->mcast_regen_bss_enable = val;
17687
17688 /* Disable rx_pkt_chain feature for interface, if mcast_regen feature
17689 * is enabled
17690 */
17691 dhd_update_rx_pkt_chainable_state(dhdp, idx);
17692 return BCME_OK;
17693 }
17694 #endif /* DHD_MCAST_REGEN */
17695
17696 /* Get interface specific ap_isolate configuration */
17697 int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx)
17698 {
17699 dhd_info_t *dhd = dhdp->info;
17700 dhd_if_t *ifp;
17701
17702 ASSERT(idx < DHD_MAX_IFS);
17703
17704 ifp = dhd->iflist[idx];
17705
17706 return ifp->ap_isolate;
17707 }
17708
17709 /* Set interface specific ap_isolate configuration */
17710 int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val)
17711 {
17712 dhd_info_t *dhd = dhdp->info;
17713 dhd_if_t *ifp;
17714
17715 ASSERT(idx < DHD_MAX_IFS);
17716
17717 ifp = dhd->iflist[idx];
17718
17719 if (ifp)
17720 ifp->ap_isolate = val;
17721
17722 return 0;
17723 }
17724
17725 #ifdef DHD_FW_COREDUMP
17726 #if defined(CONFIG_X86)
17727 #define MEMDUMPINFO_LIVE "/installmedia/.memdump.info"
17728 #define MEMDUMPINFO_INST "/data/.memdump.info"
17729 #endif /* CONFIG_X86 */
17730
17731 #ifdef CUSTOMER_HW4_DEBUG
17732 #define MEMDUMPINFO PLATFORM_PATH".memdump.info"
17733 #elif defined(CUSTOMER_HW2)
17734 #define MEMDUMPINFO "/data/misc/wifi/.memdump.info"
17735 #elif (defined(BOARD_PANDA) || defined(__ARM_ARCH_7A__))
17736 #define MEMDUMPINFO "/data/misc/wifi/.memdump.info"
17737 #else
17738 #define MEMDUMPINFO "/data/misc/wifi/.memdump.info"
17739 #endif /* CUSTOMER_HW4_DEBUG */
17740
17741 void dhd_get_memdump_info(dhd_pub_t *dhd)
17742 {
17743 struct file *fp = NULL;
17744 uint32 mem_val = DUMP_MEMFILE_MAX;
17745 int ret = 0;
17746 char *filepath = MEMDUMPINFO;
17747
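/*
 * The file is expected to hold a single ASCII digit selecting one of the
 * dhd_dongledump_mode values (DUMP_DISABLED .. DUMP_MEMFILE_BUGON); see the
 * enum definition in dhd.h for the exact numbering.
 */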
17748 /* Read memdump info from the file */
17749 fp = filp_open(filepath, O_RDONLY, 0);
17750 if (IS_ERR(fp)) {
17751 DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
17752 #if defined(CONFIG_X86)
17753 /* Check if it is Live Brix Image */
17754 if (strcmp(filepath, MEMDUMPINFO_LIVE) != 0) {
17755 goto done;
17756 }
17757 /* Try if it is Installed Brix Image */
17758 filepath = MEMDUMPINFO_INST;
17759 DHD_ERROR(("%s: Try File [%s]\n", __FUNCTION__, filepath));
17760 fp = filp_open(filepath, O_RDONLY, 0);
17761 if (IS_ERR(fp)) {
17762 DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
17763 goto done;
17764 }
17765 #else /* Non Brix Android platform */
17766 goto done;
17767 #endif /* CONFIG_X86 */
17768 }
17769
17770 /* Handle success case */
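/* kernel_read() changed signature in 4.14: the position argument moved to the
 * end and became a pointer (file, buf, count, pos) instead of the older
 * (file, offset, buf, count) form, hence the version split below.
 */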
17771 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
17772 ret = kernel_read(fp, (char *)&mem_val, 4, NULL);
17773 #else
17774 ret = kernel_read(fp, 0, (char *)&mem_val, 4);
17775 #endif
17776 if (ret < 0) {
17777 DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret));
17778 filp_close(fp, NULL);
17779 goto done;
17780 }
17781
17782 mem_val = bcm_atoi((char *)&mem_val);
17783
17784 filp_close(fp, NULL);
17785
17786 #ifdef DHD_INIT_DEFAULT_MEMDUMP
17787 if (mem_val == 0 || mem_val == DUMP_MEMFILE_MAX)
17788 mem_val = DUMP_MEMFILE_BUGON;
17789 #endif /* DHD_INIT_DEFAULT_MEMDUMP */
17790
17791 done:
17792 #ifdef CUSTOMER_HW4_DEBUG
17793 dhd->memdump_enabled = (mem_val < DUMP_MEMFILE_MAX) ? mem_val : DUMP_DISABLED;
17794 #else
17795 dhd->memdump_enabled = (mem_val < DUMP_MEMFILE_MAX) ? mem_val : DUMP_MEMFILE;
17796 #endif /* CUSTOMER_HW4_DEBUG */
17797
17798 DHD_ERROR(("%s: MEMDUMP ENABLED = %d\n", __FUNCTION__, dhd->memdump_enabled));
17799 }
17800
17801 void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size)
17802 {
17803 dhd_dump_t *dump = NULL;
17804 dump = (dhd_dump_t *)MALLOC(dhdp->osh, sizeof(dhd_dump_t));
17805 if (dump == NULL) {
17806 DHD_ERROR(("%s: dhd dump memory allocation failed\n", __FUNCTION__));
17807 return;
17808 }
17809 dump->buf = buf;
17810 dump->bufsize = size;
17811
17812 #if defined(CONFIG_ARM64)
17813 DHD_ERROR(("%s: buf(va)=%llx, buf(pa)=%llx, bufsize=%d\n", __FUNCTION__,
17814 (uint64)buf, (uint64)__virt_to_phys((ulong)buf), size));
17815 #elif defined(__ARM_ARCH_7A__)
17816 DHD_ERROR(("%s: buf(va)=%x, buf(pa)=%x, bufsize=%d\n", __FUNCTION__,
17817 (uint32)buf, (uint32)__virt_to_phys((ulong)buf), size));
17818 #endif /* __ARM_ARCH_7A__ */
17819 if (dhdp->memdump_enabled == DUMP_MEMONLY) {
17820 BUG_ON(1);
17821 }
17822
17823 #ifdef DHD_LOG_DUMP
17824 if (dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP) {
17825 dhd_schedule_log_dump(dhdp);
17826 }
17827 #endif /* DHD_LOG_DUMP */
17828 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dump,
17829 DHD_WQ_WORK_SOC_RAM_DUMP, dhd_mem_dump, DHD_WQ_WORK_PRIORITY_HIGH);
17830 }
17831
17832 static void
17833 dhd_mem_dump(void *handle, void *event_info, u8 event)
17834 {
17835 dhd_info_t *dhd = handle;
17836 dhd_dump_t *dump = event_info;
17837
17838 if (!dhd) {
17839 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
17840 return;
17841 }
17842
17843 if (!dump) {
17844 DHD_ERROR(("%s: dump is NULL\n", __FUNCTION__));
17845 return;
17846 }
17847
17848 if (write_dump_to_file(&dhd->pub, dump->buf, dump->bufsize, "mem_dump")) {
17849 DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__));
17850 dhd->pub.memdump_success = FALSE;
17851 }
17852
17853 if (dhd->pub.memdump_enabled == DUMP_MEMFILE_BUGON &&
17854 #ifdef DHD_LOG_DUMP
17855 dhd->pub.memdump_type != DUMP_TYPE_BY_SYSDUMP &&
17856 #endif /* DHD_LOG_DUMP */
17857 #ifdef DHD_DEBUG_UART
17858 dhd->pub.memdump_success == TRUE &&
17859 #endif /* DHD_DEBUG_UART */
17860 dhd->pub.memdump_type != DUMP_TYPE_CFG_VENDOR_TRIGGERED) {
17861 #ifdef SHOW_LOGTRACE
17862 /* Wait till event_log_dispatcher_work finishes */
17863 cancel_work_sync(&dhd->event_log_dispatcher_work);
17864 #endif /* SHOW_LOGTRACE */
17865
17866 BUG_ON(1);
17867 }
17868 MFREE(dhd->pub.osh, dump, sizeof(dhd_dump_t));
17869 }
17870 #endif /* DHD_FW_COREDUMP */
17871
17872 #ifdef DHD_SSSR_DUMP
17873
17874 static void
17875 dhd_sssr_dump(void *handle, void *event_info, u8 event)
17876 {
17877 dhd_info_t *dhd = handle;
17878 dhd_pub_t *dhdp;
17879 int i;
17880 char before_sr_dump[128];
17881 char after_sr_dump[128];
17882
17883 if (!dhd) {
17884 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
17885 return;
17886 }
17887
17888 dhdp = &dhd->pub;
17889
17890 for (i = 0; i < MAX_NUM_D11CORES; i++) {
17891 /* Init file name */
17892 memset(before_sr_dump, 0, sizeof(before_sr_dump));
17893 memset(after_sr_dump, 0, sizeof(after_sr_dump));
17894
17895 snprintf(before_sr_dump, sizeof(before_sr_dump), "%s_%d_%s",
17896 "sssr_core", i, "before_SR");
17897 snprintf(after_sr_dump, sizeof(after_sr_dump), "%s_%d_%s",
17898 "sssr_core", i, "after_SR");
17899
17900 if (dhdp->sssr_d11_before[i] && dhdp->sssr_d11_outofreset[i]) {
17901 if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_before[i],
17902 dhdp->sssr_reg_info.mac_regs[i].sr_size, before_sr_dump)) {
17903 DHD_ERROR(("%s: writing SSSR MAIN dump before to the file failed\n",
17904 __FUNCTION__));
17905 }
17906 }
17907 if (dhdp->sssr_d11_after[i] && dhdp->sssr_d11_outofreset[i]) {
17908 if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_after[i],
17909 dhdp->sssr_reg_info.mac_regs[i].sr_size, after_sr_dump)) {
17910 DHD_ERROR(("%s: writing SSSR AUX dump after to the file failed\n",
17911 __FUNCTION__));
17912 }
17913 }
17914 }
17915
17916 if (dhdp->sssr_vasip_buf_before) {
17917 if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_vasip_buf_before,
17918 dhdp->sssr_reg_info.vasip_regs.vasip_sr_size, "sssr_vasip_before_SR")) {
17919 DHD_ERROR(("%s: writing SSSR VASIP dump before to the file failed\n",
17920 __FUNCTION__));
17921 }
17922 }
17923
17924 if (dhdp->sssr_vasip_buf_after) {
17925 if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_vasip_buf_after,
17926 dhdp->sssr_reg_info.vasip_regs.vasip_sr_size, "sssr_vasip_after_SR")) {
17927 DHD_ERROR(("%s: writing SSSR VASIP dump after to the file failed\n",
17928 __FUNCTION__));
17929 }
17930 }
17931 }
17932
17933 void
17934 dhd_schedule_sssr_dump(dhd_pub_t *dhdp)
17935 {
17936 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, NULL,
17937 DHD_WQ_WORK_SSSR_DUMP, dhd_sssr_dump, DHD_WQ_WORK_PRIORITY_HIGH);
17938 }
17939 #endif /* DHD_SSSR_DUMP */
17940
17941 #ifdef DHD_LOG_DUMP
17942 static void
17943 dhd_log_dump(void *handle, void *event_info, u8 event)
17944 {
17945 dhd_info_t *dhd = handle;
17946
17947 if (!dhd) {
17948 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
17949 return;
17950 }
17951
17952 if (do_dhd_log_dump(&dhd->pub)) {
17953 DHD_ERROR(("%s: writing debug dump to the file failed\n", __FUNCTION__));
17954 return;
17955 }
17956 }
17957
17958 void dhd_schedule_log_dump(dhd_pub_t *dhdp)
17959 {
17960 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
17961 (void*)NULL, DHD_WQ_WORK_DHD_LOG_DUMP,
17962 dhd_log_dump, DHD_WQ_WORK_PRIORITY_HIGH);
17963 }
17964
17965 static int
17966 do_dhd_log_dump(dhd_pub_t *dhdp)
17967 {
17968 int ret = 0, i = 0;
17969 struct file *fp = NULL;
17970 mm_segment_t old_fs;
17971 loff_t pos = 0;
17972 unsigned int wr_size = 0;
17973 char dump_path[128];
17974 struct timeval curtime;
17975 uint32 file_mode;
17976 unsigned long flags = 0;
17977 struct dhd_log_dump_buf *dld_buf = &g_dld_buf[0];
17978
17979 const char *pre_strs =
17980 "-------------------- General log ---------------------------\n";
17981
17982 const char *post_strs =
17983 "-------------------- Specific log --------------------------\n";
17984
17985 struct timespec64 ts;
17986
17987 if (!dhdp) {
17988 return -1;
17989 }
17990
17991 DHD_ERROR(("DHD version: %s\n", dhd_version));
17992 DHD_ERROR(("F/W version: %s\n", fw_version));
17993
17994 /* change to KERNEL_DS address limit */
17995 old_fs = get_fs();
17996 set_fs(KERNEL_DS);
17997
17998 /* Init file name */
17999 memset(dump_path, 0, sizeof(dump_path));
18000 getnstimeofday64(&ts);
18001 curtime.tv_sec = ts.tv_sec;
18002 curtime.tv_usec = ts.tv_nsec/1000;
18003
18004 snprintf(dump_path, sizeof(dump_path), "%s_%lu.%lu",
18005 DHD_COMMON_DUMP_PATH "debug_dump",
18006 (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
18007 file_mode = O_CREAT | O_WRONLY | O_SYNC;
18008
18009 DHD_ERROR(("debug_dump_path = %s\n", dump_path));
18010 fp = filp_open(dump_path, file_mode, 0664);
18011 if (IS_ERR(fp)) {
18012 ret = PTR_ERR(fp);
18013 DHD_ERROR(("open file error, err = %d\n", ret));
18014 goto exit;
18015 }
18016
18017 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
18018 ret = kernel_write(fp, pre_strs, strlen(pre_strs), &pos);
18019 #else
18020 ret = vfs_write(fp, pre_strs, strlen(pre_strs), &pos);
18021 #endif
18022 if (ret < 0) {
18023 DHD_ERROR(("write file error, err = %d\n", ret));
18024 goto exit;
18025 }
18026
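/*
 * Each dhd_log_dump_buf is a simple linear log area: front is the start,
 * present is the current write pointer, max is the end address, and
 * wraparound marks that the writer has reset to front at least once. If the
 * buffer wrapped we must dump the whole area; otherwise only front..present
 * holds valid data.
 */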
18027 do {
18028 unsigned int buf_size = (unsigned int)(dld_buf->max -
18029 (unsigned long)dld_buf->buffer);
18030 if (dld_buf->wraparound) {
18031 wr_size = buf_size;
18032 } else {
18033 if (!dld_buf->buffer[0]) { /* print log if buf is empty. */
18034 DHD_ERROR_EX(("Buffer is empty. No event/log.\n"));
18035 }
18036 wr_size = (unsigned int)(dld_buf->present - dld_buf->front);
18037 }
18038
18039 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
18040 ret = kernel_write(fp, dld_buf->buffer, wr_size, &pos);
18041 #else
18042 ret = vfs_write(fp, dld_buf->buffer, wr_size, &pos);
18043 #endif
18044 if (ret < 0) {
18045 DHD_ERROR(("write file error, err = %d\n", ret));
18046 goto exit;
18047 }
18048
18049 /* re-init dhd_log_dump_buf structure */
18050 spin_lock_irqsave(&dld_buf->lock, flags);
18051 dld_buf->wraparound = 0;
18052 dld_buf->present = dld_buf->front;
18053 dld_buf->remain = buf_size;
18054 bzero(dld_buf->buffer, buf_size);
18055 spin_unlock_irqrestore(&dld_buf->lock, flags);
18056 ret = BCME_OK;
18057
18058 if (++i < DLD_BUFFER_NUM) {
18059 dld_buf = &g_dld_buf[i];
18060 } else {
18061 break;
18062 }
18063
18064 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
18065 ret = kernel_write(fp, post_strs, strlen(post_strs), &pos);
18066 #else
18067 ret = vfs_write(fp, post_strs, strlen(post_strs), &pos);
18068 #endif
18069 if (ret < 0) {
18070 DHD_ERROR(("write file error, err = %d\n", ret));
18071 goto exit;
18072 }
18073 } while (1);
18074
18075 exit:
18076 #if defined(STAT_REPORT)
18077 if (!IS_ERR(fp) && ret >= 0) {
18078 wl_stat_report_file_save(dhdp, fp);
18079 }
18080 #endif /* STAT_REPORT */
18081
18082 if (!IS_ERR(fp)) {
18083 filp_close(fp, NULL);
18084 }
18085 set_fs(old_fs);
18086
18087 return ret;
18088 }
18089 #endif /* DHD_LOG_DUMP */
18090
18091
18092 #ifdef BCMASSERT_LOG
18093 #ifdef CUSTOMER_HW4_DEBUG
18094 #define ASSERTINFO PLATFORM_PATH".assert.info"
18095 #elif defined(CUSTOMER_HW2)
18096 #define ASSERTINFO "/data/misc/wifi/.assert.info"
18097 #else
18098 #define ASSERTINFO "/installmedia/.assert.info"
18099 #endif /* CUSTOMER_HW4_DEBUG */
18100 void dhd_get_assert_info(dhd_pub_t *dhd)
18101 {
18102 struct file *fp = NULL;
18103 char *filepath = ASSERTINFO;
18104 int mem_val = -1;
18105
18106 /*
18107 * Read assert info from the file
18108 * 0: Trigger Kernel crash by panic()
18109 * 1: Print out the logs and don't trigger Kernel panic. (default)
18110 * 2: Trigger Kernel crash by BUG()
18111 * File doesn't exist: Keep default value (1).
18112 */
18113 fp = filp_open(filepath, O_RDONLY, 0);
18114 if (IS_ERR(fp)) {
18115 DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
18116 } else {
18117 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
18118 ssize_t ret = kernel_read(fp, (char *)&mem_val, 4, NULL);
18119 #else
18120 int ret = kernel_read(fp, 0, (char *)&mem_val, 4);
18121 #endif
18122 if (ret < 0) {
18123 DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret));
18124 } else {
18125 mem_val = bcm_atoi((char *)&mem_val);
18126 DHD_ERROR(("%s: ASSERT ENABLED = %d\n", __FUNCTION__, mem_val));
18127 }
18128 filp_close(fp, NULL);
18129 }
18130 #ifdef CUSTOMER_HW4_DEBUG
18131 /* By default. set to 1, No Kernel Panic */
18132 g_assert_type = (mem_val >= 0) ? mem_val : 1;
18133 #else
18134 /* By default. set to 0, Kernel Panic */
18135 g_assert_type = (mem_val >= 0) ? mem_val : 0;
18136 #endif
18137 }
18138 #endif /* BCMASSERT_LOG */
18139
18140 /*
18141 * This call is to get the memdump size so that,
18142 * halutil can alloc that much buffer in user space.
18143 */
18144 int
18145 dhd_os_socram_dump(struct net_device *dev, uint32 *dump_size)
18146 {
18147 int ret = BCME_OK;
18148 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
18149 dhd_pub_t *dhdp = &dhd->pub;
18150
18151 if (dhdp->busstate == DHD_BUS_DOWN) {
18152 DHD_ERROR(("%s: bus is down\n", __FUNCTION__));
18153 return BCME_ERROR;
18154 }
18155
18156 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) {
18157 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
18158 __FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
18159 return BCME_ERROR;
18160 }
18161
18162 ret = dhd_common_socram_dump(dhdp);
18163 if (ret == BCME_OK) {
18164 *dump_size = dhdp->soc_ram_length;
18165 }
18166 return ret;
18167 }
18168
18169 /*
18170 * This is to get the actual memdump after getting the memdump size
18171 */
18172 int
18173 dhd_os_get_socram_dump(struct net_device *dev, char **buf, uint32 *size)
18174 {
18175 int ret = BCME_OK;
18176 int orig_len = 0;
18177 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
18178 dhd_pub_t *dhdp = &dhd->pub;
18179 if (buf == NULL)
18180 return BCME_ERROR;
18181 orig_len = *size;
18182 if (dhdp->soc_ram) {
18183 if (orig_len >= dhdp->soc_ram_length) {
18184 memcpy(*buf, dhdp->soc_ram, dhdp->soc_ram_length);
18185 /* reset the storage of dump */
18186 memset(dhdp->soc_ram, 0, dhdp->soc_ram_length);
18187 *size = dhdp->soc_ram_length;
18188 } else {
18189 ret = BCME_BUFTOOSHORT;
18190 DHD_ERROR(("The length of the buffer is too short"
18191 " to save the memory dump with %d\n", dhdp->soc_ram_length));
18192 }
18193 } else {
18194 DHD_ERROR(("socram_dump is not ready to get\n"));
18195 ret = BCME_NOTREADY;
18196 }
18197 return ret;
18198 }
18199
18200 int
18201 dhd_os_get_version(struct net_device *dev, bool dhd_ver, char **buf, uint32 size)
18202 {
18203 char *fw_str;
18204
18205 if (size == 0)
18206 return BCME_BADARG;
18207
18208 fw_str = strstr(info_string, "Firmware: ");
18209 if (fw_str == NULL) {
18210 return BCME_ERROR;
18211 }
18212
18213 memset(*buf, 0, size);
18214 if (dhd_ver) {
18215 strncpy(*buf, dhd_version, size - 1);
18216 } else {
18217 strncpy(*buf, fw_str, size - 1);
18218 }
18219 return BCME_OK;
18220 }
18221
18222 #ifdef DHD_WMF
18223 /* Returns interface specific WMF configuration */
18224 dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx)
18225 {
18226 dhd_info_t *dhd = dhdp->info;
18227 dhd_if_t *ifp;
18228
18229 ASSERT(idx < DHD_MAX_IFS);
18230
18231 ifp = dhd->iflist[idx];
18232 return &ifp->wmf;
18233 }
18234 #endif /* DHD_WMF */
18235
18236 #if defined(TRAFFIC_MGMT_DWM)
18237 void traffic_mgmt_pkt_set_prio(dhd_pub_t *dhdp, void *pktbuf)
18238 {
18239 struct ether_header *eh;
18240 struct ethervlan_header *evh;
18241 uint8 *pktdata, *ip_body;
18242 uint8 dwm_filter;
18243 uint8 tos_tc = 0;
18244 uint8 dscp = 0;
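/* Walk the L2 header (with optional 802.1Q tag) to the IP payload, extract
 * the DSCP code point from the TOS/traffic-class byte and, if a DWM filter is
 * configured for that DSCP, stamp the corresponding priority on the packet.
 */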
18245 pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
18246 eh = (struct ether_header *) pktdata;
18247 ip_body = NULL;
18248
18249 if (dhdp->dhd_tm_dwm_tbl.dhd_dwm_enabled) {
18250 if (eh->ether_type == hton16(ETHER_TYPE_8021Q)) {
18251 evh = (struct ethervlan_header *)eh;
18252 if ((evh->ether_type == hton16(ETHER_TYPE_IP)) ||
18253 (evh->ether_type == hton16(ETHER_TYPE_IPV6))) {
18254 ip_body = pktdata + sizeof(struct ethervlan_header);
18255 }
18256 } else if ((eh->ether_type == hton16(ETHER_TYPE_IP)) ||
18257 (eh->ether_type == hton16(ETHER_TYPE_IPV6))) {
18258 ip_body = pktdata + sizeof(struct ether_header);
18259 }
18260 if (ip_body) {
18261 tos_tc = IP_TOS46(ip_body);
18262 dscp = tos_tc >> IPV4_TOS_DSCP_SHIFT;
18263 }
18264
18265 if (dscp < DHD_DWM_TBL_SIZE) {
18266 dwm_filter = dhdp->dhd_tm_dwm_tbl.dhd_dwm_tbl[dscp];
18267 if (DHD_TRF_MGMT_DWM_IS_FILTER_SET(dwm_filter)) {
18268 PKTSETPRIO(pktbuf, DHD_TRF_MGMT_DWM_PRIO(dwm_filter));
18269 }
18270 }
18271 }
18272 }
18273 #endif
18274
18275 bool dhd_sta_associated(dhd_pub_t *dhdp, uint32 bssidx, uint8 *mac)
18276 {
18277 return dhd_find_sta(dhdp, bssidx, mac) ? TRUE : FALSE;
18278 }
18279
18280 #ifdef DHD_L2_FILTER
18281 arp_table_t*
18282 dhd_get_ifp_arp_table_handle(dhd_pub_t *dhdp, uint32 bssidx)
18283 {
18284 dhd_info_t *dhd = dhdp->info;
18285 dhd_if_t *ifp;
18286
18287 ASSERT(bssidx < DHD_MAX_IFS);
18288
18289 ifp = dhd->iflist[bssidx];
18290 return ifp->phnd_arp_table;
18291 }
18292
18293 int dhd_get_parp_status(dhd_pub_t *dhdp, uint32 idx)
18294 {
18295 dhd_info_t *dhd = dhdp->info;
18296 dhd_if_t *ifp;
18297
18298 ASSERT(idx < DHD_MAX_IFS);
18299
18300 ifp = dhd->iflist[idx];
18301
18302 if (ifp)
18303 return ifp->parp_enable;
18304 else
18305 return FALSE;
18306 }
18307
18308 /* Set interface specific proxy arp configuration */
18309 int dhd_set_parp_status(dhd_pub_t *dhdp, uint32 idx, int val)
18310 {
18311 dhd_info_t *dhd = dhdp->info;
18312 dhd_if_t *ifp;
18313 ASSERT(idx < DHD_MAX_IFS);
18314 ifp = dhd->iflist[idx];
18315
18316 if (!ifp)
18317 return BCME_ERROR;
18318
18319 /* At present all 3 variables are being
18320 * handled at once
18321 */
18322 ifp->parp_enable = val;
18323 ifp->parp_discard = val;
18324 ifp->parp_allnode = val;
18325
18326 /* Flush ARP entries when disabled */
18327 if (val == FALSE) {
18328 bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE, NULL,
18329 FALSE, dhdp->tickcnt);
18330 }
18331 return BCME_OK;
18332 }
18333
18334 bool dhd_parp_discard_is_enabled(dhd_pub_t *dhdp, uint32 idx)
18335 {
18336 dhd_info_t *dhd = dhdp->info;
18337 dhd_if_t *ifp;
18338
18339 ASSERT(idx < DHD_MAX_IFS);
18340
18341 ifp = dhd->iflist[idx];
18342
18343 ASSERT(ifp);
18344 return ifp->parp_discard;
18345 }
18346
18347 bool
18348 dhd_parp_allnode_is_enabled(dhd_pub_t *dhdp, uint32 idx)
18349 {
18350 dhd_info_t *dhd = dhdp->info;
18351 dhd_if_t *ifp;
18352
18353 ASSERT(idx < DHD_MAX_IFS);
18354
18355 ifp = dhd->iflist[idx];
18356
18357 ASSERT(ifp);
18358
18359 return ifp->parp_allnode;
18360 }
18361
18362 int dhd_get_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx)
18363 {
18364 dhd_info_t *dhd = dhdp->info;
18365 dhd_if_t *ifp;
18366
18367 ASSERT(idx < DHD_MAX_IFS);
18368
18369 ifp = dhd->iflist[idx];
18370
18371 ASSERT(ifp);
18372
18373 return ifp->dhcp_unicast;
18374 }
18375
18376 int dhd_set_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx, int val)
18377 {
18378 dhd_info_t *dhd = dhdp->info;
18379 dhd_if_t *ifp;
18380 ASSERT(idx < DHD_MAX_IFS);
18381 ifp = dhd->iflist[idx];
18382
18383 ASSERT(ifp);
18384
18385 ifp->dhcp_unicast = val;
18386 return BCME_OK;
18387 }
18388
18389 int dhd_get_block_ping_status(dhd_pub_t *dhdp, uint32 idx)
18390 {
18391 dhd_info_t *dhd = dhdp->info;
18392 dhd_if_t *ifp;
18393
18394 ASSERT(idx < DHD_MAX_IFS);
18395
18396 ifp = dhd->iflist[idx];
18397
18398 ASSERT(ifp);
18399
18400 return ifp->block_ping;
18401 }
18402
18403 int dhd_set_block_ping_status(dhd_pub_t *dhdp, uint32 idx, int val)
18404 {
18405 dhd_info_t *dhd = dhdp->info;
18406 dhd_if_t *ifp;
18407 ASSERT(idx < DHD_MAX_IFS);
18408 ifp = dhd->iflist[idx];
18409
18410 ASSERT(ifp);
18411
18412 ifp->block_ping = val;
18413 /* Disable rx_pkt_chain feature for interface if block_ping option is
18414 * enabled
18415 */
18416 dhd_update_rx_pkt_chainable_state(dhdp, idx);
18417 return BCME_OK;
18418 }
18419
18420 int dhd_get_grat_arp_status(dhd_pub_t *dhdp, uint32 idx)
18421 {
18422 dhd_info_t *dhd = dhdp->info;
18423 dhd_if_t *ifp;
18424
18425 ASSERT(idx < DHD_MAX_IFS);
18426
18427 ifp = dhd->iflist[idx];
18428
18429 ASSERT(ifp);
18430
18431 return ifp->grat_arp;
18432 }
18433
18434 int dhd_set_grat_arp_status(dhd_pub_t *dhdp, uint32 idx, int val)
18435 {
18436 dhd_info_t *dhd = dhdp->info;
18437 dhd_if_t *ifp;
18438 ASSERT(idx < DHD_MAX_IFS);
18439 ifp = dhd->iflist[idx];
18440
18441 ASSERT(ifp);
18442
18443 ifp->grat_arp = val;
18444
18445 return BCME_OK;
18446 }
18447 #endif /* DHD_L2_FILTER */
18448
18449
18450 #if defined(SET_RPS_CPUS)
18451 int dhd_rps_cpus_enable(struct net_device *net, int enable)
18452 {
18453 dhd_info_t *dhd = DHD_DEV_INFO(net);
18454 dhd_if_t *ifp;
18455 int ifidx;
18456 char * RPS_CPU_SETBUF;
18457
18458 ifidx = dhd_net2idx(dhd, net);
18459 if (ifidx == DHD_BAD_IF) {
18460 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
18461 return -ENODEV;
18462 }
18463
18464 if (ifidx == PRIMARY_INF) {
18465 if (dhd->pub.op_mode == DHD_FLAG_IBSS_MODE) {
18466 DHD_INFO(("%s : set for IBSS.\n", __FUNCTION__));
18467 RPS_CPU_SETBUF = RPS_CPUS_MASK_IBSS;
18468 } else {
18469 DHD_INFO(("%s : set for BSS.\n", __FUNCTION__));
18470 RPS_CPU_SETBUF = RPS_CPUS_MASK;
18471 }
18472 } else if (ifidx == VIRTUAL_INF) {
18473 DHD_INFO(("%s : set for P2P.\n", __FUNCTION__));
18474 RPS_CPU_SETBUF = RPS_CPUS_MASK_P2P;
18475 } else {
18476 DHD_ERROR(("%s : Invalid index : %d.\n", __FUNCTION__, ifidx));
18477 return -EINVAL;
18478 }
18479
18480 ifp = dhd->iflist[ifidx];
18481 if (ifp) {
18482 if (enable) {
18483 DHD_INFO(("%s : set rps_cpus as [%s]\n", __FUNCTION__, RPS_CPU_SETBUF));
18484 custom_rps_map_set(ifp->net->_rx, RPS_CPU_SETBUF, strlen(RPS_CPU_SETBUF));
18485 } else {
18486 custom_rps_map_clear(ifp->net->_rx);
18487 }
18488 } else {
18489 DHD_ERROR(("%s : ifp is NULL!!\n", __FUNCTION__));
18490 return -ENODEV;
18491 }
18492 return BCME_OK;
18493 }
18494
18495 int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len)
18496 {
18497 struct rps_map *old_map, *map;
18498 cpumask_var_t mask;
18499 int err, cpu, i;
18500 static DEFINE_SPINLOCK(rps_map_lock);
18501
18502 DHD_INFO(("%s : Entered.\n", __FUNCTION__));
18503
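/* buf holds a hex CPU mask (e.g. "6" enables CPUs 1 and 2). It is parsed into
 * a cpumask, converted to an rps_map and published to the RX queue with RCU,
 * mirroring what the kernel does when /sys/class/net/<if>/queues/rx-0/rps_cpus
 * is written.
 */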
18504 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
18505 DHD_ERROR(("%s : alloc_cpumask_var fail.\n", __FUNCTION__));
18506 return -ENOMEM;
18507 }
18508
18509 err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
18510 if (err) {
18511 free_cpumask_var(mask);
18512 DHD_ERROR(("%s : bitmap_parse fail.\n", __FUNCTION__));
18513 return err;
18514 }
18515
18516 map = kzalloc(max_t(unsigned int,
18517 RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
18518 GFP_KERNEL);
18519 if (!map) {
18520 free_cpumask_var(mask);
18521 DHD_ERROR(("%s : map malloc fail.\n", __FUNCTION__));
18522 return -ENOMEM;
18523 }
18524
18525 i = 0;
18526 for_each_cpu(cpu, mask) {
18527 map->cpus[i++] = cpu;
18528 }
18529
18530 if (i) {
18531 map->len = i;
18532 } else {
18533 kfree(map);
18534 map = NULL;
18535 free_cpumask_var(mask);
18536 DHD_ERROR(("%s : mapping cpu fail.\n", __FUNCTION__));
18537 return -1;
18538 }
18539
18540 spin_lock(&rps_map_lock);
18541 old_map = rcu_dereference_protected(queue->rps_map,
18542 lockdep_is_held(&rps_map_lock));
18543 rcu_assign_pointer(queue->rps_map, map);
18544 spin_unlock(&rps_map_lock);
18545
18546 if (map) {
18547 static_key_slow_inc(&rps_needed);
18548 }
18549 if (old_map) {
18550 kfree_rcu(old_map, rcu);
18551 static_key_slow_dec(&rps_needed);
18552 }
18553 free_cpumask_var(mask);
18554
18555 DHD_INFO(("%s : Done. mapping cpu nummber : %d\n", __FUNCTION__, map->len));
18556 return map->len;
18557 }
18558
18559 void custom_rps_map_clear(struct netdev_rx_queue *queue)
18560 {
18561 struct rps_map *map;
18562
18563 DHD_INFO(("%s : Entered.\n", __FUNCTION__));
18564
18565 map = rcu_dereference_protected(queue->rps_map, 1);
18566 if (map) {
18567 RCU_INIT_POINTER(queue->rps_map, NULL);
18568 kfree_rcu(map, rcu);
18569 DHD_INFO(("%s : rps_cpus map clear.\n", __FUNCTION__));
18570 }
18571 }
18572 #endif
18573
18574
18575
18576 #ifdef DHD_DEBUG_PAGEALLOC
18577
18578 void
18579 dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len)
18580 {
18581 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
18582
18583 DHD_ERROR(("%s: Got dhd_page_corrupt_cb 0x%p %d\n",
18584 __FUNCTION__, addr_corrupt, (uint32)len));
18585
18586 DHD_OS_WAKE_LOCK(dhdp);
18587 prhex("Page Corruption:", addr_corrupt, len);
18588 dhd_dump_to_kernelog(dhdp);
18589 #if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
18590 /* Load the dongle side dump to host memory and then BUG_ON() */
18591 dhdp->memdump_enabled = DUMP_MEMONLY;
18592 dhdp->memdump_type = DUMP_TYPE_MEMORY_CORRUPTION;
18593 dhd_bus_mem_dump(dhdp);
18594 #endif /* BCMPCIE && DHD_FW_COREDUMP */
18595 DHD_OS_WAKE_UNLOCK(dhdp);
18596 }
18597 EXPORT_SYMBOL(dhd_page_corrupt_cb);
18598 #endif /* DHD_DEBUG_PAGEALLOC */
18599
18600 #if defined(BCMPCIE) && defined(DHD_PKTID_AUDIT_ENABLED)
18601 void
18602 dhd_pktid_error_handler(dhd_pub_t *dhdp)
18603 {
18604 DHD_ERROR(("%s: Got Pkt Id Audit failure \n", __FUNCTION__));
18605 DHD_OS_WAKE_LOCK(dhdp);
18606 dhd_dump_to_kernelog(dhdp);
18607 #ifdef DHD_FW_COREDUMP
18608 /* Load the dongle side dump to host memory */
18609 if (dhdp->memdump_enabled == DUMP_DISABLED) {
18610 dhdp->memdump_enabled = DUMP_MEMFILE;
18611 }
18612 dhdp->memdump_type = DUMP_TYPE_PKTID_AUDIT_FAILURE;
18613 dhd_bus_mem_dump(dhdp);
18614 #endif /* DHD_FW_COREDUMP */
18615 dhdp->hang_reason = HANG_REASON_PCIE_PKTID_ERROR;
18616 dhd_os_check_hang(dhdp, 0, -EREMOTEIO);
18617 DHD_OS_WAKE_UNLOCK(dhdp);
18618 }
18619 #endif /* BCMPCIE && DHD_PKTID_AUDIT_ENABLED */
18620
18621 struct net_device *
18622 dhd_linux_get_primary_netdev(dhd_pub_t *dhdp)
18623 {
18624 dhd_info_t *dhd = dhdp->info;
18625
18626 if (dhd->iflist[0] && dhd->iflist[0]->net)
18627 return dhd->iflist[0]->net;
18628 else
18629 return NULL;
18630 }
18631
18632 #ifdef DHD_DHCP_DUMP
18633 static void
18634 dhd_dhcp_dump(char *ifname, uint8 *pktdata, bool tx)
18635 {
18636 struct bootp_fmt *b = (struct bootp_fmt *) &pktdata[ETHER_HDR_LEN];
18637 struct iphdr *h = &b->ip_header;
18638 uint8 *ptr, *opt, *end = (uint8 *) b + ntohs(b->ip_header.tot_len);
18639 int dhcp_type = 0, len, opt_len;
18640
18641 /* check IP header */
18642 if (h->ihl != 5 || h->version != 4 || h->protocol != IPPROTO_UDP) {
18643 return;
18644 }
18645
18646 /* check UDP port for bootp (67, 68) */
18647 if (b->udp_header.source != htons(67) && b->udp_header.source != htons(68) &&
18648 b->udp_header.dest != htons(67) && b->udp_header.dest != htons(68)) {
18649 return;
18650 }
18651
18652 /* check header length */
18653 if (ntohs(h->tot_len) < ntohs(b->udp_header.len) + sizeof(struct iphdr)) {
18654 return;
18655 }
18656
18657 len = ntohs(b->udp_header.len) - sizeof(struct udphdr);
18658 opt_len = len
18659 - (sizeof(*b) - sizeof(struct iphdr) - sizeof(struct udphdr) - sizeof(b->options));
18660
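/* DHCP options are TLVs following the 4-byte magic cookie: one code byte, one
 * length byte, then the value. Code 0 is a single-byte pad, 0xff terminates
 * the list, and code 53 carries the DHCP message type we want to log.
 */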
18661 /* parse bootp options */
18662 if (opt_len >= 4 && !memcmp(b->options, bootp_magic_cookie, 4)) {
18663 ptr = &b->options[4];
18664 while (ptr < end && *ptr != 0xff) {
18665 opt = ptr++;
18666 if (*opt == 0) {
18667 continue;
18668 }
18669 ptr += *ptr + 1;
18670 if (ptr >= end) {
18671 break;
18672 }
18673 /* 53 is dhcp type */
18674 if (*opt == 53) {
18675 if (opt[1]) {
18676 dhcp_type = opt[2];
18677 DHD_ERROR(("DHCP[%s] - %s [%s] [%s]\n",
18678 ifname, dhcp_types[dhcp_type],
18679 tx ? "TX" : "RX", dhcp_ops[b->op]));
18680 break;
18681 }
18682 }
18683 }
18684 }
18685 }
18686 #endif /* DHD_DHCP_DUMP */
18687
18688 #ifdef DHD_ICMP_DUMP
18689 static void
18690 dhd_icmp_dump(char *ifname, uint8 *pktdata, bool tx)
18691 {
18692 uint8 *pkt = (uint8 *)&pktdata[ETHER_HDR_LEN];
18693 struct iphdr *iph = (struct iphdr *)pkt;
18694 struct icmphdr *icmph;
18695
18696 /* check IP header */
18697 if (iph->ihl != 5 || iph->version != 4 || iph->protocol != IP_PROT_ICMP) {
18698 return;
18699 }
18700
18701 icmph = (struct icmphdr *)((uint8 *)pkt + sizeof(struct iphdr));
18702 if (icmph->type == ICMP_ECHO) {
18703 DHD_ERROR(("PING REQUEST[%s] [%s] : SEQNUM=%d\n",
18704 ifname, tx ? "TX" : "RX", ntoh16(icmph->un.echo.sequence)));
18705 } else if (icmph->type == ICMP_ECHOREPLY) {
18706 DHD_ERROR(("PING REPLY[%s] [%s] : SEQNUM=%d\n",
18707 ifname, tx ? "TX" : "RX", ntoh16(icmph->un.echo.sequence)));
18708 } else {
18709 DHD_ERROR(("ICMP [%s] [%s] : TYPE=%d, CODE=%d\n",
18710 ifname, tx ? "TX" : "RX", icmph->type, icmph->code));
18711 }
18712 }
18713 #endif /* DHD_ICMP_DUMP */
18714
18715 #ifdef SHOW_LOGTRACE
18716 void
18717 dhd_get_read_buf_ptr(dhd_pub_t *dhd_pub, trace_buf_info_t *trace_buf_info)
18718 {
18719 dhd_dbg_ring_status_t ring_status;
18720 uint32 rlen;
18721
18722 rlen = dhd_dbg_ring_pull_single(dhd_pub, FW_VERBOSE_RING_ID, trace_buf_info->buf,
18723 TRACE_LOG_BUF_MAX_SIZE, TRUE);
18724 trace_buf_info->size = rlen;
18725 trace_buf_info->availability = NEXT_BUF_NOT_AVAIL;
18726 if (rlen == 0) {
18727 trace_buf_info->availability = BUF_NOT_AVAILABLE;
18728 return;
18729 }
18730 dhd_dbg_get_ring_status(dhd_pub, FW_VERBOSE_RING_ID, &ring_status);
18731 if (ring_status.written_bytes != ring_status.read_bytes) {
18732 trace_buf_info->availability = NEXT_BUF_AVAIL;
18733 }
18734 }
18735 #endif /* SHOW_LOGTRACE */
18736
18737 bool
18738 dhd_fw_download_status(dhd_pub_t *dhd_pub)
18739 {
18740 return dhd_pub->fw_download_done;
18741 }
18742
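/* Netlink channel to the DHD user-space daemon: the daemon registers itself by
 * sending a REASON_DAEMON_STARTED message (which records its PID in
 * sender_pid), after which dhd_send_msg_to_daemon() can unicast events such as
 * fatal-timeout traps back to it.
 */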
18743 int
18744 dhd_create_to_notifier_skt(void)
18745 {
18746 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
18747 /* Kernel 3.7 onwards this API accepts only 3 arguments. */
18748 /* Kernel version 3.6 is a special case which accepts 4 arguments */
18749 nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, &g_cfg);
18750 #elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
18751 /* Kernel version 3.5 and below use this old API format */
18752 nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, 0,
18753 dhd_process_daemon_msg, NULL, THIS_MODULE);
18754 #else
18755 nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, THIS_MODULE, &g_cfg);
18756 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) */
18757 if (!nl_to_event_sk)
18758 {
18759 printf("Error creating socket.\n");
18760 return -1;
18761 }
18762 DHD_INFO(("nl_to socket created successfully...\n"));
18763 return 0;
18764 }
18765
18766 void
18767 dhd_destroy_to_notifier_skt(void)
18768 {
18769 DHD_INFO(("Destroying nl_to socket\n"));
18770 if (nl_to_event_sk) {
18771 netlink_kernel_release(nl_to_event_sk);
18772 }
18773 }
18774
18775 static void
18776 dhd_recv_msg_from_daemon(struct sk_buff *skb)
18777 {
18778 struct nlmsghdr *nlh;
18779 bcm_to_info_t *cmd;
18780
18781 nlh = (struct nlmsghdr *)skb->data;
18782 cmd = (bcm_to_info_t *)nlmsg_data(nlh);
18783 if ((cmd->magic == BCM_TO_MAGIC) && (cmd->reason == REASON_DAEMON_STARTED)) {
18784 sender_pid = ((struct nlmsghdr *)(skb->data))->nlmsg_pid;
18785 DHD_INFO(("DHD Daemon Started\n"));
18786 }
18787 }
18788
18789 int
18790 dhd_send_msg_to_daemon(struct sk_buff *skb, void *data, int size)
18791 {
18792 struct nlmsghdr *nlh;
18793 struct sk_buff *skb_out;
18794
18795 if (!nl_to_event_sk) {
18796 DHD_INFO(("No socket available\n"));
18797 return -1;
18798 }
18799
18800 BCM_REFERENCE(skb);
18801 if (sender_pid == 0) {
18802 DHD_INFO(("Invalid PID 0\n"));
18803 return -1;
18804 }
18805
18806 if ((skb_out = nlmsg_new(size, 0)) == NULL) {
18807 DHD_ERROR(("%s: skb alloc failed\n", __FUNCTION__));
18808 return -1;
18809 }
18810 nlh = nlmsg_put(skb_out, 0, 0, NLMSG_DONE, size, 0);
18811 NETLINK_CB(skb_out).dst_group = 0; /* Unicast */
18812 memcpy(nlmsg_data(nlh), (char *)data, size);
18813
18814 if ((nlmsg_unicast(nl_to_event_sk, skb_out, sender_pid)) < 0) {
18815 DHD_INFO(("Error sending message\n"));
18816 }
18817 return 0;
18818 }
18819
18820
18821 static void
18822 dhd_process_daemon_msg(struct sk_buff *skb)
18823 {
18824 bcm_to_info_t to_info;
18825
18826 to_info.magic = BCM_TO_MAGIC;
18827 to_info.reason = REASON_DAEMON_STARTED;
18828 to_info.trap = NO_TRAP;
18829
18830 dhd_recv_msg_from_daemon(skb);
18831 dhd_send_msg_to_daemon(skb, &to_info, sizeof(to_info));
18832 }
18833
18834 #ifdef REPORT_FATAL_TIMEOUTS
18835 static void
18836 dhd_send_trap_to_fw(dhd_pub_t *pub, int reason, int trap)
18837 {
18838 bcm_to_info_t to_info;
18839
18840 to_info.magic = BCM_TO_MAGIC;
18841 to_info.reason = reason;
18842 to_info.trap = trap;
18843
18844 DHD_ERROR(("Sending Event reason:%d trap:%d\n", reason, trap));
18845 dhd_send_msg_to_daemon(NULL, (void *)&to_info, sizeof(bcm_to_info_t));
18846 }
18847
18848 void
18849 dhd_send_trap_to_fw_for_timeout(dhd_pub_t *pub, timeout_reasons_t reason)
18850 {
18851 int to_reason;
18852 int trap = NO_TRAP;
18853 switch (reason) {
18854 case DHD_REASON_COMMAND_TO:
18855 to_reason = REASON_COMMAND_TO;
18856 trap = DO_TRAP;
18857 break;
18858 case DHD_REASON_JOIN_TO:
18859 to_reason = REASON_JOIN_TO;
18860 break;
18861 case DHD_REASON_SCAN_TO:
18862 to_reason = REASON_SCAN_TO;
18863 break;
18864 case DHD_REASON_OQS_TO:
18865 to_reason = REASON_OQS_TO;
18866 trap = DO_TRAP;
18867 break;
18868 default:
18869 to_reason = REASON_UNKOWN;
18870 }
18871 dhd_send_trap_to_fw(pub, to_reason, trap);
18872 }
18873 #endif /* REPORT_FATAL_TIMEOUTS */
18874
18875 #ifdef DHD_LOG_DUMP
18876 void
18877 dhd_log_dump_init(dhd_pub_t *dhd)
18878 {
18879 struct dhd_log_dump_buf *dld_buf;
18880 int i = 0;
18881 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
18882 int prealloc_idx = DHD_PREALLOC_DHD_LOG_DUMP_BUF;
18883 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
18884
18885 for (i = 0; i < DLD_BUFFER_NUM; i++) {
18886 dld_buf = &g_dld_buf[i];
18887 spin_lock_init(&dld_buf->lock);
18888 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
18889 dld_buf->buffer = DHD_OS_PREALLOC(dhd, prealloc_idx++, dld_buf_size[i]);
18890 #else
18891 dld_buf->buffer = kmalloc(dld_buf_size[i], GFP_KERNEL);
18892 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
18893
18894 if (!dld_buf->buffer) {
18895 dld_buf->buffer = kmalloc(dld_buf_size[i], GFP_KERNEL);
18896 DHD_ERROR(("Try to allocate memory using kmalloc().\n"));
18897
18898 if (!dld_buf->buffer) {
18899 DHD_ERROR(("Failed to allocate memory for dld_buf[%d].\n", i));
18900 goto fail;
18901 }
18902 }
18903
18904 dld_buf->wraparound = 0;
18905 dld_buf->max = (unsigned long)dld_buf->buffer + dld_buf_size[i];
18906 dld_buf->present = dld_buf->front = dld_buf->buffer;
18907 dld_buf->remain = dld_buf_size[i];
18908 dld_buf->enable = 1;
18909 }
18910 return;
18911
18912 fail:
18913 for (i = 0; i < DLD_BUFFER_NUM; i++) {
18914 if (g_dld_buf[i].buffer) {
18915 kfree(g_dld_buf[i].buffer);
18916 }
18917 }
18918 }
18919
18920 void
18921 dhd_log_dump_deinit(dhd_pub_t *dhd)
18922 {
18923 struct dhd_log_dump_buf *dld_buf;
18924 int i = 0;
18925
18926 for (i = 0; i < DLD_BUFFER_NUM; i++) {
18927 dld_buf = &g_dld_buf[i];
18928 dld_buf->enable = 0;
18929 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
18930 DHD_OS_PREFREE(dhd, dld_buf->buffer, dld_buf_size[i]);
18931 #else
18932 kfree(dld_buf->buffer);
18933 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
18934 }
18935 }
18936
18937 void
18938 dhd_log_dump_write(int type, const char *fmt, ...)
18939 {
18940 int len = 0;
18941 char tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE] = {0, };
18942 va_list args;
18943 unsigned long flags = 0;
18944 struct dhd_log_dump_buf *dld_buf = NULL;
18945
18946 switch (type)
18947 {
18948 case DLD_BUF_TYPE_GENERAL:
18949 dld_buf = &g_dld_buf[type];
18950 break;
18951 case DLD_BUF_TYPE_SPECIAL:
18952 dld_buf = &g_dld_buf[type];
18953 break;
18954 default:
18955 DHD_ERROR(("%s: Unknown DHD_LOG_DUMP_BUF_TYPE(%d).\n",
18956 __FUNCTION__, type));
18957 return;
18958 }
18959
18960 if (dld_buf->enable != 1) {
18961 return;
18962 }
18963
18964 va_start(args, fmt);
18965
18966 len = vsnprintf(tmp_buf, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE, fmt, args);
18967 /* A non-C99-compliant vsnprintf returns -1 on truncation;
18968 * a C99-compliant one returns len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE.
18969 */
18970 if (len < 0) {
18971 return;
18972 }
18973
18974 if (len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE) {
18975 len = DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE - 1;
18976 tmp_buf[len] = '\0';
18977 }
18978
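/* If the record does not fit in the remaining space, the write position is
 * reset to the start of the buffer (wraparound), so the oldest records are
 * overwritten wholesale rather than the record being split across the
 * boundary.
 */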
18979 /* make a critical section to eliminate race conditions */
18980 spin_lock_irqsave(&dld_buf->lock, flags);
18981 if (dld_buf->remain < len) {
18982 dld_buf->wraparound = 1;
18983 dld_buf->present = dld_buf->front;
18984 dld_buf->remain = dld_buf_size[type];
18985 }
18986
18987 strncpy(dld_buf->present, tmp_buf, len);
18988 dld_buf->remain -= len;
18989 dld_buf->present += len;
18990 spin_unlock_irqrestore(&dld_buf->lock, flags);
18991
18992 /* double check invalid memory operation */
18993 ASSERT((unsigned long)dld_buf->present <= dld_buf->max);
18994 va_end(args);
18995 }
18996
18997 char*
18998 dhd_log_dump_get_timestamp(void)
18999 {
19000 static char buf[16];
19001 u64 ts_nsec;
19002 unsigned long rem_nsec;
19003
19004 ts_nsec = local_clock();
19005 rem_nsec = do_div(ts_nsec, 1000000000);
19006 snprintf(buf, sizeof(buf), "%5lu.%06lu",
19007 (unsigned long)ts_nsec, rem_nsec / 1000);
19008
19009 return buf;
19010 }
19011 #endif /* DHD_LOG_DUMP */
19012
19013 int
19014 dhd_write_file(const char *filepath, char *buf, int buf_len)
19015 {
19016 struct file *fp = NULL;
19017 mm_segment_t old_fs;
19018 int ret = 0;
19019
19020 /* change to KERNEL_DS address limit */
19021 old_fs = get_fs();
19022 set_fs(KERNEL_DS);
19023
19024 /* File is always created. */
19025 fp = filp_open(filepath, O_RDWR | O_CREAT, 0664);
19026 if (IS_ERR(fp)) {
19027 DHD_ERROR(("%s: Couldn't open file '%s' err %ld\n",
19028 __FUNCTION__, filepath, PTR_ERR(fp)));
19029 ret = BCME_ERROR;
19030 } else {
19031 if (fp->f_mode & FMODE_WRITE) {
19032 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
19033 ret = kernel_write(fp, buf, buf_len, &fp->f_pos);
19034 #else
19035 ret = vfs_write(fp, buf, buf_len, &fp->f_pos);
19036 #endif
19037 if (ret < 0) {
19038 DHD_ERROR(("%s: Couldn't write file '%s'\n",
19039 __FUNCTION__, filepath));
19040 ret = BCME_ERROR;
19041 } else {
19042 ret = BCME_OK;
19043 }
19044 }
19045 filp_close(fp, NULL);
19046 }
19047
19048 /* restore previous address limit */
19049 set_fs(old_fs);
19050
19051 return ret;
19052 }
19053
19054 int
19055 dhd_read_file(const char *filepath, char *buf, int buf_len)
19056 {
19057 struct file *fp = NULL;
19058 mm_segment_t old_fs;
19059 int ret;
19060
19061 /* change to KERNEL_DS address limit */
19062 old_fs = get_fs();
19063 set_fs(KERNEL_DS);
19064
19065 fp = filp_open(filepath, O_RDONLY, 0);
19066 if (IS_ERR(fp)) {
19067 set_fs(old_fs);
19068 DHD_ERROR(("%s: File %s doesn't exist\n", __FUNCTION__, filepath));
19069 return BCME_ERROR;
19070 }
19071
19072 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
19073 ret = kernel_read(fp, buf, buf_len, NULL);
19074 #else
19075 ret = kernel_read(fp, 0, buf, buf_len);
19076 #endif
19077 filp_close(fp, NULL);
19078
19079 /* restore previous address limit */
19080 set_fs(old_fs);
19081
19082 /* Return the number of bytes read */
19083 if (ret > 0) {
19084 /* Success to read */
19085 ret = 0;
19086 } else {
19087 DHD_ERROR(("%s: Couldn't read the file %s, ret=%d\n",
19088 __FUNCTION__, filepath, ret));
19089 ret = BCME_ERROR;
19090 }
19091
19092 return ret;
19093 }
19094
19095 int
19096 dhd_write_file_and_check(const char *filepath, char *buf, int buf_len)
19097 {
19098 int ret;
19099
19100 ret = dhd_write_file(filepath, buf, buf_len);
19101 if (ret < 0) {
19102 return ret;
19103 }
19104
19105 /* Read the file again and check if the file size is not zero */
19106 memset(buf, 0, buf_len);
19107 ret = dhd_read_file(filepath, buf, buf_len);
19108
19109 return ret;
19110 }
19111
19112 #ifdef DHD_LB_TXP
19113 #define DHD_LB_TXBOUND 64
19114 /*
19115 * Function that performs the TX processing on a given CPU
19116 */
19117 bool
19118 dhd_lb_tx_process(dhd_info_t *dhd)
19119 {
19120 struct sk_buff *skb;
19121 int cnt = 0;
19122 struct net_device *net;
19123 int ifidx;
19124 bool resched = FALSE;
19125
19126 DHD_TRACE(("%s(): TX Processing \r\n", __FUNCTION__));
19127 if (dhd == NULL) {
19128 DHD_ERROR((" Null pointer DHD \r\n"));
19129 return resched;
19130 }
19131
19132 DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txp_percpu_run_cnt);
19133
19134 /* Base Loop to perform the actual Tx */
19135 do {
19136 skb = skb_dequeue(&dhd->tx_pend_queue);
19137 if (skb == NULL) {
19138 DHD_TRACE(("Dequeued a Null Packet \r\n"));
19139 break;
19140 }
19141 cnt++;
19142
19143 net = DHD_LB_TX_PKTTAG_NETDEV((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb));
19144 ifidx = DHD_LB_TX_PKTTAG_IFIDX((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb));
19145
19146 BCM_REFERENCE(net);
19147 DHD_TRACE(("Processing skb %p for net %p index %d \r\n", skb,
19148 net, ifidx));
19149
19150 __dhd_sendpkt(&dhd->pub, ifidx, skb);
19151
19152 if (cnt >= DHD_LB_TXBOUND) {
19153 resched = TRUE;
19154 break;
19155 }
19156 } while (1);
19157
19158 DHD_INFO(("%s(): Processed %d packets \r\n", __FUNCTION__, cnt));
19159
19160 return resched;
19161 }
19162
19163 void
19164 dhd_lb_tx_handler(unsigned long data)
19165 {
19166 dhd_info_t *dhd = (dhd_info_t *)data;
19167
19168 if (dhd_lb_tx_process(dhd)) {
19169 dhd_tasklet_schedule(&dhd->tx_tasklet);
19170 }
19171 }
19172
19173 #endif /* DHD_LB_TXP */
19174
19175 /* ----------------------------------------------------------------------------
19176 * Infrastructure code for sysfs interface support for DHD
19177 *
19178 * What is sysfs interface?
19179 * https://www.kernel.org/doc/Documentation/filesystems/sysfs.txt
19180 *
19181 * Why sysfs interface?
19182 * This is the Linux standard way of changing/configuring Run Time parameters
19183 * for a driver. We can use this interface to control "linux" specific driver
19184 * parameters.
19185 *
19186 * -----------------------------------------------------------------------------
19187 */
19188
19189 #include <linux/sysfs.h>
19190 #include <linux/kobject.h>
19191
19192 #if defined(DHD_TRACE_WAKE_LOCK)
19193
19194 /* Function to show the history buffer */
19195 static ssize_t
19196 show_wklock_trace(struct dhd_info *dev, char *buf)
19197 {
19198 ssize_t ret = 0;
19199 dhd_info_t *dhd = (dhd_info_t *)dev;
19200
19201 buf[ret] = '\n';
19202 buf[ret+1] = 0;
19203
19204 dhd_wk_lock_stats_dump(&dhd->pub);
19205 return ret+1;
19206 }
19207
19208 /* Function to enable/disable wakelock trace */
19209 static ssize_t
19210 wklock_trace_onoff(struct dhd_info *dev, const char *buf, size_t count)
19211 {
19212 unsigned long onoff;
19213 unsigned long flags;
19214 dhd_info_t *dhd = (dhd_info_t *)dev;
19215
19216 onoff = bcm_strtoul(buf, NULL, 10);
19217 if (onoff != 0 && onoff != 1) {
19218 return -EINVAL;
19219 }
19220
19221 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
19222 trace_wklock_onoff = onoff;
19223 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
19224 if (trace_wklock_onoff) {
19225 printk("ENABLE WAKLOCK TRACE\n");
19226 } else {
19227 printk("DISABLE WAKELOCK TRACE\n");
19228 }
19229
19230 return (ssize_t)(onoff+1);
19231 }
19232 #endif /* DHD_TRACE_WAKE_LOCK */
19233
19234 #if defined(DHD_LB_TXP)
19235 static ssize_t
19236 show_lbtxp(struct dhd_info *dev, char *buf)
19237 {
19238 ssize_t ret = 0;
19239 unsigned long onoff;
19240 dhd_info_t *dhd = (dhd_info_t *)dev;
19241
19242 onoff = atomic_read(&dhd->lb_txp_active);
19243 ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n",
19244 onoff);
19245 return ret;
19246 }
19247
19248 static ssize_t
19249 lbtxp_onoff(struct dhd_info *dev, const char *buf, size_t count)
19250 {
19251 unsigned long onoff;
19252 dhd_info_t *dhd = (dhd_info_t *)dev;
19253 int i;
19254
19255 onoff = bcm_strtoul(buf, NULL, 10);
19256
19258 if (onoff != 0 && onoff != 1) {
19259 return -EINVAL;
19260 }
19261 atomic_set(&dhd->lb_txp_active, onoff);
19262
19263 /* Since the scheme is changed clear the counters */
19264 for (i = 0; i < NR_CPUS; i++) {
19265 DHD_LB_STATS_CLR(dhd->txp_percpu_run_cnt[i]);
19266 DHD_LB_STATS_CLR(dhd->tx_start_percpu_run_cnt[i]);
19267 }
19268
19269 return count;
19270 }
19271
19272 #endif /* DHD_LB_TXP */
19273 /*
19274 * Generic Attribute Structure for DHD.
19275 * If we have to add a new sysfs entry under /sys/bcm-dhd/, we have
19276 * to instantiate an object of type dhd_attr, populate it with
19277 * the required show/store functions (ex:- dhd_attr_cpumask_primary)
19278 * and add the object to the default_attrs[] array, which gets registered
19279 * to the kobject of dhd (named bcm-dhd).
19280 */
19281
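/*
 * With the kobject registered at the sysfs root (see dhd_sysfs_init() below,
 * which passes a NULL parent), the attributes are reachable from user space,
 * e.g.:
 * cat /sys/bcm-dhd/wklock_trace
 * echo 1 > /sys/bcm-dhd/lbtxp
 * (node names assume the corresponding feature macros are compiled in)
 */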
19282 struct dhd_attr {
19283 struct attribute attr;
19284 ssize_t(*show)(struct dhd_info *, char *);
19285 ssize_t(*store)(struct dhd_info *, const char *, size_t count);
19286 };

#if defined(DHD_TRACE_WAKE_LOCK)
static struct dhd_attr dhd_attr_wklock =
	__ATTR(wklock_trace, 0660, show_wklock_trace, wklock_trace_onoff);
#endif /* DHD_TRACE_WAKE_LOCK */

#if defined(DHD_LB_TXP)
static struct dhd_attr dhd_attr_lbtxp =
	__ATTR(lbtxp, 0660, show_lbtxp, lbtxp_onoff);
#endif /* DHD_LB_TXP */

/* Attribute objects that get registered with the "bcm-dhd" kobject tree */
static struct attribute *default_attrs[] = {
#if defined(DHD_TRACE_WAKE_LOCK)
	&dhd_attr_wklock.attr,
#endif /* DHD_TRACE_WAKE_LOCK */
#if defined(DHD_LB_TXP)
	&dhd_attr_lbtxp.attr,
#endif /* DHD_LB_TXP */
	NULL
};

#define to_dhd(k) container_of(k, struct dhd_info, dhd_kobj)
#define to_attr(a) container_of(a, struct dhd_attr, attr)

/*
 * bcm-dhd kobject show function; the "attr" attribute specifies for which
 * node under "bcm-dhd" the show function is called.
 */
static ssize_t dhd_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
	dhd_info_t *dhd = to_dhd(kobj);
	struct dhd_attr *d_attr = to_attr(attr);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
	int ret;

	if (d_attr->show)
		ret = d_attr->show(dhd, buf);
	else
		ret = -EIO;

	return ret;
}
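
/*
 * Dispatch example (assuming a build with DHD_TRACE_WAKE_LOCK): a read of
 * /sys/bcm-dhd/wklock_trace enters dhd_show() with attr pointing at
 * dhd_attr_wklock.attr, so to_attr() recovers dhd_attr_wklock and
 * d_attr->show resolves to show_wklock_trace(). Stores are routed the
 * same way through dhd_store() below.
 */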

/*
 * bcm-dhd kobject store function; the "attr" attribute specifies for which
 * node under "bcm-dhd" the store function is called.
 */
static ssize_t dhd_store(struct kobject *kobj, struct attribute *attr,
	const char *buf, size_t count)
{
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
	dhd_info_t *dhd = to_dhd(kobj);
	struct dhd_attr *d_attr = to_attr(attr);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
	int ret;

	if (d_attr->store)
		ret = d_attr->store(dhd, buf, count);
	else
		ret = -EIO;

	return ret;
}

static struct sysfs_ops dhd_sysfs_ops = {
	.show = dhd_show,
	.store = dhd_store,
};

static struct kobj_type dhd_ktype = {
	.sysfs_ops = &dhd_sysfs_ops,
	.default_attrs = default_attrs,
};

/* Create a kobject and attach to sysfs interface */
static int dhd_sysfs_init(dhd_info_t *dhd)
{
	int ret = -1;

	if (dhd == NULL) {
		DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__));
		return ret;
	}

	/* Initialize the kobject */
	ret = kobject_init_and_add(&dhd->dhd_kobj, &dhd_ktype, NULL, "bcm-dhd");
	if (ret) {
		kobject_put(&dhd->dhd_kobj);
		DHD_ERROR(("%s(): Unable to allocate kobject \r\n", __FUNCTION__));
		return ret;
	}

	/*
	 * We are always responsible for sending the uevent that the kobject
	 * was added to the system.
	 */
	kobject_uevent(&dhd->dhd_kobj, KOBJ_ADD);

	return ret;
}

/* Done with the kobject and detach the sysfs interface */
static void dhd_sysfs_exit(dhd_info_t *dhd)
{
	if (dhd == NULL) {
		DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__));
		return;
	}

	/* Release the kobject */
	if (dhd->dhd_kobj.state_initialized)
		kobject_put(&dhd->dhd_kobj);
}
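
/*
 * Usage sketch: dhd_sysfs_init() registers the "bcm-dhd" kobject with a
 * NULL parent, so the attributes land directly under /sys/bcm-dhd/. On a
 * build with DHD_TRACE_WAKE_LOCK and DHD_LB_TXP enabled, for example:
 *
 *   echo 1 > /sys/bcm-dhd/wklock_trace    (enable wakelock tracing)
 *   cat /sys/bcm-dhd/lbtxp                (read TX-path load-balance state)
 */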

#ifdef DHD_DEBUG_UART
bool
dhd_debug_uart_is_running(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	if (dhd->duart_execute) {
		return TRUE;
	}

	return FALSE;
}

/* Forward declaration; the deferred-work handler calls it before the definition */
static void dhd_debug_uart_exec(dhd_pub_t *dhdp, char *cmd);

static void
dhd_debug_uart_exec_rd(void *handle, void *event_info, u8 event)
{
	dhd_pub_t *dhdp = handle;
	dhd_debug_uart_exec(dhdp, "rd");
}

static void
dhd_debug_uart_exec(dhd_pub_t *dhdp, char *cmd)
{
	int ret;

	char *argv[] = {DHD_DEBUG_UART_EXEC_PATH, cmd, NULL};
	char *envp[] = {"HOME=/", "TERM=linux", "PATH=/sbin:/system/bin", NULL};

#ifdef DHD_FW_COREDUMP
	if (dhdp->memdump_enabled == DUMP_MEMFILE_BUGON)
#endif
	{
		if (dhdp->hang_reason == HANG_REASON_PCIE_LINK_DOWN ||
#ifdef DHD_FW_COREDUMP
			dhdp->memdump_success == FALSE ||
#endif
			FALSE) {
			dhdp->info->duart_execute = TRUE;
			DHD_ERROR(("DHD: %s - execute %s %s\n",
				__FUNCTION__, DHD_DEBUG_UART_EXEC_PATH, cmd));
			ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
			DHD_ERROR(("DHD: %s - %s %s ret = %d\n",
				__FUNCTION__, DHD_DEBUG_UART_EXEC_PATH, cmd, ret));
			dhdp->info->duart_execute = FALSE;

#ifdef DHD_LOG_DUMP
			if (dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP)
#endif
			{
				BUG_ON(1);
			}
		}
	}
}
#endif /* DHD_DEBUG_UART */

#if defined(DHD_BLOB_EXISTENCE_CHECK)
void
dhd_set_blob_support(dhd_pub_t *dhdp, char *fw_path)
{
	struct file *fp;
	char *filepath = CONFIG_BCMDHD_CLM_PATH;

	fp = filp_open(filepath, O_RDONLY, 0);
	if (IS_ERR(fp)) {
		DHD_ERROR(("%s: ----- blob file doesn't exist -----\n", __FUNCTION__));
		dhdp->is_blob = FALSE;
	} else {
		DHD_ERROR(("%s: ----- blob file exists -----\n", __FUNCTION__));
		dhdp->is_blob = TRUE;
#if defined(CONCATE_BLOB)
		strncat(fw_path, "_blob", strlen("_blob"));
#else
		BCM_REFERENCE(fw_path);
#endif /* CONCATE_BLOB */
		filp_close(fp, NULL);
	}
}
#endif /* DHD_BLOB_EXISTENCE_CHECK */

#if defined(PCIE_FULL_DONGLE)
/** test / loopback */
void
dmaxfer_free_dmaaddr_handler(void *handle, void *event_info, u8 event)
{
	dmaxref_mem_map_t *dmmap = (dmaxref_mem_map_t *)event_info;
	dhd_info_t *dhd_info = (dhd_info_t *)handle;
	dhd_pub_t *dhdp;

	if (event != DHD_WQ_WORK_DMA_LB_MEM_REL) {
		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
		return;
	}

	/* Validate the handle before deriving the pub pointer from it */
	if (dhd_info == NULL) {
		DHD_ERROR(("%s: invalid dhd_info\n", __FUNCTION__));
		return;
	}
	dhdp = &dhd_info->pub;

	if (dmmap == NULL) {
		DHD_ERROR(("%s: dmmap is null\n", __FUNCTION__));
		return;
	}
	dmaxfer_free_prev_dmaaddr(dhdp, dmmap);
}

void
dhd_schedule_dmaxfer_free(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap)
{
	dhd_info_t *dhd_info = dhdp->info;

	dhd_deferred_schedule_work(dhd_info->dhd_deferred_wq, (void *)dmmap,
		DHD_WQ_WORK_DMA_LB_MEM_REL, dmaxfer_free_dmaaddr_handler, DHD_WQ_WORK_PRIORITY_LOW);
}
#endif /* PCIE_FULL_DONGLE */
/* ---------------------------- End of sysfs implementation ------------------------------------- */
#ifdef HOFFLOAD_MODULES
void
dhd_linux_get_modfw_address(dhd_pub_t *dhd)
{
	const char *module_name = NULL;
	const struct firmware *module_fw;
	struct module_metadata *hmem = &dhd->hmem;

	if (dhd_hmem_module_string[0] != '\0') {
		module_name = dhd_hmem_module_string;
	} else {
		DHD_ERROR(("%s No module image name specified\n", __FUNCTION__));
		return;
	}
	if (request_firmware(&module_fw, module_name, dhd_bus_to_dev(dhd->bus))) {
		DHD_ERROR(("modules.img not available\n"));
		return;
	}
	if (!dhd_alloc_module_memory(dhd->bus, module_fw->size, hmem)) {
		release_firmware(module_fw);
		return;
	}
	memcpy(hmem->data, module_fw->data, module_fw->size);
	release_firmware(module_fw);
}
#endif /* HOFFLOAD_MODULES */

#ifdef SET_PCIE_IRQ_CPU_CORE
void
dhd_set_irq_cpucore(dhd_pub_t *dhdp, int set)
{
	unsigned int irq;
	if (!dhdp) {
		DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
		return;
	}

	if (!dhdp->bus) {
		DHD_ERROR(("%s : dhd->bus is NULL\n", __FUNCTION__));
		return;
	}

	if (dhdpcie_get_pcieirq(dhdp->bus, &irq)) {
		return;
	}

	set_irq_cpucore(irq, set);
}
#endif /* SET_PCIE_IRQ_CPU_CORE */

#if defined(DHD_HANG_SEND_UP_TEST)
void
dhd_make_hang_with_reason(struct net_device *dev, const char *string_num)
{
	dhd_info_t *dhd = NULL;
	dhd_pub_t *dhdp = NULL;
	uint reason = HANG_REASON_MAX;
	char buf[WLC_IOCTL_SMLEN] = {0, };
	uint32 fw_test_code = 0;
	dhd = DHD_DEV_INFO(dev);

	if (dhd) {
		dhdp = &dhd->pub;
	}

	if (!dhd || !dhdp) {
		return;
	}

	reason = (uint) bcm_strtoul(string_num, NULL, 0);
	DHD_ERROR(("Enter %s, reason=0x%x\n", __FUNCTION__, reason));

	if (reason == 0) {
		if (dhdp->req_hang_type) {
			DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
				__FUNCTION__, dhdp->req_hang_type));
			dhdp->req_hang_type = 0;
			return;
		} else {
			DHD_ERROR(("%s, No requested HANG test\n", __FUNCTION__));
			return;
		}
	} else if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) {
		DHD_ERROR(("Invalid HANG request, reason 0x%x\n", reason));
		return;
	}

	if (dhdp->req_hang_type != 0) {
		DHD_ERROR(("Already HANG requested for test\n"));
		return;
	}

	switch (reason) {
		case HANG_REASON_IOCTL_RESP_TIMEOUT:
			DHD_ERROR(("Make HANG!!!: IOCTL response timeout(0x%x)\n", reason));
			dhdp->req_hang_type = reason;
			fw_test_code = 102; /* resumed on timeout */
			bcm_mkiovar("bus:disconnect", (void *)&fw_test_code, 4, buf, sizeof(buf));
			dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
			break;
		case HANG_REASON_DONGLE_TRAP:
			DHD_ERROR(("Make HANG!!!: Dongle trap (0x%x)\n", reason));
			dhdp->req_hang_type = reason;
			fw_test_code = 99; /* dongle trap */
			bcm_mkiovar("bus:disconnect", (void *)&fw_test_code, 4, buf, sizeof(buf));
			dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
			break;
		case HANG_REASON_D3_ACK_TIMEOUT:
			DHD_ERROR(("Make HANG!!!: D3 ACK timeout (0x%x)\n", reason));
			dhdp->req_hang_type = reason;
			break;
		case HANG_REASON_BUS_DOWN:
			DHD_ERROR(("Make HANG!!!: BUS down(0x%x)\n", reason));
			dhdp->req_hang_type = reason;
			break;
		case HANG_REASON_PCIE_LINK_DOWN:
		case HANG_REASON_MSGBUF_LIVELOCK:
			dhdp->req_hang_type = 0;
			DHD_ERROR(("Does not support requested HANG(0x%x)\n", reason));
			break;
		case HANG_REASON_IFACE_OP_FAILURE:
			DHD_ERROR(("Make HANG!!!: P2P interface delete failure(0x%x)\n", reason));
			dhdp->req_hang_type = reason;
			break;
		case HANG_REASON_HT_AVAIL_ERROR:
			dhdp->req_hang_type = 0;
			DHD_ERROR(("PCIe does not support requested HANG(0x%x)\n", reason));
			break;
		case HANG_REASON_PCIE_RC_LINK_UP_FAIL:
			DHD_ERROR(("Make HANG!!!: Link Up(0x%x)\n", reason));
			dhdp->req_hang_type = reason;
			break;
		default:
			dhdp->req_hang_type = 0;
			DHD_ERROR(("Unknown HANG request (0x%x)\n", reason));
			break;
	}
}
#endif /* DHD_HANG_SEND_UP_TEST */
#ifdef DHD_WAKE_STATUS
wake_counts_t*
dhd_get_wakecount(dhd_pub_t *dhdp)
{
#ifdef BCMDBUS
	return NULL;
#else
	return dhd_bus_get_wakecount(dhdp);
#endif /* BCMDBUS */
}
#endif /* DHD_WAKE_STATUS */

#ifdef BCM_ASLR_HEAP
uint32
dhd_get_random_number(void)
{
	uint32 rand = 0;
	get_random_bytes_arch(&rand, sizeof(rand));
	return rand;
}
#endif /* BCM_ASLR_HEAP */

#ifdef DHD_PKT_LOGGING
void
dhd_pktlog_dump(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;

	if (!dhd) {
		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
		return;
	}

	if (dhd_pktlog_write_file(&dhd->pub)) {
		DHD_ERROR(("%s: writing pktlog dump to the file failed\n", __FUNCTION__));
		return;
	}
}

void
dhd_schedule_pktlog_dump(dhd_pub_t *dhdp)
{
	dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
		(void *)NULL, DHD_WQ_WORK_PKTLOG_DUMP,
		dhd_pktlog_dump, DHD_WQ_WORK_PRIORITY_HIGH);
}
#endif /* DHD_PKT_LOGGING */

void *dhd_get_pub(struct net_device *dev)
{
	dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
	if (dhdinfo) {
		return (void *)&dhdinfo->pub;
	} else {
		printf("%s: null dhdinfo\n", __FUNCTION__);
		return NULL;
	}
}

void *dhd_get_conf(struct net_device *dev)
{
	dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
	if (dhdinfo) {
		return (void *)dhdinfo->pub.conf;
	} else {
		printf("%s: null dhdinfo\n", __FUNCTION__);
		return NULL;
	}
}

bool dhd_os_wd_timer_enabled(void *bus)
{
	dhd_pub_t *pub = bus;
	dhd_info_t *dhd = (dhd_info_t *)pub->info;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
	if (!dhd) {
		DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
		return FALSE;
	}
	return dhd->wd_timer_valid;
}

MODULE_IMPORT_NS(VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver);