/*
 * DHD Linux header file - contains private structure definition of the Linux
 * specific layer
 *
 * Copyright (C) 1999-2019, Broadcom.
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions
 * of the license of that module. An independent module is a module which is
 * not derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: dhd_linux_priv.h 815919 2019-04-22 09:06:50Z $
 */

#ifndef __DHD_LINUX_PRIV_H__
#define __DHD_LINUX_PRIV_H__

#include <osl.h>

#ifdef SHOW_LOGTRACE
#include <linux/syscalls.h>
#include <event_log.h>
#endif /* SHOW_LOGTRACE */
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif /* CONFIG_COMPAT */
#include <dngl_stats.h>
#include <dhd.h>
#include <dhd_dbg.h>
#include <dhd_debug.h>
#include <dhd_linux.h>
#include <dhd_bus.h>

#ifdef PCIE_FULL_DONGLE
#include <bcmmsgbuf.h>
#include <dhd_flowring.h>
#endif /* PCIE_FULL_DONGLE */

/*
 * Do not include this header except from dhd_linux.c and dhd_linux_sysfs.c.
 * Local private structure (extension of pub).
 */
typedef struct dhd_info {
#if defined(WL_WIRELESS_EXT)
	wl_iw_t iw;		/* wireless extensions state (must be first) */
#endif /* defined(WL_WIRELESS_EXT) */
	dhd_pub_t pub;
	/* for supporting multiple interfaces.
	 * static_ifs hold the net ifaces without valid FW IF
	 */
	dhd_if_t *iflist[DHD_MAX_IFS + DHD_MAX_STATIC_IFS];

	wifi_adapter_info_t *adapter;	/* adapter information, interrupt, fw path etc. */
	char fw_path[PATH_MAX];		/* path to firmware image */
	char nv_path[PATH_MAX];		/* path to nvram vars file */
	char clm_path[PATH_MAX];	/* path to clm vars file */
	char conf_path[PATH_MAX];	/* path to config vars file */
#ifdef DHD_UCODE_DOWNLOAD
	char uc_path[PATH_MAX];		/* path to ucode image */
#endif /* DHD_UCODE_DOWNLOAD */

	/* serialize dhd iovars */
	struct mutex dhd_iovar_mutex;

	struct semaphore proto_sem;
#ifdef PROP_TXSTATUS
	spinlock_t wlfc_spinlock;

#ifdef BCMDBUS
	ulong wlfc_lock_flags;
	ulong wlfc_pub_lock_flags;
#endif /* BCMDBUS */
#endif /* PROP_TXSTATUS */
	wait_queue_head_t ioctl_resp_wait;
	wait_queue_head_t d3ack_wait;
	wait_queue_head_t dhd_bus_busy_state_wait;
	wait_queue_head_t dmaxfer_wait;
	uint32 default_wd_interval;

	timer_list_compat_t timer;
	bool wd_timer_valid;
	struct tasklet_struct tasklet;
	spinlock_t sdlock;
	spinlock_t txqlock;
	spinlock_t dhd_lock;
	spinlock_t txoff_lock;
#ifdef BCMDBUS
	ulong txqlock_flags;
#endif /* BCMDBUS */

#ifndef BCMDBUS
	struct semaphore sdsem;
	tsk_ctl_t thr_dpc_ctl;
	tsk_ctl_t thr_wdt_ctl;
#endif /* BCMDBUS */

	tsk_ctl_t thr_rxf_ctl;
	spinlock_t rxf_lock;
	bool rxthread_enabled;

	/* Wakelocks */
#if defined(CONFIG_HAS_WAKELOCK)
	struct wake_lock wl_wifi;	/* Wifi wakelock */
	struct wake_lock wl_rxwake;	/* Wifi rx wakelock */
	struct wake_lock wl_ctrlwake;	/* Wifi ctrl wakelock */
	struct wake_lock wl_wdwake;	/* Wifi wd wakelock */
	struct wake_lock wl_evtwake;	/* Wifi event wakelock */
	struct wake_lock wl_pmwake;	/* Wifi pm handler wakelock */
	struct wake_lock wl_txflwake;	/* Wifi tx flow wakelock */
#ifdef BCMPCIE_OOB_HOST_WAKE
	struct wake_lock wl_intrwake;	/* Host wakeup wakelock */
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
	struct wake_lock wl_scanwake;	/* Wifi scan wakelock */
#endif /* DHD_USE_SCAN_WAKELOCK */
#endif /* CONFIG_HAS_WAKELOCK */

	/* net_device interface lock, prevent race conditions among net_dev
	 * interface calls and wifi_on or wifi_off
	 */
	struct mutex dhd_net_if_mutex;
	struct mutex dhd_suspend_mutex;
#if defined(PKT_FILTER_SUPPORT) && defined(APF)
	struct mutex dhd_apf_mutex;
#endif /* PKT_FILTER_SUPPORT && APF */
	spinlock_t wakelock_spinlock;
	spinlock_t wakelock_evt_spinlock;
	uint32 wakelock_counter;
	int wakelock_wd_counter;
	int wakelock_rx_timeout_enable;
	int wakelock_ctrl_timeout_enable;
	bool waive_wakelock;
	uint32 wakelock_before_waive;

	/* Thread to issue ioctl for multicast */
	wait_queue_head_t ctrl_wait;
	atomic_t pend_8021x_cnt;
	dhd_attach_states_t dhd_state;
#ifdef SHOW_LOGTRACE
	dhd_event_log_t event_data;
#endif /* SHOW_LOGTRACE */

#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
	struct early_suspend early_suspend;
#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */

#ifdef ARP_OFFLOAD_SUPPORT
	u32 pend_ipaddr;
#endif /* ARP_OFFLOAD_SUPPORT */
#ifdef DHDTCPACK_SUPPRESS
	spinlock_t tcpack_lock;
#endif /* DHDTCPACK_SUPPRESS */
#ifdef FIX_CPU_MIN_CLOCK
	bool cpufreq_fix_status;
	struct mutex cpufreq_fix;
	struct pm_qos_request dhd_cpu_qos;
#ifdef FIX_BUS_MIN_CLOCK
	struct pm_qos_request dhd_bus_qos;
#endif /* FIX_BUS_MIN_CLOCK */
#endif /* FIX_CPU_MIN_CLOCK */
	void *dhd_deferred_wq;
#ifdef DEBUG_CPU_FREQ
	struct notifier_block freq_trans;
	int __percpu *new_freq;
#endif /* DEBUG_CPU_FREQ */
	unsigned int unit;
	struct notifier_block pm_notifier;
#ifdef DHD_PSTA
	uint32 psta_mode;	/* PSTA or PSR */
#endif /* DHD_PSTA */
#ifdef DHD_WET
	uint32 wet_mode;
#endif /* DHD_WET */
#ifdef DHD_DEBUG
	dhd_dump_t *dump;
	struct timer_list join_timer;
	u32 join_timeout_val;
	bool join_timer_active;
	uint scan_time_count;
	struct timer_list scan_timer;
	bool scan_timer_active;
#endif /* DHD_DEBUG */
#if defined(DHD_LB)
	/* CPU Load Balance dynamic CPU selection */

	/* Variable that tracks the current CPUs available for candidacy */
	cpumask_var_t cpumask_curr_avail;

	/* Primary and secondary CPU mask */
	cpumask_var_t cpumask_primary, cpumask_secondary;	/* configuration */
	cpumask_var_t cpumask_primary_new, cpumask_secondary_new;	/* temp */

	struct notifier_block cpu_notifier;

	/* Tasklet to handle Tx Completion packet freeing */
	struct tasklet_struct tx_compl_tasklet;
	atomic_t tx_compl_cpu;

	/* Tasklet to handle RxBuf Post during Rx completion */
	struct tasklet_struct rx_compl_tasklet;
	atomic_t rx_compl_cpu;

	/* Napi struct for handling rx packet sendup. Packets are removed from
	 * the H2D RxCompl ring and placed into rx_pend_queue. rx_pend_queue is
	 * then appended to rx_napi_queue (w/ lock) and the rx_napi_struct is
	 * scheduled to run on rx_napi_cpu.
	 */
	struct sk_buff_head rx_pend_queue ____cacheline_aligned;
	struct sk_buff_head rx_napi_queue ____cacheline_aligned;
	struct napi_struct rx_napi_struct ____cacheline_aligned;
	atomic_t rx_napi_cpu;	/* cpu on which the napi is dispatched */
	struct net_device *rx_napi_netdev;	/* netdev of primary interface */

	struct work_struct rx_napi_dispatcher_work;
	struct work_struct tx_compl_dispatcher_work;
	struct work_struct tx_dispatcher_work;
	struct work_struct rx_compl_dispatcher_work;

	/* Number of times the DPC tasklet ran */
	uint32 dhd_dpc_cnt;
	/* Number of times NAPI processing got scheduled */
	uint32 napi_sched_cnt;
	/* Number of times NAPI processing ran on each available core */
	uint32 *napi_percpu_run_cnt;
	/* Number of times RX Completions got scheduled */
	uint32 rxc_sched_cnt;
	/* Number of times RX Completion ran on each available core */
	uint32 *rxc_percpu_run_cnt;
	/* Number of times TX Completions got scheduled */
	uint32 txc_sched_cnt;
	/* Number of times TX Completions ran on each available core */
	uint32 *txc_percpu_run_cnt;
	/* CPU status */
	/* Number of times each CPU came online */
	uint32 *cpu_online_cnt;
	/* Number of times each CPU went offline */
	uint32 *cpu_offline_cnt;

	/* Number of times TX processing ran on each core */
	uint32 *txp_percpu_run_cnt;
	/* Number of times TX start ran on each core */
	uint32 *tx_start_percpu_run_cnt;

	/* Tx load balancing */

	/* Need to see if batch processing is really required in case of TX
	 * processing. In case of RX the dongle can send a bunch of rx
	 * completions at once, hence we took a three-queue approach:
	 * enqueue - adds the skbs to rx_pend_queue
	 * dispatch - takes a lock and appends the list of skbs from the pend
	 * queue to the napi queue
	 * napi processing - copies the napi queue into a local queue and
	 * works on it.
	 * But for TX it is going to be one skb at a time, so we are just
	 * planning to use a single queue and the lock-protected skb queue
	 * functions to add and process it. If that turns out to be
	 * inefficient we will revisit the queue design.
	 */

	/* When NET_TX tries to send a TX packet, put it into tx_pend_queue */
	/* struct sk_buff_head tx_pend_queue ____cacheline_aligned; */
	/*
	 * From the tasklet that actually sends out data, copy the list
	 * tx_pend_queue into tx_active_queue. Thereby we only need the
	 * spinlock to perform the copy; the rest of the code, i.e. the code
	 * constructing tx_pend_queue and the code processing tx_active_queue,
	 * can be lockless. The concept is borrowed as-is from RX processing.
	 */
	/* struct sk_buff_head tx_active_queue ____cacheline_aligned; */

	/* Control TXP at runtime, enabled by default */
	atomic_t lb_txp_active;

	/* Control RXP at runtime, enabled by default */
	atomic_t lb_rxp_active;

	/*
	 * When NET_TX tries to send a TX packet, put it into tx_pend_queue.
	 * For now, the processing tasklet will also directly operate on this
	 * queue.
	 */
	struct sk_buff_head tx_pend_queue ____cacheline_aligned;

	/* CPU on which the DHD Tx is happening */
	atomic_t tx_cpu;

	/* CPU on which the network stack is calling DHD's xmit function */
	atomic_t net_tx_cpu;

	/* Tasklet context from which DHD's TX processing happens */
	struct tasklet_struct tx_tasklet;

	/*
	 * Consumer Histogram - NAPI RX Packet processing
	 * -----------------------------------------------
	 * On each CPU, when the NAPI RX packet processing callback is
	 * invoked, the number of packets processed is captured in this data
	 * structure. It is difficult to capture the "exact" number of packets
	 * processed, so with the packet counter being a 32-bit value we keep
	 * a bucket with 8 bins (2^1, 2^2 ... 2^8). The number of packets
	 * processed is rounded up to the next power of 2 and the value in
	 * the corresponding bin is incremented.
	 * For example, assume that on CPU 1 NAPI Rx runs 3 times and the
	 * packet counts processed are as follows (assume the bin counters
	 * start at 0):
	 * iteration 1 - 10 (the 2^4 bin counter increments to 1)
	 * iteration 2 - 30 (the 2^5 bin counter increments to 1)
	 * iteration 3 - 15 (the 2^4 bin counter increments by 1 to become 2)
	 */
	uint32 *napi_rx_hist[HIST_BIN_SIZE];
	uint32 *txc_hist[HIST_BIN_SIZE];
	uint32 *rxc_hist[HIST_BIN_SIZE];
#endif /* DHD_LB */
#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
	struct work_struct axi_error_dispatcher_work;
#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
#ifdef SHOW_LOGTRACE
#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
	tsk_ctl_t thr_logtrace_ctl;
#else
	struct delayed_work event_log_dispatcher_work;
#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
#endif /* SHOW_LOGTRACE */

#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
	struct kobject dhd_kobj;
	struct kobject dhd_conf_file_kobj;
	struct timer_list timesync_timer;
#if defined(BT_OVER_SDIO)
	char btfw_path[PATH_MAX];
#endif /* defined (BT_OVER_SDIO) */
#ifdef WL_MONITOR
	struct net_device *monitor_dev;	/* monitor pseudo device */
	struct sk_buff *monitor_skb;
	uint monitor_len;
	uint monitor_type;
#endif /* WL_MONITOR */
#if defined(BT_OVER_SDIO)
	struct mutex bus_user_lock;	/* lock for sdio bus apis shared between WLAN & BT */
	int bus_user_count;	/* User counts of sdio bus shared between WLAN & BT */
#endif /* BT_OVER_SDIO */
#ifdef SHOW_LOGTRACE
	struct sk_buff_head evt_trace_queue ____cacheline_aligned;
#endif /* SHOW_LOGTRACE */
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
	struct workqueue_struct *tx_wq;
	struct workqueue_struct *rx_wq;
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
#ifdef DHD_DEBUG_UART
	bool duart_execute;
#endif /* DHD_DEBUG_UART */
	struct mutex logdump_lock;
	/* indicates mem_dump was scheduled as work queue or called directly */
	bool scheduled_memdump;
	struct work_struct dhd_hang_process_work;
#ifdef DHD_HP2P
	spinlock_t hp2p_lock;
#endif /* DHD_HP2P */
} dhd_info_t;
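
/*
 * Illustrative sketch, not part of the original interface: since dhd_pub_t
 * is embedded inside dhd_info_t (the "extension of pub" noted above), a
 * hypothetical helper could recover the private wrapper from a pub pointer
 * with container_of(). The driver's real accessors may differ.
 */
static inline dhd_info_t *
dhd_info_from_pub_sketch(dhd_pub_t *pubp)
{
	return container_of(pubp, dhd_info_t, pub);
}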

extern int dhd_sysfs_init(dhd_info_t *dhd);
extern void dhd_sysfs_exit(dhd_info_t *dhd);
extern void dhd_dbg_ring_proc_create(dhd_pub_t *dhdp);
extern void dhd_dbg_ring_proc_destroy(dhd_pub_t *dhdp);

int __dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf);

#if defined(DHD_LB)
#if defined(DHD_LB_TXP)
int dhd_lb_sendpkt(dhd_info_t *dhd, struct net_device *net, int ifidx, void *skb);
void dhd_tx_dispatcher_work(struct work_struct *work);
void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp);
void dhd_lb_tx_dispatch(dhd_pub_t *dhdp);
void dhd_lb_tx_handler(unsigned long data);
#endif /* DHD_LB_TXP */

#if defined(DHD_LB_RXP)
int dhd_napi_poll(struct napi_struct *napi, int budget);
void dhd_rx_napi_dispatcher_fn(struct work_struct *work);
void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp);
void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx);
#endif /* DHD_LB_RXP */
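
/*
 * A minimal sketch of the dispatch step described in the dhd_info comments
 * above, assuming the sk_buff_head's built-in lock guards rx_napi_queue;
 * the function name is hypothetical and this is not the driver's actual
 * implementation. Staged packets on rx_pend_queue are spliced onto
 * rx_napi_queue under the lock, then NAPI is scheduled.
 */
static inline void
dhd_rx_napi_dispatch_sketch(dhd_info_t *dhd)
{
	unsigned long flags;

	/* rx_napi_queue is shared with the NAPI poll context */
	spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
	skb_queue_splice_tail_init(&dhd->rx_pend_queue, &dhd->rx_napi_queue);
	spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);

	/* dhd_napi_poll() then drains rx_napi_queue into a local queue */
	napi_schedule(&dhd->rx_napi_struct);
}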

void dhd_lb_set_default_cpus(dhd_info_t *dhd);
void dhd_cpumasks_deinit(dhd_info_t *dhd);
int dhd_cpumasks_init(dhd_info_t *dhd);

void dhd_select_cpu_candidacy(dhd_info_t *dhd);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
int dhd_cpu_startup_callback(unsigned int cpu);
int dhd_cpu_teardown_callback(unsigned int cpu);
#else
int dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) */

int dhd_register_cpuhp_callback(dhd_info_t *dhd);
int dhd_unregister_cpuhp_callback(dhd_info_t *dhd);

#if defined(DHD_LB_TXC)
void dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp);
#endif /* DHD_LB_TXC */

#if defined(DHD_LB_RXC)
void dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp);
void dhd_rx_compl_dispatcher_fn(struct work_struct *work);
#endif /* DHD_LB_RXC */

#endif /* DHD_LB */
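
#if defined(DHD_LB)
/*
 * A minimal sketch, not the driver's actual code: maps a per-iteration
 * packet count into a histogram bin as described in the dhd_info comments
 * (count rounded up to the next power of 2). fls(n - 1) yields
 * ceil(log2(n)), so e.g. counts of 10 and 15 land in the 2^4 bin and 30 in
 * the 2^5 bin; larger counts are clamped to the last bin. The helper name
 * is hypothetical.
 */
static inline uint32
dhd_hist_bin_sketch(uint32 npkt)
{
	uint32 bin = (npkt > 1) ? (uint32)fls(npkt - 1) : 0;

	return (bin < HIST_BIN_SIZE) ? bin : (HIST_BIN_SIZE - 1);
}
#endif /* DHD_LB */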

#if defined(DHD_LB_IRQSET) || defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
void dhd_irq_set_affinity(dhd_pub_t *dhdp, const struct cpumask *cpumask);
#endif /* DHD_LB_IRQSET || DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */

#endif /* __DHD_LINUX_PRIV_H__ */