/* SPDX-License-Identifier: GPL-2.0 */
/*
 * DHD Linux header file - contains private structure definition of the Linux specific layer
 *
 * Copyright (C) 1999-2019, Broadcom.
 *
 *      Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 *      As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module.  An independent module is a module which is not
 * derived from this software.  The special exception does not apply to any
 * modifications of the software.
 *
 *      Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: dhd_linux_priv.h 815919 2019-04-22 09:06:50Z $
 */
30 
#ifndef __DHD_LINUX_PRIV_H__
#define __DHD_LINUX_PRIV_H__

#include <osl.h>

#ifdef SHOW_LOGTRACE
#include <linux/syscalls.h>
#include <event_log.h>
#endif /* SHOW_LOGTRACE */
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif /* CONFIG_COMPAT */
#include <dngl_stats.h>
#include <dhd.h>
#include <dhd_dbg.h>
#include <dhd_debug.h>
#include <dhd_linux.h>
#include <dhd_bus.h>

#ifdef PCIE_FULL_DONGLE
#include <bcmmsgbuf.h>
#include <dhd_flowring.h>
#endif /* PCIE_FULL_DONGLE */
56 
/*
 * Do not include this header except from dhd_linux.c and dhd_linux_sysfs.c.
 * Local private structure (extension of pub)
 */
/*
 * dhd_info_t: Linux-private driver state.  Wraps the OS-independent
 * dhd_pub_t ('pub' member) and adds Linux-specific resources: locks,
 * kernel threads, wait queues, wakelocks, power-management hooks and
 * (optionally) CPU load-balancing bookkeeping.  Most members exist only
 * under their feature #ifdef, so the effective layout varies per build.
 */
typedef struct dhd_info {
#if defined(WL_WIRELESS_EXT)
	wl_iw_t		iw;		/* wireless extensions state (must be first) */
#endif /* defined(WL_WIRELESS_EXT) */
	dhd_pub_t pub;		/* OS-independent public state (this struct is its Linux extension) */
	 /* for supporting multiple interfaces.
	  * static_ifs hold the net ifaces without valid FW IF
	  */
	dhd_if_t *iflist[DHD_MAX_IFS + DHD_MAX_STATIC_IFS];

	wifi_adapter_info_t *adapter;			/* adapter information, interrupt, fw path etc. */
	char fw_path[PATH_MAX];		/* path to firmware image */
	char nv_path[PATH_MAX];		/* path to nvram vars file */
	char clm_path[PATH_MAX];		/* path to clm vars file */
	char conf_path[PATH_MAX];	/* path to config vars file */
#ifdef DHD_UCODE_DOWNLOAD
	char uc_path[PATH_MAX];	/* path to ucode image */
#endif /* DHD_UCODE_DOWNLOAD */

	/* serialize dhd iovars */
	struct mutex dhd_iovar_mutex;

	struct semaphore proto_sem;	/* serializes proto-layer requests (by name; confirm in dhd_linux.c) */
#ifdef PROP_TXSTATUS
	spinlock_t	wlfc_spinlock;	/* protects wlfc (proptxstatus) state */

#ifdef BCMDBUS
	/* saved irq flags for the wlfc locks (DBUS builds take them with irqsave) */
	ulong		wlfc_lock_flags;
	ulong		wlfc_pub_lock_flags;
#endif /* BCMDBUS */
#endif /* PROP_TXSTATUS */
	/* Completion wait queues; by name: ioctl response, PCIe D3 ack,
	 * bus busy-state change, and DMA-transfer (dmaxfer) completion.
	 */
	wait_queue_head_t ioctl_resp_wait;
	wait_queue_head_t d3ack_wait;
	wait_queue_head_t dhd_bus_busy_state_wait;
	wait_queue_head_t dmaxfer_wait;
	uint32	default_wd_interval;	/* default watchdog timer interval */

	timer_list_compat_t timer;	/* driver watchdog timer */
	bool wd_timer_valid;		/* true while the watchdog timer is armed */
	struct tasklet_struct tasklet;
	/* bus / tx-queue / general driver locks */
	spinlock_t	sdlock;
	spinlock_t	txqlock;
	spinlock_t	dhd_lock;
#ifdef BCMDBUS
	ulong		txqlock_flags;	/* saved irq flags for txqlock */
#else

	struct semaphore sdsem;
	/* DPC and watchdog kernel-thread control blocks (non-DBUS builds) */
	tsk_ctl_t	thr_dpc_ctl;
	tsk_ctl_t	thr_wdt_ctl;
#endif /* BCMDBUS */

	/* RX-frame thread control, its lock, and runtime enable flag */
	tsk_ctl_t	thr_rxf_ctl;
	spinlock_t	rxf_lock;
	bool		rxthread_enabled;

	/* Wakelocks */
#if defined(CONFIG_HAS_WAKELOCK)
	struct wake_lock wl_wifi;   /* Wifi wakelock */
	struct wake_lock wl_rxwake; /* Wifi rx wakelock */
	struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */
	struct wake_lock wl_wdwake; /* Wifi wd wakelock */
	struct wake_lock wl_evtwake; /* Wifi event wakelock */
	struct wake_lock wl_pmwake;   /* Wifi pm handler wakelock */
	struct wake_lock wl_txflwake; /* Wifi tx flow wakelock */
#ifdef BCMPCIE_OOB_HOST_WAKE
	struct wake_lock wl_intrwake; /* Host wakeup wakelock */
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
	struct wake_lock wl_scanwake;  /* Wifi scan wakelock */
#endif /* DHD_USE_SCAN_WAKELOCK */
#endif /* CONFIG_HAS_WAKELOCK */

	/* net_device interface lock, prevent race conditions among net_dev interface
	 * calls and wifi_on or wifi_off
	 */
	struct mutex dhd_net_if_mutex;
	struct mutex dhd_suspend_mutex;
#if defined(PKT_FILTER_SUPPORT) && defined(APF)
	struct mutex dhd_apf_mutex;	/* serializes Android Packet Filter operations */
#endif /* PKT_FILTER_SUPPORT && APF */
	/* Wakelock bookkeeping; presumably guarded by the two spinlocks below */
	spinlock_t wakelock_spinlock;
	spinlock_t wakelock_evt_spinlock;
	uint32 wakelock_counter;
	int wakelock_wd_counter;
	int wakelock_rx_timeout_enable;
	int wakelock_ctrl_timeout_enable;
	bool waive_wakelock;		/* when set, wakelock requests are waived */
	uint32 wakelock_before_waive;	/* counter snapshot taken when waiving began */

	/* Thread to issue ioctl for multicast */
	wait_queue_head_t ctrl_wait;
	atomic_t pend_8021x_cnt;	/* in-flight 802.1X frame count (by name) */
	dhd_attach_states_t dhd_state;	/* attach-progress state bits */
#ifdef SHOW_LOGTRACE
	dhd_event_log_t event_data;	/* firmware event-log trace state */
#endif /* SHOW_LOGTRACE */

#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
	struct early_suspend early_suspend;
#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */

#ifdef ARP_OFFLOAD_SUPPORT
	u32 pend_ipaddr;	/* IP address pending programming into ARP offload */
#endif /* ARP_OFFLOAD_SUPPORT */
#ifdef DHDTCPACK_SUPPRESS
	spinlock_t	tcpack_lock;	/* protects TCP-ACK suppression state */
#endif /* DHDTCPACK_SUPPRESS */
#ifdef FIX_CPU_MIN_CLOCK
	/* PM QoS requests used to pin minimum CPU (and optionally bus) clocks */
	bool cpufreq_fix_status;
	struct mutex cpufreq_fix;
	struct pm_qos_request dhd_cpu_qos;
#ifdef FIX_BUS_MIN_CLOCK
	struct pm_qos_request dhd_bus_qos;
#endif /* FIX_BUS_MIN_CLOCK */
#endif /* FIX_CPU_MIN_CLOCK */
	void			*dhd_deferred_wq;	/* opaque deferred-work queue handle */
#ifdef DEBUG_CPU_FREQ
	struct notifier_block freq_trans;	/* cpufreq transition notifier */
	int __percpu *new_freq;			/* per-CPU snapshot of last seen frequency */
#endif /* DEBUG_CPU_FREQ */
	unsigned int unit;			/* DHD instance (unit) number */
	struct notifier_block pm_notifier;	/* suspend/resume PM notifier */
#ifdef DHD_PSTA
	uint32	psta_mode;	/* PSTA or PSR */
#endif /* DHD_PSTA */
#ifdef DHD_WET
	uint32	wet_mode;	/* Wireless Ethernet Transceiver mode flag */
#endif /* DHD_WET */
#ifdef DHD_DEBUG
	dhd_dump_t *dump;
	/* join/scan watchdog timers used for debug timeout detection */
	struct timer_list join_timer;
	u32 join_timeout_val;
	bool join_timer_active;
	uint scan_time_count;
	struct timer_list scan_timer;
	bool scan_timer_active;
#endif /* DHD_DEBUG */
#if defined(DHD_LB)
	/* CPU Load Balance dynamic CPU selection */

	/* Variable that tracks the current CPUs available for candidacy */
	cpumask_var_t cpumask_curr_avail;

	/* Primary and secondary CPU mask */
	cpumask_var_t cpumask_primary, cpumask_secondary; /* configuration */
	cpumask_var_t cpumask_primary_new, cpumask_secondary_new; /* temp */

	struct notifier_block cpu_notifier;	/* CPU hotplug notifier */

	/* Tasklet to handle Tx Completion packet freeing */
	struct tasklet_struct tx_compl_tasklet;
	atomic_t                   tx_compl_cpu;

	/* Tasklet to handle RxBuf Post during Rx completion */
	struct tasklet_struct rx_compl_tasklet;
	atomic_t                   rx_compl_cpu;

	/* Napi struct for handling rx packet sendup. Packets are removed from
	 * H2D RxCompl ring and placed into rx_pend_queue. rx_pend_queue is then
	 * appended to rx_napi_queue (w/ lock) and the rx_napi_struct is scheduled
	 * to run to rx_napi_cpu.
	 */
	struct sk_buff_head   rx_pend_queue  ____cacheline_aligned;
	struct sk_buff_head   rx_napi_queue  ____cacheline_aligned;
	struct napi_struct    rx_napi_struct ____cacheline_aligned;
	atomic_t                   rx_napi_cpu; /* cpu on which the napi is dispatched */
	struct net_device    *rx_napi_netdev; /* netdev of primary interface */

	/* work items that migrate the NAPI/tasklet work onto the chosen CPUs */
	struct work_struct    rx_napi_dispatcher_work;
	struct work_struct	  tx_compl_dispatcher_work;
	struct work_struct    tx_dispatcher_work;
	struct work_struct	  rx_compl_dispatcher_work;

	/* Number of times DPC Tasklet ran */
	uint32	dhd_dpc_cnt;
	/* Number of times NAPI processing got scheduled */
	uint32	napi_sched_cnt;
	/* Number of times NAPI processing ran on each available core */
	uint32	*napi_percpu_run_cnt;
	/* Number of times RX Completions got scheduled */
	uint32	rxc_sched_cnt;
	/* Number of times RX Completion ran on each available core */
	uint32	*rxc_percpu_run_cnt;
	/* Number of times TX Completions got scheduled */
	uint32	txc_sched_cnt;
	/* Number of times TX Completions ran on each available core */
	uint32	*txc_percpu_run_cnt;
	/* CPU status */
	/* Number of times each CPU came online */
	uint32	*cpu_online_cnt;
	/* Number of times each CPU went offline */
	uint32	*cpu_offline_cnt;

	/* Number of times TX processing run on each core */
	uint32	*txp_percpu_run_cnt;
	/* Number of times TX start run on each core */
	uint32	*tx_start_percpu_run_cnt;

	/* Tx load balancing */

	/* TODO: Need to see if batch processing is really required in case of TX
	 * processing. In case of RX the Dongle can send a bunch of rx completions,
	 * hence we took a 3 queue approach
	 * enque - adds the skbs to rx_pend_queue
	 * dispatch - uses a lock and adds the list of skbs from pend queue to
	 *            napi queue
	 * napi processing - copies the pend_queue into a local queue and works
	 * on it.
	 * But for TX its going to be 1 skb at a time, so we are just thinking
	 * of using only one queue and use the lock supported skb queue functions
	 * to add and process it. If its inefficient we'll re-visit the queue
	 * design.
	 */

	/* When the NET_TX tries to send a TX packet put it into tx_pend_queue */
	/* struct sk_buff_head		tx_pend_queue  ____cacheline_aligned;  */
	/*
	 * From the Tasklet that actually sends out data
	 * copy the list tx_pend_queue into tx_active_queue. There by we need
	 * to spinlock to only perform the copy the rest of the code ie to
	 * construct the tx_pend_queue and the code to process tx_active_queue
	 * can be lockless. The concept is borrowed as is from RX processing
	 */
	/* struct sk_buff_head		tx_active_queue  ____cacheline_aligned; */

	/* Control TXP in runtime, enable by default */
	atomic_t                lb_txp_active;

	/* Control RXP in runtime, enable by default */
	atomic_t                lb_rxp_active;

	/*
	 * When the NET_TX tries to send a TX packet put it into tx_pend_queue
	 * For now, the processing tasklet will also directly operate on this
	 * queue
	 */
	struct sk_buff_head	tx_pend_queue  ____cacheline_aligned;

	/* cpu on which the DHD Tx is happening */
	atomic_t		tx_cpu;

	/* CPU on which the Network stack is calling the DHD's xmit function */
	atomic_t		net_tx_cpu;

	/* Tasklet context from which the DHD's TX processing happens */
	struct tasklet_struct tx_tasklet;

	/*
	 * Consumer Histogram - NAPI RX Packet processing
	 * -----------------------------------------------
	 * On Each CPU, when the NAPI RX Packet processing call back was invoked
	 * how many packets were processed is captured in this data structure.
	 * Now its difficult to capture the "exact" number of packets processed.
	 * So considering the packet counter to be a 32 bit one, we have a
	 * bucket with 8 bins (2^1, 2^2 ... 2^8). The "number" of packets
	 * processed is rounded off to the next power of 2 and put in the
	 * appropriate "bin" the value in the bin gets incremented.
	 * For example, assume that in CPU 1 if NAPI Rx runs 3 times
	 * and the packet count processed is as follows (assume the bin counters are 0)
	 * iteration 1 - 10 (the bin counter 2^4 increments to 1)
	 * iteration 2 - 30 (the bin counter 2^5 increments to 1)
	 * iteration 3 - 15 (the bin counter 2^4 increments by 1 to become 2)
	 */
	uint32 *napi_rx_hist[HIST_BIN_SIZE];
	uint32 *txc_hist[HIST_BIN_SIZE];
	uint32 *rxc_hist[HIST_BIN_SIZE];
#endif /* DHD_LB */
#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
	struct work_struct	  axi_error_dispatcher_work;
#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
#ifdef SHOW_LOGTRACE
#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
	tsk_ctl_t	  thr_logtrace_ctl;	/* dedicated logtrace kernel thread */
#else
	struct delayed_work	  event_log_dispatcher_work;
#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
#endif /* SHOW_LOGTRACE */

#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
	/* sysfs kobjects exported by this driver instance */
	struct kobject dhd_kobj;
	struct kobject dhd_conf_file_kobj;
	struct timer_list timesync_timer;	/* periodic host/dongle time-sync (by name) */
#if defined(BT_OVER_SDIO)
	char btfw_path[PATH_MAX];	/* path to BT firmware image */
#endif /* defined (BT_OVER_SDIO) */
#ifdef WL_MONITOR
	struct net_device *monitor_dev; /* monitor pseudo device */
	struct sk_buff *monitor_skb;
	uint	monitor_len;
	uint	monitor_type;   /* monitor pseudo device */
#endif /* WL_MONITOR */
#if defined(BT_OVER_SDIO)
	struct mutex bus_user_lock; /* lock for sdio bus apis shared between WLAN & BT */
	int     bus_user_count; /* User counts of sdio bus shared between WLAN & BT */
#endif /* BT_OVER_SDIO */
#ifdef SHOW_LOGTRACE
	struct sk_buff_head   evt_trace_queue     ____cacheline_aligned;
#endif /* SHOW_LOGTRACE */
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
	/* dedicated tx/rx workqueues for native runtime-PM builds */
	struct workqueue_struct *tx_wq;
	struct workqueue_struct *rx_wq;
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
#ifdef DHD_DEBUG_UART
	bool duart_execute;
#endif	/* DHD_DEBUG_UART */
	struct mutex logdump_lock;	/* serializes log-dump operations */
	/* indicates mem_dump was scheduled as work queue or called directly */
	bool scheduled_memdump;
	struct work_struct dhd_hang_process_work;	/* deferred HANG-event processing */
#ifdef DHD_HP2P
	spinlock_t	hp2p_lock;	/* protects high-priority P2P state */
#endif /* DHD_HP2P */
} dhd_info_t;
/* sysfs registration/teardown for a DHD instance (presumably implemented in
 * dhd_linux_sysfs.c; see the include restriction above)
 */
extern int dhd_sysfs_init(dhd_info_t *dhd);
extern void dhd_sysfs_exit(dhd_info_t *dhd);
/* create/destroy procfs entries for the debug rings */
extern void dhd_dbg_ring_proc_create(dhd_pub_t *dhdp);
extern void dhd_dbg_ring_proc_destroy(dhd_pub_t *dhdp);

/* Core transmit entry point: send pktbuf on interface ifidx */
int __dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf);

#if defined(DHD_LB)
/* ---- CPU load balancing (DHD_LB) ---- */
#if defined(DHD_LB_TXP)
/* Tx-path load balancing: enqueue, dispatcher work items, and tasklet handler */
int dhd_lb_sendpkt(dhd_info_t *dhd, struct net_device *net, int ifidx, void *skb);
void dhd_tx_dispatcher_work(struct work_struct * work);
void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp);
void dhd_lb_tx_dispatch(dhd_pub_t *dhdp);
void dhd_lb_tx_handler(unsigned long data);
#endif /* DHD_LB_TXP */

#if defined(DHD_LB_RXP)
/* Rx-path load balancing via NAPI */
int dhd_napi_poll(struct napi_struct *napi, int budget);
void dhd_rx_napi_dispatcher_fn(struct work_struct * work);
void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp);
void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx);
#endif /* DHD_LB_RXP */

/* CPU-mask setup/teardown and candidate CPU selection */
void dhd_lb_set_default_cpus(dhd_info_t *dhd);
void dhd_cpumasks_deinit(dhd_info_t *dhd);
int dhd_cpumasks_init(dhd_info_t *dhd);

void dhd_select_cpu_candidacy(dhd_info_t *dhd);

/* CPU hotplug callbacks: cpuhp state-machine API on >= 4.10,
 * legacy notifier API on older kernels
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
int dhd_cpu_startup_callback(unsigned int cpu);
int dhd_cpu_teardown_callback(unsigned int cpu);
#else
int dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) */

int dhd_register_cpuhp_callback(dhd_info_t *dhd);
int dhd_unregister_cpuhp_callback(dhd_info_t *dhd);

#if defined(DHD_LB_TXC)
/* Tx-completion load balancing */
void dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp);
#endif /* DHD_LB_TXC */

#if defined(DHD_LB_RXC)
/* Rx-completion load balancing */
void dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp);
void dhd_rx_compl_dispatcher_fn(struct work_struct * work);
#endif /* DHD_LB_RXC */

#endif /* DHD_LB */

#if defined(DHD_LB_IRQSET) || defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
/* Pin the bus interrupt to the CPUs named in cpumask */
void dhd_irq_set_affinity(dhd_pub_t *dhdp, const struct cpumask *cpumask);
#endif /* DHD_LB_IRQSET || DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */

#endif /* __DHD_LINUX_PRIV_H__ */