/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Linux-specific abstractions to gain some independence from linux kernel versions.
 * Pave over some 2.2 versus 2.4 versus 2.6 kernel differences.
 *
 * Copyright (C) 1999-2019, Broadcom.
 *
 *      Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 *      As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module.  An independent module is a module which is not
 * derived from this software.  The special exception does not apply to any
 * modifications of the software.
 *
 *      Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: linuxver.h 806092 2019-02-21 08:19:13Z $
 */

#ifndef _linuxver_h_
#define _linuxver_h_

#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
#pragma GCC diagnostic ignored "-Wunused-but-set-parameter"
#endif // endif

#include <typedefs.h>
#include <linux/version.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
#include <linux/config.h>
#else
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33))
#include <generated/autoconf.h>
#else
#include <linux/autoconf.h>
#endif // endif
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
#include <linux/kconfig.h>
#endif // endif
#include <linux/module.h>

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0))
/* __NO_VERSION__ must be defined for all linkables except one in 2.2 */
#ifdef __UNDEF_NO_VERSION__
#undef __NO_VERSION__
#else
#define __NO_VERSION__
#endif // endif
#endif	/* LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0) */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
#define module_param(_name_, _type_, _perm_)	MODULE_PARM(_name_, "i")
#define module_param_string(_name_, _string_, _size_, _perm_) \
		MODULE_PARM(_string_, "c" __MODULE_STRING(_size_))
#endif // endif
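
/*
 * Usage sketch for the module_param() shim above (illustrative; the
 * parameter name is hypothetical). One declaration builds everywhere;
 * note the pre-2.5 fallback ignores _type_/_perm_ and always registers
 * the parameter as an int ("i"):
 *
 *	static int wl_msg_level = 0;
 *	module_param(wl_msg_level, int, 0);
 */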

/* linux/malloc.h is deprecated, use linux/slab.h instead. */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 9))
#include <linux/malloc.h>
#else
#include <linux/slab.h>
#endif // endif

#include <linux/types.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include <linux/time.h>
#include <linux/rtc.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
#include <linux/semaphore.h>
#else
#include <asm/semaphore.h>
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
#undef IP_TOS
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)) */
#include <asm/io.h>

#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
#include <linux/workqueue.h>
#else
#include <linux/tqueue.h>
#ifndef work_struct
#define work_struct tq_struct
#endif // endif
#ifndef INIT_WORK
#define INIT_WORK(_work, _func, _data) INIT_TQUEUE((_work), (_func), (_data))
#endif // endif
#ifndef schedule_work
#define schedule_work(_work) schedule_task((_work))
#endif // endif
#ifndef flush_scheduled_work
#define flush_scheduled_work() flush_scheduled_tasks()
#endif // endif
#endif	/* LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
#define DAEMONIZE(a)	do { \
		allow_signal(SIGKILL);	\
		allow_signal(SIGTERM);	\
	} while (0)
#elif ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) && \
	(LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)))
#define DAEMONIZE(a) daemonize(a); \
	allow_signal(SIGKILL); \
	allow_signal(SIGTERM);
#else /* Linux 2.4 (w/o preemption patch) */
#define RAISE_RX_SOFTIRQ() \
	cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ)
#define DAEMONIZE(a) daemonize(); \
	do { if (a) \
		strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a)))); \
	} while (0);
#endif /* LINUX_VERSION_CODE  */
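
/*
 * Usage sketch for DAEMONIZE() (illustrative; the thread function, name,
 * and termination flag are hypothetical). It is meant to be called once at
 * the top of a kernel-thread entry point so the thread detaches on old
 * kernels and accepts SIGKILL/SIGTERM on all supported kernels:
 *
 *	static int my_thread_func(void *data)
 *	{
 *		DAEMONIZE("my_thread");
 *		while (!my_ctx.terminated) {
 *			... wait for and process work ...
 *		}
 *		return 0;
 *	}
 */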

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
#define	MY_INIT_WORK(_work, _func)	INIT_WORK(_work, _func)
#else
#define	MY_INIT_WORK(_work, _func)	INIT_WORK(_work, _func, _work)
#if !(LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18) && defined(RHEL_MAJOR) && \
	(RHEL_MAJOR == 5))
/* Exclude RHEL 5 */
typedef void (*work_func_t)(void *work);
#endif // endif
#endif	/* >= 2.6.19 */
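
/*
 * Usage sketch for MY_INIT_WORK() (illustrative; the handler and context
 * names are hypothetical). On older kernels INIT_WORK() took a third "data"
 * argument; the shim passes the work struct itself, so one handler body
 * works either way (on the old kernels the pointer arrives as a void *
 * that points at the work struct):
 *
 *	static void my_work_handler(struct work_struct *work)
 *	{
 *		struct my_ctx *ctx = container_of(work, struct my_ctx, work);
 *		...
 *	}
 *
 *	MY_INIT_WORK(&ctx->work, (work_func_t)my_work_handler);
 */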

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
/* Some distributions have their own 2.6.x compatibility layers */
#ifndef IRQ_NONE
typedef void irqreturn_t;
#define IRQ_NONE
#define IRQ_HANDLED
#define IRQ_RETVAL(x)
#endif // endif
#else
typedef irqreturn_t(*FN_ISR) (int irq, void *dev_id, struct pt_regs *ptregs);
#endif	/* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
#define IRQF_SHARED	SA_SHIRQ
#endif /* < 2.6.18 */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
#ifdef	CONFIG_NET_RADIO
#define	CONFIG_WIRELESS_EXT
#endif // endif
#endif	/* < 2.6.17 */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67)
#define MOD_INC_USE_COUNT
#define MOD_DEC_USE_COUNT
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67) */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
#include <linux/sched.h>
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
#include <linux/sched/rt.h>
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
#include <net/lib80211.h>
#endif // endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
#include <linux/ieee80211.h>
#else
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
#include <net/ieee80211.h>
#endif // endif
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) */

#ifndef __exit
#define __exit
#endif // endif
#ifndef __devexit
#define __devexit
#endif // endif
#ifndef __devinit
#  if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
#    define __devinit	__init
#  else
/* All devices are hotpluggable since linux 3.8.0 */
#    define __devinit
#  endif
#endif /* !__devinit */
#ifndef __devinitdata
#define __devinitdata
#endif // endif
#ifndef __devexit_p
#define __devexit_p(x)	x
#endif // endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0))

#define pci_get_drvdata(dev)		(dev)->sysdata
#define pci_set_drvdata(dev, value)	(dev)->sysdata = (value)

/*
 * New-style (2.4.x) PCI/hot-pluggable PCI/CardBus registration
 */

struct pci_device_id {
	unsigned int vendor, device;		/* Vendor and device ID or PCI_ANY_ID */
	unsigned int subvendor, subdevice;	/* Subsystem ID's or PCI_ANY_ID */
	unsigned int class, class_mask;		/* (class,subclass,prog-if) triplet */
	unsigned long driver_data;		/* Data private to the driver */
};

struct pci_driver {
	struct list_head node;
	char *name;
	const struct pci_device_id *id_table;	/* NULL if wants all devices */
	int (*probe)(struct pci_dev *dev,
	             const struct pci_device_id *id); /* New device inserted */
	void (*remove)(struct pci_dev *dev);	/* Device removed (NULL if not a hot-plug
						 * capable driver)
						 */
	void (*suspend)(struct pci_dev *dev);	/* Device suspended */
	void (*resume)(struct pci_dev *dev);	/* Device woken up */
};

#define MODULE_DEVICE_TABLE(type, name)
#define PCI_ANY_ID (~0)

/* compatpci.c */
#define pci_module_init pci_register_driver
extern int pci_register_driver(struct pci_driver *drv);
extern void pci_unregister_driver(struct pci_driver *drv);

#endif /* PCI registration */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18))
#define pci_module_init pci_register_driver
#endif // endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18))
#ifdef MODULE
#define module_init(x) int init_module(void) { return x(); }
#define module_exit(x) void cleanup_module(void) { x(); }
#else
#define module_init(x)	__initcall(x);
#define module_exit(x)	__exitcall(x);
#endif // endif
#endif	/* LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18) */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)
#define WL_USE_NETDEV_OPS
#else
#undef WL_USE_NETDEV_OPS
#endif // endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) && defined(CONFIG_RFKILL)
#define WL_CONFIG_RFKILL
#else
#undef WL_CONFIG_RFKILL
#endif // endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 48))
#define list_for_each(pos, head) \
	for (pos = (head)->next; pos != (head); pos = pos->next)
#endif // endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 13))
#define pci_resource_start(dev, bar)	((dev)->base_address[(bar)])
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 44))
#define pci_resource_start(dev, bar)	((dev)->resource[(bar)].start)
#endif // endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 23))
#define pci_enable_device(dev) do { } while (0)
#endif // endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 14))
#define net_device device
#endif // endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 42))

/*
 * DMA mapping
 *
 * See linux/Documentation/DMA-mapping.txt
 */

#ifndef PCI_DMA_TODEVICE
#define	PCI_DMA_TODEVICE	1
#define	PCI_DMA_FROMDEVICE	2
#endif // endif

typedef u32 dma_addr_t;

/* Pure 2^n version of get_order */
static inline int get_order(unsigned long size)
{
	int order;

	size = (size-1) >> (PAGE_SHIFT-1);
	order = -1;
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}

static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
                                         dma_addr_t *dma_handle)
{
	void *ret;
	int gfp = GFP_ATOMIC | GFP_DMA;

	ret = (void *)__get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_bus(ret);
	}
	return ret;
}
static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size,
                                       void *vaddr, dma_addr_t dma_handle)
{
	free_pages((unsigned long)vaddr, get_order(size));
}
#define pci_map_single(cookie, address, size, dir)	virt_to_bus(address)
#define pci_unmap_single(cookie, address, size, dir)

#endif /* DMA mapping */
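
/*
 * Usage sketch for the pci_alloc_consistent() fallback above (illustrative;
 * the buffer variables are hypothetical). The same calls work unchanged on
 * kernels that provide the real API:
 *
 *	dma_addr_t pa;
 *	void *va = pci_alloc_consistent(pdev, 4096, &pa);
 *	if (va != NULL) {
 *		... program the device with the bus address "pa" ...
 *		pci_free_consistent(pdev, 4096, va, pa);
 *	}
 */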

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)

typedef struct timer_list timer_list_compat_t;

#define init_timer_compat(timer_compat, cb, priv) \
	init_timer(timer_compat); \
	(timer_compat)->data = (ulong)priv; \
	(timer_compat)->function = cb
#define timer_set_private(timer_compat, priv) (timer_compat)->data = (ulong)priv
#define timer_expires(timer_compat) (timer_compat)->expires

#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) */

typedef struct timer_list_compat {
	struct timer_list timer;
	void *arg;
	void (*callback)(ulong arg);
} timer_list_compat_t;

extern void timer_cb_compat(struct timer_list *tl);

#define init_timer_compat(timer_compat, cb, priv) \
	(timer_compat)->arg = priv; \
	(timer_compat)->callback = cb; \
	timer_setup(&(timer_compat)->timer, timer_cb_compat, 0);
#define timer_set_private(timer_compat, priv) (timer_compat)->arg = priv
#define timer_expires(timer_compat) (timer_compat)->timer.expires

#define del_timer(t) del_timer(&((t)->timer))
#define del_timer_sync(t) del_timer_sync(&((t)->timer))
#define timer_pending(t) timer_pending(&((t)->timer))
#define add_timer(t) add_timer(&((t)->timer))
#define mod_timer(t, j) mod_timer(&((t)->timer), j)

#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) */
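
/*
 * Usage sketch for the timer compatibility layer (illustrative; the callback
 * and context names are hypothetical). Callers always write the old-style
 * "(ulong arg)" callback; on >= 4.15 kernels timer_cb_compat() recovers the
 * argument from the wrapper struct:
 *
 *	static void my_timeout(ulong arg)
 *	{
 *		struct my_ctx *ctx = (struct my_ctx *)arg;
 *		...
 *	}
 *
 *	timer_list_compat_t my_timer;
 *	init_timer_compat(&my_timer, my_timeout, ctx);
 *	mod_timer(&my_timer, jiffies + msecs_to_jiffies(100));
 *	del_timer_sync(&my_timer);
 */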

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)
#define rtc_time_to_tm(a, b) rtc_time64_to_tm(a, b)
#else
#define rtc_time_to_tm(a, b) rtc_time_to_tm(a, b)
#endif /* LINUX_VER >= 3.19.0 */

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 43))

#define dev_kfree_skb_any(a)		dev_kfree_skb(a)
#define netif_down(dev)			do { (dev)->start = 0; } while (0)

/* pcmcia-cs provides its own netdevice compatibility layer */
#ifndef _COMPAT_NETDEVICE_H

/*
 * SoftNet
 *
 * For pre-softnet kernels we need to tell the upper layer not to
 * re-enter start_xmit() while we are in there. However softnet
 * guarantees not to enter while we are in there so there is no need
 * to do the netif_stop_queue() dance unless the transmit queue really
 * gets stuck. This should also improve performance according to tests
 * done by Aman Singla.
 */

#define dev_kfree_skb_irq(a)	dev_kfree_skb(a)
#define netif_wake_queue(dev) \
		do { clear_bit(0, &(dev)->tbusy); mark_bh(NET_BH); } while (0)
#define netif_stop_queue(dev)	set_bit(0, &(dev)->tbusy)

static inline void netif_start_queue(struct net_device *dev)
{
	dev->tbusy = 0;
	dev->interrupt = 0;
	dev->start = 1;
}

#define netif_queue_stopped(dev)	(dev)->tbusy
#define netif_running(dev)		(dev)->start

#endif /* _COMPAT_NETDEVICE_H */

#define netif_device_attach(dev)	netif_start_queue(dev)
#define netif_device_detach(dev)	netif_stop_queue(dev)

/* 2.4.x renamed bottom halves to tasklets */
#define tasklet_struct				tq_struct
static inline void tasklet_schedule(struct tasklet_struct *tasklet)
{
	queue_task(tasklet, &tq_immediate);
	mark_bh(IMMEDIATE_BH);
}

static inline void tasklet_init(struct tasklet_struct *tasklet,
                                void (*func)(unsigned long),
                                unsigned long data)
{
	tasklet->next = NULL;
	tasklet->sync = 0;
	tasklet->routine = (void (*)(void *))func;
	tasklet->data = (void *)data;
}
#define tasklet_kill(tasklet)	{ do {} while (0); }

/* 2.4.x introduced del_timer_sync() */
#define del_timer_sync(timer) del_timer(timer)

#else

#define netif_down(dev)

#endif /* SoftNet */

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3))

/*
 * Emit code to initialise a tq_struct's routine and data pointers
 */
#define PREPARE_TQUEUE(_tq, _routine, _data)			\
	do {							\
		(_tq)->routine = _routine;			\
		(_tq)->data = _data;				\
	} while (0)

/*
 * Emit code to initialise all of a tq_struct
 */
#define INIT_TQUEUE(_tq, _routine, _data)			\
	do {							\
		INIT_LIST_HEAD(&(_tq)->list);			\
		(_tq)->sync = 0;				\
		PREPARE_TQUEUE((_tq), (_routine), (_data));	\
	} while (0)

#endif	/* LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3) */

/* Power management related macro & routines */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 9)
#define	PCI_SAVE_STATE(a, b)	pci_save_state(a)
#define	PCI_RESTORE_STATE(a, b)	pci_restore_state(a)
#else
#define	PCI_SAVE_STATE(a, b)	pci_save_state(a, b)
#define	PCI_RESTORE_STATE(a, b)	pci_restore_state(a, b)
#endif // endif
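
/*
 * Usage sketch for the PCI_SAVE_STATE()/PCI_RESTORE_STATE() wrappers
 * (illustrative; the buffer name is hypothetical). The second argument is
 * only consumed on pre-2.6.10 kernels, where pci_save_state() still took a
 * caller-supplied buffer:
 *
 *	static u32 pci_cfg_buf[16];
 *
 *	... in suspend:  PCI_SAVE_STATE(pdev, pci_cfg_buf); ...
 *	... in resume:   PCI_RESTORE_STATE(pdev, pci_cfg_buf); ...
 */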

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6))
static inline int
pci_save_state(struct pci_dev *dev, u32 *buffer)
{
	int i;
	if (buffer) {
		for (i = 0; i < 16; i++)
			pci_read_config_dword(dev, i * 4, &buffer[i]);
	}
	return 0;
}

static inline int
pci_restore_state(struct pci_dev *dev, u32 *buffer)
{
	int i;

	if (buffer) {
		for (i = 0; i < 16; i++)
			pci_write_config_dword(dev, i * 4, buffer[i]);
	}
	/*
	 * otherwise, write the context information we know from bootup.
	 * This works around a problem where warm-booting from Windows
	 * combined with a D3(hot)->D0 transition causes PCI config
	 * header data to be forgotten.
	 */
	else {
		for (i = 0; i < 6; i++)
			pci_write_config_dword(dev,
			                       PCI_BASE_ADDRESS_0 + (i * 4),
			                       pci_resource_start(dev, i));
		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
	}
	return 0;
}
#endif /* PCI power management */

/* Old cp0 access macros deprecated in 2.4.19 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 19))
#define read_c0_count() read_32bit_cp0_register(CP0_COUNT)
#endif // endif

/* Module refcount handled internally in 2.6.x */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
#ifndef SET_MODULE_OWNER
#define SET_MODULE_OWNER(dev)		do {} while (0)
#define OLD_MOD_INC_USE_COUNT		MOD_INC_USE_COUNT
#define OLD_MOD_DEC_USE_COUNT		MOD_DEC_USE_COUNT
#else
#define OLD_MOD_INC_USE_COUNT		do {} while (0)
#define OLD_MOD_DEC_USE_COUNT		do {} while (0)
#endif // endif
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */
#ifndef SET_MODULE_OWNER
#define SET_MODULE_OWNER(dev)		do {} while (0)
#endif // endif
#ifndef MOD_INC_USE_COUNT
#define MOD_INC_USE_COUNT			do {} while (0)
#endif // endif
#ifndef MOD_DEC_USE_COUNT
#define MOD_DEC_USE_COUNT			do {} while (0)
#endif // endif
#define OLD_MOD_INC_USE_COUNT		MOD_INC_USE_COUNT
#define OLD_MOD_DEC_USE_COUNT		MOD_DEC_USE_COUNT
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */

#ifndef SET_NETDEV_DEV
#define SET_NETDEV_DEV(net, pdev)	do {} while (0)
#endif // endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0))
#ifndef HAVE_FREE_NETDEV
#define free_netdev(dev)		kfree(dev)
#endif // endif
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0) */

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
/* struct packet_type redefined in 2.6.x */
#define af_packet_priv			data
#endif // endif

/* suspend args */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
#define DRV_SUSPEND_STATE_TYPE pm_message_t
#else
#define DRV_SUSPEND_STATE_TYPE uint32
#endif // endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
#define CHECKSUM_HW	CHECKSUM_PARTIAL
#endif // endif

typedef struct {
	void	*parent;  /* some external entity that the thread is supposed to work for */
	char	*proc_name;
	struct	task_struct *p_task;
	long	thr_pid;
	int		prio; /* priority */
	struct	semaphore sema;
	int	terminated;
	struct	completion completed;
	int	flush_ind;
	struct	completion flushed;
	spinlock_t	spinlock;
	int		up_cnt;
} tsk_ctl_t;

/* requires a tsk_ctl_t tsk argument; the caller's priv data is passed in the owner ptr */
/* note: this macro assumes there may be only one context waiting on the thread's completion */
#ifndef DHD_LOG_PREFIX
#define DHD_LOG_PREFIX "[dhd]"
#endif
#define DHD_LOG_PREFIXS DHD_LOG_PREFIX" "
#ifdef DHD_DEBUG
#define	printf_thr(fmt, args...)	printk(DHD_LOG_PREFIXS fmt , ## args)
#define DBG_THR(args)		do {printf_thr args;} while (0)
#else
#define DBG_THR(x)
#endif // endif

static inline bool binary_sema_down(tsk_ctl_t *tsk)
{
	if (down_interruptible(&tsk->sema) == 0) {
		unsigned long flags = 0;
		spin_lock_irqsave(&tsk->spinlock, flags);
		if (tsk->up_cnt == 1)
			tsk->up_cnt--;
		else {
			DBG_THR(("dhd_dpc_thread: Unexpected up_cnt %d\n", tsk->up_cnt));
		}
		spin_unlock_irqrestore(&tsk->spinlock, flags);
		return false;
	} else
		return true;
}

static inline bool binary_sema_up(tsk_ctl_t *tsk)
{
	bool sem_up = false;
	unsigned long flags = 0;

	spin_lock_irqsave(&tsk->spinlock, flags);
	if (tsk->up_cnt == 0) {
		tsk->up_cnt++;
		sem_up = true;
	} else if (tsk->up_cnt == 1) {
		/* dhd_sched_dpc: dpc is already up! */
	} else
		DBG_THR(("dhd_sched_dpc: unexpected up cnt %d!\n", tsk->up_cnt));

	spin_unlock_irqrestore(&tsk->spinlock, flags);

	if (sem_up)
		up(&tsk->sema);

	return sem_up;
}
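
/*
 * Usage sketch of the binary semaphore helpers in a worker-thread loop
 * (illustrative; loosely modeled on the dhd DPC thread, names are
 * hypothetical). binary_sema_down() returns false on a normal wakeup and
 * true when the sleep was interrupted by a signal:
 *
 *	static int my_dpc_thread(void *data)
 *	{
 *		tsk_ctl_t *tsk = (tsk_ctl_t *)data;
 *
 *		while (1) {
 *			if (!binary_sema_down(tsk)) {
 *				if (tsk->terminated)
 *					break;
 *				... process work; binary_sema_up(tsk) to re-arm ...
 *			} else
 *				break;
 *		}
 *		complete_and_exit(&tsk->completed, 0);
 *	}
 */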

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0))
#define SMP_RD_BARRIER_DEPENDS(x)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
#define SMP_RD_BARRIER_DEPENDS(x) smp_read_barrier_depends(x)
#else
#define SMP_RD_BARRIER_DEPENDS(x) smp_rmb(x)
#endif // endif

#define PROC_START(thread_func, owner, tsk_ctl, flags, name) \
{ \
	sema_init(&((tsk_ctl)->sema), 0); \
	init_completion(&((tsk_ctl)->completed)); \
	init_completion(&((tsk_ctl)->flushed)); \
	(tsk_ctl)->parent = owner; \
	(tsk_ctl)->proc_name = name;  \
	(tsk_ctl)->terminated = FALSE; \
	(tsk_ctl)->flush_ind = FALSE; \
	(tsk_ctl)->up_cnt = 0; \
	(tsk_ctl)->p_task  = kthread_run(thread_func, tsk_ctl, (char*)name); \
	if (IS_ERR((tsk_ctl)->p_task)) { \
		(tsk_ctl)->thr_pid = -1; \
		DBG_THR(("%s(): thread:%s create failed\n", __FUNCTION__, \
			(tsk_ctl)->proc_name)); \
	} else { \
		(tsk_ctl)->thr_pid = (tsk_ctl)->p_task->pid; \
		spin_lock_init(&((tsk_ctl)->spinlock)); \
		DBG_THR(("%s(): thread:%s:%lx started\n", __FUNCTION__, \
			(tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
	}; \
}

#define PROC_WAIT_TIMEOUT_MSEC	5000 /* 5 seconds */

#define PROC_STOP(tsk_ctl) \
{ \
	uint timeout = (uint)msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \
	(tsk_ctl)->terminated = TRUE; \
	smp_wmb(); \
	up(&((tsk_ctl)->sema));	\
	DBG_THR(("%s(): thread:%s:%lx wait for terminate\n", __FUNCTION__, \
			 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
	timeout = (uint)wait_for_completion_timeout(&((tsk_ctl)->completed), timeout); \
	if (timeout == 0) \
		DBG_THR(("%s(): thread:%s:%lx terminate timeout\n", __FUNCTION__, \
			 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
	else \
		DBG_THR(("%s(): thread:%s:%lx terminated OK\n", __FUNCTION__, \
			 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
	(tsk_ctl)->parent = NULL; \
	(tsk_ctl)->proc_name = NULL;  \
	(tsk_ctl)->thr_pid = -1; \
	(tsk_ctl)->up_cnt = 0; \
}

#define PROC_STOP_USING_BINARY_SEMA(tsk_ctl) \
{ \
	uint timeout = (uint)msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \
	(tsk_ctl)->terminated = TRUE; \
	smp_wmb(); \
	binary_sema_up(tsk_ctl);	\
	DBG_THR(("%s(): thread:%s:%lx wait for terminate\n", __FUNCTION__, \
			 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
	timeout = (uint)wait_for_completion_timeout(&((tsk_ctl)->completed), timeout); \
	if (timeout == 0) \
		DBG_THR(("%s(): thread:%s:%lx terminate timeout\n", __FUNCTION__, \
			 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
	else \
		DBG_THR(("%s(): thread:%s:%lx terminated OK\n", __FUNCTION__, \
			 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
	(tsk_ctl)->parent = NULL; \
	(tsk_ctl)->proc_name = NULL;  \
	(tsk_ctl)->thr_pid = -1; \
}

/*
 * Flush is non-reentrant, so callers must make sure
 * there is no race condition.
 * For a safer exit, wait_for_completion_timeout() is used
 * with a PROC_WAIT_TIMEOUT_MSEC (5 second) timeout.
 */
#define PROC_FLUSH_USING_BINARY_SEMA(tsk_ctl) \
{ \
	uint timeout = (uint)msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \
	(tsk_ctl)->flush_ind = TRUE; \
	smp_wmb(); \
	binary_sema_up(tsk_ctl);	\
	DBG_THR(("%s(): thread:%s:%lx wait for flush\n", __FUNCTION__, \
			 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
	timeout = (uint)wait_for_completion_timeout(&((tsk_ctl)->flushed), timeout); \
	if (timeout == 0) \
		DBG_THR(("%s(): thread:%s:%lx flush timeout\n", __FUNCTION__, \
			 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
	else \
		DBG_THR(("%s(): thread:%s:%lx flushed OK\n", __FUNCTION__, \
			 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
}
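
/*
 * Usage sketch for the thread lifecycle macros (illustrative; assumes a
 * thread function like my_dpc_thread() above and a hypothetical context
 * struct holding the tsk_ctl_t). thr_pid < 0 signals creation failure:
 *
 *	PROC_START(my_dpc_thread, ctx, &ctx->thr_ctl, 0, "my_dpc");
 *	if (ctx->thr_ctl.thr_pid < 0)
 *		... creation failed, handle the error ...
 *	...
 *	if (ctx->thr_ctl.thr_pid >= 0)
 *		PROC_STOP(&ctx->thr_ctl);
 */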

/*  ----------------------- */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0)
/* send_sig declaration moved */
#include <linux/sched/signal.h>
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0) */

#define KILL_PROC(nr, sig) \
{ \
	struct task_struct *tsk; \
	struct pid *pid;    \
	pid = find_get_pid((pid_t)nr);    \
	tsk = pid_task(pid, PIDTYPE_PID);    \
	if (tsk) send_sig(sig, tsk, 1); \
}
#else
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
	KERNEL_VERSION(2, 6, 30))
#define KILL_PROC(pid, sig) \
{ \
	struct task_struct *tsk; \
	tsk = find_task_by_vpid(pid); \
	if (tsk) send_sig(sig, tsk, 1); \
}
#else
#define KILL_PROC(pid, sig) \
{ \
	kill_proc(pid, sig, 1); \
}
#endif // endif
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31) */
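
/*
 * Usage sketch for KILL_PROC() (illustrative; the pid variable is
 * hypothetical). The expansion differs per kernel version, but the call
 * site is the same everywhere:
 *
 *	KILL_PROC(monitor_pid, SIGTERM);
 */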

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
#include <linux/time.h>
#include <linux/wait.h>
#else
#include <linux/sched.h>

#define __wait_event_interruptible_timeout(wq, condition, ret)		\
do {									\
	wait_queue_t __wait;						\
	init_waitqueue_entry(&__wait, current);				\
									\
	add_wait_queue(&wq, &__wait);					\
	for (;;) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (condition)						\
			break;						\
		if (!signal_pending(current)) {				\
			ret = schedule_timeout(ret);			\
			if (!ret)					\
				break;					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	current->state = TASK_RUNNING;					\
	remove_wait_queue(&wq, &__wait);				\
} while (0)

#define wait_event_interruptible_timeout(wq, condition, timeout)	\
({									\
	long __ret = timeout;						\
	if (!(condition))						\
		__wait_event_interruptible_timeout(wq, condition, __ret); \
	__ret;								\
})

#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
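
/*
 * Usage sketch for the wait_event_interruptible_timeout() backport above
 * (illustrative; the wait queue and flag are hypothetical). Return-value
 * semantics match the mainline macro: 0 on timeout, -ERESTARTSYS when
 * interrupted by a signal, remaining jiffies otherwise:
 *
 *	long left = wait_event_interruptible_timeout(my_wq, my_flag != 0, HZ);
 *	if (left == 0)
 *		... timed out ...
 */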

/*
 * For < 2.6.24, wl creates its own netdev but doesn't
 * align the priv area like the genuine alloc_netdev().
 * Since netdev_priv() always gives us the aligned address, it will
 * not match our unaligned address for < 2.6.24.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
#define DEV_PRIV(dev)	(dev->priv)
#else
#define DEV_PRIV(dev)	netdev_priv(dev)
#endif // endif
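
/*
 * Usage sketch for DEV_PRIV() (illustrative; the private struct is
 * hypothetical). Always go through the macro rather than netdev_priv() or
 * dev->priv directly, so the right accessor is picked per kernel:
 *
 *	struct my_priv *priv = DEV_PRIV(ndev);
 */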

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
#define WL_ISR(i, d, p)         wl_isr((i), (d))
#else
#define WL_ISR(i, d, p)         wl_isr((i), (d), (p))
#endif  /* < 2.6.20 */

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
#define netdev_priv(dev) dev->priv
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
#define CAN_SLEEP()	((!in_atomic() && !irqs_disabled()))
#else
#define CAN_SLEEP()	(FALSE)
#endif // endif

#define KMALLOC_FLAG (CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC)

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
#define RANDOM32	prandom_u32
#define RANDOM_BYTES	prandom_bytes
#else
#define RANDOM32	random32
#define RANDOM_BYTES	get_random_bytes
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
#define SRANDOM32(entropy)	prandom_seed(entropy)
#else
#define SRANDOM32(entropy)	srandom32(entropy)
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */

/*
 * Override the latest kfifo functions with
 * older versions so the code builds on older kernels
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) && !defined(WL_COMPAT_WIRELESS)
#define kfifo_in_spinlocked(a, b, c, d)		kfifo_put(a, (u8 *)b, c)
#define kfifo_out_spinlocked(a, b, c, d)	kfifo_get(a, (u8 *)b, c)
#define kfifo_esize(a)				1
#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32)) && \
	(LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)) &&	!defined(WL_COMPAT_WIRELESS)
#define kfifo_in_spinlocked(a, b, c, d)		kfifo_in_locked(a, b, c, d)
#define kfifo_out_spinlocked(a, b, c, d)	kfifo_out_locked(a, b, c, d)
#define kfifo_esize(a)				1
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */
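
/*
 * Usage sketch for the kfifo compatibility names (illustrative; the fifo,
 * buffer, and lock are hypothetical). The element size is effectively one
 * byte on the old APIs, hence the kfifo_esize() == 1 definition:
 *
 *	unsigned int n;
 *	n = kfifo_in_spinlocked(&my_fifo, buf, len, &my_lock);
 *	n = kfifo_out_spinlocked(&my_fifo, buf, len, &my_lock);
 */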

#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
#pragma GCC diagnostic pop
#endif // endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
#include <linux/fs.h>
static inline struct inode *file_inode(const struct file *f)
{
	return f->f_dentry->d_inode;
}
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
#define vfs_write(fp, buf, len, pos) kernel_write(fp, buf, len, pos)
#define vfs_read(fp, buf, len, pos) kernel_read(fp, buf, len, pos)
int kernel_read_compat(struct file *file, loff_t offset, char *addr, unsigned long count);
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) */
#define kernel_read_compat(file, offset, addr, count) kernel_read(file, offset, addr, count)
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) */
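
/*
 * Usage sketch for kernel_read_compat() (illustrative; the file pointer
 * and buffer are hypothetical). The wrapper hides the 4.14 argument-order
 * change in kernel_read(), so callers keep the old
 * (file, offset, buffer, count) signature on all kernels:
 *
 *	char hdr[64];
 *	int n = kernel_read_compat(fp, 0, hdr, sizeof(hdr));
 */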

#endif /* _linuxver_h_ */