/*
 * Linux-specific abstractions to gain some independence from linux kernel
 * versions. Pave over some 2.2 versus 2.4 versus 2.6 kernel differences.
 *
 * Copyright (C) 1999-2019, Broadcom.
 *
 *      Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 *      As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions
 * of the license of that module.  An independent module is a module which is
 * not derived from this software.  The special exception does not apply to any
 * modifications of the software.
 *
 *      Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: linuxver.h 806092 2019-02-21 08:19:13Z $
 */

#ifndef _linuxver_h_
#define _linuxver_h_

#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
#pragma GCC diagnostic ignored "-Wunused-but-set-parameter"
#endif // endif

#include <typedefs.h>
#include <linux/version.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
#include <linux/config.h>
#else
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33))
#include <generated/autoconf.h>
#else
#include <linux/autoconf.h>
#endif // endif
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
#include <linux/kconfig.h>
#endif // endif
#include <linux/module.h>

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0))
/* __NO_VERSION__ must be defined for all linkables except one in 2.2 */
#ifdef __UNDEF_NO_VERSION__
#undef __NO_VERSION__
#else
#define __NO_VERSION__
#endif // endif
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0) */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
#define module_param(_name_, _type_, _perm_) MODULE_PARM(_name_, "i")
#define module_param_string(_name_, _string_, _size_, _perm_)                  \
    MODULE_PARM(_string_, "c" __MODULE_STRING(_size_))
#endif // endif

/* linux/malloc.h is deprecated, use linux/slab.h instead. */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 9))
#include <linux/malloc.h>
#else
#include <linux/slab.h>
#endif // endif

#include <linux/types.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include <linux/time.h>
#include <linux/rtc.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
#include <linux/semaphore.h>
#else
#include <asm/semaphore.h>
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
#undef IP_TOS
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)) */
#include <asm/io.h>

#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
#include <linux/workqueue.h>
#else
#include <linux/tqueue.h>
#ifndef work_struct
#define work_struct tq_struct
#endif // endif
#ifndef INIT_WORK
#define INIT_WORK(_work, _func, _data) INIT_TQUEUE((_work), (_func), (_data))
#endif // endif
#ifndef schedule_work
#define schedule_work(_work) schedule_task((_work))
#endif // endif
#ifndef flush_scheduled_work
#define flush_scheduled_work() flush_scheduled_tasks()
#endif // endif
#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
#define DAEMONIZE(a)                                                           \
    do {                                                                       \
        allow_signal(SIGKILL);                                                 \
        allow_signal(SIGTERM);                                                 \
    } while (0)
#elif ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) &&                       \
       (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)))
#define DAEMONIZE(a)                                                           \
    daemonize(a);                                                              \
    allow_signal(SIGKILL);                                                     \
    allow_signal(SIGTERM);
#else /* Linux 2.4 (w/o preemption patch) */
#define RAISE_RX_SOFTIRQ() cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ)
#define DAEMONIZE(a)                                                           \
    daemonize();                                                               \
    do {                                                                       \
        if (a)                                                                 \
            strncpy(current->comm, a,                                          \
                    MIN(sizeof(current->comm), (strlen(a))));                  \
    } while (0);
#endif /* LINUX_VERSION_CODE */
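
/*
 * Hypothetical usage sketch (not part of the original header): DAEMONIZE is
 * meant to be invoked once at the top of a kernel thread body so that SIGKILL
 * and SIGTERM can later wake the thread out of interruptible sleeps. The
 * thread name "example_thr" and the surrounding function are illustrative
 * assumptions only.
 *
 *   static int example_thread(void *data)
 *   {
 *       DAEMONIZE("example_thr");
 *       while (!kthread_should_stop()) {
 *           // wait for work in an interruptible sleep; signals now wake us
 *       }
 *       return 0;
 *   }
 */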

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
#define MY_INIT_WORK(_work, _func) INIT_WORK(_work, _func)
#else
#define MY_INIT_WORK(_work, _func) INIT_WORK(_work, _func, _work)
#if !(LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18) && defined(RHEL_MAJOR) && \
      (RHEL_MAJOR == 5))
/* Exclude RHEL 5 */
typedef void (*work_func_t)(void *work);
#endif // endif
#endif /* >= 2.6.19 */
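
/*
 * Hypothetical usage sketch (not part of the original header): MY_INIT_WORK
 * papers over the 2.6.20 change that dropped the separate data pointer from
 * INIT_WORK. On pre-2.6.20 kernels the work item itself is passed as the
 * handler argument, so either way the handler can recover its enclosing
 * object with container_of(). Struct and handler names below are illustrative
 * assumptions only.
 *
 *   struct example_ctx {
 *       struct work_struct work;
 *       int value;
 *   };
 *
 *   static void example_handler(struct work_struct *work)
 *   {
 *       struct example_ctx *ctx = container_of(work, struct example_ctx, work);
 *       // ... act on ctx->value ...
 *   }
 *
 *   MY_INIT_WORK(&ctx->work, example_handler);
 *   schedule_work(&ctx->work);
 */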

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
/* Some distributions have their own 2.6.x compatibility layers */
#ifndef IRQ_NONE
typedef void irqreturn_t;
#define IRQ_NONE
#define IRQ_HANDLED
#define IRQ_RETVAL(x)
#endif // endif
#else
typedef irqreturn_t (*FN_ISR)(int irq, void *dev_id, struct pt_regs *ptregs);
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
#define IRQF_SHARED SA_SHIRQ
#endif /* < 2.6.18 */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
#ifdef CONFIG_NET_RADIO
#define CONFIG_WIRELESS_EXT
#endif // endif
#endif /* < 2.6.17 */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67)
#define MOD_INC_USE_COUNT
#define MOD_DEC_USE_COUNT
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67) */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
#include <linux/sched.h>
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
#include <linux/sched/rt.h>
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
#include <net/lib80211.h>
#endif // endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
#include <linux/ieee80211.h>
#else
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
#include <net/ieee80211.h>
#endif // endif
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) */

#ifndef __exit
#define __exit
#endif // endif
#ifndef __devexit
#define __devexit
#endif // endif
#ifndef __devinit
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
#define __devinit __init
#else
/* All devices are hotpluggable since linux 3.8.0 */
#define __devinit
#endif
#endif /* !__devinit */
#ifndef __devinitdata
#define __devinitdata
#endif // endif
#ifndef __devexit_p
#define __devexit_p(x) x
#endif // endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0))

#define pci_get_drvdata(dev) (dev)->sysdata
#define pci_set_drvdata(dev, value) (dev)->sysdata = (value)

/*
 * New-style (2.4.x) PCI/hot-pluggable PCI/CardBus registration
 */

struct pci_device_id {
    unsigned int vendor, device;       /* Vendor and device ID or PCI_ANY_ID */
    unsigned int subvendor, subdevice; /* Subsystem ID's or PCI_ANY_ID */
    unsigned int class, class_mask;    /* (class,subclass,prog-if) triplet */
    unsigned long driver_data;         /* Data private to the driver */
};

struct pci_driver {
    struct list_head node;
    char *name;
    const struct pci_device_id *id_table; /* NULL if wants all devices */
    int (*probe)(struct pci_dev *dev,
                 const struct pci_device_id *id); /* New device inserted */
    void (*remove)(struct pci_dev *dev);  /* Device removed (NULL if not a
                                           * hot-plug capable driver)
                                           */
    void (*suspend)(struct pci_dev *dev); /* Device suspended */
    void (*resume)(struct pci_dev *dev);  /* Device woken up */
};

#define MODULE_DEVICE_TABLE(type, name)
#define PCI_ANY_ID (~0)

/* compatpci.c */
#define pci_module_init pci_register_driver
extern int pci_register_driver(struct pci_driver *drv);
extern void pci_unregister_driver(struct pci_driver *drv);

#endif /* PCI registration */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18))
#define pci_module_init pci_register_driver
#endif // endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18))
#ifdef MODULE
#define module_init(x)                                                         \
    int init_module(void)                                                      \
    {                                                                          \
        return x();                                                            \
    }
#define module_exit(x)                                                         \
    void cleanup_module(void)                                                  \
    {                                                                          \
        x();                                                                   \
    }
#else
#define module_init(x) __initcall(x);
#define module_exit(x) __exitcall(x);
#endif // endif
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18) */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)
#define WL_USE_NETDEV_OPS
#else
#undef WL_USE_NETDEV_OPS
#endif // endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) && defined(CONFIG_RFKILL)
#define WL_CONFIG_RFKILL
#else
#undef WL_CONFIG_RFKILL
#endif // endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 48))
#define list_for_each(pos, head)                                               \
    for (pos = (head)->next; pos != (head); pos = pos->next)
#endif // endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 13))
#define pci_resource_start(dev, bar) ((dev)->base_address[(bar)])
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 44))
#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start)
#endif // endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 23))
#define pci_enable_device(dev)                                                 \
    do {                                                                       \
    } while (0)
#endif // endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 14))
#define net_device device
#endif // endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 42))

/*
 * DMA mapping
 *
 * See linux/Documentation/DMA-mapping.txt
 */

#ifndef PCI_DMA_TODEVICE
#define PCI_DMA_TODEVICE 1
#define PCI_DMA_FROMDEVICE 2
#endif // endif

typedef u32 dma_addr_t;

/* Pure 2^n version of get_order */
static inline int get_order(unsigned long size)
{
    int order;

    size = (size - 1) >> (PAGE_SHIFT - 1);
    order = -1;
    do {
        size >>= 1;
        order++;
    } while (size);
    return order;
}

static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
                                         dma_addr_t *dma_handle)
{
    void *ret;
    int gfp = GFP_ATOMIC | GFP_DMA;

    ret = (void *)__get_free_pages(gfp, get_order(size));
    if (ret != NULL) {
        memset(ret, 0, size);
        *dma_handle = virt_to_bus(ret);
    }
    return ret;
}
static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size,
                                       void *vaddr, dma_addr_t dma_handle)
{
    free_pages((unsigned long)vaddr, get_order(size));
}
#define pci_map_single(cookie, address, size, dir) virt_to_bus(address)
#define pci_unmap_single(cookie, address, size, dir)

#endif /* DMA mapping */
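
/*
 * Hypothetical usage sketch (not part of the original header): on these very
 * old kernels the pci_alloc_consistent() fallback above simply grabs low
 * (GFP_DMA) pages, zeroes them, and reports their bus address. A caller would
 * pair it with pci_free_consistent() like this (names illustrative only):
 *
 *   dma_addr_t pa;
 *   void *va = pci_alloc_consistent(pdev, 4096, &pa);
 *   if (va != NULL) {
 *       // program the device with the bus address pa, do the transfer
 *       pci_free_consistent(pdev, 4096, va, pa);
 *   }
 */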

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)

typedef struct timer_list timer_list_compat_t;

#define init_timer_compat(timer_compat, cb, priv)                              \
    init_timer(timer_compat);                                                  \
    (timer_compat)->data = (ulong)priv;                                        \
    (timer_compat)->function = cb
#define timer_set_private(timer_compat, priv) (timer_compat)->data = (ulong)priv
#define timer_expires(timer_compat) (timer_compat)->expires

#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) */

typedef struct timer_list_compat {
    struct timer_list timer;
    void *arg;
    void (*callback)(ulong arg);
} timer_list_compat_t;

extern void timer_cb_compat(struct timer_list *tl);

#define init_timer_compat(timer_compat, cb, priv)                              \
    (timer_compat)->arg = priv;                                                \
    (timer_compat)->callback = cb;                                             \
    timer_setup(&(timer_compat)->timer, timer_cb_compat, 0);
#define timer_set_private(timer_compat, priv) (timer_compat)->arg = priv
#define timer_expires(timer_compat) (timer_compat)->timer.expires

#define del_timer(t) del_timer(&((t)->timer))
#ifndef del_timer_sync
#define del_timer_sync(t) del_timer_sync(&((t)->timer))
#endif
#define timer_pending(t) timer_pending(&((t)->timer))
#define add_timer(t) add_timer(&((t)->timer))
#define mod_timer(t, j) mod_timer(&((t)->timer), j)

#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) */
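
/*
 * Hypothetical usage sketch (not part of the original header): the compat
 * layer above keeps the pre-4.15 (callback, data) timer convention working on
 * 4.15+, where timer_setup() replaced init_timer(); the redefined del_timer/
 * add_timer/mod_timer make the same caller code compile on both sides. Names
 * below are illustrative assumptions only.
 *
 *   static void example_timeout(ulong arg)
 *   {
 *       struct example_dev *dev = (struct example_dev *)arg;
 *       // ... handle the timeout ...
 *   }
 *
 *   timer_list_compat_t tmr;
 *   init_timer_compat(&tmr, example_timeout, dev);
 *   timer_expires(&tmr) = jiffies + HZ;
 *   add_timer(&tmr);          // redirected to &tmr.timer on 4.15+
 *   ...
 *   del_timer_sync(&tmr);
 */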

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)
#define rtc_time_to_tm(a, b) rtc_time64_to_tm(a, b)
#else
#define rtc_time_to_tm(a, b) rtc_time_to_tm(a, b)
#endif /* LINUX_VER >= 3.19.0 */

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 43))

#define dev_kfree_skb_any(a) dev_kfree_skb(a)
#define netif_down(dev)                                                        \
    do {                                                                       \
        (dev)->start = 0;                                                      \
    } while (0)

/* pcmcia-cs provides its own netdevice compatibility layer */
#ifndef _COMPAT_NETDEVICE_H

/*
 * SoftNet
 *
 * For pre-softnet kernels we need to tell the upper layer not to
 * re-enter start_xmit() while we are in there. However softnet
 * guarantees not to enter while we are in there so there is no need
 * to do the netif_stop_queue() dance unless the transmit queue really
 * gets stuck. This should also improve performance according to tests
 * done by Aman Singla.
 */

#define dev_kfree_skb_irq(a) dev_kfree_skb(a)
#define netif_wake_queue(dev)                                                  \
    do {                                                                       \
        clear_bit(0, &(dev)->tbusy);                                           \
        mark_bh(NET_BH);                                                       \
    } while (0)
#define netif_stop_queue(dev) set_bit(0, &(dev)->tbusy)

static inline void netif_start_queue(struct net_device *dev)
{
    dev->tbusy = 0;
    dev->interrupt = 0;
    dev->start = 1;
}

#define netif_queue_stopped(dev) (dev)->tbusy
#define netif_running(dev) (dev)->start

#endif /* _COMPAT_NETDEVICE_H */

#define netif_device_attach(dev) netif_start_queue(dev)
#define netif_device_detach(dev) netif_stop_queue(dev)

/* 2.4.x renamed bottom halves to tasklets */
#define tasklet_struct tq_struct
static inline void tasklet_schedule(struct tasklet_struct *tasklet)
{
    queue_task(tasklet, &tq_immediate);
    mark_bh(IMMEDIATE_BH);
}

static inline void tasklet_init(struct tasklet_struct *tasklet,
                                void (*func)(unsigned long), unsigned long data)
{
    tasklet->next = NULL;
    tasklet->sync = 0;
    tasklet->routine = (void (*)(void *))func;
    tasklet->data = (void *)data;
}
#define tasklet_kill(tasklet)                                                  \
    {                                                                          \
        do {                                                                   \
        } while (0);                                                           \
    }

/* 2.4.x introduced del_timer_sync() */
#define del_timer_sync(timer) del_timer(timer)

#else

#define netif_down(dev)

#endif /* SoftNet */

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3))

/*
 * Emit code to initialise a tq_struct's routine and data pointers
 */
#define PREPARE_TQUEUE(_tq, _routine, _data)                                   \
    do {                                                                       \
        (_tq)->routine = _routine;                                             \
        (_tq)->data = _data;                                                   \
    } while (0)

/*
 * Emit code to initialise all of a tq_struct
 */
#define INIT_TQUEUE(_tq, _routine, _data)                                      \
    do {                                                                       \
        INIT_LIST_HEAD(&(_tq)->list);                                          \
        (_tq)->sync = 0;                                                       \
        PREPARE_TQUEUE((_tq), (_routine), (_data));                            \
    } while (0)

#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3) */

/* Power management related macro & routines */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 9)
#define PCI_SAVE_STATE(a, b) pci_save_state(a)
#define PCI_RESTORE_STATE(a, b) pci_restore_state(a)
#else
#define PCI_SAVE_STATE(a, b) pci_save_state(a, b)
#define PCI_RESTORE_STATE(a, b) pci_restore_state(a, b)
#endif // endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6))
static inline int pci_save_state(struct pci_dev *dev, u32 *buffer)
{
    int i;

    if (buffer) {
        for (i = 0; i < 0x10; i++) {
            pci_read_config_dword(dev, i * 0x4, &buffer[i]);
        }
    }
    return 0;
}

static inline int pci_restore_state(struct pci_dev *dev, u32 *buffer)
{
    int i;

    if (buffer) {
        for (i = 0; i < 0x10; i++) {
            pci_write_config_dword(dev, i * 0x4, buffer[i]);
        }
    } else {
        /*
         * otherwise, write the context information we know from bootup.
         * This works around a problem where warm-booting from Windows
         * combined with a D3(hot)->D0 transition causes PCI config
         * header data to be forgotten.
         */
        for (i = 0; i < 0x6; i++) {
            pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + (i * 0x4),
                                   pci_resource_start(dev, i));
        }
        pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
    }
    return 0;
}
#endif /* PCI power management */

/* Old cp0 access macros deprecated in 2.4.19 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 19))
#define read_c0_count() read_32bit_cp0_register(CP0_COUNT)
#endif // endif

/* Module refcount handled internally in 2.6.x */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
#ifndef SET_MODULE_OWNER
#define SET_MODULE_OWNER(dev)                                                  \
    do {                                                                       \
    } while (0)
#define OLD_MOD_INC_USE_COUNT MOD_INC_USE_COUNT
#define OLD_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT
#else
#define OLD_MOD_INC_USE_COUNT                                                  \
    do {                                                                       \
    } while (0)
#define OLD_MOD_DEC_USE_COUNT                                                  \
    do {                                                                       \
    } while (0)
#endif // endif
#else  /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */
#ifndef SET_MODULE_OWNER
#define SET_MODULE_OWNER(dev)                                                  \
    do {                                                                       \
    } while (0)
#endif // endif
#ifndef MOD_INC_USE_COUNT
#define MOD_INC_USE_COUNT                                                      \
    do {                                                                       \
    } while (0)
#endif // endif
#ifndef MOD_DEC_USE_COUNT
#define MOD_DEC_USE_COUNT                                                      \
    do {                                                                       \
    } while (0)
#endif // endif
#define OLD_MOD_INC_USE_COUNT MOD_INC_USE_COUNT
#define OLD_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */

#ifndef SET_NETDEV_DEV
#define SET_NETDEV_DEV(net, pdev)                                              \
    do {                                                                       \
    } while (0)
#endif // endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0))
#ifndef HAVE_FREE_NETDEV
#define free_netdev(dev) kfree(dev)
#endif // endif
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0) */

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
/* struct packet_type redefined in 2.6.x */
#define af_packet_priv data
#endif // endif

/* suspend args */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
#define DRV_SUSPEND_STATE_TYPE pm_message_t
#else
#define DRV_SUSPEND_STATE_TYPE uint32
#endif // endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
#define CHECKSUM_HW CHECKSUM_PARTIAL
#endif // endif

typedef struct {
    void *parent; /* some external entity that the thread is supposed to work for */
    char *proc_name;
    struct task_struct *p_task;
    long thr_pid;
    int prio; /* priority */
    struct semaphore sema;
    int terminated;
    struct completion completed;
    int flush_ind;
    struct completion flushed;
    spinlock_t spinlock;
    int up_cnt;
} tsk_ctl_t;

/* Requires a tsk_ctl_t tsk argument; the caller's priv data is passed in the
 * owner ptr.
 */
/* Note: this macro assumes there may be only one context waiting on the
 * thread's completion.
 */
#ifndef DHD_LOG_PREFIX
#define DHD_LOG_PREFIX "[dhd]"
#endif
#define DHD_LOG_PREFIXS DHD_LOG_PREFIX " "
#ifdef DHD_DEBUG
#define printf_thr(fmt, args...) printk(DHD_LOG_PREFIXS fmt, ##args)
#define DBG_THR(args)                                                          \
    do {                                                                       \
        printf_thr args;                                                       \
    } while (0)
#else
#define DBG_THR(x)
#endif // endif

static inline bool binary_sema_down(tsk_ctl_t *tsk)
{
    if (down_interruptible(&tsk->sema) == 0) {
        unsigned long flags = 0;
        spin_lock_irqsave(&tsk->spinlock, flags);
        if (tsk->up_cnt == 1) {
            tsk->up_cnt--;
        } else {
            DBG_THR(("dhd_dpc_thread: Unexpected up_cnt %d\n", tsk->up_cnt));
        }
        spin_unlock_irqrestore(&tsk->spinlock, flags);
        return false;
    } else {
        return true;
    }
}

static inline bool binary_sema_up(tsk_ctl_t *tsk)
{
    bool sem_up = false;
    unsigned long flags = 0;

    spin_lock_irqsave(&tsk->spinlock, flags);
    if (tsk->up_cnt == 0) {
        tsk->up_cnt++;
        sem_up = true;
    } else if (tsk->up_cnt == 1) {
        /* dhd_sched_dpc: dpc is already up! */
    } else {
        DBG_THR(("dhd_sched_dpc: unexpected up cnt %d!\n", tsk->up_cnt));
    }

    spin_unlock_irqrestore(&tsk->spinlock, flags);

    if (sem_up) {
        up(&tsk->sema);
    }

    return sem_up;
}
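
/*
 * Hypothetical usage sketch (not part of the original header): up_cnt clamps
 * the counting semaphore to binary behavior, so repeated wake-ups from a
 * producer coalesce instead of accumulating. Note binary_sema_down() returns
 * false on a successful down and true only when interrupted by a signal.
 * A typical producer/consumer pairing (names illustrative only):
 *
 *   // producer (e.g. ISR bottom half): wake the thread at most once
 *   binary_sema_up(&tsk_ctl);
 *
 *   // consumer thread loop
 *   while (!binary_sema_down(&tsk_ctl)) {
 *       if (tsk_ctl.terminated)
 *           break;
 *       // ... process pending work ...
 *   }
 */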

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0))
#define SMP_RD_BARRIER_DEPENDS(x)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
#define SMP_RD_BARRIER_DEPENDS(x) smp_read_barrier_depends(x)
#else
#define SMP_RD_BARRIER_DEPENDS(x) smp_rmb(x)
#endif // endif

#define PROC_START(thread_func, owner, tsk_ctl, flags, name)                   \
    {                                                                          \
        sema_init(&((tsk_ctl)->sema), 0);                                      \
        init_completion(&((tsk_ctl)->completed));                              \
        init_completion(&((tsk_ctl)->flushed));                                \
        (tsk_ctl)->parent = owner;                                             \
        (tsk_ctl)->proc_name = name;                                           \
        (tsk_ctl)->terminated = FALSE;                                         \
        (tsk_ctl)->flush_ind = FALSE;                                          \
        (tsk_ctl)->up_cnt = 0;                                                 \
        (tsk_ctl)->p_task = kthread_run(thread_func, tsk_ctl, (char *)name);   \
        if (IS_ERR((tsk_ctl)->p_task)) {                                       \
            (tsk_ctl)->thr_pid = -1;                                           \
            DBG_THR(("%s(): thread:%s create failed\n", __FUNCTION__,          \
                     (tsk_ctl)->proc_name));                                   \
        } else {                                                               \
            (tsk_ctl)->thr_pid = (tsk_ctl)->p_task->pid;                       \
            spin_lock_init(&((tsk_ctl)->spinlock));                            \
            DBG_THR(("%s(): thread:%s:%lx started\n", __FUNCTION__,            \
                     (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid));               \
        };                                                                     \
    }

#define PROC_WAIT_TIMEOUT_MSEC 5000 /* 5 seconds */

#define PROC_STOP(tsk_ctl)                                                     \
    {                                                                          \
        uint timeout = (uint)msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC);         \
        (tsk_ctl)->terminated = TRUE;                                          \
        smp_wmb();                                                             \
        up(&((tsk_ctl)->sema));                                                \
        DBG_THR(("%s(): thread:%s:%lx wait for terminate\n", __FUNCTION__,     \
                 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid));                   \
        timeout = (uint)wait_for_completion_timeout(&((tsk_ctl)->completed),   \
                                                    timeout);                  \
        if (timeout == 0)                                                      \
            DBG_THR(("%s(): thread:%s:%lx terminate timeout\n", __FUNCTION__,  \
                     (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid));               \
        else                                                                   \
            DBG_THR(("%s(): thread:%s:%lx terminated OK\n", __FUNCTION__,      \
                     (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid));               \
        (tsk_ctl)->parent = NULL;                                              \
        (tsk_ctl)->proc_name = NULL;                                           \
        (tsk_ctl)->thr_pid = -1;                                               \
        (tsk_ctl)->up_cnt = 0;                                                 \
    }

#define PROC_STOP_USING_BINARY_SEMA(tsk_ctl)                                   \
    {                                                                          \
        uint timeout = (uint)msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC);         \
        (tsk_ctl)->terminated = TRUE;                                          \
        smp_wmb();                                                             \
        binary_sema_up(tsk_ctl);                                               \
        DBG_THR(("%s(): thread:%s:%lx wait for terminate\n", __FUNCTION__,     \
                 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid));                   \
        timeout = (uint)wait_for_completion_timeout(&((tsk_ctl)->completed),   \
                                                    timeout);                  \
        if (timeout == 0)                                                      \
            DBG_THR(("%s(): thread:%s:%lx terminate timeout\n", __FUNCTION__,  \
                     (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid));               \
        else                                                                   \
            DBG_THR(("%s(): thread:%s:%lx terminated OK\n", __FUNCTION__,      \
                     (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid));               \
        (tsk_ctl)->parent = NULL;                                              \
        (tsk_ctl)->proc_name = NULL;                                           \
        (tsk_ctl)->thr_pid = -1;                                               \
    }

/*
 * Flush is non-reentrant, so callers must make sure
 * there is no race condition.
 * For safer exit, wait_for_completion_timeout with a
 * PROC_WAIT_TIMEOUT_MSEC timeout was added.
 */
#define PROC_FLUSH_USING_BINARY_SEMA(tsk_ctl)                                  \
    {                                                                          \
        uint timeout = (uint)msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC);         \
        (tsk_ctl)->flush_ind = TRUE;                                           \
        smp_wmb();                                                             \
        binary_sema_up(tsk_ctl);                                               \
        DBG_THR(("%s(): thread:%s:%lx wait for flush\n", __FUNCTION__,         \
                 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid));                   \
        timeout =                                                              \
            (uint)wait_for_completion_timeout(&((tsk_ctl)->flushed), timeout); \
        if (timeout == 0)                                                      \
            DBG_THR(("%s(): thread:%s:%lx flush timeout\n", __FUNCTION__,      \
                     (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid));               \
        else                                                                   \
            DBG_THR(("%s(): thread:%s:%lx flushed OK\n", __FUNCTION__,         \
                     (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid));               \
    }
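
/*
 * Hypothetical usage sketch (not part of the original header): a thread
 * managed by PROC_START/PROC_STOP is expected to signal "completed" on exit
 * and "flushed" when it observes flush_ind, roughly as below. Names and the
 * exact work loop are illustrative assumptions; complete_and_exit() is the
 * pre-5.17 spelling.
 *
 *   static int example_thread(void *data)
 *   {
 *       tsk_ctl_t *tsk = (tsk_ctl_t *)data;
 *       DAEMONIZE("example_thr");
 *       while (!binary_sema_down(tsk)) {
 *           SMP_RD_BARRIER_DEPENDS();
 *           if (tsk->terminated)
 *               break;
 *           if (tsk->flush_ind) {
 *               tsk->flush_ind = FALSE;
 *               complete(&tsk->flushed);
 *               continue;
 *           }
 *           // ... do the actual work ...
 *       }
 *       complete_and_exit(&tsk->completed, 0);
 *   }
 *
 *   PROC_START(example_thread, ctx, &ctx->thr_ctl, 0, "example_thr");
 *   ...
 *   PROC_STOP_USING_BINARY_SEMA(&ctx->thr_ctl);
 */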

/*  ----------------------- */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0)
/* send_sig declaration moved */
#include <linux/sched/signal.h>
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0) */

#define KILL_PROC(nr, sig)                                                     \
    {                                                                          \
        struct task_struct *tsk;                                               \
        struct pid *pid;                                                       \
        pid = find_get_pid((pid_t)nr);                                         \
        tsk = pid_task(pid, PIDTYPE_PID);                                      \
        if (tsk)                                                               \
            send_sig(sig, tsk, 1);                                             \
        put_pid(pid); /* drop the reference taken by find_get_pid() */         \
    }
#else
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) &&                        \
    (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 30))
#define KILL_PROC(pid, sig)                                                    \
    {                                                                          \
        struct task_struct *tsk;                                               \
        tsk = find_task_by_vpid(pid);                                          \
        if (tsk)                                                               \
            send_sig(sig, tsk, 1);                                             \
    }
#else
#define KILL_PROC(pid, sig)                                                    \
    {                                                                          \
        kill_proc(pid, sig, 1);                                                \
    }
#endif // endif
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
#include <linux/time.h>
#include <linux/wait.h>
#else
#include <linux/sched.h>

#define __wait_event_interruptible_timeout(wq, condition, ret)                 \
    do {                                                                       \
        wait_queue_t __wait;                                                   \
        init_waitqueue_entry(&__wait, current);                                \
                                                                               \
        add_wait_queue(&wq, &__wait);                                          \
        for (;;) {                                                             \
            set_current_state(TASK_INTERRUPTIBLE);                             \
            if (condition)                                                     \
                break;                                                         \
            if (!signal_pending(current)) {                                    \
                ret = schedule_timeout(ret);                                   \
                if (!ret)                                                      \
                    break;                                                     \
                continue;                                                      \
            }                                                                  \
            ret = -ERESTARTSYS;                                                \
            break;                                                             \
        }                                                                      \
        current->state = TASK_RUNNING;                                         \
        remove_wait_queue(&wq, &__wait);                                       \
    } while (0)

#define wait_event_interruptible_timeout(wq, condition, timeout)               \
    ({                                                                         \
        long __ret = timeout;                                                  \
        if (!(condition))                                                      \
            __wait_event_interruptible_timeout(wq, condition, __ret);          \
        __ret;                                                                 \
    })

#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
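
/*
 * Hypothetical usage sketch (not part of the original header): the 2.4-era
 * fallback above mirrors the modern wait_event_interruptible_timeout()
 * contract, i.e. the expression evaluates to the remaining jiffies, 0 on
 * timeout, or -ERESTARTSYS when a signal arrived (names illustrative only):
 *
 *   long left = wait_event_interruptible_timeout(my_wq, my_cond != 0, HZ);
 *   if (left == 0)
 *       ;  // timed out
 *   else if (left < 0)
 *       ;  // interrupted by a signal
 *   else
 *       ;  // condition became true with `left` jiffies to spare
 */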

/*
 * For < 2.6.24, wl creates its own netdev but doesn't
 * align the priv area like the genuine alloc_netdev().
 * Since netdev_priv() always gives us the aligned address, it will
 * not match our unaligned address for < 2.6.24.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
#define DEV_PRIV(dev) (dev->priv)
#else
#define DEV_PRIV(dev) netdev_priv(dev)
#endif // endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
#define WL_ISR(i, d, p) wl_isr((i), (d))
#else
#define WL_ISR(i, d, p) wl_isr((i), (d), (p))
#endif /* >= 2.6.20 */

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
#define netdev_priv(dev) dev->priv
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
#define CAN_SLEEP() ((!in_atomic() && !irqs_disabled()))
#else
#define CAN_SLEEP() (FALSE)
#endif // endif

#define KMALLOC_FLAG (CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC)
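
/*
 * Hypothetical usage sketch (not part of the original header): CAN_SLEEP()
 * is a conservative "is blocking allowed here?" probe, so KMALLOC_FLAG picks
 * GFP_KERNEL only when neither atomic context nor disabled IRQs forbid
 * sleeping (error handling illustrative only):
 *
 *   void *buf = kmalloc(len, KMALLOC_FLAG);
 *   if (buf == NULL)
 *       return -ENOMEM;
 */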

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
#define RANDOM32 prandom_u32
#define RANDOM_BYTES prandom_bytes
#else
#define RANDOM32 random32
#define RANDOM_BYTES get_random_bytes
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
#define SRANDOM32(entropy) prandom_seed(entropy)
#else
#define SRANDOM32(entropy) srandom32(entropy)
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */

/*
 * Override the latest kfifo functions with
 * older versions to work on older kernels
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) &&                         \
    !defined(WL_COMPAT_WIRELESS)
#define kfifo_in_spinlocked(a, b, c, d) kfifo_put(a, (u8 *)b, c)
#define kfifo_out_spinlocked(a, b, c, d) kfifo_get(a, (u8 *)b, c)
#define kfifo_esize(a) 1
#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32)) &&                       \
    (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)) &&                         \
    !defined(WL_COMPAT_WIRELESS)
#define kfifo_in_spinlocked(a, b, c, d) kfifo_in_locked(a, b, c, d)
#define kfifo_out_spinlocked(a, b, c, d) kfifo_out_locked(a, b, c, d)
#define kfifo_esize(a) 1
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */

#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
#pragma GCC diagnostic pop
#endif // endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
#include <linux/fs.h>
static inline struct inode *file_inode(const struct file *f)
{
    return f->f_dentry->d_inode;
}
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
#define vfs_write(fp, buf, len, pos) kernel_write(fp, buf, len, pos)
#define vfs_read(fp, buf, len, pos) kernel_read(fp, buf, len, pos)
int kernel_read_compat(struct file *file, loff_t offset, char *addr,
                       unsigned long count);
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) */
#define kernel_read_compat(file, offset, addr, count)                          \
    kernel_read(file, offset, addr, count)
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
#define netdev_tx_t int
#endif

#endif /* _linuxver_h_ */