• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Linux-specific abstractions to gain some independence from linux kernel versions.
3  * Pave over some 2.2 versus 2.4 versus 2.6 kernel differences.
4  *
5  * Copyright (C) 1999-2017, Broadcom Corporation
6  *
7  *      Unless you and Broadcom execute a separate written software license
8  * agreement governing use of this software, this software is licensed to you
9  * under the terms of the GNU General Public License version 2 (the "GPL"),
10  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11  * following added to such license:
12  *
13  *      As a special exception, the copyright holders of this software give you
14  * permission to link this software with independent modules, and to copy and
15  * distribute the resulting executable under terms of your choice, provided that
16  * you also meet, for each linked independent module, the terms and conditions of
17  * the license of that module.  An independent module is a module which is not
18  * derived from this software.  The special exception does not apply to any
19  * modifications of the software.
20  *
21  *      Notwithstanding the above, under no circumstances may you combine this
22  * software in any way with any other Broadcom software provided under a license
23  * other than the GPL, without Broadcom's express prior written consent.
24  *
25  *
26  * <<Broadcom-WL-IPTag/Open:>>
27  *
28  * $Id: linuxver.h 646721 2016-06-30 12:36:41Z $
29  */
30 
31 #ifndef _linuxver_h_
32 #define _linuxver_h_
33 
34 #if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
35 #pragma GCC diagnostic push
36 #pragma GCC diagnostic ignored "-Wunused-but-set-variable"
37 #pragma GCC diagnostic ignored "-Wunused-but-set-parameter"
38 #endif
39 
40 #include <typedefs.h>
41 #include <linux/version.h>
42 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
43 #include <linux/config.h>
44 #else
45 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33))
46 #include <generated/autoconf.h>
47 #else
48 #include <linux/autoconf.h>
49 #endif
50 #endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
51 
52 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
53 #include <linux/kconfig.h>
54 #endif
55 #include <linux/module.h>
56 
57 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0))
58 /* __NO_VERSION__ must be defined for all linkables except one in 2.2 */
59 #ifdef __UNDEF_NO_VERSION__
60 #undef __NO_VERSION__
61 #else
62 #define __NO_VERSION__
63 #endif
64 #endif    /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0) */
65 
66 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
67 #define module_param(_name_, _type_, _perm_)    MODULE_PARM(_name_, "i")
68 #define module_param_string(_name_, _string_, _size_, _perm_) \
69         MODULE_PARM(_string_, "c" __MODULE_STRING(_size_))
70 #endif
71 
72 /* linux/malloc.h is deprecated, use linux/slab.h instead. */
73 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 9))
74 #include <linux/malloc.h>
75 #else
76 #include <linux/slab.h>
77 #endif
78 
79 #include <linux/types.h>
80 #include <linux/init.h>
81 #include <linux/mm.h>
82 #include <linux/string.h>
83 #include <linux/pci.h>
84 #include <linux/interrupt.h>
85 #include <linux/kthread.h>
86 #include <linux/netdevice.h>
87 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
88 #include <linux/semaphore.h>
89 #else
90 #include <asm/semaphore.h>
91 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
92 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
93 #undef IP_TOS
94 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)) */
95 #include <asm/io.h>
96 
97 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
98 #include <linux/workqueue.h>
99 #else
100 #include <linux/tqueue.h>
101 #ifndef work_struct
102 #define work_struct tq_struct
103 #endif
104 #ifndef INIT_WORK
105 #define INIT_WORK(_work, _func, _data) INIT_TQUEUE((_work), (_func), (_data))
106 #endif
107 #ifndef schedule_work
108 #define schedule_work(_work) schedule_task((_work))
109 #endif
110 #ifndef flush_scheduled_work
111 #define flush_scheduled_work() flush_scheduled_tasks()
112 #endif
113 #endif    /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41) */
114 
115 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
116 #define DAEMONIZE(a)    do { \
117         allow_signal(SIGKILL);    \
118         allow_signal(SIGTERM);    \
119     } while (0)
120 #elif ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) && \
121     (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)))
122 #define DAEMONIZE(a) daemonize(a); \
123     allow_signal(SIGKILL); \
124     allow_signal(SIGTERM);
125 #else /* Linux 2.4 (w/o preemption patch) */
126 #define RAISE_RX_SOFTIRQ() \
127     cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ)
128 #define DAEMONIZE(a) daemonize(); \
129     do { if (a) \
130         strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a)))); \
131     } while (0);
132 #endif /* LINUX_VERSION_CODE  */
133 
/* INIT_WORK lost its third (data) argument in 2.6.19; pre-2.6.19 work
 * functions receive the work item itself as their argument.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
#define	MY_INIT_WORK(_work, _func)	INIT_WORK(_work, _func)
#else
#define	MY_INIT_WORK(_work, _func)	INIT_WORK(_work, _func, _work)
#if !(LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18) && defined(RHEL_MAJOR) && \
	(RHEL_MAJOR == 5))
/* Exclude RHEL 5, which backported work_func_t to its 2.6.18 kernel */
typedef void (*work_func_t)(void *work);
#endif
#endif	/* >= 2.6.19 */
144 
145 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
146 /* Some distributions have their own 2.6.x compatibility layers */
147 #ifndef IRQ_NONE
148 typedef void irqreturn_t;
149 #define IRQ_NONE
150 #define IRQ_HANDLED
151 #define IRQ_RETVAL(x)
152 #endif
153 #else
154 typedef irqreturn_t(*FN_ISR) (int irq, void *dev_id, struct pt_regs *ptregs);
155 #endif    /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) */
156 
157 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
158 #define IRQF_SHARED    SA_SHIRQ
159 #endif /* < 2.6.18 */
160 
161 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
162 #ifdef    CONFIG_NET_RADIO
163 #define    CONFIG_WIRELESS_EXT
164 #endif
165 #endif    /* < 2.6.17 */
166 
167 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67)
168 #define MOD_INC_USE_COUNT
169 #define MOD_DEC_USE_COUNT
170 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67) */
171 
172 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
173 #include <linux/sched.h>
174 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) */
175 
176 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
177 #include <linux/sched/rt.h>
178 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
179 
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
#include <net/lib80211.h>
#endif
/* ieee80211.h moved from net/ to linux/ in 2.6.29 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
#include <linux/ieee80211.h>
#else
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
#include <net/ieee80211.h>
#endif
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) */
190 
191 
192 #ifndef __exit
193 #define __exit
194 #endif
195 #ifndef __devexit
196 #define __devexit
197 #endif
198 #ifndef __devinit
199 #  if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
200 #    define __devinit    __init
201 #  else
202 /* All devices are hotpluggable since linux 3.8.0 */
203 #    define __devinit
204 #  endif
205 #endif /* !__devinit */
206 #ifndef __devinitdata
207 #define __devinitdata
208 #endif
209 #ifndef __devexit_p
210 #define __devexit_p(x)    x
211 #endif
212 
213 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0))
214 
215 #define pci_get_drvdata(dev)        (dev)->sysdata
216 #define pci_set_drvdata(dev, value)    (dev)->sysdata = (value)
217 
218 /*
219  * New-style (2.4.x) PCI/hot-pluggable PCI/CardBus registration
220  */
221 
222 struct pci_device_id {
223     unsigned int vendor, device;        /* Vendor and device ID or PCI_ANY_ID */
224     unsigned int subvendor, subdevice;    /* Subsystem ID's or PCI_ANY_ID */
225     unsigned int class, class_mask;        /* (class,subclass,prog-if) triplet */
226     unsigned long driver_data;        /* Data private to the driver */
227 };
228 
229 struct pci_driver {
230     struct list_head node;
231     char *name;
232     const struct pci_device_id *id_table;    /* NULL if wants all devices */
233     int (*probe)(struct pci_dev *dev,
234                  const struct pci_device_id *id); /* New device inserted */
235     void (*remove)(struct pci_dev *dev);    /* Device removed (NULL if not a hot-plug
236                          * capable driver)
237                          */
238     void (*suspend)(struct pci_dev *dev);    /* Device suspended */
239     void (*resume)(struct pci_dev *dev);    /* Device woken up */
240 };
241 
242 #define MODULE_DEVICE_TABLE(type, name)
243 #define PCI_ANY_ID (~0)
244 
245 /* compatpci.c */
246 #define pci_module_init pci_register_driver
247 extern int pci_register_driver(struct pci_driver *drv);
248 extern void pci_unregister_driver(struct pci_driver *drv);
249 
250 #endif /* PCI registration */
251 
252 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18))
253 #define pci_module_init pci_register_driver
254 #endif
255 
256 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18))
257 #ifdef MODULE
258 #define module_init(x) int init_module(void) { return x(); }
259 #define module_exit(x) void cleanup_module(void) { x(); }
260 #else
261 #define module_init(x)    __initcall(x);
262 #define module_exit(x)    __exitcall(x);
263 #endif
264 #endif    /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18) */
265 
266 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)
267 #define WL_USE_NETDEV_OPS
268 #else
269 #undef WL_USE_NETDEV_OPS
270 #endif
271 
272 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) && defined(CONFIG_RFKILL)
273 #define WL_CONFIG_RFKILL
274 #else
275 #undef WL_CONFIG_RFKILL
276 #endif
277 
278 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 48))
279 #define list_for_each(pos, head) \
280     for (pos = (head)->next; pos != (head); pos = pos->next)
281 #endif
282 
283 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 13))
284 #define pci_resource_start(dev, bar)    ((dev)->base_address[(bar)])
285 #elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 44))
286 #define pci_resource_start(dev, bar)    ((dev)->resource[(bar)].start)
287 #endif
288 
289 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 23))
290 #define pci_enable_device(dev) do { } while (0)
291 #endif
292 
293 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 14))
294 #define net_device device
295 #endif
296 
297 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 42))
298 
299 /*
300  * DMA mapping
301  *
302  * See linux/Documentation/DMA-mapping.txt
303  */
304 
305 #ifndef PCI_DMA_TODEVICE
306 #define    PCI_DMA_TODEVICE    1
307 #define    PCI_DMA_FROMDEVICE    2
308 #endif
309 
310 typedef u32 dma_addr_t;
311 
312 /* Pure 2^n version of get_order */
get_order(unsigned long size)313 static inline int get_order(unsigned long size)
314 {
315     int order;
316 
317     size = (size-1) >> (PAGE_SHIFT-1);
318     order = -1;
319     do {
320         size >>= 1;
321         order++;
322     } while (size);
323     return order;
324 }
325 
/* Pre-2.3.42 fallback for coherent DMA allocation: grab zeroed,
 * DMA-capable pages and report their bus address. hwdev is unused here.
 */
static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
                                         dma_addr_t *dma_handle)
{
	int flags = GFP_ATOMIC | GFP_DMA;
	void *vaddr = (void *)__get_free_pages(flags, get_order(size));

	if (vaddr == NULL)
		return NULL;

	memset(vaddr, 0, size);
	*dma_handle = virt_to_bus(vaddr);
	return vaddr;
}
/* Counterpart to pci_alloc_consistent(): return the page run to the
 * allocator. hwdev and dma_handle are unused in this fallback.
 */
static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size,
                                       void *vaddr, dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long)vaddr;

	free_pages(addr, get_order(size));
}
345 #define pci_map_single(cookie, address, size, dir)    virt_to_bus(address)
346 #define pci_unmap_single(cookie, address, size, dir)
347 
348 #endif /* DMA mapping */
349 
350 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 43))
351 
352 #define dev_kfree_skb_any(a)        dev_kfree_skb(a)
353 #define netif_down(dev)            do { (dev)->start = 0; } while (0)
354 
355 /* pcmcia-cs provides its own netdevice compatibility layer */
356 #ifndef _COMPAT_NETDEVICE_H
357 
358 /*
359  * SoftNet
360  *
361  * For pre-softnet kernels we need to tell the upper layer not to
362  * re-enter start_xmit() while we are in there. However softnet
363  * guarantees not to enter while we are in there so there is no need
364  * to do the netif_stop_queue() dance unless the transmit queue really
365  * gets stuck. This should also improve performance according to tests
366  * done by Aman Singla.
367  */
368 
369 #define dev_kfree_skb_irq(a)    dev_kfree_skb(a)
370 #define netif_wake_queue(dev) \
371         do { clear_bit(0, &(dev)->tbusy); mark_bh(NET_BH); } while (0)
372 #define netif_stop_queue(dev)    set_bit(0, &(dev)->tbusy)
373 
netif_start_queue(struct net_device * dev)374 static inline void netif_start_queue(struct net_device *dev)
375 {
376     dev->tbusy = 0;
377     dev->interrupt = 0;
378     dev->start = 1;
379 }
380 
381 #define netif_queue_stopped(dev)    (dev)->tbusy
382 #define netif_running(dev)        (dev)->start
383 
384 #endif /* _COMPAT_NETDEVICE_H */
385 
386 #define netif_device_attach(dev)    netif_start_queue(dev)
387 #define netif_device_detach(dev)    netif_stop_queue(dev)
388 
389 /* 2.4.x renamed bottom halves to tasklets */
390 #define tasklet_struct                tq_struct
tasklet_schedule(struct tasklet_struct * tasklet)391 static inline void tasklet_schedule(struct tasklet_struct *tasklet)
392 {
393     queue_task(tasklet, &tq_immediate);
394     mark_bh(IMMEDIATE_BH);
395 }
396 
tasklet_init(struct tasklet_struct * tasklet,void (* func)(unsigned long),unsigned long data)397 static inline void tasklet_init(struct tasklet_struct *tasklet,
398                                 void (*func)(unsigned long),
399                                 unsigned long data)
400 {
401     tasklet->next = NULL;
402     tasklet->sync = 0;
403     tasklet->routine = (void (*)(void *))func;
404     tasklet->data = (void *)data;
405 }
406 #define tasklet_kill(tasklet)    { do {} while (0); }
407 
408 /* 2.4.x introduced del_timer_sync() */
409 #define del_timer_sync(timer) del_timer(timer)
410 
411 #else
412 
413 #define netif_down(dev)
414 
415 #endif /* SoftNet */
416 
417 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3))
418 
419 /*
420  * Emit code to initialise a tq_struct's routine and data pointers
421  */
422 #define PREPARE_TQUEUE(_tq, _routine, _data)            \
423     do {                            \
424         (_tq)->routine = _routine;            \
425         (_tq)->data = _data;                \
426     } while (0)
427 
428 /*
429  * Emit code to initialise all of a tq_struct
430  */
431 #define INIT_TQUEUE(_tq, _routine, _data)            \
432     do {                            \
433         INIT_LIST_HEAD(&(_tq)->list);            \
434         (_tq)->sync = 0;                \
435         PREPARE_TQUEUE((_tq), (_routine), (_data));    \
436     } while (0)
437 
438 #endif    /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3) */
439 
440 /* Power management related macro & routines */
441 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 9)
442 #define    PCI_SAVE_STATE(a, b)    pci_save_state(a)
443 #define    PCI_RESTORE_STATE(a, b)    pci_restore_state(a)
444 #else
445 #define    PCI_SAVE_STATE(a, b)    pci_save_state(a, b)
446 #define    PCI_RESTORE_STATE(a, b)    pci_restore_state(a, b)
447 #endif
448 
449 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6))
450 static inline int
pci_save_state(struct pci_dev * dev,u32 * buffer)451 pci_save_state(struct pci_dev *dev, u32 *buffer)
452 {
453     int i;
454     if (buffer) {
455         for (i = 0; i < 16; i++)
456             pci_read_config_dword(dev, i * 4, &buffer[i]);
457     }
458     return 0;
459 }
460 
/* Pre-2.4.6 fallback: rewrite the saved 16-dword config header, or --
 * with no saved buffer -- replay the BARs and IRQ line known from
 * bootup. Always returns 0.
 */
static inline int
pci_restore_state(struct pci_dev *dev, u32 *buffer)
{
	int i;

	if (buffer) {
		for (i = 0; i < 16; i++)
			pci_write_config_dword(dev, i * 4, buffer[i]);
	}
	/*
	 * otherwise, write the context information we know from bootup.
	 * This works around a problem where warm-booting from Windows
	 * combined with a D3(hot)->D0 transition causes PCI config
	 * header data to be forgotten.
	 */
	else {
		/* only the 6 base address registers, not the full header */
		for (i = 0; i < 6; i ++)
			pci_write_config_dword(dev,
			                       PCI_BASE_ADDRESS_0 + (i * 4),
			                       pci_resource_start(dev, i));
		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
	}
	return 0;
}
485 #endif /* PCI power management */
486 
487 /* Old cp0 access macros deprecated in 2.4.19 */
488 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 19))
489 #define read_c0_count() read_32bit_cp0_register(CP0_COUNT)
490 #endif
491 
492 /* Module refcount handled internally in 2.6.x */
493 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
494 #ifndef SET_MODULE_OWNER
495 #define SET_MODULE_OWNER(dev)        do {} while (0)
496 #define OLD_MOD_INC_USE_COUNT        MOD_INC_USE_COUNT
497 #define OLD_MOD_DEC_USE_COUNT        MOD_DEC_USE_COUNT
498 #else
499 #define OLD_MOD_INC_USE_COUNT        do {} while (0)
500 #define OLD_MOD_DEC_USE_COUNT        do {} while (0)
501 #endif
502 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */
503 #ifndef SET_MODULE_OWNER
504 #define SET_MODULE_OWNER(dev)        do {} while (0)
505 #endif
506 #ifndef MOD_INC_USE_COUNT
507 #define MOD_INC_USE_COUNT            do {} while (0)
508 #endif
509 #ifndef MOD_DEC_USE_COUNT
510 #define MOD_DEC_USE_COUNT            do {} while (0)
511 #endif
512 #define OLD_MOD_INC_USE_COUNT        MOD_INC_USE_COUNT
513 #define OLD_MOD_DEC_USE_COUNT        MOD_DEC_USE_COUNT
514 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */
515 
516 #ifndef SET_NETDEV_DEV
517 #define SET_NETDEV_DEV(net, pdev)    do {} while (0)
518 #endif
519 
520 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0))
521 #ifndef HAVE_FREE_NETDEV
522 #define free_netdev(dev)        kfree(dev)
523 #endif
524 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0) */
525 
526 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
527 /* struct packet_type redefined in 2.6.x */
528 #define af_packet_priv            data
529 #endif
530 
531 /* suspend args */
532 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
533 #define DRV_SUSPEND_STATE_TYPE pm_message_t
534 #else
535 #define DRV_SUSPEND_STATE_TYPE uint32
536 #endif
537 
538 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
539 #define CHECKSUM_HW    CHECKSUM_PARTIAL
540 #endif
541 
/* Control block for a driver-owned kernel thread: identity, wakeup
 * semaphore, and shutdown handshake state.
 */
typedef struct {
	void	*parent;  /* some external entity that the thread supposed to work for */
	char	*proc_name;	/* thread name passed to kthread_run() */
	struct	task_struct *p_task;	/* task created by kthread_run() */
	long	thr_pid;	/* pid of p_task, or DHD_PID_KT_INVALID */
	int		prio; /* priority */
	struct	semaphore sema;	/* binary wakeup semaphore (see binary_sema_*) */
	int	terminated;	/* set TRUE by PROC_STOP to request exit */
	struct	completion completed;	/* signalled by the thread on exit */
	spinlock_t	spinlock;	/* protects up_cnt */
	int		up_cnt;	/* 0 or 1: whether sema is currently "up" */
} tsk_ctl_t;
554 
555 
556 /* requires  tsk_ctl_t tsk  argument, the caller's priv data is passed in owner ptr */
557 /* note this macro assumes there may be only one context waiting on thread's completion */
558 #ifdef DHD_DEBUG
559 #define DBG_THR(x) printk x
560 #else
561 #define DBG_THR(x)
562 #endif
563 
binary_sema_down(tsk_ctl_t * tsk)564 static inline bool binary_sema_down(tsk_ctl_t *tsk)
565 {
566     if (down_interruptible(&tsk->sema) == 0) {
567         unsigned long flags = 0;
568         spin_lock_irqsave(&tsk->spinlock, flags);
569         if (tsk->up_cnt == 1)
570             tsk->up_cnt--;
571         else {
572             DBG_THR(("dhd_dpc_thread: Unexpected up_cnt %d\n", tsk->up_cnt));
573         }
574         spin_unlock_irqrestore(&tsk->spinlock, flags);
575         return false;
576     } else
577         return true;
578 }
579 
binary_sema_up(tsk_ctl_t * tsk)580 static inline bool binary_sema_up(tsk_ctl_t *tsk)
581 {
582     bool sem_up = false;
583     unsigned long flags = 0;
584 
585     spin_lock_irqsave(&tsk->spinlock, flags);
586     if (tsk->up_cnt == 0) {
587         tsk->up_cnt++;
588         sem_up = true;
589     } else if (tsk->up_cnt == 1) {
590         /* dhd_sched_dpc: dpc is alread up! */
591     } else
592         DBG_THR(("dhd_sched_dpc: unexpected up cnt %d!\n", tsk->up_cnt));
593 
594     spin_unlock_irqrestore(&tsk->spinlock, flags);
595 
596     if (sem_up)
597         up(&tsk->sema);
598 
599     return sem_up;
600 }
601 
602 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
603 #ifndef smp_read_barrier_depends
604 #define smp_read_barrier_depends(x) smp_rmb(x)
605 #endif
606 #define SMP_RD_BARRIER_DEPENDS(x) smp_read_barrier_depends(x)
607 #else
608 #define SMP_RD_BARRIER_DEPENDS(x) smp_rmb(x)
609 #endif
610 
611 #define PROC_START(thread_func, owner, tsk_ctl, flags, name) \
612 { \
613     sema_init(&((tsk_ctl)->sema), 0); \
614     init_completion(&((tsk_ctl)->completed)); \
615     (tsk_ctl)->parent = owner; \
616     (tsk_ctl)->proc_name = name;  \
617     (tsk_ctl)->terminated = FALSE; \
618     (tsk_ctl)->p_task  = kthread_run(thread_func, tsk_ctl, (char*)name); \
619     if (IS_ERR((tsk_ctl)->p_task)) { \
620         (tsk_ctl)->thr_pid = DHD_PID_KT_INVALID; \
621         DBG_THR(("%s(): thread:%s:%lx failed\n", __FUNCTION__, \
622             (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
623     } else { \
624         (tsk_ctl)->thr_pid = (tsk_ctl)->p_task->pid; \
625         spin_lock_init(&((tsk_ctl)->spinlock)); \
626         DBG_THR(("%s(): thread:%s:%lx started\n", __FUNCTION__, \
627             (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
628     }; \
629 }
630 
631 #define PROC_STOP(tsk_ctl) \
632 { \
633     (tsk_ctl)->terminated = TRUE; \
634     smp_wmb(); \
635     up(&((tsk_ctl)->sema));    \
636     wait_for_completion(&((tsk_ctl)->completed)); \
637     DBG_THR(("%s(): thread:%s:%lx terminated OK\n", __FUNCTION__, \
638              (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
639     (tsk_ctl)->thr_pid = -1; \
640 }
641 
642 /*  ----------------------- */
643 
644 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
645 #define KILL_PROC(nr, sig) \
646 { \
647 struct task_struct *tsk; \
648 struct pid *pid;    \
649 pid = find_get_pid((pid_t)nr);    \
650 tsk = pid_task(pid, PIDTYPE_PID);    \
651 if (tsk) send_sig(sig, tsk, 1); \
652 }
653 #else
654 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
655     KERNEL_VERSION(2, 6, 30))
656 #define KILL_PROC(pid, sig) \
657 { \
658     struct task_struct *tsk; \
659     tsk = find_task_by_vpid(pid); \
660     if (tsk) send_sig(sig, tsk, 1); \
661 }
662 #else
663 #define KILL_PROC(pid, sig) \
664 { \
665     kill_proc(pid, sig, 1); \
666 }
667 #endif
668 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31) */
669 
670 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
671 #include <linux/time.h>
672 #include <linux/wait.h>
673 #else
674 #include <linux/sched.h>
675 
676 #define __wait_event_interruptible_timeout(wq, condition, ret)        \
677 do {                                    \
678     wait_queue_t __wait;                        \
679     init_waitqueue_entry(&__wait, current);                \
680                                     \
681     add_wait_queue(&wq, &__wait);                    \
682     for (; ;) {                            \
683         set_current_state(TASK_INTERRUPTIBLE);            \
684         if (condition)                        \
685             break;                        \
686         if (!signal_pending(current)) {                \
687             ret = schedule_timeout(ret);            \
688             if (!ret)                    \
689                 break;                    \
690             continue;                    \
691         }                            \
692         ret = -ERESTARTSYS;                    \
693         break;                            \
694     }                                \
695     current->state = TASK_RUNNING;                    \
696     remove_wait_queue(&wq, &__wait);                \
697 } while (0)
698 
699 #define wait_event_interruptible_timeout(wq, condition, timeout)    \
700 ({                                    \
701     long __ret = timeout;                        \
702     if (!(condition))                        \
703         __wait_event_interruptible_timeout(wq, condition, __ret); \
704     __ret;                                \
705 })
706 
707 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
708 
709 /*
710 For < 2.6.24, wl creates its own netdev but doesn't
711 align the priv area like the genuine alloc_netdev().
712 Since netdev_priv() always gives us the aligned address, it will
713 not match our unaligned address for < 2.6.24
714 */
715 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
716 #define DEV_PRIV(dev)    (dev->priv)
717 #else
718 #define DEV_PRIV(dev)    netdev_priv(dev)
719 #endif
720 
721 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
722 #define WL_ISR(i, d, p)         wl_isr((i), (d))
723 #else
724 #define WL_ISR(i, d, p)         wl_isr((i), (d), (p))
725 #endif  /* < 2.6.20 */
726 
727 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
728 #define netdev_priv(dev) dev->priv
729 #endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
730 
731 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
732 #define CAN_SLEEP()    ((!in_atomic() && !irqs_disabled()))
733 #else
734 #define CAN_SLEEP()    (FALSE)
735 #endif
736 
737 #define KMALLOC_FLAG (CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC)
738 
739 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
740 #define RANDOM32    prandom_u32
741 #else
742 #define RANDOM32    random32
743 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
744 
745 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
746 #define SRANDOM32(entropy)    prandom_seed(entropy)
747 #else
748 #define SRANDOM32(entropy)    srandom32(entropy)
749 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
750 
751 /*
752  * Overide latest kfifo functions with
753  * older version to work on older kernels
754  */
755 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33))
756 #define kfifo_in_spinlocked(a, b, c, d)        kfifo_put(a, (u8 *)b, c)
757 #define kfifo_out_spinlocked(a, b, c, d)    kfifo_get(a, (u8 *)b, c)
758 #define kfifo_esize(a)                1
759 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32)) && \
760     (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)) &&    !defined(WL_COMPAT_WIRELESS)
761 #define kfifo_in_spinlocked(a, b, c, d)        kfifo_in_locked(a, b, c, d)
762 #define kfifo_out_spinlocked(a, b, c, d)    kfifo_out_locked(a, b, c, d)
763 #define kfifo_esize(a)                1
764 #endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */
765 
766 #if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
767 #pragma GCC diagnostic pop
768 #endif
769 
770 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
file_inode(const struct file * f)771 static inline struct inode *file_inode(const struct file *f)
772 {
773     return f->f_dentry->d_inode;
774 }
775 #endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) */
776 
777 #endif /* _linuxver_h_ */
778