1 /*
2 * Linux-specific abstractions to gain some independence from linux kernel versions.
3 * Pave over some 2.2 versus 2.4 versus 2.6 kernel differences.
4 *
5 * Copyright (C) 1999-2013, Broadcom Corporation
6 *
7 * Unless you and Broadcom execute a separate written software license
8 * agreement governing use of this software, this software is licensed to you
9 * under the terms of the GNU General Public License version 2 (the "GPL"),
10 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11 * following added to such license:
12 *
13 * As a special exception, the copyright holders of this software give you
14 * permission to link this software with independent modules, and to copy and
15 * distribute the resulting executable under terms of your choice, provided that
16 * you also meet, for each linked independent module, the terms and conditions of
17 * the license of that module. An independent module is a module which is not
18 * derived from this software. The special exception does not apply to any
19 * modifications of the software.
20 *
21 * Notwithstanding the above, under no circumstances may you combine this
22 * software in any way with any other Broadcom software provided under a license
23 * other than the GPL, without Broadcom's express prior written consent.
24 *
25 * $Id: linuxver.h 417757 2013-08-12 12:24:45Z $
26 */
27
28 #ifndef _linuxver_h_
29 #define _linuxver_h_
30
31 #include <typedefs.h>
32 #include <linux/version.h>
33 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
34 #include <linux/config.h>
35 #else
36 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33))
37 #include <generated/autoconf.h>
38 #else
39 #include <linux/autoconf.h>
40 #endif
41 #endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
42 #include <linux/module.h>
43
44 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0))
45 /* __NO_VERSION__ must be defined for all linkables except one in 2.2 */
46 #ifdef __UNDEF_NO_VERSION__
47 #undef __NO_VERSION__
48 #else
49 #define __NO_VERSION__
50 #endif
51 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0) */
52
53 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
54 #define module_param(_name_, _type_, _perm_) MODULE_PARM(_name_, "i")
55 #define module_param_string(_name_, _string_, _size_, _perm_) \
56 MODULE_PARM(_string_, "c" __MODULE_STRING(_size_))
57 #endif
58
59 /* linux/malloc.h is deprecated, use linux/slab.h instead. */
60 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 9))
61 #include <linux/malloc.h>
62 #else
63 #include <linux/slab.h>
64 #endif
65
66 #include <linux/types.h>
67 #include <linux/init.h>
68 #include <linux/mm.h>
69 #include <linux/string.h>
70 #include <linux/pci.h>
71 #include <linux/interrupt.h>
72 #include <linux/kthread.h>
73 #include <linux/netdevice.h>
74 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
75 #include <linux/semaphore.h>
76 #else
77 #include <asm/semaphore.h>
78 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
79 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
80 #undef IP_TOS
81 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)) */
82 #include <asm/io.h>
83
84 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
85 #include <linux/workqueue.h>
86 #else
87 #include <linux/tqueue.h>
88 #ifndef work_struct
89 #define work_struct tq_struct
90 #endif
91 #ifndef INIT_WORK
92 #define INIT_WORK(_work, _func, _data) INIT_TQUEUE((_work), (_func), (_data))
93 #endif
94 #ifndef schedule_work
95 #define schedule_work(_work) schedule_task((_work))
96 #endif
97 #ifndef flush_scheduled_work
98 #define flush_scheduled_work() flush_scheduled_tasks()
99 #endif
100 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41) */
101
102 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
103 #define DAEMONIZE(a)
104 #elif ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) && \
105 (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)))
106 #define DAEMONIZE(a) daemonize(a); \
107 allow_signal(SIGKILL); \
108 allow_signal(SIGTERM);
109 #else /* Linux 2.4 (w/o preemption patch) */
110 #define RAISE_RX_SOFTIRQ() \
111 cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ)
112 #define DAEMONIZE(a) daemonize(); \
113 do { if (a) \
114 strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a)))); \
115 } while (0);
116 #endif /* LINUX_VERSION_CODE */
117
118 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
119 #define MY_INIT_WORK(_work, _func) INIT_WORK(_work, _func)
120 #else
121 #define MY_INIT_WORK(_work, _func) INIT_WORK(_work, _func, _work)
122 #if !(LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18) && defined(RHEL_MAJOR) && \
123 (RHEL_MAJOR == 5))
124 /* Exclude RHEL 5 */
125 typedef void (*work_func_t)(void *work);
126 #endif
127 #endif /* >= 2.6.20 */
128
129 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
130 /* Some distributions have their own 2.6.x compatibility layers */
131 #ifndef IRQ_NONE
132 typedef void irqreturn_t;
133 #define IRQ_NONE
134 #define IRQ_HANDLED
135 #define IRQ_RETVAL(x)
136 #endif
137 #else
138 typedef irqreturn_t(*FN_ISR) (int irq, void *dev_id, struct pt_regs *ptregs);
139 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) */
140
141 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
142 #define IRQF_SHARED SA_SHIRQ
143 #endif /* < 2.6.18 */
144
145 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
146 #ifdef CONFIG_NET_RADIO
147 #define CONFIG_WIRELESS_EXT
148 #endif
149 #endif /* < 2.6.17 */
150
151 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67)
152 #define MOD_INC_USE_COUNT
153 #define MOD_DEC_USE_COUNT
154 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67) */
155
156 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
157 #include <linux/sched.h>
158 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) */
159
160 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
161 #include <linux/sched/rt.h>
162 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
163
164 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
165 #include <net/lib80211.h>
166 #endif
167 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
168 #include <linux/ieee80211.h>
169 #else
170 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
171 #include <net/ieee80211.h>
172 #endif
173 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30) */
174
175
176 #ifndef __exit
177 #define __exit
178 #endif
179 #ifndef __devexit
180 #define __devexit
181 #endif
182 #ifndef __devinit
183 #define __devinit __init
184 #endif
185 #ifndef __devinitdata
186 #define __devinitdata
187 #endif
188 #ifndef __devexit_p
189 #define __devexit_p(x) x
190 #endif
191
192 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0))
193
194 #define pci_get_drvdata(dev) (dev)->sysdata
195 #define pci_set_drvdata(dev, value) (dev)->sysdata = (value)
196
197 /*
198 * New-style (2.4.x) PCI/hot-pluggable PCI/CardBus registration
199 */
200
201 struct pci_device_id {
202 unsigned int vendor, device; /* Vendor and device ID or PCI_ANY_ID */
203 unsigned int subvendor, subdevice; /* Subsystem ID's or PCI_ANY_ID */
204 unsigned int class, class_mask; /* (class,subclass,prog-if) triplet */
205 unsigned long driver_data; /* Data private to the driver */
206 };
207
208 struct pci_driver {
209 struct list_head node;
210 char *name;
211 const struct pci_device_id *id_table; /* NULL if wants all devices */
212 int (*probe)(struct pci_dev *dev,
213 const struct pci_device_id *id); /* New device inserted */
214 void (*remove)(struct pci_dev *dev); /* Device removed (NULL if not a hot-plug
215 * capable driver)
216 */
217 void (*suspend)(struct pci_dev *dev); /* Device suspended */
218 void (*resume)(struct pci_dev *dev); /* Device woken up */
219 };
220
221 #define MODULE_DEVICE_TABLE(type, name)
222 #define PCI_ANY_ID (~0)
223
224 /* compatpci.c */
225 #define pci_module_init pci_register_driver
226 extern int pci_register_driver(struct pci_driver *drv);
227 extern void pci_unregister_driver(struct pci_driver *drv);
228
229 #endif /* PCI registration */
230
231 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18))
232 #define pci_module_init pci_register_driver
233 #endif
234
235 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18))
236 #ifdef MODULE
237 #define module_init(x) int init_module(void) { return x(); }
238 #define module_exit(x) void cleanup_module(void) { x(); }
239 #else
240 #define module_init(x) __initcall(x);
241 #define module_exit(x) __exitcall(x);
242 #endif
243 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18) */
244
245 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)
246 #define WL_USE_NETDEV_OPS
247 #else
248 #undef WL_USE_NETDEV_OPS
249 #endif
250
251 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) && defined(CONFIG_RFKILL)
252 #define WL_CONFIG_RFKILL
253 #else
254 #undef WL_CONFIG_RFKILL
255 #endif
256
257 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 48))
258 #define list_for_each(pos, head) \
259 for (pos = (head)->next; pos != (head); pos = pos->next)
260 #endif
261
262 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 13))
263 #define pci_resource_start(dev, bar) ((dev)->base_address[(bar)])
264 #elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 44))
265 #define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start)
266 #endif
267
268 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 23))
269 #define pci_enable_device(dev) do { } while (0)
270 #endif
271
272 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 14))
273 #define net_device device
274 #endif
275
276 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 42))
277
278 /*
279 * DMA mapping
280 *
281 * See linux/Documentation/DMA-mapping.txt
282 */
283
284 #ifndef PCI_DMA_TODEVICE
285 #define PCI_DMA_TODEVICE 1
286 #define PCI_DMA_FROMDEVICE 2
287 #endif
288
289 typedef u32 dma_addr_t;
290
291 /* Pure 2^n version of get_order */
get_order(unsigned long size)292 static inline int get_order(unsigned long size)
293 {
294 int order;
295
296 size = (size-1) >> (PAGE_SHIFT-1);
297 order = -1;
298 do {
299 size >>= 1;
300 order++;
301 } while (size);
302 return order;
303 }
304
pci_alloc_consistent(struct pci_dev * hwdev,size_t size,dma_addr_t * dma_handle)305 static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
306 dma_addr_t *dma_handle)
307 {
308 void *ret;
309 int gfp = GFP_ATOMIC | GFP_DMA;
310
311 ret = (void *)__get_free_pages(gfp, get_order(size));
312
313 if (ret != NULL) {
314 memset(ret, 0, size);
315 *dma_handle = virt_to_bus(ret);
316 }
317 return ret;
318 }
pci_free_consistent(struct pci_dev * hwdev,size_t size,void * vaddr,dma_addr_t dma_handle)319 static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size,
320 void *vaddr, dma_addr_t dma_handle)
321 {
322 free_pages((unsigned long)vaddr, get_order(size));
323 }
324 #define pci_map_single(cookie, address, size, dir) virt_to_bus(address)
325 #define pci_unmap_single(cookie, address, size, dir)
326
327 #endif /* DMA mapping */
328
329 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 43))
330
331 #define dev_kfree_skb_any(a) dev_kfree_skb(a)
332 #define netif_down(dev) do { (dev)->start = 0; } while (0)
333
334 /* pcmcia-cs provides its own netdevice compatibility layer */
335 #ifndef _COMPAT_NETDEVICE_H
336
337 /*
338 * SoftNet
339 *
340 * For pre-softnet kernels we need to tell the upper layer not to
341 * re-enter start_xmit() while we are in there. However softnet
342 * guarantees not to enter while we are in there so there is no need
343 * to do the netif_stop_queue() dance unless the transmit queue really
344 * gets stuck. This should also improve performance according to tests
345 * done by Aman Singla.
346 */
347
348 #define dev_kfree_skb_irq(a) dev_kfree_skb(a)
349 #define netif_wake_queue(dev) \
350 do { clear_bit(0, &(dev)->tbusy); mark_bh(NET_BH); } while (0)
351 #define netif_stop_queue(dev) set_bit(0, &(dev)->tbusy)
352
netif_start_queue(struct net_device * dev)353 static inline void netif_start_queue(struct net_device *dev)
354 {
355 dev->tbusy = 0;
356 dev->interrupt = 0;
357 dev->start = 1;
358 }
359
360 #define netif_queue_stopped(dev) (dev)->tbusy
361 #define netif_running(dev) (dev)->start
362
363 #endif /* _COMPAT_NETDEVICE_H */
364
365 #define netif_device_attach(dev) netif_start_queue(dev)
366 #define netif_device_detach(dev) netif_stop_queue(dev)
367
368 /* 2.4.x renamed bottom halves to tasklets */
369 #define tasklet_struct tq_struct
tasklet_schedule(struct tasklet_struct * tasklet)370 static inline void tasklet_schedule(struct tasklet_struct *tasklet)
371 {
372 queue_task(tasklet, &tq_immediate);
373 mark_bh(IMMEDIATE_BH);
374 }
375
tasklet_init(struct tasklet_struct * tasklet,void (* func)(unsigned long),unsigned long data)376 static inline void tasklet_init(struct tasklet_struct *tasklet,
377 void (*func)(unsigned long),
378 unsigned long data)
379 {
380 tasklet->next = NULL;
381 tasklet->sync = 0;
382 tasklet->routine = (void (*)(void *))func;
383 tasklet->data = (void *)data;
384 }
385 #define tasklet_kill(tasklet) { do {} while (0); }
386
387 /* 2.4.x introduced del_timer_sync() */
388 #define del_timer_sync(timer) del_timer(timer)
389
390 #else
391
392 #define netif_down(dev)
393
394 #endif /* SoftNet */
395
396 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3))
397
398 /*
399 * Emit code to initialise a tq_struct's routine and data pointers
400 */
401 #define PREPARE_TQUEUE(_tq, _routine, _data) \
402 do { \
403 (_tq)->routine = _routine; \
404 (_tq)->data = _data; \
405 } while (0)
406
407 /*
408 * Emit code to initialise all of a tq_struct
409 */
410 #define INIT_TQUEUE(_tq, _routine, _data) \
411 do { \
412 INIT_LIST_HEAD(&(_tq)->list); \
413 (_tq)->sync = 0; \
414 PREPARE_TQUEUE((_tq), (_routine), (_data)); \
415 } while (0)
416
417 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3) */
418
419 /* Power management related macro & routines */
420 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 9)
421 #define PCI_SAVE_STATE(a, b) pci_save_state(a)
422 #define PCI_RESTORE_STATE(a, b) pci_restore_state(a)
423 #else
424 #define PCI_SAVE_STATE(a, b) pci_save_state(a, b)
425 #define PCI_RESTORE_STATE(a, b) pci_restore_state(a, b)
426 #endif
427
428 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6))
429 static inline int
pci_save_state(struct pci_dev * dev,u32 * buffer)430 pci_save_state(struct pci_dev *dev, u32 *buffer)
431 {
432 int i;
433 if (buffer) {
434 for (i = 0; i < 16; i++)
435 pci_read_config_dword(dev, i * 4, &buffer[i]);
436 }
437 return 0;
438 }
439
440 static inline int
pci_restore_state(struct pci_dev * dev,u32 * buffer)441 pci_restore_state(struct pci_dev *dev, u32 *buffer)
442 {
443 int i;
444
445 if (buffer) {
446 for (i = 0; i < 16; i++)
447 pci_write_config_dword(dev, i * 4, buffer[i]);
448 }
449 /*
450 * otherwise, write the context information we know from bootup.
451 * This works around a problem where warm-booting from Windows
452 * combined with a D3(hot)->D0 transition causes PCI config
453 * header data to be forgotten.
454 */
455 else {
456 for (i = 0; i < 6; i ++)
457 pci_write_config_dword(dev,
458 PCI_BASE_ADDRESS_0 + (i * 4),
459 pci_resource_start(dev, i));
460 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
461 }
462 return 0;
463 }
464 #endif /* PCI power management */
465
466 /* Old cp0 access macros deprecated in 2.4.19 */
467 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 19))
468 #define read_c0_count() read_32bit_cp0_register(CP0_COUNT)
469 #endif
470
471 /* Module refcount handled internally in 2.6.x */
472 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
473 #ifndef SET_MODULE_OWNER
474 #define SET_MODULE_OWNER(dev) do {} while (0)
475 #define OLD_MOD_INC_USE_COUNT MOD_INC_USE_COUNT
476 #define OLD_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT
477 #else
478 #define OLD_MOD_INC_USE_COUNT do {} while (0)
479 #define OLD_MOD_DEC_USE_COUNT do {} while (0)
480 #endif
481 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */
482 #ifndef SET_MODULE_OWNER
483 #define SET_MODULE_OWNER(dev) do {} while (0)
484 #endif
485 #ifndef MOD_INC_USE_COUNT
486 #define MOD_INC_USE_COUNT do {} while (0)
487 #endif
488 #ifndef MOD_DEC_USE_COUNT
489 #define MOD_DEC_USE_COUNT do {} while (0)
490 #endif
491 #define OLD_MOD_INC_USE_COUNT MOD_INC_USE_COUNT
492 #define OLD_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT
493 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */
494
495 #ifndef SET_NETDEV_DEV
496 #define SET_NETDEV_DEV(net, pdev) do {} while (0)
497 #endif
498
499 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0))
500 #ifndef HAVE_FREE_NETDEV
501 #define free_netdev(dev) kfree(dev)
502 #endif
503 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0) */
504
505 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
506 /* struct packet_type redefined in 2.6.x */
507 #define af_packet_priv data
508 #endif
509
510 /* suspend args */
511 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
512 #define DRV_SUSPEND_STATE_TYPE pm_message_t
513 #else
514 #define DRV_SUSPEND_STATE_TYPE uint32
515 #endif
516
517 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
518 #define CHECKSUM_HW CHECKSUM_PARTIAL
519 #endif
520
521 typedef struct {
522 void *parent; /* some external entity that the thread supposed to work for */
523 char *proc_name;
524 struct task_struct *p_task;
525 long thr_pid;
526 int prio; /* priority */
527 struct semaphore sema;
528 int terminated;
529 struct completion completed;
530 spinlock_t spinlock;
531 int up_cnt;
532 } tsk_ctl_t;
533
534
535 /* requires tsk_ctl_t tsk argument, the caller's priv data is passed in owner ptr */
536 /* note this macro assumes there may be only one context waiting on thread's completion */
537 #ifdef DHD_DEBUG
538 #define DBG_THR(x) printk x
539 #else
540 #define DBG_THR(x)
541 #endif
542
binary_sema_down(tsk_ctl_t * tsk)543 static inline bool binary_sema_down(tsk_ctl_t *tsk)
544 {
545 if (down_interruptible(&tsk->sema) == 0) {
546 unsigned long flags = 0;
547 spin_lock_irqsave(&tsk->spinlock, flags);
548 if (tsk->up_cnt == 1)
549 tsk->up_cnt--;
550 else {
551 DBG_THR(("dhd_dpc_thread: Unexpected up_cnt %d\n", tsk->up_cnt));
552 }
553 spin_unlock_irqrestore(&tsk->spinlock, flags);
554 return FALSE;
555 } else
556 return TRUE;
557 }
558
binary_sema_up(tsk_ctl_t * tsk)559 static inline bool binary_sema_up(tsk_ctl_t *tsk)
560 {
561 bool sem_up = FALSE;
562 unsigned long flags = 0;
563
564 spin_lock_irqsave(&tsk->spinlock, flags);
565 if (tsk->up_cnt == 0) {
566 tsk->up_cnt++;
567 sem_up = TRUE;
568 } else if (tsk->up_cnt == 1) {
569 /* dhd_sched_dpc: dpc is alread up! */
570 } else
571 DBG_THR(("dhd_sched_dpc: unexpected up cnt %d!\n", tsk->up_cnt));
572
573 spin_unlock_irqrestore(&tsk->spinlock, flags);
574
575 if (sem_up)
576 up(&tsk->sema);
577
578 return sem_up;
579 }
580
581 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
582 #define SMP_RD_BARRIER_DEPENDS(x) smp_read_barrier_depends(x)
583 #else
584 #define SMP_RD_BARRIER_DEPENDS(x) smp_rmb(x)
585 #endif
586
587 #define PROC_START(thread_func, owner, tsk_ctl, flags, name) \
588 { \
589 sema_init(&((tsk_ctl)->sema), 0); \
590 init_completion(&((tsk_ctl)->completed)); \
591 (tsk_ctl)->parent = owner; \
592 (tsk_ctl)->proc_name = name; \
593 (tsk_ctl)->terminated = FALSE; \
594 (tsk_ctl)->p_task = kthread_run(thread_func, tsk_ctl, (char*)name); \
595 (tsk_ctl)->thr_pid = (tsk_ctl)->p_task->pid; \
596 spin_lock_init(&((tsk_ctl)->spinlock)); \
597 DBG_THR(("%s(): thread:%s:%lx started\n", __FUNCTION__, \
598 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
599 }
600
601 #define PROC_STOP(tsk_ctl) \
602 { \
603 (tsk_ctl)->terminated = TRUE; \
604 smp_wmb(); \
605 up(&((tsk_ctl)->sema)); \
606 wait_for_completion(&((tsk_ctl)->completed)); \
607 DBG_THR(("%s(): thread:%s:%lx terminated OK\n", __FUNCTION__, \
608 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
609 (tsk_ctl)->thr_pid = -1; \
610 }
611
612 /* ----------------------- */
613
614 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
615 #define KILL_PROC(nr, sig) \
616 { \
617 struct task_struct *tsk; \
618 struct pid *pid; \
619 pid = find_get_pid((pid_t)nr); \
620 tsk = pid_task(pid, PIDTYPE_PID); \
621 if (tsk) send_sig(sig, tsk, 1); \
622 }
623 #else
624 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
625 KERNEL_VERSION(2, 6, 30))
626 #define KILL_PROC(pid, sig) \
627 { \
628 struct task_struct *tsk; \
629 tsk = find_task_by_vpid(pid); \
630 if (tsk) send_sig(sig, tsk, 1); \
631 }
632 #else
633 #define KILL_PROC(pid, sig) \
634 { \
635 kill_proc(pid, sig, 1); \
636 }
637 #endif
638 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31) */
639
640 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
641 #include <linux/time.h>
642 #include <linux/wait.h>
643 #else
644 #include <linux/sched.h>
645
646 #define __wait_event_interruptible_timeout(wq, condition, ret) \
647 do { \
648 wait_queue_t __wait; \
649 init_waitqueue_entry(&__wait, current); \
650 \
651 add_wait_queue(&wq, &__wait); \
652 for (;;) { \
653 set_current_state(TASK_INTERRUPTIBLE); \
654 if (condition) \
655 break; \
656 if (!signal_pending(current)) { \
657 ret = schedule_timeout(ret); \
658 if (!ret) \
659 break; \
660 continue; \
661 } \
662 ret = -ERESTARTSYS; \
663 break; \
664 } \
665 current->state = TASK_RUNNING; \
666 remove_wait_queue(&wq, &__wait); \
667 } while (0)
668
669 #define wait_event_interruptible_timeout(wq, condition, timeout) \
670 ({ \
671 long __ret = timeout; \
672 if (!(condition)) \
673 __wait_event_interruptible_timeout(wq, condition, __ret); \
674 __ret; \
675 })
676
677 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
678
679 /*
680 For < 2.6.24, wl creates its own netdev but doesn't
681 align the priv area like the genuine alloc_netdev().
682 Since netdev_priv() always gives us the aligned address, it will
683 not match our unaligned address for < 2.6.24
684 */
685 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
686 #define DEV_PRIV(dev) (dev->priv)
687 #else
688 #define DEV_PRIV(dev) netdev_priv(dev)
689 #endif
690
691 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
692 #define WL_ISR(i, d, p) wl_isr((i), (d))
693 #else
694 #define WL_ISR(i, d, p) wl_isr((i), (d), (p))
695 #endif /* < 2.6.20 */
696
697 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
698 #define netdev_priv(dev) dev->priv
699 #endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
700
701 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
702 #define RANDOM32 prandom_u32
703 #else
704 #define RANDOM32 random32
705 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
706
707 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
708 #define SRANDOM32(entropy) prandom_seed(entropy)
709 #else
710 #define SRANDOM32(entropy) srandom32(entropy)
711 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
712
713 #endif /* _linuxver_h_ */
714