• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /******************************************************************************
2  *
3  * Copyright(c) 2007 - 2017 Realtek Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of version 2 of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12  * more details.
13  *
14  *****************************************************************************/
15 #ifndef __OSDEP_LINUX_SERVICE_H_
16 #define __OSDEP_LINUX_SERVICE_H_
17 
18 #include <linux/version.h>
19 #include <linux/spinlock.h>
20 #include <linux/compiler.h>
21 #include <linux/kernel.h>
22 #include <linux/errno.h>
23 #include <linux/init.h>
24 #include <linux/slab.h>
25 #include <linux/module.h>
26 #include <linux/namei.h>
27 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 5))
28 	#include <linux/kref.h>
29 #endif
30 /* #include <linux/smp_lock.h> */
31 #include <linux/netdevice.h>
32 #include <linux/inetdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/circ_buf.h>
35 #include <asm/uaccess.h>
36 #include <asm/byteorder.h>
37 #include <asm/atomic.h>
38 #include <asm/io.h>
39 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26))
40 	#include <asm/semaphore.h>
41 #else
42 	#include <linux/semaphore.h>
43 #endif
44 #include <linux/sem.h>
45 #include <linux/sched.h>
46 #include <linux/etherdevice.h>
47 #include <linux/wireless.h>
48 #include <net/iw_handler.h>
49 #include <net/addrconf.h>
50 #include <linux/if_arp.h>
51 #include <linux/rtnetlink.h>
52 #include <linux/delay.h>
53 #include <linux/interrupt.h>	/* for struct tasklet_struct */
54 #include <linux/ip.h>
55 #include <linux/kthread.h>
56 #include <linux/list.h>
57 #include <linux/vmalloc.h>
58 
59 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
60 	#include <uapi/linux/sched/types.h>
61 #endif
62 
63 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 5, 41))
64 	#include <linux/tqueue.h>
65 #endif
66 
67 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
68 	#include <uapi/linux/limits.h>
69 #else
70 	#include <linux/limits.h>
71 #endif
72 
73 #ifdef RTK_DMP_PLATFORM
74 	#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 12))
75 		#include <linux/pageremap.h>
76 	#endif
77 	#include <asm/io.h>
78 #endif
79 
80 #ifdef CONFIG_NET_RADIO
81 	#define CONFIG_WIRELESS_EXT
82 #endif
83 
84 /* Monitor mode */
85 #include <net/ieee80211_radiotap.h>
86 
87 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24))
88 	#include <linux/ieee80211.h>
89 #endif
90 
91 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) && \
92 	 LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29))
93 	#define CONFIG_IEEE80211_HT_ADDT_INFO
94 #endif
95 
96 #ifdef CONFIG_IOCTL_CFG80211
97 	/*	#include <linux/ieee80211.h> */
98 	#include <net/cfg80211.h>
99 #else
100 	#ifdef CONFIG_REGD_SRC_FROM_OS
101 	#error "CONFIG_REGD_SRC_FROM_OS requires CONFIG_IOCTL_CFG80211"
102 	#endif
103 #endif /* CONFIG_IOCTL_CFG80211 */
104 
105 
106 #ifdef CONFIG_HAS_EARLYSUSPEND
107 	#include <linux/earlysuspend.h>
108 #endif /* CONFIG_HAS_EARLYSUSPEND */
109 
110 #ifdef CONFIG_EFUSE_CONFIG_FILE
111 	#include <linux/fs.h>
112 #endif
113 
114 #ifdef CONFIG_USB_HCI
115 	#include <linux/usb.h>
116 	#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 21))
117 		#include <linux/usb_ch9.h>
118 	#else
119 		#include <linux/usb/ch9.h>
120 	#endif
121 #endif
122 
123 #ifdef CONFIG_BT_COEXIST_SOCKET_TRX
124 	#include <net/sock.h>
125 	#include <net/tcp.h>
126 	#include <linux/udp.h>
127 	#include <linux/in.h>
128 	#include <linux/netlink.h>
129 #endif /* CONFIG_BT_COEXIST_SOCKET_TRX */
130 
131 #ifdef CONFIG_USB_HCI
132 	typedef struct urb   *PURB;
133 #endif
134 
135 #if defined(CONFIG_RTW_GRO) && (!defined(CONFIG_RTW_NAPI))
136 
137 	#error "Enable NAPI before enable GRO\n"
138 
139 #endif
140 
141 
142 #if (KERNEL_VERSION(2, 6, 29) > LINUX_VERSION_CODE && defined(CONFIG_RTW_NAPI))
143 
144 	#undef CONFIG_RTW_NAPI
145 	/*#warning "Linux Kernel version too old to support NAPI (should newer than 2.6.29)\n"*/
146 
147 #endif
148 
149 #if (KERNEL_VERSION(2, 6, 33) > LINUX_VERSION_CODE && defined(CONFIG_RTW_GRO))
150 
151 	#undef CONFIG_RTW_GRO
152 	/*#warning "Linux Kernel version too old to support GRO(should newer than 2.6.33)\n"*/
153 
154 #endif
155 
156 typedef struct	semaphore _sema;
157 typedef	spinlock_t	_lock;
158 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
159 	typedef struct mutex		_mutex;
160 #else
161 	typedef struct semaphore	_mutex;
162 #endif
163 struct rtw_timer_list {
164 	struct timer_list timer;
165 	void (*function)(void *);
166 	void *arg;
167 };
168 
169 typedef struct rtw_timer_list _timer;
170 typedef struct completion _completion;
171 
172 struct	__queue	{
173 	struct	list_head	queue;
174 	_lock	lock;
175 };
176 
177 typedef	struct sk_buff	_pkt;
178 typedef unsigned char	_buffer;
179 
180 typedef struct	__queue	_queue;
181 typedef struct	list_head	_list;
182 
183 /* hlist */
184 typedef struct	hlist_head	rtw_hlist_head;
185 typedef struct	hlist_node	rtw_hlist_node;
186 
187 /* RCU */
188 typedef struct rcu_head rtw_rcu_head;
189 #define rtw_rcu_dereference(p) rcu_dereference((p))
190 #define rtw_rcu_dereference_protected(p, c) rcu_dereference_protected(p, c)
191 #define rtw_rcu_assign_pointer(p, v) rcu_assign_pointer((p), (v))
192 #define rtw_rcu_read_lock() rcu_read_lock()
193 #define rtw_rcu_read_unlock() rcu_read_unlock()
194 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 34))
195 #define rtw_rcu_access_pointer(p) rcu_access_pointer(p)
196 #endif
197 
198 /* rhashtable */
199 #include "../os_dep/linux/rtw_rhashtable.h"
200 
201 typedef	int	_OS_STATUS;
202 /* typedef u32	_irqL; */
203 typedef unsigned long _irqL;
204 typedef	struct	net_device *_nic_hdl;
205 
206 typedef void		*_thread_hdl_;
207 typedef int		thread_return;
208 typedef void	*thread_context;
209 
210 typedef void timer_hdl_return;
211 typedef void *timer_hdl_context;
212 
213 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
214 	typedef struct work_struct _workitem;
215 #else
216 	typedef struct tq_struct _workitem;
217 #endif
218 
219 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
220 	#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
221 #endif
222 
223 typedef unsigned long systime;
224 typedef ktime_t sysptime;
225 typedef struct tasklet_struct _tasklet;
226 
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22))
/* Porting from linux kernel, for compatible with old kernel. */

/* Return the tail pointer; on pre-2.6.22 kernels skb->tail is a plain
 * unsigned char * rather than an offset. */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

/* Reset the tail pointer so the skb holds no data (tail == data). */
static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

/* Place the tail pointer @offset bytes past the start of the data area. */
static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

/* Return the end of the skb's data buffer. */
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}
#endif
249 
/* Unlink @plist from its list and re-initialize it as an empty,
 * self-linked node (safe to delete again or re-insert). */
__inline static void rtw_list_delete(_list *plist)
{
	list_del_init(plist);
}
254 
/* Return the element following @list; when called on a list head this
 * yields the first entry (or the head itself if the list is empty). */
__inline static _list *get_next(_list	*list)
{
	return list->next;
}
259 
260 #define LIST_CONTAINOR(ptr, type, member) \
261 	((type *)((char *)(ptr)-(SIZE_T)(&((type *)0)->member)))
262 
263 #define rtw_list_first_entry(ptr, type, member) list_first_entry(ptr, type, member)
264 
265 #define rtw_hlist_for_each_entry(pos, head, member) hlist_for_each_entry(pos, head, member)
266 #define rtw_hlist_for_each_safe(pos, n, head) hlist_for_each_safe(pos, n, head)
267 #define rtw_hlist_entry(ptr, type, member) hlist_entry(ptr, type, member)
268 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
269 #define rtw_hlist_for_each_entry_safe(pos, np, n, head, member) hlist_for_each_entry_safe(pos, n, head, member)
270 #define rtw_hlist_for_each_entry_rcu(pos, node, head, member) hlist_for_each_entry_rcu(pos, head, member)
271 #else
272 #define rtw_hlist_for_each_entry_safe(pos, np, n, head, member) hlist_for_each_entry_safe(pos, np, n, head, member)
273 #define rtw_hlist_for_each_entry_rcu(pos, node, head, member) hlist_for_each_entry_rcu(pos, node, head, member)
274 #endif
275 
/* Take @plock and disable local IRQs; the previous IRQ state is saved
 * into *pirqL for the matching _exit_critical(). */
__inline static void _enter_critical(_lock *plock, _irqL *pirqL)
{
	spin_lock_irqsave(plock, *pirqL);
}

/* Release @plock and restore the IRQ state saved in *pirqL. */
__inline static void _exit_critical(_lock *plock, _irqL *pirqL)
{
	spin_unlock_irqrestore(plock, *pirqL);
}

/* Same as _enter_critical(); kept as a distinct name for call sites
 * that historically distinguished "ex" (interrupt-capable) paths. */
__inline static void _enter_critical_ex(_lock *plock, _irqL *pirqL)
{
	spin_lock_irqsave(plock, *pirqL);
}

/* Same as _exit_critical(). */
__inline static void _exit_critical_ex(_lock *plock, _irqL *pirqL)
{
	spin_unlock_irqrestore(plock, *pirqL);
}

/* Take @plock with bottom halves disabled; @pirqL is unused and kept
 * only for signature compatibility with the IRQ-saving variants. */
__inline static void _enter_critical_bh(_lock *plock, _irqL *pirqL)
{
	spin_lock_bh(plock);
}

/* Release @plock and re-enable bottom halves; @pirqL is unused. */
__inline static void _exit_critical_bh(_lock *plock, _irqL *pirqL)
{
	spin_unlock_bh(plock);
}

/* Variant of _enter_critical_bh() without the unused _irqL parameter. */
__inline static void enter_critical_bh(_lock *plock)
{
	spin_lock_bh(plock);
}

/* Variant of _exit_critical_bh() without the unused _irqL parameter. */
__inline static void exit_critical_bh(_lock *plock)
{
	spin_unlock_bh(plock);
}
315 
/* Interruptibly acquire @pmutex (@pirqL is unused).
 * Returns 0 on success or a negative value if a signal interrupted the
 * wait; callers must check the result before touching protected data. */
__inline static int _enter_critical_mutex(_mutex *pmutex, _irqL *pirqL)
{
	int ret = 0;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
	/* mutex_lock(pmutex); */
	ret = mutex_lock_interruptible(pmutex);
#else
	/* pre-2.6.37 _mutex is a semaphore */
	ret = down_interruptible(pmutex);
#endif
	return ret;
}
327 
328 
/* Uninterruptibly acquire @pmutex (@pirqL is unused).
 * Always returns 0; the return value exists only to mirror the
 * signature of _enter_critical_mutex(). */
__inline static int _enter_critical_mutex_lock(_mutex *pmutex, _irqL *pirqL)
{
	int ret = 0;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
	mutex_lock(pmutex);
#else
	/* pre-2.6.37 _mutex is a semaphore */
	down(pmutex);
#endif
	return ret;
}
339 
/* Release @pmutex previously taken by one of the _enter_critical_mutex*
 * helpers (@pirqL is unused). */
__inline static void _exit_critical_mutex(_mutex *pmutex, _irqL *pirqL)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
	mutex_unlock(pmutex);
#else
	/* pre-2.6.37 _mutex is a semaphore */
	up(pmutex);
#endif
}
348 
/* Return the list head embedded in @queue (lock must be handled by the
 * caller via queue->lock). */
__inline static _list	*get_list_head(_queue	*queue)
{
	return &(queue->queue);
}
353 
/* Common timer dispatch: recover the wrapping _timer and invoke the
 * driver callback with its saved argument.  The two prototypes match
 * the kernel timer API before/after the 4.14 timer_setup() rework. */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
static inline void timer_hdl(struct timer_list *in_timer)
#else
static inline void timer_hdl(unsigned long cntx)
#endif
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
	/* container_of back from the embedded struct timer_list */
	_timer *ptimer = from_timer(ptimer, in_timer, timer);
#else
	/* pre-4.14: context was stashed in timer.data by _init_timer() */
	_timer *ptimer = (_timer *)cntx;
#endif
	ptimer->function(ptimer->arg);
}
367 
/* Initialize @ptimer so that expiry calls pfunc(cntx) via timer_hdl().
 * @nic_hdl is unused (kept for historical call-site compatibility). */
__inline static void _init_timer(_timer *ptimer, _nic_hdl nic_hdl, void *pfunc, void *cntx)
{
	ptimer->function = pfunc;
	ptimer->arg = cntx;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
	timer_setup(&ptimer->timer, timer_hdl, 0);
#else
	/* setup_timer(ptimer, pfunc,(u32)cntx);	 */
	/* NOTE(review): function/data are assigned before init_timer();
	 * init_timer() does not clear them on these kernels, but the
	 * conventional order is init_timer() first — confirm on target. */
	ptimer->timer.function = timer_hdl;
	ptimer->timer.data = (unsigned long)ptimer;
	init_timer(&ptimer->timer);
#endif
}
382 
/* (Re)arm @ptimer to expire @delay_time milliseconds from now.
 * msecs_to_jiffies() is used instead of the open-coded
 * (delay_time * HZ / 1000): the latter overflows u32 for large delays
 * and truncates, which could make the timer fire up to a jiffy early. */
__inline static void _set_timer(_timer *ptimer, u32 delay_time)
{
	mod_timer(&ptimer->timer, jiffies + msecs_to_jiffies(delay_time));
}
387 
/* Synchronously stop @ptimer, waiting for a running handler to finish.
 * *bcancelled is set to 1 if a pending timer was actually deactivated,
 * 0 otherwise. */
__inline static void _cancel_timer(_timer *ptimer, u8 *bcancelled)
{
	int deactivated = del_timer_sync(&ptimer->timer);

	*bcancelled = (deactivated == 1) ? 1 : 0;
}
392 
/* Stop @ptimer without waiting for a concurrently running handler to
 * finish (asynchronous variant of _cancel_timer()). */
__inline static void _cancel_timer_async(_timer *ptimer)
{
	del_timer(&ptimer->timer);
}
397 
/* Initialize work item @pwork with handler @pfunc.
 * @cntx is unused: on >= 2.6.20 the handler receives the work_struct
 * itself; on older kernels the work item pointer is passed instead. */
static inline void _init_workitem(_workitem *pwork, void *pfunc, void *cntx)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20))
	INIT_WORK(pwork, pfunc);
#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
	INIT_WORK(pwork, pfunc, pwork);
#else
	/* 2.4-era task queue API */
	INIT_TQUEUE(pwork, pfunc, pwork);
#endif
}
408 
/* Queue @pwork on the kernel's default work queue. */
__inline static void _set_workitem(_workitem *pwork)
{
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
	schedule_work(pwork);
#else
	/* 2.4-era task queue API */
	schedule_task(pwork);
#endif
}
417 
/* Cancel @pwork and wait for it to finish if already running.
 * NOTE: on kernels older than 2.6.22 there is no per-item cancel, so
 * the fallback flushes ALL scheduled work, not just @pwork. */
__inline static void _cancel_workitem_sync(_workitem *pwork)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22))
	cancel_work_sync(pwork);
#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
	flush_scheduled_work();
#else
	flush_scheduled_tasks();
#endif
}
428 /*
429  * Global Mutex: can only be used at PASSIVE level.
430  *   */
431 
/* Spin (sleeping 10 ms per retry) until the counter transitions 0 -> 1.
 * NOTE(review): this is a sleeping busy-wait, usable only in process
 * context (see "PASSIVE level" comment above); _MutexCounter must be
 * an atomic_t-compatible object. */
#define ACQUIRE_GLOBAL_MUTEX(_MutexCounter)                              \
	{                                                               \
		while (atomic_inc_return((atomic_t *)&(_MutexCounter)) != 1) { \
			atomic_dec((atomic_t *)&(_MutexCounter));        \
			msleep(10);                          \
		}                                                           \
	}

/* Drop the counter back to 0, releasing the "global mutex". */
#define RELEASE_GLOBAL_MUTEX(_MutexCounter)                              \
	{                                                               \
		atomic_dec((atomic_t *)&(_MutexCounter));        \
	}
444 
rtw_netif_queue_stopped(struct net_device * pnetdev)445 static inline int rtw_netif_queue_stopped(struct net_device *pnetdev)
446 {
447 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
448 	return (netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 0)) &&
449 		netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 1)) &&
450 		netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 2)) &&
451 		netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 3)));
452 #else
453 	return netif_queue_stopped(pnetdev);
454 #endif
455 }
456 
/* Wake TX on @pnetdev (all queues on multiqueue-aware kernels). */
static inline void rtw_netif_wake_queue(struct net_device *pnetdev)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	netif_tx_wake_all_queues(pnetdev);
#else
	netif_wake_queue(pnetdev);
#endif
}

/* Allow TX on @pnetdev (all queues on multiqueue-aware kernels). */
static inline void rtw_netif_start_queue(struct net_device *pnetdev)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	netif_tx_start_all_queues(pnetdev);
#else
	netif_start_queue(pnetdev);
#endif
}

/* Stop TX on @pnetdev (all queues on multiqueue-aware kernels). */
static inline void rtw_netif_stop_queue(struct net_device *pnetdev)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	netif_tx_stop_all_queues(pnetdev);
#else
	netif_stop_queue(pnetdev);
#endif
}
/* Mark @pnetdev present again (e.g. after resume). */
static inline void rtw_netif_device_attach(struct net_device *pnetdev)
{
	netif_device_attach(pnetdev);
}
/* Mark @pnetdev as removed/suspended so the stack stops using it. */
static inline void rtw_netif_device_detach(struct net_device *pnetdev)
{
	netif_device_detach(pnetdev);
}
/* Report link-up to the network stack. */
static inline void rtw_netif_carrier_on(struct net_device *pnetdev)
{
	netif_carrier_on(pnetdev);
}
/* Report link-down to the network stack. */
static inline void rtw_netif_carrier_off(struct net_device *pnetdev)
{
	netif_carrier_off(pnetdev);
}
499 
/* Concatenate @src1 and @src2 into @dst (capacity @dst_len bytes,
 * always NUL-terminated when dst_len > 0).
 * Returns the number of characters actually stored in @dst, excluding
 * the terminating NUL (i.e. the truncated length when @dst is small).
 * Fix: snprintf() returns the WOULD-BE length, so after truncation the
 * original code passed a negative size (huge as size_t) and an
 * out-of-bounds pointer to the second snprintf() — buffer overflow.
 * Clamp @len to the buffer bounds after each call. */
static inline int rtw_merge_string(char *dst, int dst_len, const char *src1, const char *src2)
{
	int	len = 0;

	len += snprintf(dst + len, dst_len - len, "%s", src1);
	if (len >= dst_len)
		len = (dst_len > 0) ? dst_len - 1 : 0;
	len += snprintf(dst + len, dst_len - len, "%s", src2);
	if (len >= dst_len)
		len = (dst_len > 0) ? dst_len - 1 : 0;

	return len;
}
508 
509 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
510 	#define rtw_signal_process(pid, sig) kill_pid(find_vpid((pid)), (sig), 1)
511 #else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
512 	#define rtw_signal_process(pid, sig) kill_proc((pid), (sig), 1)
513 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
514 
515 
516 /* Suspend lock prevent system from going suspend */
517 #ifdef CONFIG_WAKELOCK
518 	#include <linux/wakelock.h>
519 #elif defined(CONFIG_ANDROID_POWER)
520 	#include <linux/android_power.h>
521 #endif
522 
523 /* limitation of path length */
524 #define PATH_LENGTH_MAX PATH_MAX
525 
526 /* Atomic integer operations */
527 #define ATOMIC_T atomic_t
528 
529 
530 #if defined(DBG_MEM_ERR_FREE)
531 void rtw_dbg_mem_init(void);
532 void rtw_dbg_mem_deinit(void);
533 #else
534 #define rtw_dbg_mem_init() do {} while (0)
535 #define rtw_dbg_mem_deinit() do {} while (0)
536 #endif /* DBG_MEM_ERR_FREE */
537 
538 #ifdef CONFIG_RTL8822CS_WIFI_HDF
539 extern void* get_rtl_priv_data(void);
540 #define rtw_netdev_priv(netdev) (((struct rtw_netdev_priv_indicator *)get_rtl_priv_data())->priv)
541 #else
542 #define rtw_netdev_priv(netdev) (((struct rtw_netdev_priv_indicator *)netdev_priv(netdev))->priv)
543 #endif
544 
545 #define NDEV_FMT "%s"
546 #define NDEV_ARG(ndev) ndev->name
547 #define ADPT_FMT "%s"
548 #define ADPT_ARG(adapter) (adapter->pnetdev ? adapter->pnetdev->name : NULL)
549 #define FUNC_NDEV_FMT "%s(%s)"
550 #define FUNC_NDEV_ARG(ndev) __func__, ndev->name
551 #define FUNC_ADPT_FMT "%s(%s)"
552 #define FUNC_ADPT_ARG(adapter) __func__, (adapter->pnetdev ? adapter->pnetdev->name : NULL)
553 
554 struct rtw_netdev_priv_indicator {
555 	void *priv;
556 	u32 sizeof_priv;
557 };
558 struct net_device *rtw_alloc_etherdev_with_old_priv(int sizeof_priv, void *old_priv);
559 extern struct net_device *rtw_alloc_etherdev(int sizeof_priv);
560 
561 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
562 #define rtw_get_same_net_ndev_by_name(ndev, name) dev_get_by_name(name)
563 #elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26))
564 #define rtw_get_same_net_ndev_by_name(ndev, name) dev_get_by_name(ndev->nd_net, name)
565 #else
566 #define rtw_get_same_net_ndev_by_name(ndev, name) dev_get_by_name(dev_net(ndev), name)
567 #endif
568 
569 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
570 #define rtw_get_bridge_ndev_by_name(name) dev_get_by_name(name)
571 #else
572 #define rtw_get_bridge_ndev_by_name(name) dev_get_by_name(&init_net, name)
573 #endif
574 
575 #define STRUCT_PACKED __attribute__ ((packed))
576 
577 #ifndef fallthrough
578 #define fallthrough do {} while (0)
579 #endif
580 
581 #endif /* __OSDEP_LINUX_SERVICE_H_ */
582