/******************************************************************************
 *
 * Copyright(c) 2007 - 2017 Realtek Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 *****************************************************************************/
#ifndef __OSDEP_BSD_SERVICE_H_
#define __OSDEP_BSD_SERVICE_H_


#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/param.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kdb.h>
#include <sys/kthread.h>
#include <sys/time.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/route.h>


#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_ratectl.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include "usbdevs.h"

#define	USB_DEBUG_VAR rum_debug
#include <dev/usb/usb_debug.h>

#if 1	// Baron: ported from Linux; these are all temporary shims that still need review
#include <sys/sema.h>
#include <sys/pcpu.h>	/* XXX for PCPU_GET */
//	typedef struct	semaphore _sema;
	typedef struct	sema _sema;
//	typedef	spinlock_t	_lock;
	typedef	struct mtx	_lock;
	typedef struct mtx	_mutex;
	typedef struct rtw_timer_list _timer;
	struct list_head {
		struct list_head *next, *prev;
	};
	struct	__queue	{
		struct	list_head	queue;
		_lock	lock;
	};

	typedef	struct mbuf _pkt;
	typedef struct mbuf	_buffer;

	typedef	struct	__queue	_queue;
	typedef	struct	list_head	_list;
	typedef	int	_OS_STATUS;
	//typedef u32	_irqL;
	typedef unsigned long _irqL;
	typedef	struct	ifnet * _nic_hdl;

	typedef pid_t		_thread_hdl_;
//	typedef struct thread	_thread_hdl_;
	typedef void		thread_return;
	typedef void	*thread_context;

	typedef void timer_hdl_return;
	typedef void *timer_hdl_context;
	typedef struct work_struct _workitem;
	typedef struct task _tasklet;

#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
/* emulate a modern version */
#define LINUX_VERSION_CODE KERNEL_VERSION(2, 6, 35)
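
/*
 * Example (illustrative): Linux code ported on top of this shim can keep
 * its version guards unchanged, e.g.
 *
 *	#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
 *		...	// new-API path
 *	#endif
 *
 * which always takes the new-API path here, since LINUX_VERSION_CODE is
 * pinned to 2.6.35 above.
 */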

#define WIRELESS_EXT -1
#define HZ hz
#define spin_lock_irqsave mtx_lock_irqsave
#define spin_lock_bh mtx_lock_irqsave
#define mtx_lock_irqsave(lock, x) mtx_lock(lock)	// was: {local_irq_save((x)); mtx_lock_spin((lock));}
//#define IFT_RTW	0xf9 // ifnet allocation type for RTW
#define free_netdev if_free
#define LIST_CONTAINOR(ptr, type, member) \
        ((type *)((char *)(ptr)-(SIZE_T)(&((type *)0)->member)))
#define container_of(p, t, n) ((t *)((char *)(p) - (SIZE_T)(&((t *)0)->n)))
/*
 * Linux timers are emulated using FreeBSD callout functions
 * (and taskqueue functionality).
 *
 * Currently no timer stats functionality.
 *
 * See (linux_compat) processes.c
 *
 */
struct rtw_timer_list {
	struct callout callout;
	void (*function)(void *);
	void *arg;
};
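
/*
 * Example (illustrative): with the callout-backed emulation, a timer is
 * armed much as it would be under Linux, via _init_timer()/_set_timer()
 * defined later in this header. "my_timeout" and "adapter" are
 * hypothetical names:
 *
 *	static void my_timeout(void *arg) { ... }
 *
 *	_timer t;
 *	_init_timer(&t, NULL, my_timeout, adapter);
 *	_set_timer(&t, hz / 10);
 *
 * Note the delay is handed straight to callout_reset(), so it is in
 * ticks, not milliseconds.
 */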

struct workqueue_struct;
struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
/* Values for the state of an item of work (work_struct) */
typedef enum work_state {
        WORK_STATE_UNSET = 0,
        WORK_STATE_CALLOUT_PENDING = 1,
        WORK_STATE_TASK_PENDING = 2,
        WORK_STATE_WORK_CANCELLED = 3
} work_state_t;

struct work_struct {
        struct task task; /* FreeBSD task */
        work_state_t state; /* the pending-or-otherwise state of the work */
        work_func_t func;
};
#define spin_unlock_irqrestore mtx_unlock_irqrestore
#define spin_unlock_bh mtx_unlock_irqrestore
#define mtx_unlock_irqrestore(lock, x)    mtx_unlock(lock)
extern void	_rtw_spinlock_init(_lock *plock);

// private structures modified to match FreeBSD
#define BITS_PER_LONG 32
union ktime {
	s64	tv64;
#if BITS_PER_LONG != 64 && !defined(CONFIG_KTIME_SCALAR)
	struct {
#ifdef __BIG_ENDIAN
	s32	sec, nsec;
#else
	s32	nsec, sec;
#endif
	} tv;
#endif
};
#define kmemcheck_bitfield_begin(name)
#define kmemcheck_bitfield_end(name)
#define CHECKSUM_NONE 0
typedef unsigned char *sk_buff_data_t;
typedef union ktime ktime_t;		/* Kill this */

void rtw_mtx_lock(_lock *plock);

void rtw_mtx_unlock(_lock *plock);

/**
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@sk: Socket we are owned by
 *	@tstamp: Time we arrived
 *	@dev: Device we arrived on/are leaving by
 *	@transport_header: Transport layer header
 *	@network_header: Network layer header
 *	@mac_header: Link layer header
 *	@_skb_refdst: destination entry (with norefcount bit)
 *	@sp: the security path, used for xfrm
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@mac_len: Length of link layer header
 *	@hdr_len: writable header length of cloned skb
 *	@csum: Checksum (must include start/offset pair)
 *	@csum_start: Offset from skb->head where checksumming should start
 *	@csum_offset: Offset from csum_start where checksum should be stored
 *	@local_df: allow local fragmentation
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@nohdr: Payload reference only, must not modify header
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@ip_summed: Driver fed us an IP checksum
 *	@priority: Packet queueing priority
 *	@users: User count - see {datagram,tcp}.c
 *	@protocol: Packet protocol from driver
 *	@truesize: Buffer size
 *	@head: Head of buffer
 *	@data: Data head pointer
 *	@tail: Tail pointer
 *	@end: End pointer
 *	@destructor: Destruct function
 *	@mark: Generic packet mark
 *	@nfct: Associated connection, if any
 *	@ipvs_property: skbuff is owned by ipvs
 *	@peeked: this packet has been seen already, so stats have been
 *		done for it, don't do them again
 *	@nf_trace: netfilter packet trace flag
 *	@nfctinfo: Relationship of this skb to the connection
 *	@nfct_reasm: netfilter conntrack re-assembly pointer
 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *	@skb_iif: ifindex of device we arrived on
 *	@rxhash: the packet hash computed on receive
 *	@queue_mapping: Queue mapping for multiqueue devices
 *	@tc_index: Traffic control index
 *	@tc_verd: traffic control verdict
 *	@ndisc_nodetype: router type (from link layer)
 *	@dma_cookie: a cookie to one of several possible DMA operations
 *		done by skb DMA functions
 *	@secmark: security marking
 *	@vlan_tci: vlan tag control information
 */

struct sk_buff {
	/* These two members must be first. */
	struct sk_buff		*next;
	struct sk_buff		*prev;

	ktime_t			tstamp;

	struct sock		*sk;
	//struct net_device	*dev;
	struct ifnet		*dev;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48] __aligned(8);

	unsigned long		_skb_refdst;
#ifdef CONFIG_XFRM
	struct	sec_path	*sp;
#endif
	unsigned int		len,
				data_len;
	u16			mac_len,
				hdr_len;
	union {
		u32		csum;
		struct {
			u16	csum_start;
			u16	csum_offset;
		} smbol2;
	} smbol1;
	u32			priority;
	kmemcheck_bitfield_begin(flags1);
	u8			local_df:1,
				cloned:1,
				ip_summed:2,
				nohdr:1,
				nfctinfo:3;
	u8			pkt_type:3,
				fclone:2,
				ipvs_property:1,
				peeked:1,
				nf_trace:1;
	kmemcheck_bitfield_end(flags1);
	u16			protocol;

	void			(*destructor)(struct sk_buff *skb);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct nf_conntrack	*nfct;
	struct sk_buff		*nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	struct nf_bridge_info	*nf_bridge;
#endif

	int			skb_iif;
#ifdef CONFIG_NET_SCHED
	u16			tc_index;	/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	u16			tc_verd;	/* traffic control verdict */
#endif
#endif

	u32			rxhash;

	kmemcheck_bitfield_begin(flags2);
	u16			queue_mapping:16;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	u8			ndisc_nodetype:2,
				deliver_no_wcard:1;
#else
	u8			deliver_no_wcard:1;
#endif
	kmemcheck_bitfield_end(flags2);

	/* 0/14 bit hole */

#ifdef CONFIG_NET_DMA
	dma_cookie_t		dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
	u32			secmark;
#endif
	union {
		u32		mark;
		u32		dropcount;
	} symbol3;

	u16			vlan_tci;

	sk_buff_data_t		transport_header;
	sk_buff_data_t		network_header;
	sk_buff_data_t		mac_header;
	/* These elements must be at the end, see alloc_skb() for details.  */
	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	atomic_t		users;
};
struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	u32		qlen;
	_lock	lock;
};
#define skb_tail_pointer(skb)	skb->tail
static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	//SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	if (skb->len < skb->data_len)
		printf("%s(),%d,error!\n", __FUNCTION__, __LINE__);
	return skb->data += len;
}
static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	#ifdef PLATFORM_FREEBSD
	return __skb_pull(skb, len);
	#else
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
	#endif //PLATFORM_FREEBSD
}
static inline u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
	list->qlen++;
}
static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, next->prev, next, list);
}
static inline void skb_queue_tail(struct sk_buff_head *list,
				   struct sk_buff *newsk)
{
	mtx_lock(&list->lock);
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
	mtx_unlock(&list->lock);
}
static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->next;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}

static inline struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	mtx_lock(&list->lock);
	skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	mtx_unlock(&list->lock);

	return skb;
}
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}
/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or drivers should need annotation to consolidate the
 * main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	_rtw_spinlock_init(&list->lock);
	__skb_queue_head_init(list);
}
unsigned long copy_from_user(void *to, const void *from, unsigned long n);
unsigned long copy_to_user(void *to, const void *from, unsigned long n);
struct sk_buff *dev_alloc_skb(unsigned int size);
struct sk_buff *skb_clone(const struct sk_buff *skb);
void dev_kfree_skb_any(struct sk_buff *skb);
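
/*
 * Example (illustrative): typical producer/consumer use of the queue
 * helpers above. pktbuf/pktlen and the sizes are hypothetical.
 *
 *	struct sk_buff_head rxq;
 *	struct sk_buff *skb;
 *
 *	skb_queue_head_init(&rxq);
 *
 *	skb = dev_alloc_skb(2048);
 *	if (skb != NULL) {
 *		skb_reserve(skb, 32);			// leave headroom
 *		memcpy(skb_put(skb, pktlen), pktbuf, pktlen);
 *		skb_queue_tail(&rxq, skb);		// locked enqueue
 *	}
 *
 *	while ((skb = skb_dequeue(&rxq)) != NULL)	// locked dequeue
 *		dev_kfree_skb_any(skb);
 */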
#endif	// Baron: ported from Linux; these are all temporary shims that still need review


#if 1	// kenny: Linux compatibility code for the Linux USB driver
#include <dev/usb/usb_compat_linux.h>

#define __init		// __attribute ((constructor))
#define __exit		// __attribute ((destructor))

/*
 * Definitions for the module_init and module_exit macros.
 *
 * These macros use the SYSINIT framework to call a specified
 * function (with no arguments) on module load or unload.
 */

void module_init_exit_wrapper(void *arg);

#define module_init(initfn)                             \
        SYSINIT(mod_init_ ## initfn,                    \
                SI_SUB_KLD, SI_ORDER_FIRST,             \
                module_init_exit_wrapper, initfn)

#define module_exit(exitfn)                             \
        SYSUNINIT(mod_exit_ ## exitfn,                  \
                  SI_SUB_KLD, SI_ORDER_ANY,             \
                  module_init_exit_wrapper, exitfn)
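
/*
 * Example (illustrative): a hypothetical entry point registered through
 * the macros above. The wrapper invokes it with no arguments when the
 * module is loaded (SI_SUB_KLD):
 *
 *	static void rtw_drv_entry(void)
 *	{
 *		printf("rtw: module loaded\n");
 *	}
 *	module_init(rtw_drv_entry);
 */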

/*
 * The usb_register and usb_deregister functions are used to register
 * usb drivers with the usb subsystem.
 */
int usb_register(struct usb_driver *driver);
int usb_deregister(struct usb_driver *driver);

/*
 * usb_get_dev and usb_put_dev - increment/decrement the reference count
 * of the usb device structure.
 *
 * Original body of usb_get_dev:
 *
 *       if (dev)
 *               get_device(&dev->dev);
 *       return dev;
 *
 * Reference counts are not currently used in this compatibility
 * layer. So these functions will do nothing.
 */
static inline struct usb_device *
usb_get_dev(struct usb_device *dev)
{
        return dev;
}

static inline void
usb_put_dev(struct usb_device *dev)
{
        return;
}


// rtw_usb_compat_linux
int rtw_usb_submit_urb(struct urb *urb, uint16_t mem_flags);
int rtw_usb_unlink_urb(struct urb *urb);
int rtw_usb_clear_halt(struct usb_device *dev, struct usb_host_endpoint *uhe);
int rtw_usb_control_msg(struct usb_device *dev, struct usb_host_endpoint *uhe,
    uint8_t request, uint8_t requesttype,
    uint16_t value, uint16_t index, void *data,
    uint16_t size, usb_timeout_t timeout);
int rtw_usb_set_interface(struct usb_device *dev, uint8_t iface_no, uint8_t alt_index);
int rtw_usb_setup_endpoint(struct usb_device *dev,
    struct usb_host_endpoint *uhe, usb_size_t bufsize);
struct urb *rtw_usb_alloc_urb(uint16_t iso_packets, uint16_t mem_flags);
struct usb_host_endpoint *rtw_usb_find_host_endpoint(struct usb_device *dev, uint8_t type, uint8_t ep);
struct usb_host_interface *rtw_usb_altnum_to_altsetting(const struct usb_interface *intf, uint8_t alt_index);
struct usb_interface *rtw_usb_ifnum_to_if(struct usb_device *dev, uint8_t iface_no);
void *rtw_usbd_get_intfdata(struct usb_interface *intf);
void rtw_usb_linux_register(void *arg);
void rtw_usb_linux_deregister(void *arg);
void rtw_usb_linux_free_device(struct usb_device *dev);
void rtw_usb_free_urb(struct urb *urb);
void rtw_usb_init_urb(struct urb *urb);
void rtw_usb_kill_urb(struct urb *urb);
void rtw_usb_set_intfdata(struct usb_interface *intf, void *data);
void rtw_usb_fill_bulk_urb(struct urb *urb, struct usb_device *udev,
    struct usb_host_endpoint *uhe, void *buf,
    int length, usb_complete_t callback, void *arg);
int rtw_usb_bulk_msg(struct usb_device *udev, struct usb_host_endpoint *uhe,
    void *data, int len, uint16_t *pactlen, usb_timeout_t timeout);
void *usb_get_intfdata(struct usb_interface *intf);
int usb_linux_init_endpoints(struct usb_device *udev);



typedef struct urb *PURB;

typedef unsigned gfp_t;
#define __GFP_WAIT      ((gfp_t)0x10u)  /* Can wait and reschedule? */
#define __GFP_HIGH      ((gfp_t)0x20u)  /* Should access emergency pools? */
#define __GFP_IO        ((gfp_t)0x40u)  /* Can start physical IO? */
#define __GFP_FS        ((gfp_t)0x80u)  /* Can call down to low-level FS? */
#define __GFP_COLD      ((gfp_t)0x100u) /* Cache-cold page required */
#define __GFP_NOWARN    ((gfp_t)0x200u) /* Suppress page allocation failure warning */
#define __GFP_REPEAT    ((gfp_t)0x400u) /* Retry the allocation.  Might fail */
#define __GFP_NOFAIL    ((gfp_t)0x800u) /* Retry for ever.  Cannot fail */
#define __GFP_NORETRY   ((gfp_t)0x1000u)/* Do not retry.  Might fail */
#define __GFP_NO_GROW   ((gfp_t)0x2000u)/* Slab internal usage */
#define __GFP_COMP      ((gfp_t)0x4000u)/* Add compound page metadata */
#define __GFP_ZERO      ((gfp_t)0x8000u)/* Return zeroed page on success */
#define __GFP_NOMEMALLOC ((gfp_t)0x10000u) /* Don't use emergency reserves */
#define __GFP_HARDWALL   ((gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */

/* This equals 0, but use constants in case they ever change */
#define GFP_NOWAIT      (GFP_ATOMIC & ~__GFP_HIGH)
/* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */
#define GFP_ATOMIC      (__GFP_HIGH)
#define GFP_NOIO        (__GFP_WAIT)
#define GFP_NOFS        (__GFP_WAIT | __GFP_IO)
#define GFP_KERNEL      (__GFP_WAIT | __GFP_IO | __GFP_FS)
#define GFP_USER        (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_HIGHUSER    (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
                         __GFP_HIGHMEM)

#endif	// kenny: Linux compatibility code for the Linux USB driver

__inline static _list *get_next(_list *list)
{
	return list->next;
}

__inline static _list *get_list_head(_queue *queue)
{
	return (&(queue->queue));
}

__inline static void _enter_critical(_lock *plock, _irqL *pirqL)
{
	spin_lock_irqsave(plock, *pirqL);
}

__inline static void _exit_critical(_lock *plock, _irqL *pirqL)
{
	spin_unlock_irqrestore(plock, *pirqL);
}

__inline static void _enter_critical_ex(_lock *plock, _irqL *pirqL)
{
	spin_lock_irqsave(plock, *pirqL);
}

__inline static void _exit_critical_ex(_lock *plock, _irqL *pirqL)
{
	spin_unlock_irqrestore(plock, *pirqL);
}

__inline static void _enter_critical_bh(_lock *plock, _irqL *pirqL)
{
	spin_lock_bh(plock, *pirqL);
}

__inline static void _exit_critical_bh(_lock *plock, _irqL *pirqL)
{
	spin_unlock_bh(plock, *pirqL);
}

__inline static void _enter_critical_mutex(_mutex *pmutex, _irqL *pirqL)
{
	mtx_lock(pmutex);
}

__inline static void _exit_critical_mutex(_mutex *pmutex, _irqL *pirqL)
{
	mtx_unlock(pmutex);
}

static inline void __list_del(struct list_head *prev, struct list_head *next)
{
	next->prev = prev;
	prev->next = next;
}
static inline void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list;
	list->prev = list;
}
__inline static void rtw_list_delete(_list *plist)
{
	__list_del(plist->prev, plist->next);
	INIT_LIST_HEAD(plist);
}
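
/*
 * Example (illustrative): the wrappers above map Linux-style critical
 * sections onto a single mtx; the _irqL argument is carried along only
 * for source compatibility. "entry" is a hypothetical element linked
 * into a _queue:
 *
 *	_irqL irqL;
 *
 *	_enter_critical_bh(&queue->lock, &irqL);
 *	rtw_list_delete(&entry->list);
 *	_exit_critical_bh(&queue->lock, &irqL);
 */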

static inline void timer_hdl(void *ctx)
{
	_timer *timer = (_timer *)ctx;

	rtw_mtx_lock(NULL);
	if (callout_pending(&timer->callout)) {
		/* callout was reset */
		rtw_mtx_unlock(NULL);
		return;
	}

	if (!callout_active(&timer->callout)) {
		/* callout was stopped */
		rtw_mtx_unlock(NULL);
		return;
	}

	callout_deactivate(&timer->callout);

	timer->function(timer->arg);

	rtw_mtx_unlock(NULL);
}

static inline void _init_timer(_timer *ptimer, _nic_hdl padapter, void *pfunc, void *cntx)
{
	ptimer->function = pfunc;
	ptimer->arg = cntx;
	callout_init(&ptimer->callout, CALLOUT_MPSAFE);
}

__inline static void _set_timer(_timer *ptimer, u32 delay_time)
{
	if (ptimer->function && ptimer->arg) {
		rtw_mtx_lock(NULL);
		callout_reset(&ptimer->callout, delay_time, timer_hdl, ptimer);
		rtw_mtx_unlock(NULL);
	}
}

__inline static void _cancel_timer(_timer *ptimer, u8 *bcancelled)
{
	rtw_mtx_lock(NULL);
	callout_drain(&ptimer->callout);
	rtw_mtx_unlock(NULL);
	*bcancelled = 1; /* assume a pending timer was cancelled */
}

__inline static void _init_workitem(_workitem *pwork, void *pfunc, PVOID cntx)
{
	printf("%s: not implemented yet!\n", __FUNCTION__);
}

__inline static void _set_workitem(_workitem *pwork)
{
	printf("%s: not implemented yet!\n", __FUNCTION__);
//	schedule_work(pwork);
}

//
// Global Mutex: can only be used at PASSIVE level.
//

#define ACQUIRE_GLOBAL_MUTEX(_MutexCounter)                              \
{                                                               \
}

#define RELEASE_GLOBAL_MUTEX(_MutexCounter)                              \
{                                                               \
}

#define ATOMIC_INIT(i)  { (i) }

static __inline void thread_enter(char *name);

// Atomic integer operations
typedef uint32_t ATOMIC_T;

#define rtw_netdev_priv(netdev) (((struct ifnet *)netdev)->if_softc)

#define rtw_free_netdev(netdev) if_free((netdev))

#define NDEV_FMT "%s"
#define NDEV_ARG(ndev) ""
#define ADPT_FMT "%s"
#define ADPT_ARG(adapter) ""
#define FUNC_NDEV_FMT "%s"
#define FUNC_NDEV_ARG(ndev) __func__
#define FUNC_ADPT_FMT "%s"
#define FUNC_ADPT_ARG(adapter) __func__

#define STRUCT_PACKED

#endif /* __OSDEP_BSD_SERVICE_H_ */
758