• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /******************************************************************************
2  *
3  * Copyright(c) 2007 - 2017 Realtek Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of version 2 of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12  * more details.
13  *
14  *****************************************************************************/
15 
16 
17 #define _OSDEP_SERVICE_C_
18 
19 #include <drv_types.h>
20 #ifdef CONFIG_RTL8822CS_WIFI_HDF
21 #include <linux/uaccess.h>
22 #include "net_device.h"
23 #endif
24 
25 #define RT_TAG	'1178'
26 
27 #ifdef DBG_MEMORY_LEAK
28 #ifdef PLATFORM_LINUX
29 atomic_t _malloc_cnt = ATOMIC_INIT(0);
30 atomic_t _malloc_size = ATOMIC_INIT(0);
31 #endif
32 #endif /* DBG_MEMORY_LEAK */
33 
34 
35 #ifdef DBG_MEM_ERR_FREE
36 
37 #if (KERNEL_VERSION(3, 7, 0) <= LINUX_VERSION_CODE)
38 
#define DBG_MEM_HASHBITS 10

/* Tags recorded per allocation so a free can be matched to its allocator. */
#define DBG_MEM_TYPE_PHY 0	/* physically contiguous (kmalloc-style) */
#define DBG_MEM_TYPE_VIR 1	/* virtually contiguous (vmalloc-style) */

/*
 * DBG_MEM_ERR_FREE is only for the debug purpose.
 *
 * There is the limitation that this mechanism only can
 * support one wifi device, and has problem if there
 * are two or more wifi devices with one driver on
 * the same system. It's because dbg_mem_ht is global
 * variable, and if we move this dbg_mem_ht into struct
 * dvobj_priv to support more wifi devices, the memory
 * allocation functions, like rtw_malloc(), need to have
 * the parameter dvobj to get relative hash table, and
 * then it is the huge changes for the driver currently.
 *
 */
struct hlist_head dbg_mem_ht[1 << DBG_MEM_HASHBITS];

/* One tracked allocation, hashed by the pointer value returned to the caller. */
struct hash_mem {
	void *mem;	/* pointer handed back by the allocator */
	int sz;		/* byte count requested at allocation time */
	int type;	/* DBG_MEM_TYPE_PHY or DBG_MEM_TYPE_VIR */
	struct hlist_node node;
};
66 
67 #endif /* LINUX_VERSION_CODE */
68 
/* Initialize the DBG_MEM_ERR_FREE allocation-tracking hash table. */
void rtw_dbg_mem_init(void)
{
#if (KERNEL_VERSION(3, 7, 0) <= LINUX_VERSION_CODE)
	hash_init(dbg_mem_ht);
#endif /* LINUX_VERSION_CODE */
}
75 
rtw_dbg_mem_deinit(void)76 void rtw_dbg_mem_deinit(void)
77 {
78 #if (KERNEL_VERSION(3, 7, 0) <= LINUX_VERSION_CODE)
79 	struct hlist_head *head;
80 	struct hlist_node *p;
81 	int i;
82 
83 	for (i = 0; i < HASH_SIZE(dbg_mem_ht); i++) {
84 		head = &dbg_mem_ht[i];
85 		p = head->first;
86 		while (p) {
87 			struct hlist_node *prev;
88 			struct hash_mem *hm;
89 
90 			hm = container_of(p, struct hash_mem, node);
91 			prev = p;
92 			p = p->next;
93 
94 			RTW_ERR("%s: memory leak - 0x%x\n", __func__, hm->mem);
95 			hash_del(prev);
96 			kfree(hm);
97 		}
98 	}
99 #endif /* LINUX_VERSION_CODE */
100 }
101 
102 #if (KERNEL_VERSION(3, 7, 0) <= LINUX_VERSION_CODE)
rtw_dbg_mem_find(void * mem)103 struct hash_mem *rtw_dbg_mem_find(void *mem)
104 {
105 	struct hash_mem *hm;
106 	struct hlist_head *head;
107 	struct hlist_node *p;
108 
109 	head = &dbg_mem_ht[hash_64((u64)(mem), DBG_MEM_HASHBITS)];
110 
111 	p = head->first;
112 	while (p) {
113 		hm = container_of(p, struct hash_mem, node);
114 		if (hm->mem == mem)
115 			goto out;
116 		p = p->next;
117 	}
118 	hm = NULL;
119 out:
120 	return hm;
121 }
122 
rtw_dbg_mem_alloc(void * mem,int sz,int type)123 void rtw_dbg_mem_alloc(void *mem, int sz, int type)
124 {
125 	struct hash_mem *hm;
126 
127 	hm = rtw_dbg_mem_find(mem);
128 	if (!hm) {
129 		hm = (struct hash_mem *)kmalloc(sizeof(*hm), GFP_ATOMIC);
130 		hm->mem = mem;
131 		hm->sz = sz;
132 		hm->type = type;
133 		hash_add(dbg_mem_ht, &hm->node, (u64)(mem));
134 	} else {
135 		RTW_ERR("%s mem(%x) is in hash already\n", __func__, mem);
136 		rtw_warn_on(1);
137 	}
138 }
139 
rtw_dbg_mem_free(void * mem,int sz,int type)140 bool rtw_dbg_mem_free(void *mem, int sz, int type)
141 {
142 	struct hash_mem *hm;
143 	bool ret;
144 
145 	hm = rtw_dbg_mem_find(mem);
146 	if (!hm) {
147 		RTW_ERR("%s cannot find allocated memory: %x\n",
148 			__func__, mem);
149 		rtw_warn_on(1);
150 		return false;
151 	}
152 
153 	if (hm->sz != sz) {
154 		RTW_ERR("%s memory (%x) size mismatch free(%d) != alloc(%d)\n",
155 			__func__, mem, sz, hm->sz);
156 		rtw_warn_on(1);
157 		ret = false;
158 		goto out;
159 	}
160 
161 	if (hm->type != type) {
162 		RTW_ERR("%s memory (%x) type mismatch free(%d) != alloc(%d)\n",
163 			__func__, mem, type, hm->type);
164 		rtw_warn_on(1);
165 		ret = false;
166 		goto out;
167 	}
168 	ret = true;
169 
170 out:
171 	hash_del(&hm->node);
172 	kfree(hm);
173 
174 	return ret;
175 }
176 
177 #endif /* LINUX_VERSION_CODE */
178 #endif /* DBG_MEM_ERR_FREE */
179 
180 #if defined(PLATFORM_LINUX)
181 /*
182 * Translate the OS dependent @param error_code to OS independent RTW_STATUS_CODE
183 * @return: one of RTW_STATUS_CODE
184 */
RTW_STATUS_CODE(int error_code)185 inline int RTW_STATUS_CODE(int error_code)
186 {
187 	if (error_code >= 0)
188 		return _SUCCESS;
189 
190 	switch (error_code) {
191 	/* case -ETIMEDOUT: */
192 	/*	return RTW_STATUS_TIMEDOUT; */
193 	default:
194 		return _FAIL;
195 	}
196 }
197 #else
/* Non-Linux builds: OS error codes pass through unchanged. */
inline int RTW_STATUS_CODE(int error_code)
{
	return error_code;
}
202 #endif
203 
rtw_atoi(u8 * s)204 u32 rtw_atoi(u8 *s)
205 {
206 
207 	int num = 0, flag = 0;
208 	int i;
209 	for (i = 0; i <= strlen(s); i++) {
210 		if (s[i] >= '0' && s[i] <= '9')
211 			num = num * 10 + s[i] - '0';
212 		else if (s[0] == '-' && i == 0)
213 			flag = 1;
214 		else
215 			break;
216 	}
217 
218 	if (flag == 1)
219 		num = num * -1;
220 
221 	return num;
222 
223 }
224 
/*
 * Allocate @sz bytes of virtually contiguous memory via the platform
 * backend. Returns the buffer, or NULL on failure (Linux/FreeBSD paths).
 */
inline void *_rtw_vmalloc(u32 sz)
{
	void *pbuf;
#ifdef PLATFORM_LINUX
	pbuf = vmalloc(sz);
#endif
#ifdef PLATFORM_FREEBSD
	pbuf = malloc(sz, M_DEVBUF, M_NOWAIT);
#endif

#ifdef PLATFORM_WINDOWS
	NdisAllocateMemoryWithTag(&pbuf, sz, RT_TAG);
#endif

#ifdef DBG_MEM_ERR_FREE
	/* track the allocation so a mismatched free can be flagged */
	if (pbuf)
		rtw_dbg_mem_alloc(pbuf, sz, DBG_MEM_TYPE_VIR);
#endif /* DBG_MEM_ERR_FREE */

#ifdef DBG_MEMORY_LEAK
#ifdef PLATFORM_LINUX
	if (pbuf != NULL) {
		atomic_inc(&_malloc_cnt);
		atomic_add(sz, &_malloc_size);
	}
#endif
#endif /* DBG_MEMORY_LEAK */

	return pbuf;
}
255 
/*
 * Allocate @sz bytes of zeroed, virtually contiguous memory.
 * The Linux path goes through _rtw_vmalloc() and so inherits its
 * DBG_MEM_ERR_FREE / DBG_MEMORY_LEAK accounting; the FreeBSD and
 * Windows paths allocate directly and are not tracked here.
 */
inline void *_rtw_zvmalloc(u32 sz)
{
	void *pbuf;
#ifdef PLATFORM_LINUX
	pbuf = _rtw_vmalloc(sz);
	if (pbuf != NULL)
		memset(pbuf, 0, sz);
#endif
#ifdef PLATFORM_FREEBSD
	pbuf = malloc(sz, M_DEVBUF, M_ZERO | M_NOWAIT);
#endif
#ifdef PLATFORM_WINDOWS
	NdisAllocateMemoryWithTag(&pbuf, sz, RT_TAG);
	if (pbuf != NULL)
		NdisFillMemory(pbuf, sz, 0);
#endif

	return pbuf;
}
275 
/*
 * Release a buffer obtained from _rtw_vmalloc()/_rtw_zvmalloc().
 * With DBG_MEM_ERR_FREE, a buffer that fails validation (unknown
 * pointer, size/type mismatch) is deliberately NOT freed so the
 * bad caller can be diagnosed.
 */
inline void _rtw_vmfree(void *pbuf, u32 sz)
{
#ifdef DBG_MEM_ERR_FREE
	if (!rtw_dbg_mem_free(pbuf, sz, DBG_MEM_TYPE_VIR))
		return;
#endif /* DBG_MEM_ERR_FREE */

#ifdef PLATFORM_LINUX
	vfree(pbuf);
#endif
#ifdef PLATFORM_FREEBSD
	free(pbuf, M_DEVBUF);
#endif
#ifdef PLATFORM_WINDOWS
	NdisFreeMemory(pbuf, sz, 0);
#endif

#ifdef DBG_MEMORY_LEAK
#ifdef PLATFORM_LINUX
	atomic_dec(&_malloc_cnt);
	atomic_sub(sz, &_malloc_size);
#endif
#endif /* DBG_MEMORY_LEAK */
}
300 
/*
 * Allocate @sz bytes of physically contiguous memory.
 * Returns NULL on failure. The Linux path picks GFP_ATOMIC when
 * called from interrupt context, GFP_KERNEL otherwise.
 */
void *_rtw_malloc(u32 sz)
{
	void *pbuf = NULL;

#ifdef PLATFORM_LINUX
#ifdef RTK_DMP_PLATFORM
	/* large requests on this platform come from the dvr allocator */
	if (sz > 0x4000)
		pbuf = dvr_malloc(sz);
	else
#endif
		pbuf = kmalloc(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);

#endif
#ifdef PLATFORM_FREEBSD
	pbuf = malloc(sz, M_DEVBUF, M_NOWAIT);
#endif
#ifdef PLATFORM_WINDOWS

	NdisAllocateMemoryWithTag(&pbuf, sz, RT_TAG);

#endif

#ifdef DBG_MEM_ERR_FREE
	/* track the allocation so a mismatched free can be flagged */
	if (pbuf)
		rtw_dbg_mem_alloc(pbuf, sz, DBG_MEM_TYPE_PHY);
#endif /* DBG_MEM_ERR_FREE */

#ifdef DBG_MEMORY_LEAK
#ifdef PLATFORM_LINUX
	if (pbuf != NULL) {
		atomic_inc(&_malloc_cnt);
		atomic_add(sz, &_malloc_size);
	}
#endif
#endif /* DBG_MEMORY_LEAK */

	return pbuf;

}
340 
341 
/*
 * Allocate @sz bytes of zeroed, physically contiguous memory.
 * Non-FreeBSD paths allocate via _rtw_malloc() and clear on success;
 * FreeBSD uses M_ZERO directly. Returns NULL on failure.
 */
void *_rtw_zmalloc(u32 sz)
{
#ifdef PLATFORM_FREEBSD
	return malloc(sz, M_DEVBUF, M_ZERO | M_NOWAIT);
#else /* PLATFORM_FREEBSD */
	void *pbuf = _rtw_malloc(sz);

	if (pbuf != NULL) {

#ifdef PLATFORM_LINUX
		memset(pbuf, 0, sz);
#endif

#ifdef PLATFORM_WINDOWS
		NdisFillMemory(pbuf, sz, 0);
#endif
	}

	return pbuf;
#endif /* PLATFORM_FREEBSD */
}
363 
/*
 * Release a buffer obtained from _rtw_malloc()/_rtw_zmalloc().
 * With DBG_MEM_ERR_FREE, a buffer that fails validation is NOT freed
 * so the offending caller can be diagnosed. @sz must match the size
 * used at allocation (also required by the Windows/RTK_DMP paths).
 */
void _rtw_mfree(void *pbuf, u32 sz)
{

#ifdef DBG_MEM_ERR_FREE
	if (!rtw_dbg_mem_free(pbuf, sz, DBG_MEM_TYPE_PHY))
		return;
#endif /* DBG_MEM_ERR_FREE */

#ifdef PLATFORM_LINUX
#ifdef RTK_DMP_PLATFORM
	/* mirror the split in _rtw_malloc(): large buffers came from dvr */
	if (sz > 0x4000)
		dvr_free(pbuf);
	else
#endif
		kfree(pbuf);

#endif
#ifdef PLATFORM_FREEBSD
	free(pbuf, M_DEVBUF);
#endif
#ifdef PLATFORM_WINDOWS

	NdisFreeMemory(pbuf, sz, 0);

#endif

#ifdef DBG_MEMORY_LEAK
#ifdef PLATFORM_LINUX
	atomic_dec(&_malloc_cnt);
	atomic_sub(sz, &_malloc_size);
#endif
#endif /* DBG_MEMORY_LEAK */

}
398 
399 #ifdef PLATFORM_FREEBSD
400 /* review again */
/* review again */
/*
 * FreeBSD substitute for the Linux dev_alloc_skb(): builds a minimal
 * sk_buff with a @size-byte data buffer. head/data/tail all start at
 * the buffer base, end points one past it, len starts at 0.
 * Returns NULL when either allocation fails (the skb shell is released
 * on the nodata path). Free with dev_kfree_skb_any().
 */
struct sk_buff *dev_alloc_skb(unsigned int size)
{
	struct sk_buff *skb = NULL;
	u8 *data = NULL;

	/* skb = _rtw_zmalloc(sizeof(struct sk_buff)); */ /* for skb->len, etc. */
	skb = _rtw_malloc(sizeof(struct sk_buff));
	if (!skb)
		goto out;
	data = _rtw_malloc(size);
	if (!data)
		goto nodata;

	skb->head = (unsigned char *)data;
	skb->data = (unsigned char *)data;
	skb->tail = (unsigned char *)data;
	skb->end = (unsigned char *)data + size;
	skb->len = 0;

out:
	return skb;
nodata:
	_rtw_mfree(skb, sizeof(struct sk_buff));
	skb = NULL;
	goto out;

}
429 
dev_kfree_skb_any(struct sk_buff * skb)430 void dev_kfree_skb_any(struct sk_buff *skb)
431 {
432 	/* printf("%s()-%d: skb->head = %p\n", __FUNCTION__, __LINE__, skb->head); */
433 	if (skb->head)
434 		_rtw_mfree(skb->head, 0);
435 	/* printf("%s()-%d: skb = %p\n", __FUNCTION__, __LINE__, skb); */
436 	if (skb)
437 		_rtw_mfree(skb, 0);
438 }
/* FreeBSD stub: skb cloning is not implemented; always returns NULL. */
struct sk_buff *skb_clone(const struct sk_buff *skb)
{
	return NULL;
}
443 
444 #endif /* PLATFORM_FREEBSD */
445 
446 #ifdef CONFIG_PCIE_DMA_COHERENT
/*
 * Allocate an skb whose @size-byte data buffer comes from DMA-coherent
 * memory on @pdev. The DMA address is stashed in skb->cb (the caller
 * must retrieve it from there to free or map the buffer later).
 * Returns NULL when either allocation fails.
 */
struct sk_buff *dev_alloc_skb_coherent(struct pci_dev *pdev, unsigned int size)
{
	struct sk_buff *skb = NULL;
	unsigned char *data = NULL;

	/* skb = _rtw_zmalloc(sizeof(struct sk_buff)); */ /* for skb->len, etc. */

	skb = _rtw_malloc(sizeof(struct sk_buff));
	if (!skb)
		goto out;

	/* DMA handle is written into skb->cb for the caller to keep */
	data = dma_alloc_coherent(&pdev->dev, size, (dma_addr_t *)&skb->cb, GFP_KERNEL);

	if (!data)
		goto nodata;

	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->len = 0;
out:
	return skb;
nodata:
	_rtw_mfree(skb, sizeof(struct sk_buff));
	skb = NULL;
	goto out;

}
476 #endif
477 
/*
 * Allocate an skb with @sz bytes of data room. The Linux path picks
 * GFP_ATOMIC in interrupt context, GFP_KERNEL otherwise.
 */
inline struct sk_buff *_rtw_skb_alloc(u32 sz)
{
#ifdef PLATFORM_LINUX
	return __dev_alloc_skb(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#endif /* PLATFORM_LINUX */

#ifdef PLATFORM_FREEBSD
	return dev_alloc_skb(sz);
#endif /* PLATFORM_FREEBSD */
}
488 
/* Release @skb; dev_kfree_skb_any() is safe from any context on Linux. */
inline void _rtw_skb_free(struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}
493 
/*
 * Deep-copy @skb (header and data). Returns NULL on failure, and
 * always NULL on FreeBSD where copying is not implemented.
 */
inline struct sk_buff *_rtw_skb_copy(const struct sk_buff *skb)
{
#ifdef PLATFORM_LINUX
	return skb_copy(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#endif /* PLATFORM_LINUX */

#ifdef PLATFORM_FREEBSD
	return NULL;
#endif /* PLATFORM_FREEBSD */
}
504 
/*
 * Clone @skb (shared data, separate header). The FreeBSD skb_clone()
 * above is a stub, so that path always yields NULL.
 */
inline struct sk_buff *_rtw_skb_clone(struct sk_buff *skb)
{
#ifdef PLATFORM_LINUX
	return skb_clone(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#endif /* PLATFORM_LINUX */

#ifdef PLATFORM_FREEBSD
	return skb_clone(skb);
#endif /* PLATFORM_FREEBSD */
}
/*
 * Copy @skb with a private header portion. Falls back to skb_clone()
 * on kernels older than 2.6.36 where pskb_copy() is unavailable;
 * FreeBSD has no implementation and returns NULL.
 */
inline struct sk_buff *_rtw_pskb_copy(struct sk_buff *skb)
{
#ifdef PLATFORM_LINUX
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36))
	return pskb_copy(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#else
	return skb_clone(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
#endif
#endif /* PLATFORM_LINUX */

#ifdef PLATFORM_FREEBSD
	return NULL;
#endif /* PLATFORM_FREEBSD */
}
529 
/*
 * Hand a received @skb up to the network stack via @ndev.
 * On the HDF (HarmonyOS driver framework) build the frame is routed
 * through NetIfRxNi() instead of the plain netif_rx().
 * Returns the stack's receive status (-1 on unsupported platforms).
 */
inline int _rtw_netif_rx(_nic_hdl ndev, struct sk_buff *skb)
{
#if defined(PLATFORM_LINUX)
	skb->dev = ndev;
#ifdef CONFIG_RTL8822CS_WIFI_HDF
	return NetIfRxNi(get_rtl_netdev(), skb);
#else
	return netif_rx(skb);
#endif
#elif defined(PLATFORM_FREEBSD)
	return (*ndev->if_input)(ndev, skb);
#else
	rtw_warn_on(1);
	return -1;
#endif
}
546 
547 #ifdef CONFIG_RTW_NAPI
/*
 * NAPI path: deliver @skb to the stack synchronously via
 * netif_receive_skb(). Returns -1 on non-Linux platforms.
 */
inline int _rtw_netif_receive_skb(_nic_hdl ndev, struct sk_buff *skb)
{
#if defined(PLATFORM_LINUX)
	skb->dev = ndev;
	return netif_receive_skb(skb);
#else
	rtw_warn_on(1);
	return -1;
#endif
}
558 
559 #ifdef CONFIG_RTW_GRO
/*
 * GRO path: feed @skb into @napi for generic receive offload
 * aggregation. Returns -1 on non-Linux platforms.
 */
inline gro_result_t _rtw_napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
#if defined(PLATFORM_LINUX)
	return napi_gro_receive(napi, skb);
#else
	rtw_warn_on(1);
	return -1;
#endif
}
569 #endif /* CONFIG_RTW_GRO */
570 #endif /* CONFIG_RTW_NAPI */
571 
/* Drain @list, releasing every queued skb. */
void _rtw_skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *entry;

	for (entry = skb_dequeue(list); entry; entry = skb_dequeue(list))
		_rtw_skb_free(entry);
}
579 
580 #ifdef CONFIG_USB_HCI
/*
 * Allocate a @size-byte DMA-consistent USB transfer buffer on @dev;
 * the bus address is written through @dma. Uses the pre-2.6.35 API
 * name on older kernels. Returns NULL on failure.
 */
inline void *_rtw_usb_buffer_alloc(struct usb_device *dev, size_t size, dma_addr_t *dma)
{
#ifdef PLATFORM_LINUX
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	return usb_alloc_coherent(dev, size, (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL), dma);
#else
	return usb_buffer_alloc(dev, size, (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL), dma);
#endif
#endif /* PLATFORM_LINUX */

#ifdef PLATFORM_FREEBSD
	return malloc(size, M_USBDEV, M_NOWAIT | M_ZERO);
#endif /* PLATFORM_FREEBSD */
}
/*
 * Release a buffer from _rtw_usb_buffer_alloc(); @size, @addr and @dma
 * must match the values from the allocation.
 */
inline void _rtw_usb_buffer_free(struct usb_device *dev, size_t size, void *addr, dma_addr_t dma)
{
#ifdef PLATFORM_LINUX
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	usb_free_coherent(dev, size, addr, dma);
#else
	usb_buffer_free(dev, size, addr, dma);
#endif
#endif /* PLATFORM_LINUX */

#ifdef PLATFORM_FREEBSD
	free(addr, M_USBDEV);
#endif /* PLATFORM_FREEBSD */
}
609 #endif /* CONFIG_USB_HCI */
610 
611 #if defined(DBG_MEM_ALLOC)
612 
/* Per-category memory statistics, kept by allocation type and
 * (optionally) by functional area. */
struct rtw_mem_stat {
	ATOMIC_T alloc; /* the memory bytes we allocate currently */
	ATOMIC_T peak; /* the peak memory bytes we allocate */
	ATOMIC_T alloc_cnt; /* the alloc count for alloc currently */
	ATOMIC_T alloc_err_cnt; /* the error times we fail to allocate memory */
};

/* stats indexed by allocation type (VIR/PHY/SKB/USB) */
struct rtw_mem_stat rtw_mem_type_stat[mstat_tf_idx(MSTAT_TYPE_MAX)];
#ifdef RTW_MEM_FUNC_STAT
/* stats indexed by functional area (IO/TX/RX/...) */
struct rtw_mem_stat rtw_mem_func_stat[mstat_ff_idx(MSTAT_FUNC_MAX)];
#endif

/* row labels for rtw_mstat_dump(); order matches the type indices */
char *MSTAT_TYPE_str[] = {
	"VIR",
	"PHY",
	"SKB",
	"USB",
};

#ifdef RTW_MEM_FUNC_STAT
/* row labels for the functional-area table; order matches the indices */
char *MSTAT_FUNC_str[] = {
	"UNSP",
	"IO",
	"TXIO",
	"RXIO",
	"TX",
	"RX",
};
#endif
642 
/*
 * Print the current memory statistics tables to @sel.
 * Values are snapshotted into local arrays first so each row is read
 * once before any printing starts.
 */
void rtw_mstat_dump(void *sel)
{
	int i;
	/* rows: 0=alloc, 1=peak, 2=alloc_cnt, 3=alloc_err_cnt */
	int value_t[4][mstat_tf_idx(MSTAT_TYPE_MAX)];
#ifdef RTW_MEM_FUNC_STAT
	int value_f[4][mstat_ff_idx(MSTAT_FUNC_MAX)];
#endif

	for (i = 0; i < mstat_tf_idx(MSTAT_TYPE_MAX); i++) {
		value_t[0][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].alloc));
		value_t[1][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].peak));
		value_t[2][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].alloc_cnt));
		value_t[3][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].alloc_err_cnt));
	}

#ifdef RTW_MEM_FUNC_STAT
	for (i = 0; i < mstat_ff_idx(MSTAT_FUNC_MAX); i++) {
		value_f[0][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].alloc));
		value_f[1][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].peak));
		value_f[2][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].alloc_cnt));
		value_f[3][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].alloc_err_cnt));
	}
#endif

	RTW_PRINT_SEL(sel, "===================== MSTAT =====================\n");
	RTW_PRINT_SEL(sel, "%4s %10s %10s %10s %10s\n", "TAG", "alloc", "peak", "aloc_cnt", "err_cnt");
	RTW_PRINT_SEL(sel, "-------------------------------------------------\n");
	for (i = 0; i < mstat_tf_idx(MSTAT_TYPE_MAX); i++)
		RTW_PRINT_SEL(sel, "%4s %10d %10d %10d %10d\n", MSTAT_TYPE_str[i], value_t[0][i], value_t[1][i], value_t[2][i], value_t[3][i]);
#ifdef RTW_MEM_FUNC_STAT
	RTW_PRINT_SEL(sel, "-------------------------------------------------\n");
	for (i = 0; i < mstat_ff_idx(MSTAT_FUNC_MAX); i++)
		RTW_PRINT_SEL(sel, "%4s %10d %10d %10d %10d\n", MSTAT_FUNC_str[i], value_f[0][i], value_f[1][i], value_f[2][i], value_f[3][i]);
#endif
}
678 
/*
 * Account one allocation event of @sz bytes under the type/function
 * categories encoded in @flags.
 * @status: MSTAT_ALLOC_SUCCESS updates counters and tracks the peak,
 * MSTAT_ALLOC_FAIL bumps the error counter, MSTAT_FREE subtracts.
 * NOTE(review): update_time doubles as a first-call flag, so the
 * zero-initialization below is not protected against concurrent first
 * callers — presumably the first allocation happens single-threaded
 * during driver init; confirm against callers.
 */
void rtw_mstat_update(const enum mstat_f flags, const MSTAT_STATUS status, u32 sz)
{
	static systime update_time = 0;
	int peak, alloc;
	int i;

	/* initialization */
	if (!update_time) {
		for (i = 0; i < mstat_tf_idx(MSTAT_TYPE_MAX); i++) {
			ATOMIC_SET(&(rtw_mem_type_stat[i].alloc), 0);
			ATOMIC_SET(&(rtw_mem_type_stat[i].peak), 0);
			ATOMIC_SET(&(rtw_mem_type_stat[i].alloc_cnt), 0);
			ATOMIC_SET(&(rtw_mem_type_stat[i].alloc_err_cnt), 0);
		}
		#ifdef RTW_MEM_FUNC_STAT
		for (i = 0; i < mstat_ff_idx(MSTAT_FUNC_MAX); i++) {
			ATOMIC_SET(&(rtw_mem_func_stat[i].alloc), 0);
			ATOMIC_SET(&(rtw_mem_func_stat[i].peak), 0);
			ATOMIC_SET(&(rtw_mem_func_stat[i].alloc_cnt), 0);
			ATOMIC_SET(&(rtw_mem_func_stat[i].alloc_err_cnt), 0);
		}
		#endif
	}

	switch (status) {
	case MSTAT_ALLOC_SUCCESS:
		ATOMIC_INC(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc_cnt));
		alloc = ATOMIC_ADD_RETURN(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc), sz);
		/* peak is read-then-set, not atomic as a pair; a close race
		 * can record a slightly stale peak */
		peak = ATOMIC_READ(&(rtw_mem_type_stat[mstat_tf_idx(flags)].peak));
		if (peak < alloc)
			ATOMIC_SET(&(rtw_mem_type_stat[mstat_tf_idx(flags)].peak), alloc);

		#ifdef RTW_MEM_FUNC_STAT
		ATOMIC_INC(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc_cnt));
		alloc = ATOMIC_ADD_RETURN(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc), sz);
		peak = ATOMIC_READ(&(rtw_mem_func_stat[mstat_ff_idx(flags)].peak));
		if (peak < alloc)
			ATOMIC_SET(&(rtw_mem_func_stat[mstat_ff_idx(flags)].peak), alloc);
		#endif
		break;

	case MSTAT_ALLOC_FAIL:
		ATOMIC_INC(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc_err_cnt));
		#ifdef RTW_MEM_FUNC_STAT
		ATOMIC_INC(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc_err_cnt));
		#endif
		break;

	case MSTAT_FREE:
		ATOMIC_DEC(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc_cnt));
		ATOMIC_SUB(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc), sz);
		#ifdef RTW_MEM_FUNC_STAT
		ATOMIC_DEC(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc_cnt));
		ATOMIC_SUB(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc), sz);
		#endif
		break;
	};

	/* if (rtw_get_passing_time_ms(update_time) > 5000) { */
	/*	rtw_mstat_dump(RTW_DBGDUMP); */
	update_time = rtw_get_current_time();
	/* } */
}
742 
#ifndef SIZE_MAX
	#define SIZE_MAX (~(size_t)0)
#endif

/* A sniff rule logs any allocation of category @flags whose size lies
 * in the inclusive range [lb, hb]. */
struct mstat_sniff_rule {
	enum mstat_f flags;
	size_t lb;
	size_t hb;
};

/* Default: report PHY allocations larger than one 4 KiB page. */
struct mstat_sniff_rule mstat_sniff_rules[] = {
	{MSTAT_TYPE_PHY, 4097, SIZE_MAX},
};

int mstat_sniff_rule_num = sizeof(mstat_sniff_rules) / sizeof(struct mstat_sniff_rule);
758 
match_mstat_sniff_rules(const enum mstat_f flags,const size_t size)759 bool match_mstat_sniff_rules(const enum mstat_f flags, const size_t size)
760 {
761 	int i;
762 	for (i = 0; i < mstat_sniff_rule_num; i++) {
763 		if (mstat_sniff_rules[i].flags == flags
764 			&& mstat_sniff_rules[i].lb <= size
765 			&& mstat_sniff_rules[i].hb >= size)
766 			return _TRUE;
767 	}
768 
769 	return _FALSE;
770 }
771 
/*
 * _rtw_vmalloc() wrapper with MSTAT accounting: logs sniff-rule hits
 * (tagged with the caller's @func:@line) and records the outcome.
 */
inline void *dbg_rtw_vmalloc(u32 sz, const enum mstat_f flags, const char *func, const int line)
{
	void *p;

	if (match_mstat_sniff_rules(flags, sz))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));

	p = _rtw_vmalloc((sz));

	rtw_mstat_update(
		flags
		, p ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
		, sz
	);

	return p;
}
789 
/* _rtw_zvmalloc() wrapper with MSTAT accounting; see dbg_rtw_vmalloc(). */
inline void *dbg_rtw_zvmalloc(u32 sz, const enum mstat_f flags, const char *func, const int line)
{
	void *p;

	if (match_mstat_sniff_rules(flags, sz))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));

	p = _rtw_zvmalloc((sz));

	rtw_mstat_update(
		flags
		, p ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
		, sz
	);

	return p;
}
807 
/* _rtw_vmfree() wrapper: subtracts @sz from the MSTAT counters. */
inline void dbg_rtw_vmfree(void *pbuf, u32 sz, const enum mstat_f flags, const char *func, const int line)
{

	if (match_mstat_sniff_rules(flags, sz))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));

	_rtw_vmfree((pbuf), (sz));

	rtw_mstat_update(
		flags
		, MSTAT_FREE
		, sz
	);
}
822 
/* _rtw_malloc() wrapper with MSTAT accounting; see dbg_rtw_vmalloc(). */
inline void *dbg_rtw_malloc(u32 sz, const enum mstat_f flags, const char *func, const int line)
{
	void *p;

	if (match_mstat_sniff_rules(flags, sz))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));

	p = _rtw_malloc((sz));

	rtw_mstat_update(
		flags
		, p ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
		, sz
	);

	return p;
}
840 
/* _rtw_zmalloc() wrapper with MSTAT accounting; see dbg_rtw_vmalloc(). */
inline void *dbg_rtw_zmalloc(u32 sz, const enum mstat_f flags, const char *func, const int line)
{
	void *p;

	if (match_mstat_sniff_rules(flags, sz))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));

	p = _rtw_zmalloc((sz));

	rtw_mstat_update(
		flags
		, p ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
		, sz
	);

	return p;
}
858 
/* _rtw_mfree() wrapper: subtracts @sz from the MSTAT counters. */
inline void dbg_rtw_mfree(void *pbuf, u32 sz, const enum mstat_f flags, const char *func, const int line)
{
	if (match_mstat_sniff_rules(flags, sz))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d)\n", func, line, __FUNCTION__, (sz));

	_rtw_mfree((pbuf), (sz));

	rtw_mstat_update(
		flags
		, MSTAT_FREE
		, sz
	);
}
872 
/*
 * _rtw_skb_alloc() wrapper: statistics use skb->truesize (actual
 * footprint) rather than the requested @size; failures, undersized
 * results, and sniff-rule hits are logged.
 */
inline struct sk_buff *dbg_rtw_skb_alloc(unsigned int size, const enum mstat_f flags, const char *func, int line)
{
	struct sk_buff *skb;
	unsigned int truesize = 0;

	skb = _rtw_skb_alloc(size);

	if (skb)
		truesize = skb->truesize;

	if (!skb || truesize < size || match_mstat_sniff_rules(flags, truesize))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%d), skb:%p, truesize=%u\n", func, line, __FUNCTION__, size, skb, truesize);

	rtw_mstat_update(
		flags
		, skb ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
		, truesize
	);

	return skb;
}
894 
/* _rtw_skb_free() wrapper: subtracts skb->truesize from the MSTAT counters. */
inline void dbg_rtw_skb_free(struct sk_buff *skb, const enum mstat_f flags, const char *func, int line)
{
	/* read truesize before the skb is released */
	unsigned int truesize = skb->truesize;

	if (match_mstat_sniff_rules(flags, truesize))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s, truesize=%u\n", func, line, __FUNCTION__, truesize);

	_rtw_skb_free(skb);

	rtw_mstat_update(
		flags
		, MSTAT_FREE
		, truesize
	);
}
910 
/*
 * _rtw_skb_copy() wrapper: accounts the copy's truesize and logs
 * failures, undersized copies, and sniff-rule hits.
 */
inline struct sk_buff *dbg_rtw_skb_copy(const struct sk_buff *skb, const enum mstat_f flags, const char *func, const int line)
{
	struct sk_buff *skb_cp;
	unsigned int truesize = skb->truesize;
	unsigned int cp_truesize = 0;

	skb_cp = _rtw_skb_copy(skb);
	if (skb_cp)
		cp_truesize = skb_cp->truesize;

	if (!skb_cp || cp_truesize < truesize || match_mstat_sniff_rules(flags, cp_truesize))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%u), skb_cp:%p, cp_truesize=%u\n", func, line, __FUNCTION__, truesize, skb_cp, cp_truesize);

	rtw_mstat_update(
		flags
		, skb_cp ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
		, cp_truesize
	);

	return skb_cp;
}
932 
/*
 * _rtw_skb_clone() wrapper: accounts the clone's truesize and logs
 * failures, undersized clones, and sniff-rule hits.
 */
inline struct sk_buff *dbg_rtw_skb_clone(struct sk_buff *skb, const enum mstat_f flags, const char *func, const int line)
{
	struct sk_buff *skb_cl;
	unsigned int truesize = skb->truesize;
	unsigned int cl_truesize = 0;

	skb_cl = _rtw_skb_clone(skb);
	if (skb_cl)
		cl_truesize = skb_cl->truesize;

	if (!skb_cl || cl_truesize < truesize || match_mstat_sniff_rules(flags, cl_truesize))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%u), skb_cl:%p, cl_truesize=%u\n", func, line, __FUNCTION__, truesize, skb_cl, cl_truesize);

	rtw_mstat_update(
		flags
		, skb_cl ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
		, cl_truesize
	);

	return skb_cl;
}
954 
/*
 * _rtw_netif_rx() wrapper: the skb leaves the driver here, so its
 * truesize is accounted as MSTAT_FREE after handover.
 */
inline int dbg_rtw_netif_rx(_nic_hdl ndev, struct sk_buff *skb, const enum mstat_f flags, const char *func, int line)
{
	int ret;
	/* read truesize before the stack consumes the skb */
	unsigned int truesize = skb->truesize;

	if (match_mstat_sniff_rules(flags, truesize))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s, truesize=%u\n", func, line, __FUNCTION__, truesize);

	ret = _rtw_netif_rx(ndev, skb);

	rtw_mstat_update(
		flags
		, MSTAT_FREE
		, truesize
	);

	return ret;
}
973 
974 #ifdef CONFIG_RTW_NAPI
/*
 * _rtw_netif_receive_skb() wrapper: the skb leaves the driver here,
 * so its truesize is accounted as MSTAT_FREE after handover.
 */
inline int dbg_rtw_netif_receive_skb(_nic_hdl ndev, struct sk_buff *skb, const enum mstat_f flags, const char *func, int line)
{
	int ret;
	/* read truesize before the stack consumes the skb */
	unsigned int truesize = skb->truesize;

	if (match_mstat_sniff_rules(flags, truesize))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s, truesize=%u\n", func, line, __FUNCTION__, truesize);

	ret = _rtw_netif_receive_skb(ndev, skb);

	rtw_mstat_update(
		flags
		, MSTAT_FREE
		, truesize
	);

	return ret;
}
993 
994 #ifdef CONFIG_RTW_GRO
dbg_rtw_napi_gro_receive(struct napi_struct * napi,struct sk_buff * skb,const enum mstat_f flags,const char * func,int line)995 inline gro_result_t dbg_rtw_napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb, const enum mstat_f flags, const char *func, int line)
996 {
997 	int ret;
998 	unsigned int truesize = skb->truesize;
999 
1000 	if (match_mstat_sniff_rules(flags, truesize))
1001 		RTW_INFO("DBG_MEM_ALLOC %s:%d %s, truesize=%u\n", func, line, __FUNCTION__, truesize);
1002 
1003 	ret = _rtw_napi_gro_receive(napi, skb);
1004 
1005 	rtw_mstat_update(
1006 		flags
1007 		, MSTAT_FREE
1008 		, truesize
1009 	);
1010 
1011 	return ret;
1012 }
1013 #endif /* CONFIG_RTW_GRO */
1014 #endif /* CONFIG_RTW_NAPI */
1015 
/* Drain @list, releasing each skb through the accounting wrapper. */
inline void dbg_rtw_skb_queue_purge(struct sk_buff_head *list, enum mstat_f flags, const char *func, int line)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL)
		dbg_rtw_skb_free(skb, flags, func, line);
}
1023 
1024 #ifdef CONFIG_USB_HCI
/* _rtw_usb_buffer_alloc() wrapper with MSTAT accounting. */
inline void *dbg_rtw_usb_buffer_alloc(struct usb_device *dev, size_t size, dma_addr_t *dma, const enum mstat_f flags, const char *func, int line)
{
	void *p;

	if (match_mstat_sniff_rules(flags, size))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%zu)\n", func, line, __FUNCTION__, size);

	p = _rtw_usb_buffer_alloc(dev, size, dma);

	rtw_mstat_update(
		flags
		, p ? MSTAT_ALLOC_SUCCESS : MSTAT_ALLOC_FAIL
		, size
	);

	return p;
}
1042 
/* _rtw_usb_buffer_free() wrapper: subtracts @size from the MSTAT counters. */
inline void dbg_rtw_usb_buffer_free(struct usb_device *dev, size_t size, void *addr, dma_addr_t dma, const enum mstat_f flags, const char *func, int line)
{

	if (match_mstat_sniff_rules(flags, size))
		RTW_INFO("DBG_MEM_ALLOC %s:%d %s(%zu)\n", func, line, __FUNCTION__, size);

	_rtw_usb_buffer_free(dev, size, addr, dma);

	rtw_mstat_update(
		flags
		, MSTAT_FREE
		, size
	);
}
1057 #endif /* CONFIG_USB_HCI */
1058 
1059 #endif /* defined(DBG_MEM_ALLOC) */
1060 
/*
 * Allocate an @h-by-@w 2D array of @size-byte elements as a single
 * zeroed chunk: an h-entry row-pointer table immediately followed by
 * the row data. Returns the table (cast to the caller's row-pointer
 * type) or NULL on failure; release with rtw_mfree2d().
 */
void *rtw_malloc2d(int h, int w, size_t size)
{
	void **table;
	char *rows;
	int row;

	table = (void **)rtw_zmalloc(h * sizeof(void *) + h * w * size);
	if (table == NULL) {
		RTW_INFO("%s: alloc memory fail!\n", __FUNCTION__);
		return NULL;
	}

	/* row data begins right after the pointer table */
	rows = (char *)(table + h);
	for (row = 0; row < h; row++)
		table[row] = rows + row * w * size;

	return table;
}
1076 
/* Release a 2D array from rtw_malloc2d(); @h/@w/@size must match. */
void rtw_mfree2d(void *pbuf, int h, int w, int size)
{
	rtw_mfree((u8 *)pbuf, h * sizeof(void *) + w * h * size);
}
1081 
/* Release an OS packet (skb on Linux, mbuf chain on FreeBSD). */
inline void rtw_os_pkt_free(_pkt *pkt)
{
#if defined(PLATFORM_LINUX)
	rtw_skb_free(pkt);
#elif defined(PLATFORM_FREEBSD)
	m_freem(pkt);
#else
	#error "TBD\n"
#endif
}
1092 
rtw_os_pkt_copy(_pkt * pkt)1093 inline _pkt *rtw_os_pkt_copy(_pkt *pkt)
1094 {
1095 #if defined(PLATFORM_LINUX)
1096 	return rtw_skb_copy(pkt);
1097 #elif defined(PLATFORM_FREEBSD)
1098 	return m_dup(pkt, M_NOWAIT);
1099 #else
1100 	#error "TBD\n"
1101 #endif
1102 }
1103 
rtw_os_pkt_data(_pkt * pkt)1104 inline void *rtw_os_pkt_data(_pkt *pkt)
1105 {
1106 #if defined(PLATFORM_LINUX)
1107 	return pkt->data;
1108 #elif defined(PLATFORM_FREEBSD)
1109 	return pkt->m_data;
1110 #else
1111 	#error "TBD\n"
1112 #endif
1113 }
1114 
rtw_os_pkt_len(_pkt * pkt)1115 inline u32 rtw_os_pkt_len(_pkt *pkt)
1116 {
1117 #if defined(PLATFORM_LINUX)
1118 	return pkt->len;
1119 #elif defined(PLATFORM_FREEBSD)
1120 	return pkt->m_pkthdr.len;
1121 #else
1122 	#error "TBD\n"
1123 #endif
1124 }
1125 
/*
 * Copy sz bytes from src to dst.  Regions must not overlap
 * (use _rtw_memmove() for overlapping copies).
 */
void _rtw_memcpy(void *dst, const void *src, u32 sz)
{

#if defined(PLATFORM_LINUX) || defined (PLATFORM_FREEBSD)

	memcpy(dst, src, sz);

#endif

#ifdef PLATFORM_WINDOWS

	NdisMoveMemory(dst, src, sz);

#endif

}
1142 
/* Copy sz bytes from src to dst; the regions may overlap. */
inline void _rtw_memmove(void *dst, const void *src, u32 sz)
{
#if defined(PLATFORM_LINUX)
	memmove(dst, src, sz);
#else
	#error "TBD\n"
#endif
}
1151 
/*
 * Equality test over sz bytes.
 * NOTE: semantics are inverted relative to libc memcmp(): returns
 * _TRUE when the two regions are identical, _FALSE otherwise.
 */
int	_rtw_memcmp(const void *dst, const void *src, u32 sz)
{

#if defined(PLATFORM_LINUX) || defined (PLATFORM_FREEBSD)
	/* under Linux/GNU/GLibc, the return value of memcmp for two same mem. chunk is 0 */

	if (!(memcmp(dst, src, sz)))
		return _TRUE;
	else
		return _FALSE;
#endif


#ifdef PLATFORM_WINDOWS
	/* under Windows, the return value of NdisEqualMemory for two same mem. chunk is 1 */

	if (NdisEqualMemory(dst, src, sz))
		return _TRUE;
	else
		return _FALSE;

#endif



}
1178 
/*
 * memcmp-style byte comparison over sz bytes.
 * Returns 0 when the regions match (or sz == 0); otherwise the
 * difference (dst byte - src byte) at the first mismatching position.
 */
int _rtw_memcmp2(const void *dst, const void *src, u32 sz)
{
	const unsigned char *a = dst;
	const unsigned char *b = src;
	u32 i;

	for (i = 0; i < sz; i++) {
		if (a[i] != b[i])
			return a[i] - b[i];
	}

	return 0;
}
1196 
/* Fill sz bytes at pbuf with the byte value c. */
void _rtw_memset(void *pbuf, int c, u32 sz)
{

#if defined(PLATFORM_LINUX) || defined (PLATFORM_FREEBSD)

	memset(pbuf, c, sz);

#endif

#ifdef PLATFORM_WINDOWS
#if 0
	NdisZeroMemory(pbuf, sz);
	if (c != 0)
		memset(pbuf, c, sz);
#else
	NdisFillMemory(pbuf, sz, c);
#endif
#endif

}
1217 
1218 #ifdef PLATFORM_FREEBSD
/*
 * Splice pnew into a doubly-linked list between pprev and pnext
 * (FreeBSD-only replacement for the Linux list helper of the same name).
 * Caller must hold whatever lock protects the list.
 */
static inline void __list_add(_list *pnew, _list *pprev, _list *pnext)
{
	pnext->prev = pnew;
	pnew->next = pnext;
	pnew->prev = pprev;
	pprev->next = pnew;
}
1226 #endif /* PLATFORM_FREEBSD */
1227 
1228 
/* Initialize a list head to the empty state (points at itself). */
void _rtw_init_listhead(_list *list)
{

#ifdef PLATFORM_LINUX

	INIT_LIST_HEAD(list);

#endif

#ifdef PLATFORM_FREEBSD
	list->next = list;
	list->prev = list;
#endif
#ifdef PLATFORM_WINDOWS

	NdisInitializeListHead(list);

#endif

}
1249 
1250 
1251 /*
1252 For the following list_xxx operations,
1253 caller must guarantee the atomic context.
1254 Otherwise, there will be racing condition.
1255 */
rtw_is_list_empty(_list * phead)1256 u32	rtw_is_list_empty(_list *phead)
1257 {
1258 
1259 #ifdef PLATFORM_LINUX
1260 
1261 	if (list_empty(phead))
1262 		return _TRUE;
1263 	else
1264 		return _FALSE;
1265 
1266 #endif
1267 #ifdef PLATFORM_FREEBSD
1268 
1269 	if (phead->next == phead)
1270 		return _TRUE;
1271 	else
1272 		return _FALSE;
1273 
1274 #endif
1275 
1276 
1277 #ifdef PLATFORM_WINDOWS
1278 
1279 	if (IsListEmpty(phead))
1280 		return _TRUE;
1281 	else
1282 		return _FALSE;
1283 
1284 #endif
1285 
1286 
1287 }
1288 
/* Insert plist right after phead (new first element). Caller locks. */
void rtw_list_insert_head(_list *plist, _list *phead)
{

#ifdef PLATFORM_LINUX
	list_add(plist, phead);
#endif

#ifdef PLATFORM_FREEBSD
	__list_add(plist, phead, phead->next);
#endif

#ifdef PLATFORM_WINDOWS
	InsertHeadList(phead, plist);
#endif
}

/* Insert plist right before phead (new last element). Caller locks. */
void rtw_list_insert_tail(_list *plist, _list *phead)
{

#ifdef PLATFORM_LINUX

	list_add_tail(plist, phead);

#endif
#ifdef PLATFORM_FREEBSD

	__list_add(plist, phead->prev, phead);

#endif
#ifdef PLATFORM_WINDOWS

	InsertTailList(phead, plist);

#endif

}
1325 
/* Join 'list' onto the front of 'head'; 'list' itself is left stale. */
inline void rtw_list_splice(_list *list, _list *head)
{
#ifdef PLATFORM_LINUX
	list_splice(list, head);
#else
	#error "TBD\n"
#endif
}

/* Join 'list' onto the front of 'head' and reinitialize 'list' empty. */
inline void rtw_list_splice_init(_list *list, _list *head)
{
#ifdef PLATFORM_LINUX
	list_splice_init(list, head);
#else
	#error "TBD\n"
#endif
}

/* Join 'list' onto the tail of 'head'. */
inline void rtw_list_splice_tail(_list *list, _list *head)
{
#ifdef PLATFORM_LINUX
	/* list_splice_tail() only exists since kernel 2.6.27 */
	#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27))
	if (!list_empty(list))
		__list_splice(list, head);
	#else
	list_splice_tail(list, head);
	#endif
#else
	#error "TBD\n"
#endif
}
1357 
/* Thin wrappers over the kernel hash-list (hlist) primitives. */

/* Initialize a hash-list head to empty. */
inline void rtw_hlist_head_init(rtw_hlist_head *h)
{
#ifdef PLATFORM_LINUX
	INIT_HLIST_HEAD(h);
#else
	#error "TBD\n"
#endif
}

/* Add node n at the front of hash list h. Caller locks. */
inline void rtw_hlist_add_head(rtw_hlist_node *n, rtw_hlist_head *h)
{
#ifdef PLATFORM_LINUX
	hlist_add_head(n, h);
#else
	#error "TBD\n"
#endif
}

/* Unlink node n from its hash list. Caller locks. */
inline void rtw_hlist_del(rtw_hlist_node *n)
{
#ifdef PLATFORM_LINUX
	hlist_del(n);
#else
	#error "TBD\n"
#endif
}

/* RCU-safe variant of rtw_hlist_add_head(). */
inline void rtw_hlist_add_head_rcu(rtw_hlist_node *n, rtw_hlist_head *h)
{
#ifdef PLATFORM_LINUX
	hlist_add_head_rcu(n, h);
#else
	#error "TBD\n"
#endif
}

/* RCU-safe variant of rtw_hlist_del(). */
inline void rtw_hlist_del_rcu(rtw_hlist_node *n)
{
#ifdef PLATFORM_LINUX
	hlist_del_rcu(n);
#else
	#error "TBD\n"
#endif
}
1402 
/*
 * Initialize driver timer 'ptimer' to call pfunc(ctx) on expiry.
 * padapter is the owning adapter; only its platform net-device /
 * interface handle is used by the per-OS _init_timer() backend.
 */
void rtw_init_timer(_timer *ptimer, void *padapter, void *pfunc, void *ctx)
{
	_adapter *adapter = (_adapter *)padapter;

#ifdef PLATFORM_LINUX
	_init_timer(ptimer, adapter->pnetdev, pfunc, ctx);
#endif
#ifdef PLATFORM_FREEBSD
	_init_timer(ptimer, adapter->pifp, pfunc, ctx);
#endif
#ifdef PLATFORM_WINDOWS
	_init_timer(ptimer, adapter->hndis_adapter, pfunc, ctx);
#endif
}
1417 
1418 /*
1419 
1420 Caller must check if the list is empty before calling rtw_list_delete
1421 
1422 */
1423 
1424 
/* Initialize a counting semaphore with initial count 'init_val'. */
void _rtw_init_sema(_sema	*sema, int init_val)
{

#ifdef PLATFORM_LINUX

	sema_init(sema, init_val);

#endif
#ifdef PLATFORM_FREEBSD
	sema_init(sema, init_val, "rtw_drv");
#endif
#ifdef PLATFORM_OS_XP

	KeInitializeSemaphore(sema, init_val,  SEMA_UPBND); /* count=0; */

#endif

#ifdef PLATFORM_OS_CE
	/* lazily create the handle only once */
	if (*sema == NULL)
		*sema = CreateSemaphore(NULL, init_val, SEMA_UPBND, NULL);
#endif

}

/*
 * Destroy a semaphore created by _rtw_init_sema().
 * No-op on Linux/XP where kernel semaphores need no teardown.
 */
void _rtw_free_sema(_sema	*sema)
{
#ifdef PLATFORM_FREEBSD
	sema_destroy(sema);
#endif
#ifdef PLATFORM_OS_CE
	CloseHandle(*sema);
#endif

}

/* Post (V operation): increment the count, waking one waiter if any. */
void _rtw_up_sema(_sema	*sema)
{

#ifdef PLATFORM_LINUX

	up(sema);

#endif
#ifdef PLATFORM_FREEBSD
	sema_post(sema);
#endif
#ifdef PLATFORM_OS_XP

	KeReleaseSemaphore(sema, IO_NETWORK_INCREMENT, 1,  FALSE);

#endif

#ifdef PLATFORM_OS_CE
	ReleaseSemaphore(*sema,  1,  NULL);
#endif
}

/*
 * Wait (P operation): block until the count is positive, then take it.
 * Returns _SUCCESS on acquisition; on Linux returns _FAIL when the
 * sleep is interrupted by a signal (down_interruptible()).
 */
u32 _rtw_down_sema(_sema *sema)
{

#ifdef PLATFORM_LINUX

	if (down_interruptible(sema))
		return _FAIL;
	else
		return _SUCCESS;

#endif
#ifdef PLATFORM_FREEBSD
	sema_wait(sema);
	return  _SUCCESS;
#endif
#ifdef PLATFORM_OS_XP

	if (STATUS_SUCCESS == KeWaitForSingleObject(sema, Executive, KernelMode, TRUE, NULL))
		return  _SUCCESS;
	else
		return _FAIL;
#endif

#ifdef PLATFORM_OS_CE
	if (WAIT_OBJECT_0 == WaitForSingleObject(*sema, INFINITE))
		return _SUCCESS;
	else
		return _FAIL;
#endif
}
1512 
/*
 * Terminate the calling kernel thread, completing 'comp' so the thread
 * that requested the stop can synchronize on the exit.
 * Does not return on Linux / CE / XP.
 *
 * Fix: complete_and_exit() was renamed to kthread_complete_and_exit()
 * and the old symbol removed in Linux 5.17, so pick the right API by
 * kernel version.
 */
inline void thread_exit(_completion *comp)
{
#ifdef PLATFORM_LINUX
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0))
	kthread_complete_and_exit(comp, 0);
#else
	complete_and_exit(comp, 0);
#endif
#endif

#ifdef PLATFORM_FREEBSD
	printf("%s", "RTKTHREAD_exit");
#endif

#ifdef PLATFORM_OS_CE
	ExitThread(STATUS_SUCCESS);
#endif

#ifdef PLATFORM_OS_XP
	PsTerminateSystemThread(STATUS_SUCCESS);
#endif
}
1531 
/* Initialize a completion object to the not-completed state. */
inline void _rtw_init_completion(_completion *comp)
{
#ifdef PLATFORM_LINUX
	init_completion(comp);
#endif
}
/* Wait for completion with a fixed 3-second timeout (result ignored). */
inline void _rtw_wait_for_comp_timeout(_completion *comp)
{
#ifdef PLATFORM_LINUX
	wait_for_completion_timeout(comp, msecs_to_jiffies(3000));
#endif
}
/* Wait (uninterruptibly, no timeout) until comp is completed. */
inline void _rtw_wait_for_comp(_completion *comp)
{
#ifdef PLATFORM_LINUX
	wait_for_completion(comp);
#endif
}
1550 
/* Initialize a mutex; pair with _rtw_mutex_free() on teardown. */
void	_rtw_mutex_init(_mutex *pmutex)
{
#ifdef PLATFORM_LINUX

	/* init_MUTEX() was replaced by mutex_init() in kernel 2.6.37 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
	mutex_init(pmutex);
#else
	init_MUTEX(pmutex);
#endif

#endif
#ifdef PLATFORM_FREEBSD
	mtx_init(pmutex, "", NULL, MTX_DEF | MTX_RECURSE);
#endif
#ifdef PLATFORM_OS_XP

	KeInitializeMutex(pmutex, 0);

#endif

#ifdef PLATFORM_OS_CE
	*pmutex =  CreateMutex(NULL, _FALSE, NULL);
#endif
}
1575 
void	_rtw_mutex_free(_mutex *pmutex);
/*
 * Destroy a mutex created by _rtw_mutex_init().
 *
 * Fixes:
 * - The PLATFORM_FREEBSD branch was nested inside the PLATFORM_LINUX
 *   conditional, making it unreachable; it is now a sibling branch.
 * - _rtw_mutex_init() creates the FreeBSD mutex with mtx_init(), so
 *   the matching mtx_destroy() is used instead of sema_destroy().
 * - The OS_CE handle from CreateMutex() was leaked; close it here.
 */
void	_rtw_mutex_free(_mutex *pmutex)
{
#ifdef PLATFORM_LINUX

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
	mutex_destroy(pmutex);
#endif

#endif

#ifdef PLATFORM_FREEBSD
	mtx_destroy(pmutex);
#endif

#ifdef PLATFORM_OS_XP
	/* KMUTEX objects need no explicit destruction */
#endif

#ifdef PLATFORM_OS_CE
	CloseHandle(*pmutex);
#endif
}
1600 
/* Initialize a spinlock to the unlocked state. */
void	_rtw_spinlock_init(_lock *plock)
{

#ifdef PLATFORM_LINUX

	spin_lock_init(plock);

#endif
#ifdef PLATFORM_FREEBSD
	mtx_init(plock, "", NULL, MTX_DEF | MTX_RECURSE);
#endif
#ifdef PLATFORM_WINDOWS

	NdisAllocateSpinLock(plock);

#endif

}

/*
 * Release spinlock resources.
 * No Linux branch: kernel spinlocks require no teardown.
 */
void	_rtw_spinlock_free(_lock *plock)
{
#ifdef PLATFORM_FREEBSD
	mtx_destroy(plock);
#endif

#ifdef PLATFORM_WINDOWS

	NdisFreeSpinLock(plock);

#endif

}
1633 #ifdef PLATFORM_FREEBSD
1634 extern PADAPTER prtw_lock;
1635 
/*
 * Acquire the adapter-global FreeBSD mutex.
 * NOTE: 'plock' is ignored; the lock taken is prtw_lock->glock.
 */
void rtw_mtx_lock(_lock *plock)
{
	if (prtw_lock)
		mtx_lock(&prtw_lock->glock);
	else
		printf("%s prtw_lock==NULL", __FUNCTION__);
}
/*
 * Release the adapter-global FreeBSD mutex.
 * NOTE: 'plock' is ignored; the lock released is prtw_lock->glock.
 */
void rtw_mtx_unlock(_lock *plock)
{
	if (prtw_lock)
		mtx_unlock(&prtw_lock->glock);
	else
		printf("%s prtw_lock==NULL", __FUNCTION__);

}
1651 #endif /* PLATFORM_FREEBSD */
1652 
1653 
_rtw_spinlock(_lock * plock)1654 void	_rtw_spinlock(_lock	*plock)
1655 {
1656 
1657 #ifdef PLATFORM_LINUX
1658 
1659 	spin_lock(plock);
1660 
1661 #endif
1662 #ifdef PLATFORM_FREEBSD
1663 	mtx_lock(plock);
1664 #endif
1665 #ifdef PLATFORM_WINDOWS
1666 
1667 	NdisAcquireSpinLock(plock);
1668 
1669 #endif
1670 
1671 }
1672 
_rtw_spinunlock(_lock * plock)1673 void	_rtw_spinunlock(_lock *plock)
1674 {
1675 
1676 #ifdef PLATFORM_LINUX
1677 
1678 	spin_unlock(plock);
1679 
1680 #endif
1681 #ifdef PLATFORM_FREEBSD
1682 	mtx_unlock(plock);
1683 #endif
1684 #ifdef PLATFORM_WINDOWS
1685 
1686 	NdisReleaseSpinLock(plock);
1687 
1688 #endif
1689 }
1690 
1691 
_rtw_spinlock_ex(_lock * plock)1692 void	_rtw_spinlock_ex(_lock	*plock)
1693 {
1694 
1695 #ifdef PLATFORM_LINUX
1696 
1697 	spin_lock(plock);
1698 
1699 #endif
1700 #ifdef PLATFORM_FREEBSD
1701 	mtx_lock(plock);
1702 #endif
1703 #ifdef PLATFORM_WINDOWS
1704 
1705 	NdisDprAcquireSpinLock(plock);
1706 
1707 #endif
1708 
1709 }
1710 
_rtw_spinunlock_ex(_lock * plock)1711 void	_rtw_spinunlock_ex(_lock *plock)
1712 {
1713 
1714 #ifdef PLATFORM_LINUX
1715 
1716 	spin_unlock(plock);
1717 
1718 #endif
1719 #ifdef PLATFORM_FREEBSD
1720 	mtx_unlock(plock);
1721 #endif
1722 #ifdef PLATFORM_WINDOWS
1723 
1724 	NdisDprReleaseSpinLock(plock);
1725 
1726 #endif
1727 }
1728 
1729 
1730 
/* Initialize a queue: empty list head plus its protecting spinlock. */
void _rtw_init_queue(_queue *pqueue)
{
	_rtw_init_listhead(&(pqueue->queue));
	_rtw_spinlock_init(&(pqueue->lock));
}

/* Tear down a queue's lock; entries must already have been drained. */
void _rtw_deinit_queue(_queue *pqueue)
{
	_rtw_spinlock_free(&(pqueue->lock));
}

/* Return _TRUE when the queue holds no entries (unlocked check). */
u32	  _rtw_queue_empty(_queue	*pqueue)
{
	return rtw_is_list_empty(&(pqueue->queue));
}


/* Return _TRUE when plist has iterated back to the list head. */
u32 rtw_end_of_queue_search(_list *head, _list *plist)
{
	if (head == plist)
		return _TRUE;
	else
		return _FALSE;
}
1755 
1756 
/*
 * Return the current monotonic tick in the platform's native unit:
 * jiffies on Linux, whole seconds on FreeBSD, 100 ns intervals on
 * Windows.  Convert with _rtw_systime_to_ms() / _rtw_ms_to_systime().
 */
systime _rtw_get_current_time(void)
{

#ifdef PLATFORM_LINUX
	return jiffies;
#endif
#ifdef PLATFORM_FREEBSD
	struct timeval tvp;
	getmicrotime(&tvp);
	return tvp.tv_sec;
#endif
#ifdef PLATFORM_WINDOWS
	LARGE_INTEGER	SystemTime;
	NdisGetCurrentSystemTime(&SystemTime);
	return SystemTime.LowPart;/* count of 100-nanosecond intervals */
#endif
}
1774 
/* Convert a native systime tick value to milliseconds. */
inline u32 _rtw_systime_to_ms(systime stime)
{
#ifdef PLATFORM_LINUX
	return jiffies_to_msecs(stime);
#endif
#ifdef PLATFORM_FREEBSD
	return stime * 1000;
#endif
#ifdef PLATFORM_WINDOWS
	return stime / 10000 ;
#endif
}

/* Convert milliseconds to the native systime tick unit. */
inline systime _rtw_ms_to_systime(u32 ms)
{
#ifdef PLATFORM_LINUX
	return msecs_to_jiffies(ms);
#endif
#ifdef PLATFORM_FREEBSD
	return ms / 1000;
#endif
#ifdef PLATFORM_WINDOWS
	return ms * 10000 ;
#endif
}

/* Convert microseconds to the native systime tick unit (Linux only). */
inline systime _rtw_us_to_systime(u32 us)
{
#ifdef PLATFORM_LINUX
	return usecs_to_jiffies(us);
#else
	#error "TBD\n"
#endif
}
1809 
/* the input parameter start use the same unit as returned by rtw_get_current_time */

/* Milliseconds elapsed since 'start'. */
inline s32 _rtw_get_passing_time_ms(systime start)
{
	return _rtw_systime_to_ms(_rtw_get_current_time() - start);
}

/* Milliseconds remaining until 'end' (may rely on tick wraparound). */
inline s32 _rtw_get_remaining_time_ms(systime end)
{
	return _rtw_systime_to_ms(end - _rtw_get_current_time());
}

/* Milliseconds between two tick values (end - start). */
inline s32 _rtw_get_time_interval_ms(systime start, systime end)
{
	return _rtw_systime_to_ms(end - start);
}

/* Wrap-safe "a is later than b" comparison of tick values. */
inline bool _rtw_time_after(systime a, systime b)
{
#ifdef PLATFORM_LINUX
	return time_after(a, b);
#else
	#error "TBD\n"
#endif
}
1834 
/* Return the current CLOCK_MONOTONIC time as a sysptime (ktime_t). */
sysptime rtw_sptime_get(void)
{
	/* CLOCK_MONOTONIC */
#ifdef PLATFORM_LINUX
	/* timespec was deprecated in favor of timespec64 (y2038-safe) */
	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0))
	struct timespec64 cur;

	ktime_get_ts64(&cur);
	return timespec64_to_ktime(cur);
	#else
	struct timespec cur;

	ktime_get_ts(&cur);
	return timespec_to_ktime(cur);
	#endif
#else
	#error "TBD\n"
#endif
}

/* Build a sysptime from seconds + nanoseconds. */
sysptime rtw_sptime_set(s64 secs, const u32 nsecs)
{
#ifdef PLATFORM_LINUX
	return ktime_set(secs, nsecs);
#else
	#error "TBD\n"
#endif
}

/* Return the zero sysptime value. */
sysptime rtw_sptime_zero(void)
{
#ifdef PLATFORM_LINUX
	return ktime_set(0, 0);
#else
	#error "TBD\n"
#endif
}

/*
 *   cmp1  < cmp2: return <0
 *   cmp1 == cmp2: return 0
 *   cmp1  > cmp2: return >0
 */
int rtw_sptime_cmp(const sysptime cmp1, const sysptime cmp2)
{
#ifdef PLATFORM_LINUX
	/* ktime_compare() only exists since kernel 3.8 */
	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
	return ktime_compare(cmp1, cmp2);
	#else
	if (cmp1.tv64 < cmp2.tv64)
		return -1;
	if (cmp1.tv64 > cmp2.tv64)
		return 1;
	return 0;
	#endif
#else
	#error "TBD\n"
#endif
}

/* True when the two sysptime values are equal. */
bool rtw_sptime_eql(const sysptime cmp1, const sysptime cmp2)
{
#ifdef PLATFORM_LINUX
	return rtw_sptime_cmp(cmp1, cmp2) == 0;
#else
	#error "TBD\n"
#endif
}

/* True when sptime equals the zero time. */
bool rtw_sptime_is_zero(const sysptime sptime)
{
#ifdef PLATFORM_LINUX
	return rtw_sptime_cmp(sptime, rtw_sptime_zero()) == 0;
#else
	#error "TBD\n"
#endif
}
1912 
1913 /*
1914  * sub = lhs - rhs, in normalized form
1915  */
rtw_sptime_sub(const sysptime lhs,const sysptime rhs)1916 sysptime rtw_sptime_sub(const sysptime lhs, const sysptime rhs)
1917 {
1918 #ifdef PLATFORM_LINUX
1919 	return ktime_sub(lhs, rhs);
1920 #else
1921 	#error "TBD\n"
1922 #endif
1923 }
1924 
1925 /*
1926  * add = lhs + rhs, in normalized form
1927  */
rtw_sptime_add(const sysptime lhs,const sysptime rhs)1928 sysptime rtw_sptime_add(const sysptime lhs, const sysptime rhs)
1929 {
1930 #ifdef PLATFORM_LINUX
1931 	return ktime_add(lhs, rhs);
1932 #else
1933 	#error "TBD\n"
1934 #endif
1935 }
1936 
rtw_sptime_to_ms(const sysptime sptime)1937 s64 rtw_sptime_to_ms(const sysptime sptime)
1938 {
1939 #ifdef PLATFORM_LINUX
1940 	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
1941 	return ktime_to_ms(sptime);
1942 	#else
1943 	struct timeval tv = ktime_to_timeval(sptime);
1944 
1945 	return (s64) tv.tv_sec * MSEC_PER_SEC + tv.tv_usec / USEC_PER_MSEC;
1946 	#endif
1947 #else
1948 	#error "TBD\n"
1949 #endif
1950 }
1951 
rtw_ms_to_sptime(u64 ms)1952 sysptime rtw_ms_to_sptime(u64 ms)
1953 {
1954 #ifdef PLATFORM_LINUX
1955 	return ns_to_ktime(ms * NSEC_PER_MSEC);
1956 #else
1957 	#error "TBD\n"
1958 #endif
1959 }
1960 
rtw_sptime_to_us(const sysptime sptime)1961 s64 rtw_sptime_to_us(const sysptime sptime)
1962 {
1963 #ifdef PLATFORM_LINUX
1964 	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22))
1965 	return ktime_to_us(sptime);
1966 	#else
1967 	struct timeval tv = ktime_to_timeval(sptime);
1968 
1969 	return (s64) tv.tv_sec * USEC_PER_SEC + tv.tv_usec;
1970 	#endif
1971 #else
1972 	#error "TBD\n"
1973 #endif
1974 }
1975 
rtw_us_to_sptime(u64 us)1976 sysptime rtw_us_to_sptime(u64 us)
1977 {
1978 #ifdef PLATFORM_LINUX
1979 	return ns_to_ktime(us * NSEC_PER_USEC);
1980 #else
1981 	#error "TBD\n"
1982 #endif
1983 }
1984 
rtw_sptime_to_ns(const sysptime sptime)1985 s64 rtw_sptime_to_ns(const sysptime sptime)
1986 {
1987 #ifdef PLATFORM_LINUX
1988 	return ktime_to_ns(sptime);
1989 #else
1990 	#error "TBD\n"
1991 #endif
1992 }
1993 
rtw_ns_to_sptime(u64 ns)1994 sysptime rtw_ns_to_sptime(u64 ns)
1995 {
1996 #ifdef PLATFORM_LINUX
1997 	return ns_to_ktime(ns);
1998 #else
1999 	#error "TBD\n"
2000 #endif
2001 }
2002 
/* Milliseconds from 'start' to 'end'. */
s64 rtw_sptime_diff_ms(const sysptime start, const sysptime end)
{
	sysptime diff;

	diff = rtw_sptime_sub(end, start);

	return rtw_sptime_to_ms(diff);
}

/* Milliseconds elapsed from 'start' until now. */
s64 rtw_sptime_pass_ms(const sysptime start)
{
	sysptime cur, diff;

	cur = rtw_sptime_get();
	diff = rtw_sptime_sub(cur, start);

	return rtw_sptime_to_ms(diff);
}

/* Microseconds from 'start' to 'end'. */
s64 rtw_sptime_diff_us(const sysptime start, const sysptime end)
{
	sysptime diff;

	diff = rtw_sptime_sub(end, start);

	return rtw_sptime_to_us(diff);
}

/* Microseconds elapsed from 'start' until now. */
s64 rtw_sptime_pass_us(const sysptime start)
{
	sysptime cur, diff;

	cur = rtw_sptime_get();
	diff = rtw_sptime_sub(cur, start);

	return rtw_sptime_to_us(diff);
}

/* Nanoseconds from 'start' to 'end'. */
s64 rtw_sptime_diff_ns(const sysptime start, const sysptime end)
{
	sysptime diff;

	diff = rtw_sptime_sub(end, start);

	return rtw_sptime_to_ns(diff);
}

/* Nanoseconds elapsed from 'start' until now. */
s64 rtw_sptime_pass_ns(const sysptime start)
{
	sysptime cur, diff;

	cur = rtw_sptime_get();
	diff = rtw_sptime_sub(cur, start);

	return rtw_sptime_to_ns(diff);
}
2059 
/*
 * Sleep for about 'ms' milliseconds, allowing the scheduler to run
 * other tasks.  On Linux the sleep is interruptible and rounds up to
 * at least one jiffy; on FreeBSD/Windows it busy-delays instead.
 */
void rtw_sleep_schedulable(int ms)
{

#ifdef PLATFORM_LINUX

	u32 delta;

	delta = (ms * HZ) / 1000; /* (ms) */
	if (delta == 0) {
		delta = 1;/* 1 ms */
	}
	set_current_state(TASK_INTERRUPTIBLE);
        schedule_timeout(delta);
	return;

#endif
#ifdef PLATFORM_FREEBSD
	DELAY(ms * 1000);
	return ;
#endif

#ifdef PLATFORM_WINDOWS

	NdisMSleep(ms * 1000); /* (us)*1000=(ms) */

#endif

}
2088 
2089 
/*
 * Sleep for at least 'ms' milliseconds (schedulable, not busy-wait on
 * Linux).  Short sleeps (< 20 ms) use usleep_range() where available,
 * since msleep() can overshoot badly for small values.
 */
void rtw_msleep_os(int ms)
{

#ifdef PLATFORM_LINUX
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36))
	if (ms < 20) {
		unsigned long us = ms * 1000UL;
		usleep_range(us, us + 1000UL);
	} else
#endif
		msleep((unsigned int)ms);

#endif
#ifdef PLATFORM_FREEBSD
	/* Delay for delay microseconds */
	DELAY(ms * 1000);
	return ;
#endif
#ifdef PLATFORM_WINDOWS

	NdisMSleep(ms * 1000); /* (us)*1000=(ms) */

#endif


}
/*
 * Sleep for at least 'us' microseconds (schedulable on Linux).
 *
 * Fix: the pre-2.6.36 fallback had an inverted condition
 * (`if (1 < (us / 1000)) msleep(1)`) which slept only 1 ms for long
 * requests and over-slept short ones.  msleep((us / 1000) + 1) always
 * sleeps at least the requested time for every value of 'us'.
 */
void rtw_usleep_os(int us)
{
#ifdef PLATFORM_LINUX

	/* msleep((unsigned int)us); */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36))
	usleep_range(us, us + 1);
#else
	/* no usleep_range(); round up to whole milliseconds */
	msleep((us / 1000) + 1);
#endif
#endif

#ifdef PLATFORM_FREEBSD
	/* Delay for delay microseconds */
	DELAY(us);

	return ;
#endif
#ifdef PLATFORM_WINDOWS

	NdisMSleep(us); /* (us) */

#endif


}
2145 
2146 
2147 #ifdef DBG_DELAY_OS
/*
 * DBG_DELAY_OS build: busy-wait 'ms' milliseconds and log every call
 * with the caller's file/line, to locate excessive delay usage.
 */
void _rtw_mdelay_os(int ms, const char *func, const int line)
{
#if 0
	if (ms > 10)
		RTW_INFO("%s:%d %s(%d)\n", func, line, __FUNCTION__, ms);
	rtw_msleep_os(ms);
	return;
#endif


	RTW_INFO("%s:%d %s(%d)\n", func, line, __FUNCTION__, ms);

#if defined(PLATFORM_LINUX)

	mdelay((unsigned long)ms);

#elif defined(PLATFORM_WINDOWS)

	NdisStallExecution(ms * 1000); /* (us)*1000=(ms) */

#endif


}
/*
 * DBG_DELAY_OS build: busy-wait 'us' microseconds and log every call
 * with the caller's file/line.
 */
void _rtw_udelay_os(int us, const char *func, const int line)
{

#if 0
	if (us > 1000) {
		RTW_INFO("%s:%d %s(%d)\n", func, line, __FUNCTION__, us);
		rtw_usleep_os(us);
		return;
	}
#endif


	RTW_INFO("%s:%d %s(%d)\n", func, line, __FUNCTION__, us);


#if defined(PLATFORM_LINUX)

	udelay((unsigned long)us);

#elif defined(PLATFORM_WINDOWS)

	NdisStallExecution(us); /* (us) */

#endif

}
2198 #else
/* Busy-wait for 'ms' milliseconds (does not schedule; atomic-safe). */
void rtw_mdelay_os(int ms)
{

#ifdef PLATFORM_LINUX

	mdelay((unsigned long)ms);

#endif
#ifdef PLATFORM_FREEBSD
	DELAY(ms * 1000);
	return ;
#endif
#ifdef PLATFORM_WINDOWS

	NdisStallExecution(ms * 1000); /* (us)*1000=(ms) */

#endif


}
/* Busy-wait for 'us' microseconds (does not schedule; atomic-safe). */
void rtw_udelay_os(int us)
{

#ifdef PLATFORM_LINUX

	udelay((unsigned long)us);

#endif
#ifdef PLATFORM_FREEBSD
	/* Delay for delay microseconds */
	DELAY(us);
	return ;
#endif
#ifdef PLATFORM_WINDOWS

	NdisStallExecution(us); /* (us) */

#endif

}
2239 #endif
2240 
/* Voluntarily give up the CPU so other runnable tasks can execute. */
void rtw_yield_os(void)
{
#ifdef PLATFORM_LINUX
	yield();
#endif
#ifdef PLATFORM_FREEBSD
	yield();
#endif
#ifdef PLATFORM_WINDOWS
	SwitchToThread();
#endif
}
2253 
/* Short display names for each polling-wait type (see rtw_pwait_hdl). */
const char *_rtw_pwait_type_str[] = {
	[RTW_PWAIT_TYPE_MSLEEP] = "MS",
	[RTW_PWAIT_TYPE_USLEEP] = "US",
	[RTW_PWAIT_TYPE_YIELD] = "Y",
	[RTW_PWAIT_TYPE_MDELAY] = "MD",
	[RTW_PWAIT_TYPE_UDELAY] = "UD",
	[RTW_PWAIT_TYPE_NUM] = "unknown",
};
2262 
/* Adapter so yield fits the (int) wait-handler signature; 'us' unused. */
static void rtw_pwctx_yield(int us)
{
	rtw_yield_os();
}

/* Wait handler per polling-wait type, indexed by enum rtw_pwait_type. */
static void (*const rtw_pwait_hdl[])(int)= {
	[RTW_PWAIT_TYPE_MSLEEP] = rtw_msleep_os,
	[RTW_PWAIT_TYPE_USLEEP] = rtw_usleep_os,
	[RTW_PWAIT_TYPE_YIELD] = rtw_pwctx_yield,
	[RTW_PWAIT_TYPE_MDELAY] = rtw_mdelay_os,
	[RTW_PWAIT_TYPE_UDELAY] = rtw_udelay_os,
};
2275 
/*
 * Configure a polling-wait context: select the wait handler
 * (msleep/usleep/yield/mdelay/udelay) together with the per-iteration
 * wait time and the iteration-count limit.
 * Returns _SUCCESS, or _FAIL when 'type' is out of range.
 */
int rtw_pwctx_config(struct rtw_pwait_ctx *pwctx, enum rtw_pwait_type type, s32 time, s32 cnt_lmt)
{
	if (!RTW_PWAIT_TYPE_VALID(type))
		return _FAIL;

	pwctx->conf.type = type;
	pwctx->conf.wait_time = time;
	pwctx->conf.wait_cnt_lmt = cnt_lmt;
	pwctx->wait_hdl = rtw_pwait_hdl[type];

	return _SUCCESS;
}
2293 
/*
 * Compare two 6-byte MAC addresses as big-endian (network-order)
 * values; return true when 'a' is numerically larger than 'b'.
 *
 * Fix: the previous implementation loaded u32/u16 values through
 * casted u8 pointers, which is an unaligned access on strict-alignment
 * CPUs and violates strict aliasing.  A lexicographic memcmp() over
 * the 6 bytes performs exactly the same big-endian comparison safely.
 */
bool rtw_macaddr_is_larger(const u8 *a, const u8 *b)
{
	return memcmp(a, b, 6 /* ETH_ALEN */) > 0;
}
2307 
/*
 * Wakelocks that keep the system from suspending while wifi work is in
 * progress: a general lock, one held while traffic is active, and one
 * held across resume handling.  Backed by the kernel wakelock API or
 * the Android suspend-lock API, depending on the build.
 */
#define RTW_SUSPEND_LOCK_NAME "rtw_wifi"
#define RTW_SUSPEND_TRAFFIC_LOCK_NAME "rtw_wifi_traffic"
#define RTW_SUSPEND_RESUME_LOCK_NAME "rtw_wifi_resume"
#ifdef CONFIG_WAKELOCK
static struct wake_lock rtw_suspend_lock;
static struct wake_lock rtw_suspend_traffic_lock;
static struct wake_lock rtw_suspend_resume_lock;
#elif defined(CONFIG_ANDROID_POWER)
static android_suspend_lock_t rtw_suspend_lock = {
	.name = RTW_SUSPEND_LOCK_NAME
};
static android_suspend_lock_t rtw_suspend_traffic_lock = {
	.name = RTW_SUSPEND_TRAFFIC_LOCK_NAME
};
static android_suspend_lock_t rtw_suspend_resume_lock = {
	.name = RTW_SUSPEND_RESUME_LOCK_NAME
};
#endif
2326 
/* Register the three wifi wakelocks; call once at driver load. */
inline void rtw_suspend_lock_init(void)
{
#ifdef CONFIG_WAKELOCK
	wake_lock_init(&rtw_suspend_lock, WAKE_LOCK_SUSPEND, RTW_SUSPEND_LOCK_NAME);
	wake_lock_init(&rtw_suspend_traffic_lock, WAKE_LOCK_SUSPEND, RTW_SUSPEND_TRAFFIC_LOCK_NAME);
	wake_lock_init(&rtw_suspend_resume_lock, WAKE_LOCK_SUSPEND, RTW_SUSPEND_RESUME_LOCK_NAME);
#elif defined(CONFIG_ANDROID_POWER)
	android_init_suspend_lock(&rtw_suspend_lock);
	android_init_suspend_lock(&rtw_suspend_traffic_lock);
	android_init_suspend_lock(&rtw_suspend_resume_lock);
#endif
}

/* Unregister the three wifi wakelocks; call once at driver unload. */
inline void rtw_suspend_lock_uninit(void)
{
#ifdef CONFIG_WAKELOCK
	wake_lock_destroy(&rtw_suspend_lock);
	wake_lock_destroy(&rtw_suspend_traffic_lock);
	wake_lock_destroy(&rtw_suspend_resume_lock);
#elif defined(CONFIG_ANDROID_POWER)
	android_uninit_suspend_lock(&rtw_suspend_lock);
	android_uninit_suspend_lock(&rtw_suspend_traffic_lock);
	android_uninit_suspend_lock(&rtw_suspend_resume_lock);
#endif
}
2352 
/* Hold the general wifi wakelock (blocks system suspend). */
inline void rtw_lock_suspend(void)
{
#ifdef CONFIG_WAKELOCK
	wake_lock(&rtw_suspend_lock);
#elif defined(CONFIG_ANDROID_POWER)
	android_lock_suspend(&rtw_suspend_lock);
#endif

#if  defined(CONFIG_WAKELOCK) || defined(CONFIG_ANDROID_POWER)
	/* RTW_INFO("####%s: suspend_lock_count:%d####\n", __FUNCTION__, rtw_suspend_lock.stat.count); */
#endif
}

/* Release the general wifi wakelock. */
inline void rtw_unlock_suspend(void)
{
#ifdef CONFIG_WAKELOCK
	wake_unlock(&rtw_suspend_lock);
#elif defined(CONFIG_ANDROID_POWER)
	android_unlock_suspend(&rtw_suspend_lock);
#endif

#if  defined(CONFIG_WAKELOCK) || defined(CONFIG_ANDROID_POWER)
	/* RTW_INFO("####%s: suspend_lock_count:%d####\n", __FUNCTION__, rtw_suspend_lock.stat.count); */
#endif
}

/* Hold the resume wakelock while resume processing runs. */
inline void rtw_resume_lock_suspend(void)
{
#ifdef CONFIG_WAKELOCK
	wake_lock(&rtw_suspend_resume_lock);
#elif defined(CONFIG_ANDROID_POWER)
	android_lock_suspend(&rtw_suspend_resume_lock);
#endif

#if  defined(CONFIG_WAKELOCK) || defined(CONFIG_ANDROID_POWER)
	/* RTW_INFO("####%s: suspend_lock_count:%d####\n", __FUNCTION__, rtw_suspend_lock.stat.count); */
#endif
}

/* Release the resume wakelock. */
inline void rtw_resume_unlock_suspend(void)
{
#ifdef CONFIG_WAKELOCK
	wake_unlock(&rtw_suspend_resume_lock);
#elif defined(CONFIG_ANDROID_POWER)
	android_unlock_suspend(&rtw_suspend_resume_lock);
#endif

#if  defined(CONFIG_WAKELOCK) || defined(CONFIG_ANDROID_POWER)
	/* RTW_INFO("####%s: suspend_lock_count:%d####\n", __FUNCTION__, rtw_suspend_lock.stat.count); */
#endif
}
2404 
/* Hold the general wakelock; it auto-expires after timeout_ms. */
inline void rtw_lock_suspend_timeout(u32 timeout_ms)
{
#ifdef CONFIG_WAKELOCK
	wake_lock_timeout(&rtw_suspend_lock, rtw_ms_to_systime(timeout_ms));
#elif defined(CONFIG_ANDROID_POWER)
	android_lock_suspend_auto_expire(&rtw_suspend_lock, rtw_ms_to_systime(timeout_ms));
#endif
}


/* Hold the traffic wakelock; it auto-expires after timeout_ms. */
inline void rtw_lock_traffic_suspend_timeout(u32 timeout_ms)
{
#ifdef CONFIG_WAKELOCK
	wake_lock_timeout(&rtw_suspend_traffic_lock, rtw_ms_to_systime(timeout_ms));
#elif defined(CONFIG_ANDROID_POWER)
	android_lock_suspend_auto_expire(&rtw_suspend_traffic_lock, rtw_ms_to_systime(timeout_ms));
#endif
	/* RTW_INFO("traffic lock timeout:%d\n", timeout_ms); */
}
2424 
/* Atomically set bit @nr in the bitmap at @addr (wraps kernel set_bit()). */
inline void rtw_set_bit(int nr, unsigned long *addr)
{
#ifdef PLATFORM_LINUX
	set_bit(nr, addr);
#else
	#error "TBD\n";
#endif
}

/* Atomically clear bit @nr in the bitmap at @addr. */
inline void rtw_clear_bit(int nr, unsigned long *addr)
{
#ifdef PLATFORM_LINUX
	clear_bit(nr, addr);
#else
	#error "TBD\n";
#endif
}

/* Atomically clear bit @nr at @addr; returns its previous value. */
inline int rtw_test_and_clear_bit(int nr, unsigned long *addr)
{
#ifdef PLATFORM_LINUX
	return test_and_clear_bit(nr, addr);
#else
	#error "TBD\n";
#endif
}
2451 
/* Portable atomic primitives: thin wrappers mapping ATOMIC_T onto the
 * native atomic operations of each supported platform. */

/* Set *v to i. */
inline void ATOMIC_SET(ATOMIC_T *v, int i)
{
#ifdef PLATFORM_LINUX
	atomic_set(v, i);
#elif defined(PLATFORM_WINDOWS)
	*v = i; /* other choice???? */
#elif defined(PLATFORM_FREEBSD)
	atomic_set_int(v, i);
#endif
}

/* Return the current value of *v. */
inline int ATOMIC_READ(ATOMIC_T *v)
{
#ifdef PLATFORM_LINUX
	return atomic_read(v);
#elif defined(PLATFORM_WINDOWS)
	return *v; /* other choice???? */
#elif defined(PLATFORM_FREEBSD)
	return atomic_load_acq_32(v);
#endif
}

/* *v += i. */
inline void ATOMIC_ADD(ATOMIC_T *v, int i)
{
#ifdef PLATFORM_LINUX
	atomic_add(i, v);
#elif defined(PLATFORM_WINDOWS)
	InterlockedAdd(v, i);
#elif defined(PLATFORM_FREEBSD)
	atomic_add_int(v, i);
#endif
}
/* *v -= i. */
inline void ATOMIC_SUB(ATOMIC_T *v, int i)
{
#ifdef PLATFORM_LINUX
	atomic_sub(i, v);
#elif defined(PLATFORM_WINDOWS)
	InterlockedAdd(v, -i);
#elif defined(PLATFORM_FREEBSD)
	atomic_subtract_int(v, i);
#endif
}

/* ++*v. */
inline void ATOMIC_INC(ATOMIC_T *v)
{
#ifdef PLATFORM_LINUX
	atomic_inc(v);
#elif defined(PLATFORM_WINDOWS)
	InterlockedIncrement(v);
#elif defined(PLATFORM_FREEBSD)
	atomic_add_int(v, 1);
#endif
}

/* --*v. */
inline void ATOMIC_DEC(ATOMIC_T *v)
{
#ifdef PLATFORM_LINUX
	atomic_dec(v);
#elif defined(PLATFORM_WINDOWS)
	InterlockedDecrement(v);
#elif defined(PLATFORM_FREEBSD)
	atomic_subtract_int(v, 1);
#endif
}

/* *v += i and return the new value.
 * NOTE(review): the FreeBSD add-then-load pair is not one atomic
 * operation; the returned value may include concurrent updates. */
inline int ATOMIC_ADD_RETURN(ATOMIC_T *v, int i)
{
#ifdef PLATFORM_LINUX
	return atomic_add_return(i, v);
#elif defined(PLATFORM_WINDOWS)
	return InterlockedAdd(v, i);
#elif defined(PLATFORM_FREEBSD)
	atomic_add_int(v, i);
	return atomic_load_acq_32(v);
#endif
}

/* *v -= i and return the new value (same FreeBSD caveat as above). */
inline int ATOMIC_SUB_RETURN(ATOMIC_T *v, int i)
{
#ifdef PLATFORM_LINUX
	return atomic_sub_return(i, v);
#elif defined(PLATFORM_WINDOWS)
	return InterlockedAdd(v, -i);
#elif defined(PLATFORM_FREEBSD)
	atomic_subtract_int(v, i);
	return atomic_load_acq_32(v);
#endif
}

/* ++*v and return the new value (same FreeBSD caveat as above). */
inline int ATOMIC_INC_RETURN(ATOMIC_T *v)
{
#ifdef PLATFORM_LINUX
	return atomic_inc_return(v);
#elif defined(PLATFORM_WINDOWS)
	return InterlockedIncrement(v);
#elif defined(PLATFORM_FREEBSD)
	atomic_add_int(v, 1);
	return atomic_load_acq_32(v);
#endif
}

/* --*v and return the new value (same FreeBSD caveat as above). */
inline int ATOMIC_DEC_RETURN(ATOMIC_T *v)
{
#ifdef PLATFORM_LINUX
	return atomic_dec_return(v);
#elif defined(PLATFORM_WINDOWS)
	return InterlockedDecrement(v);
#elif defined(PLATFORM_FREEBSD)
	atomic_subtract_int(v, 1);
	return atomic_load_acq_32(v);
#endif
}

/* Increment *v unless it has reached @u; returns non-zero when the
 * increment happened (the pre-2.6.15 fallback is only best-effort:
 * *v may transiently exceed @u before being decremented back). */
inline bool ATOMIC_INC_UNLESS(ATOMIC_T *v, int u)
{
#ifdef PLATFORM_LINUX
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 15))
	return atomic_add_unless(v, 1, u);
#else
	/* only make sure not exceed after this function */
	if (ATOMIC_INC_RETURN(v) > u) {
		ATOMIC_DEC(v);
		return 0;
	}
	return 1;
#endif
#else
	#error "TBD\n"
#endif
}
2582 
2583 #ifdef PLATFORM_LINUX
2584 #if !defined(CONFIG_RTW_ANDROID_GKI)
2585 /*
2586 * Open a file with the specific @param path, @param flag, @param mode
2587 * @param fpp the pointer of struct file pointer to get struct file pointer while file opening is success
2588 * @param path the path of the file to open
2589 * @param flag file operation flags, please refer to linux document
2590 * @param mode please refer to linux document
2591 * @return Linux specific error code
2592 */
openFile(struct file ** fpp,const char * path,int flag,int mode)2593 static int openFile(struct file **fpp, const char *path, int flag, int mode)
2594 {
2595 	struct file *fp;
2596 
2597 	fp = filp_open(path, flag, mode);
2598 	if (IS_ERR(fp)) {
2599 		*fpp = NULL;
2600 		return PTR_ERR(fp);
2601 	} else {
2602 		*fpp = fp;
2603 		return 0;
2604 	}
2605 }
2606 
2607 /*
2608 * Close the file with the specific @param fp
2609 * @param fp the pointer of struct file to close
2610 * @return always 0
2611 */
/* Close @fp via filp_close(); the filp_close() result is ignored and
 * 0 is always returned. */
static int closeFile(struct file *fp)
{
	filp_close(fp, NULL);
	return 0;
}
2617 
/*
 * Read up to @len bytes from @fp into @buf, advancing fp->f_pos,
 * retrying short reads until @len bytes, EOF, or an error.
 * @return bytes read (may be < @len at EOF), -EPERM if @fp is not
 *         readable, or the negative error from the underlying read
 */
static int readFile(struct file *fp, char *buf, int len)
{
	int rlen = 0, sum = 0;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
	if (!(fp->f_mode & FMODE_CAN_READ))
#else
	if (!fp->f_op || !fp->f_op->read)
#endif
		return -EPERM;

	while (sum < len) {
		/* pick the read primitive available on this kernel */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
		rlen = kernel_read(fp, buf + sum, len - sum, &fp->f_pos);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
		rlen = __vfs_read(fp, buf + sum, len - sum, &fp->f_pos);
#else
		rlen = fp->f_op->read(fp, buf + sum, len - sum, &fp->f_pos);
#endif
		if (rlen > 0)
			sum += rlen;
		else if (0 != rlen)
			return rlen; /* propagate read error */
		else
			break; /* EOF */
	}

	return  sum;

}
2648 
/*
 * Write up to @len bytes from @buf to @fp, advancing fp->f_pos,
 * retrying short writes until @len bytes are written or an error.
 * @return bytes written, -EPERM if @fp is not writable, or the
 *         negative error from the underlying write
 */
static int writeFile(struct file *fp, char *buf, int len)
{
	int wlen = 0, sum = 0;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
	if (!(fp->f_mode & FMODE_CAN_WRITE))
#else
	if (!fp->f_op || !fp->f_op->write)
#endif
		return -EPERM;

	while (sum < len) {
		/* pick the write primitive available on this kernel */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
		wlen = kernel_write(fp, buf + sum, len - sum, &fp->f_pos);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
		wlen = __vfs_write(fp, buf + sum, len - sum, &fp->f_pos);
#else
		wlen = fp->f_op->write(fp, buf + sum, len - sum, &fp->f_pos);
#endif
		if (wlen > 0)
			sum += wlen;
		else if (0 != wlen)
			return wlen; /* propagate write error */
		else
			break;
	}

	return sum;

}
2679 
2680 /*
* Test if the specified @param pathname is a directory and readable
2682 * If readable, @param sz is not used
2683 * @param pathname the name of the path to test
2684 * @return Linux specific error code
2685 */
static int isDirReadable(const char *pathname, u32 *sz)
{
	struct path path;
	int error;

	/* @sz is unused; kept for prototype symmetry with isFileReadable() */
	error = kern_path(pathname, LOOKUP_FOLLOW, &path);
	if (!error) {
		/* BUGFIX: kern_path() grabs dentry/vfsmount references on
		 * success; drop them or the refcounts leak on every probe. */
		path_put(&path);
	}

	return error;
}
2693 
2694 /*
* Test if the specified @param path is a file and readable
2696 * If readable, @param sz is got
2697 * @param path the path of the file to test
2698 * @return Linux specific error code
2699 */
isFileReadable(const char * path,u32 * sz)2700 static int isFileReadable(const char *path, u32 *sz)
2701 {
2702 	struct file *fp;
2703 	int ret = 0;
2704 	#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
2705 	mm_segment_t oldfs;
2706 	#endif
2707 	char buf;
2708 
2709 	fp = filp_open(path, O_RDONLY, 0);
2710 	if (IS_ERR(fp))
2711 		ret = PTR_ERR(fp);
2712 	else {
2713 		#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
2714 		oldfs = get_fs();
2715 		#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0))
2716 		set_fs(KERNEL_DS);
2717 		#else
2718 		set_fs(get_ds());
2719 		#endif
2720 		#endif
2721 
2722 		if (1 != readFile(fp, &buf, 1))
2723 			ret = PTR_ERR(fp);
2724 
2725 		if (ret == 0 && sz) {
2726 			#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
2727 			*sz = i_size_read(fp->f_path.dentry->d_inode);
2728 			#else
2729 			*sz = i_size_read(fp->f_dentry->d_inode);
2730 			#endif
2731 		}
2732 
2733 		#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
2734 		set_fs(oldfs);
2735 		#endif
2736 		filp_close(fp, NULL);
2737 	}
2738 	return ret;
2739 }
2740 
2741 /*
* Open the file with @param path and write @param sz bytes of data starting from @param buf into the file
2743 * @param path the path of the file to open and write
2744 * @param buf the starting address of the data to write into file
2745 * @param sz how many bytes to write at most
2746 * @return the byte we've written, or Linux specific error code
2747 */
static int storeToFile(const char *path, u8 *buf, u32 sz)
{
	int ret = 0;
	#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
	mm_segment_t oldfs;
	#endif
	struct file *fp;

	if (path && buf) {
		/* create if missing, truncate-free write-only open */
		ret = openFile(&fp, path, O_CREAT | O_WRONLY, 0666);
		if (0 == ret) {
			RTW_INFO("%s openFile path:%s fp=%p\n", __FUNCTION__, path , fp);

			/* pre-5.10 kernels need the addr-limit dance for VFS writes */
			#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
			oldfs = get_fs();
			#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0))
			set_fs(KERNEL_DS);
			#else
			set_fs(get_ds());
			#endif
			#endif

			ret = writeFile(fp, buf, sz);

			#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
			set_fs(oldfs);
			#endif
			closeFile(fp);

			RTW_INFO("%s writeFile, ret:%d\n", __FUNCTION__, ret);

		} else
			RTW_INFO("%s openFile path:%s Fail, ret:%d\n", __FUNCTION__, path, ret);
	} else {
		RTW_INFO("%s NULL pointer\n", __FUNCTION__);
		ret =  -EINVAL;
	}
	return ret;
}
2787 #endif /* !defined(CONFIG_RTW_ANDROID_GKI)*/
2788 #endif /* PLATFORM_LINUX */
2789 
2790 #if !defined(CONFIG_RTW_ANDROID_GKI)
2791 /*
* Test if the specified @param path is a directory and readable
2793 * @param path the path of the direct to test
2794 * @return _TRUE or _FALSE
2795 */
rtw_is_dir_readable(const char * path)2796 int rtw_is_dir_readable(const char *path)
2797 {
2798 #ifdef PLATFORM_LINUX
2799 	if (isDirReadable(path, NULL) == 0)
2800 		return _TRUE;
2801 	else
2802 		return _FALSE;
2803 #else
2804 	/* Todo... */
2805 	return _FALSE;
2806 #endif
2807 }
2808 #endif /* !defined(CONFIG_RTW_ANDROID_GKI) */
2809 
2810 /*
* Open the file with @param path and retrieve the file content into memory starting from @param buf for @param sz at most
2812 * @param path the path of the file to open and read
2813 * @param buf the starting address of the buffer to store file content
2814 * @param sz how many bytes to read at most
2815 * @return the byte we've read, or Linux specific error code
2816 */
retriveFromFile(const char * path,u8 * buf,u32 sz)2817 static int retriveFromFile(const char *path, u8 *buf, u32 sz)
2818 {
2819 #if defined(CONFIG_RTW_ANDROID_GKI)
2820 	int ret = -EINVAL;
2821 	const struct firmware *fw = NULL;
2822 	char* const delim = "/";
2823 	char *name, *token, *cur, *path_tmp = NULL;
2824 
2825 
2826 	if (path == NULL || buf == NULL) {
2827 		RTW_ERR("%s() NULL pointer\n", __func__);
2828 		goto err;
2829 	}
2830 
2831 	path_tmp = kstrdup(path, GFP_KERNEL);
2832 	if (path_tmp == NULL) {
2833 		RTW_ERR("%s() cannot copy path for parsing file name\n", __func__);
2834 		goto err;
2835 	}
2836 
2837 	/* parsing file name from path */
2838 	cur = path_tmp;
2839 	token = strsep(&cur, delim);
2840 	while (token != NULL) {
2841 		token = strsep(&cur, delim);
2842 		if(token)
2843 			name = token;
2844 	}
2845 
2846 	if (name == NULL) {
2847 		RTW_ERR("%s() parsing file name fail\n", __func__);
2848 		goto err;
2849 	}
2850 
2851 	/* request_firmware() will find file in /vendor/firmware but not in path */
2852 	ret = request_firmware(&fw, name, NULL);
2853 	if (ret == 0) {
2854 		RTW_INFO("%s() Success. retrieve file : %s, file size : %zu\n", __func__, name, fw->size);
2855 
2856 		if ((u32)fw->size < sz) {
2857 			_rtw_memcpy(buf, fw->data, (u32)fw->size);
2858 			ret = (u32)fw->size;
2859 			goto exit;
2860 		} else {
2861 			RTW_ERR("%s() file size : %zu exceed buf size : %u\n", __func__, fw->size, sz);
2862 			ret = -EFBIG;
2863 			goto err;
2864 		}
2865 	} else {
2866 		RTW_ERR("%s() Fail. retrieve file : %s, error : %d\n", __func__, name, ret);
2867 		goto err;
2868 	}
2869 
2870 
2871 
2872 err:
2873 	RTW_ERR("%s() Fail. retrieve file : %s, error : %d\n", __func__, path, ret);
2874 exit:
2875 	if (path_tmp)
2876 		kfree(path_tmp);
2877 	if (fw)
2878 		release_firmware(fw);
2879 	return ret;
2880 #else /* !defined(CONFIG_RTW_ANDROID_GKI) */
2881 	int ret = -1;
2882 	#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
2883 	mm_segment_t oldfs;
2884 	#endif
2885 	struct file *fp;
2886 
2887 	if (path && buf) {
2888 		ret = openFile(&fp, path, O_RDONLY, 0);
2889 		if (0 == ret) {
2890 			RTW_INFO("%s openFile path:%s fp=%p\n", __FUNCTION__, path , fp);
2891 
2892 			#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
2893 			oldfs = get_fs();
2894 			#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0))
2895 			set_fs(KERNEL_DS);
2896 			#else
2897 			set_fs(get_ds());
2898 			#endif
2899 			#endif
2900 
2901 			ret = readFile(fp, buf, sz);
2902 
2903 			#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
2904 			set_fs(oldfs);
2905 			#endif
2906 			closeFile(fp);
2907 
2908 			RTW_INFO("%s readFile, ret:%d\n", __FUNCTION__, ret);
2909 
2910 		} else
2911 			RTW_INFO("%s openFile path:%s Fail, ret:%d\n", __FUNCTION__, path, ret);
2912 	} else {
2913 		RTW_INFO("%s NULL pointer\n", __FUNCTION__);
2914 		ret =  -EINVAL;
2915 	}
2916 	return ret;
2917 #endif /* defined(CONFIG_RTW_ANDROID_GKI) */
2918 }
2919 
2920 /*
* Test if the specified @param path is a file and readable
2922 * @param path the path of the file to test
2923 * @return _TRUE or _FALSE
2924 */
int rtw_is_file_readable(const char *path)
{
#ifdef PLATFORM_LINUX
#if !defined(CONFIG_RTW_ANDROID_GKI)
	if (isFileReadable(path, NULL) == 0)
		return _TRUE;
	else
		return _FALSE;
#else
	/* GKI forbids direct VFS reads, so optimistically report readable
	 * and let the firmware-loader path fail later if it is not */
	RTW_INFO("%s() Android GKI prohibbit kernel_read, return _TRUE\n", __func__);
	return  _TRUE;
#endif /* !defined(CONFIG_RTW_ANDROID_GKI) */
#else
	/* Todo... */
	return _FALSE;
#endif
}
2942 
2943 /*
* Test if the specified @param path is a file and readable.
2945 * If readable, @param sz is got
2946 * @param path the path of the file to test
2947 * @return _TRUE or _FALSE
2948 */
int rtw_is_file_readable_with_size(const char *path, u32 *sz)
{
#ifdef PLATFORM_LINUX
#if !defined(CONFIG_RTW_ANDROID_GKI)
	if (isFileReadable(path, sz) == 0)
		return _TRUE;
	else
		return _FALSE;
#else
	/* GKI cannot stat the file; report readable with size 0 so that
	 * size checks in callers pass trivially */
	RTW_INFO("%s() Android GKI prohibbit kernel_read, return _TRUE\n", __func__);
	*sz = 0;
	return  _TRUE;
#endif /* !defined(CONFIG_RTW_ANDROID_GKI) */
#else
	/* Todo... */
	return _FALSE;
#endif
}
2967 
2968 /*
* Test if the specified @param path is a readable file with valid size.
2970 * If readable, @param sz is got
2971 * @param path the path of the file to test
2972 * @return _TRUE or _FALSE
2973 */
/*
 * Check that @path is readable and no larger than @sz bytes.
 * @return _TRUE when readable and within the size limit, else _FALSE
 */
int rtw_readable_file_sz_chk(const char *path, u32 sz)
{
	u32 file_sz = 0;

	if (rtw_is_file_readable_with_size(path, &file_sz) != _TRUE)
		return _FALSE;

	return (file_sz <= sz) ? _TRUE : _FALSE;
}
2986 
2987 /*
* Open the file with @param path and retrieve the file content into memory starting from @param buf for @param sz at most
2989 * @param path the path of the file to open and read
2990 * @param buf the starting address of the buffer to store file content
2991 * @param sz how many bytes to read at most
2992 * @return the byte we've read
2993 */
/*
 * Read up to @sz bytes of the file at @path into @buf.
 * @return bytes read; 0 on any error (errors are not propagated)
 */
int rtw_retrieve_from_file(const char *path, u8 *buf, u32 sz)
{
#ifdef PLATFORM_LINUX
	int rd = retriveFromFile(path, buf, sz);

	return (rd >= 0) ? rd : 0;
#else
	/* Todo... */
	return 0;
#endif
}
3004 
3005 #if !defined(CONFIG_RTW_ANDROID_GKI)
3006 /*
* Open the file with @param path and write @param sz bytes of data starting from @param buf into the file
3008 * @param path the path of the file to open and write
3009 * @param buf the starting address of the data to write into file
3010 * @param sz how many bytes to write at most
3011 * @return the byte we've written
3012 */
/*
 * Write @sz bytes from @buf to the file at @path (created if absent).
 * @return bytes written; 0 on any error (errors are not propagated)
 */
int rtw_store_to_file(const char *path, u8 *buf, u32 sz)
{
#ifdef PLATFORM_LINUX
	int wr = storeToFile(path, buf, sz);

	return (wr >= 0) ? wr : 0;
#else
	/* Todo... */
	return 0;
#endif
}
3023 #endif /* !defined(CONFIG_RTW_ANDROID_GKI) */
3024 
3025 #ifdef PLATFORM_LINUX
3026 #ifdef CONFIG_RTL8822CS_WIFI_HDF
3027 struct net_device* GetLinuxInfByNetDevice(const struct NetDevice *netDevice);
3028 extern struct NetDevice* get_rtl_netdev(void);
3029 extern void* get_rtl_priv_data(void);
3030 #endif
3031 
rtw_alloc_etherdev_with_old_priv(int sizeof_priv,void * old_priv)3032 struct net_device *rtw_alloc_etherdev_with_old_priv(int sizeof_priv, void *old_priv)
3033 {
3034 	struct net_device *pnetdev;
3035 	struct rtw_netdev_priv_indicator *pnpi;
3036 #ifdef CONFIG_RTL8822CS_WIFI_HDF
3037 	struct NetDevice* tempDev = get_rtl_netdev();
3038 
3039     pnetdev = GetLinuxInfByNetDevice(tempDev);
3040     if (pnetdev == NULL) {
3041         goto RETURN;
3042     }
3043 
3044 	tempDev->mlPriv = kzalloc(sizeof(struct rtw_netdev_priv_indicator), GFP_KERNEL);
3045 	pnpi = tempDev->mlPriv;
3046 #else
3047 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
3048 	pnetdev = alloc_etherdev_mq(sizeof(struct rtw_netdev_priv_indicator), 4);
3049 #else
3050 	pnetdev = alloc_etherdev(sizeof(struct rtw_netdev_priv_indicator));
3051 #endif
3052 	if (!pnetdev)
3053 		goto RETURN;
3054 
3055 	pnpi = netdev_priv(pnetdev);
3056 #endif
3057 
3058 	pnpi->priv = old_priv;
3059 	pnpi->sizeof_priv = sizeof_priv;
3060 
3061 RETURN:
3062 	return pnetdev;
3063 }
3064 
rtw_alloc_etherdev(int sizeof_priv)3065 struct net_device *rtw_alloc_etherdev(int sizeof_priv)
3066 {
3067 	struct net_device *pnetdev;
3068 	struct rtw_netdev_priv_indicator *pnpi;
3069 
3070 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
3071 	pnetdev = alloc_etherdev_mq(sizeof(struct rtw_netdev_priv_indicator), 4);
3072 #else
3073 	pnetdev = alloc_etherdev(sizeof(struct rtw_netdev_priv_indicator));
3074 #endif
3075 	if (!pnetdev)
3076 		goto RETURN;
3077 
3078 	pnpi = netdev_priv(pnetdev);
3079 
3080 	pnpi->priv = rtw_zvmalloc(sizeof_priv);
3081 	if (!pnpi->priv) {
3082 		free_netdev(pnetdev);
3083 		pnetdev = NULL;
3084 		goto RETURN;
3085 	}
3086 
3087 	pnpi->sizeof_priv = sizeof_priv;
3088 RETURN:
3089 	return pnetdev;
3090 }
3091 
/*
 * Free a net_device allocated by rtw_alloc_etherdev*().
 * NOTE(review): pnpi->priv is NOT freed here, and a netdev whose priv
 * indicator is NULL is deliberately not freed either — presumably the
 * priv is owned elsewhere (e.g. the old_priv passed to
 * rtw_alloc_etherdev_with_old_priv()); confirm against the callers.
 */
void rtw_free_netdev(struct net_device *netdev)
{
	struct rtw_netdev_priv_indicator *pnpi;

	if (!netdev)
		goto RETURN;

	pnpi = netdev_priv(netdev);

	if (!pnpi->priv)
		goto RETURN;

	free_netdev(netdev);

RETURN:
	return;
}
3109 
3110 #endif
3111 
3112 #ifdef PLATFORM_FREEBSD
3113 /*
3114  * Copy a buffer from userspace and write into kernel address
3115  * space.
3116  *
3117  * This emulation just calls the FreeBSD copyin function (to
3118  * copy data from user space buffer into a kernel space buffer)
3119  * and is designed to be used with the above io_write_wrapper.
3120  *
3121  * This function should return the number of bytes not copied.
3122  * I.e. success results in a zero value.
3123  * Negative error values are not returned.
3124  */
/* FreeBSD emulation of Linux copy_from_user(): copy @n bytes from the
 * userspace buffer @from into kernel buffer @to via copyin().
 * Returns the number of bytes NOT copied (0 on success; any copyin()
 * fault is reported as all @n bytes uncopied). */
unsigned long
copy_from_user(void *to, const void *from, unsigned long n)
{
	return (copyin(from, to, n) != 0) ? n : 0;
}
3136 
/* FreeBSD emulation of Linux copy_to_user(): copy @n bytes from kernel
 * buffer @from to the userspace buffer @to via copyout().
 * Returns the number of bytes NOT copied (0 on success; any copyout()
 * fault is reported as all @n bytes uncopied). */
unsigned long
copy_to_user(void *to, const void *from, unsigned long n)
{
	return (copyout(from, to, n) != 0) ? n : 0;
}
3148 
3149 
3150 /*
3151  * The usb_register and usb_deregister functions are used to register
3152  * usb drivers with the usb subsystem. In this compatibility layer
3153  * emulation a list of drivers (struct usb_driver) is maintained
3154  * and is used for probing/attaching etc.
3155  *
3156  * usb_register and usb_deregister simply call these functions.
3157  */
/* Register @driver with the USB compatibility layer; always returns 0. */
int
usb_register(struct usb_driver *driver)
{
	rtw_usb_linux_register(driver);
	return 0;
}


/* Remove @driver from the USB compatibility layer; always returns 0. */
int
usb_deregister(struct usb_driver *driver)
{
	rtw_usb_linux_deregister(driver);
	return 0;
}
3172 
/* Invoke @arg as an int (*)(void) module init/exit entry point,
 * discarding its return value. */
void module_init_exit_wrapper(void *arg)
{
	int (*entry)(void) = arg;

	(void)entry();
}
3179 
3180 #endif /* PLATFORM_FREEBSD */
3181 
3182 #ifdef CONFIG_PLATFORM_SPRD
3183 	#ifdef do_div
3184 		#undef do_div
3185 	#endif
3186 	#include <asm-generic/div64.h>
3187 #endif
3188 
/* Return x % y. On Linux do_div() divides x in place and returns the
 * remainder (x here is a local copy, so the caller's value is safe). */
u64 rtw_modular64(u64 x, u64 y)
{
#ifdef PLATFORM_LINUX
	return do_div(x, y);
#elif defined(PLATFORM_WINDOWS)
	return x % y;
#elif defined(PLATFORM_FREEBSD)
	return x % y;
#endif
}

/* Return x / y. do_div() is used on Linux because native 64-bit
 * division is not available on all 32-bit kernels. */
u64 rtw_division64(u64 x, u64 y)
{
#ifdef PLATFORM_LINUX
	do_div(x, y);
	return x;
#elif defined(PLATFORM_WINDOWS)
	return x / y;
#elif defined(PLATFORM_FREEBSD)
	return x / y;
#endif
}
3211 
/* Return a 32-bit pseudo-random number using whichever PRNG interface
 * the running kernel version provides. */
inline u32 rtw_random32(void)
{
#ifdef PLATFORM_LINUX
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
	return prandom_u32();
#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18))
	u32 random_int;
	get_random_bytes(&random_int , 4);
	return random_int;
#else
	return random32();
#endif
#elif defined(PLATFORM_WINDOWS)
#error "to be implemented\n"
#elif defined(PLATFORM_FREEBSD)
#error "to be implemented\n"
#endif
}
3230 
/*
 * Free the buffer *@buf of length *@buf_len and reset both to 0/NULL.
 * Safe to call with NULL arguments or an already-NULL *@buf.
 * (Removed the unused local `ori_len` — it was a dead store.)
 */
void rtw_buf_free(u8 **buf, u32 *buf_len)
{
	if (!buf || !buf_len)
		return;

	if (*buf) {
		u32 tmp_buf_len = *buf_len;

		/* clear the length first so no reader ever pairs a stale
		 * length with a freed pointer */
		*buf_len = 0;
		rtw_mfree(*buf, tmp_buf_len);
		*buf = NULL;
	}
}
3247 
/*
 * Replace the buffer *@buf/*@buf_len with a freshly-allocated copy of
 * @src/@src_len, freeing the previous buffer. If @src is NULL/empty or
 * the allocation fails, *@buf becomes NULL and *@buf_len 0, and the
 * old buffer is still freed.
 */
void rtw_buf_update(u8 **buf, u32 *buf_len, const u8 *src, u32 src_len)
{
	u32 ori_len = 0, dup_len = 0;
	u8 *ori = NULL;
	u8 *dup = NULL;

	if (!buf || !buf_len)
		return;

	if (!src || !src_len)
		goto keep_ori;

	/* duplicate src */
	dup = rtw_malloc(src_len);
	if (dup) {
		dup_len = src_len;
		_rtw_memcpy(dup, src, dup_len);
	}

keep_ori:
	ori = *buf;
	ori_len = *buf_len;

	/* replace buf with dup */
	/* length is zeroed before the pointer swap so no observer pairs
	 * the old length with the new pointer */
	*buf_len = 0;
	*buf = dup;
	*buf_len = dup_len;

	/* free ori */
	if (ori && ori_len > 0)
		rtw_mfree(ori, ori_len);
}
3280 
3281 
3282 /**
3283  * rtw_cbuf_full - test if cbuf is full
3284  * @cbuf: pointer of struct rtw_cbuf
3285  *
3286  * Returns: _TRUE if cbuf is full
3287  */
rtw_cbuf_full(struct rtw_cbuf * cbuf)3288 inline bool rtw_cbuf_full(struct rtw_cbuf *cbuf)
3289 {
3290 	return (cbuf->write == cbuf->read - 1) ? _TRUE : _FALSE;
3291 }
3292 
3293 /**
3294  * rtw_cbuf_empty - test if cbuf is empty
3295  * @cbuf: pointer of struct rtw_cbuf
3296  *
3297  * Returns: _TRUE if cbuf is empty
3298  */
rtw_cbuf_empty(struct rtw_cbuf * cbuf)3299 inline bool rtw_cbuf_empty(struct rtw_cbuf *cbuf)
3300 {
3301 	return (cbuf->write == cbuf->read) ? _TRUE : _FALSE;
3302 }
3303 
3304 /**
3305  * rtw_cbuf_push - push a pointer into cbuf
3306  * @cbuf: pointer of struct rtw_cbuf
3307  * @buf: pointer to push in
3308  *
3309  * Lock free operation, be careful of the use scheme
3310  * Returns: _TRUE push success
3311  */
rtw_cbuf_push(struct rtw_cbuf * cbuf,void * buf)3312 bool rtw_cbuf_push(struct rtw_cbuf *cbuf, void *buf)
3313 {
3314 	if (rtw_cbuf_full(cbuf))
3315 		return _FAIL;
3316 
3317 	if (0)
3318 		RTW_INFO("%s on %u\n", __func__, cbuf->write);
3319 	cbuf->bufs[cbuf->write] = buf;
3320 	cbuf->write = (cbuf->write + 1) % cbuf->size;
3321 
3322 	return _SUCCESS;
3323 }
3324 
3325 /**
3326  * rtw_cbuf_pop - pop a pointer from cbuf
3327  * @cbuf: pointer of struct rtw_cbuf
3328  *
3329  * Lock free operation, be careful of the use scheme
3330  * Returns: pointer popped out
3331  */
rtw_cbuf_pop(struct rtw_cbuf * cbuf)3332 void *rtw_cbuf_pop(struct rtw_cbuf *cbuf)
3333 {
3334 	void *buf;
3335 	if (rtw_cbuf_empty(cbuf))
3336 		return NULL;
3337 
3338 	if (0)
3339 		RTW_INFO("%s on %u\n", __func__, cbuf->read);
3340 	buf = cbuf->bufs[cbuf->read];
3341 	cbuf->read = (cbuf->read + 1) % cbuf->size;
3342 
3343 	return buf;
3344 }
3345 
3346 /**
 * rtw_cbuf_alloc - allocate a rtw_cbuf with given size and do initialization
 * @size: size of pointer
 *
 * Returns: pointer of struct rtw_cbuf, NULL for allocation failure
3351  */
/*
 * Allocate a circular buffer with @size pointer slots (header and slot
 * array in one allocation) and reset its cursors.
 * @return new rtw_cbuf, or NULL on allocation failure
 */
struct rtw_cbuf *rtw_cbuf_alloc(u32 size)
{
	struct rtw_cbuf *cbuf;

	cbuf = (struct rtw_cbuf *)rtw_malloc(sizeof(*cbuf) + sizeof(void *) * size);
	if (cbuf == NULL)
		return NULL;

	cbuf->read = 0;
	cbuf->write = 0;
	cbuf->size = size;

	return cbuf;
}
3365 
3366 /**
3367  * rtw_cbuf_free - free the given rtw_cbuf
3368  * @cbuf: pointer of struct rtw_cbuf to free
3369  */
rtw_cbuf_free(struct rtw_cbuf * cbuf)3370 void rtw_cbuf_free(struct rtw_cbuf *cbuf)
3371 {
3372 	rtw_mfree((u8 *)cbuf, sizeof(*cbuf) + sizeof(void *) * cbuf->size);
3373 }
3374 
3375 /**
3376  * map_readN - read a range of map data
3377  * @map: map to read
3378  * @offset: start address to read
3379  * @len: length to read
3380  * @buf: pointer of buffer to store data read
3381  *
3382  * Returns: _SUCCESS or _FAIL
3383  */
int map_readN(const struct map_t *map, u16 offset, u16 len, u8 *buf)
{
	const struct map_seg_t *seg;
	int ret = _FAIL;
	int i;

	if (len == 0) {
		rtw_warn_on(1);
		goto exit;
	}

	if (offset + len > map->len) {
		rtw_warn_on(1);
		goto exit;
	}

	/* gaps not covered by any segment read back as init_value */
	_rtw_memset(buf, map->init_value, len);

	for (i = 0; i < map->seg_num; i++) {
		u8 *c_dst, *c_src;
		u16 c_len;

		seg = map->segs + i;
		/* skip segments that do not overlap [offset, offset+len) */
		if (seg->sa + seg->len <= offset || seg->sa >= offset + len)
			continue;

		/* clip the overlapping part of the segment into buf */
		if (seg->sa >= offset) {
			c_dst = buf + (seg->sa - offset);
			c_src = seg->c;
			if (seg->sa + seg->len <= offset + len)
				c_len = seg->len;
			else
				c_len = offset + len - seg->sa;
		} else {
			c_dst = buf;
			c_src = seg->c + (offset - seg->sa);
			if (seg->sa + seg->len >= offset + len)
				c_len = len;
			else
				c_len = seg->sa + seg->len - offset;
		}

		_rtw_memcpy(c_dst, c_src, c_len);
	}

	/* BUGFIX: ret was never set on the success path, so every valid
	 * read reported _FAIL to the caller */
	ret = _SUCCESS;

exit:
	return ret;
}
3432 
3433 /**
3434  * map_read8 - read 1 byte of map data
3435  * @map: map to read
3436  * @offset: address to read
3437  *
3438  * Returns: value of data of specified offset. map.init_value if offset is out of range
3439  */
map_read8(const struct map_t * map,u16 offset)3440 u8 map_read8(const struct map_t *map, u16 offset)
3441 {
3442 	const struct map_seg_t *seg;
3443 	u8 val = map->init_value;
3444 	int i;
3445 
3446 	if (offset + 1 > map->len) {
3447 		rtw_warn_on(1);
3448 		goto exit;
3449 	}
3450 
3451 	for (i = 0; i < map->seg_num; i++) {
3452 		seg = map->segs + i;
3453 		if (seg->sa + seg->len <= offset || seg->sa >= offset + 1)
3454 			continue;
3455 
3456 		val = *(seg->c + offset - seg->sa);
3457 		break;
3458 	}
3459 
3460 exit:
3461 	return val;
3462 }
3463 
3464 #ifdef CONFIG_RTW_MESH
/*
 * Add @addr to blacklist @blist, or re-arm its expiry if already there.
 * The entry expires @timeout_ms from now; expired entries encountered
 * during the walk are reaped opportunistically.
 * @return RTW_ALREADY if present and not yet expired, _SUCCESS if newly
 *         added (or re-armed after expiry), _FAIL on allocation failure
 */
int rtw_blacklist_add(_queue *blist, const u8 *addr, u32 timeout_ms)
{
	struct blacklist_ent *ent;
	_list *list, *head;
	u8 exist = _FALSE, timeout = _FALSE;

	enter_critical_bh(&blist->lock);

	head = &blist->queue;
	list = get_next(head);
	while (rtw_end_of_queue_search(head, list) == _FALSE) {
		ent = LIST_CONTAINOR(list, struct blacklist_ent, list);
		list = get_next(list);

		if (_rtw_memcmp(ent->addr, addr, ETH_ALEN) == _TRUE) {
			exist = _TRUE;
			if (rtw_time_after(rtw_get_current_time(), ent->exp_time))
				timeout = _TRUE;
			/* refresh the expiry window for this address */
			ent->exp_time = rtw_get_current_time()
				+ rtw_ms_to_systime(timeout_ms);
			break;
		}

		/* reap any expired entry we walk past */
		if (rtw_time_after(rtw_get_current_time(), ent->exp_time)) {
			rtw_list_delete(&ent->list);
			rtw_mfree(ent, sizeof(struct blacklist_ent));
		}
	}

	if (exist == _FALSE) {
		ent = rtw_malloc(sizeof(struct blacklist_ent));
		if (ent) {
			_rtw_memcpy(ent->addr, addr, ETH_ALEN);
			ent->exp_time = rtw_get_current_time()
				+ rtw_ms_to_systime(timeout_ms);
			rtw_list_insert_tail(&ent->list, head);
		}
	}

	exit_critical_bh(&blist->lock);

	return (exist == _TRUE && timeout == _FALSE) ? RTW_ALREADY : (ent ? _SUCCESS : _FAIL);
}
3508 
/*
 * Remove @addr from blacklist @blist; expired entries encountered
 * during the walk are reaped as a side effect.
 * @return _SUCCESS if the entry was found and removed,
 *         RTW_ALREADY if it was not present
 */
int rtw_blacklist_del(_queue *blist, const u8 *addr)
{
	struct blacklist_ent *ent = NULL;
	_list *list, *head;
	u8 exist = _FALSE;

	enter_critical_bh(&blist->lock);
	head = &blist->queue;
	list = get_next(head);
	while (rtw_end_of_queue_search(head, list) == _FALSE) {
		ent = LIST_CONTAINOR(list, struct blacklist_ent, list);
		list = get_next(list);

		if (_rtw_memcmp(ent->addr, addr, ETH_ALEN) == _TRUE) {
			rtw_list_delete(&ent->list);
			rtw_mfree(ent, sizeof(struct blacklist_ent));
			exist = _TRUE;
			break;
		}

		/* reap any expired entry we walk past */
		if (rtw_time_after(rtw_get_current_time(), ent->exp_time)) {
			rtw_list_delete(&ent->list);
			rtw_mfree(ent, sizeof(struct blacklist_ent));
		}
	}

	exit_critical_bh(&blist->lock);

	return exist == _TRUE ? _SUCCESS : RTW_ALREADY;
}
3539 
/**
 * rtw_blacklist_search - check whether @addr is currently blacklisted
 * @blist: blacklist queue, protected by blist->lock
 * @addr: MAC address (ETH_ALEN bytes) to look up
 *
 * A matching entry that has already expired is deleted and treated as
 * absent. Other expired entries seen during the walk are also freed.
 * Returns _TRUE when an unexpired entry exists, _FALSE otherwise.
 */
int rtw_blacklist_search(_queue *blist, const u8 *addr)
{
	struct blacklist_ent *cur = NULL;
	_list *pos, *head;
	u8 found = _FALSE;

	enter_critical_bh(&blist->lock);
	head = &blist->queue;
	pos = get_next(head);
	while (rtw_end_of_queue_search(head, pos) == _FALSE) {
		cur = LIST_CONTAINOR(pos, struct blacklist_ent, list);
		pos = get_next(pos); /* step forward before freeing cur */

		if (_rtw_memcmp(cur->addr, addr, ETH_ALEN) == _TRUE) {
			if (rtw_time_after(rtw_get_current_time(), cur->exp_time)) {
				/* matched but already expired: purge, report absent */
				rtw_list_delete(&cur->list);
				rtw_mfree(cur, sizeof(struct blacklist_ent));
			} else {
				found = _TRUE;
			}
			break;
		}

		/* garbage-collect other expired entries as we pass them */
		if (rtw_time_after(rtw_get_current_time(), cur->exp_time)) {
			rtw_list_delete(&cur->list);
			rtw_mfree(cur, sizeof(struct blacklist_ent));
		}
	}
	exit_critical_bh(&blist->lock);

	return found;
}
3572 
/**
 * rtw_blacklist_flush - delete every entry of blacklist @blist
 * @blist: blacklist queue, protected by blist->lock
 *
 * The whole list is detached onto a local head while holding the lock,
 * then freed outside the critical section to keep the locked region short.
 */
void rtw_blacklist_flush(_queue *blist)
{
	struct blacklist_ent *cur;
	_list *pos, *head;
	_list detached;

	_rtw_init_listhead(&detached);

	/* detach under lock, free afterwards */
	enter_critical_bh(&blist->lock);
	rtw_list_splice_init(&blist->queue, &detached);
	exit_critical_bh(&blist->lock);

	head = &detached;
	pos = get_next(head);
	while (rtw_end_of_queue_search(head, pos) == _FALSE) {
		cur = LIST_CONTAINOR(pos, struct blacklist_ent, list);
		pos = get_next(pos); /* step forward before freeing cur */
		rtw_list_delete(&cur->list);
		rtw_mfree(cur, sizeof(struct blacklist_ent));
	}
}
3594 
/**
 * dump_blacklist - print every blacklist entry to the selected output
 * @sel: output selector for RTW_PRINT_SEL
 * @blist: blacklist queue, protected by blist->lock
 * @title: optional heading printed before the entries (may be NULL)
 *
 * Nothing is printed (not even the title) when the list is empty.
 * Expired entries are shown as "expired"; live ones with their
 * remaining lifetime in milliseconds.
 */
void dump_blacklist(void *sel, _queue *blist, const char *title)
{
	struct blacklist_ent *cur = NULL;
	_list *pos, *head;

	enter_critical_bh(&blist->lock);
	head = &blist->queue;
	pos = get_next(head);

	if (rtw_end_of_queue_search(head, pos) == _FALSE) {
		if (title)
			RTW_PRINT_SEL(sel, "%s:\n", title);

		do {
			cur = LIST_CONTAINOR(pos, struct blacklist_ent, list);
			pos = get_next(pos);

			if (rtw_time_after(rtw_get_current_time(), cur->exp_time))
				RTW_PRINT_SEL(sel, MAC_FMT" expired\n", MAC_ARG(cur->addr));
			else
				RTW_PRINT_SEL(sel, MAC_FMT" %u\n", MAC_ARG(cur->addr)
					, rtw_get_remaining_time_ms(cur->exp_time));
		} while (rtw_end_of_queue_search(head, pos) == _FALSE);
	}
	exit_critical_bh(&blist->lock);
}
3622 #endif
3623 
3624 /**
3625 * is_null -
3626 *
3627 * Return	TRUE if c is null character
3628 *		FALSE otherwise.
3629 */
is_null(char c)3630 inline BOOLEAN is_null(char c)
3631 {
3632 	if (c == '\0')
3633 		return _TRUE;
3634 	else
3635 		return _FALSE;
3636 }
3637 
is_all_null(char * c,int len)3638 inline BOOLEAN is_all_null(char *c, int len)
3639 {
3640 	for (; len > 0; len--)
3641 		if (c[len - 1] != '\0')
3642 			return _FALSE;
3643 
3644 	return _TRUE;
3645 }
3646 
3647 /**
3648 * is_eol -
3649 *
3650 * Return	TRUE if c is represent for EOL (end of line)
3651 *		FALSE otherwise.
3652 */
is_eol(char c)3653 inline BOOLEAN is_eol(char c)
3654 {
3655 	if (c == '\r' || c == '\n')
3656 		return _TRUE;
3657 	else
3658 		return _FALSE;
3659 }
3660 
3661 /**
3662 * is_space -
3663 *
3664 * Return	TRUE if c is represent for space
3665 *		FALSE otherwise.
3666 */
is_space(char c)3667 inline BOOLEAN is_space(char c)
3668 {
3669 	if (c == ' ' || c == '\t')
3670 		return _TRUE;
3671 	else
3672 		return _FALSE;
3673 }
3674 
3675 /**
3676 * is_decimal -
3677 *
3678 * Return	TRUE if chTmp is represent for decimal digit
3679 *		FALSE otherwise.
3680 */
is_decimal(char chTmp)3681 inline BOOLEAN is_decimal(char chTmp)
3682 {
3683 	if ((chTmp >= '0' && chTmp <= '9'))
3684 		return _TRUE;
3685 	else
3686 		return _FALSE;
3687 }
3688 
3689 /**
3690 * IsHexDigit -
3691 *
3692 * Return	TRUE if chTmp is represent for hex digit
3693 *		FALSE otherwise.
3694 */
IsHexDigit(char chTmp)3695 inline BOOLEAN IsHexDigit(char chTmp)
3696 {
3697 	if ((chTmp >= '0' && chTmp <= '9') ||
3698 		(chTmp >= 'a' && chTmp <= 'f') ||
3699 		(chTmp >= 'A' && chTmp <= 'F'))
3700 		return _TRUE;
3701 	else
3702 		return _FALSE;
3703 }
3704 
3705 /**
3706 * is_alpha -
3707 *
3708 * Return	TRUE if chTmp is represent for alphabet
3709 *		FALSE otherwise.
3710 */
is_alpha(char chTmp)3711 inline BOOLEAN is_alpha(char chTmp)
3712 {
3713 	if ((chTmp >= 'a' && chTmp <= 'z') ||
3714 		(chTmp >= 'A' && chTmp <= 'Z'))
3715 		return _TRUE;
3716 	else
3717 		return _FALSE;
3718 }
3719 
/* alpha_to_upper - return the upper-case form of an ASCII lower-case
 * letter; any other character is returned unchanged. */
inline char alpha_to_upper(char c)
{
	return (c >= 'a' && c <= 'z') ? (char)('A' + (c - 'a')) : c;
}
3726 
/* hex2num_i - convert one hex digit to its value 0..15, or -1 when
 * the character is not a valid hexadecimal digit. */
int hex2num_i(char c)
{
	if ('0' <= c && c <= '9')
		return c - '0';
	if ('a' <= c && c <= 'f')
		return (c - 'a') + 10;
	if ('A' <= c && c <= 'F')
		return (c - 'A') + 10;

	return -1;
}
3737 
/* hex2byte_i - parse the two hex digits at hex[0] and hex[1] into a
 * byte value 0..255, or -1 on an invalid digit. hex[1] is only read
 * when hex[0] is a valid digit. */
int hex2byte_i(const char *hex)
{
	int hi, lo;

	hi = hex2num_i(hex[0]);
	if (hi < 0)
		return -1;

	lo = hex2num_i(hex[1]);
	if (lo < 0)
		return -1;

	return (hi << 4) | lo;
}
3749 
/* hexstr2bin - convert a hex string into @len binary bytes stored in
 * @buf (the string must hold at least 2*len hex digits).
 * Returns 0 on success, -1 when an invalid digit pair is found. */
int hexstr2bin(const char *hex, u8 *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		int byte = hex2byte_i(&hex[i * 2]);

		if (byte < 0)
			return -1;
		buf[i] = byte;
	}

	return 0;
}
3766 
3767 /**
3768  * hwaddr_aton - Convert ASCII string to MAC address
3769  * @txt: MAC address as a string (e.g., "00:11:22:33:44:55")
3770  * @addr: Buffer for the MAC address (ETH_ALEN = 6 bytes)
3771  * Returns: 0 on success, -1 on failure (e.g., string not a MAC address)
3772  */
hwaddr_aton_i(const char * txt,u8 * addr)3773 int hwaddr_aton_i(const char *txt, u8 *addr)
3774 {
3775 	int i;
3776 
3777 	for (i = 0; i < 6; i++) {
3778 		int a, b;
3779 
3780 		a = hex2num_i(*txt++);
3781 		if (a < 0)
3782 			return -1;
3783 		b = hex2num_i(*txt++);
3784 		if (b < 0)
3785 			return -1;
3786 		*addr++ = (a << 4) | b;
3787 		if (i < 5 && *txt++ != ':')
3788 			return -1;
3789 	}
3790 
3791 	return 0;
3792 }
3793 
3794