/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Linux OS Independent Layer
 *
 * Copyright (C) 1999-2019, Broadcom.
 *
 *      Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 *      As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module.  An independent module is a module which is not
 * derived from this software.  The special exception does not apply to any
 * modifications of the software.
 *
 *      Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: linux_osl.c 815919 2019-04-22 09:06:50Z $
 */

#define LINUX_PORT

#include <typedefs.h>
#include <bcmendian.h>
#include <linuxver.h>
#include <bcmdefs.h>

#if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)
#include <asm/cacheflush.h>
#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */

#include <linux/random.h>

#include <osl.h>
#include <bcmutils.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <pcicfg.h>
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 8, 0))
#include <asm-generic/pci-dma-compat.h>
#endif

#ifdef BCM_SECURE_DMA
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/printk.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>
#include <asm/io.h>
#include <linux/skbuff.h>
#include <stbutils.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include <asm/memory.h>
#endif /* BCM_SECURE_DMA */

#include <linux/fs.h>

#if defined(STB)
#include <linux/spinlock.h>
extern spinlock_t l2x0_reg_lock;
#endif // endif

#ifdef BCM_OBJECT_TRACE
#include <bcmutils.h>
#endif /* BCM_OBJECT_TRACE */
#include "linux_osl_priv.h"

#define PCI_CFG_RETRY		10

#define DUMPBUFSZ 1024

#ifdef BCM_SECURE_DMA
static void * osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size,
	bool iscache, bool isdecr);
static void osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size);
static int osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max,
	sec_mem_elem_t **list);
static void osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max,
	void *sec_list_base);
static sec_mem_elem_t * osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size,
	int direction, struct sec_cma_info *ptr_cma_info, uint offset);
static void osl_sec_dma_free_mem_elem(osl_t *osh, sec_mem_elem_t *sec_mem_elem);
static void osl_sec_dma_init_consistent(osl_t *osh);
static void *osl_sec_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits,
	ulong *pap);
static void osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa);
#endif /* BCM_SECURE_DMA */

/* PCMCIA attribute space access macros */

uint32 g_assert_type = 0; /* By Default Kernel Panic */

module_param(g_assert_type, int, 0);
#ifdef	BCM_SECURE_DMA
#define	SECDMA_MODULE_PARAMS	0
#define	SECDMA_EXT_FILE	1
unsigned long secdma_addr = 0;
unsigned long secdma_addr2 = 0;
u32 secdma_size = 0;
u32 secdma_size2 = 0;
module_param(secdma_addr, ulong, 0);
module_param(secdma_size, int, 0);
module_param(secdma_addr2, ulong, 0);
module_param(secdma_size2, int, 0);
static int secdma_found = 0;
#endif /* BCM_SECURE_DMA */

#ifdef USE_DMA_LOCK
static void osl_dma_lock(osl_t *osh);
static void osl_dma_unlock(osl_t *osh);
static void osl_dma_lock_init(osl_t *osh);

#define DMA_LOCK(osh)		osl_dma_lock(osh)
#define DMA_UNLOCK(osh)		osl_dma_unlock(osh)
#define DMA_LOCK_INIT(osh)	osl_dma_lock_init(osh)
#else
#define DMA_LOCK(osh)		do { /* noop */ } while(0)
#define DMA_UNLOCK(osh)		do { /* noop */ } while(0)
#define DMA_LOCK_INIT(osh)	do { /* noop */ } while(0)
#endif /* USE_DMA_LOCK */

static int16 linuxbcmerrormap[] =
{	0,				/* 0 */
	-EINVAL,		/* BCME_ERROR */
	-EINVAL,		/* BCME_BADARG */
	-EINVAL,		/* BCME_BADOPTION */
	-EINVAL,		/* BCME_NOTUP */
	-EINVAL,		/* BCME_NOTDOWN */
	-EINVAL,		/* BCME_NOTAP */
	-EINVAL,		/* BCME_NOTSTA */
	-EINVAL,		/* BCME_BADKEYIDX */
	-EINVAL,		/* BCME_RADIOOFF */
	-EINVAL,		/* BCME_NOTBANDLOCKED */
	-EINVAL,		/* BCME_NOCLK */
	-EINVAL,		/* BCME_BADRATESET */
	-EINVAL,		/* BCME_BADBAND */
	-E2BIG,			/* BCME_BUFTOOSHORT */
	-E2BIG,			/* BCME_BUFTOOLONG */
	-EBUSY,			/* BCME_BUSY */
	-EINVAL,		/* BCME_NOTASSOCIATED */
	-EINVAL,		/* BCME_BADSSIDLEN */
	-EINVAL,		/* BCME_OUTOFRANGECHAN */
	-EINVAL,		/* BCME_BADCHAN */
	-EFAULT,		/* BCME_BADADDR */
	-ENOMEM,		/* BCME_NORESOURCE */
	-EOPNOTSUPP,		/* BCME_UNSUPPORTED */
	-EMSGSIZE,		/* BCME_BADLENGTH */
	-EINVAL,		/* BCME_NOTREADY */
	-EPERM,			/* BCME_EPERM */
	-ENOMEM,		/* BCME_NOMEM */
	-EINVAL,		/* BCME_ASSOCIATED */
	-ERANGE,		/* BCME_RANGE */
	-EINVAL,		/* BCME_NOTFOUND */
	-EINVAL,		/* BCME_WME_NOT_ENABLED */
	-EINVAL,		/* BCME_TSPEC_NOTFOUND */
	-EINVAL,		/* BCME_ACM_NOTSUPPORTED */
	-EINVAL,		/* BCME_NOT_WME_ASSOCIATION */
	-EIO,			/* BCME_SDIO_ERROR */
	-ENODEV,		/* BCME_DONGLE_DOWN */
	-EINVAL,		/* BCME_VERSION */
	-EIO,			/* BCME_TXFAIL */
	-EIO,			/* BCME_RXFAIL */
	-ENODEV,		/* BCME_NODEVICE */
	-EINVAL,		/* BCME_NMODE_DISABLED */
	-ENODATA,		/* BCME_NONRESIDENT */
	-EINVAL,		/* BCME_SCANREJECT */
	-EINVAL,		/* BCME_USAGE_ERROR */
	-EIO,			/* BCME_IOCTL_ERROR */
	-EIO,			/* BCME_SERIAL_PORT_ERR */
	-EOPNOTSUPP,		/* BCME_DISABLED, BCME_NOTENABLED */
	-EIO,			/* BCME_DECERR */
	-EIO,			/* BCME_ENCERR */
	-EIO,			/* BCME_MICERR */
	-ERANGE,		/* BCME_REPLAY */
	-EINVAL,		/* BCME_IE_NOTFOUND */
	-EINVAL,		/* BCME_DATA_NOTFOUND */
	-EINVAL,		/* BCME_NOT_GC */
	-EINVAL,		/* BCME_PRS_REQ_FAILED */
	-EINVAL,		/* BCME_NO_P2P_SE */
	-EINVAL,		/* BCME_NOA_PND */
	-EINVAL,		/* BCME_FRAG_Q_FAILED */
	-EINVAL,		/* BCME_GET_AF_FAILED */
	-EINVAL,		/* BCME_MSCH_NOTREADY */
	-EINVAL,		/* BCME_IOV_LAST_CMD */
	-EINVAL,		/* BCME_MINIPMU_CAL_FAIL */
	-EINVAL,		/* BCME_RCAL_FAIL */
	-EINVAL,		/* BCME_LPF_RCCAL_FAIL */
	-EINVAL,		/* BCME_DACBUF_RCCAL_FAIL */
	-EINVAL,		/* BCME_VCOCAL_FAIL */
	-EINVAL,		/* BCME_BANDLOCKED */
	-EINVAL,		/* BCME_DNGL_DEVRESET */

/* When a new error code is added to bcmutils.h, add the OS-specific
 * error translation here as well
 */
/* check if BCME_LAST changed since the last time this table was updated */
#if BCME_LAST != -68
#error "You need to add an OS error translation in the linuxbcmerrormap \
	for the new error code defined in bcmutils.h"
#endif // endif
};
uint lmtest = FALSE;

#ifdef DHD_MAP_LOGGING
#define DHD_MAP_LOG_SIZE 2048

typedef struct dhd_map_item {
	dmaaddr_t pa;		/* DMA address (physical) */
	uint64 ts_nsec;		/* timestamp: nsec */
	uint32 size;		/* mapping size */
	uint8 rsvd[4];		/* reserved for future use */
} dhd_map_item_t;

typedef struct dhd_map_record {
	uint32 items;		/* number of total items */
	uint32 idx;		/* current index of metadata */
	dhd_map_item_t map[0];	/* metadata storage */
} dhd_map_log_t;

void
osl_dma_map_dump(osl_t *osh)
{
	dhd_map_log_t *map_log, *unmap_log;
	uint64 ts_sec, ts_usec;

	map_log = (dhd_map_log_t *)(osh->dhd_map_log);
	unmap_log = (dhd_map_log_t *)(osh->dhd_unmap_log);
	osl_get_localtime(&ts_sec, &ts_usec);

	if (map_log && unmap_log) {
		printk("%s: map_idx=%d unmap_idx=%d "
			"current time=[%5lu.%06lu]\n", __FUNCTION__,
			map_log->idx, unmap_log->idx, (unsigned long)ts_sec,
			(unsigned long)ts_usec);
		printk("%s: dhd_map_log(pa)=0x%llx size=%d,"
			" dma_unmap_log(pa)=0x%llx size=%d\n", __FUNCTION__,
			(uint64)__virt_to_phys((ulong)(map_log->map)),
			(uint32)(sizeof(dhd_map_item_t) * map_log->items),
			(uint64)__virt_to_phys((ulong)(unmap_log->map)),
			(uint32)(sizeof(dhd_map_item_t) * unmap_log->items));
	}
}

static void *
osl_dma_map_log_init(uint32 item_len)
{
	dhd_map_log_t *map_log;
	gfp_t flags;
	uint32 alloc_size = (uint32)(sizeof(dhd_map_log_t) +
		(item_len * sizeof(dhd_map_item_t)));

	flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
	map_log = (dhd_map_log_t *)kmalloc(alloc_size, flags);
	if (map_log) {
		memset(map_log, 0, alloc_size);
		map_log->items = item_len;
		map_log->idx = 0;
	}

	return (void *)map_log;
}

static void
osl_dma_map_log_deinit(osl_t *osh)
{
	if (osh->dhd_map_log) {
		kfree(osh->dhd_map_log);
		osh->dhd_map_log = NULL;
	}

	if (osh->dhd_unmap_log) {
		kfree(osh->dhd_unmap_log);
		osh->dhd_unmap_log = NULL;
	}
}

static void
osl_dma_map_logging(osl_t *osh, void *handle, dmaaddr_t pa, uint32 len)
{
	dhd_map_log_t *log = (dhd_map_log_t *)handle;
	uint32 idx;

	if (log == NULL) {
		printk("%s: log is NULL\n", __FUNCTION__);
		return;
	}

	idx = log->idx;
	log->map[idx].ts_nsec = osl_localtime_ns();
	log->map[idx].pa = pa;
	log->map[idx].size = len;
	log->idx = (idx + 1) % log->items;
}
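
/*
 * Note: the map/unmap logs are fixed-size ring buffers.  log->idx advances
 * modulo log->items (DHD_MAP_LOG_SIZE == 2048 entries), so once a buffer
 * wraps, the oldest record is overwritten and osl_dma_map_dump() reports
 * only the most recent DHD_MAP_LOG_SIZE mappings.
 */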
#endif /* DHD_MAP_LOGGING */

/* translate bcmerrors into linux errors */
int
osl_error(int bcmerror)
{
	if (bcmerror > 0)
		bcmerror = 0;
	else if (bcmerror < BCME_LAST)
		bcmerror = BCME_ERROR;

	/* Array bounds covered by ASSERT in osl_attach */
	return linuxbcmerrormap[-bcmerror];
}
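
/*
 * Example: osl_error(BCME_NOMEM) returns -ENOMEM and osl_error(BCME_RANGE)
 * returns -ERANGE.  Positive "error" values are treated as success (0), and
 * anything below BCME_LAST degrades to the generic BCME_ERROR mapping
 * (-EINVAL), so the table lookup above can never index out of bounds.
 */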
osl_t *
osl_attach(void *pdev, uint bustype, bool pkttag)
{
	void **osl_cmn = NULL;
	osl_t *osh;
	gfp_t flags;
#ifdef BCM_SECURE_DMA
	u32 secdma_memsize;
#endif // endif

	flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
	if (!(osh = kmalloc(sizeof(osl_t), flags)))
		return osh;

	ASSERT(osh);

	bzero(osh, sizeof(osl_t));

	if (osl_cmn == NULL || *osl_cmn == NULL) {
		if (!(osh->cmn = kmalloc(sizeof(osl_cmn_t), flags))) {
			kfree(osh);
			return NULL;
		}
		bzero(osh->cmn, sizeof(osl_cmn_t));
		if (osl_cmn)
			*osl_cmn = osh->cmn;
		atomic_set(&osh->cmn->malloced, 0);
		osh->cmn->dbgmem_list = NULL;
		spin_lock_init(&(osh->cmn->dbgmem_lock));

		spin_lock_init(&(osh->cmn->pktalloc_lock));

	} else {
		osh->cmn = *osl_cmn;
	}
	atomic_add(1, &osh->cmn->refcount);

	bcm_object_trace_init();

	/* Check that error map has the right number of entries in it */
	ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));

	osh->failed = 0;
	osh->pdev = pdev;
	osh->pub.pkttag = pkttag;
	osh->bustype = bustype;
	osh->magic = OS_HANDLE_MAGIC;
#ifdef BCM_SECURE_DMA

	if ((secdma_addr != 0) && (secdma_size != 0)) {
		printk("linux_osl.c: Buffer info passed via module params, using it.\n");
		if (secdma_found == 0) {
			osh->contig_base_alloc = (phys_addr_t)secdma_addr;
			secdma_memsize = secdma_size;
		} else if (secdma_found == 1) {
			osh->contig_base_alloc = (phys_addr_t)secdma_addr2;
			secdma_memsize = secdma_size2;
		} else {
			printk("linux_osl.c secdma: secDMA instances %d \n", secdma_found);
			kfree(osh);
			return NULL;
		}
		osh->contig_base = (phys_addr_t)osh->contig_base_alloc;
		printf("linux_osl.c: secdma_cma_size = 0x%x\n", secdma_memsize);
		printf("linux_osl.c: secdma_cma_addr = 0x%x \n",
			(unsigned int)osh->contig_base_alloc);
		osh->stb_ext_params = SECDMA_MODULE_PARAMS;
	}
	else if (stbpriv_init(osh) == 0) {
		printk("linux_osl.c: stbpriv.txt found. Get buffer info.\n");
		if (secdma_found == 0) {
			osh->contig_base_alloc =
				(phys_addr_t)bcm_strtoul(stbparam_get("secdma_cma_addr"), NULL, 0);
			secdma_memsize = bcm_strtoul(stbparam_get("secdma_cma_size"), NULL, 0);
		} else if (secdma_found == 1) {
			osh->contig_base_alloc =
				(phys_addr_t)bcm_strtoul(stbparam_get("secdma_cma_addr2"), NULL, 0);
			secdma_memsize = bcm_strtoul(stbparam_get("secdma_cma_size2"), NULL, 0);
		} else {
			printk("linux_osl.c secdma: secDMA instances %d \n", secdma_found);
			kfree(osh);
			return NULL;
		}
		osh->contig_base = (phys_addr_t)osh->contig_base_alloc;
		printf("linux_osl.c: secdma_cma_size = 0x%x\n", secdma_memsize);
		printf("linux_osl.c: secdma_cma_addr = 0x%x \n",
			(unsigned int)osh->contig_base_alloc);
		osh->stb_ext_params = SECDMA_EXT_FILE;
	}
	else {
		printk("linux_osl.c: secDMA no longer supports internal buffer allocation.\n");
		kfree(osh);
		return NULL;
	}
	secdma_found++;
	osh->contig_base_alloc_coherent_va = osl_sec_dma_ioremap(osh,
		phys_to_page((u32)osh->contig_base_alloc),
		CMA_DMA_DESC_MEMBLOCK, FALSE, TRUE);

	if (osh->contig_base_alloc_coherent_va == NULL) {
		if (osh->cmn)
			kfree(osh->cmn);
		kfree(osh);
		return NULL;
	}
	osh->contig_base_coherent_va = osh->contig_base_alloc_coherent_va;
	osh->contig_base_alloc_coherent = osh->contig_base_alloc;
	osl_sec_dma_init_consistent(osh);

	osh->contig_base_alloc += CMA_DMA_DESC_MEMBLOCK;

	osh->contig_base_alloc_va = osl_sec_dma_ioremap(osh,
		phys_to_page((u32)osh->contig_base_alloc), CMA_DMA_DATA_MEMBLOCK, TRUE, FALSE);
	if (osh->contig_base_alloc_va == NULL) {
		osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
		if (osh->cmn)
			kfree(osh->cmn);
		kfree(osh);
		return NULL;
	}
	osh->contig_base_va = osh->contig_base_alloc_va;

#ifdef NOT_YET
	/*
	* osl_sec_dma_init_elem_mem_block(osh, CMA_BUFSIZE_512, CMA_BUFNUM, &osh->sec_list_512);
	* osh->sec_list_base_512 = osh->sec_list_512;
	* osl_sec_dma_init_elem_mem_block(osh, CMA_BUFSIZE_2K, CMA_BUFNUM, &osh->sec_list_2048);
	* osh->sec_list_base_2048 = osh->sec_list_2048;
	*/
#endif // endif
	if (BCME_OK != osl_sec_dma_init_elem_mem_block(osh,
		CMA_BUFSIZE_4K, CMA_BUFNUM, &osh->sec_list_4096)) {
		osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
		osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_DMA_DATA_MEMBLOCK);
		if (osh->cmn)
			kfree(osh->cmn);
		kfree(osh);
		return NULL;
	}
	osh->sec_list_base_4096 = osh->sec_list_4096;

#endif /* BCM_SECURE_DMA */

	switch (bustype) {
		case PCI_BUS:
		case SI_BUS:
		case PCMCIA_BUS:
			osh->pub.mmbus = TRUE;
			break;
		case JTAG_BUS:
		case SDIO_BUS:
		case USB_BUS:
		case SPI_BUS:
		case RPC_BUS:
			osh->pub.mmbus = FALSE;
			break;
		default:
			ASSERT(FALSE);
			break;
	}

	DMA_LOCK_INIT(osh);

#ifdef DHD_MAP_LOGGING
	osh->dhd_map_log = osl_dma_map_log_init(DHD_MAP_LOG_SIZE);
	if (osh->dhd_map_log == NULL) {
		printk("%s: Failed to alloc dhd_map_log\n", __FUNCTION__);
	}

	osh->dhd_unmap_log = osl_dma_map_log_init(DHD_MAP_LOG_SIZE);
	if (osh->dhd_unmap_log == NULL) {
		printk("%s: Failed to alloc dhd_unmap_log\n", __FUNCTION__);
	}
#endif /* DHD_MAP_LOGGING */

	return osh;
}

void osl_set_bus_handle(osl_t *osh, void *bus_handle)
{
	osh->bus_handle = bus_handle;
}

void* osl_get_bus_handle(osl_t *osh)
{
	return osh->bus_handle;
}

#if defined(BCM_BACKPLANE_TIMEOUT)
void osl_set_bpt_cb(osl_t *osh, void *bpt_cb, void *bpt_ctx)
{
	if (osh) {
		osh->bpt_cb = (bpt_cb_fn)bpt_cb;
		osh->sih = bpt_ctx;
	}
}
#endif	/* BCM_BACKPLANE_TIMEOUT */

void
osl_detach(osl_t *osh)
{
	if (osh == NULL)
		return;

#ifdef BCM_SECURE_DMA
	if (osh->stb_ext_params == SECDMA_EXT_FILE)
		stbpriv_exit(osh);
#ifdef NOT_YET
	osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_512, CMA_BUFNUM, osh->sec_list_base_512);
	osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_2K, CMA_BUFNUM, osh->sec_list_base_2048);
#endif /* NOT_YET */
	osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_4K, CMA_BUFNUM, osh->sec_list_base_4096);
	osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
	osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_DMA_DATA_MEMBLOCK);
	secdma_found--;
#endif /* BCM_SECURE_DMA */

	bcm_object_trace_deinit();

#ifdef DHD_MAP_LOGGING
	osl_dma_map_log_deinit(osh);
#endif /* DHD_MAP_LOGGING */

	ASSERT(osh->magic == OS_HANDLE_MAGIC);
	atomic_sub(1, &osh->cmn->refcount);
	if (atomic_read(&osh->cmn->refcount) == 0) {
			kfree(osh->cmn);
	}
	kfree(osh);
}

/* APIs to set/get specific quirks in OSL layer */
void BCMFASTPATH
osl_flag_set(osl_t *osh, uint32 mask)
{
	osh->flags |= mask;
}

void
osl_flag_clr(osl_t *osh, uint32 mask)
{
	osh->flags &= ~mask;
}

#if defined(STB)
inline bool BCMFASTPATH
#else
bool
#endif // endif
osl_is_flag_set(osl_t *osh, uint32 mask)
{
	return (osh->flags & mask);
}

#if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)) || \
	defined(STB_SOC_WIFI)

inline int BCMFASTPATH
osl_arch_is_coherent(void)
{
	return 0;
}

inline int BCMFASTPATH
osl_acp_war_enab(void)
{
	return 0;
}

inline void BCMFASTPATH
osl_cache_flush(void *va, uint size)
{

	if (size > 0)
#ifdef STB_SOC_WIFI
		dma_sync_single_for_device(OSH_NULL, virt_to_phys(va), size, DMA_TX);
#else /* STB_SOC_WIFI */
		dma_sync_single_for_device(OSH_NULL, virt_to_dma(OSH_NULL, va), size,
			DMA_TO_DEVICE);
#endif /* STB_SOC_WIFI */
}

inline void BCMFASTPATH
osl_cache_inv(void *va, uint size)
{

#ifdef STB_SOC_WIFI
	dma_sync_single_for_cpu(OSH_NULL, virt_to_phys(va), size, DMA_RX);
#else /* STB_SOC_WIFI */
	dma_sync_single_for_cpu(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_FROM_DEVICE);
#endif /* STB_SOC_WIFI */
}

inline void BCMFASTPATH
osl_prefetch(const void *ptr)
{
#if !defined(STB_SOC_WIFI)
	__asm__ __volatile__("pld\t%0" :: "o"(*(const char *)ptr) : "cc");
#endif // endif
}

#endif // endif

uint32
osl_pci_read_config(osl_t *osh, uint offset, uint size)
{
	uint val = 0;
	uint retry = PCI_CFG_RETRY;

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	/* only 4byte access supported */
	ASSERT(size == 4);

	do {
		pci_read_config_dword(osh->pdev, offset, &val);
		if (val != 0xffffffff)
			break;
	} while (retry--);

	return (val);
}

void
osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val)
{
	uint retry = PCI_CFG_RETRY;

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	/* only 4byte access supported */
	ASSERT(size == 4);

	do {
		pci_write_config_dword(osh->pdev, offset, val);
		if (offset != PCI_BAR0_WIN)
			break;
		if (osl_pci_read_config(osh, offset, size) == val)
			break;
	} while (retry--);

}
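
/*
 * Both config-space accessors retry up to PCI_CFG_RETRY times: a read of
 * all-ones is typically how PCI signals a master abort (device not
 * responding), and a PCI_BAR0_WIN write is read back until the value sticks.
 */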

/* return bus # for the pci device pointed by osh->pdev */
uint
osl_pci_bus(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

#if defined(__ARM_ARCH_7A__)
	return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
#else
	return ((struct pci_dev *)osh->pdev)->bus->number;
#endif // endif
}

/* return slot # for the pci device pointed by osh->pdev */
uint
osl_pci_slot(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

#if defined(__ARM_ARCH_7A__)
	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn) + 1;
#else
	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
#endif // endif
}

/* return domain # for the pci device pointed by osh->pdev */
uint
osl_pcie_domain(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

	return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
}

/* return bus # for the pci device pointed by osh->pdev */
uint
osl_pcie_bus(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

	return ((struct pci_dev *)osh->pdev)->bus->number;
}

/* return the pci device pointed by osh->pdev */
struct pci_dev *
osl_pci_device(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

	return osh->pdev;
}

static void
osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write)
{
}

void
osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size)
{
	osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE);
}

void
osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size)
{
	osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE);
}

void *
osl_malloc(osl_t *osh, uint size)
{
	void *addr;
	gfp_t flags;

	/* only ASSERT if osh is defined */
	if (osh)
		ASSERT(osh->magic == OS_HANDLE_MAGIC);
#ifdef CONFIG_DHD_USE_STATIC_BUF
	if (bcm_static_buf)
	{
		unsigned long irq_flags;
		int i = 0;
		if ((size >= PAGE_SIZE)&&(size <= STATIC_BUF_SIZE))
		{
			spin_lock_irqsave(&bcm_static_buf->static_lock, irq_flags);

			for (i = 0; i < STATIC_BUF_MAX_NUM; i++)
			{
				if (bcm_static_buf->buf_use[i] == 0)
					break;
			}

			if (i == STATIC_BUF_MAX_NUM)
			{
				spin_unlock_irqrestore(&bcm_static_buf->static_lock, irq_flags);
				printk("all static buff in use!\n");
				goto original;
			}

			bcm_static_buf->buf_use[i] = 1;
			spin_unlock_irqrestore(&bcm_static_buf->static_lock, irq_flags);

			bzero(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i, size);
			if (osh)
				atomic_add(size, &osh->cmn->malloced);

			return ((void *)(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i));
		}
	}
original:
#endif /* CONFIG_DHD_USE_STATIC_BUF */

	flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
	if ((addr = kmalloc(size, flags)) == NULL) {
		if (osh)
			osh->failed++;
		return (NULL);
	}
	if (osh && osh->cmn)
		atomic_add(size, &osh->cmn->malloced);

	return (addr);
}

void *
osl_mallocz(osl_t *osh, uint size)
{
	void *ptr;

	ptr = osl_malloc(osh, size);

	if (ptr != NULL) {
		bzero(ptr, size);
	}

	return ptr;
}

void
osl_mfree(osl_t *osh, void *addr, uint size)
{
#ifdef CONFIG_DHD_USE_STATIC_BUF
	unsigned long flags;

	if (bcm_static_buf)
	{
		if ((addr > (void *)bcm_static_buf) && ((unsigned char *)addr
			<= ((unsigned char *)bcm_static_buf + STATIC_BUF_TOTAL_LEN)))
		{
			int buf_idx = 0;

			buf_idx = ((unsigned char *)addr - bcm_static_buf->buf_ptr)/STATIC_BUF_SIZE;

			spin_lock_irqsave(&bcm_static_buf->static_lock, flags);
			bcm_static_buf->buf_use[buf_idx] = 0;
			spin_unlock_irqrestore(&bcm_static_buf->static_lock, flags);

			if (osh && osh->cmn) {
				ASSERT(osh->magic == OS_HANDLE_MAGIC);
				atomic_sub(size, &osh->cmn->malloced);
			}
			return;
		}
	}
#endif /* CONFIG_DHD_USE_STATIC_BUF */
	if (osh && osh->cmn) {
		ASSERT(osh->magic == OS_HANDLE_MAGIC);

		ASSERT(size <= osl_malloced(osh));

		atomic_sub(size, &osh->cmn->malloced);
	}
	kfree(addr);
}

void *
osl_vmalloc(osl_t *osh, uint size)
{
	void *addr;

	/* only ASSERT if osh is defined */
	if (osh)
		ASSERT(osh->magic == OS_HANDLE_MAGIC);
	if ((addr = vmalloc(size)) == NULL) {
		if (osh)
			osh->failed++;
		return (NULL);
	}
	if (osh && osh->cmn)
		atomic_add(size, &osh->cmn->malloced);

	return (addr);
}

void *
osl_vmallocz(osl_t *osh, uint size)
{
	void *ptr;

	ptr = osl_vmalloc(osh, size);

	if (ptr != NULL) {
		bzero(ptr, size);
	}

	return ptr;
}

void
osl_vmfree(osl_t *osh, void *addr, uint size)
{
	if (osh && osh->cmn) {
		ASSERT(osh->magic == OS_HANDLE_MAGIC);

		ASSERT(size <= osl_malloced(osh));

		atomic_sub(size, &osh->cmn->malloced);
	}
	vfree(addr);
}

uint
osl_check_memleak(osl_t *osh)
{
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	if (atomic_read(&osh->cmn->refcount) == 1)
		return (atomic_read(&osh->cmn->malloced));
	else
		return 0;
}

uint
osl_malloced(osl_t *osh)
{
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	return (atomic_read(&osh->cmn->malloced));
}

uint
osl_malloc_failed(osl_t *osh)
{
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	return (osh->failed);
}

uint
osl_dma_consistent_align(void)
{
	return (PAGE_SIZE);
}

void*
osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, dmaaddr_t *pap)
{
	void *va;
	uint16 align = (1 << align_bits);
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align))
		size += align;
	*alloced = size;

#ifndef	BCM_SECURE_DMA
#if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)) || \
	defined(STB_SOC_WIFI)
	va = kmalloc(size, GFP_ATOMIC | __GFP_ZERO);
	if (va)
		*pap = (ulong)__virt_to_phys((ulong)va);
#else
	{
		dma_addr_t pap_lin;
		struct pci_dev *hwdev = osh->pdev;
		gfp_t flags;
#ifdef DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL
		flags = GFP_ATOMIC;
#else
		flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
#endif /* DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL */
		va = dma_alloc_coherent(&hwdev->dev, size, &pap_lin, flags);
#ifdef BCMDMA64OSL
		PHYSADDRLOSET(*pap, pap_lin & 0xffffffff);
		PHYSADDRHISET(*pap, (pap_lin >> 32) & 0xffffffff);
#else
		*pap = (dmaaddr_t)pap_lin;
#endif /* BCMDMA64OSL */
	}
#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
#else
	va = osl_sec_dma_alloc_consistent(osh, size, align_bits, pap);
#endif /* BCM_SECURE_DMA */
	return va;
}
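
/*
 * Alignment note: the buffer is only guaranteed DMA_CONSISTENT_ALIGN (page)
 * alignment.  When the requested (1 << align_bits) alignment is stricter,
 * the allocation is padded by 'align' bytes so the caller can align within
 * it, and *alloced returns the padded size so the matching
 * osl_dma_free_consistent() frees the real length.
 */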

void
osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa)
{
#ifdef BCMDMA64OSL
	dma_addr_t paddr;
#endif /* BCMDMA64OSL */
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

#ifndef BCM_SECURE_DMA
#if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)) || \
	defined(STB_SOC_WIFI)
	kfree(va);
#else
#ifdef BCMDMA64OSL
	PHYSADDRTOULONG(pa, paddr);
	pci_free_consistent(osh->pdev, size, va, paddr);
#else
	pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
#endif /* BCMDMA64OSL */
#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
#else
	osl_sec_dma_free_consistent(osh, va, size, pa);
#endif /* BCM_SECURE_DMA */
}

void *
osl_virt_to_phys(void *va)
{
	return (void *)(uintptr)virt_to_phys(va);
}

#include <asm/cacheflush.h>
void BCMFASTPATH
osl_dma_flush(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah)
{
	return;
}

dmaaddr_t BCMFASTPATH
osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah)
{
	int dir;
	dmaaddr_t ret_addr;
	dma_addr_t map_addr;
	int ret;

	DMA_LOCK(osh);

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;

#ifdef STB_SOC_WIFI
#if (__LINUX_ARM_ARCH__ == 8)
	/* need to flush or invalidate the cache here */
	if (dir == DMA_TX) { /* to device */
		osl_cache_flush(va, size);
	} else if (dir == DMA_RX) { /* from device */
		osl_cache_inv(va, size);
	} else { /* both */
		osl_cache_flush(va, size);
		osl_cache_inv(va, size);
	}
	DMA_UNLOCK(osh);
	return virt_to_phys(va);
#else /* (__LINUX_ARM_ARCH__ == 8) */
	map_addr = dma_map_single(osh->pdev, va, size, dir);
	DMA_UNLOCK(osh);
	return map_addr;
#endif /* (__LINUX_ARM_ARCH__ == 8) */
#else /* ! STB_SOC_WIFI */
	map_addr = pci_map_single(osh->pdev, va, size, dir);
#endif	/* ! STB_SOC_WIFI */

	ret = pci_dma_mapping_error(osh->pdev, map_addr);

	if (ret) {
		printk("%s: Failed to map memory\n", __FUNCTION__);
		PHYSADDRLOSET(ret_addr, 0);
		PHYSADDRHISET(ret_addr, 0);
	} else {
		PHYSADDRLOSET(ret_addr, map_addr & 0xffffffff);
		PHYSADDRHISET(ret_addr, (map_addr >> 32) & 0xffffffff);
	}

#ifdef DHD_MAP_LOGGING
	osl_dma_map_logging(osh, osh->dhd_map_log, ret_addr, size);
#endif /* DHD_MAP_LOGGING */

	DMA_UNLOCK(osh);

	return ret_addr;
}
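
/*
 * dmaaddr_t carries the bus address as explicit lo/hi 32-bit halves so the
 * same type works whether or not BCMDMA64OSL is set; a failed mapping is
 * reported back as the invalid address 0:0 once pci_dma_mapping_error()
 * flags it.
 */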

void BCMFASTPATH
osl_dma_unmap(osl_t *osh, dmaaddr_t pa, uint size, int direction)
{
	int dir;
#ifdef BCMDMA64OSL
	dma_addr_t paddr;
#endif /* BCMDMA64OSL */

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	DMA_LOCK(osh);

	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;

#ifdef DHD_MAP_LOGGING
	osl_dma_map_logging(osh, osh->dhd_unmap_log, pa, size);
#endif /* DHD_MAP_LOGGING */

#ifdef BCMDMA64OSL
	PHYSADDRTOULONG(pa, paddr);
	pci_unmap_single(osh->pdev, paddr, size, dir);
#else /* BCMDMA64OSL */

#ifdef STB_SOC_WIFI
#if (__LINUX_ARM_ARCH__ == 8)
	if (dir == DMA_TX) { /* to device */
		dma_sync_single_for_device(OSH_NULL, pa, size, DMA_TX);
	} else if (dir == DMA_RX) { /* from device */
		dma_sync_single_for_cpu(OSH_NULL, pa, size, DMA_RX);
	} else { /* both */
		dma_sync_single_for_device(OSH_NULL, pa, size, DMA_TX);
		dma_sync_single_for_cpu(OSH_NULL, pa, size, DMA_RX);
	}
#else /* (__LINUX_ARM_ARCH__ == 8) */
	dma_unmap_single(osh->pdev, (uintptr)pa, size, dir);
#endif /* (__LINUX_ARM_ARCH__ == 8) */
#else /* STB_SOC_WIFI */
	pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
#endif /* STB_SOC_WIFI */

#endif /* BCMDMA64OSL */

	DMA_UNLOCK(osh);
}

/* OSL function for CPU relax */
inline void BCMFASTPATH
osl_cpu_relax(void)
{
	cpu_relax();
}

extern void osl_preempt_disable(osl_t *osh)
{
	preempt_disable();
}

extern void osl_preempt_enable(osl_t *osh)
{
	preempt_enable();
}

#if defined(BCMASSERT_LOG)
void
osl_assert(const char *exp, const char *file, int line)
{
	char tempbuf[256];
	const char *basename;

	basename = strrchr(file, '/');
	/* skip the '/' */
	if (basename)
		basename++;

	if (!basename)
		basename = file;

#ifdef BCMASSERT_LOG
	snprintf(tempbuf, sizeof(tempbuf), "\"%s\": file \"%s\", line %d\n",
		exp, basename, line);
#endif /* BCMASSERT_LOG */

	switch (g_assert_type) {
	case 0:
		panic("%s", tempbuf);
		break;
	case 1:
		/* fall through */
	case 3:
		printk("%s", tempbuf);
		break;
	case 2:
		printk("%s", tempbuf);
		BUG();
		break;
	default:
		break;
	}
}
#endif // endif

void
osl_delay(uint usec)
{
	uint d;

	while (usec > 0) {
		d = MIN(usec, 1000);
		udelay(d);
		usec -= d;
	}
}

void
osl_sleep(uint ms)
{
	if (ms < 20)
		usleep_range(ms*1000, ms*1000 + 1000);
	else
		msleep(ms);
}
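
/*
 * Sleeps shorter than 20 ms use usleep_range() because msleep() is jiffy
 * based and can oversleep by a jiffy or more, which is significant at that
 * scale; the extra 1 ms of slack lets the hrtimer subsystem coalesce nearby
 * wakeups.
 */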

uint64
osl_sysuptime_us(void)
{
	struct osl_timespec tv;
	uint64 usec;

	osl_do_gettimeofday(&tv);
	/* tv_usec content is fraction of a second */
	usec = (uint64)tv.tv_sec * 1000000ul + tv.tv_usec;
	return usec;
}

uint64
osl_localtime_ns(void)
{
	uint64 ts_nsec = 0;

	ts_nsec = local_clock();

	return ts_nsec;
}

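/*
 * local_clock() returns nanoseconds since boot.  do_div() divides ts_nsec in
 * place, leaving the whole seconds in ts_nsec and returning the sub-second
 * remainder, which is then scaled down to microseconds.
 */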
void
osl_get_localtime(uint64 *sec, uint64 *usec)
{
	uint64 ts_nsec = 0;
	unsigned long rem_nsec = 0;

	ts_nsec = local_clock();
	rem_nsec = do_div(ts_nsec, NSEC_PER_SEC);
	*sec = (uint64)ts_nsec;
	*usec = (uint64)(rem_nsec / NSEC_PER_USEC);
}

uint64
osl_systztime_us(void)
{
	struct osl_timespec tv;
	uint64 tzusec;

	osl_do_gettimeofday(&tv);
	/* apply timezone */
	tzusec = (uint64)((tv.tv_sec - (sys_tz.tz_minuteswest * 60)) *
		USEC_PER_SEC);
	tzusec += tv.tv_usec;

	return tzusec;
}

/*
 * OSLREGOPS specifies the use of osl_XXX routines to be used for register access
 */

/*
 * BINOSL selects the slightly slower function-call-based binary compatible osl.
 */

uint32
osl_rand(void)
{
	uint32 rand;

	get_random_bytes(&rand, sizeof(rand));

	return rand;
}

/* Linux Kernel: File Operations: start */
void *
osl_os_open_image(char *filename)
{
	struct file *fp;

	fp = filp_open(filename, O_RDONLY, 0);
	/*
	 * 2.6.11 (FC4) supports filp_open() but later revs don't?
	 * Alternative:
	 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
	 * ???
	 */
	if (IS_ERR(fp))
		fp = NULL;

	return fp;
}

int
osl_os_get_image_block(char *buf, int len, void *image)
{
	struct file *fp = (struct file *)image;
	int rdlen;

	if (!image)
		return 0;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
	rdlen = kernel_read(fp, buf, len, &fp->f_pos);
#else
	rdlen = kernel_read(fp, fp->f_pos, buf, len);

	if (rdlen > 0)
		fp->f_pos += rdlen;
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) */

	return rdlen;
}
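
/*
 * kernel_read() changed prototype in 4.14: the newer form takes the buffer
 * before the length and advances *pos itself, which is why only the
 * pre-4.14 branch above updates f_pos manually.
 */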

void
osl_os_close_image(void *image)
{
	if (image)
		filp_close((struct file *)image, NULL);
}

int
osl_os_image_size(void *image)
{
	int len = 0, curroffset;

	if (image) {
		/* store the current offset */
		curroffset = generic_file_llseek(image, 0, 1);
		/* goto end of file to get length */
		len = generic_file_llseek(image, 0, 2);
		/* restore back the offset */
		generic_file_llseek(image, curroffset, 0);
	}
	return len;
}

/* Linux Kernel: File Operations: end */

#if (defined(STB) && defined(__arm__))
inline void osl_pcie_rreg(osl_t *osh, ulong addr, volatile void *v, uint size)
{
	unsigned long flags = 0;
	int pci_access = 0;
	int acp_war_enab = ACP_WAR_ENAB();

	if (osh && BUSTYPE(osh->bustype) == PCI_BUS)
		pci_access = 1;

	if (pci_access && acp_war_enab)
		spin_lock_irqsave(&l2x0_reg_lock, flags);

	switch (size) {
	case sizeof(uint8):
		*(volatile uint8*)v = readb((volatile uint8*)(addr));
		break;
	case sizeof(uint16):
		*(volatile uint16*)v = readw((volatile uint16*)(addr));
		break;
	case sizeof(uint32):
		*(volatile uint32*)v = readl((volatile uint32*)(addr));
		break;
	case sizeof(uint64):
		*(volatile uint64*)v = *((volatile uint64*)(addr));
		break;
	}

	if (pci_access && acp_war_enab)
		spin_unlock_irqrestore(&l2x0_reg_lock, flags);
}
#endif // endif

#if defined(BCM_BACKPLANE_TIMEOUT)
inline void osl_bpt_rreg(osl_t *osh, ulong addr, volatile void *v, uint size)
{
	bool poll_timeout = FALSE;
	static int in_si_clear = FALSE;

	switch (size) {
	case sizeof(uint8):
		*(volatile uint8*)v = readb((volatile uint8*)(addr));
		if (*(volatile uint8*)v == 0xff)
			poll_timeout = TRUE;
		break;
	case sizeof(uint16):
		*(volatile uint16*)v = readw((volatile uint16*)(addr));
		if (*(volatile uint16*)v == 0xffff)
			poll_timeout = TRUE;
		break;
	case sizeof(uint32):
		*(volatile uint32*)v = readl((volatile uint32*)(addr));
		if (*(volatile uint32*)v == 0xffffffff)
			poll_timeout = TRUE;
		break;
	case sizeof(uint64):
		*(volatile uint64*)v = *((volatile uint64*)(addr));
		if (*(volatile uint64*)v == 0xffffffffffffffff)
			poll_timeout = TRUE;
		break;
	}

	if (osh && osh->sih && (in_si_clear == FALSE) && poll_timeout && osh->bpt_cb) {
		in_si_clear = TRUE;
		osh->bpt_cb((void *)osh->sih, (void *)addr);
		in_si_clear = FALSE;
	}
}
#endif /* BCM_BACKPLANE_TIMEOUT */

#ifdef BCM_SECURE_DMA
static void *
osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size, bool iscache, bool isdecr)
{

	struct page **map;
	int order, i;
	void *addr = NULL;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	map = kmalloc(sizeof(struct page *) << order, GFP_ATOMIC);

	if (map == NULL)
		return NULL;

	for (i = 0; i < (size >> PAGE_SHIFT); i++)
		map[i] = page + i;

	if (iscache) {
		addr = vmap(map, size >> PAGE_SHIFT, VM_MAP, __pgprot(PAGE_KERNEL));
		if (isdecr) {
			osh->contig_delta_va_pa = ((uint8 *)addr - page_to_phys(page));
		}
	} else {

#if defined(__ARM_ARCH_7A__)
		addr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
			pgprot_noncached(__pgprot(PAGE_KERNEL)));
#endif // endif
		if (isdecr) {
			osh->contig_delta_va_pa = ((uint8 *)addr - page_to_phys(page));
		}
	}

	kfree(map);
	return (void *)addr;
}

static void
osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size)
{
	vunmap(contig_base_va);
}

static int
osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max, sec_mem_elem_t **list)
{
	int i;
	int ret = BCME_OK;
	sec_mem_elem_t *sec_mem_elem;

	if ((sec_mem_elem = kmalloc(sizeof(sec_mem_elem_t)*(max), GFP_ATOMIC)) != NULL) {

		*list = sec_mem_elem;
		bzero(sec_mem_elem, sizeof(sec_mem_elem_t)*(max));
		for (i = 0; i < max-1; i++) {
			sec_mem_elem->next = (sec_mem_elem + 1);
			sec_mem_elem->size = mbsize;
			sec_mem_elem->pa_cma = osh->contig_base_alloc;
			sec_mem_elem->vac = osh->contig_base_alloc_va;

			sec_mem_elem->pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
			osh->contig_base_alloc += mbsize;
			osh->contig_base_alloc_va = ((uint8 *)osh->contig_base_alloc_va + mbsize);

			sec_mem_elem = sec_mem_elem + 1;
		}
		sec_mem_elem->next = NULL;
		sec_mem_elem->size = mbsize;
		sec_mem_elem->pa_cma = osh->contig_base_alloc;
		sec_mem_elem->vac = osh->contig_base_alloc_va;

		sec_mem_elem->pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
		osh->contig_base_alloc += mbsize;
		osh->contig_base_alloc_va = ((uint8 *)osh->contig_base_alloc_va + mbsize);

	} else {
		printf("%s sec mem elem kmalloc failed\n", __FUNCTION__);
		ret = BCME_ERROR;
	}
	return ret;
}
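
/*
 * Each element carves mbsize bytes out of the secure CMA region: pa_cma and
 * vac snapshot contig_base_alloc / contig_base_alloc_va, which then advance
 * by mbsize, so the resulting free list describes one physically contiguous
 * run of max * mbsize bytes.
 */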

static void
osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max, void *sec_list_base)
{
	if (sec_list_base)
		kfree(sec_list_base);
}

static sec_mem_elem_t * BCMFASTPATH
osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size, int direction,
	struct sec_cma_info *ptr_cma_info, uint offset)
{
	sec_mem_elem_t *sec_mem_elem = NULL;

#ifdef NOT_YET
	if (size <= 512 && osh->sec_list_512) {
		sec_mem_elem = osh->sec_list_512;
		osh->sec_list_512 = sec_mem_elem->next;
	}
	else if (size <= 2048 && osh->sec_list_2048) {
		sec_mem_elem = osh->sec_list_2048;
		osh->sec_list_2048 = sec_mem_elem->next;
	}
	else
#else
		ASSERT(osh->sec_list_4096);
		sec_mem_elem = osh->sec_list_4096;
		osh->sec_list_4096 = sec_mem_elem->next;
#endif /* NOT_YET */

		sec_mem_elem->next = NULL;

	if (ptr_cma_info->sec_alloc_list_tail) {
		ptr_cma_info->sec_alloc_list_tail->next = sec_mem_elem;
		ptr_cma_info->sec_alloc_list_tail = sec_mem_elem;
	}
	else {
		/* First allocation: If tail is NULL, sec_alloc_list MUST also be NULL */
		ASSERT(ptr_cma_info->sec_alloc_list == NULL);
		ptr_cma_info->sec_alloc_list = sec_mem_elem;
		ptr_cma_info->sec_alloc_list_tail = sec_mem_elem;
	}
	return sec_mem_elem;
}

static void BCMFASTPATH
osl_sec_dma_free_mem_elem(osl_t *osh, sec_mem_elem_t *sec_mem_elem)
{
	sec_mem_elem->dma_handle = 0x0;
	sec_mem_elem->va = NULL;
#ifdef NOT_YET
	if (sec_mem_elem->size == 512) {
		sec_mem_elem->next = osh->sec_list_512;
		osh->sec_list_512 = sec_mem_elem;
	} else if (sec_mem_elem->size == 2048) {
		sec_mem_elem->next = osh->sec_list_2048;
		osh->sec_list_2048 = sec_mem_elem;
	} else if (sec_mem_elem->size == 4096) {
#endif /* NOT_YET */
		sec_mem_elem->next = osh->sec_list_4096;
		osh->sec_list_4096 = sec_mem_elem;
#ifdef NOT_YET
	}
	else
		printf("%s free failed size=%d\n", __FUNCTION__, sec_mem_elem->size);
#endif /* NOT_YET */
}

static sec_mem_elem_t * BCMFASTPATH
osl_sec_dma_find_rem_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info, dma_addr_t dma_handle)
{
	sec_mem_elem_t *sec_mem_elem = ptr_cma_info->sec_alloc_list;
	sec_mem_elem_t *sec_prv_elem = ptr_cma_info->sec_alloc_list;

	if (sec_mem_elem->dma_handle == dma_handle) {

		ptr_cma_info->sec_alloc_list = sec_mem_elem->next;

		if (sec_mem_elem == ptr_cma_info->sec_alloc_list_tail) {
			ptr_cma_info->sec_alloc_list_tail = NULL;
			ASSERT(ptr_cma_info->sec_alloc_list == NULL);
		}

		return sec_mem_elem;
	}
	sec_mem_elem = sec_mem_elem->next;

	while (sec_mem_elem != NULL) {

		if (sec_mem_elem->dma_handle == dma_handle) {

			sec_prv_elem->next = sec_mem_elem->next;
			if (sec_mem_elem == ptr_cma_info->sec_alloc_list_tail)
				ptr_cma_info->sec_alloc_list_tail = sec_prv_elem;

			return sec_mem_elem;
		}
		sec_prv_elem = sec_mem_elem;
		sec_mem_elem = sec_mem_elem->next;
	}
	return NULL;
}

static sec_mem_elem_t *
osl_sec_dma_rem_first_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info)
{
	sec_mem_elem_t *sec_mem_elem = ptr_cma_info->sec_alloc_list;

	if (sec_mem_elem) {

		ptr_cma_info->sec_alloc_list = sec_mem_elem->next;

		if (ptr_cma_info->sec_alloc_list == NULL)
			ptr_cma_info->sec_alloc_list_tail = NULL;

		return sec_mem_elem;

	} else
		return NULL;
}

static void * BCMFASTPATH
osl_sec_dma_last_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info)
{
	return ptr_cma_info->sec_alloc_list_tail;
}

dma_addr_t BCMFASTPATH
osl_sec_dma_map_txmeta(osl_t *osh, void *va, uint size, int direction, void *p,
	hnddma_seg_map_t *dmah, void *ptr_cma_info)
{
	sec_mem_elem_t *sec_mem_elem;
	struct page *pa_cma_page;
	uint loffset;
	void *vaorig = ((uint8 *)va + size);
	dma_addr_t dma_handle = 0x0;
	/* packet will be the one added with osl_sec_dma_map() just before this call */

	sec_mem_elem = osl_sec_dma_last_elem(osh, ptr_cma_info);

	if (sec_mem_elem && sec_mem_elem->va == vaorig) {

		pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
		loffset = sec_mem_elem->pa_cma -(sec_mem_elem->pa_cma & ~(PAGE_SIZE-1));

		dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset, size,
			(direction == DMA_TX ? DMA_TO_DEVICE:DMA_FROM_DEVICE));

	} else {
		printf("%s: error orig va not found va = 0x%p \n",
			__FUNCTION__, vaorig);
	}
	return dma_handle;
}

dma_addr_t BCMFASTPATH
osl_sec_dma_map(osl_t *osh, void *va, uint size, int direction, void *p,
	hnddma_seg_map_t *dmah, void *ptr_cma_info, uint offset)
{

	sec_mem_elem_t *sec_mem_elem;
	struct page *pa_cma_page;
	void *pa_cma_kmap_va = NULL;
	uint buflen = 0;
	dma_addr_t dma_handle = 0x0;
	uint loffset;
#ifdef NOT_YET
	int *fragva;
	struct sk_buff *skb;
	int i = 0;
#endif /* NOT_YET */

	ASSERT((direction == DMA_RX) || (direction == DMA_TX));
	sec_mem_elem = osl_sec_dma_alloc_mem_elem(osh, va, size, direction, ptr_cma_info, offset);

	sec_mem_elem->va = va;
	sec_mem_elem->direction = direction;
	pa_cma_page = sec_mem_elem->pa_cma_page;

	loffset = sec_mem_elem->pa_cma -(sec_mem_elem->pa_cma & ~(PAGE_SIZE-1));
	/* pa_cma_kmap_va = kmap_atomic(pa_cma_page);
	* pa_cma_kmap_va += loffset;
	*/

	pa_cma_kmap_va = sec_mem_elem->vac;
	pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + offset);
	buflen = size;

	if (direction == DMA_TX) {
		/* pa_cma_kmap_va already carries 'offset' (added above) */
		memcpy(pa_cma_kmap_va, va, size);

#ifdef NOT_YET
		if (p == NULL) {

			memcpy(pa_cma_kmap_va, va, size);
			/* prhex("Txpkt",pa_cma_kmap_va, size); */
		} else {
			for (skb = (struct sk_buff *)p; skb != NULL; skb = PKTNEXT(osh, skb)) {
				if (skb_is_nonlinear(skb)) {

					for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
						skb_frag_t *f = &skb_shinfo(skb)->frags[i];
						fragva = kmap_atomic(skb_frag_page(f));
						pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + buflen);
						memcpy((pa_cma_kmap_va),
						(fragva + f->page_offset), skb_frag_size(f));
						kunmap_atomic(fragva);
						buflen += skb_frag_size(f);
					}
				} else {

					pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + buflen);
					memcpy(pa_cma_kmap_va, skb->data, skb->len);
					buflen += skb->len;
				}
			}

		}
#endif /* NOT_YET */
		if (dmah) {
			dmah->nsegs = 1;
			dmah->origsize = buflen;
		}
	}
	else
	{
		if ((p != NULL) && (dmah != NULL)) {
			dmah->nsegs = 1;
			dmah->origsize = buflen;
		}
		*(uint32 *)(pa_cma_kmap_va) = 0x0;
	}

	if (direction == DMA_RX) {
		flush_kernel_vmap_range(pa_cma_kmap_va, sizeof(int));
	}
	dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset+offset, buflen,
		(direction == DMA_TX ? DMA_TO_DEVICE:DMA_FROM_DEVICE));
	if (dmah) {
		dmah->segs[0].addr = dma_handle;
		dmah->segs[0].length = buflen;
	}
	sec_mem_elem->dma_handle = dma_handle;
	/* kunmap_atomic(pa_cma_kmap_va-loffset); */
	return dma_handle;
}

dma_addr_t BCMFASTPATH
osl_sec_dma_dd_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *map)
{

	struct page *pa_cma_page;
	phys_addr_t pa_cma;
	dma_addr_t dma_handle = 0x0;
	uint loffset;

	pa_cma = ((uint8 *)va - (uint8 *)osh->contig_delta_va_pa);
	pa_cma_page = phys_to_page(pa_cma);
	loffset = pa_cma -(pa_cma & ~(PAGE_SIZE-1));

	dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset, size,
		(direction == DMA_TX ? DMA_TO_DEVICE:DMA_FROM_DEVICE));

	return dma_handle;
}

void BCMFASTPATH
osl_sec_dma_unmap(osl_t *osh, dma_addr_t dma_handle, uint size, int direction,
void *p, hnddma_seg_map_t *map,	void *ptr_cma_info, uint offset)
{
	sec_mem_elem_t *sec_mem_elem;
#ifdef NOT_YET
	struct page *pa_cma_page;
#endif // endif
	void *pa_cma_kmap_va = NULL;
	uint buflen = 0;
	dma_addr_t pa_cma;
	void *va;
	int read_count = 0;
	BCM_REFERENCE(buflen);
	BCM_REFERENCE(read_count);

	sec_mem_elem = osl_sec_dma_find_rem_elem(osh, ptr_cma_info, dma_handle);
	ASSERT(sec_mem_elem);

	va = sec_mem_elem->va;
	va = (uint8 *)va - offset;
	pa_cma = sec_mem_elem->pa_cma;

#ifdef NOT_YET
	pa_cma_page = sec_mem_elem->pa_cma_page;
#endif // endif

	if (direction == DMA_RX) {

		if (p == NULL) {

			/* pa_cma_kmap_va = kmap_atomic(pa_cma_page);
			* pa_cma_kmap_va += loffset;
			*/

			pa_cma_kmap_va = sec_mem_elem->vac;

			do {
				invalidate_kernel_vmap_range(pa_cma_kmap_va, sizeof(int));

				buflen = *(uint *)(pa_cma_kmap_va);
				if (buflen)
					break;

				OSL_DELAY(1);
				read_count++;
			} while (read_count < 200);
			dma_unmap_page(OSH_NULL, pa_cma, size, DMA_FROM_DEVICE);
			memcpy(va, pa_cma_kmap_va, size);
			/* kunmap_atomic(pa_cma_kmap_va); */
		}
#ifdef NOT_YET
		else {
			buflen = 0;
			for (skb = (struct sk_buff *)p; (buflen < size) &&
				(skb != NULL); skb = skb->next) {
				if (skb_is_nonlinear(skb)) {
					pa_cma_kmap_va = kmap_atomic(pa_cma_page);
					for (i = 0; (buflen < size) &&
						(i < skb_shinfo(skb)->nr_frags); i++) {
						skb_frag_t *f = &skb_shinfo(skb)->frags[i];
						cpuaddr = kmap_atomic(skb_frag_page(f));
						pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + buflen);
						memcpy((cpuaddr + f->page_offset),
							pa_cma_kmap_va, skb_frag_size(f));
						kunmap_atomic(cpuaddr);
						buflen += skb_frag_size(f);
					}
						kunmap_atomic(pa_cma_kmap_va);
				} else {
					pa_cma_kmap_va = kmap_atomic(pa_cma_page);
					pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + buflen);
					memcpy(skb->data, pa_cma_kmap_va, skb->len);
					kunmap_atomic(pa_cma_kmap_va);
					buflen += skb->len;
				}

			}

		}
#endif /* NOT YET */
	} else {
		dma_unmap_page(OSH_NULL, pa_cma, size+offset, DMA_TO_DEVICE);
	}

	osl_sec_dma_free_mem_elem(osh, sec_mem_elem);
}

void
osl_sec_dma_unmap_all(osl_t *osh, void *ptr_cma_info)
{
	sec_mem_elem_t *sec_mem_elem;

	sec_mem_elem = osl_sec_dma_rem_first_elem(osh, ptr_cma_info);

	while (sec_mem_elem != NULL) {
		dma_unmap_page(OSH_NULL, sec_mem_elem->pa_cma, sec_mem_elem->size,
			sec_mem_elem->direction == DMA_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		osl_sec_dma_free_mem_elem(osh, sec_mem_elem);

		sec_mem_elem = osl_sec_dma_rem_first_elem(osh, ptr_cma_info);
	}
}
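/*
 * Teardown sketch (hypothetical caller): on interface down, every element
 * still tracked in a sec_cma_info is unmapped and returned to the pool in a
 * single call, instead of unmapping handle by handle:
 *
 *	osl_sec_dma_unmap_all(osh, &cma_info);
 */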

static void
osl_sec_dma_init_consistent(osl_t *osh)
{
	int i;
	void *temp_va = osh->contig_base_alloc_coherent_va;
	phys_addr_t temp_pa = osh->contig_base_alloc_coherent;

	for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
		osh->sec_cma_coherent[i].avail = TRUE;
		osh->sec_cma_coherent[i].va = temp_va;
		osh->sec_cma_coherent[i].pa = temp_pa;
		temp_va = ((uint8 *)temp_va) + SEC_CMA_COHERENT_BLK;
		temp_pa += SEC_CMA_COHERENT_BLK;
	}
}
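/*
 * Pool layout carved by the loop above (sizes illustrative; the real values
 * come from SEC_CMA_COHERENT_MAX/SEC_CMA_COHERENT_BLK):
 *
 *	slot 0: va = base            pa = pa_base
 *	slot 1: va = base + 1*BLK    pa = pa_base + 1*BLK
 *	slot N: va = base + N*BLK    pa = pa_base + N*BLK
 *
 * Each slot is a fixed-size coherent block handed out whole by
 * osl_sec_dma_alloc_consistent() below.
 */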

static void *
osl_sec_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, ulong *pap)
{
	void *temp_va = NULL;
	ulong temp_pa = 0;
	int i;

	if (size > SEC_CMA_COHERENT_BLK) {
		printf("%s unsupported size\n", __FUNCTION__);
		return NULL;
	}

	for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
		if (osh->sec_cma_coherent[i].avail == TRUE) {
			temp_va = osh->sec_cma_coherent[i].va;
			temp_pa = osh->sec_cma_coherent[i].pa;
			osh->sec_cma_coherent[i].avail = FALSE;
			break;
		}
	}

	if (i == SEC_CMA_COHERENT_MAX)
		printf("%s:No coherent mem: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__,
			temp_va, (ulong)temp_pa, size);

	*pap = (unsigned long)temp_pa;
	return temp_va;
}

static void
osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa)
{
	int i = 0;

	for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
		if (osh->sec_cma_coherent[i].va == va) {
			osh->sec_cma_coherent[i].avail = TRUE;
			break;
		}
	}
	if (i == SEC_CMA_COHERENT_MAX)
		printf("%s:Error: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__,
			va, (ulong)pa, size);
}
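/*
 * Alloc/free pairing sketch (hypothetical caller; the descriptor-table sizing
 * is illustrative):
 *
 *	ulong pa;
 *	void *va = osl_sec_dma_alloc_consistent(osh, desc_size, 0, &pa);
 *	...
 *	if (va != NULL)
 *		osl_sec_dma_free_consistent(osh, va, desc_size, (dmaaddr_t)pa);
 *
 * Blocks are recycled by address, so the va passed to free must be exactly
 * the va the allocator returned.
 */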
#endif /* BCM_SECURE_DMA */

/* timer apis */
/* Note: all timer APIs are thread-unsafe and must be protected with locks by the caller */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
/* From 4.15 the kernel passes the struct timer_list pointer itself to the
 * callback; recover the compat wrapper with container_of() and invoke the
 * legacy (unsigned long arg) callback it carries.
 */
void
timer_cb_compat(struct timer_list *tl)
{
	timer_list_compat_t *t = container_of(tl, timer_list_compat_t, timer);
	t->callback((ulong)t->arg);
}
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) */

osl_timer_t *
osl_timer_init(osl_t *osh, const char *name, void (*fn)(void *arg), void *arg)
{
	osl_timer_t *t;
	BCM_REFERENCE(fn);
	if ((t = MALLOCZ(NULL, sizeof(osl_timer_t))) == NULL) {
		printk(KERN_ERR "osl_timer_init: out of memory, malloced %d bytes\n",
			(int)sizeof(osl_timer_t));
		return (NULL);
	}
	bzero(t, sizeof(osl_timer_t));
	if ((t->timer = MALLOCZ(NULL, sizeof(struct timer_list))) == NULL) {
		printf("osl_timer_init: malloc failed\n");
		MFREE(NULL, t, sizeof(osl_timer_t));
		return (NULL);
	}
	t->set = TRUE;

	init_timer_compat(t->timer, (linux_timer_fn)fn, arg);

	return (t);
}

void
osl_timer_add(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic)
{
	if (t == NULL) {
		printf("%s: Timer handle is NULL\n", __FUNCTION__);
		return;
	}
	ASSERT(!t->set);

	t->set = TRUE;
	if (periodic) {
		printf("Periodic timers are not supported by Linux timer apis\n");
	}
	timer_expires(t->timer) = jiffies + ms*HZ/1000;

	add_timer(t->timer);

	return;
}

void
osl_timer_update(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic)
{
	if (t == NULL) {
		printf("%s: Timer handle is NULL\n", __FUNCTION__);
		return;
	}
	if (periodic) {
		printf("Periodic timers are not supported by Linux timer apis\n");
	}
	t->set = TRUE;
	timer_expires(t->timer) = jiffies + ms*HZ/1000;

	mod_timer(t->timer, timer_expires(t->timer));

	return;
}

/*
 * Delete and free the timer. Returns FALSE only when the handle is NULL;
 * del_timer()'s own result (whether a pending timer was deactivated) is not
 * propagated to the caller.
 */
bool
osl_timer_del(osl_t *osh, osl_timer_t *t)
{
	if (t == NULL) {
		printf("%s: Timer handle is NULL\n", __FUNCTION__);
		return (FALSE);
	}
	if (t->set) {
		t->set = FALSE;
		if (t->timer) {
			del_timer(t->timer);
			MFREE(NULL, t->timer, sizeof(struct timer_list));
		}
		MFREE(NULL, t, sizeof(osl_timer_t));
	}
	return (TRUE);
}
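/*
 * Usage sketch for the timer wrappers above (hypothetical callback and
 * context names; caller-side locking elided per the note above):
 *
 *	static void my_timeout(void *arg) { ... }
 *
 *	osl_timer_t *t = osl_timer_init(osh, "my_timer", my_timeout, ctx);
 *	if (t != NULL) {
 *		osl_timer_update(osh, t, 100, FALSE);	// arm/re-arm one-shot, 100 ms
 *		...
 *		osl_timer_del(osh, t);			// cancel and free
 *	}
 *
 * Note that osl_timer_init() leaves t->set TRUE, so osl_timer_add()'s
 * ASSERT(!t->set) would trip in debug builds if called straight after init;
 * osl_timer_update() (mod_timer also activates an inactive timer) avoids that.
 */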
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
/* 4.14 changed kernel_read() to (file, buf, count, pos); keep the driver's
 * legacy (file, offset, addr, count) calling convention here.
 */
int
kernel_read_compat(struct file *file, loff_t offset, char *addr, unsigned long count)
{
	return (int)kernel_read(file, addr, (size_t)count, &offset);
}
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) */
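/*
 * Usage sketch (hypothetical file handle and buffer; error handling elided):
 *
 *	char buf[64];
 *	int n = kernel_read_compat(fp, 0, buf, sizeof(buf));
 *	if (n > 0)
 *		...parse the first n bytes...
 */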

void *
osl_spin_lock_init(osl_t *osh)
{
	/* Adding 4 bytes since the sizeof(spinlock_t) could be 0
	 * if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined,
	 * and this results in kernel asserts in internal builds.
	 */
	spinlock_t *lock = MALLOC(osh, sizeof(spinlock_t) + 4);
	if (lock)
		spin_lock_init(lock);
	return ((void *)lock);
}

void
osl_spin_lock_deinit(osl_t *osh, void *lock)
{
	if (lock)
		MFREE(osh, lock, sizeof(spinlock_t) + 4);
}

unsigned long
osl_spin_lock(void *lock)
{
	unsigned long flags = 0;

	if (lock)
		spin_lock_irqsave((spinlock_t *)lock, flags);

	return flags;
}

void
osl_spin_unlock(void *lock, unsigned long flags)
{
	if (lock)
		spin_unlock_irqrestore((spinlock_t *)lock, flags);
}
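/*
 * Lock/unlock sketch (hypothetical caller): the flags value returned by
 * osl_spin_lock() carries the saved interrupt state and must be passed back
 * unchanged to osl_spin_unlock():
 *
 *	void *lk = osl_spin_lock_init(osh);
 *	unsigned long flags = osl_spin_lock(lk);
 *	...critical section...
 *	osl_spin_unlock(lk, flags);
 *	osl_spin_lock_deinit(osh, lk);
 */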

#ifdef USE_DMA_LOCK
/* Take the DMA lock with plain spin_lock() when already in IRQ context (or
 * with IRQs disabled), otherwise with spin_lock_bh(); dma_lock_bh records
 * which variant was taken so osl_dma_unlock() releases it symmetrically.
 */
static void
osl_dma_lock(osl_t *osh)
{
	if (likely(in_irq() || irqs_disabled())) {
		spin_lock(&osh->dma_lock);
	} else {
		spin_lock_bh(&osh->dma_lock);
		osh->dma_lock_bh = TRUE;
	}
}

static void
osl_dma_unlock(osl_t *osh)
{
	if (unlikely(osh->dma_lock_bh)) {
		osh->dma_lock_bh = FALSE;
		spin_unlock_bh(&osh->dma_lock);
	} else {
		spin_unlock(&osh->dma_lock);
	}
}

static void
osl_dma_lock_init(osl_t *osh)
{
	spin_lock_init(&osh->dma_lock);
	osh->dma_lock_bh = FALSE;
}
#endif /* USE_DMA_LOCK */
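/*
 * Editorial note: the _bh variant must not be taken in hard-IRQ context
 * (its unlock re-enables bottom halves), which is why osl_dma_lock() checks
 * in_irq()/irqs_disabled() first. A hypothetical DMA-map wrapper would simply
 * bracket its bookkeeping:
 *
 *	osl_dma_lock(osh);
 *	...map/unmap bookkeeping...
 *	osl_dma_unlock(osh);
 */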

void
osl_do_gettimeofday(struct osl_timespec *ts)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
	struct timespec64 curtime;
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
	struct timespec curtime;
#else
	struct timeval curtime;
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
	ktime_get_real_ts64(&curtime);
	ts->tv_nsec = curtime.tv_nsec;
	ts->tv_usec = curtime.tv_nsec / 1000;
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
	getnstimeofday(&curtime);
	ts->tv_nsec = curtime.tv_nsec;
	ts->tv_usec = curtime.tv_nsec / 1000;
#else
	do_gettimeofday(&curtime);
	ts->tv_usec = curtime.tv_usec;
	ts->tv_nsec = curtime.tv_usec * 1000;
#endif
	ts->tv_sec = curtime.tv_sec;
}
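/*
 * Whichever kernel branch runs, the result is normalized so both tv_nsec and
 * tv_usec are populated: e.g. (illustrative values) tv_nsec = 123456789
 * yields tv_usec = 123456, while on pre-5.0 kernels tv_usec = 123456 yields
 * tv_nsec = 123456000 (microsecond granularity only).
 */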

uint32
osl_do_gettimediff(struct osl_timespec *cur_ts, struct osl_timespec *old_ts)
{
	uint32 diff_s, diff_us, total_diff_us;
	bool pgc_g = FALSE;

	/* The fields are unsigned, so handle the microsecond "borrow" by hand:
	 * add the usec delta when cur_usec > old_usec, subtract it otherwise.
	 */
	diff_s = (uint32)cur_ts->tv_sec - (uint32)old_ts->tv_sec;
	pgc_g = (cur_ts->tv_usec > old_ts->tv_usec) ? TRUE : FALSE;
	diff_us = pgc_g ? (cur_ts->tv_usec - old_ts->tv_usec) : (old_ts->tv_usec - cur_ts->tv_usec);
	total_diff_us = pgc_g ? (diff_s * 1000000 + diff_us) : (diff_s * 1000000 - diff_us);
	return total_diff_us;
}
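/*
 * Worked example (illustrative numbers): cur = 10 s / 200 us,
 * old = 9 s / 700 us. Then diff_s = 1; cur_usec <= old_usec, so
 * diff_us = 700 - 200 = 500 and total_diff_us = 1*1000000 - 500 = 999500 us,
 * matching the true difference of 0.999500 s.
 */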

void
osl_get_monotonic_boottime(struct osl_timespec *ts)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
	struct timespec64 curtime;
#else
	struct timespec curtime;
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
	curtime = ktime_to_timespec64(ktime_get_boottime());
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
	curtime = ktime_to_timespec(ktime_get_boottime());
#else
	get_monotonic_boottime(&curtime);
#endif
	ts->tv_sec = curtime.tv_sec;
	ts->tv_nsec = curtime.tv_nsec;
	ts->tv_usec = curtime.tv_nsec / 1000;
}
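/*
 * Usage sketch (hypothetical elapsed-time measurement): boottime is monotonic
 * and includes time spent in suspend, so it suits long-interval timing:
 *
 *	struct osl_timespec t0, t1;
 *	uint32 elapsed_us;
 *	osl_get_monotonic_boottime(&t0);
 *	...
 *	osl_get_monotonic_boottime(&t1);
 *	elapsed_us = osl_do_gettimediff(&t1, &t0);
 */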