/*
 * Linux OS Independent Layer
 *
 * Copyright (C) 1999-2019, Broadcom.
 *
 *      Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 *      As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions
 * of the license of that module.  An independent module is a module which is
 * not derived from this software.  The special exception does not apply to any
 * modifications of the software.
 *
 *      Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: linux_osl.c 815919 2019-04-22 09:06:50Z $
 */

#define LINUX_PORT

#include <typedefs.h>
#include <bcmendian.h>
#include <linuxver.h>
#include <bcmdefs.h>

#if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)
#include <asm/cacheflush.h>
#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */

#include <linux/random.h>

#include <osl.h>
#include <bcmutils.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <pcicfg.h>
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 8, 0))
#include <asm-generic/pci-dma-compat.h>
#endif

#ifdef BCM_SECURE_DMA
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/printk.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>
#include <asm/io.h>
#include <linux/skbuff.h>
#include <stbutils.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include <asm/memory.h>
#endif /* BCM_SECURE_DMA */

#include <linux/fs.h>

#if defined(STB)
#include <linux/spinlock.h>
extern spinlock_t l2x0_reg_lock;
#endif // endif

#ifdef BCM_OBJECT_TRACE
#include <bcmutils.h>
#endif /* BCM_OBJECT_TRACE */
#include "linux_osl_priv.h"

#define PCI_CFG_RETRY 10

#define DUMPBUFSZ 1024

#ifdef BCM_SECURE_DMA
static void *osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size,
                                 bool iscache, bool isdecr);
static void osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size);
static int osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max,
                                           sec_mem_elem_t **list);
static void osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize,
                                              int max, void *sec_list_base);
static sec_mem_elem_t *
osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size, int direction,
                           struct sec_cma_info *ptr_cma_info, uint offset);
static void osl_sec_dma_free_mem_elem(osl_t *osh, sec_mem_elem_t *sec_mem_elem);
static void osl_sec_dma_init_consistent(osl_t *osh);
static void *osl_sec_dma_alloc_consistent(osl_t *osh, uint size,
                                          uint16 align_bits, ulong *pap);
static void osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size,
                                        dmaaddr_t pa);
#endif /* BCM_SECURE_DMA */

/* PCMCIA attribute space access macros */

uint32 g_assert_type = 0; /* by default, assert failures panic the kernel */

module_param(g_assert_type, int, 0);
#ifdef BCM_SECURE_DMA
#define SECDMA_MODULE_PARAMS 0
#define SECDMA_EXT_FILE 1
unsigned long secdma_addr = 0;
unsigned long secdma_addr2 = 0;
u32 secdma_size = 0;
u32 secdma_size2 = 0;
module_param(secdma_addr, ulong, 0);
module_param(secdma_size, int, 0);
module_param(secdma_addr2, ulong, 0);
module_param(secdma_size2, int, 0);
static int secdma_found = 0;
#endif /* BCM_SECURE_DMA */

#ifdef USE_DMA_LOCK
static void osl_dma_lock(osl_t *osh);
static void osl_dma_unlock(osl_t *osh);
static void osl_dma_lock_init(osl_t *osh);

#define DMA_LOCK(osh) osl_dma_lock(osh)
#define DMA_UNLOCK(osh) osl_dma_unlock(osh)
#define DMA_LOCK_INIT(osh) osl_dma_lock_init(osh)
#else
#define DMA_LOCK(osh)                                                          \
    do { /* noop */                                                            \
    } while (0)
#define DMA_UNLOCK(osh)                                                        \
    do { /* noop */                                                            \
    } while (0)
#define DMA_LOCK_INIT(osh)                                                     \
    do { /* noop */                                                            \
    } while (0)
#endif /* USE_DMA_LOCK */
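
/*
 * With USE_DMA_LOCK unset, the DMA_LOCK()/DMA_UNLOCK() pairs above compile
 * to empty statements, so single-context DMA paths pay no locking cost.
 */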

static int16 linuxbcmerrormap[] = {
    0,           /* 0 */
    -EINVAL,     /* BCME_ERROR */
    -EINVAL,     /* BCME_BADARG */
    -EINVAL,     /* BCME_BADOPTION */
    -EINVAL,     /* BCME_NOTUP */
    -EINVAL,     /* BCME_NOTDOWN */
    -EINVAL,     /* BCME_NOTAP */
    -EINVAL,     /* BCME_NOTSTA */
    -EINVAL,     /* BCME_BADKEYIDX */
    -EINVAL,     /* BCME_RADIOOFF */
    -EINVAL,     /* BCME_NOTBANDLOCKED */
    -EINVAL,     /* BCME_NOCLK */
    -EINVAL,     /* BCME_BADRATESET */
    -EINVAL,     /* BCME_BADBAND */
    -E2BIG,      /* BCME_BUFTOOSHORT */
    -E2BIG,      /* BCME_BUFTOOLONG */
    -EBUSY,      /* BCME_BUSY */
    -EINVAL,     /* BCME_NOTASSOCIATED */
    -EINVAL,     /* BCME_BADSSIDLEN */
    -EINVAL,     /* BCME_OUTOFRANGECHAN */
    -EINVAL,     /* BCME_BADCHAN */
    -EFAULT,     /* BCME_BADADDR */
    -ENOMEM,     /* BCME_NORESOURCE */
    -EOPNOTSUPP, /* BCME_UNSUPPORTED */
    -EMSGSIZE,   /* BCME_BADLENGTH */
    -EINVAL,     /* BCME_NOTREADY */
    -EPERM,      /* BCME_EPERM */
    -ENOMEM,     /* BCME_NOMEM */
    -EINVAL,     /* BCME_ASSOCIATED */
    -ERANGE,     /* BCME_RANGE */
    -EINVAL,     /* BCME_NOTFOUND */
    -EINVAL,     /* BCME_WME_NOT_ENABLED */
    -EINVAL,     /* BCME_TSPEC_NOTFOUND */
    -EINVAL,     /* BCME_ACM_NOTSUPPORTED */
    -EINVAL,     /* BCME_NOT_WME_ASSOCIATION */
    -EIO,        /* BCME_SDIO_ERROR */
    -ENODEV,     /* BCME_DONGLE_DOWN */
    -EINVAL,     /* BCME_VERSION */
    -EIO,        /* BCME_TXFAIL */
    -EIO,        /* BCME_RXFAIL */
    -ENODEV,     /* BCME_NODEVICE */
    -EINVAL,     /* BCME_NMODE_DISABLED */
    -ENODATA,    /* BCME_NONRESIDENT */
    -EINVAL,     /* BCME_SCANREJECT */
    -EINVAL,     /* BCME_USAGE_ERROR */
    -EIO,        /* BCME_IOCTL_ERROR */
    -EIO,        /* BCME_SERIAL_PORT_ERR */
    -EOPNOTSUPP, /* BCME_DISABLED, BCME_NOTENABLED */
    -EIO,        /* BCME_DECERR */
    -EIO,        /* BCME_ENCERR */
    -EIO,        /* BCME_MICERR */
    -ERANGE,     /* BCME_REPLAY */
    -EINVAL,     /* BCME_IE_NOTFOUND */
    -EINVAL,     /* BCME_DATA_NOTFOUND */
    -EINVAL,     /* BCME_NOT_GC */
    -EINVAL,     /* BCME_PRS_REQ_FAILED */
    -EINVAL,     /* BCME_NO_P2P_SE */
    -EINVAL,     /* BCME_NOA_PND */
    -EINVAL,     /* BCME_FRAG_Q_FAILED */
    -EINVAL,     /* BCME_GET_AF_FAILED */
    -EINVAL,     /* BCME_MSCH_NOTREADY */
    -EINVAL,     /* BCME_IOV_LAST_CMD */
    -EINVAL,     /* BCME_MINIPMU_CAL_FAIL */
    -EINVAL,     /* BCME_RCAL_FAIL */
    -EINVAL,     /* BCME_LPF_RCCAL_FAIL */
    -EINVAL,     /* BCME_DACBUF_RCCAL_FAIL */
    -EINVAL,     /* BCME_VCOCAL_FAIL */
    -EINVAL,     /* BCME_BANDLOCKED */
    -EINVAL,     /* BCME_DNGL_DEVRESET */

/* When a new error code is added to bcmutils.h, add an OS-specific
 * error translation here as well
 */
/* check if BCME_LAST changed since the last time this function was updated */
#if BCME_LAST != -68
#error "You need to add an OS error translation to linuxbcmerrormap \
	for the new error code defined in bcmutils.h"
#endif // endif
};
uint lmtest = FALSE;

#ifdef DHD_MAP_LOGGING
#define DHD_MAP_LOG_SIZE 2048

typedef struct dhd_map_item {
    dmaaddr_t pa;   /* DMA address (physical) */
    uint64 ts_nsec; /* timestamp: nsec */
    uint32 size;    /* mapping size */
    uint8 rsvd[4];  /* reserved for future use */
} dhd_map_item_t;

typedef struct dhd_map_record {
    uint32 items;          /* number of total items */
    uint32 idx;            /* current index of metadata */
    dhd_map_item_t map[0]; /* metadata storage */
} dhd_map_log_t;
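
/*
 * map[] is a flexible array member used as a ring buffer: entries are
 * written at idx, which wraps modulo items (see osl_dma_map_logging()),
 * so each log keeps the most recent DHD_MAP_LOG_SIZE map/unmap records.
 */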

void osl_dma_map_dump(osl_t *osh)
{
    dhd_map_log_t *map_log, *unmap_log;
    uint64 ts_sec, ts_usec;

    map_log = (dhd_map_log_t *)(osh->dhd_map_log);
    unmap_log = (dhd_map_log_t *)(osh->dhd_unmap_log);
    osl_get_localtime(&ts_sec, &ts_usec);

    if (map_log && unmap_log) {
        printk("%s: map_idx=%d unmap_idx=%d "
               "current time=[%5lu.%06lu]\n",
               __FUNCTION__, map_log->idx, unmap_log->idx,
               (unsigned long)ts_sec, (unsigned long)ts_usec);
        printk("%s: dhd_map_log(pa)=0x%llx size=%d,"
               " dma_unmap_log(pa)=0x%llx size=%d\n",
               __FUNCTION__, (uint64)__virt_to_phys((ulong)(map_log->map)),
               (uint32)(sizeof(dhd_map_item_t) * map_log->items),
               (uint64)__virt_to_phys((ulong)(unmap_log->map)),
               (uint32)(sizeof(dhd_map_item_t) * unmap_log->items));
    }
}

static void *osl_dma_map_log_init(uint32 item_len)
{
    dhd_map_log_t *map_log;
    gfp_t flags;
    uint32 alloc_size =
        (uint32)(sizeof(dhd_map_log_t) + (item_len * sizeof(dhd_map_item_t)));

    flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
    map_log = (dhd_map_log_t *)kmalloc(alloc_size, flags);
    if (map_log) {
        memset(map_log, 0, alloc_size);
        map_log->items = item_len;
        map_log->idx = 0;
    }

    return (void *)map_log;
}

static void osl_dma_map_log_deinit(osl_t *osh)
{
    if (osh->dhd_map_log) {
        kfree(osh->dhd_map_log);
        osh->dhd_map_log = NULL;
    }

    if (osh->dhd_unmap_log) {
        kfree(osh->dhd_unmap_log);
        osh->dhd_unmap_log = NULL;
    }
}

static void osl_dma_map_logging(osl_t *osh, void *handle, dmaaddr_t pa,
                                uint32 len)
{
    dhd_map_log_t *log = (dhd_map_log_t *)handle;
    uint32 idx;

    if (log == NULL) {
        printk("%s: log is NULL\n", __FUNCTION__);
        return;
    }

    idx = log->idx;
    log->map[idx].ts_nsec = osl_localtime_ns();
    log->map[idx].pa = pa;
    log->map[idx].size = len;
    log->idx = (idx + 1) % log->items;
}
#endif /* DHD_MAP_LOGGING */

/* translate bcmerrors into linux errors */
int osl_error(int bcmerror)
{
    if (bcmerror > 0) {
        bcmerror = 0;
    } else if (bcmerror < BCME_LAST) {
        bcmerror = BCME_ERROR;
    }

    /* Array bounds covered by ASSERT in osl_attach */
    return linuxbcmerrormap[-bcmerror];
}
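
/*
 * Example (illustrative only -- wlc_ioctl is a hypothetical caller):
 *
 *     int err = wlc_ioctl(...);    // may fail with e.g. BCME_NOMEM (-27)
 *     if (err != BCME_OK)
 *         return osl_error(err);   // maps BCME_NOMEM to -ENOMEM
 */
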
osl_t *osl_attach(void *pdev, uint bustype, bool pkttag)
{
    void **osl_cmn = NULL;
    osl_t *osh;
    gfp_t flags;
#ifdef BCM_SECURE_DMA
    u32 secdma_memsize;
#endif // endif

    flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
    if (!(osh = kmalloc(sizeof(osl_t), flags))) {
        return osh;
    }

    ASSERT(osh);

    bzero(osh, sizeof(osl_t));

    if (osl_cmn == NULL || *osl_cmn == NULL) {
        if (!(osh->cmn = kmalloc(sizeof(osl_cmn_t), flags))) {
            kfree(osh);
            return NULL;
        }
        bzero(osh->cmn, sizeof(osl_cmn_t));
        if (osl_cmn) {
            *osl_cmn = osh->cmn;
        }
        atomic_set(&osh->cmn->malloced, 0);
        osh->cmn->dbgmem_list = NULL;
        spin_lock_init(&(osh->cmn->dbgmem_lock));

        spin_lock_init(&(osh->cmn->pktalloc_lock));
    } else {
        osh->cmn = *osl_cmn;
    }
    atomic_add(1, &osh->cmn->refcount);

    bcm_object_trace_init();

    /* Check that error map has the right number of entries in it */
    ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));

    osh->failed = 0;
    osh->pdev = pdev;
    osh->pub.pkttag = pkttag;
    osh->bustype = bustype;
    osh->magic = OS_HANDLE_MAGIC;
#ifdef BCM_SECURE_DMA

    if ((secdma_addr != 0) && (secdma_size != 0)) {
        printk(
            "linux_osl.c: Buffer info passed via module params, using it.\n");
        if (secdma_found == 0) {
            osh->contig_base_alloc = (phys_addr_t)secdma_addr;
            secdma_memsize = secdma_size;
        } else if (secdma_found == 1) {
            osh->contig_base_alloc = (phys_addr_t)secdma_addr2;
            secdma_memsize = secdma_size2;
        } else {
            printk("linux_osl.c secdma: secDMA instances %d \n", secdma_found);
            kfree(osh);
            return NULL;
        }
        osh->contig_base = (phys_addr_t)osh->contig_base_alloc;
        printf("linux_osl.c: secdma_cma_size = 0x%x\n", secdma_memsize);
        printf("linux_osl.c: secdma_cma_addr = 0x%x \n",
               (unsigned int)osh->contig_base_alloc);
        osh->stb_ext_params = SECDMA_MODULE_PARAMS;
    } else if (stbpriv_init(osh) == 0) {
        printk("linux_osl.c: stbpriv.txt found. Get buffer info.\n");
        if (secdma_found == 0) {
            osh->contig_base_alloc = (phys_addr_t)bcm_strtoul(
                stbparam_get("secdma_cma_addr"), NULL, 0);
            secdma_memsize =
                bcm_strtoul(stbparam_get("secdma_cma_size"), NULL, 0);
        } else if (secdma_found == 1) {
            osh->contig_base_alloc = (phys_addr_t)bcm_strtoul(
                stbparam_get("secdma_cma_addr2"), NULL, 0);
            secdma_memsize =
                bcm_strtoul(stbparam_get("secdma_cma_size2"), NULL, 0);
        } else {
            printk("linux_osl.c secdma: secDMA instances %d \n", secdma_found);
            kfree(osh);
            return NULL;
        }
        osh->contig_base = (phys_addr_t)osh->contig_base_alloc;
        printf("linux_osl.c: secdma_cma_size = 0x%x\n", secdma_memsize);
        printf("linux_osl.c: secdma_cma_addr = 0x%x \n",
               (unsigned int)osh->contig_base_alloc);
        osh->stb_ext_params = SECDMA_EXT_FILE;
    } else {
        printk("linux_osl.c: secDMA no longer supports internal buffer "
               "allocation.\n");
        kfree(osh);
        return NULL;
    }
    secdma_found++;
    osh->contig_base_alloc_coherent_va =
        osl_sec_dma_ioremap(osh, phys_to_page((u32)osh->contig_base_alloc),
                            CMA_DMA_DESC_MEMBLOCK, FALSE, TRUE);

    if (osh->contig_base_alloc_coherent_va == NULL) {
        if (osh->cmn) {
            kfree(osh->cmn);
        }
        kfree(osh);
        return NULL;
    }
    osh->contig_base_coherent_va = osh->contig_base_alloc_coherent_va;
    osh->contig_base_alloc_coherent = osh->contig_base_alloc;
    osl_sec_dma_init_consistent(osh);

    osh->contig_base_alloc += CMA_DMA_DESC_MEMBLOCK;

    osh->contig_base_alloc_va =
        osl_sec_dma_ioremap(osh, phys_to_page((u32)osh->contig_base_alloc),
                            CMA_DMA_DATA_MEMBLOCK, TRUE, FALSE);
    if (osh->contig_base_alloc_va == NULL) {
        osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va,
                            CMA_DMA_DESC_MEMBLOCK);
        if (osh->cmn) {
            kfree(osh->cmn);
        }
        kfree(osh);
        return NULL;
    }
    osh->contig_base_va = osh->contig_base_alloc_va;

#ifdef NOT_YET
    /*
     * osl_sec_dma_init_elem_mem_block(osh, CMA_BUFSIZE_512, CMA_BUFNUM,
     *     &osh->sec_list_512);
     * osh->sec_list_base_512 = osh->sec_list_512;
     * osl_sec_dma_init_elem_mem_block(osh, CMA_BUFSIZE_2K, CMA_BUFNUM,
     *     &osh->sec_list_2048);
     * osh->sec_list_base_2048 = osh->sec_list_2048;
     */
#endif // endif
    if (BCME_OK != osl_sec_dma_init_elem_mem_block(
                       osh, CMA_BUFSIZE_4K, CMA_BUFNUM, &osh->sec_list_4096)) {
        osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va,
                            CMA_DMA_DESC_MEMBLOCK);
        osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_DMA_DATA_MEMBLOCK);
        if (osh->cmn) {
            kfree(osh->cmn);
        }
        kfree(osh);
        return NULL;
    }
    osh->sec_list_base_4096 = osh->sec_list_4096;

#endif /* BCM_SECURE_DMA */

    switch (bustype) {
        case PCI_BUS:
        case SI_BUS:
        case PCMCIA_BUS:
            osh->pub.mmbus = TRUE;
            break;
        case JTAG_BUS:
        case SDIO_BUS:
        case USB_BUS:
        case SPI_BUS:
        case RPC_BUS:
            osh->pub.mmbus = FALSE;
            break;
        default:
            ASSERT(FALSE);
            break;
    }

    DMA_LOCK_INIT(osh);

#ifdef DHD_MAP_LOGGING
    osh->dhd_map_log = osl_dma_map_log_init(DHD_MAP_LOG_SIZE);
    if (osh->dhd_map_log == NULL) {
        printk("%s: Failed to alloc dhd_map_log\n", __FUNCTION__);
    }

    osh->dhd_unmap_log = osl_dma_map_log_init(DHD_MAP_LOG_SIZE);
    if (osh->dhd_unmap_log == NULL) {
        printk("%s: Failed to alloc dhd_unmap_log\n", __FUNCTION__);
    }
#endif /* DHD_MAP_LOGGING */

    return osh;
}

void osl_set_bus_handle(osl_t *osh, void *bus_handle)
{
    osh->bus_handle = bus_handle;
}

void *osl_get_bus_handle(osl_t *osh)
{
    return osh->bus_handle;
}

#if defined(BCM_BACKPLANE_TIMEOUT)
void osl_set_bpt_cb(osl_t *osh, void *bpt_cb, void *bpt_ctx)
{
    if (osh) {
        osh->bpt_cb = (bpt_cb_fn)bpt_cb;
        osh->sih = bpt_ctx;
    }
}
#endif /* BCM_BACKPLANE_TIMEOUT */

void osl_detach(osl_t *osh)
{
    if (osh == NULL) {
        return;
    }

#ifdef BCM_SECURE_DMA
    if (osh->stb_ext_params == SECDMA_EXT_FILE) {
        stbpriv_exit(osh);
    }
#ifdef NOT_YET
    osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_512, CMA_BUFNUM,
                                      osh->sec_list_base_512);
    osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_2K, CMA_BUFNUM,
                                      osh->sec_list_base_2048);
#endif /* NOT_YET */
    osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_4K, CMA_BUFNUM,
                                      osh->sec_list_base_4096);
    osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va,
                        CMA_DMA_DESC_MEMBLOCK);
    osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_DMA_DATA_MEMBLOCK);
    secdma_found--;
#endif /* BCM_SECURE_DMA */

    bcm_object_trace_deinit();

#ifdef DHD_MAP_LOGGING
    /* takes the osh itself and releases both the map and unmap logs */
    osl_dma_map_log_deinit(osh);
#endif /* DHD_MAP_LOGGING */

    ASSERT(osh->magic == OS_HANDLE_MAGIC);
    atomic_sub(1, &osh->cmn->refcount);
    if (atomic_read(&osh->cmn->refcount) == 0) {
        kfree(osh->cmn);
    }
    kfree(osh);
}

/* APIs to set/get specific quirks in OSL layer */
void BCMFASTPATH osl_flag_set(osl_t *osh, uint32 mask)
{
    osh->flags |= mask;
}

void osl_flag_clr(osl_t *osh, uint32 mask)
{
    osh->flags &= ~mask;
}

#if defined(STB)
inline bool BCMFASTPATH
#else
bool
#endif // endif
osl_is_flag_set(osl_t *osh, uint32 mask)
{
    return (osh->flags & mask);
}

#if (defined(BCMPCIE) && defined(__ARM_ARCH_7A__) &&                           \
     !defined(DHD_USE_COHERENT_MEM_FOR_RING)) ||                               \
    defined(STB_SOC_WIFI)

inline int BCMFASTPATH osl_arch_is_coherent(void)
{
    return 0;
}

inline int BCMFASTPATH osl_acp_war_enab(void)
{
    return 0;
}

inline void BCMFASTPATH osl_cache_flush(void *va, uint size)
{
    if (size > 0)
#ifdef STB_SOC_WIFI
        dma_sync_single_for_device(OSH_NULL, virt_to_phys(va), size, DMA_TX);
#else  /* STB_SOC_WIFI */
        dma_sync_single_for_device(OSH_NULL, virt_to_dma(OSH_NULL, va), size,
                                   DMA_TO_DEVICE);
#endif /* STB_SOC_WIFI */
}

inline void BCMFASTPATH osl_cache_inv(void *va, uint size)
{
#ifdef STB_SOC_WIFI
    dma_sync_single_for_cpu(OSH_NULL, virt_to_phys(va), size, DMA_RX);
#else  /* STB_SOC_WIFI */
    dma_sync_single_for_cpu(OSH_NULL, virt_to_dma(OSH_NULL, va), size,
                            DMA_FROM_DEVICE);
#endif /* STB_SOC_WIFI */
}

inline void BCMFASTPATH osl_prefetch(const void *ptr)
{
#if !defined(STB_SOC_WIFI)
    __asm__ __volatile__("pld\t%0" ::"o"(*(const char *)ptr) : "cc");
#endif // endif
}

#endif // endif

uint32 osl_pci_read_config(osl_t *osh, uint offset, uint size)
{
    uint val = 0;
    uint retry = PCI_CFG_RETRY;

    ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

    /* only 4byte access supported */
    ASSERT(size == 4);

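    /*
     * A PCI config read that fails (e.g. master abort) returns all ones,
     * so retry up to PCI_CFG_RETRY times before returning the value as-is.
     */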
    do {
        pci_read_config_dword(osh->pdev, offset, &val);
        if (val != 0xffffffff) {
            break;
        }
    } while (retry--);

    return (val);
}

void osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val)
{
    uint retry = PCI_CFG_RETRY;

    ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

    /* only 4byte access supported */
    ASSERT(size == 4);

    do {
        pci_write_config_dword(osh->pdev, offset, val);
        if (offset != PCI_BAR0_WIN) {
            break;
        }
        /* BAR0 window writes are read back and retried until they stick */
        if (osl_pci_read_config(osh, offset, size) == val) {
            break;
        }
    } while (retry--);
}

#ifdef BCMPCIE
/* return bus # for the pci device pointed by osh->pdev */
uint osl_pci_bus(osl_t *osh)
{
    ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

#if defined(__ARM_ARCH_7A__)
    return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
#else
    return ((struct pci_dev *)osh->pdev)->bus->number;
#endif // endif
}

/* return slot # for the pci device pointed by osh->pdev */
uint osl_pci_slot(osl_t *osh)
{
    ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

#if defined(__ARM_ARCH_7A__)
    return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn) + 1;
#else
    return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
#endif // endif
}

/* return domain # for the pci device pointed by osh->pdev */
uint osl_pcie_domain(osl_t *osh)
{
    ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

    return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
}

/* return bus # for the pci device pointed by osh->pdev */
uint osl_pcie_bus(osl_t *osh)
{
    ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

    return ((struct pci_dev *)osh->pdev)->bus->number;
}

/* return the pci device pointed by osh->pdev */
struct pci_dev *osl_pci_device(osl_t *osh)
{
    ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

    return osh->pdev;
}
#endif /* BCMPCIE */

static void osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size,
                            bool write)
{
}

void osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size)
{
    osl_pcmcia_attr(osh, offset, (char *)buf, size, FALSE);
}

void osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size)
{
    osl_pcmcia_attr(osh, offset, (char *)buf, size, TRUE);
}

void *osl_malloc(osl_t *osh, uint size)
{
    void *addr;
    gfp_t flags;

    /* only ASSERT if osh is defined */
    if (osh) {
        ASSERT(osh->magic == OS_HANDLE_MAGIC);
    }
#ifdef CONFIG_DHD_USE_STATIC_BUF
    if (bcm_static_buf) {
        unsigned long irq_flags;
        int i = 0;
        if ((size >= PAGE_SIZE) && (size <= STATIC_BUF_SIZE)) {
            spin_lock_irqsave(&bcm_static_buf->static_lock, irq_flags);

            for (i = 0; i < STATIC_BUF_MAX_NUM; i++) {
                if (bcm_static_buf->buf_use[i] == 0) {
                    break;
                }
            }

            if (i == STATIC_BUF_MAX_NUM) {
                spin_unlock_irqrestore(&bcm_static_buf->static_lock, irq_flags);
                printk("all static buffers in use!\n");
                goto original;
            }

            bcm_static_buf->buf_use[i] = 1;
            spin_unlock_irqrestore(&bcm_static_buf->static_lock, irq_flags);

            bzero(bcm_static_buf->buf_ptr + STATIC_BUF_SIZE * i, size);
            if (osh) {
                atomic_add(size, &osh->cmn->malloced);
            }

            return ((void *)(bcm_static_buf->buf_ptr + STATIC_BUF_SIZE * i));
        }
    }
original:
#endif /* CONFIG_DHD_USE_STATIC_BUF */

    flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
    if ((addr = kmalloc(size, flags)) == NULL) {
        if (osh) {
            osh->failed++;
        }
        return (NULL);
    }
    if (osh && osh->cmn) {
        atomic_add(size, &osh->cmn->malloced);
    }

    return (addr);
}

void *osl_mallocz(osl_t *osh, uint size)
{
    void *ptr;

    ptr = osl_malloc(osh, size);
    if (ptr != NULL) {
        bzero(ptr, size);
    }

    return ptr;
}

void osl_mfree(osl_t *osh, void *addr, uint size)
{
#ifdef CONFIG_DHD_USE_STATIC_BUF
    unsigned long flags;

    if (bcm_static_buf) {
        if ((addr > (void *)bcm_static_buf) &&
            ((unsigned char *)addr <=
             ((unsigned char *)bcm_static_buf + STATIC_BUF_TOTAL_LEN))) {
            int buf_idx = 0;

            buf_idx = ((unsigned char *)addr - bcm_static_buf->buf_ptr) /
                      STATIC_BUF_SIZE;

            spin_lock_irqsave(&bcm_static_buf->static_lock, flags);
            bcm_static_buf->buf_use[buf_idx] = 0;
            spin_unlock_irqrestore(&bcm_static_buf->static_lock, flags);

            if (osh && osh->cmn) {
                ASSERT(osh->magic == OS_HANDLE_MAGIC);
                atomic_sub(size, &osh->cmn->malloced);
            }
            return;
        }
    }
#endif /* CONFIG_DHD_USE_STATIC_BUF */
    if (osh && osh->cmn) {
        ASSERT(osh->magic == OS_HANDLE_MAGIC);

        ASSERT(size <= osl_malloced(osh));

        atomic_sub(size, &osh->cmn->malloced);
    }
    kfree(addr);
}

void *osl_vmalloc(osl_t *osh, uint size)
{
    void *addr;

    /* only ASSERT if osh is defined */
    if (osh) {
        ASSERT(osh->magic == OS_HANDLE_MAGIC);
    }
    if ((addr = vmalloc(size)) == NULL) {
        if (osh) {
            osh->failed++;
        }
        return (NULL);
    }
    if (osh && osh->cmn) {
        atomic_add(size, &osh->cmn->malloced);
    }

    return (addr);
}

void *osl_vmallocz(osl_t *osh, uint size)
{
    void *ptr;

    ptr = osl_vmalloc(osh, size);
    if (ptr != NULL) {
        bzero(ptr, size);
    }

    return ptr;
}

void osl_vmfree(osl_t *osh, void *addr, uint size)
{
    if (osh && osh->cmn) {
        ASSERT(osh->magic == OS_HANDLE_MAGIC);

        ASSERT(size <= osl_malloced(osh));

        atomic_sub(size, &osh->cmn->malloced);
    }
    vfree(addr);
}

uint osl_check_memleak(osl_t *osh)
{
    ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
    if (atomic_read(&osh->cmn->refcount) == 1) {
        return (atomic_read(&osh->cmn->malloced));
    } else {
        return 0;
    }
}

uint osl_malloced(osl_t *osh)
{
    ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
    return (atomic_read(&osh->cmn->malloced));
}

uint osl_malloc_failed(osl_t *osh)
{
    ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
    return (osh->failed);
}

uint osl_dma_consistent_align(void)
{
    return (PAGE_SIZE);
}

void *osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits,
                               uint *alloced, dmaaddr_t *pap)
{
    void *va;
    uint16 align = (1 << align_bits);
    ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

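    /*
     * If the platform's consistent-allocation alignment cannot already
     * satisfy the requested alignment, over-allocate by 'align' so the
     * caller has room to round the address up; the padded size is
     * reported back through *alloced.
     */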
    if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align)) {
        size += align;
    }
    *alloced = size;

#ifndef BCM_SECURE_DMA
#if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)) ||   \
    defined(STB_SOC_WIFI)
    va = kmalloc(size, GFP_ATOMIC | __GFP_ZERO);
    if (va) {
        *pap = (ulong)__virt_to_phys((ulong)va);
    }
#else
    {
        dma_addr_t pap_lin;
        struct pci_dev *hwdev = osh->pdev;
        gfp_t flags;
#ifdef DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL
        flags = GFP_ATOMIC;
#else
        flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
#endif /* DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL */
        va = dma_alloc_coherent(&hwdev->dev, size, &pap_lin, flags);
#ifdef BCMDMA64OSL
        PHYSADDRLOSET(*pap, pap_lin & 0xffffffff);
        PHYSADDRHISET(*pap, (pap_lin >> 32) & 0xffffffff);
#else
        *pap = (dmaaddr_t)pap_lin;
#endif /* BCMDMA64OSL */
    }
#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
#else
    va = osl_sec_dma_alloc_consistent(osh, size, align_bits, pap);
#endif /* BCM_SECURE_DMA */
    return va;
}
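
/*
 * Typical caller pattern (a sketch; the ring names are hypothetical):
 *
 *     uint alloced;
 *     dmaaddr_t pa;
 *     void *va = osl_dma_alloc_consistent(osh, ring_sz, 4, &alloced, &pa);
 *     // align_bits == 4 requests a 16-byte boundary; the caller rounds
 *     // va/pa up itself and passes 'alloced' back when freeing.
 */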

void osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa)
{
#ifdef BCMDMA64OSL
    dma_addr_t paddr;
#endif /* BCMDMA64OSL */
    ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

#ifndef BCM_SECURE_DMA
#if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)) ||   \
    defined(STB_SOC_WIFI)
    kfree(va);
#else
#ifdef BCMDMA64OSL
    PHYSADDRTOULONG(pa, paddr);
    pci_free_consistent(osh->pdev, size, va, paddr);
#else
    pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
#endif /* BCMDMA64OSL */
#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
#else
    osl_sec_dma_free_consistent(osh, va, size, pa);
#endif /* BCM_SECURE_DMA */
}

void *osl_virt_to_phys(void *va)
{
    return (void *)(uintptr)virt_to_phys(va);
}

#include <asm/cacheflush.h>
void BCMFASTPATH osl_dma_flush(osl_t *osh, void *va, uint size, int direction,
                               void *p, hnddma_seg_map_t *dmah)
{
    return;
}

dmaaddr_t BCMFASTPATH osl_dma_map(osl_t *osh, void *va, uint size,
                                  int direction, void *p,
                                  hnddma_seg_map_t *dmah)
{
    int dir;
    dmaaddr_t ret_addr;
    dma_addr_t map_addr;
    int ret;

    DMA_LOCK(osh);

    ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
    dir = (direction == DMA_TX) ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;

#ifdef STB_SOC_WIFI
#if (__LINUX_ARM_ARCH__ == 8)
    /* need to flush or invalidate the cache here */
    if (dir == DMA_TX) { /* to device */
        osl_cache_flush(va, size);
    } else if (dir == DMA_RX) { /* from device */
        osl_cache_inv(va, size);
    } else { /* both */
        osl_cache_flush(va, size);
        osl_cache_inv(va, size);
    }
    DMA_UNLOCK(osh);
    return virt_to_phys(va);
#else  /* (__LINUX_ARM_ARCH__ == 8) */
    map_addr = dma_map_single(osh->pdev, va, size, dir);
    DMA_UNLOCK(osh);
    return map_addr;
#endif /* (__LINUX_ARM_ARCH__ == 8) */
#else  /* ! STB_SOC_WIFI */
    map_addr = pci_map_single(osh->pdev, va, size, dir);
#endif /* ! STB_SOC_WIFI */

    ret = pci_dma_mapping_error(osh->pdev, map_addr);
    if (ret) {
        printk("%s: Failed to map memory\n", __FUNCTION__);
        PHYSADDRLOSET(ret_addr, 0);
        PHYSADDRHISET(ret_addr, 0);
    } else {
        PHYSADDRLOSET(ret_addr, map_addr & 0xffffffff);
        PHYSADDRHISET(ret_addr, (map_addr >> 32) & 0xffffffff);
    }

#ifdef DHD_MAP_LOGGING
    osl_dma_map_logging(osh, osh->dhd_map_log, ret_addr, size);
#endif /* DHD_MAP_LOGGING */

    DMA_UNLOCK(osh);

    return ret_addr;
}

void BCMFASTPATH osl_dma_unmap(osl_t *osh, dmaaddr_t pa, uint size,
                               int direction)
{
    int dir;
#ifdef BCMDMA64OSL
    dma_addr_t paddr;
#endif /* BCMDMA64OSL */

    ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

    DMA_LOCK(osh);

    dir = (direction == DMA_TX) ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;

#ifdef DHD_MAP_LOGGING
    osl_dma_map_logging(osh, osh->dhd_unmap_log, pa, size);
#endif /* DHD_MAP_LOGGING */

#ifdef BCMDMA64OSL
    PHYSADDRTOULONG(pa, paddr);
    pci_unmap_single(osh->pdev, paddr, size, dir);
#else /* BCMDMA64OSL */

#ifdef STB_SOC_WIFI
#if (__LINUX_ARM_ARCH__ == 8)
    if (dir == DMA_TX) { /* to device */
        dma_sync_single_for_device(OSH_NULL, pa, size, DMA_TX);
    } else if (dir == DMA_RX) { /* from device */
        dma_sync_single_for_cpu(OSH_NULL, pa, size, DMA_RX);
    } else { /* both */
        dma_sync_single_for_device(OSH_NULL, pa, size, DMA_TX);
        dma_sync_single_for_cpu(OSH_NULL, pa, size, DMA_RX);
    }
#else  /* (__LINUX_ARM_ARCH__ == 8) */
    dma_unmap_single(osh->pdev, (uintptr)pa, size, dir);
#endif /* (__LINUX_ARM_ARCH__ == 8) */
#else  /* STB_SOC_WIFI */
    pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
#endif /* STB_SOC_WIFI */

#endif /* BCMDMA64OSL */

    DMA_UNLOCK(osh);
}

/* OSL function for CPU relax */
inline void BCMFASTPATH osl_cpu_relax(void)
{
    cpu_relax();
}

extern void osl_preempt_disable(osl_t *osh)
{
    preempt_disable();
}

extern void osl_preempt_enable(osl_t *osh)
{
    preempt_enable();
}

#if defined(BCMASSERT_LOG)
void osl_assert(const char *exp, const char *file, int line)
{
    char tempbuf[256];
    const char *basename;

    basename = strrchr(file, '/');
    /* skip the '/' */
    if (basename) {
        basename++;
    }

    if (!basename) {
        basename = file;
    }

#ifdef BCMASSERT_LOG
    snprintf(tempbuf, 64, "\"%s\": file \"%s\", line %d\n", exp, basename,
             line);
#endif /* BCMASSERT_LOG */

    switch (g_assert_type) {
        case 0:
            panic("%s", tempbuf);
            break;
        case 1:
            /* fall through */
        case 3:
            printk("%s", tempbuf);
            break;
        case 2:
            printk("%s", tempbuf);
            BUG();
            break;
        default:
            break;
    }
}
#endif // endif

void osl_delay(uint usec)
{
    uint d;

    while (usec > 0) {
        d = MIN(usec, 1000);
        udelay(d);
        usec -= d;
    }
}

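/*
 * msleep() rounds up to jiffies and can oversleep badly for short delays,
 * so sleeps under 20 ms use usleep_range() on kernels that provide it.
 */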
void osl_sleep(uint ms)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
    if (ms < 20) {
        usleep_range(ms * 1000, ms * 1000 + 1000);
    } else
#endif
        msleep(ms);
}

uint64 osl_sysuptime_us(void)
{
    struct osl_timespec tv;
    uint64 usec;

    osl_do_gettimeofday(&tv);
    /* tv_usec content is fraction of a second */
    usec = (uint64)tv.tv_sec * 1000000ul + tv.tv_usec;
    return usec;
}

uint64 osl_localtime_ns(void)
{
    uint64 ts_nsec = 0;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
    ts_nsec = local_clock();
#else
    ts_nsec = cpu_clock(smp_processor_id());
#endif

    return ts_nsec;
}

void osl_get_localtime(uint64 *sec, uint64 *usec)
{
    uint64 ts_nsec = 0;
    unsigned long rem_nsec = 0;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
    ts_nsec = local_clock();
#else
    ts_nsec = cpu_clock(smp_processor_id());
#endif
    rem_nsec = do_div(ts_nsec, NSEC_PER_SEC);
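    /* do_div() left the quotient (whole seconds) in ts_nsec and returned
     * the remainder in nanoseconds; dividing by 1000 yields microseconds.
     */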
    *sec = (uint64)ts_nsec;
    *usec = (uint64)(rem_nsec / MSEC_PER_SEC);
}

uint64 osl_systztime_us(void)
{
    struct osl_timespec tv;
    uint64 tzusec;

    osl_do_gettimeofday(&tv);
    /* apply timezone */
    tzusec =
        (uint64)((tv.tv_sec - (sys_tz.tz_minuteswest * 60)) * USEC_PER_SEC);
    tzusec += tv.tv_usec;

    return tzusec;
}

/*
 * OSLREGOPS specifies the use of osl_XXX routines to be used for register
 * access
 */

/*
 * BINOSL selects the slightly slower function-call-based binary compatible osl.
 */

uint32 osl_rand(void)
{
    uint32 rand;

    get_random_bytes(&rand, sizeof(rand));

    return rand;
}

/* Linux Kernel: File Operations: start */
void *osl_os_open_image(char *filename)
{
    struct file *fp;

    fp = filp_open(filename, O_RDONLY, 0);
    /*
     * 2.6.11 (FC4) supports filp_open() but later revs don't?
     * Alternative:
     * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
     * ???
     */
    if (IS_ERR(fp)) {
        fp = NULL;
    }

    return fp;
}

int osl_os_get_image_block(char *buf, int len, void *image)
{
    struct file *fp = (struct file *)image;
    int rdlen;

    if (!image) {
        return 0;
    }

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
    /* this kernel_read() variant advances fp->f_pos through the pointer,
     * so it must not be advanced again below
     */
    rdlen = kernel_read(fp, buf, len, &fp->f_pos);
#else
    rdlen = kernel_read(fp, fp->f_pos, buf, len);
    if (rdlen > 0) {
        fp->f_pos += rdlen;
    }
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) */
    return rdlen;
}

void osl_os_close_image(void *image)
{
    if (image) {
        filp_close((struct file *)image, NULL);
    }
}

int osl_os_image_size(void *image)
{
    int len = 0, curroffset;

    if (image) {
        /* store the current offset (SEEK_CUR) */
        curroffset = generic_file_llseek(image, 0, 1);
        /* go to end of file to get the length (SEEK_END) */
        len = generic_file_llseek(image, 0, 2);
        /* restore the original offset (SEEK_SET) */
        generic_file_llseek(image, curroffset, 0);
    }
    return len;
}

/* Linux Kernel: File Operations: end */

#if (defined(STB) && defined(__arm__))
inline void osl_pcie_rreg(osl_t *osh, ulong addr, volatile void *v, uint size)
{
    unsigned long flags = 0;
    int pci_access = 0;
    int acp_war_enab = ACP_WAR_ENAB();

    if (osh && BUSTYPE(osh->bustype) == PCI_BUS) {
        pci_access = 1;
    }

    if (pci_access && acp_war_enab) {
        spin_lock_irqsave(&l2x0_reg_lock, flags);
    }

    switch (size) {
        case sizeof(uint8):
            *(volatile uint8 *)v = readb((volatile uint8 *)(addr));
            break;
        case sizeof(uint16):
            *(volatile uint16 *)v = readw((volatile uint16 *)(addr));
            break;
        case sizeof(uint32):
            *(volatile uint32 *)v = readl((volatile uint32 *)(addr));
            break;
        case sizeof(uint64):
            *(volatile uint64 *)v = *((volatile uint64 *)(addr));
            break;
    }

    if (pci_access && acp_war_enab) {
        spin_unlock_irqrestore(&l2x0_reg_lock, flags);
    }
}
#endif // endif

#if defined(BCM_BACKPLANE_TIMEOUT)
inline void osl_bpt_rreg(osl_t *osh, ulong addr, volatile void *v, uint size)
{
    bool poll_timeout = FALSE;
    static int in_si_clear = FALSE;

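    /*
     * An all-ones read of any width is taken as a backplane timeout; the
     * registered callback then gets a chance to clear the condition.
     * in_si_clear guards against recursion from reads issued inside the
     * callback itself.
     */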
    switch (size) {
        case sizeof(uint8):
            *(volatile uint8 *)v = readb((volatile uint8 *)(addr));
            if (*(volatile uint8 *)v == 0xff) {
                poll_timeout = TRUE;
            }
            break;
        case sizeof(uint16):
            *(volatile uint16 *)v = readw((volatile uint16 *)(addr));
            if (*(volatile uint16 *)v == 0xffff) {
                poll_timeout = TRUE;
            }
            break;
        case sizeof(uint32):
            *(volatile uint32 *)v = readl((volatile uint32 *)(addr));
            if (*(volatile uint32 *)v == 0xffffffff) {
                poll_timeout = TRUE;
            }
            break;
        case sizeof(uint64):
            *(volatile uint64 *)v = *((volatile uint64 *)(addr));
            if (*(volatile uint64 *)v == 0xffffffffffffffff) {
                poll_timeout = TRUE;
            }
            break;
    }

    if (osh && osh->sih && (in_si_clear == FALSE) && poll_timeout &&
        osh->bpt_cb) {
        in_si_clear = TRUE;
        osh->bpt_cb((void *)osh->sih, (void *)addr);
        in_si_clear = FALSE;
    }
}
#endif /* BCM_BACKPLANE_TIMEOUT */

#ifdef BCM_SECURE_DMA
static void *osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size,
                                 bool iscache, bool isdecr)
{
    struct page **map;
    int order, i;
    void *addr = NULL;

    size = PAGE_ALIGN(size);
    order = get_order(size);
    map = kmalloc(sizeof(struct page *) << order, GFP_ATOMIC);
    if (map == NULL) {
        return NULL;
    }

    for (i = 0; i < (size >> PAGE_SHIFT); i++) {
        map[i] = page + i;
    }
    if (iscache) {
        addr = vmap(map, size >> PAGE_SHIFT, VM_MAP, __pgprot(PAGE_KERNEL));
        if (isdecr) {
            osh->contig_delta_va_pa = ((uint8 *)addr - page_to_phys(page));
        }
    } else {
#if defined(__ARM_ARCH_7A__)
        addr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
                    pgprot_noncached(__pgprot(PAGE_KERNEL)));
#endif // endif
        if (isdecr) {
            osh->contig_delta_va_pa = ((uint8 *)addr - page_to_phys(page));
        }
    }

    kfree(map);
    return (void *)addr;
}

static void osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size)
{
    vunmap(contig_base_va);
}

static int osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max,
                                           sec_mem_elem_t **list)
{
    int i;
    int ret = BCME_OK;
    sec_mem_elem_t *sec_mem_elem;

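    /*
     * Carve 'max' fixed-size blocks out of the reserved contiguous region
     * and chain them into a singly linked free list; contig_base_alloc and
     * contig_base_alloc_va advance past the carved region as we go.
     */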
    if ((sec_mem_elem = kmalloc(sizeof(sec_mem_elem_t) * (max), GFP_ATOMIC)) !=
        NULL) {
        *list = sec_mem_elem;
        bzero(sec_mem_elem, sizeof(sec_mem_elem_t) * (max));
        for (i = 0; i < max - 1; i++) {
            sec_mem_elem->next = (sec_mem_elem + 1);
            sec_mem_elem->size = mbsize;
            sec_mem_elem->pa_cma = osh->contig_base_alloc;
            sec_mem_elem->vac = osh->contig_base_alloc_va;

            sec_mem_elem->pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
            osh->contig_base_alloc += mbsize;
            osh->contig_base_alloc_va =
                ((uint8 *)osh->contig_base_alloc_va + mbsize);

            sec_mem_elem = sec_mem_elem + 1;
        }
        sec_mem_elem->next = NULL;
        sec_mem_elem->size = mbsize;
        sec_mem_elem->pa_cma = osh->contig_base_alloc;
        sec_mem_elem->vac = osh->contig_base_alloc_va;

        sec_mem_elem->pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
        osh->contig_base_alloc += mbsize;
        osh->contig_base_alloc_va =
            ((uint8 *)osh->contig_base_alloc_va + mbsize);
    } else {
        printf("%s sec mem elem kmalloc failed\n", __FUNCTION__);
        ret = BCME_ERROR;
    }
    return ret;
}

static void osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize,
                                              int max, void *sec_list_base)
{
    if (sec_list_base) {
        kfree(sec_list_base);
    }
}

static sec_mem_elem_t *BCMFASTPATH
osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size, int direction,
                           struct sec_cma_info *ptr_cma_info, uint offset)
{
    sec_mem_elem_t *sec_mem_elem = NULL;

#ifdef NOT_YET
    if (size <= 512 && osh->sec_list_512) {
        sec_mem_elem = osh->sec_list_512;
        osh->sec_list_512 = sec_mem_elem->next;
    } else if (size <= 2048 && osh->sec_list_2048) {
        sec_mem_elem = osh->sec_list_2048;
        osh->sec_list_2048 = sec_mem_elem->next;
    } else
#else
    ASSERT(osh->sec_list_4096);
    sec_mem_elem = osh->sec_list_4096;
    osh->sec_list_4096 = sec_mem_elem->next;
#endif /* NOT_YET */

    sec_mem_elem->next = NULL;

    if (ptr_cma_info->sec_alloc_list_tail) {
        ptr_cma_info->sec_alloc_list_tail->next = sec_mem_elem;
        ptr_cma_info->sec_alloc_list_tail = sec_mem_elem;
    } else {
        /* First allocation: if the tail is NULL, sec_alloc_list must also
         * be NULL
         */
        ASSERT(ptr_cma_info->sec_alloc_list == NULL);
        ptr_cma_info->sec_alloc_list = sec_mem_elem;
        ptr_cma_info->sec_alloc_list_tail = sec_mem_elem;
    }
    return sec_mem_elem;
}

static void BCMFASTPATH osl_sec_dma_free_mem_elem(osl_t *osh,
                                                  sec_mem_elem_t *sec_mem_elem)
{
    sec_mem_elem->dma_handle = 0x0;
    sec_mem_elem->va = NULL;
#ifdef NOT_YET
    if (sec_mem_elem->size == 512) {
        sec_mem_elem->next = osh->sec_list_512;
        osh->sec_list_512 = sec_mem_elem;
    } else if (sec_mem_elem->size == 2048) {
        sec_mem_elem->next = osh->sec_list_2048;
        osh->sec_list_2048 = sec_mem_elem;
    } else if (sec_mem_elem->size == 4096) {
#endif /* NOT_YET */
        sec_mem_elem->next = osh->sec_list_4096;
        osh->sec_list_4096 = sec_mem_elem;
#ifdef NOT_YET
    } else {
        printf("%s free failed size=%d\n", __FUNCTION__, sec_mem_elem->size);
    }
#endif /* NOT_YET */
}

static sec_mem_elem_t *BCMFASTPATH osl_sec_dma_find_rem_elem(
    osl_t *osh, struct sec_cma_info *ptr_cma_info, dma_addr_t dma_handle)
{
    sec_mem_elem_t *sec_mem_elem = ptr_cma_info->sec_alloc_list;
    sec_mem_elem_t *sec_prv_elem = ptr_cma_info->sec_alloc_list;

    if (sec_mem_elem->dma_handle == dma_handle) {
        ptr_cma_info->sec_alloc_list = sec_mem_elem->next;

        if (sec_mem_elem == ptr_cma_info->sec_alloc_list_tail) {
            ptr_cma_info->sec_alloc_list_tail = NULL;
            ASSERT(ptr_cma_info->sec_alloc_list == NULL);
        }

        return sec_mem_elem;
    }
    sec_mem_elem = sec_mem_elem->next;

    while (sec_mem_elem != NULL) {
        if (sec_mem_elem->dma_handle == dma_handle) {
            sec_prv_elem->next = sec_mem_elem->next;
            if (sec_mem_elem == ptr_cma_info->sec_alloc_list_tail) {
                ptr_cma_info->sec_alloc_list_tail = sec_prv_elem;
            }

            return sec_mem_elem;
        }
        sec_prv_elem = sec_mem_elem;
        sec_mem_elem = sec_mem_elem->next;
    }
    return NULL;
}

static sec_mem_elem_t *
osl_sec_dma_rem_first_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info)
{
    sec_mem_elem_t *sec_mem_elem = ptr_cma_info->sec_alloc_list;

    if (sec_mem_elem) {
        ptr_cma_info->sec_alloc_list = sec_mem_elem->next;

        if (ptr_cma_info->sec_alloc_list == NULL) {
            ptr_cma_info->sec_alloc_list_tail = NULL;
        }
        return sec_mem_elem;
    } else {
        return NULL;
    }
}

static void *BCMFASTPATH
osl_sec_dma_last_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info)
{
    return ptr_cma_info->sec_alloc_list_tail;
}

dma_addr_t BCMFASTPATH osl_sec_dma_map_txmeta(osl_t *osh, void *va, uint size,
                                              int direction, void *p,
                                              hnddma_seg_map_t *dmah,
                                              void *ptr_cma_info)
{
    sec_mem_elem_t *sec_mem_elem;
    struct page *pa_cma_page;
    uint loffset;
    void *vaorig = ((uint8 *)va + size);
    dma_addr_t dma_handle = 0x0;
    /* packet will be the one added with osl_sec_dma_map() just before this
     * call
     */

    sec_mem_elem = osl_sec_dma_last_elem(osh, ptr_cma_info);
    if (sec_mem_elem && sec_mem_elem->va == vaorig) {
        pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
        loffset =
            sec_mem_elem->pa_cma - (sec_mem_elem->pa_cma & ~(PAGE_SIZE - 1));
        dma_handle = dma_map_page(
            OSH_NULL, pa_cma_page, loffset, size,
            (direction == DMA_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
    } else {
        printf("%s: error orig va not found va = 0x%p \n", __FUNCTION__,
               vaorig);
    }
    return dma_handle;
}

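/*
 * Secure-DMA bounce path: the payload is copied into a pre-carved CMA
 * element on TX (or received into one on RX), and it is the CMA page,
 * not the caller's buffer, that gets DMA-mapped to the device.
 */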
osl_sec_dma_map(osl_t * osh,void * va,uint size,int direction,void * p,hnddma_seg_map_t * dmah,void * ptr_cma_info,uint offset)1620 dma_addr_t BCMFASTPATH osl_sec_dma_map(osl_t *osh, void *va, uint size,
1621                                        int direction, void *p,
1622                                        hnddma_seg_map_t *dmah,
1623                                        void *ptr_cma_info, uint offset)
1624 {
1625     sec_mem_elem_t *sec_mem_elem;
1626     struct page *pa_cma_page;
1627     void *pa_cma_kmap_va = NULL;
1628     uint buflen = 0;
1629     dma_addr_t dma_handle = 0x0;
1630     uint loffset;
1631 #ifdef NOT_YET
1632     int *fragva;
1633     struct sk_buff *skb;
1634     int i = 0;
1635 #endif /* NOT_YET */
1636     ASSERT((direction == DMA_RX) || (direction == DMA_TX));
1637     sec_mem_elem = osl_sec_dma_alloc_mem_elem(osh, va, size, direction,
1638                                               ptr_cma_info, offset);
1639 
1640     sec_mem_elem->va = va;
1641     sec_mem_elem->direction = direction;
1642     pa_cma_page = sec_mem_elem->pa_cma_page;
1643 
1644     loffset = sec_mem_elem->pa_cma - (sec_mem_elem->pa_cma & ~(PAGE_SIZE - 1));
1645 
1646     pa_cma_kmap_va = sec_mem_elem->vac;
1647     pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + offset);
1648     buflen = size;
1649 
1650     if (direction == DMA_TX) {
1651         memcpy((uint8 *)pa_cma_kmap_va + offset, va, size);
1652 #ifdef NOT_YET
1653         if (p == NULL) {
1654             memcpy(pa_cma_kmap_va, va, size);
1655         } else {
1656             for (skb = (struct sk_buff *)p; skb != NULL;
1657                  skb = PKTNEXT(osh, skb)) {
1658                 if (skb_is_nonlinear(skb)) {
1659                     for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1660                         skb_frag_t *f = &skb_shinfo(skb)->frags[i];
1661                         fragva = kmap_atomic(skb_frag_page(f));
1662                         pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + buflen);
1663                         memcpy((pa_cma_kmap_va), (fragva + f->page_offset),
1664                                skb_frag_size(f));
1665                         kunmap_atomic(fragva);
1666                         buflen += skb_frag_size(f);
1667                     }
1668                 } else {
1669                     pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + buflen);
1670                     memcpy(pa_cma_kmap_va, skb->data, skb->len);
1671                     buflen += skb->len;
1672                 }
1673             }
1674         }
1675 #endif /* NOT_YET */
1676         if (dmah) {
1677             dmah->nsegs = 1;
1678             dmah->origsize = buflen;
1679         }
1680     } else {
1681         if ((p != NULL) && (dmah != NULL)) {
1682             dmah->nsegs = 1;
1683             dmah->origsize = buflen;
1684         }
1685         *(uint32 *)(pa_cma_kmap_va) = 0x0;
1686     }
1687 
1688     if (direction == DMA_RX) {
1689         flush_kernel_vmap_range(pa_cma_kmap_va, sizeof(int));
1690     }
1691     dma_handle =
1692         dma_map_page(OSH_NULL, pa_cma_page, loffset + offset, buflen,
1693                      (direction == DMA_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
1694     if (dmah) {
1695         dmah->segs[0].addr = dma_handle;
1696         dmah->segs[0].length = buflen;
1697     }
1698     sec_mem_elem->dma_handle = dma_handle;
1699     return dma_handle;
1700 }
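
/*
 * Illustrative sketch (editorial, not part of the original file): on TX the
 * metadata header that precedes the payload is mapped with
 * osl_sec_dma_map_txmeta() immediately after the payload itself, since the
 * tail element recorded by osl_sec_dma_map() is matched against
 * (meta_va + meta_size).  The names buf, METASZ, len and cma below are
 * hypothetical.
 */
#if 0
    dma_addr_t pa_data, pa_meta;

    /* 1. Copy-map the payload, which starts METASZ bytes into the buffer. */
    pa_data = osl_sec_dma_map(osh, (uint8 *)buf + METASZ, len, DMA_TX,
                              NULL, NULL, &cma, 0);

    /* 2. Map the preceding metadata; buf + METASZ must equal the va of the
     * element just linked in step 1, or the call fails.
     */
    pa_meta = osl_sec_dma_map_txmeta(osh, buf, METASZ, DMA_TX, NULL, NULL,
                                     &cma);

    /* 3. Once the DMA completes, release the payload mapping (and its list
     * element) by handle.
     */
    osl_sec_dma_unmap(osh, pa_data, len, DMA_TX, NULL, NULL, &cma, 0);
#endif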

dma_addr_t BCMFASTPATH osl_sec_dma_dd_map(osl_t *osh, void *va, uint size,
                                          int direction, void *p,
                                          hnddma_seg_map_t *map)
{
    struct page *pa_cma_page;
    phys_addr_t pa_cma;
    dma_addr_t dma_handle = 0x0;
    uint loffset;

    pa_cma = ((uint8 *)va - (uint8 *)osh->contig_delta_va_pa);
    pa_cma_page = phys_to_page(pa_cma);
    loffset = pa_cma - (pa_cma & ~(PAGE_SIZE - 1));

    dma_handle =
        dma_map_page(OSH_NULL, pa_cma_page, loffset, size,
                     (direction == DMA_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE));

    return dma_handle;
}

void BCMFASTPATH osl_sec_dma_unmap(osl_t *osh, dma_addr_t dma_handle, uint size,
                                   int direction, void *p,
                                   hnddma_seg_map_t *map, void *ptr_cma_info,
                                   uint offset)
{
    sec_mem_elem_t *sec_mem_elem;
#ifdef NOT_YET
    struct page *pa_cma_page;
    struct sk_buff *skb;
    uint8 *cpuaddr;
    int i;
#endif /* NOT_YET */
    void *pa_cma_kmap_va = NULL;
    uint buflen = 0;
    dma_addr_t pa_cma;
    void *va;
    int read_count = 0;
    BCM_REFERENCE(buflen);
    BCM_REFERENCE(read_count);

    sec_mem_elem = osl_sec_dma_find_rem_elem(osh, ptr_cma_info, dma_handle);
    ASSERT(sec_mem_elem);

    va = sec_mem_elem->va;
    va = (uint8 *)va - offset;
    pa_cma = sec_mem_elem->pa_cma;

#ifdef NOT_YET
    pa_cma_page = sec_mem_elem->pa_cma_page;
#endif /* NOT_YET */

    if (direction == DMA_RX) {
        if (p == NULL) {
            pa_cma_kmap_va = sec_mem_elem->vac;

            /* Poll the length word for up to ~200 us; the device writes a
             * non-zero length there when the receive completes.
             */
            do {
                invalidate_kernel_vmap_range(pa_cma_kmap_va, sizeof(int));

                buflen = *(uint *)(pa_cma_kmap_va);
                if (buflen) {
                    break;
                }

                OSL_DELAY(1);
                read_count++;
            } while (read_count < 200);
            dma_unmap_page(OSH_NULL, pa_cma, size, DMA_FROM_DEVICE);
            memcpy(va, pa_cma_kmap_va, size);
        }
#ifdef NOT_YET
        else {
            buflen = 0;
            for (skb = (struct sk_buff *)p; (buflen < size) && (skb != NULL);
                 skb = skb->next) {
                if (skb_is_nonlinear(skb)) {
                    pa_cma_kmap_va = kmap_atomic(pa_cma_page);
                    for (i = 0;
                         (buflen < size) && (i < skb_shinfo(skb)->nr_frags);
                         i++) {
                        skb_frag_t *f = &skb_shinfo(skb)->frags[i];
                        cpuaddr = kmap_atomic(skb_frag_page(f));
                        pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + buflen);
                        memcpy((cpuaddr + f->page_offset), pa_cma_kmap_va,
                               skb_frag_size(f));
                        kunmap_atomic(cpuaddr);
                        buflen += skb_frag_size(f);
                    }
                    kunmap_atomic(pa_cma_kmap_va);
                } else {
                    pa_cma_kmap_va = kmap_atomic(pa_cma_page);
                    pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + buflen);
                    memcpy(skb->data, pa_cma_kmap_va, skb->len);
                    kunmap_atomic(pa_cma_kmap_va);
                    buflen += skb->len;
                }
            }
        }
#endif /* NOT_YET */
    } else {
        dma_unmap_page(OSH_NULL, pa_cma, size + offset, DMA_TO_DEVICE);
    }

    osl_sec_dma_free_mem_elem(osh, sec_mem_elem);
}

void osl_sec_dma_unmap_all(osl_t *osh, void *ptr_cma_info)
{
    sec_mem_elem_t *sec_mem_elem;

    sec_mem_elem = osl_sec_dma_rem_first_elem(osh, ptr_cma_info);

    while (sec_mem_elem != NULL) {
        dma_unmap_page(OSH_NULL, sec_mem_elem->pa_cma, sec_mem_elem->size,
                       sec_mem_elem->direction == DMA_TX ? DMA_TO_DEVICE
                                                         : DMA_FROM_DEVICE);
        osl_sec_dma_free_mem_elem(osh, sec_mem_elem);

        sec_mem_elem = osl_sec_dma_rem_first_elem(osh, ptr_cma_info);
    }
}

static void osl_sec_dma_init_consistent(osl_t *osh)
{
    int i;
    void *temp_va = osh->contig_base_alloc_coherent_va;
    phys_addr_t temp_pa = osh->contig_base_alloc_coherent;

    for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
        osh->sec_cma_coherent[i].avail = TRUE;
        osh->sec_cma_coherent[i].va = temp_va;
        osh->sec_cma_coherent[i].pa = temp_pa;
        temp_va = ((uint8 *)temp_va) + SEC_CMA_COHERENT_BLK;
        temp_pa += SEC_CMA_COHERENT_BLK;
    }
}

static void *osl_sec_dma_alloc_consistent(osl_t *osh, uint size,
                                          uint16 align_bits, ulong *pap)
{
    void *temp_va = NULL;
    ulong temp_pa = 0;
    int i;

    if (size > SEC_CMA_COHERENT_BLK) {
        printf("%s: unsupported size %d\n", __FUNCTION__, size);
        return NULL;
    }

    for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
        if (osh->sec_cma_coherent[i].avail == TRUE) {
            temp_va = osh->sec_cma_coherent[i].va;
            temp_pa = osh->sec_cma_coherent[i].pa;
            osh->sec_cma_coherent[i].avail = FALSE;
            break;
        }
    }

    if (i == SEC_CMA_COHERENT_MAX) {
        printf("%s: no coherent mem: va = 0x%p pa = 0x%lx size = %d\n",
               __FUNCTION__, temp_va, (ulong)temp_pa, size);
    }

    *pap = (unsigned long)temp_pa;
    return temp_va;
}
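
/*
 * Illustrative sketch (editorial, not part of the original file): the
 * "coherent" allocator above is a fixed pool of SEC_CMA_COHERENT_MAX blocks
 * of SEC_CMA_COHERENT_BLK bytes each, carved out by
 * osl_sec_dma_init_consistent(); allocation is a linear scan for an
 * available slot, and freeing (below) just re-marks the slot.  The names
 * ringsz and pa are hypothetical, and the dmaaddr_t cast assumes a scalar
 * dmaaddr_t in this configuration.
 */
#if 0
    ulong pa = 0;
    void *va = osl_sec_dma_alloc_consistent(osh, ringsz, 0, &pa);

    if (va != NULL) {
        /* ... program the DMA ring base with pa, access it via va ... */
        osl_sec_dma_free_consistent(osh, va, ringsz, (dmaaddr_t)pa);
    }
#endif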

static void osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size,
                                        dmaaddr_t pa)
{
    int i = 0;

    for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
        if (osh->sec_cma_coherent[i].va == va) {
            osh->sec_cma_coherent[i].avail = TRUE;
            break;
        }
    }
    if (i == SEC_CMA_COHERENT_MAX) {
        printf("%s: error: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__, va,
               (ulong)pa, size);
    }
}
#endif /* BCM_SECURE_DMA */

/* Timer APIs */
/* Note: all timer APIs are thread-unsafe; the caller must serialize them
 * with its own locking (see the sketch after osl_timer_del()).
 */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
void timer_cb_compat(struct timer_list *tl)
{
    timer_list_compat_t *t = container_of(tl, timer_list_compat_t, timer);
    t->callback((ulong)t->arg);
}
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) */

osl_timer_t *osl_timer_init(osl_t *osh, const char *name, void (*fn)(void *arg),
                            void *arg)
{
    osl_timer_t *t;
    BCM_REFERENCE(fn);
    if ((t = MALLOCZ(NULL, sizeof(osl_timer_t))) == NULL) {
        printk(KERN_ERR "osl_timer_init: out of memory, malloced %d bytes\n",
               (int)sizeof(osl_timer_t));
        return (NULL);
    }
    /* MALLOCZ already zeroes the allocation, so no separate bzero() is
     * needed here.
     */
    if ((t->timer = MALLOCZ(NULL, sizeof(struct timer_list))) == NULL) {
        printf("osl_timer_init: malloc failed\n");
        MFREE(NULL, t, sizeof(osl_timer_t));
        return (NULL);
    }
    t->set = TRUE;

    init_timer_compat(t->timer, (linux_timer_fn)fn, arg);

    return (t);
}

void osl_timer_add(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic)
{
    if (t == NULL) {
        printf("%s: Timer handle is NULL\n", __FUNCTION__);
        return;
    }
    ASSERT(!t->set);

    t->set = TRUE;
    if (periodic) {
        printf("Periodic timers are not supported by Linux timer APIs\n");
    }
    timer_expires(t->timer) = jiffies + ms * HZ / 1000;

    add_timer(t->timer);

    return;
}
osl_timer_update(osl_t * osh,osl_timer_t * t,uint32 ms,bool periodic)1938 void osl_timer_update(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic)
1939 {
1940     if (t == NULL) {
1941         printf("%s: Timer handle is NULL\n", __FUNCTION__);
1942         return;
1943     }
1944     if (periodic) {
1945         printf("Periodic timers are not supported by Linux timer apis\n");
1946     }
1947     t->set = TRUE;
1948     timer_expires(t->timer) = jiffies + ms * HZ / 0x3E8;
1949 
1950     mod_timer(t->timer, timer_expires(t->timer));
1951 
1952     return;
1953 }
1954 
/*
 * Stop and free the timer.  Returns FALSE only when the handle is NULL;
 * otherwise returns TRUE (the timer and its handle are freed if still set).
 */
bool osl_timer_del(osl_t *osh, osl_timer_t *t)
{
    if (t == NULL) {
        printf("%s: Timer handle is NULL\n", __FUNCTION__);
        return (FALSE);
    }
    if (t->set) {
        t->set = FALSE;
        if (t->timer) {
            del_timer(t->timer);
            MFREE(NULL, t->timer, sizeof(struct timer_list));
        }
        MFREE(NULL, t, sizeof(osl_timer_t));
    }
    return (TRUE);
}
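
/*
 * Illustrative sketch (editorial, not part of the original file): typical
 * timer usage under the caller's own lock, per the thread-safety note at
 * the top of this block.  The names my_cb and ctx are hypothetical.
 */
#if 0
    osl_timer_t *t;

    /* caller's lock held around every osl_timer_* call */
    t = osl_timer_init(osh, "watchdog", my_cb, ctx);
    if (t != NULL) {
        osl_timer_update(osh, t, 100, FALSE);   /* arm: fires once in ~100 ms */
    }

    /* ... later, again under the caller's lock ... */
    if (t != NULL && osl_timer_del(osh, t)) {
        t = NULL;   /* osl_timer_del() freed the handle */
    }
#endif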
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
int kernel_read_compat(struct file *file, loff_t offset, char *addr,
                       unsigned long count)
{
    return (int)kernel_read(file, addr, (size_t)count, &offset);
}
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) */

void *osl_spin_lock_init(osl_t *osh)
{
    /* Allocate 4 spare bytes: sizeof(spinlock_t) can be 0 when neither
     * CONFIG_SMP nor CONFIG_DEBUG_SPINLOCK is defined, and a zero-size
     * allocation trips kernel asserts in internal builds.
     */
    spinlock_t *lock = MALLOC(osh, sizeof(spinlock_t) + 4);
    if (lock) {
        spin_lock_init(lock);
    }
    return ((void *)lock);
}

void osl_spin_lock_deinit(osl_t *osh, void *lock)
{
    if (lock) {
        MFREE(osh, lock, sizeof(spinlock_t) + 4);
    }
}

unsigned long osl_spin_lock(void *lock)
{
    unsigned long flags = 0;

    if (lock) {
        spin_lock_irqsave((spinlock_t *)lock, flags);
    }

    return flags;
}

void osl_spin_unlock(void *lock, unsigned long flags)
{
    if (lock) {
        spin_unlock_irqrestore((spinlock_t *)lock, flags);
    }
}
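
/*
 * Illustrative sketch (editorial, not part of the original file): the flags
 * word returned by osl_spin_lock() must be handed back to osl_spin_unlock(),
 * exactly like a spin_lock_irqsave()/spin_unlock_irqrestore() pair.
 */
#if 0
    unsigned long flags = osl_spin_lock(lock);
    /* ... critical section, local IRQs disabled ... */
    osl_spin_unlock(lock, flags);
#endif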

#ifdef USE_DMA_LOCK
static void osl_dma_lock(osl_t *osh)
{
    /* In hard-IRQ context, or with IRQs already disabled, a plain spin_lock
     * suffices; otherwise take the bottom-half variant and record that
     * choice so osl_dma_unlock() releases it the same way.
     */
    if (likely(in_irq() || irqs_disabled())) {
        spin_lock(&osh->dma_lock);
    } else {
        spin_lock_bh(&osh->dma_lock);
        osh->dma_lock_bh = TRUE;
    }
}

static void osl_dma_unlock(osl_t *osh)
{
    if (unlikely(osh->dma_lock_bh)) {
        osh->dma_lock_bh = FALSE;
        spin_unlock_bh(&osh->dma_lock);
    } else {
        spin_unlock(&osh->dma_lock);
    }
}

static void osl_dma_lock_init(osl_t *osh)
{
    spin_lock_init(&osh->dma_lock);
    osh->dma_lock_bh = FALSE;
}
#endif /* USE_DMA_LOCK */

void osl_do_gettimeofday(struct osl_timespec *ts)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
    struct timespec64 curtime;
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
    struct timespec curtime;
#else
    struct timeval curtime;
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
    ktime_get_real_ts64(&curtime);
    ts->tv_nsec = curtime.tv_nsec;
    ts->tv_usec = curtime.tv_nsec / 1000;
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
    getnstimeofday(&curtime);
    ts->tv_nsec = curtime.tv_nsec;
    ts->tv_usec = curtime.tv_nsec / 1000;
#else
    do_gettimeofday(&curtime);
    ts->tv_usec = curtime.tv_usec;
    ts->tv_nsec = curtime.tv_usec * 1000;
#endif
    ts->tv_sec = curtime.tv_sec;
}

uint32 osl_do_gettimediff(struct osl_timespec *cur_ts,
                          struct osl_timespec *old_ts)
{
    uint32 diff_s, diff_us, total_diff_us;
    bool pgc_g = FALSE;

    /* Subtract whole seconds first, then add or borrow the microsecond
     * part depending on which usec field is larger.
     */
    diff_s = (uint32)cur_ts->tv_sec - (uint32)old_ts->tv_sec;
    pgc_g = (cur_ts->tv_usec > old_ts->tv_usec) ? TRUE : FALSE;
    diff_us = pgc_g ? (cur_ts->tv_usec - old_ts->tv_usec)
                    : (old_ts->tv_usec - cur_ts->tv_usec);
    total_diff_us =
        pgc_g ? (diff_s * 1000000 + diff_us) : (diff_s * 1000000 - diff_us);
    return total_diff_us;
}
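
/*
 * Worked example (editorial): cur = {5 s, 200 us}, old = {3 s, 800 us}.
 * Here cur_usec <= old_usec, so diff_us = 800 - 200 = 600 and the result is
 * (5 - 3) * 1000000 - 600 = 1999400 us: the microsecond shortfall is
 * borrowed from the whole-second difference.
 */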

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)
void osl_get_monotonic_boottime(struct osl_timespec *ts)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
    struct timespec64 curtime;
#else
    struct timespec curtime;
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
    curtime = ktime_to_timespec64(ktime_get_boottime());
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
    curtime = ktime_to_timespec(ktime_get_boottime());
#else
    get_monotonic_boottime(&curtime);
#endif
    ts->tv_sec = curtime.tv_sec;
    ts->tv_nsec = curtime.tv_nsec;
    ts->tv_usec = curtime.tv_nsec / 1000;
}
#endif