/*
 * Linux OS Independent Layer
 *
 * Copyright (C) 1999-2017, Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: linux_osl.h 672413 2016-11-28 11:13:23Z $
 */

#ifndef _linux_osl_h_
#define _linux_osl_h_

#include <typedefs.h>
#define DECLSPEC_ALIGN(x) __attribute__ ((aligned(x)))

/* Linux Kernel: File Operations: start */
extern void * osl_os_open_image(char * filename);
extern int osl_os_get_image_block(char * buf, int len, void * image);
extern void osl_os_close_image(void * image);
extern int osl_os_image_size(void *image);
/* Linux Kernel: File Operations: end */
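/* Illustrative sketch (not part of the API): reading a firmware image in
 * fixed-size chunks with the file-operation wrappers declared above. The
 * path and chunk size are made up, and osl_os_get_image_block() is assumed
 * to return the number of bytes copied (0 at end of file). Kept under #if 0
 * so it never builds.
 */
#if 0
static int example_load_image(void)
{
	char chunk[2048];
	int len, total = 0;
	void *image = osl_os_open_image("/lib/firmware/example_fw.bin");

	if (image == NULL)
		return -1;

	/* whole image length is available up front if needed */
	total = osl_os_image_size(image);
	total = 0;

	while ((len = osl_os_get_image_block(chunk, sizeof(chunk), image)) > 0)
		total += len;	/* a consume_chunk(chunk, len) step would go here */

	osl_os_close_image(image);
	return total;
}
#endif /* example */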

#ifdef BCMDRIVER

/* OSL initialization */
#ifdef SHARED_OSL_CMN
extern osl_t *osl_attach(void *pdev, uint bustype, bool pkttag, void **osh_cmn);
#else
extern osl_t *osl_attach(void *pdev, uint bustype, bool pkttag);
#endif /* SHARED_OSL_CMN */

extern void osl_detach(osl_t *osh);
extern int osl_static_mem_init(osl_t *osh, void *adapter);
extern int osl_static_mem_deinit(osl_t *osh, void *adapter);
extern void osl_set_bus_handle(osl_t *osh, void *bus_handle);
extern void* osl_get_bus_handle(osl_t *osh);
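/* Illustrative sketch: typical attach/detach pairing around a bus driver's
 * probe/remove path (non-SHARED_OSL_CMN signature). The PCI_BUS bus-type
 * constant comes from other Broadcom headers and is only assumed here. Kept
 * under #if 0 so it never builds.
 */
#if 0
static osl_t *example_probe(struct pci_dev *pdev)
{
	/* pkttag = FALSE: no per-packet tag area is requested from the OSL */
	osl_t *osh = osl_attach(pdev, PCI_BUS, FALSE);

	if (osh != NULL)
		osl_set_bus_handle(osh, pdev);	/* retrievable via osl_get_bus_handle() */
	return osh;
}

static void example_remove(osl_t *osh)
{
	osl_detach(osh);
}
#endif /* example */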

/* Global ASSERT type */
extern uint32 g_assert_type;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
#define PRI_FMT_x "llx"
#define PRI_FMT_X "llX"
#define PRI_FMT_o "llo"
#define PRI_FMT_d "lld"
#else
#define PRI_FMT_x "x"
#define PRI_FMT_X "X"
#define PRI_FMT_o "o"
#define PRI_FMT_d "d"
#endif /* CONFIG_PHYS_ADDR_T_64BIT */
/* ASSERT */
#if defined(BCMASSERT_LOG)
#define ASSERT(exp) \
	do { if (!(exp)) osl_assert(#exp, __FILE__, __LINE__); } while (0)
extern void osl_assert(const char *exp, const char *file, int line);
#else
#ifdef __GNUC__
#define GCC_VERSION \
	(__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
#if GCC_VERSION > 30100
#define ASSERT(exp) do {} while (0)
#else
/* ASSERT could cause segmentation fault on GCC3.1, use empty instead */
#define ASSERT(exp)
#endif /* GCC_VERSION > 30100 */
#endif /* __GNUC__ */
#endif

/* bcm_prefetch_32B */
static inline void bcm_prefetch_32B(const uint8 *addr, const int cachelines_32B)
{
#if (defined(STB) && defined(__arm__)) && (__LINUX_ARM_ARCH__ >= 5)
	switch (cachelines_32B) {
		case 4: __asm__ __volatile__("pld\t%a0" :: "p"(addr + 96) : "cc");
		case 3: __asm__ __volatile__("pld\t%a0" :: "p"(addr + 64) : "cc");
		case 2: __asm__ __volatile__("pld\t%a0" :: "p"(addr + 32) : "cc");
		case 1: __asm__ __volatile__("pld\t%a0" :: "p"(addr + 0) : "cc");
	}
#endif
}

/* microsecond delay */
#define OSL_DELAY(usec) osl_delay(usec)
extern void osl_delay(uint usec);

#define OSL_SLEEP(ms) osl_sleep(ms)
extern void osl_sleep(uint ms);

#define OSL_PCMCIA_READ_ATTR(osh, offset, buf, size) \
	osl_pcmcia_read_attr((osh), (offset), (buf), (size))
#define OSL_PCMCIA_WRITE_ATTR(osh, offset, buf, size) \
	osl_pcmcia_write_attr((osh), (offset), (buf), (size))
extern void osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size);
extern void osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size);

/* PCI configuration space access macros */
#define OSL_PCI_READ_CONFIG(osh, offset, size) \
	osl_pci_read_config((osh), (offset), (size))
#define OSL_PCI_WRITE_CONFIG(osh, offset, size, val) \
	osl_pci_write_config((osh), (offset), (size), (val))
extern uint32 osl_pci_read_config(osl_t *osh, uint offset, uint size);
extern void osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val);

/* PCI device bus # and slot # */
#define OSL_PCI_BUS(osh) osl_pci_bus(osh)
#define OSL_PCI_SLOT(osh) osl_pci_slot(osh)
#define OSL_PCIE_DOMAIN(osh) osl_pcie_domain(osh)
#define OSL_PCIE_BUS(osh) osl_pcie_bus(osh)
extern uint osl_pci_bus(osl_t *osh);
extern uint osl_pci_slot(osl_t *osh);
extern uint osl_pcie_domain(osl_t *osh);
extern uint osl_pcie_bus(osl_t *osh);
extern struct pci_dev *osl_pci_device(osl_t *osh);

#define OSL_ACP_COHERENCE (1<<1L)
#define OSL_FWDERBUF (1<<2L)

/* Pkttag flag should be part of public information */
typedef struct {
	bool pkttag;
	bool mmbus;		/**< Bus supports memory-mapped register accesses */
	pktfree_cb_fn_t tx_fn;	/**< Callback function for PKTFREE */
	void *tx_ctx;		/**< Context to the callback function */
	void *unused[3];
} osl_pubinfo_t;

extern void osl_flag_set(osl_t *osh, uint32 mask);
extern void osl_flag_clr(osl_t *osh, uint32 mask);
extern bool osl_is_flag_set(osl_t *osh, uint32 mask);

#define PKTFREESETCB(osh, _tx_fn, _tx_ctx) \
	do { \
		((osl_pubinfo_t*)osh)->tx_fn = _tx_fn; \
		((osl_pubinfo_t*)osh)->tx_ctx = _tx_ctx; \
	} while (0)
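/* Illustrative sketch: registering a PKTFREE callback through PKTFREESETCB.
 * The pktfree_cb_fn_t signature used here (context, packet, tx status) is an
 * assumption based on how tx_fn/tx_ctx are used above; check osl.h for the
 * authoritative typedef. The softc structure is made up. Kept under #if 0 so
 * it never builds.
 */
#if 0
struct example_softc {
	uint txfreed;
};

static void example_txfree_cb(void *ctx, void *pkt, uint txstatus)
{
	/* per-driver bookkeeping for packets freed on the tx path */
	struct example_softc *sc = (struct example_softc *)ctx;
	sc->txfreed++;
}

static void example_register_txfree(osl_t *osh, struct example_softc *sc)
{
	/* stores the function/context in the public osl info; osl_pktfree()
	 * is then expected to invoke it for transmit packets
	 */
	PKTFREESETCB(osh, example_txfree_cb, sc);
}
#endif /* example */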


/* host/bus architecture-specific byte swap */
#define BUS_SWAP32(v) (v)
#define MALLOC(osh, size) osl_malloc((osh), (size))
#define MALLOCZ(osh, size) osl_mallocz((osh), (size))
#define MFREE(osh, addr, size) osl_mfree((osh), (addr), (size))
#define VMALLOC(osh, size) osl_vmalloc((osh), (size))
#define VMALLOCZ(osh, size) osl_vmallocz((osh), (size))
#define VMFREE(osh, addr, size) osl_vmfree((osh), (addr), (size))
#define MALLOCED(osh) osl_malloced((osh))
#define MEMORY_LEFTOVER(osh) osl_check_memleak(osh)
extern void *osl_malloc(osl_t *osh, uint size);
extern void *osl_mallocz(osl_t *osh, uint size);
extern void osl_mfree(osl_t *osh, void *addr, uint size);
extern void *osl_vmalloc(osl_t *osh, uint size);
extern void *osl_vmallocz(osl_t *osh, uint size);
extern void osl_vmfree(osl_t *osh, void *addr, uint size);
extern uint osl_malloced(osl_t *osh);
extern uint osl_check_memleak(osl_t *osh);

#define MALLOC_FAILED(osh) osl_malloc_failed((osh))
extern uint osl_malloc_failed(osl_t *osh);
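/* Illustrative sketch: the MALLOC/MFREE wrappers take the allocation size at
 * free time so the OSL can keep per-osh byte accounting (MALLOCED,
 * MEMORY_LEFTOVER). MALLOC_FAILED() is assumed to report a per-osh failure
 * count. The structure and its size are example values. Kept under #if 0 so
 * it never builds.
 */
#if 0
struct example_state {
	uint32 magic;
	uint8 buf[256];
};

static struct example_state *example_alloc_state(osl_t *osh)
{
	/* MALLOCZ returns zeroed memory */
	struct example_state *st = MALLOCZ(osh, sizeof(*st));

	if (st == NULL)
		printf("alloc failed, %u failures so far\n", MALLOC_FAILED(osh));
	return st;
}

static void example_free_state(osl_t *osh, struct example_state *st)
{
	/* the size passed here must match the size passed to MALLOCZ */
	MFREE(osh, st, sizeof(*st));
}
#endif /* example */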

/* allocate/free shared (dma-able) consistent memory */
#define DMA_CONSISTENT_ALIGN osl_dma_consistent_align()
#define DMA_ALLOC_CONSISTENT(osh, size, align, tot, pap, dmah) \
	osl_dma_alloc_consistent((osh), (size), (align), (tot), (pap))
#define DMA_FREE_CONSISTENT(osh, va, size, pa, dmah) \
	osl_dma_free_consistent((osh), (void*)(va), (size), (pa))

#define DMA_ALLOC_CONSISTENT_FORCE32(osh, size, align, tot, pap, dmah) \
	osl_dma_alloc_consistent((osh), (size), (align), (tot), (pap))
#define DMA_FREE_CONSISTENT_FORCE32(osh, va, size, pa, dmah) \
	osl_dma_free_consistent((osh), (void*)(va), (size), (pa))

extern uint osl_dma_consistent_align(void);
extern void *osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align,
	uint *tot, dmaaddr_t *pap);
extern void osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa);
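/* Illustrative sketch: allocating a descriptor ring from coherent (consistent)
 * memory. "tot" returns the size actually allocated and "pap" returns the
 * device (DMA) address. The "align" argument is assumed to be a power-of-two
 * exponent (12 -> 4 KB), which is how the hnddma callers use it; check
 * linux_osl.c if that assumption matters. Kept under #if 0 so it never builds.
 */
#if 0
static void *example_alloc_ring(osl_t *osh, uint nbytes, dmaaddr_t *pap, uint *allocedp)
{
	/* request 4 KB alignment; the OSL may round the size up in *allocedp */
	return DMA_ALLOC_CONSISTENT(osh, nbytes, 12, allocedp, pap, NULL);
}

static void example_free_ring(osl_t *osh, void *va, uint alloced, dmaaddr_t pa)
{
	DMA_FREE_CONSISTENT(osh, va, alloced, pa, NULL);
}
#endif /* example */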

/* map/unmap direction */
#define DMA_NO 0 /* Used to skip cache op */
#define DMA_TX 1 /* TX direction for DMA */
#define DMA_RX 2 /* RX direction for DMA */

/* map/unmap shared (dma-able) memory */
#define DMA_UNMAP(osh, pa, size, direction, p, dmah) \
	osl_dma_unmap((osh), (pa), (size), (direction))
extern dmaaddr_t osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p,
	hnddma_seg_map_t *txp_dmah);
extern void osl_dma_unmap(osl_t *osh, dmaaddr_t pa, uint size, int direction);
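/* Illustrative sketch: streaming DMA for one tx buffer. osl_dma_map() performs
 * the cache maintenance implied by "direction" and returns a dmaaddr_t for the
 * device; osl_dma_unmap() reverses it after the transfer completes. Kept under
 * #if 0 so it never builds.
 */
#if 0
static void example_dma_one_buffer(osl_t *osh, void *buf, uint len)
{
	dmaaddr_t pa;

	/* clean the buffer for device reads and obtain its bus address */
	pa = osl_dma_map(osh, buf, len, DMA_TX, NULL, NULL);

	/* ... program pa into the DMA engine and wait for completion ... */

	osl_dma_unmap(osh, pa, len, DMA_TX);
}
#endif /* example */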

/* API for DMA addressing capability */
#define OSL_DMADDRWIDTH(osh, addrwidth) ({BCM_REFERENCE(osh); BCM_REFERENCE(addrwidth);})

#define OSL_SMP_WMB() smp_wmb()

/* API for CPU relax */
extern void osl_cpu_relax(void);
#define OSL_CPU_RELAX() osl_cpu_relax()

extern void osl_preempt_disable(osl_t *osh);
extern void osl_preempt_enable(osl_t *osh);
#define OSL_DISABLE_PREEMPTION(osh) osl_preempt_disable(osh)
#define OSL_ENABLE_PREEMPTION(osh) osl_preempt_enable(osh)

#if (!defined(DHD_USE_COHERENT_MEM_FOR_RING) && defined(__ARM_ARCH_7A__)) || \
	(defined(STBLINUX) && defined(__ARM_ARCH_7A__))
extern void osl_cache_flush(void *va, uint size);
extern void osl_cache_inv(void *va, uint size);
extern void osl_prefetch(const void *ptr);
#define OSL_CACHE_FLUSH(va, len) osl_cache_flush((void *)(va), len)
#define OSL_CACHE_INV(va, len) osl_cache_inv((void *)(va), len)
#define OSL_PREFETCH(ptr) osl_prefetch(ptr)
#if defined(__ARM_ARCH_7A__)
extern int osl_arch_is_coherent(void);
#define OSL_ARCH_IS_COHERENT() osl_arch_is_coherent()
extern int osl_acp_war_enab(void);
#define OSL_ACP_WAR_ENAB() osl_acp_war_enab()
#else /* !__ARM_ARCH_7A__ */
#define OSL_ARCH_IS_COHERENT() NULL
#define OSL_ACP_WAR_ENAB() NULL
#endif /* !__ARM_ARCH_7A__ */
#else /* !__ARM_ARCH_7A__ || (DHD_USE_COHERENT_MEM_FOR_RING && !STBLINUX) */
#define OSL_CACHE_FLUSH(va, len) BCM_REFERENCE(va)
#define OSL_CACHE_INV(va, len) BCM_REFERENCE(va)
#define OSL_PREFETCH(ptr) BCM_REFERENCE(ptr)

#define OSL_ARCH_IS_COHERENT() NULL
#define OSL_ACP_WAR_ENAB() NULL
#endif

/* register access macros */
#if defined(BCMSDIO)
#include <bcmsdh.h>
#define OSL_WRITE_REG(osh, r, v) (bcmsdh_reg_write(osl_get_bus_handle(osh), \
	(uintptr)(r), sizeof(*(r)), (v)))
#define OSL_READ_REG(osh, r) (bcmsdh_reg_read(osl_get_bus_handle(osh), \
	(uintptr)(r), sizeof(*(r))))
#elif (defined(STB) && defined(__arm__))
extern void osl_pcie_rreg(osl_t *osh, ulong addr, void *v, uint size);

#define OSL_READ_REG(osh, r) \
	({\
		__typeof(*(r)) __osl_v; \
		osl_pcie_rreg(osh, (uintptr)(r), (void *)&__osl_v, sizeof(*(r))); \
		__osl_v; \
	})
#endif

#if (defined(STB) && defined(__arm__))
#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;})
#define SELECT_BUS_READ(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); bus_op;})
#else /* !(STB && __arm__) */
#if defined(BCMSDIO)
#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) if (((osl_pubinfo_t*)(osh))->mmbus) \
		mmap_op else bus_op
#define SELECT_BUS_READ(osh, mmap_op, bus_op) (((osl_pubinfo_t*)(osh))->mmbus) ? \
		mmap_op : bus_op
#else
#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;})
#define SELECT_BUS_READ(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;})
#endif
#endif

#define OSL_ERROR(bcmerror) osl_error(bcmerror)
extern int osl_error(int bcmerror);

/* largest reasonable packet buffer the driver uses for the Ethernet MTU, in bytes */
#define PKTBUFSZ 2048

#define OSH_NULL NULL

/*
 * BINOSL selects the slightly slower function-call-based binary compatible osl.
 * Macros expand to calls to functions defined in linux_osl.c .
 */
#include <linuxver.h> /* use current 2.4.x calling conventions */
#include <linux/kernel.h> /* for vsn/printf's */
#include <linux/string.h> /* for mem*, str* */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 29)
extern uint64 osl_sysuptime_us(void);
#define OSL_SYSUPTIME() ((uint32)jiffies_to_msecs(jiffies))
#define OSL_SYSUPTIME_US() osl_sysuptime_us()
#else
#define OSL_SYSUPTIME() ((uint32)jiffies * (1000 / HZ))
#error "OSL_SYSUPTIME_US() may need to be defined"
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 29) */
#define printf(fmt, args...) printk(fmt, ## args)
#include <linux/kernel.h> /* for vsn/printf's */
#include <linux/string.h> /* for mem*, str* */
/* bcopy's: Linux kernel doesn't provide these (anymore) */
#define bcopy(src, dst, len) memcpy((dst), (src), (len))
#define bcmp(b1, b2, len) memcmp((b1), (b2), (len))
#define bzero(b, len) memset((b), '\0', (len))

/* register access macros */

#ifdef CONFIG_64BIT
/* readq is defined only for 64 bit platform */
#define R_REG(osh, r) (\
	SELECT_BUS_READ(osh, \
	({ \
		__typeof(*(r)) __osl_v = 0; \
		switch (sizeof(*(r))) { \
			case sizeof(uint8): __osl_v = \
				readb((volatile uint8*)(r)); break; \
			case sizeof(uint16): __osl_v = \
				readw((volatile uint16*)(r)); break; \
			case sizeof(uint32): __osl_v = \
				readl((volatile uint32*)(r)); break; \
			case sizeof(uint64): __osl_v = \
				readq((volatile uint64*)(r)); break; \
		} \
		__osl_v; \
	}), \
	OSL_READ_REG(osh, r)) \
)
#else /* !CONFIG_64BIT */
#define R_REG(osh, r) (\
	SELECT_BUS_READ(osh, \
	({ \
		__typeof(*(r)) __osl_v = 0; \
		switch (sizeof(*(r))) { \
			case sizeof(uint8): __osl_v = \
				readb((volatile uint8*)(r)); break; \
			case sizeof(uint16): __osl_v = \
				readw((volatile uint16*)(r)); break; \
			case sizeof(uint32): __osl_v = \
				readl((volatile uint32*)(r)); break; \
		} \
		__osl_v; \
	}), \
	OSL_READ_REG(osh, r)) \
)
#endif /* CONFIG_64BIT */

#ifdef CONFIG_64BIT
/* writeq is defined only for 64 bit platform */
#define W_REG(osh, r, v) do { \
	SELECT_BUS_WRITE(osh, \
		switch (sizeof(*(r))) { \
			case sizeof(uint8): writeb((uint8)(v), (volatile uint8*)(r)); break; \
			case sizeof(uint16): writew((uint16)(v), (volatile uint16*)(r)); break; \
			case sizeof(uint32): writel((uint32)(v), (volatile uint32*)(r)); break; \
			case sizeof(uint64): writeq((uint64)(v), (volatile uint64*)(r)); break; \
		}, \
		(OSL_WRITE_REG(osh, r, v))); \
	} while (0)

#else /* !CONFIG_64BIT */
#define W_REG(osh, r, v) do { \
	SELECT_BUS_WRITE(osh, \
		switch (sizeof(*(r))) { \
			case sizeof(uint8): writeb((uint8)(v), (volatile uint8*)(r)); break; \
			case sizeof(uint16): writew((uint16)(v), (volatile uint16*)(r)); break; \
			case sizeof(uint32): writel((uint32)(v), (volatile uint32*)(r)); break; \
		}, \
		(OSL_WRITE_REG(osh, r, v))); \
	} while (0)
#endif /* CONFIG_64BIT */

#define AND_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) & (v))
#define OR_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) | (v))
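/* Illustrative sketch: read-modify-write of a memory-mapped register through
 * the macros above. The register pointer and the bit values are made up;
 * R_REG/W_REG pick the access width from sizeof(*(r)). Kept under #if 0 so it
 * never builds.
 */
#if 0
static void example_reg_access(osl_t *osh, volatile uint32 *intmask_reg)
{
	uint32 cur;

	cur = R_REG(osh, intmask_reg);		/* 32-bit read (readl on mmio) */
	W_REG(osh, intmask_reg, cur | 0x1);	/* explicit read-modify-write */
	OR_REG(osh, intmask_reg, 0x2);		/* same thing via the helper */
	AND_REG(osh, intmask_reg, ~0x3);	/* clear both bits again */
}
#endif /* example */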

/* bcopy, bcmp, and bzero functions */
#define bcopy(src, dst, len) memcpy((dst), (src), (len))
#define bcmp(b1, b2, len) memcmp((b1), (b2), (len))
#define bzero(b, len) memset((b), '\0', (len))

/* uncached/cached virtual address */
#define OSL_UNCACHED(va) ((void *)va)
#define OSL_CACHED(va) ((void *)va)

#define OSL_PREF_RANGE_LD(va, sz) BCM_REFERENCE(va)
#define OSL_PREF_RANGE_ST(va, sz) BCM_REFERENCE(va)

/* get processor cycle count */
#if defined(__i386__)
#define OSL_GETCYCLES(x) rdtscl((x))
#else
#define OSL_GETCYCLES(x) ((x) = 0)
#endif

/* dereference an address that may cause a bus exception */
#define BUSPROBE(val, addr) ({ (val) = R_REG(NULL, (addr)); 0; })

/* map/unmap physical to virtual I/O */
#if !defined(CONFIG_MMC_MSM7X00A)
#define REG_MAP(pa, size) ioremap((unsigned long)(pa), (unsigned long)(size))
#else
#define REG_MAP(pa, size) (void *)(0)
#endif /* !defined(CONFIG_MMC_MSM7X00A) */
#define REG_UNMAP(va) iounmap((va))

/* shared (dma-able) memory access macros */
#define R_SM(r) *(r)
#define W_SM(r, v) (*(r) = (v))
#define BZERO_SM(r, len) memset((r), '\0', (len))

/* Because the non-BINOSL implementation of the PKT OSL routines is macro-based
 * (for performance reasons), we need the Linux headers.
 */
#include <linuxver.h> /* use current 2.4.x calling conventions */

/* packet primitives */
#ifdef BCMDBG_CTRACE
#define PKTGET(osh, len, send) osl_pktget((osh), (len), __LINE__, __FILE__)
#define PKTDUP(osh, skb) osl_pktdup((osh), (skb), __LINE__, __FILE__)
#else
#ifdef BCM_OBJECT_TRACE
#define PKTGET(osh, len, send) osl_pktget((osh), (len), __LINE__, __FUNCTION__)
#define PKTDUP(osh, skb) osl_pktdup((osh), (skb), __LINE__, __FUNCTION__)
#else
#define PKTGET(osh, len, send) osl_pktget((osh), (len))
#define PKTDUP(osh, skb) osl_pktdup((osh), (skb))
#endif /* BCM_OBJECT_TRACE */
#endif /* BCMDBG_CTRACE */
#define PKTLIST_DUMP(osh, buf) BCM_REFERENCE(osh)
#define PKTDBG_TRACE(osh, pkt, bit) BCM_REFERENCE(osh)
#if defined(BCM_OBJECT_TRACE)
#define PKTFREE(osh, skb, send) osl_pktfree((osh), (skb), (send), __LINE__, __FUNCTION__)
#else
#define PKTFREE(osh, skb, send) osl_pktfree((osh), (skb), (send))
#endif /* BCM_OBJECT_TRACE */
#ifdef CONFIG_DHD_USE_STATIC_BUF
#define PKTGET_STATIC(osh, len, send) osl_pktget_static((osh), (len))
#define PKTFREE_STATIC(osh, skb, send) osl_pktfree_static((osh), (skb), (send))
#else
#define PKTGET_STATIC PKTGET
#define PKTFREE_STATIC PKTFREE
#endif /* CONFIG_DHD_USE_STATIC_BUF */
#define PKTDATA(osh, skb) ({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->data);})
#define PKTLEN(osh, skb) ({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->len);})
#define PKTHEADROOM(osh, skb) (PKTDATA(osh, skb)-(((struct sk_buff*)(skb))->head))
#define PKTEXPHEADROOM(osh, skb, b) \
	({ \
		BCM_REFERENCE(osh); \
		skb_realloc_headroom((struct sk_buff*)(skb), (b)); \
	})
#define PKTTAILROOM(osh, skb) \
	({ \
		BCM_REFERENCE(osh); \
		skb_tailroom((struct sk_buff*)(skb)); \
	})
#define PKTPADTAILROOM(osh, skb, padlen) \
	({ \
		BCM_REFERENCE(osh); \
		skb_pad((struct sk_buff*)(skb), (padlen)); \
	})
#define PKTNEXT(osh, skb) ({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->next);})
#define PKTSETNEXT(osh, skb, x) \
	({ \
		BCM_REFERENCE(osh); \
		(((struct sk_buff*)(skb))->next = (struct sk_buff*)(x)); \
	})
#define PKTSETLEN(osh, skb, len) \
	({ \
		BCM_REFERENCE(osh); \
		__skb_trim((struct sk_buff*)(skb), (len)); \
	})
#define PKTPUSH(osh, skb, bytes) \
	({ \
		BCM_REFERENCE(osh); \
		skb_push((struct sk_buff*)(skb), (bytes)); \
	})
#define PKTPULL(osh, skb, bytes) \
	({ \
		BCM_REFERENCE(osh); \
		skb_pull((struct sk_buff*)(skb), (bytes)); \
	})
#define PKTTAG(skb) ((void*)(((struct sk_buff*)(skb))->cb))
#define PKTSETPOOL(osh, skb, x, y) BCM_REFERENCE(osh)
#define PKTPOOL(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})
#define PKTFREELIST(skb) PKTLINK(skb)
#define PKTSETFREELIST(skb, x) PKTSETLINK((skb), (x))
#define PKTPTR(skb) (skb)
#define PKTID(skb) ({BCM_REFERENCE(skb); 0;})
#define PKTSETID(skb, id) ({BCM_REFERENCE(skb); BCM_REFERENCE(id);})
#define PKTSHRINK(osh, m) ({BCM_REFERENCE(osh); m;})
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) && defined(TSQ_MULTIPLIER)
#define PKTORPHAN(skb, tsq) osl_pkt_orphan_partial(skb, tsq)
extern void osl_pkt_orphan_partial(struct sk_buff *skb, int tsq);
#else
#define PKTORPHAN(skb, tsq) ({BCM_REFERENCE(skb); 0;})
#endif /* LINUX VERSION >= 3.6 */
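/* Illustrative sketch: basic packet handling with the primitives above. The
 * 16-byte header and the payload length are example values; the packets are
 * ordinary sk_buffs underneath. Kept under #if 0 so it never builds.
 */
#if 0
static void *example_build_pkt(osl_t *osh, uint payload_len)
{
	void *p;

	/* allocate payload plus 16 bytes we will later claim for a header */
	p = PKTGET(osh, payload_len + 16, TRUE);
	if (p == NULL)
		return NULL;

	PKTPULL(osh, p, 16);	/* hide the first 16 bytes; len is now payload_len */
	/* ... fill PKTDATA(osh, p) with payload_len bytes of payload ... */
	PKTPUSH(osh, p, 16);	/* expose the 16 bytes again and write the header */

	/* hand the packet to the tx path, or PKTFREE(osh, p, TRUE) on error */
	return p;
}
#endif /* example */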


#ifdef BCMDBG_CTRACE
#define DEL_CTRACE(zosh, zskb) { \
	unsigned long zflags; \
	spin_lock_irqsave(&(zosh)->ctrace_lock, zflags); \
	list_del(&(zskb)->ctrace_list); \
	(zosh)->ctrace_num--; \
	(zskb)->ctrace_start = 0; \
	(zskb)->ctrace_count = 0; \
	spin_unlock_irqrestore(&(zosh)->ctrace_lock, zflags); \
}

#define UPDATE_CTRACE(zskb, zfile, zline) { \
	struct sk_buff *_zskb = (struct sk_buff *)(zskb); \
	if (_zskb->ctrace_count < CTRACE_NUM) { \
		_zskb->func[_zskb->ctrace_count] = zfile; \
		_zskb->line[_zskb->ctrace_count] = zline; \
		_zskb->ctrace_count++; \
	} \
	else { \
		_zskb->func[_zskb->ctrace_start] = zfile; \
		_zskb->line[_zskb->ctrace_start] = zline; \
		_zskb->ctrace_start++; \
		if (_zskb->ctrace_start >= CTRACE_NUM) \
			_zskb->ctrace_start = 0; \
	} \
}

#define ADD_CTRACE(zosh, zskb, zfile, zline) { \
	unsigned long zflags; \
	spin_lock_irqsave(&(zosh)->ctrace_lock, zflags); \
	list_add(&(zskb)->ctrace_list, &(zosh)->ctrace_list); \
	(zosh)->ctrace_num++; \
	UPDATE_CTRACE(zskb, zfile, zline); \
	spin_unlock_irqrestore(&(zosh)->ctrace_lock, zflags); \
}

#define PKTCALLER(zskb) UPDATE_CTRACE((struct sk_buff *)zskb, (char *)__FUNCTION__, __LINE__)
#endif /* BCMDBG_CTRACE */

#ifdef CTFPOOL
#define CTFPOOL_REFILL_THRESH 3
typedef struct ctfpool {
	void *head;
	spinlock_t lock;
	osl_t *osh;
	uint max_obj;
	uint curr_obj;
	uint obj_size;
	uint refills;
	uint fast_allocs;
	uint fast_frees;
	uint slow_allocs;
} ctfpool_t;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
#define FASTBUF (1 << 0)
#define PKTSETFAST(osh, skb) \
	({ \
		BCM_REFERENCE(osh); \
		((((struct sk_buff*)(skb))->pktc_flags) |= FASTBUF); \
	})
#define PKTCLRFAST(osh, skb) \
	({ \
		BCM_REFERENCE(osh); \
		((((struct sk_buff*)(skb))->pktc_flags) &= (~FASTBUF)); \
	})
#define PKTISFAST(osh, skb) \
	({ \
		BCM_REFERENCE(osh); \
		((((struct sk_buff*)(skb))->pktc_flags) & FASTBUF); \
	})
#define PKTFAST(osh, skb) (((struct sk_buff*)(skb))->pktc_flags)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
#define FASTBUF (1 << 16)
#define PKTSETFAST(osh, skb) \
	({ \
		BCM_REFERENCE(osh); \
		((((struct sk_buff*)(skb))->mac_len) |= FASTBUF); \
	})
#define PKTCLRFAST(osh, skb) \
	({ \
		BCM_REFERENCE(osh); \
		((((struct sk_buff*)(skb))->mac_len) &= (~FASTBUF)); \
	})
#define PKTISFAST(osh, skb) \
	({ \
		BCM_REFERENCE(osh); \
		((((struct sk_buff*)(skb))->mac_len) & FASTBUF); \
	})
#define PKTFAST(osh, skb) (((struct sk_buff*)(skb))->mac_len)
#else
#define FASTBUF (1 << 0)
#define PKTSETFAST(osh, skb) \
	({ \
		BCM_REFERENCE(osh); \
		((((struct sk_buff*)(skb))->__unused) |= FASTBUF); \
	})
#define PKTCLRFAST(osh, skb) \
	({ \
		BCM_REFERENCE(osh); \
		((((struct sk_buff*)(skb))->__unused) &= (~FASTBUF)); \
	})
#define PKTISFAST(osh, skb) \
	({ \
		BCM_REFERENCE(osh); \
		((((struct sk_buff*)(skb))->__unused) & FASTBUF); \
	})
#define PKTFAST(osh, skb) (((struct sk_buff*)(skb))->__unused)
#endif /* 2.6.22 */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
#define CTFPOOLPTR(osh, skb) (((struct sk_buff*)(skb))->ctfpool)
#define CTFPOOLHEAD(osh, skb) (((ctfpool_t *)((struct sk_buff*)(skb))->ctfpool)->head)
#else
#define CTFPOOLPTR(osh, skb) (((struct sk_buff*)(skb))->sk)
#define CTFPOOLHEAD(osh, skb) (((ctfpool_t *)((struct sk_buff*)(skb))->sk)->head)
#endif

extern void *osl_ctfpool_add(osl_t *osh);
extern void osl_ctfpool_replenish(osl_t *osh, uint thresh);
extern int32 osl_ctfpool_init(osl_t *osh, uint numobj, uint size);
extern void osl_ctfpool_cleanup(osl_t *osh);
extern void osl_ctfpool_stats(osl_t *osh, void *b);
#else /* CTFPOOL */
#define PKTSETFAST(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
#define PKTCLRFAST(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
#define PKTISFAST(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})
#endif /* CTFPOOL */

#define PKTSETCTF(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
#define PKTCLRCTF(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
#define PKTISCTF(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})

#ifdef HNDCTF

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
#define SKIPCT (1 << 2)
#define CHAINED (1 << 3)
#define PKTSETSKIPCT(osh, skb) \
	({ \
		BCM_REFERENCE(osh); \
		(((struct sk_buff*)(skb))->pktc_flags |= SKIPCT); \
	})
#define PKTCLRSKIPCT(osh, skb) \
	({ \
		BCM_REFERENCE(osh); \
		(((struct sk_buff*)(skb))->pktc_flags &= (~SKIPCT)); \
	})
#define PKTSKIPCT(osh, skb) \
	({ \
		BCM_REFERENCE(osh); \
		(((struct sk_buff*)(skb))->pktc_flags & SKIPCT); \
	})
#define PKTSETCHAINED(osh, skb) \
	({ \
		BCM_REFERENCE(osh); \
		(((struct sk_buff*)(skb))->pktc_flags |= CHAINED); \
	})
#define PKTCLRCHAINED(osh, skb) \
	({ \
		BCM_REFERENCE(osh); \
		(((struct sk_buff*)(skb))->pktc_flags &= (~CHAINED)); \
	})
#define PKTISCHAINED(skb) (((struct sk_buff*)(skb))->pktc_flags & CHAINED)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
#define SKIPCT (1 << 18)
#define CHAINED (1 << 19)
#define PKTSETSKIPCT(osh, skb) \
	({ \
		BCM_REFERENCE(osh); \
		(((struct sk_buff*)(skb))->mac_len |= SKIPCT); \
	})
#define PKTCLRSKIPCT(osh, skb) \
	({ \
		BCM_REFERENCE(osh); \
		(((struct sk_buff*)(skb))->mac_len &= (~SKIPCT)); \
	})
#define PKTSKIPCT(osh, skb) \
	({ \
		BCM_REFERENCE(osh); \
		(((struct sk_buff*)(skb))->mac_len & SKIPCT); \
	})
#define PKTSETCHAINED(osh, skb) \
	({ \
		BCM_REFERENCE(osh); \
		(((struct sk_buff*)(skb))->mac_len |= CHAINED); \
	})
#define PKTCLRCHAINED(osh, skb) \
	({ \
		BCM_REFERENCE(osh); \
		(((struct sk_buff*)(skb))->mac_len &= (~CHAINED)); \
	})
#define PKTISCHAINED(skb) (((struct sk_buff*)(skb))->mac_len & CHAINED)
#else /* 2.6.22 */
#define SKIPCT (1 << 2)
#define CHAINED (1 << 3)
#define PKTSETSKIPCT(osh, skb) \
	({ \
		BCM_REFERENCE(osh); \
		(((struct sk_buff*)(skb))->__unused |= SKIPCT); \
	})
#define PKTCLRSKIPCT(osh, skb) \
	({ \
		BCM_REFERENCE(osh); \
		(((struct sk_buff*)(skb))->__unused &= (~SKIPCT)); \
	})
#define PKTSKIPCT(osh, skb) \
	({ \
		BCM_REFERENCE(osh); \
		(((struct sk_buff*)(skb))->__unused & SKIPCT); \
	})
#define PKTSETCHAINED(osh, skb) \
	({ \
		BCM_REFERENCE(osh); \
		(((struct sk_buff*)(skb))->__unused |= CHAINED); \
	})
#define PKTCLRCHAINED(osh, skb) \
	({ \
		BCM_REFERENCE(osh); \
		(((struct sk_buff*)(skb))->__unused &= (~CHAINED)); \
	})
#define PKTISCHAINED(skb) (((struct sk_buff*)(skb))->__unused & CHAINED)
#endif /* 2.6.22 */
typedef struct ctf_mark {
	uint32 value;
} ctf_mark_t;
#define CTF_MARK(m) (m.value)
#else /* HNDCTF */
#define PKTSETSKIPCT(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
#define PKTCLRSKIPCT(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
#define PKTSKIPCT(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
#define CTF_MARK(m) ({BCM_REFERENCE(m); 0;})
#endif /* HNDCTF */

#if defined(BCM_GMAC3)

/** pktalloced accounting in devices using GMAC Bulk Forwarding to DHD */

/* Account for packets delivered to downstream forwarder by GMAC interface. */
extern void osl_pkt_tofwder(osl_t *osh, void *skbs, int skb_cnt);
#define PKTTOFWDER(osh, skbs, skb_cnt) \
	osl_pkt_tofwder(((osl_t *)osh), (void *)(skbs), (skb_cnt))

/* Account for packets received from downstream forwarder. */
#if defined(BCMDBG_CTRACE) /* pkt logging */
extern void osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt,
	int line, char *file);
#define PKTFRMFWDER(osh, skbs, skb_cnt) \
	osl_pkt_frmfwder(((osl_t *)osh), (void *)(skbs), (skb_cnt), \
		__LINE__, __FILE__)
#else /* !BCMDBG_CTRACE */
extern void osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt);
#define PKTFRMFWDER(osh, skbs, skb_cnt) \
	osl_pkt_frmfwder(((osl_t *)osh), (void *)(skbs), (skb_cnt))
#endif


/** GMAC Forwarded packet tagging for reduced cache flush/invalidate.
 * In FWDERBUF tagged packet, only FWDER_PKTMAPSZ amount of data would have
 * been accessed in the GMAC forwarder. This may be used to limit the number of
 * cachelines that need to be flushed or invalidated.
 * Packets sent to the DHD from a GMAC forwarder will be tagged w/ FWDERBUF.
 * DHD may clear the FWDERBUF tag, if more than FWDER_PKTMAPSZ was accessed.
 * Likewise, a debug print of a packet payload in say the ethernet driver needs
 * to be accompanied with a clear of the FWDERBUF tag.
 */

/** Forwarded packets, have a GMAC_FWDER_HWRXOFF sized rx header (etc.h) */
#define FWDER_HWRXOFF (18)

/** Maximum amount of a pkt data that a downstream forwarder (GMAC) may have
 * read into the L1 cache (not dirty). This may be used in reduced cache ops.
 *
 * Max 44: ET HWRXOFF[18] + BRCMHdr[4] + EtherHdr[14] + VlanHdr[4] + IP[4]
 * Min 32: GMAC_FWDER_HWRXOFF[18] + EtherHdr[14]
 */
#define FWDER_MINMAPSZ (FWDER_HWRXOFF + 14)
#define FWDER_MAXMAPSZ (FWDER_HWRXOFF + 4 + 14 + 4 + 4)
#define FWDER_PKTMAPSZ (FWDER_MINMAPSZ)

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)

#define FWDERBUF (1 << 4)
#define PKTSETFWDERBUF(osh, skb) \
	({ \
		BCM_REFERENCE(osh); \
		(((struct sk_buff*)(skb))->pktc_flags |= FWDERBUF); \
	})
#define PKTCLRFWDERBUF(osh, skb) \
	({ \
		BCM_REFERENCE(osh); \
		(((struct sk_buff*)(skb))->pktc_flags &= (~FWDERBUF)); \
	})
#define PKTISFWDERBUF(osh, skb) \
	({ \
		BCM_REFERENCE(osh); \
		(((struct sk_buff*)(skb))->pktc_flags & FWDERBUF); \
	})

#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)

#define FWDERBUF (1 << 20)
#define PKTSETFWDERBUF(osh, skb) \
	({ \
		BCM_REFERENCE(osh); \
		(((struct sk_buff*)(skb))->mac_len |= FWDERBUF); \
	})
#define PKTCLRFWDERBUF(osh, skb) \
	({ \
		BCM_REFERENCE(osh); \
		(((struct sk_buff*)(skb))->mac_len &= (~FWDERBUF)); \
	})
#define PKTISFWDERBUF(osh, skb) \
	({ \
		BCM_REFERENCE(osh); \
		(((struct sk_buff*)(skb))->mac_len & FWDERBUF); \
	})

#else /* 2.6.22 */

#define FWDERBUF (1 << 4)
#define PKTSETFWDERBUF(osh, skb) \
	({ \
		BCM_REFERENCE(osh); \
		(((struct sk_buff*)(skb))->__unused |= FWDERBUF); \
	})
#define PKTCLRFWDERBUF(osh, skb) \
	({ \
		BCM_REFERENCE(osh); \
		(((struct sk_buff*)(skb))->__unused &= (~FWDERBUF)); \
	})
#define PKTISFWDERBUF(osh, skb) \
	({ \
		BCM_REFERENCE(osh); \
		(((struct sk_buff*)(skb))->__unused & FWDERBUF); \
	})

#endif /* 2.6.22 */

#else /* ! BCM_GMAC3 */

#define PKTSETFWDERBUF(osh, skb) ({ BCM_REFERENCE(osh); BCM_REFERENCE(skb); })
#define PKTCLRFWDERBUF(osh, skb) ({ BCM_REFERENCE(osh); BCM_REFERENCE(skb); })
#define PKTISFWDERBUF(osh, skb) ({ BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})

#endif /* ! BCM_GMAC3 */


#ifdef HNDCTF
/* For broadstream iqos */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
#define TOBR (1 << 5)
#define PKTSETTOBR(osh, skb) \
	({ \
		BCM_REFERENCE(osh); \
		(((struct sk_buff*)(skb))->pktc_flags |= TOBR); \
	})
#define PKTCLRTOBR(osh, skb) \
	({ \
		BCM_REFERENCE(osh); \
		(((struct sk_buff*)(skb))->pktc_flags &= (~TOBR)); \
	})
#define PKTISTOBR(skb) (((struct sk_buff*)(skb))->pktc_flags & TOBR)
#define PKTSETCTFIPCTXIF(skb, ifp) (((struct sk_buff*)(skb))->ctf_ipc_txif = ifp)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
#define PKTSETTOBR(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
#define PKTCLRTOBR(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
#define PKTISTOBR(skb) ({BCM_REFERENCE(skb); FALSE;})
#define PKTSETCTFIPCTXIF(skb, ifp) ({BCM_REFERENCE(skb); BCM_REFERENCE(ifp);})
#else /* 2.6.22 */
#define PKTSETTOBR(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
#define PKTCLRTOBR(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
#define PKTISTOBR(skb) ({BCM_REFERENCE(skb); FALSE;})
#define PKTSETCTFIPCTXIF(skb, ifp) ({BCM_REFERENCE(skb); BCM_REFERENCE(ifp);})
#endif /* 2.6.22 */
#else /* HNDCTF */
#define PKTSETTOBR(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
#define PKTCLRTOBR(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
#define PKTISTOBR(skb) ({BCM_REFERENCE(skb); FALSE;})
#endif /* HNDCTF */


#ifdef BCMFA
#ifdef BCMFA_HW_HASH
#define PKTSETFAHIDX(skb, idx) (((struct sk_buff*)(skb))->napt_idx = idx)
#else
#define PKTSETFAHIDX(skb, idx) ({BCM_REFERENCE(skb); BCM_REFERENCE(idx);})
#endif /* BCMFA_HW_HASH */
#define PKTGETFAHIDX(skb) (((struct sk_buff*)(skb))->napt_idx)
#define PKTSETFADEV(skb, imp) (((struct sk_buff*)(skb))->dev = imp)
#define PKTSETRXDEV(skb) (((struct sk_buff*)(skb))->rxdev = ((struct sk_buff*)(skb))->dev)

#define AUX_TCP_FIN_RST (1 << 0)
#define AUX_FREED (1 << 1)
#define PKTSETFAAUX(skb) (((struct sk_buff*)(skb))->napt_flags |= AUX_TCP_FIN_RST)
#define PKTCLRFAAUX(skb) (((struct sk_buff*)(skb))->napt_flags &= (~AUX_TCP_FIN_RST))
#define PKTISFAAUX(skb) (((struct sk_buff*)(skb))->napt_flags & AUX_TCP_FIN_RST)
#define PKTSETFAFREED(skb) (((struct sk_buff*)(skb))->napt_flags |= AUX_FREED)
#define PKTCLRFAFREED(skb) (((struct sk_buff*)(skb))->napt_flags &= (~AUX_FREED))
#define PKTISFAFREED(skb) (((struct sk_buff*)(skb))->napt_flags & AUX_FREED)
#define PKTISFABRIDGED(skb) PKTISFAAUX(skb)
#else
#define PKTISFAAUX(skb) ({BCM_REFERENCE(skb); FALSE;})
#define PKTISFABRIDGED(skb) ({BCM_REFERENCE(skb); FALSE;})
#define PKTISFAFREED(skb) ({BCM_REFERENCE(skb); FALSE;})

#define PKTCLRFAAUX(skb) BCM_REFERENCE(skb)
#define PKTSETFAFREED(skb) BCM_REFERENCE(skb)
#define PKTCLRFAFREED(skb) BCM_REFERENCE(skb)
#endif /* BCMFA */

#if defined(BCM_OBJECT_TRACE)
extern void osl_pktfree(osl_t *osh, void *skb, bool send, int line, const char *caller);
#else
extern void osl_pktfree(osl_t *osh, void *skb, bool send);
#endif /* BCM_OBJECT_TRACE */
extern void *osl_pktget_static(osl_t *osh, uint len);
extern void osl_pktfree_static(osl_t *osh, void *skb, bool send);
extern void osl_pktclone(osl_t *osh, void **pkt);

#ifdef BCMDBG_CTRACE
#define PKT_CTRACE_DUMP(osh, b) osl_ctrace_dump((osh), (b))
extern void *osl_pktget(osl_t *osh, uint len, int line, char *file);
extern void *osl_pkt_frmnative(osl_t *osh, void *skb, int line, char *file);
extern int osl_pkt_is_frmnative(osl_t *osh, struct sk_buff *pkt);
extern void *osl_pktdup(osl_t *osh, void *skb, int line, char *file);
struct bcmstrbuf;
extern void osl_ctrace_dump(osl_t *osh, struct bcmstrbuf *b);
#else
#ifdef BCM_OBJECT_TRACE
extern void *osl_pktget(osl_t *osh, uint len, int line, const char *caller);
extern void *osl_pktdup(osl_t *osh, void *skb, int line, const char *caller);
#else
extern void *osl_pktget(osl_t *osh, uint len);
extern void *osl_pktdup(osl_t *osh, void *skb);
#endif /* BCM_OBJECT_TRACE */
extern void *osl_pkt_frmnative(osl_t *osh, void *skb);
#endif /* BCMDBG_CTRACE */
extern struct sk_buff *osl_pkt_tonative(osl_t *osh, void *pkt);
#ifdef BCMDBG_CTRACE
#define PKTFRMNATIVE(osh, skb) osl_pkt_frmnative(((osl_t *)osh), \
	(struct sk_buff*)(skb), __LINE__, __FILE__)
#define PKTISFRMNATIVE(osh, skb) osl_pkt_is_frmnative((osl_t *)(osh), (struct sk_buff *)(skb))
#else
#define PKTFRMNATIVE(osh, skb) osl_pkt_frmnative(((osl_t *)osh), (struct sk_buff*)(skb))
#endif /* BCMDBG_CTRACE */
#define PKTTONATIVE(osh, pkt) osl_pkt_tonative((osl_t *)(osh), (pkt))

#define PKTLINK(skb) (((struct sk_buff*)(skb))->prev)
#define PKTSETLINK(skb, x) (((struct sk_buff*)(skb))->prev = (struct sk_buff*)(x))
#define PKTPRIO(skb) (((struct sk_buff*)(skb))->priority)
#define PKTSETPRIO(skb, x) (((struct sk_buff*)(skb))->priority = (x))
#define PKTSUMNEEDED(skb) (((struct sk_buff*)(skb))->ip_summed == CHECKSUM_HW)
#define PKTSETSUMGOOD(skb, x) (((struct sk_buff*)(skb))->ip_summed = \
	((x) ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE))
/* PKTSETSUMNEEDED and PKTSUMGOOD are not possible because skb->ip_summed is overloaded */
#define PKTSHARED(skb) (((struct sk_buff*)(skb))->cloned)

#ifdef CONFIG_NF_CONNTRACK_MARK
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
#define PKTMARK(p) (((struct sk_buff *)(p))->mark)
#define PKTSETMARK(p, m) ((struct sk_buff *)(p))->mark = (m)
#else /* !2.6.0 */
#define PKTMARK(p) (((struct sk_buff *)(p))->nfmark)
#define PKTSETMARK(p, m) ((struct sk_buff *)(p))->nfmark = (m)
#endif /* 2.6.0 */
#else /* CONFIG_NF_CONNTRACK_MARK */
#define PKTMARK(p) 0
#define PKTSETMARK(p, m)
#endif /* CONFIG_NF_CONNTRACK_MARK */

#define PKTALLOCED(osh) osl_pktalloced(osh)
extern uint osl_pktalloced(osl_t *osh);

#define OSL_RAND() osl_rand()
extern uint32 osl_rand(void);

#if !defined(BCM_SECURE_DMA)
#define DMA_MAP(osh, va, size, direction, p, dmah) \
	osl_dma_map((osh), (va), (size), (direction), (p), (dmah))
#endif /* !(defined(BCM_SECURE_DMA)) */

#ifdef PKTC
/* Use 8 bytes of skb tstamp field to store below info */
struct chain_node {
	struct sk_buff	*link;
	unsigned int	flags:3, pkts:9, bytes:20;
};

#define CHAIN_NODE(skb) ((struct chain_node*)(((struct sk_buff*)skb)->pktc_cb))

#define PKTCSETATTR(s, f, p, b) ({CHAIN_NODE(s)->flags = (f); CHAIN_NODE(s)->pkts = (p); \
	CHAIN_NODE(s)->bytes = (b);})
#define PKTCCLRATTR(s) ({CHAIN_NODE(s)->flags = CHAIN_NODE(s)->pkts = \
	CHAIN_NODE(s)->bytes = 0;})
#define PKTCGETATTR(s) (CHAIN_NODE(s)->flags << 29 | CHAIN_NODE(s)->pkts << 20 | \
	CHAIN_NODE(s)->bytes)
#define PKTCCNT(skb) (CHAIN_NODE(skb)->pkts)
#define PKTCLEN(skb) (CHAIN_NODE(skb)->bytes)
#define PKTCGETFLAGS(skb) (CHAIN_NODE(skb)->flags)
#define PKTCSETFLAGS(skb, f) (CHAIN_NODE(skb)->flags = (f))
#define PKTCCLRFLAGS(skb) (CHAIN_NODE(skb)->flags = 0)
#define PKTCFLAGS(skb) (CHAIN_NODE(skb)->flags)
#define PKTCSETCNT(skb, c) (CHAIN_NODE(skb)->pkts = (c))
#define PKTCINCRCNT(skb) (CHAIN_NODE(skb)->pkts++)
#define PKTCADDCNT(skb, c) (CHAIN_NODE(skb)->pkts += (c))
#define PKTCSETLEN(skb, l) (CHAIN_NODE(skb)->bytes = (l))
#define PKTCADDLEN(skb, l) (CHAIN_NODE(skb)->bytes += (l))
#define PKTCSETFLAG(skb, fb) (CHAIN_NODE(skb)->flags |= (fb))
#define PKTCCLRFLAG(skb, fb) (CHAIN_NODE(skb)->flags &= ~(fb))
#define PKTCLINK(skb) (CHAIN_NODE(skb)->link)
#define PKTSETCLINK(skb, x) (CHAIN_NODE(skb)->link = (struct sk_buff*)(x))
#define FOREACH_CHAINED_PKT(skb, nskb) \
	for (; (skb) != NULL; (skb) = (nskb)) \
		if ((nskb) = (PKTISCHAINED(skb) ? PKTCLINK(skb) : NULL), \
			PKTSETCLINK((skb), NULL), 1)
#define PKTCFREE(osh, skb, send) \
	do { \
		void *nskb; \
		ASSERT((skb) != NULL); \
		FOREACH_CHAINED_PKT((skb), nskb) { \
			PKTCLRCHAINED((osh), (skb)); \
			PKTCCLRFLAGS((skb)); \
			PKTFREE((osh), (skb), (send)); \
		} \
	} while (0)
#define PKTCENQTAIL(h, t, p) \
	do { \
		if ((t) == NULL) { \
			(h) = (t) = (p); \
		} else { \
			PKTSETCLINK((t), (p)); \
			(t) = (p); \
		} \
	} while (0)
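/* Illustrative sketch: building a packet chain with PKTCENQTAIL and walking it
 * with FOREACH_CHAINED_PKT. Everything here uses only the PKTC/HNDCTF macros
 * declared in this header; the per-packet tx step is hypothetical. Kept under
 * #if 0 so it never builds.
 */
#if 0
static void example_chain_and_send(osl_t *osh, void *pkts[], int npkts)
{
	void *head = NULL, *tail = NULL, *p, *n;
	int i;

	for (i = 0; i < npkts; i++) {
		p = pkts[i];
		PKTSETCLINK(p, NULL);		/* clear any stale link first */
		PKTSETCHAINED(osh, p);		/* mark it as a chain member */
		PKTCENQTAIL(head, tail, p);	/* append via the chain_node link */
	}
	if (head != NULL)
		PKTCSETCNT(head, npkts);	/* head carries the chain totals */

	/* consumer side: walk the chain, unlinking each packet as we go */
	p = head;
	FOREACH_CHAINED_PKT(p, n) {
		PKTCLRCHAINED(osh, p);
		/* example_tx_one(osh, p) would go here; PKTFREE() on error */
	}
}
#endif /* example */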
#endif /* PKTC */

#else /* ! BCMDRIVER */


/* ASSERT */
#define ASSERT(exp) do {} while (0)

/* MALLOC and MFREE */
#define MALLOC(o, l) malloc(l)
#define MFREE(o, p, l) free(p)
#include <stdlib.h>

/* str* and mem* functions */
#include <string.h>

/* *printf functions */
#include <stdio.h>

/* bcopy, bcmp, and bzero */
extern void bcopy(const void *src, void *dst, size_t len);
extern int bcmp(const void *b1, const void *b2, size_t len);
extern void bzero(void *b, size_t len);
#endif /* ! BCMDRIVER */

/* The current STB 7445D1 does not use ACP and is non-coherent.
 * These dummy values are only here so the build passes; they need to be
 * revisited and changed later.
 */
#if defined(STBLINUX)

#if defined(__ARM_ARCH_7A__)
#define ACP_WAR_ENAB() 0
#define ACP_WIN_LIMIT 1
#define arch_is_coherent() 0
#endif /* __ARM_ARCH_7A__ */

#endif /* STBLINUX */

#ifdef BCM_SECURE_DMA

#define SECURE_DMA_MAP(osh, va, size, direction, p, dmah, pcma, offset) \
	osl_sec_dma_map((osh), (va), (size), (direction), (p), (dmah), (pcma), (offset))
#define SECURE_DMA_DD_MAP(osh, va, size, direction, p, dmah) \
	osl_sec_dma_dd_map((osh), (va), (size), (direction), (p), (dmah))
#define SECURE_DMA_MAP_TXMETA(osh, va, size, direction, p, dmah, pcma) \
	osl_sec_dma_map_txmeta((osh), (va), (size), (direction), (p), (dmah), (pcma))
#define SECURE_DMA_UNMAP(osh, pa, size, direction, p, dmah, pcma, offset) \
	osl_sec_dma_unmap((osh), (pa), (size), (direction), (p), (dmah), (pcma), (offset))
#define SECURE_DMA_UNMAP_ALL(osh, pcma) \
	osl_sec_dma_unmap_all((osh), (pcma))

#define DMA_MAP(osh, va, size, direction, p, dmah)

typedef struct sec_cma_info {
	struct sec_mem_elem *sec_alloc_list;
	struct sec_mem_elem *sec_alloc_list_tail;
} sec_cma_info_t;

#if defined(__ARM_ARCH_7A__)
#define CMA_BUFSIZE_4K 4096
#define CMA_BUFSIZE_2K 2048
#define CMA_BUFSIZE_512 512

#define CMA_BUFNUM 2048
#define SEC_CMA_COHERENT_BLK 0x8000 /* 32768 */
#define SEC_CMA_COHERENT_MAX 278
#define CMA_DMA_DESC_MEMBLOCK (SEC_CMA_COHERENT_BLK * SEC_CMA_COHERENT_MAX)
#define CMA_DMA_DATA_MEMBLOCK (CMA_BUFSIZE_4K*CMA_BUFNUM)
#define CMA_MEMBLOCK (CMA_DMA_DESC_MEMBLOCK + CMA_DMA_DATA_MEMBLOCK)
#define CONT_REGION 0x02 /* Region CMA */
#else
#define CONT_REGION 0x00 /* for accessing MIPS memory; not yet supported */
#endif /* !defined __ARM_ARCH_7A__ */

#define SEC_DMA_ALIGN (1<<16)
typedef struct sec_mem_elem {
	size_t size;
	int direction;
	phys_addr_t pa_cma;	/**< physical address */
	void *va;		/**< virtual address of driver pkt */
	dma_addr_t dma_handle;	/**< bus address assigned by Linux */
	void *vac;		/**< virtual address of cma buffer */
	struct page *pa_cma_page;	/* phys to page address */
	struct sec_mem_elem *next;
} sec_mem_elem_t;

extern dma_addr_t osl_sec_dma_map(osl_t *osh, void *va, uint size, int direction, void *p,
	hnddma_seg_map_t *dmah, void *ptr_cma_info, uint offset);
extern dma_addr_t osl_sec_dma_dd_map(osl_t *osh, void *va, uint size, int direction, void *p,
	hnddma_seg_map_t *dmah);
extern dma_addr_t osl_sec_dma_map_txmeta(osl_t *osh, void *va, uint size,
	int direction, void *p, hnddma_seg_map_t *dmah, void *ptr_cma_info);
extern void osl_sec_dma_unmap(osl_t *osh, dma_addr_t dma_handle, uint size, int direction,
	void *p, hnddma_seg_map_t *map, void *ptr_cma_info, uint offset);
extern void osl_sec_dma_unmap_all(osl_t *osh, void *ptr_cma_info);

#endif /* BCM_SECURE_DMA */

typedef struct sk_buff_head PKT_LIST;
#define PKTLIST_INIT(x) skb_queue_head_init((x))
#define PKTLIST_ENQ(x, y) skb_queue_head((struct sk_buff_head *)(x), (struct sk_buff *)(y))
#define PKTLIST_DEQ(x) skb_dequeue((struct sk_buff_head *)(x))
#define PKTLIST_UNLINK(x, y) skb_unlink((struct sk_buff *)(y), (struct sk_buff_head *)(x))
#define PKTLIST_FINI(x) skb_queue_purge((struct sk_buff_head *)(x))
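/* Illustrative sketch: PKT_LIST is a thin wrapper over an sk_buff_head queue.
 * The producer/consumer split shown here is hypothetical, and PKTFREE is only
 * available in BCMDRIVER builds. Kept under #if 0 so it never builds.
 */
#if 0
static void example_pkt_list(osl_t *osh, void *pkt)
{
	PKT_LIST rxq;

	PKTLIST_INIT(&rxq);		/* empty queue, lock initialised */
	PKTLIST_ENQ(&rxq, pkt);		/* producer side */

	/* consumer side: drain one packet, then purge whatever is left */
	pkt = PKTLIST_DEQ(&rxq);
	if (pkt != NULL)
		PKTFREE(osh, pkt, FALSE);
	PKTLIST_FINI(&rxq);
}
#endif /* example */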

#ifdef REPORT_FATAL_TIMEOUTS
typedef struct osl_timer {
	struct timer_list *timer;
	bool set;
} osl_timer_t;

typedef void (*linux_timer_fn)(ulong arg);

extern osl_timer_t * osl_timer_init(osl_t *osh, const char *name, void (*fn)(void *arg), void *arg);
extern void osl_timer_add(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic);
extern void osl_timer_update(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic);
extern bool osl_timer_del(osl_t *osh, osl_timer_t *t);
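/* Illustrative sketch: arming a periodic watchdog with the osl_timer API
 * declared above. The 1000 ms period and the callback body are example
 * values. Kept under #if 0 so it never builds.
 */
#if 0
static void example_watchdog(void *arg)
{
	/* runs in timer (softirq) context once per period */
}

static void example_start_watchdog(osl_t *osh)
{
	osl_timer_t *t = osl_timer_init(osh, "example_wd", example_watchdog, NULL);

	if (t != NULL)
		osl_timer_add(osh, t, 1000, TRUE);	/* periodic, 1000 ms */

	/* ... later: osl_timer_del(osh, t) tears the timer down ... */
}
#endif /* example */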
#endif

#endif /* _linux_osl_h_ */