/*******************************************************************************
* Copyright (C) 2018 Cadence Design Systems, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to use this Software with Cadence processor cores only and
* not with any other processors and platforms, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

******************************************************************************/

/*******************************************************************************
 * xf-mem.h
 *
 * System-specific memory allocator
 *
 ******************************************************************************/

#ifndef __XF_H
#error "xf-mem.h mustn't be included directly"
#endif

/*******************************************************************************
 * System-specific memory pools
 ******************************************************************************/

#if XF_CFG_CORES_NUM > 1
/* ...shared memory pool for communication within DSP-cluster */
extern xf_mm_pool_t xf_dsp_shmem_pool;
#endif

/*******************************************************************************
 * Platform-specific SHMEM allocation registering functions
 ******************************************************************************/

/* ...register shmem allocation address */
static inline void xf_shmem_alloc_addref(u32 core, xf_message_t *m)
{
}

/* ...unregister shmem allocation address */
static inline void xf_shmem_alloc_rmref(u32 core, xf_message_t *m)
{
}

/*******************************************************************************
 * API functions
 ******************************************************************************/

/* ...allocate aligned memory on particular core specifying if it is shared */
static inline void * xf_mem_alloc(u32 size, u32 align, u32 core, u32 shared)
{
#if XF_CFG_CORES_NUM > 1
    if (shared)
    {
        /* ...if memory is shared, core is dropped */
        return xf_mm_alloc(&xf_dsp_shmem_pool, size);
    }
#endif

    /* ...select local memory pool based on core specification */
    return xf_mm_alloc(&XF_CORE_DATA(core)->local_pool, size);
}

#ifdef XAF_ENABLE_NON_HIKEY
/* ...redefine macro to add bugchecks */
#define xf_mem_alloc(size, align, core, shared)                                 \
({                                                                              \
    void *__data;                                                               \
    /* ...size must be properly aligned */                                      \
    BUG(!XF_MM_ALIGNED(size), _x("Bad size: %u"), size);                        \
    __data = (xf_mem_alloc)(size, align, core, shared);                         \
    TRACE(1, _b("alloc-%u: %p[%u] (shared=%u)"), core, __data, size, shared);   \
    __data;                                                                     \
})
#endif
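
/* ...usage sketch (illustrative only, not part of this header): allocating a
 * private buffer from the local pool of a given core. The size passed to
 * xf_mem_alloc() must already be XF_MM_ALIGNED (the bugcheck above enforces
 * this); raw sizes are rounded with XF_MM() elsewhere in this header (see
 * xf_mm_alloc_buffer below). The identifiers my_size, my_align and my_core
 * are hypothetical:
 *
 *      void *buf = xf_mem_alloc(XF_MM(my_size), my_align, my_core, 0);
 *
 *      if (buf == NULL)
 *      {
 *          (handle out-of-memory condition)
 *      }
 */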

/* ...release allocated memory */
static inline void xf_mem_free(void *p, u32 size, u32 core, u32 shared)
{
#if XF_CFG_CORES_NUM > 1
    if (shared)
    {
        /* ...if memory is shared, core is dropped */
        xf_mm_free(&xf_dsp_shmem_pool, p, size);
        return;
    }
#endif

    /* ...select proper pool based on core specification */
    xf_mm_free(&XF_CORE_DATA(core)->local_pool, p, size);
}

#ifdef XAF_ENABLE_NON_HIKEY
/* ...redefine macro to add bugchecks */
#define xf_mem_free(p, size, core, shared)                                      \
({                                                                              \
    void *__data = (p);                                                         \
    /* ...size must be properly aligned */                                      \
    BUG(!XF_MM_ALIGNED(size), _x("Bad size: %u"), size);                        \
    TRACE(1, _b("free-%u: %p[%u] (shared=%u)"), core, __data, size, shared);    \
    (xf_mem_free)(__data, size, core, shared);                                  \
})
#endif
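
/* ...usage sketch (illustrative only): releasing the buffer allocated above.
 * The caller passes back the same size, core and shared flag that were used
 * for the allocation, since the pool is selected from those arguments and the
 * block length is supplied by the caller. Continuing the hypothetical example:
 *
 *      xf_mem_free(buf, XF_MM(my_size), my_core, 0);
 */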

/* ...allocate AP-DSP shared memory */
static inline int xf_shmem_alloc(u32 core, xf_message_t *m)
{
    xf_mm_pool_t *pool = &XF_CORE_DATA(core)->shared_pool;

    /* ...length is always cache-line aligned */
    if ((m->buffer = xf_mm_alloc(pool, XF_ALIGNED(m->length))) != NULL)
    {
        /* ...register allocation address */
        xf_shmem_alloc_addref(core, m);

        return 0;
    }
    else
    {
        return -ENOMEM;
    }
}
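
/* ...usage sketch (illustrative only): servicing a request for m->length bytes
 * of AP-DSP shared memory. The length is rounded with XF_ALIGNED() inside
 * xf_shmem_alloc() itself, so the caller only checks the return code:
 *
 *      if (xf_shmem_alloc(core, m) < 0)
 *      {
 *          (complete the message with an -ENOMEM status)
 *      }
 *      else
 *      {
 *          (m->buffer now points into the core's shared_pool)
 *      }
 */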

/* ...free AP-DSP shared memory */
static inline void xf_shmem_free(u32 core, xf_message_t *m)
{
    xf_mm_pool_t *pool = &XF_CORE_DATA(core)->shared_pool;

    /* ...length is always cache-line aligned */
    xf_mm_free(pool, m->buffer, XF_ALIGNED(m->length));

    /* ...unregister allocation address */
    xf_shmem_alloc_rmref(core, m);
}

/*******************************************************************************
 * Scratch memory management
 ******************************************************************************/

static inline void * xf_scratch_mem_init(u32 core)
{
    /* ...allocate scratch memory from local DSP memory */
    return xf_mem_alloc(XF_CFG_CODEC_SCRATCHMEM_SIZE, XF_CFG_CODEC_SCRATCHMEM_ALIGN, core, 0);
}
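
/* ...usage sketch (illustrative only): scratch memory is carved out of the
 * core's local pool with a fixed size and alignment, so a matching release -
 * should one ever be needed - goes through xf_mem_free() with the same
 * parameters. The dsp_scratch pointer below is a hypothetical placeholder for
 * whatever per-core state stores the result:
 *
 *      void *dsp_scratch = xf_scratch_mem_init(core);
 *      ...
 *      xf_mem_free(dsp_scratch, XF_CFG_CODEC_SCRATCHMEM_SIZE, core, 0);
 */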

/*******************************************************************************
 * Helpers (platform-independent; candidates to move elsewhere - TBD)
 ******************************************************************************/

/* ...allocate local buffer */
static inline int xf_mm_alloc_buffer(u32 size, u32 align, u32 core, xf_mm_buffer_t *b)
{
    /* ...allocate memory from proper local pool */
    if ((size = XF_MM(size)) != 0)
        XF_CHK_ERR(b->addr = xf_mem_alloc(size, align, core, 0), -ENOMEM);
    else
        b->addr = NULL;

    /* ...save buffer size */
    b->size = size;

    return 0;
}

/* ...free local buffer */
static inline void xf_mm_free_buffer(xf_mm_buffer_t *b, u32 core)
{
    if (b->addr)
    {
        xf_mem_free(b->addr, b->size, core, 0);
    }
}
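
/* ...usage sketch (illustrative only): the helper pair above wraps a raw size
 * into an xf_mm_buffer_t descriptor, with a zero size mapping to a NULL
 * address. A negative return (e.g. -ENOMEM via XF_CHK_ERR) indicates failure.
 * The descriptor name my_buf and the size/alignment values are hypothetical:
 *
 *      xf_mm_buffer_t  my_buf;
 *
 *      if (xf_mm_alloc_buffer(my_size, my_align, core, &my_buf) < 0)
 *      {
 *          (propagate the allocation error)
 *      }
 *      ...
 *      xf_mm_free_buffer(&my_buf, core);
 */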