/*
 * Copyright (C) 2022 Huawei Technologies Co., Ltd.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include "shared_mem.h"
#include <securec.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <asm/io.h>
#include "tc_ns_log.h"
#include "tc_ns_client.h"
#include "teek_ns_client.h"
#include "smc_smp.h"
#include "internal_functions.h"
#include "mailbox_mempool.h"
#include "ko_adapt.h"

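/*
 * Map a reserved physical command buffer into the kernel with a cacheable
 * mapping and clear it; returns the virtual address, or 0 on failure.
 */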
uint64_t get_reserved_cmd_vaddr_of(phys_addr_t cmd_phys, uint64_t cmd_size)
{
    if (cmd_phys == 0 || cmd_size == 0) {
        tloge("cmd phys addr or cmd size is invalid\n");
        return 0;
    }
    uint64_t cmd_vaddr = (uint64_t)(uintptr_t)ioremap_cache(cmd_phys, cmd_size);
    if (cmd_vaddr == 0) {
        tloge("ioremap for reserved cmd buffer failed\n");
        return 0;
    }
    (void)memset_s((void *)(uintptr_t)cmd_vaddr, cmd_size, 0, cmd_size);
    return cmd_vaddr;
}

#ifdef CONFIG_SHARED_MEM_RESERVED

#define CMD_MEM_MIN_SIZE 0x1000
#define SPI_MEM_MIN_SIZE 0x1000
#define OPERATION_MEM_MIN_SIZE 0x1000
uint64_t g_cmd_mem_paddr;
uint64_t g_cmd_mem_size;
uint64_t g_mailbox_paddr;
uint64_t g_mailbox_size;
uint64_t g_log_mem_paddr;
uint64_t g_log_mem_size;
uint64_t g_spi_mem_paddr;
uint64_t g_spi_mem_size;
static mailbox_page_t *g_mailbox_page;
static uintptr_t g_shmem_start_virt;
static uintptr_t g_page_offset;

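/*
 * Read the reserved shared-memory layout (cmd, mailbox, spi and log
 * regions) from the device tree and check each region against its
 * minimum usable size.
 */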
int load_tz_shared_mem(struct device_node *np)
{
    int rc;

    rc = of_property_read_u64(np, "tz_shmem_cmd_addr", &g_cmd_mem_paddr);
    if (rc != 0) {
        tloge("read tz_shmem_cmd_addr failed\n");
        return -ENODEV;
    }

    rc = of_property_read_u64(np, "tz_shmem_cmd_size", &g_cmd_mem_size);
    if (rc != 0 || g_cmd_mem_size < CMD_MEM_MIN_SIZE) {
        tloge("read tz_shmem_cmd_size failed or size too small\n");
        return -ENODEV;
    }

    rc = of_property_read_u64(np, "tz_shmem_mailbox_addr", &g_mailbox_paddr);
    if (rc != 0) {
        tloge("read tz_shmem_mailbox_addr failed\n");
        return -ENODEV;
    }

    rc = of_property_read_u64(np, "tz_shmem_mailbox_size", &g_mailbox_size);
    if (rc != 0 || g_mailbox_size < MAILBOX_POOL_SIZE + OPERATION_MEM_MIN_SIZE) {
        tloge("read tz_shmem_mailbox_size failed or size too small\n");
        return -ENODEV;
    }

    rc = of_property_read_u64(np, "tz_shmem_spi_addr", &g_spi_mem_paddr);
    if (rc != 0) {
        tloge("read tz_shmem_spi_addr failed\n");
        return -ENODEV;
    }

    rc = of_property_read_u64(np, "tz_shmem_spi_size", &g_spi_mem_size);
    if (rc != 0 || g_spi_mem_size < SPI_MEM_MIN_SIZE) {
        tloge("read tz_shmem_spi_size failed or size too small\n");
        return -ENODEV;
    }

    rc = of_property_read_u64(np, "tz_shmem_log_addr", &g_log_mem_paddr);
    if (rc != 0) {
        tloge("read tz_shmem_log_addr failed\n");
        return -ENODEV;
    }

    rc = of_property_read_u64(np, "tz_shmem_log_size", &g_log_mem_size);
    if (rc != 0 || g_log_mem_size < PAGES_LOG_MEM_LEN) {
        tloge("read tz_shmem_log_size failed or size too small\n");
        return -ENODEV;
    }

    return 0;
}

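/*
 * Carve the reserved mailbox region into 2^order pseudo-pages: each
 * mailbox_page_t entry records the kernel virtual address of one slice
 * of the ioremapped region, so the pool allocator can treat reserved
 * memory like an array of pages.
 */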
mailbox_page_t *mailbox_alloc_pages(int order)
{
    uint32_t i;
    uint32_t page_num = 1 << (unsigned int)order;
    uint32_t page_size = page_num * sizeof(mailbox_page_t);

    g_page_offset = MAILBOX_POOL_SIZE / page_num;
    g_mailbox_page = kmalloc(page_size, GFP_KERNEL);
    if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)g_mailbox_page)) {
        tloge("failed to allocate mailbox page array\n");
        return NULL;
    }

    g_shmem_start_virt = (uintptr_t)ioremap_cache(g_mailbox_paddr, g_mailbox_size);
    if (g_shmem_start_virt == 0) {
        tloge("ioremap for mailbox pages failed\n");
        kfree(g_mailbox_page);
        g_mailbox_page = NULL;
        return NULL;
    }
    (void)memset_s((void *)g_shmem_start_virt, g_mailbox_size, 0, g_mailbox_size);
    g_mailbox_page[0] = (mailbox_page_t)g_shmem_start_virt;
    for (i = 1; i < page_num; i++)
        g_mailbox_page[i] = g_mailbox_page[i - 1] + g_page_offset;

    return g_mailbox_page;
}

void mailbox_free_pages(mailbox_page_t *pages, int order)
{
    if (!pages || pages != g_mailbox_page)
        return;

    (void)order;
    kfree(pages);
    g_mailbox_page = NULL;
}

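/* Return the kernel virtual address recorded for a mailbox pseudo-page. */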
uintptr_t mailbox_page_address(mailbox_page_t *page)
{
    if (!page)
        return 0;

    return *page;
}

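/*
 * Translate a virtual address inside the ioremapped mailbox region back to
 * its physical address; virt_to_phys() is not valid for ioremapped memory,
 * so the offset into the reserved region is applied by hand.
 */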
uintptr_t mailbox_virt_to_phys(uintptr_t addr)
{
    if (addr < g_shmem_start_virt || addr >= g_shmem_start_virt + g_mailbox_size)
        return 0;

    return g_mailbox_paddr + (addr - g_shmem_start_virt);
}

mailbox_page_t *mailbox_virt_to_page(uint64_t ptr)
{
    if (ptr < g_shmem_start_virt || ptr >= g_shmem_start_virt + g_mailbox_size)
        return NULL;

    return &g_mailbox_page[(ptr - g_shmem_start_virt) / g_page_offset];
}

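/*
 * The operation buffer sits directly behind the mailbox pool inside the
 * same reserved mapping, so it needs no separate allocation or free.
 */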
uint64_t get_operation_vaddr(void)
{
    return g_shmem_start_virt + MAILBOX_POOL_SIZE;
}

void free_operation(uint64_t op_vaddr)
{
    (void)op_vaddr;
}

/*
 * This function is only for the wireless platform. The CONFIG_LOG_POOL
 * macro controls log retention for the soft reset feature: when
 * CONFIG_LOG_POOL is enabled, this function does not memset the log pool
 * memory, so the logs written before the reset are retained.
 */
uint64_t get_log_mem_vaddr(void)
{
    uint64_t log_vaddr = (uint64_t)(uintptr_t)ioremap_cache(g_log_mem_paddr, g_log_mem_size);
    if (log_vaddr == 0) {
        tloge("ioremap for log buffer failed\n");
        return 0;
    }
#ifndef CONFIG_LOG_POOL
    (void)memset_s((void *)(uintptr_t)log_vaddr, g_log_mem_size, 0, g_log_mem_size);
#endif

    return log_vaddr;
}

uint64_t get_log_mem_paddr(uint64_t log_vaddr)
{
    (void)log_vaddr;
    return g_log_mem_paddr;
}

uint64_t get_log_mem_size(void)
{
    return g_log_mem_size;
}

void free_log_mem(uint64_t log_vaddr)
{
    iounmap((void __iomem *)(uintptr_t)log_vaddr);
}

uint64_t get_cmd_mem_vaddr(void)
{
    return get_reserved_cmd_vaddr_of(g_cmd_mem_paddr, g_cmd_mem_size);
}

uint64_t get_cmd_mem_paddr(uint64_t cmd_vaddr)
{
    (void)cmd_vaddr;
    return g_cmd_mem_paddr;
}

void free_cmd_mem(uint64_t cmd_vaddr)
{
    iounmap((void __iomem *)(uintptr_t)cmd_vaddr);
}

uint64_t get_spi_mem_vaddr(void)
{
    uint64_t spi_vaddr = (uint64_t)(uintptr_t)ioremap_cache(g_spi_mem_paddr, g_spi_mem_size);
    if (spi_vaddr == 0) {
        tloge("ioremap for spi buffer failed\n");
        return 0;
    }
    (void)memset_s((void *)(uintptr_t)spi_vaddr, g_spi_mem_size, 0, g_spi_mem_size);
    return spi_vaddr;
}

uint64_t get_spi_mem_paddr(uintptr_t spi_vaddr)
{
    (void)spi_vaddr;
    return g_spi_mem_paddr;
}

void free_spi_mem(uint64_t spi_vaddr)
{
    iounmap((void __iomem *)(uintptr_t)spi_vaddr);
}

#else

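/*
 * Without a reserved shared-memory region, all shared buffers come from
 * the kernel page allocator or the slab allocator instead.
 */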
int load_tz_shared_mem(struct device_node *np)
{
    (void)np;
    return 0;
}

mailbox_page_t *mailbox_alloc_pages(int order)
{
    return koadpt_alloc_pages(GFP_KERNEL, order);
}

void mailbox_free_pages(mailbox_page_t *pages, int order)
{
    if (!pages)
        return;

    __free_pages(pages, order);
}

uintptr_t mailbox_page_address(mailbox_page_t *page)
{
    if (!page)
        return 0;

    return (uintptr_t)page_address(page);
}

uintptr_t mailbox_virt_to_phys(uintptr_t addr)
{
    if (!addr)
        return 0;

    return (uintptr_t)virt_to_phys((void *)addr);
}

mailbox_page_t *mailbox_virt_to_page(uint64_t ptr)
{
    if (!ptr)
        return NULL;

    return virt_to_page((uintptr_t)ptr);
}

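/* In this mode the operation buffer is an ordinary slab allocation. */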
uint64_t get_operation_vaddr(void)
{
    return (uint64_t)(uintptr_t)kzalloc(sizeof(struct tc_ns_operation), GFP_KERNEL);
}

void free_operation(uint64_t op_vaddr)
{
    if (!op_vaddr)
        return;

    kfree((void *)(uintptr_t)op_vaddr);
}

uint64_t get_log_mem_vaddr(void)
{
    return __get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(PAGES_LOG_MEM_LEN));
}

uint64_t get_log_mem_paddr(uint64_t log_vaddr)
{
    if (!log_vaddr)
        return 0;

    return virt_to_phys((void *)(uintptr_t)log_vaddr);
}

uint64_t get_log_mem_size(void)
{
    return 0;
}

void free_log_mem(uint64_t log_vaddr)
{
    if (!log_vaddr)
        return;

    free_pages(log_vaddr, get_order(PAGES_LOG_MEM_LEN));
}

#define PAGES_BIG_SESSION_CMD_LEN 6
uint64_t get_cmd_mem_vaddr(void)
{
#ifdef CONFIG_BIG_SESSION
    /* at least 40 pages are needed for 1000 sessions; order 6 gives 2^6 = 64 pages */
    return (uint64_t)__get_free_pages(GFP_KERNEL | __GFP_ZERO, PAGES_BIG_SESSION_CMD_LEN);
#else
    return (uint64_t)__get_free_page(GFP_KERNEL | __GFP_ZERO);
#endif
}

uint64_t get_cmd_mem_paddr(uint64_t cmd_vaddr)
{
    if (!cmd_vaddr)
        return 0;

    return virt_to_phys((void *)(uintptr_t)cmd_vaddr);
}

void free_cmd_mem(uint64_t cmd_vaddr)
{
    if (!cmd_vaddr)
        return;

#ifdef CONFIG_BIG_SESSION
    free_pages(cmd_vaddr, PAGES_BIG_SESSION_CMD_LEN);
#else
    free_page(cmd_vaddr);
#endif
}

uint64_t get_spi_mem_vaddr(void)
{
#ifdef CONFIG_BIG_SESSION
    /* at least 3 pages are needed for 100 sessions; an order of 2 gives 2^2 = 4 pages */
    return (uint64_t)__get_free_pages(GFP_KERNEL | __GFP_ZERO, CONFIG_NOTIFY_PAGE_ORDER);
#else
    return (uint64_t)__get_free_page(GFP_KERNEL | __GFP_ZERO);
#endif
}

uint64_t get_spi_mem_paddr(uintptr_t spi_vaddr)
{
    if (spi_vaddr == 0)
        return 0;

    return virt_to_phys((void *)spi_vaddr);
}

void free_spi_mem(uint64_t spi_vaddr)
{
    if (!spi_vaddr)
        return;

#ifdef CONFIG_BIG_SESSION
    free_pages(spi_vaddr, CONFIG_NOTIFY_PAGE_ORDER);
#else
    free_page(spi_vaddr);
#endif
}
#endif