/*
 * Copyright (C) 2022 Huawei Technologies Co., Ltd.
 * Description: mailbox memory managing for sharing memory with TEE.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include "mailbox_mempool.h"
#include "shared_mem.h"
#include <linux/list.h>
#include <linux/sizes.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/version.h>
#include <securec.h>
#if (KERNEL_VERSION(4, 14, 0) <= LINUX_VERSION_CODE)
#include <linux/vmalloc.h>
#endif
#include "teek_client_constants.h"
#include "tc_ns_log.h"
#include "smc_smp.h"
#include "ko_adapt.h"
#include "internal_functions.h"

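/*
 * The mailbox pool is one physically contiguous block of MAILBOX_POOL_SIZE
 * bytes shared with the TEE. It is carved into 4K pages and managed by a
 * simple binary buddy allocator: each page has a descriptor (struct
 * mb_page_t), and free blocks of 2^order pages are linked into a per-order
 * free list (struct mb_free_area_t).
 */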
#define MAILBOX_PAGE_MAX (MAILBOX_POOL_SIZE >> PAGE_SHIFT)
static int g_max_oder;

#define OPT_MODE 0660U
#define STATE_MODE 0440U

struct mb_page_t {
	struct list_head node;
	mailbox_page_t *page;
	int order;
	unsigned int count; /* non-zero if this block is in use */
};

struct mb_free_area_t {
	struct list_head page_list;
	int order;
};

struct mb_zone_t {
	mailbox_page_t *all_pages;
	struct mb_page_t pages[MAILBOX_PAGE_MAX];
	struct mb_free_area_t free_areas[];
};

static struct mb_zone_t *g_m_zone;
static struct mutex g_mb_lock;

static void mailbox_show_status(void)
{
	unsigned int i;
	struct mb_page_t *pos = NULL;
	struct list_head *head = NULL;
	unsigned int used = 0;

	if (!g_m_zone) {
		tloge("zone struct is NULL\n");
		return;
	}

	tloge("########################################\n");
	mutex_lock(&g_mb_lock);
	for (i = 0; i < MAILBOX_PAGE_MAX; i++) {
		if (g_m_zone->pages[i].count != 0) {
			tloge("page[%02d], order=%02d, count=%d\n",
			      i, g_m_zone->pages[i].order, g_m_zone->pages[i].count);
			used += (1 << (uint32_t)g_m_zone->pages[i].order);
		}
	}
	tloge("total usage:%u/%u\n", used, MAILBOX_PAGE_MAX);
	tloge("----------------------------------------\n");

	for (i = 0; i < (unsigned int)g_max_oder; i++) {
		head = &g_m_zone->free_areas[i].page_list;
		if (list_empty(head) != 0) {
			tloge("order[%02d] is empty\n", i);
		} else {
			list_for_each_entry(pos, head, node)
				tloge("order[%02d]\n", i);
		}
	}
	mutex_unlock(&g_mb_lock);

	tloge("########################################\n");
}

#define MB_SHOW_LINE 64
#define BITS_OF_BYTE 8
static void mailbox_show_details(void)
{
	unsigned int i;
	unsigned int used = 0;
	unsigned int left = 0;
	unsigned int order = 0;

	if (!g_m_zone) {
		tloge("zone struct is NULL\n");
		return;
	}
	tloge("----- show mailbox details -----");
	mutex_lock(&g_mb_lock);
	for (i = 0; i < MAILBOX_PAGE_MAX; i++) {
		if (i % MB_SHOW_LINE == 0) {
			tloge("\n");
			tloge("%04d-%04d:", i, i + MB_SHOW_LINE);
		}
		if (g_m_zone->pages[i].count != 0) {
			left = 1 << (uint32_t)g_m_zone->pages[i].order;
			order = (uint32_t)g_m_zone->pages[i].order;
			used += (1 << (uint32_t)g_m_zone->pages[i].order);
		}
		if (left != 0) {
			left--;
			tloge("%01d", order);
		} else {
			tloge("X");
		}
		if (i > 1 && (i + 1) % (MB_SHOW_LINE / BITS_OF_BYTE) == 0)
			tloge(" ");
	}
	tloge("total usage:%u/%u\n", used, MAILBOX_PAGE_MAX);
	mutex_unlock(&g_mb_lock);
}

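/*
 * Buddy allocation: round the request up to 2^order pages, then scan the
 * free lists from that order upwards. The first free block found is taken;
 * if it is larger than needed, its tail is split off into progressively
 * smaller buddies which are returned to the corresponding free lists.
 */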
void *mailbox_alloc(size_t size, unsigned int flag)
{
	unsigned int i;
	struct mb_page_t *pos = NULL;
	struct list_head *head = NULL;
	int order = get_order(ALIGN(size, SZ_4K));
	void *addr = NULL;

	if ((size == 0) || !g_m_zone) {
		tlogw("alloc 0 size mailbox or zone struct is NULL\n");
		return NULL;
	}

	if (order > g_max_oder || order < 0) {
		tloge("invalid order %d\n", order);
		return NULL;
	}
	mutex_lock(&g_mb_lock);

	for (i = (unsigned int)order; i <= (unsigned int)g_max_oder; i++) {
		unsigned int j;

		head = &g_m_zone->free_areas[i].page_list;
		if (list_empty(head) != 0)
			continue;
		pos = list_first_entry(head, struct mb_page_t, node);
		pos->count = 1;
		pos->order = order;
		/* split the block and hang the remainders on lower free lists */
		for (j = (unsigned int)order; j < i; j++) {
			struct mb_page_t *new_page = pos + (1 << j);

			new_page->count = 0;
			new_page->order = (int)j;
			list_add_tail(&new_page->node, &g_m_zone->free_areas[j].page_list);
		}
		list_del(&pos->node);
		addr = (void *)mailbox_page_address(pos->page);
		break;
	}

	mutex_unlock(&g_mb_lock);
	if (addr && ((flag & MB_FLAG_ZERO) != 0)) {
		if (memset_s(addr, ALIGN(size, SZ_4K), 0, ALIGN(size, SZ_4K)) != 0) {
			tloge("clean mailbox failed\n");
			mailbox_free(addr);
			return NULL;
		}
	}
	return addr;
}

static void add_max_order_block(unsigned int idex)
{
	struct mb_page_t *self = NULL;

	if (idex != (unsigned int)g_max_oder || !g_m_zone)
		return;

	/*
	 * When idex equals the max order, no one is using the mailbox mem;
	 * hang the whole pool back on the highest-order free list.
	 */
	self = &g_m_zone->pages[0];
	list_add_tail(&self->node,
		      &g_m_zone->free_areas[g_max_oder].page_list);
}

static bool is_ptr_valid(const mailbox_page_t *page)
{
	if (!g_m_zone)
		return false;

	if (page < g_m_zone->all_pages ||
	    page >= (g_m_zone->all_pages + MAILBOX_PAGE_MAX)) {
		tloge("invalid ptr to free in mailbox\n");
		return false;
	}
	return true;
}

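/*
 * Buddy free: starting at the block's own order, compute the buddy index by
 * XOR-ing the page index with (1 << order). If the buddy block is free and
 * of the same order, unlink it and merge the pair into one block of the
 * next order, repeating until the buddy is busy or the top order is reached.
 */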
void mailbox_free(const void *ptr)
{
	unsigned int i;
	mailbox_page_t *page = NULL;
	struct mb_page_t *self = NULL;
	struct mb_page_t *buddy = NULL;
	unsigned int self_idx;
	unsigned int buddy_idx;

	if (!ptr || !g_m_zone) {
		tloge("invalid ptr or zone struct is NULL\n");
		return;
	}

	page = mailbox_virt_to_page((uint64_t)(uintptr_t)ptr);
	if (!is_ptr_valid(page))
		return;
	mutex_lock(&g_mb_lock);
	self_idx = page - g_m_zone->all_pages;
	self = &g_m_zone->pages[self_idx];
	if (self->count == 0) {
		tloge("already freed in mailbox\n");
		mutex_unlock(&g_mb_lock);
		return;
	}

	for (i = (unsigned int)self->order; i < (unsigned int)g_max_oder; i++) {
		self_idx = page - g_m_zone->all_pages;
		buddy_idx = self_idx ^ (uint32_t)(1 << i);
		self = &g_m_zone->pages[self_idx];
		buddy = &g_m_zone->pages[buddy_idx];
		self->count = 0;
		/* is buddy free */
		if ((unsigned int)buddy->order == i && buddy->count == 0) {
			/* release buddy */
			list_del(&buddy->node);
			/* combine self and buddy into one larger block */
			if (self_idx > buddy_idx) {
				page = buddy->page;
				buddy->order = (int)i + 1;
				self->order = -1;
			} else {
				self->order = (int)i + 1;
				buddy->order = -1;
			}
		} else {
			/* buddy is busy: just hang self on the free list */
			list_add_tail(&self->node,
				      &g_m_zone->free_areas[i].page_list);
			mutex_unlock(&g_mb_lock);
			return;
		}
	}

	add_max_order_block(i);
	mutex_unlock(&g_mb_lock);
}

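/* allocate one zeroed 4K mailbox page used to pack an SMC command */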
struct mb_cmd_pack *mailbox_alloc_cmd_pack(void)
{
	void *pack = mailbox_alloc(SZ_4K, MB_FLAG_ZERO);

	if (!pack)
		tloge("alloc mb cmd pack failed\n");

	return (struct mb_cmd_pack *)pack;
}

void *mailbox_copy_alloc(const void *src, size_t size)
{
	void *mb_ptr = NULL;

	if (!src || !size) {
		tloge("invalid src to alloc mailbox copy\n");
		return NULL;
	}

	mb_ptr = mailbox_alloc(size, 0);
	if (!mb_ptr) {
		tloge("alloc size %zu mailbox failed\n", size);
		return NULL;
	}

	if (memcpy_s(mb_ptr, size, src, size) != 0) {
		tloge("memcpy to mailbox failed\n");
		mailbox_free(mb_ptr);
		return NULL;
	}

	return mb_ptr;
}

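/*
 * Debugfs self-test support: every allocation made through the "opt" file
 * is recorded in mb_dbg_list under a non-zero index, so it can be freed
 * later by index or released wholesale via "reset".
 */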
struct mb_dbg_entry {
	struct list_head node;
	unsigned int idx;
	void *ptr;
};

static LIST_HEAD(mb_dbg_list);
static DEFINE_MUTEX(mb_dbg_lock);
static unsigned int g_mb_dbg_entry_count = 1;
static unsigned int g_mb_dbg_last_res; /* only cache 1 opt result */
static struct dentry *g_mb_dbg_dentry;

static unsigned int mb_dbg_add_entry(void *ptr)
{
	struct mb_dbg_entry *new_entry = NULL;
	unsigned int index = 0;

	new_entry = kmalloc(sizeof(*new_entry), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)new_entry)) {
		tloge("alloc entry failed\n");
		return 0;
	}

	INIT_LIST_HEAD(&new_entry->node);
	new_entry->ptr = ptr;
	mutex_lock(&mb_dbg_lock);
	new_entry->idx = g_mb_dbg_entry_count;

	/* index 0 is reserved as the failure sentinel, skip it on wrap */
	if ((g_mb_dbg_entry_count++) == 0)
		g_mb_dbg_entry_count++;
	list_add_tail(&new_entry->node, &mb_dbg_list);
	index = new_entry->idx;
	mutex_unlock(&mb_dbg_lock);

	return index;
}

static void mb_dbg_remove_entry(unsigned int idx)
{
	struct mb_dbg_entry *pos = NULL;
	struct mb_dbg_entry *temp = NULL;

	mutex_lock(&mb_dbg_lock);
	list_for_each_entry_safe(pos, temp, &mb_dbg_list, node) {
		if (pos->idx == idx) {
			mailbox_free(pos->ptr);
			list_del(&pos->node);
			kfree(pos);
			mutex_unlock(&mb_dbg_lock);
			return;
		}
	}
	mutex_unlock(&mb_dbg_lock);

	tloge("entry %u invalid\n", idx);
}

static void mb_dbg_reset(void)
{
	struct mb_dbg_entry *pos = NULL;
	struct mb_dbg_entry *tmp = NULL;

	mutex_lock(&mb_dbg_lock);
	list_for_each_entry_safe(pos, tmp, &mb_dbg_list, node) {
		mailbox_free(pos->ptr);
		list_del(&pos->node);
		kfree(pos);
	}
	/* restart numbering at 1: index 0 means allocation failure */
	g_mb_dbg_entry_count = 1;
	mutex_unlock(&mb_dbg_lock);
}

#define MB_WRITE_SIZE 64

static bool is_opt_write_param_valid(const struct file *filp,
	const char __user *ubuf, size_t cnt, const loff_t *ppos)
{
	if (!filp || !ppos || !ubuf)
		return false;

	if (cnt >= MB_WRITE_SIZE || cnt == 0)
		return false;

	return true;
}

static void alloc_dbg_entry(unsigned int alloc_size)
{
	unsigned int idx;
	void *ptr = NULL;

	ptr = mailbox_alloc(alloc_size, 0);
	if (!ptr) {
		tloge("alloc size=%u in mailbox failed\n", alloc_size);
		return;
	}

	idx = mb_dbg_add_entry(ptr);
	if (idx == 0)
		mailbox_free(ptr);
	g_mb_dbg_last_res = idx;
}

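/*
 * Commands accepted on the debugfs "opt" file (one per write):
 *   "reset"         free every debug allocation
 *   "alloc:<size>"  allocate <size> bytes; the result index can be read back
 *   "free:<idx>"    free the allocation recorded under <idx>
 * e.g. (assuming debugfs is mounted at /sys/kernel/debug):
 *   echo "alloc:1024" > /sys/kernel/debug/tz_mailbox/opt
 */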
static ssize_t mb_dbg_opt_write(struct file *filp,
	const char __user *ubuf, size_t cnt, loff_t *ppos)
{
	char buf[MB_WRITE_SIZE] = {0};
	char *cmd = NULL;
	char *value = NULL;
	unsigned int alloc_size;
	unsigned int free_idx;

	if (!is_opt_write_param_valid(filp, ubuf, cnt, ppos))
		return -EINVAL;

	if (copy_from_user(buf, ubuf, cnt) != 0)
		return -EFAULT;

	buf[cnt] = 0;
	value = buf;
	if (strncmp(value, "reset", strlen("reset")) == 0) {
		tlogi("mb dbg reset\n");
		mb_dbg_reset();
		return (ssize_t)cnt;
	}

	cmd = strsep(&value, ":");
	if (!cmd || !value) {
		tloge("no valid cmd or value for mb dbg\n");
		return -EFAULT;
	}

	if (strncmp(cmd, "alloc", strlen("alloc")) == 0) {
		if (kstrtou32(value, 10, &alloc_size) == 0)
			alloc_dbg_entry(alloc_size);
		else
			tloge("invalid value format for mb dbg\n");
	} else if (strncmp(cmd, "free", strlen("free")) == 0) {
		if (kstrtou32(value, 10, &free_idx) == 0)
			mb_dbg_remove_entry(free_idx);
		else
			tloge("invalid value format for mb dbg\n");
	} else {
		tloge("invalid format for mb dbg\n");
	}

	return (ssize_t)cnt;
}

static ssize_t mb_dbg_opt_read(struct file *filp, char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char buf[16] = {0};
	ssize_t ret;

	(void)(filp);

	ret = snprintf_s(buf, sizeof(buf), sizeof(buf) - 1, "%u\n", g_mb_dbg_last_res);
	if (ret < 0) {
		tloge("snprintf idx failed\n");
		return -EINVAL;
	}

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, ret);
}

static const struct file_operations g_mb_dbg_opt_fops = {
	.owner = THIS_MODULE,
	.read = mb_dbg_opt_read,
	.write = mb_dbg_opt_write,
};

static ssize_t mb_dbg_state_read(struct file *filp, char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	(void)cnt;
	(void)(filp);
	(void)(ubuf);
	(void)(ppos);
	mailbox_show_status();
	mailbox_show_details();
	return 0;
}

static const struct file_operations g_mb_dbg_state_fops = {
	.owner = THIS_MODULE,
	.read = mb_dbg_state_read,
};

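/*
 * Tell the TEE where the mailbox pool lives: the pool's physical address is
 * split into low/high words (value.a/value.b, shifted by ADDR_TRANS_NUM)
 * and sent via the GLOBAL_CMD_ID_REGISTER_MAILBOX SMC command.
 */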
static int mailbox_register(const void *mb_pool, unsigned int size)
{
	struct tc_ns_operation *operation = NULL;
	struct tc_ns_smc_cmd *smc_cmd = NULL;
	int ret = 0;

	smc_cmd = kzalloc(sizeof(*smc_cmd), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)smc_cmd)) {
		tloge("alloc smc_cmd failed\n");
		return -EIO;
	}

	operation = (struct tc_ns_operation *)(uintptr_t)get_operation_vaddr();
	if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)operation)) {
		tloge("alloc operation failed\n");
		ret = -EIO;
		goto free_smc_cmd;
	}

	operation->paramtypes = TEE_PARAM_TYPE_VALUE_INPUT |
		(TEE_PARAM_TYPE_VALUE_INPUT << TEE_PARAM_NUM);
	operation->params[0].value.a = mailbox_virt_to_phys((uintptr_t)mb_pool);
	operation->params[0].value.b =
		(uint64_t)mailbox_virt_to_phys((uintptr_t)mb_pool) >> ADDR_TRANS_NUM;
	operation->params[1].value.a = size;

	smc_cmd->cmd_type = CMD_TYPE_GLOBAL;
	smc_cmd->cmd_id = GLOBAL_CMD_ID_REGISTER_MAILBOX;
	smc_cmd->operation_phys = mailbox_virt_to_phys((uintptr_t)operation);
	smc_cmd->operation_h_phys =
		(uint64_t)mailbox_virt_to_phys((uintptr_t)operation) >> ADDR_TRANS_NUM;

	if (is_tee_rebooting())
		ret = send_smc_cmd_rebooting(TSP_REQUEST, 0, 0, smc_cmd);
	else
		ret = tc_ns_smc(smc_cmd);

	if (ret != 0) {
		tloge("register mailbox failed\n");
		ret = -EIO;
	}

	free_operation((uint64_t)(uintptr_t)operation);
	operation = NULL;
free_smc_cmd:
	kfree(smc_cmd);
	smc_cmd = NULL;
	return ret;
}

static void mailbox_debug_init(void)
{
#ifdef DEF_ENG
	g_mb_dbg_dentry = debugfs_create_dir("tz_mailbox", NULL);
	debugfs_create_file("opt", OPT_MODE, g_mb_dbg_dentry, NULL, &g_mb_dbg_opt_fops);
	debugfs_create_file("state", STATE_MODE, g_mb_dbg_dentry, NULL, &g_mb_dbg_state_fops);
#endif
}

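/*
 * Called when the TEE comes back after a reboot: wipe the pool contents and
 * register the same physical range with the TEE again.
 */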
int re_register_mailbox(void)
{
	if (!g_m_zone)
		return -EFAULT;

	if (g_m_zone->all_pages != NULL) {
		if (memset_s((void *)mailbox_page_address(g_m_zone->all_pages),
			MAILBOX_POOL_SIZE, 0, MAILBOX_POOL_SIZE) != EOK) {
			tloge("memset mailbox failed\n");
			return -EFAULT;
		}
		if (mailbox_register((const void *)mailbox_page_address(g_m_zone->all_pages),
			MAILBOX_POOL_SIZE) != 0) {
			tloge("register mailbox failed\n");
			return -EIO;
		}
	}

	return 0;
}

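/*
 * Pool bring-up: allocate the zone descriptor and the backing pages,
 * register the pool with the TEE, mark every page descriptor free, then
 * hang the whole pool as one max-order block on the top free list.
 */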
int mailbox_mempool_init(void)
{
	int i;
	struct mb_page_t *mb_page = NULL;
	struct mb_free_area_t *area = NULL;
	mailbox_page_t *all_pages = NULL;
	size_t zone_len;

	g_max_oder = get_order(MAILBOX_POOL_SIZE);
	tlogi("in this RE, mailbox max order is: %d\n", g_max_oder);

	/* zone len is fixed, will not overflow */
	zone_len = sizeof(*area) * (g_max_oder + 1) + sizeof(*g_m_zone);
	g_m_zone = kzalloc(zone_len, GFP_KERNEL);
	if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)g_m_zone)) {
		tloge("fail to alloc zone struct\n");
		return -ENOMEM;
	}

	all_pages = mailbox_alloc_pages(g_max_oder);
	if (!all_pages) {
		tloge("fail to alloc mailbox mempool\n");
		kfree(g_m_zone);
		g_m_zone = NULL;
		return -ENOMEM;
	}

	if (mailbox_register((const void *)mailbox_page_address(all_pages), MAILBOX_POOL_SIZE) != 0) {
		tloge("register mailbox failed\n");
		mailbox_free_pages(all_pages, g_max_oder);
		kfree(g_m_zone);
		g_m_zone = NULL;
		return -EIO;
	}

	for (i = 0; i < MAILBOX_PAGE_MAX; i++) {
		g_m_zone->pages[i].order = -1;
		g_m_zone->pages[i].count = 0;
		g_m_zone->pages[i].page = &all_pages[i];
	}

	g_m_zone->pages[0].order = g_max_oder;
	for (i = 0; i <= g_max_oder; i++) {
		area = &g_m_zone->free_areas[i];
		INIT_LIST_HEAD(&area->page_list);
		area->order = i;
	}

	mb_page = &g_m_zone->pages[0];
	list_add_tail(&mb_page->node, &area->page_list);
	g_m_zone->all_pages = all_pages;
	mutex_init(&g_mb_lock);
	mailbox_debug_init();

	return 0;
}

void free_mailbox_mempool(void)
{
	if (!g_m_zone)
		return;

	mailbox_free_pages(g_m_zone->all_pages, g_max_oder);
	g_m_zone->all_pages = NULL;
	kfree(g_m_zone);
	g_m_zone = NULL;

	if (!g_mb_dbg_dentry)
		return;
	debugfs_remove_recursive(g_mb_dbg_dentry);
	g_mb_dbg_dentry = NULL;
}