/*
 * Copyright (c) 2023 Institute of Parallel And Distributed Systems (IPADS), Shanghai Jiao Tong University (SJTU)
 * Licensed under the Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *     http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
 * PURPOSE.
 * See the Mulan PSL v2 for more details.
 */
#include <common/types.h>
#include <common/errno.h>
#include <common/debug.h>
#include <common/macro.h>
#include <common/kprint.h>
#include <object/thread.h>
#include <sched/sched.h>
#include <mm/uaccess.h>
#include <mm/kmalloc.h>
#include <mm/vmspace.h>
#include <arch/mmu.h>
#include <common/backtrace.h>
#include <sched/context.h>

#if ENABLE_BACKTRACE_FUNC == ON
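/*
 * On AArch64, x29 serves as the frame pointer. Reading it yields the head
 * of the current chain of stack frame records.
 */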
static inline __attribute__((always_inline)) u64 read_fp(void)
{
        u64 fp;
        __asm__ __volatile__("mov %0, x29" : "=r"(fp));
        return fp;
}

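/*
 * Fill pc_buf/fp_buf with the return addresses and frame pointers of the
 * current call chain, and store the next instruction pointer of
 * current_thread into ip. Returns the number of frames recorded.
 */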
int set_backtrace_data(void *pc_buf, void *fp_buf, void *ip)
{
        u64 kbuf[2];
        u64 lr = 0;
        u64 fp = read_fp();
        int iskernel = true;
        int count = 1;
        int ret;
        paddr_t pa;
        pte_t *pte;

        *(u64 *)ip = arch_get_thread_next_ip(current_thread);

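        /*
         * AAPCS64 frame record: fp points at two u64s, the saved x29
         * (caller's frame pointer) at offset 0 and the saved x30 (return
         * address) at offset 8. Following the saved fps walks the chain.
         */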
        while (1) {
                if (iskernel) {
                        lr = ((u64 *)fp)[1];
                        fp = ((u64 *)fp)[0];
                } else {
                        ret = query_in_pgtbl(current_thread->vmspace, fp, &pa, &pte);
                        if (ret != 0) {
                                goto out;
                        }
                        /*
                         * We do not access the user's stack directly in the
                         * kernel. Instead, we copy the frame record (saved
                         * X29 & X30) from the user's stack into a kernel
                         * buffer with copy_from_user, which validates the
                         * address range before copying the data.
                         */
                        ret = copy_from_user(kbuf, (void *)fp, 2 * sizeof(u64));
                        if (ret != 0) {
                                goto out;
                        }
                        lr = kbuf[1];
                        fp = kbuf[0];
                }
                if (fp == 0 || count >= BACKTRACE_MAX_COUNT) {
                        goto out;
                }

                if (iskernel && fp < KBASE) {
                        /* The saved fp points below KBASE: the remaining frames live on the user stack. */
                        iskernel = false;
                }

                ((u64 *)pc_buf)[count - 1] = lr;
                ((u64 *)fp_buf)[count - 1] = fp;
                count++;
        }

out:
        return count - 1;
}

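/*
 * Walk the current call chain and print one line per frame to the kernel
 * log: kernel frames first, then user frames once fp drops below KBASE.
 */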
int backtrace(void)
{
        u64 *kbuf;
        u64 lr = 0;
        u64 fp = read_fp();
        int iskernel = true;
        int count = 1;
        struct vmregion *vmr;

        kbuf = (u64 *)kmalloc(2 * sizeof(u64));
        if (kbuf == NULL) {
                return -ENOMEM;
        }

        print_thread(current_thread);

        kinfo("kernel:\n");

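        /* The frame walk mirrors set_backtrace_data, printing each frame. */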
        while (1) {
                if (iskernel) {
                        lr = ((u64 *)fp)[1];
                        fp = ((u64 *)fp)[0];
                } else {
                        vmr = find_vmr_for_va(current_thread->vmspace, (vaddr_t)fp);
                        if (vmr == NULL)
                                goto out;
                        /*
                         * We do not access the user's stack directly in the
                         * kernel. Instead, we copy the frame record (saved
                         * X29 & X30) from the user's stack into a kernel
                         * buffer with copy_from_user, which validates the
                         * address range before copying the data.
                         */
                        if (copy_from_user(kbuf, (void *)fp, 2 * sizeof(u64)) != 0)
                                goto out;
                        lr = kbuf[1];
                        fp = kbuf[0];
                }
                if (fp == 0 || count >= BACKTRACE_MAX_COUNT)
                        goto out;

                if (iskernel && fp < KBASE) {
                        kinfo("user:\n");
                        iskernel = false;
                }

                kinfo("\tbacktrace:%d, fp = %lx, pc = %lx\n", count, fp, lr);
                count++;
        }

out:
        kfree(kbuf);
        return count;
}
#else
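/* Backtracing disabled: keep the interface but report zero frames. */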
int set_backtrace_data(void *pc_buf, void *fp_buf, void *ip)
{
        return 0;
}

int backtrace(void)
{
        return 0;
}
#endif