// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 * Copyright (C) 2017 Andes Technology Corporation
 */

#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/memory.h>
#include <asm/cacheflush.h>
#include <asm/patch.h>

#ifdef CONFIG_DYNAMIC_FTRACE
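/*
 * With dynamic ftrace, each traced function begins with MCOUNT_INSN_SIZE
 * bytes of patchable text: two 32-bit instructions that hold either a
 * pair of nops (tracing off) or an auipc/jalr call into an ftrace
 * trampoline (tracing on).  The helpers below rewrite that pair in place.
 */
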
int ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
{
	mutex_lock(&text_mutex);

	/*
	 * The code sequences we use for ftrace can't be patched while the
	 * kernel is running, so we need to use stop_machine() to modify them
	 * for now.  This doesn't play nice with text_mutex, so we use this
	 * flag to elide the check.
	 */
	riscv_patch_in_stop_machine = true;

	return 0;
}

int ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
{
	riscv_patch_in_stop_machine = false;
	mutex_unlock(&text_mutex);
	return 0;
}

static int ftrace_check_current_call(unsigned long hook_pos,
				     unsigned int *expected)
{
	unsigned int replaced[2];
	unsigned int nops[2] = {NOP4, NOP4};

	/* we expect nops at the hook position */
	if (!expected)
		expected = nops;

	/*
	 * Read the text we want to modify;
	 * return must be -EFAULT on read error
	 */
	if (copy_from_kernel_nofault(replaced, (void *)hook_pos,
			MCOUNT_INSN_SIZE))
		return -EFAULT;

	/*
	 * Make sure it is what we expect it to be;
	 * return must be -EINVAL on failed comparison
	 */
	if (memcmp(expected, replaced, sizeof(replaced))) {
		pr_err("%p: expected (%08x %08x) but got (%08x %08x)\n",
		       (void *)hook_pos, expected[0], expected[1], replaced[0],
		       replaced[1]);
		return -EINVAL;
	}

	return 0;
}

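/*
 * Rewrite the two-instruction patch site at @hook_pos: a call to @target
 * when @enable, or back to a pair of nops.  Roughly (a sketch; the exact
 * encodings are generated by make_call_t0()/make_call_ra() in
 * <asm/ftrace.h>):
 *
 *	auipc	reg, HI20(target - hook_pos)
 *	jalr	reg, LO12(target - hook_pos)(reg)
 *
 * with reg = t0 when @ra is false (function entry, leaves ra intact) and
 * reg = ra when @ra is true (an ordinary call inside a trampoline).
 */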
static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
				bool enable, bool ra)
{
	unsigned int call[2];
	unsigned int nops[2] = {NOP4, NOP4};

	if (ra)
		make_call_ra(hook_pos, target, call);
	else
		make_call_t0(hook_pos, target, call);

	/* Replace the auipc-jalr pair at once. Return -EPERM on write error. */
	if (patch_text_nosync((void *)hook_pos, enable ? call : nops,
			      MCOUNT_INSN_SIZE))
		return -EPERM;

	return 0;
}

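/*
 * Turn the nops at rec->ip into a call to @addr (an ftrace trampoline).
 * The t0 flavour of the call is used so the traced function's return
 * address in ra is left intact for the trampoline to consume.
 */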
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int call[2];

	make_call_t0(rec->ip, addr, call);

	if (patch_text_nosync((void *)rec->ip, call, MCOUNT_INSN_SIZE))
		return -EPERM;

	return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	unsigned int nops[2] = {NOP4, NOP4};

	if (patch_text_nosync((void *)rec->ip, nops, MCOUNT_INSN_SIZE))
		return -EPERM;

	return 0;
}

/*
 * This is called early on, and isn't wrapped by
 * ftrace_arch_code_modify_{prepare,post_process}() and therefore doesn't
 * hold text_mutex, which triggers a lockdep failure.  SMP isn't running,
 * so we could just directly poke the text, but it's simpler to take the
 * lock ourselves.
 */
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	int out;

	mutex_lock(&text_mutex);
	out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	mutex_unlock(&text_mutex);

	return out;
}

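/*
 * Point the tracer callback at @func by patching the ftrace_call and
 * ftrace_regs_call sites inside the arch trampolines (mcount-dyn.S).
 * These are ordinary calls within the trampoline, hence the ra variant.
 */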
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
				       (unsigned long)func, true, true);
	if (!ret) {
		ret = __ftrace_modify_call((unsigned long)&ftrace_regs_call,
					   (unsigned long)func, true, true);
	}

	return ret;
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
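/*
 * Retarget a call site from @old_addr to @addr.  First verify that the
 * site really contains the expected call to @old_addr, so an unexpected
 * or corrupted patch site is reported instead of silently overwritten.
 */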
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	unsigned int call[2];
	unsigned long caller = rec->ip;
	int ret;

	make_call_t0(caller, old_addr, call);
	ret = ftrace_check_current_call(caller, call);

	if (ret)
		return ret;

	return __ftrace_modify_call(caller, addr, true, false);
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Most of this function is copied from arm64.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * We don't suffer access faults, so no extra fault-recovery assembly
	 * is needed here.
	 */
	old = *parent;

	if (!function_graph_enter(old, self_addr, frame_pointer, parent))
		*parent = return_hooker;
}
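
/*
 * A sketch of the flow above (the authoritative logic lives in
 * kernel/trace/fgraph.c): @parent points at the slot holding the traced
 * function's return address.  function_graph_enter() saves that original
 * address on the return stack; if it succeeds, we redirect the slot to
 * return_to_handler, so the traced function "returns" into the graph-exit
 * handler, which fires the exit hook and then jumps to the real caller.
 */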
#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
extern void ftrace_graph_regs_call(void);
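/*
 * ftrace_graph_call and ftrace_graph_regs_call are patchable call sites
 * inside the arch ftrace trampolines (assembly, mcount-dyn.S); enabling
 * the graph tracer rewrites them into calls to prepare_ftrace_return().
 */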
int ftrace_enable_ftrace_graph_caller(void)
{
	int ret;

	ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call,
				   (unsigned long)&prepare_ftrace_return, true, true);
	if (ret)
		return ret;

	return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call,
				    (unsigned long)&prepare_ftrace_return, true, true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	int ret;

	ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call,
				   (unsigned long)&prepare_ftrace_return, false, true);
	if (ret)
		return ret;

	return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call,
				    (unsigned long)&prepare_ftrace_return, false, true);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */