// SPDX-License-Identifier: GPL-2.0
/*
 * Based on arch/arm64/kernel/ftrace.c
 *
 * Copyright (C) 2013 Linaro Limited
 * Copyright (C) 2020 Loongson Technology Corporation Limited
 */

#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>

#include <asm/inst.h>
#include <asm/module.h>

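/*
 * Patch one instruction at @pc. When @validate is true, first check that
 * the instruction currently at @pc matches @old.
 */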
static int ftrace_modify_code(unsigned long pc, u32 old, u32 new,
			      bool validate)
{
	u32 replaced;

	if (validate) {
		if (larch_insn_read((void *)pc, &replaced))
			return -EFAULT;

		if (replaced != old)
			return -EINVAL;
	}

	if (larch_insn_patch_text((void *)pc, new))
		return -EPERM;

	return 0;
}

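/*
 * Point the ftrace_call site at @func by patching in a new bl. Validation
 * is skipped (validate == false) because the current target of the bl is
 * not tracked here.
 */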
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long pc;
	u32 new;

	pc = (unsigned long)&ftrace_call;
	new = larch_insn_gen_bl(pc, (unsigned long)func);

	return ftrace_modify_code(pc, 0, new, false);
}

/*
 * The compiler has inserted 2 NOPs before the regular function prologue.
 * T series registers are available and safe because of LoongArch's psABI.
 *
 * At runtime, we can replace nop with bl to enable ftrace call and replace
 * bl with nop to disable ftrace call. The bl requires us to save the original
 * RA value, so it saves RA at t0 here.
 *
 * Details are:
 *
 * | Compiled   |       Disabled         |        Enabled         |
 * +------------+------------------------+------------------------+
 * | nop        | move     t0, ra        | move     t0, ra        |
 * | nop        | nop                    | bl      ftrace_caller  |
 * | func_body  | func_body              | func_body              |
 *
 * The RA value will be recovered by ftrace_regs_entry, and restored into RA
 * before returning to the regular function prologue. When a function is not
 * being traced, the "move t0, ra" is not harmful.
 */

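/*
 * Called once per call site at boot/module load: turn the first of the two
 * compiler-generated nops into "move t0, ra" (see the table above).
 */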
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long pc;
	u32 old, new;

	pc = rec->ip;
	old = larch_insn_gen_nop();
	new = larch_insn_gen_move(LOONGARCH_GPR_T0, LOONGARCH_GPR_RA);

	return ftrace_modify_code(pc, old, new, true);
}

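/* Find the module whose text contains @addr; a patchable site must have one. */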
static inline int __get_mod(struct module **mod, unsigned long addr)
{
	preempt_disable();
	*mod = __module_text_address(addr);
	preempt_enable();

	if (WARN_ON(!(*mod)))
		return -EINVAL;

	return 0;
}

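/* Map a trampoline address to the matching entry in the module's ftrace PLT. */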
static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
{
	struct plt_entry *plt = mod->arch.ftrace_trampolines;

	if (addr == FTRACE_ADDR)
		return &plt[FTRACE_PLT_IDX];
	if (addr == FTRACE_REGS_ADDR &&
			IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		return &plt[FTRACE_REGS_PLT_IDX];

	return NULL;
}

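/*
 * Resolve @addr to its PLT trampoline address so a bl from module text can
 * reach it. Returns -EINVAL (as an address) when no PLT entry exists.
 */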
static unsigned long get_plt_addr(struct module *mod, unsigned long addr)
{
	struct plt_entry *plt;

	plt = get_ftrace_plt(mod, addr);
	if (!plt) {
		pr_err("ftrace: no module PLT for %ps\n", (void *)addr);
		return -EINVAL;
	}

	return (unsigned long)plt;
}

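/*
 * Enable tracing for one call site: patch the second nop into a bl. A bl
 * reaches +/-128M, so out-of-range targets go through the module's PLT.
 */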
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long pc;
	long offset;
	u32 old, new;

	pc = rec->ip + LOONGARCH_INSN_SIZE;
	offset = (long)pc - (long)addr;

	if (offset < -SZ_128M || offset >= SZ_128M) {
		int ret;
		struct module *mod;

		ret = __get_mod(&mod, pc);
		if (ret)
			return ret;

		addr = get_plt_addr(mod, addr);
	}

	old = larch_insn_gen_nop();
	new = larch_insn_gen_bl(pc, addr);

	return ftrace_modify_code(pc, old, new, true);
}

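/* Disable tracing for one call site: the inverse of ftrace_make_call(). */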
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	unsigned long pc;
	long offset;
	u32 old, new;

	pc = rec->ip + LOONGARCH_INSN_SIZE;
	offset = (long)pc - (long)addr;

	if (offset < -SZ_128M || offset >= SZ_128M) {
		int ret;
		struct module *mod;

		ret = __get_mod(&mod, pc);
		if (ret)
			return ret;

		addr = get_plt_addr(mod, addr);
	}

	new = larch_insn_gen_nop();
	old = larch_insn_gen_bl(pc, addr);

	return ftrace_modify_code(pc, old, new, true);
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
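/*
 * Re-point an already-enabled call site from @old_addr to @addr, e.g. when
 * switching between the regs-saving and plain trampolines.
 */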
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			unsigned long addr)
{
	unsigned long pc;
	long offset;
	u32 old, new;

	pc = rec->ip + LOONGARCH_INSN_SIZE;
	offset = (long)pc - (long)addr;

	if (offset < -SZ_128M || offset >= SZ_128M) {
		int ret;
		struct module *mod;

		ret = __get_mod(&mod, pc);
		if (ret)
			return ret;

		addr = get_plt_addr(mod, addr);

		old_addr = get_plt_addr(mod, old_addr);
	}

	old = larch_insn_gen_bl(pc, old_addr);
	new = larch_insn_gen_bl(pc, addr);

	return ftrace_modify_code(pc, old, new, true);
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */

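/*
 * FTRACE_MAY_SLEEP lets the core code reschedule while it walks and patches
 * every traced call site, which can take a while on large kernels.
 */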
void arch_ftrace_update_code(int command)
{
	command |= FTRACE_MAY_SLEEP;
	ftrace_modify_all_code(command);
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_call(void);

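/*
 * Called from the graph-tracer trampoline: replace the saved return address
 * at *parent with return_to_handler so the traced function returns through
 * the graph tracer.
 */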
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;

	if (!function_graph_enter(old, self_addr, 0, parent))
		*parent = return_hooker;
}

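/* Patch the ftrace_graph_call site between "b ftrace_graph_caller" and nop. */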
static int ftrace_modify_graph_caller(bool enable)
{
	unsigned long pc, func;
	u32 branch, nop;

	pc = (unsigned long)&ftrace_graph_call;
	func = (unsigned long)&ftrace_graph_caller;

	branch = larch_insn_gen_b(pc, func);
	nop = larch_insn_gen_nop();

	if (enable)
		return ftrace_modify_code(pc, nop, branch, true);
	else
		return ftrace_modify_code(pc, branch, nop, true);
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KPROBES_ON_FTRACE
/* Ftrace callback handler for kprobes -- called with preempt disabled */
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *ops, struct pt_regs *regs)
{
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	p = get_kprobe((kprobe_opcode_t *)ip);
	if (unlikely(!p) || kprobe_disabled(p))
		return;

	kcb = get_kprobe_ctlblk();
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
	} else {
		unsigned long orig_ip = regs->csr_era;

		regs->csr_era = ip;

		__this_cpu_write(current_kprobe, p);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		if (!p->pre_handler || !p->pre_handler(p, regs)) {
			/*
			 * Emulate singlestep (and also recover regs->csr_era)
			 * as if there is a nop.
			 */
			regs->csr_era = (unsigned long)p->addr + LOONGARCH_INSN_SIZE;
			if (unlikely(p->post_handler)) {
				kcb->kprobe_status = KPROBE_HIT_SSDONE;
				p->post_handler(p, regs, 0);
			}
			regs->csr_era = orig_ip;
		}
		/*
		 * If pre_handler returns !0, it changes regs->csr_era. We have
		 * to skip emulating post_handler.
		 */
		__this_cpu_write(current_kprobe, NULL);
	}
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);

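/* Nothing to prepare: a kprobe on an ftrace site needs no instruction slot. */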
int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.insn = NULL;
	return 0;
}
#endif /* CONFIG_KPROBES_ON_FTRACE */