// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>

#include <trace/syscall.h>

#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>
#include <asm/text-patching.h>

#ifdef CONFIG_DYNAMIC_FTRACE

static int ftrace_poke_late = 0;

void ftrace_arch_code_modify_prepare(void)
    __acquires(&text_mutex)
{
	/*
	 * Hold text_mutex to prevent module loading and live kernel
	 * patching from changing the text permissions while ftrace has
	 * them set to "read/write".
	 */
	mutex_lock(&text_mutex);
	ftrace_poke_late = 1;
}

void ftrace_arch_code_modify_post_process(void)
    __releases(&text_mutex)
{
	/*
	 * ftrace_make_{call,nop}() may be called during
	 * module load, and we need to finish the text_poke_queue()
	 * that they do, here.
	 */
	text_poke_finish();
	ftrace_poke_late = 0;
	mutex_unlock(&text_mutex);
}

static const char *ftrace_nop_replace(void)
{
	return x86_nops[5];
}

static const char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	/*
	 * No need to translate into a callthunk. The trampoline does
	 * the depth accounting itself.
	 */
	return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
}
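
/*
 * Both replacements are MCOUNT_INSN_SIZE (5) bytes, so they can be
 * swapped in place at a patch site. Roughly, a traced function's first
 * instruction toggles between (illustrative, not build code):
 *
 *	call ftrace_caller	; e8 <rel32>		(tracing on)
 *	nopl 0x0(%rax,%rax,1)	; 0f 1f 44 00 00	(tracing off)
 *
 * where x86_nops[5] supplies the kernel's canonical 5-byte NOP.
 */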

static int ftrace_verify_code(unsigned long ip, const char *old_code)
{
	char cur_code[MCOUNT_INSN_SIZE];

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug were to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read the code with copy_from_kernel_nofault(), and make
	 * sure what we read is what we expected it to be before modifying it.
	 */
	/* read the text we want to modify */
	if (copy_from_kernel_nofault(cur_code, (void *)ip, MCOUNT_INSN_SIZE)) {
		WARN_ON(1);
		return -EFAULT;
	}

	/* Make sure it is what we expect it to be */
	if (memcmp(cur_code, old_code, MCOUNT_INSN_SIZE) != 0) {
		ftrace_expected = old_code;
		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}

/*
 * Marked __ref because it calls text_poke_early() which is .init.text. That is
 * ok because that call will happen early, during boot, when .init sections are
 * still present.
 */
static int __ref
ftrace_modify_code_direct(unsigned long ip, const char *old_code,
			  const char *new_code)
{
	int ret = ftrace_verify_code(ip, old_code);
	if (ret)
		return ret;

	/* replace the text with the new text */
	if (ftrace_poke_late)
		text_poke_queue((void *)ip, new_code, MCOUNT_INSN_SIZE, NULL);
	else
		text_poke_early((void *)ip, new_code, MCOUNT_INSN_SIZE);
	return 0;
}
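
/*
 * The two paths above differ in safety requirements: text_poke_early()
 * is a plain memcpy, which is only safe while no other CPU can execute
 * the bytes (early boot, or module text that is not yet live).
 * text_poke_queue() batches the write for the INT3-based live-patching
 * machinery, flushed later by text_poke_finish().
 */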

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	const char *new, *old;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace();

	/*
	 * On boot up, and when modules are loaded, the MCOUNT_ADDR
	 * is converted to a nop, and will never become MCOUNT_ADDR
	 * again. This code is either running before SMP (on boot up)
	 * or before the code will ever be executed (module load).
	 * We do not want to use the breakpoint version in this case,
	 * just modify the code directly.
	 */
	if (addr == MCOUNT_ADDR)
		return ftrace_modify_code_direct(ip, old, new);

	/*
	 * x86 overrides ftrace_replace_code -- this function will never be used
	 * in this case.
	 */
	WARN_ONCE(1, "invalid use of ftrace_make_nop");
	return -EINVAL;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	const char *new, *old;

	old = ftrace_nop_replace();
	new = ftrace_call_replace(ip, addr);

	/* Should only be called when module is loaded */
	return ftrace_modify_code_direct(ip, old, new);
}

/*
 * Should never be called:
 *  It is only reached via __ftrace_replace_code(), which is called by
 *  ftrace_replace_code() (overridden on x86) and by ftrace_update_code(),
 *  which only turns mcount calls into nops or nops into function calls,
 *  never converting a call site from one ftrace_ops variant to another --
 *  which is what ftrace_modify_call() is for.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	WARN_ON(1);
	return -EINVAL;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip;
	const char *new;

	ip = (unsigned long)(&ftrace_call);
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

	ip = (unsigned long)(&ftrace_regs_call);
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

	return 0;
}

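/*
 * Batch-update all patch sites. Two passes: first verify that every
 * site still holds the bytes we expect, then queue the new
 * instructions and flush them with one text_poke_finish(). The poke
 * machinery patches live text by writing an INT3 over the first byte,
 * syncing cores, writing the instruction tail, and finally restoring
 * the first byte, so no CPU can execute a half-written instruction;
 * batching pays that core synchronization once per flush rather than
 * once per call site.
 */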
void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	const char *new, *old;
	int ret;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		switch (ftrace_test_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;

		case FTRACE_UPDATE_MAKE_CALL:
			old = ftrace_nop_replace();
			break;

		case FTRACE_UPDATE_MODIFY_CALL:
		case FTRACE_UPDATE_MAKE_NOP:
			old = ftrace_call_replace(rec->ip, ftrace_get_addr_curr(rec));
			break;
		}

		ret = ftrace_verify_code(rec->ip, old);
		if (ret) {
			ftrace_expected = old;
			ftrace_bug(ret, rec);
			ftrace_expected = NULL;
			return;
		}
	}

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		switch (ftrace_test_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;

		case FTRACE_UPDATE_MAKE_CALL:
		case FTRACE_UPDATE_MODIFY_CALL:
			new = ftrace_call_replace(rec->ip, ftrace_get_addr_new(rec));
			break;

		case FTRACE_UPDATE_MAKE_NOP:
			new = ftrace_nop_replace();
			break;
		}

		text_poke_queue((void *)rec->ip, new, MCOUNT_INSN_SIZE, NULL);
		ftrace_update_record(rec, enable);
	}
	text_poke_finish();
}

void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

/* Currently only x86_64 supports dynamic trampolines */
#ifdef CONFIG_X86_64

#ifdef CONFIG_MODULES
#include <linux/moduleloader.h>
/* Module allocation simplifies allocating memory for code */
static inline void *alloc_tramp(unsigned long size)
{
	return module_alloc(size);
}
static inline void tramp_free(void *tramp)
{
	module_memfree(tramp);
}
#else
/* Trampolines can only be created if modules are supported */
static inline void *alloc_tramp(unsigned long size)
{
	return NULL;
}
static inline void tramp_free(void *tramp) { }
#endif

/* Defined as markers to the end of the ftrace default trampolines */
extern void ftrace_regs_caller_end(void);
extern void ftrace_caller_end(void);
extern void ftrace_caller_op_ptr(void);
extern void ftrace_regs_caller_op_ptr(void);
extern void ftrace_regs_caller_jmp(void);

/* movq function_trace_op(%rip), %rdx */
/* 0x48 0x8b 0x15 <offset to function_trace_op (4 bytes)> */
#define OP_REF_SIZE	7

/*
 * The ftrace_ops is passed to the function callback. Since the
 * trampoline only services a single ftrace_ops, we can pass in
 * that ops directly.
 *
 * The ftrace_op_code_union is used to create a pointer to the
 * ftrace_ops that will be passed to the callback function.
 */
union ftrace_op_code_union {
	char code[OP_REF_SIZE];
	struct {
		char op[3];
		int offset;
	} __attribute__((packed));
};

#define RET_SIZE \
	(IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) ? 5 : 1 + IS_ENABLED(CONFIG_MITIGATION_SLS))
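
/*
 * RET_SIZE is the room reserved for the trampoline's terminating
 * return: with CONFIG_MITIGATION_RETPOLINE it is a 5-byte
 * "jmp __x86_return_thunk"; otherwise a 1-byte ret, plus one byte for
 * the int3 that CONFIG_MITIGATION_SLS places after returns to stop
 * straight-line speculation.
 */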

static unsigned long
create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
{
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long op_offset;
	unsigned long call_offset;
	unsigned long jmp_offset;
	unsigned long offset;
	unsigned long npages;
	unsigned long size;
	unsigned long *ptr;
	void *trampoline;
	void *ip, *dest;
	/* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
	unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
	unsigned const char retq[] = { RET_INSN_OPCODE, INT3_INSN_OPCODE };
	union ftrace_op_code_union op_ptr;
	int ret;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		start_offset = (unsigned long)ftrace_regs_caller;
		end_offset = (unsigned long)ftrace_regs_caller_end;
		op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
		call_offset = (unsigned long)ftrace_regs_call;
		jmp_offset = (unsigned long)ftrace_regs_caller_jmp;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		end_offset = (unsigned long)ftrace_caller_end;
		op_offset = (unsigned long)ftrace_caller_op_ptr;
		call_offset = (unsigned long)ftrace_call;
		jmp_offset = 0;
	}
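
	/*
	 * Sketch of the buffer built below:
	 *
	 *	trampoline ............ copy of ftrace_(regs_)caller
	 *	trampoline + size ..... ret (or jmp to the return thunk)
	 *	trampoline + size
	 *		   + RET_SIZE . pointer to this ops
	 */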

	size = end_offset - start_offset;

	/*
	 * Allocate enough size to store the ftrace_caller code,
	 * the return instruction, as well as the address of the
	 * ftrace_ops this trampoline is used for.
	 */
	trampoline = alloc_tramp(size + RET_SIZE + sizeof(void *));
	if (!trampoline)
		return 0;

	*tramp_size = size + RET_SIZE + sizeof(void *);
	npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE);

	/* Copy ftrace_caller onto the trampoline memory */
	ret = copy_from_kernel_nofault(trampoline, (void *)start_offset, size);
	if (WARN_ON(ret < 0))
		goto fail;

	ip = trampoline + size;
	if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
		__text_gen_insn(ip, JMP32_INSN_OPCODE, ip, x86_return_thunk, JMP32_INSN_SIZE);
	else
		memcpy(ip, retq, sizeof(retq));

	/* No need to test direct calls on created trampolines */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		/* NOP the jnz 1f; but make sure it's a 2 byte jnz */
		ip = trampoline + (jmp_offset - start_offset);
		if (WARN_ON(*(char *)ip != 0x75))
			goto fail;
		ret = copy_from_kernel_nofault(ip, x86_nops[2], 2);
		if (ret < 0)
			goto fail;
	}

	/*
	 * The address of the ftrace_ops that is used for this trampoline
	 * is stored at the end of the trampoline. This will be used to
	 * load the third parameter for the callback. Basically, that
	 * location at the end of the trampoline takes the place of
	 * the global function_trace_op variable.
	 */

	ptr = (unsigned long *)(trampoline + size + RET_SIZE);
	*ptr = (unsigned long)ops;

	op_offset -= start_offset;
	memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);

	/* Are we pointing to the reference? */
	if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0))
		goto fail;

	/* Load the contents of ptr into the callback parameter */
	offset = (unsigned long)ptr;
	offset -= (unsigned long)trampoline + op_offset + OP_REF_SIZE;

	op_ptr.offset = offset;

	/* put in the new offset to the ftrace_ops */
	memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);
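
	/*
	 * The offset math above is standard RIP-relative addressing:
	 * the 4-byte displacement is taken from the end of the 7-byte
	 * movq, i.e. target - (insn + OP_REF_SIZE), so at run time
	 * "movq <offset>(%rip), %rdx" loads *ptr, this ops' address.
	 */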

	/* put in the call to the function */
	mutex_lock(&text_mutex);
	call_offset -= start_offset;
	/*
	 * No need to translate into a callthunk. The trampoline does
	 * the depth accounting before the call already.
	 */
	dest = ftrace_ops_get_func(ops);
	memcpy(trampoline + call_offset,
	       text_gen_insn(CALL_INSN_OPCODE, trampoline + call_offset, dest),
	       CALL_INSN_SIZE);
	mutex_unlock(&text_mutex);

	/* The ALLOC_TRAMP flag lets us know we created it */
	ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;

	set_memory_rox((unsigned long)trampoline, npages);
	return (unsigned long)trampoline;
fail:
	tramp_free(trampoline);
	return 0;
}
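
/*
 * Resulting control flow, for a traced function foo attached to a
 * single ops (illustrative only):
 *
 *	foo:
 *	  call ops->trampoline		; the patched 5-byte call site
 *	ops->trampoline:
 *	  ...save args/regs...
 *	  movq <ops>(%rip), %rdx	; per-ops pointer patched above
 *	  call ops->func		; the registered callback
 *	  ...restore...
 *	  ret				; or jmp to the return thunk
 */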

void set_ftrace_ops_ro(void)
{
	struct ftrace_ops *ops;
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long npages;
	unsigned long size;

	do_for_each_ftrace_op(ops, ftrace_ops_list) {
		if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
			continue;

		if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
			start_offset = (unsigned long)ftrace_regs_caller;
			end_offset = (unsigned long)ftrace_regs_caller_end;
		} else {
			start_offset = (unsigned long)ftrace_caller;
			end_offset = (unsigned long)ftrace_caller_end;
		}
		size = end_offset - start_offset;
		size = size + RET_SIZE + sizeof(void *);
		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		set_memory_ro((unsigned long)ops->trampoline, npages);
	} while_for_each_ftrace_op(ops);
}

static unsigned long calc_trampoline_call_offset(bool save_regs)
{
	unsigned long start_offset;
	unsigned long call_offset;

	if (save_regs) {
		start_offset = (unsigned long)ftrace_regs_caller;
		call_offset = (unsigned long)ftrace_regs_call;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		call_offset = (unsigned long)ftrace_call;
	}

	return call_offset - start_offset;
}
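
/*
 * Because every trampoline is a byte-for-byte copy of
 * ftrace_(regs_)caller, the distance computed above is also the offset
 * of the call slot inside any trampoline create_trampoline() made, so
 * trampoline + offset addresses the call instruction to patch.
 */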

void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
{
	ftrace_func_t func;
	unsigned long offset;
	unsigned long ip;
	unsigned int size;
	const char *new;

	if (!ops->trampoline) {
		ops->trampoline = create_trampoline(ops, &size);
		if (!ops->trampoline)
			return;
		ops->trampoline_size = size;
		return;
	}

	/*
	 * The ftrace_ops caller may set up its own trampoline.
	 * In such a case, this code must not modify it.
	 */
	if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	ip = ops->trampoline + offset;
	func = ftrace_ops_get_func(ops);

	mutex_lock(&text_mutex);
	/* Do a safe modify in case the trampoline is executing */
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
	mutex_unlock(&text_mutex);
}

/* Return the address of the function the trampoline calls */
static void *addr_from_call(void *ptr)
{
	union text_poke_insn call;
	int ret;

	ret = copy_from_kernel_nofault(&call, ptr, CALL_INSN_SIZE);
	if (WARN_ON_ONCE(ret < 0))
		return NULL;

	/* Make sure this is a call */
	if (WARN_ON_ONCE(call.opcode != CALL_INSN_OPCODE)) {
		pr_warn("Expected E8, got %x\n", call.opcode);
		return NULL;
	}

	return ptr + CALL_INSN_SIZE + call.disp;
}
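
/*
 * Decoding note: a near call is "e8 <rel32>", with rel32 relative to
 * the address of the *next* instruction, hence the
 * ptr + CALL_INSN_SIZE + call.disp computation above.
 */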

/*
 * If the ops->trampoline was not allocated, then it probably
 * has a static trampoline func, or is the ftrace caller itself.
 */
static void *static_tramp_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;
	bool save_regs = rec->flags & FTRACE_FL_REGS_EN;
	void *ptr;

	if (ops && ops->trampoline) {
#if !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS) && \
	defined(CONFIG_FUNCTION_GRAPH_TRACER)
		/*
		 * The function graph tracer is the only case we know of
		 * that sets a static trampoline.
		 */
		if (ops->trampoline == FTRACE_GRAPH_ADDR)
			return (void *)prepare_ftrace_return;
#endif
		return NULL;
	}

	offset = calc_trampoline_call_offset(save_regs);

	if (save_regs)
		ptr = (void *)FTRACE_REGS_ADDR + offset;
	else
		ptr = (void *)FTRACE_ADDR + offset;

	return addr_from_call(ptr);
}

void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;

	/* If we didn't allocate this trampoline, consider it static */
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return static_tramp_func(ops, rec);

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	return addr_from_call((void *)ops->trampoline + offset);
}

void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	tramp_free((void *)ops->trampoline);
	ops->trampoline = 0;
}

#endif /* CONFIG_X86_64 */
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#if defined(CONFIG_DYNAMIC_FTRACE) && !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS)
extern void ftrace_graph_call(void);
static const char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
{
	return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr);
}

static int ftrace_mod_jmp(unsigned long ip, void *func)
{
	const char *new;

	new = ftrace_jmp_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
	return 0;
}
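
/*
 * The 5-byte jmp written above is the same size as the site it
 * patches (MCOUNT_INSN_SIZE), so enabling or disabling the graph
 * caller below is just retargeting one jmp between
 * ftrace_graph_caller and ftrace_stub.
 */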

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_stub);
}
#endif /* CONFIG_DYNAMIC_FTRACE && !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */

/*
 * Hook the return address and push it onto the stack of return addrs
 * in the current thread info.
 */
void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	int bit;

	/*
	 * When resuming from suspend-to-ram, this function can be indirectly
	 * called from early CPU startup code while the CPU is in real mode,
	 * which would fail miserably.  Make sure the stack pointer is a
	 * virtual address.
	 *
	 * This check isn't as accurate as virt_addr_valid(), but it should be
	 * good enough for this purpose, and it's fast.
	 */
	if (unlikely((long)__builtin_frame_address(0) >= 0))
		return;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	bit = ftrace_test_recursion_trylock(ip, *parent);
	if (bit < 0)
		return;

	if (!function_graph_enter(*parent, ip, frame_pointer, parent))
		*parent = return_hooker;

	ftrace_test_recursion_unlock(bit);
}

#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
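/*
 * With DYNAMIC_FTRACE_WITH_ARGS the graph tracer rides the normal
 * ftrace_caller trampoline. At function entry the stack pointer taken
 * from fregs points at the traced function's return address, so it is
 * passed as the "parent" slot for prepare_ftrace_return() to rewrite.
 */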
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct pt_regs *regs = &fregs->regs;
	unsigned long *stack = (unsigned long *)kernel_stack_pointer(regs);

	prepare_ftrace_return(ip, stack, 0);
}
#endif

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */