/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 */

#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>

#ifdef CONFIG_DYNAMIC_FTRACE

void ftrace_disable_code(void);
void ftrace_disable_return(void);
void ftrace_call_code(void);
void ftrace_nop_code(void);

#define FTRACE_INSN_SIZE 4

#ifdef CONFIG_64BIT

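/*
 * How the templates below fit together (the compiler-generated call site
 * itself is not part of this file, so its exact layout is an assumption):
 *
 * With -pg every traced function starts with an mcount call block whose
 * first instruction is expected to be "stg %r14,8(%r15)" (6 bytes,
 * e3e0 f008 0024). Only the first FTRACE_INSN_SIZE (4) bytes of the site
 * are rewritten at runtime:
 *
 *  - ftrace_call_code:    the leading 4 bytes of that stg
 *                         (tracing enabled).
 *  - ftrace_nop_code:     a 4 byte "j" over the whole mcount block
 *                         (tracing disabled).
 *  - ftrace_disable_code: the full replacement block written once over the
 *                         call site by ftrace_make_initial_nop. The
 *                         ".word 0x0024" supplies the trailing two bytes of
 *                         the stg, so every later enable/disable transition
 *                         is a single 4 byte write. In the enabled state the
 *                         block loads the current ftrace function from
 *                         lowcore (__LC_FTRACE_FUNC) and calls it via basr.
 */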
asm(
	"	.align	4\n"
	"ftrace_disable_code:\n"
	"	j	0f\n"
	"	.word	0x0024\n"
	"	lg	%r1,"__stringify(__LC_FTRACE_FUNC)"\n"
	"	basr	%r14,%r1\n"
	"ftrace_disable_return:\n"
	"	lg	%r14,8(15)\n"
	"	lgr	%r0,%r0\n"
	"0:\n");

asm(
	"	.align	4\n"
	"ftrace_nop_code:\n"
	"	j	.+"__stringify(MCOUNT_INSN_SIZE)"\n");

asm(
	"	.align	4\n"
	"ftrace_call_code:\n"
	"	stg	%r14,8(%r15)\n");

#else /* CONFIG_64BIT */

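/*
 * The 31-bit templates mirror the 64-bit ones, using st/l instead of
 * stg/lg. The trailing "bcr 0,%r7" no-ops only pad ftrace_disable_code
 * out to MCOUNT_INSN_SIZE bytes so that the 0: label again lands at the
 * end of the original call block; the extra "j 0f" after restoring %r14
 * skips over that padding at run time.
 */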
asm(
	"	.align	4\n"
	"ftrace_disable_code:\n"
	"	j	0f\n"
	"	l	%r1,"__stringify(__LC_FTRACE_FUNC)"\n"
	"	basr	%r14,%r1\n"
	"ftrace_disable_return:\n"
	"	l	%r14,4(%r15)\n"
	"	j	0f\n"
	"	bcr	0,%r7\n"
	"	bcr	0,%r7\n"
	"	bcr	0,%r7\n"
	"	bcr	0,%r7\n"
	"	bcr	0,%r7\n"
	"	bcr	0,%r7\n"
	"0:\n");

asm(
	"	.align	4\n"
	"ftrace_nop_code:\n"
	"	j	.+"__stringify(MCOUNT_INSN_SIZE)"\n");

asm(
	"	.align	4\n"
	"ftrace_call_code:\n"
	"	st	%r14,4(%r15)\n");

#endif /* CONFIG_64BIT */

static int ftrace_modify_code(unsigned long ip,
			      void *old_code, int old_size,
			      void *new_code, int new_size)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	/*
	 * Note: Due to modules, code can disappear and change.
	 * We need to protect against faulting as well as code
	 * changing by using the probe_kernel_* functions.
	 * This, however, is just a simple sanity check.
	 */
	if (probe_kernel_read(replaced, (void *)ip, old_size))
		return -EFAULT;
	if (memcmp(replaced, old_code, old_size) != 0)
		return -EINVAL;
	if (probe_kernel_write((void *)ip, new_code, new_size))
		return -EPERM;
	return 0;
}

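/*
 * Initial conversion of a call site, reached via ftrace_make_nop() with
 * addr == MCOUNT_ADDR at boot or module load: the compare only checks the
 * first FTRACE_INSN_SIZE bytes against ftrace_call_code, but the write
 * replaces the whole MCOUNT_INSN_SIZE block with ftrace_disable_code.
 * All later enable/disable transitions then only touch the first
 * FTRACE_INSN_SIZE bytes (see ftrace_make_nop/ftrace_make_call below).
 */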
static int ftrace_make_initial_nop(struct module *mod, struct dyn_ftrace *rec,
				   unsigned long addr)
{
	return ftrace_modify_code(rec->ip,
				  ftrace_call_code, FTRACE_INSN_SIZE,
				  ftrace_disable_code, MCOUNT_INSN_SIZE);
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	if (addr == MCOUNT_ADDR)
		return ftrace_make_initial_nop(mod, rec, addr);
	return ftrace_modify_code(rec->ip,
				  ftrace_call_code, FTRACE_INSN_SIZE,
				  ftrace_nop_code, FTRACE_INSN_SIZE);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	return ftrace_modify_code(rec->ip,
				  ftrace_nop_code, FTRACE_INSN_SIZE,
				  ftrace_call_code, FTRACE_INSN_SIZE);
}

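/*
 * ftrace_dyn_func lives with the mcount/ftrace_caller assembly, not in
 * this file; the trampoline loads it and branches to the tracer it points
 * to. Switching tracers therefore only updates this pointer and never
 * re-patches the individual call sites.
 */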
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	ftrace_dyn_func = (unsigned long)func;
	return 0;
}

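/*
 * No architecture-specific setup is needed; the generic ftrace init code
 * reads *data back as a status word, so clearing it reports success.
 */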
int __init ftrace_dyn_arch_init(void *data)
{
	*(unsigned long *)data = 0;
	return 0;
}

#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Patch the kernel code at the ftrace_graph_caller location:
 * the instruction there is a branch relative on condition (brc). The
 * condition mask is either all ones (always branch, i.e. disable
 * ftrace_graph_caller) or all zeroes (nop, i.e. enable
 * ftrace_graph_caller). The instruction format for brc is a7m4xxxx,
 * where m is the condition mask.
 */
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned short opcode = 0xa704;

	return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned short opcode = 0xa7f4;

	return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
}

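/*
 * The ip handed to prepare_ftrace_return() below is a return address that
 * points just behind the basr in the patched block, i.e. at the
 * ftrace_disable_return label; subtracting the label distance rewinds it
 * to the start of the block, the call-site address ftrace recorded.
 */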
static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
{
	return addr - (ftrace_disable_return - ftrace_disable_code);
}

#else /* CONFIG_DYNAMIC_FTRACE */

static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
{
	return addr - MCOUNT_OFFSET_RET;
}

#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it onto the stack of return addresses
 * in the current thread info.
 */
unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent)
{
	struct ftrace_graph_ent trace;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
	if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
		goto out;
	trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN;
	/* Only trace if the calling function expects to. */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		goto out;
	}
	parent = (unsigned long)return_to_handler;
out:
	return parent;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */