// SPDX-License-Identifier: GPL-2.0-only

#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <asm/insn.h>

#include "decode-insn.h"

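/*
 * Sentinel stored in thread.bad_cause while an instruction is being
 * single-stepped out of line; a trap taken during the step overwrites
 * it with the real cause, which arch_uprobe_xol_was_trapped() detects.
 */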
#define UPROBE_TRAP_NR	UINT_MAX

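/*
 * With CONFIG_RISCV_ISA_C the breakpoint is the 16-bit c.ebreak
 * instruction, so only the low halfword of the probed slot is
 * compared; otherwise the full 32-bit ebreak is matched.
 */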
bool is_swbp_insn(uprobe_opcode_t *insn)
{
#ifdef CONFIG_RISCV_ISA_C
	return (*insn & 0xffff) == UPROBE_SWBP_INSN;
#else
	return *insn == UPROBE_SWBP_INSN;
#endif
}

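/* Both the 32-bit ebreak and the compressed c.ebreak count as traps. */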
bool is_trap_insn(uprobe_opcode_t *insn)
{
	return riscv_insn_is_ebreak(*insn) || riscv_insn_is_c_ebreak(*insn);
}

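/*
 * On RISC-V the breakpoint exception leaves epc pointing at the ebreak
 * itself, so the probed address is the unmodified instruction pointer.
 */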
unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs);
}

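/*
 * Decode the instruction being probed: reject instructions that cannot
 * be safely probed, simulate those that cannot run from the XOL slot
 * (pc-relative control flow and the like), and single-step the rest
 * out of line.
 */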
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
			     unsigned long addr)
{
	probe_opcode_t opcode;

	opcode = *(probe_opcode_t *)(&auprobe->insn[0]);

	auprobe->insn_size = GET_INSN_LENGTH(opcode);

	switch (riscv_probe_decode_insn(&opcode, &auprobe->api)) {
	case INSN_REJECTED:
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:
		auprobe->simulate = true;
		break;

	case INSN_GOOD:
		auprobe->simulate = false;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

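/*
 * Prepare an out-of-line single step: save the current trap cause,
 * plant the UPROBE_TRAP_NR sentinel so a fault during the step can be
 * detected, and point the instruction pointer at the XOL slot.
 */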
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	utask->autask.saved_cause = current->thread.bad_cause;
	current->thread.bad_cause = UPROBE_TRAP_NR;

	instruction_pointer_set(regs, utask->xol_vaddr);

	return 0;
}

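/*
 * The out-of-line step completed without trapping (the sentinel is
 * intact); restore the saved trap cause and resume execution at the
 * instruction following the probed one.
 */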
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	WARN_ON_ONCE(current->thread.bad_cause != UPROBE_TRAP_NR);
	current->thread.bad_cause = utask->autask.saved_cause;

	instruction_pointer_set(regs, utask->vaddr + auprobe->insn_size);

	return 0;
}

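/*
 * A trap taken while stepping in the XOL slot overwrites the
 * UPROBE_TRAP_NR sentinel in thread.bad_cause with the real cause.
 */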
bool arch_uprobe_xol_was_trapped(struct task_struct *t)
{
	return t->thread.bad_cause != UPROBE_TRAP_NR;
}

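/*
 * Instructions flagged for simulation at analyze time are emulated via
 * their api.handler instead of being stepped out of line. Returning
 * true tells the core layer that no XOL step is needed.
 */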
bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	probe_opcode_t insn;
	unsigned long addr;

	if (!auprobe->simulate)
		return false;

	insn = *(probe_opcode_t *)(&auprobe->insn[0]);
	addr = instruction_pointer(regs);

	if (auprobe->api.handler)
		auprobe->api.handler(insn, addr, regs);

	return true;
}

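/* Undo a pending XOL step when the task is killed part-way through. */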
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	current->thread.bad_cause = utask->autask.saved_cause;
	/*
	 * Task has received a fatal signal, so reset back to the probed
	 * address.
	 */
	instruction_pointer_set(regs, utask->vaddr);
}

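/*
 * The stack grows down on RISC-V, so a return instance is dead once
 * the stack pointer has popped above the frame it was recorded on.
 * A chained call can land on exactly the recorded address, hence the
 * inclusive comparison for RP_CHECK_CHAIN_CALL.
 */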
bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
		struct pt_regs *regs)
{
	if (ctx == RP_CHECK_CHAIN_CALL)
		return regs->sp <= ret->stack;

	return regs->sp < ret->stack;
}

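/*
 * Per the RISC-V calling convention the return address lives in ra,
 * so hijacking it is a simple register swap: return the original
 * value and point ra at the uretprobe trampoline.
 */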
unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr,
				  struct pt_regs *regs)
{
	unsigned long ra = regs->ra;

	regs->ra = trampoline_vaddr;

	return ra;
}

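/*
 * uprobe traps are dispatched directly from the RISC-V trap handlers
 * via uprobe_breakpoint_handler()/uprobe_single_step_handler(), so
 * nothing is done through the notifier chain.
 */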
int arch_uprobe_exception_notify(struct notifier_block *self,
				 unsigned long val, void *data)
{
	return NOTIFY_DONE;
}

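/* Handle an ebreak trap caused by a uprobe breakpoint. */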
bool uprobe_breakpoint_handler(struct pt_regs *regs)
{
	return uprobe_pre_sstep_notifier(regs);
}

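/*
 * Handle the trap raised by the ebreak that arch_uprobe_copy_ixol()
 * appended behind the instruction in the XOL slot.
 */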
bool uprobe_single_step_handler(struct pt_regs *regs)
{
	return uprobe_post_sstep_notifier(regs);
}

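/*
 * Build the XOL slot: copy the probed instruction into the slot and
 * append a 32-bit ebreak behind it, so that executing the slot traps
 * back into the kernel and a single step is emulated without any
 * hardware single-step support.
 */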
void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
			   void *src, unsigned long len)
{
	/* Initialize the slot */
	void *kaddr = kmap_atomic(page);
	void *dst = kaddr + (vaddr & ~PAGE_MASK);

	memcpy(dst, src, len);

	/* Add an ebreak behind the opcode to simulate a single step */
	if (vaddr) {
		dst += GET_INSN_LENGTH(*(probe_opcode_t *)src);
		*(uprobe_opcode_t *)dst = __BUG_INSN_32;
	}

	kunmap_atomic(kaddr);

	/*
	 * We probably need flush_icache_user_page() but it needs a vma.
	 * This should work on most architectures by default. If an
	 * architecture needs to do something different it can define
	 * its own version of the function.
	 */
	flush_dcache_page(page);
}