/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 * This struct defines the way the registers are stored on the
 * kernel stack during a system call or other kernel entry.
 *
 * This should only contain volatile regs, since we can keep
 * non-volatile regs in the thread_struct. It should be set up
 * so that only the volatile regs need to be saved by the
 * interrupt entry code.
 *
 * Since this is going on the stack, *CARE MUST BE TAKEN* to ensure
 * that the overall structure is a multiple of 16 bytes in length.
 *
 * Note that the offsets of the fields in this struct correspond with
 * the PT_* values below.  This simplifies arch/powerpc/kernel/ptrace.c.
 */
#ifndef _ASM_POWERPC_PTRACE_H
#define _ASM_POWERPC_PTRACE_H

#include <linux/err.h>
#include <uapi/asm/ptrace.h>
#include <asm/asm-const.h>
#include <asm/reg.h>

#ifndef __ASSEMBLY__
struct pt_regs
{
	union {
		struct user_pt_regs user_regs;
		struct {
			unsigned long gpr[32];
			unsigned long nip;
			unsigned long msr;
			unsigned long orig_gpr3;
			unsigned long ctr;
			unsigned long link;
			unsigned long xer;
			unsigned long ccr;
#ifdef CONFIG_PPC64
			unsigned long softe;
#else
			unsigned long mq;
#endif
			unsigned long trap;
			union {
				unsigned long dar;
				unsigned long dear;
			};
			union {
				unsigned long dsisr;
				unsigned long esr;
			};
			unsigned long result;
		};
	};
#if defined(CONFIG_PPC64) || defined(CONFIG_PPC_KUAP)
	union {
		struct {
#ifdef CONFIG_PPC64
			unsigned long ppr;
			unsigned long exit_result;
#endif
			union {
#ifdef CONFIG_PPC_KUAP
				unsigned long kuap;
#endif
#ifdef CONFIG_PPC_PKEY
				unsigned long amr;
#endif
			};
#ifdef CONFIG_PPC_PKEY
			unsigned long iamr;
#endif
		};
		unsigned long __pad[4];	/* Maintain 16 byte interrupt stack alignment */
	};
#endif
#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
	struct { /* Must be a multiple of 16 bytes */
		unsigned long mas0;
		unsigned long mas1;
		unsigned long mas2;
		unsigned long mas3;
		unsigned long mas6;
		unsigned long mas7;
		unsigned long srr0;
		unsigned long srr1;
		unsigned long csrr0;
		unsigned long csrr1;
		unsigned long dsrr0;
		unsigned long dsrr1;
	};
#endif
};
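
/*
 * Illustrative sketch only (not part of this header): the 16-byte size
 * requirement described above could be checked at compile time with
 * something like
 *
 *	static_assert(sizeof(struct pt_regs) % 16 == 0,
 *		      "pt_regs must be a multiple of 16 bytes");
 *
 * placed in C code that includes this header.
 */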
#endif

/* Always displays as "REGS" in memory dumps */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define STACK_FRAME_REGS_MARKER	ASM_CONST(0x52454753)
#else
#define STACK_FRAME_REGS_MARKER	ASM_CONST(0x53474552)
#endif
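
/*
 * Worked example (editorial note, not kernel code): the ASCII codes are
 * 'R' = 0x52, 'E' = 0x45, 'G' = 0x47, 'S' = 0x53.  On big-endian the word
 * 0x52454753 is stored as the bytes 52 45 47 53, and on little-endian the
 * byte-swapped constant 0x53474552 is stored as the same byte sequence,
 * so a raw memory dump shows "REGS" either way.
 */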

#ifdef __powerpc64__

/*
 * Size of redzone that userspace is allowed to use below the stack
 * pointer.  This is 288 in the 64-bit big-endian ELF ABI, and 512 in
 * the new ELFv2 little-endian ABI, so we allow the larger amount.
 *
 * For kernel code we allow a 288-byte redzone, in order to conserve
 * kernel stack space; gcc currently only uses 288 bytes, and will
 * hopefully allow explicit control of the redzone size in future.
 */
#define USER_REDZONE_SIZE	512
#define KERNEL_REDZONE_SIZE	288
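
/*
 * Illustrative sketch only, not the kernel's actual signal-frame code:
 * because a leaf function may use the redzone below the user stack
 * pointer without allocating a frame, anything the kernel pushes on the
 * user stack has to start below that area, roughly
 *
 *	newsp = (usp - USER_REDZONE_SIZE - frame_size) & ~0xfUL;
 *
 * where usp is the interrupted user r1 and frame_size is whatever is
 * being written (e.g. a signal frame).
 */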

#define STACK_FRAME_LR_SAVE	2	/* Location of LR in stack frame */

#ifdef CONFIG_PPC64_ELF_ABI_V2
#define STACK_FRAME_MIN_SIZE	32
#define STACK_USER_INT_FRAME_SIZE	(sizeof(struct pt_regs) + STACK_FRAME_MIN_SIZE + 16)
#define STACK_INT_FRAME_REGS	(STACK_FRAME_MIN_SIZE + 16)
#define STACK_INT_FRAME_MARKER	STACK_FRAME_MIN_SIZE
#define STACK_SWITCH_FRAME_SIZE (sizeof(struct pt_regs) + STACK_FRAME_MIN_SIZE + 16)
#define STACK_SWITCH_FRAME_REGS	(STACK_FRAME_MIN_SIZE + 16)
#else
/*
 * The ELFv1 ABI specifies 48 bytes plus a minimum 64 byte parameter save
 * area. This parameter area is not used by calls to C from interrupt entry,
 * so the second from last one of those is used for the frame marker.
 */
#define STACK_FRAME_MIN_SIZE	112
#define STACK_USER_INT_FRAME_SIZE	(sizeof(struct pt_regs) + STACK_FRAME_MIN_SIZE)
#define STACK_INT_FRAME_REGS	STACK_FRAME_MIN_SIZE
#define STACK_INT_FRAME_MARKER	(STACK_FRAME_MIN_SIZE - 16)
#define STACK_SWITCH_FRAME_SIZE	(sizeof(struct pt_regs) + STACK_FRAME_MIN_SIZE)
#define STACK_SWITCH_FRAME_REGS	STACK_FRAME_MIN_SIZE
#endif
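
/*
 * Worked example (editorial note, not kernel code), assuming the usual
 * interrupt-entry layout in which r1 points at the frame base:
 *
 *   ELFv2: STACK_FRAME_MIN_SIZE = 32, so the saved pt_regs sits at
 *          r1 + 48 (STACK_INT_FRAME_REGS) and STACK_FRAME_REGS_MARKER is
 *          stored at r1 + 32 (STACK_INT_FRAME_MARKER).
 *   ELFv1: STACK_FRAME_MIN_SIZE = 112, so pt_regs sits at r1 + 112 and
 *          the marker at r1 + 96.
 */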

/* Size of dummy stack frame allocated when calling signal handler. */
#define __SIGNAL_FRAMESIZE	128
#define __SIGNAL_FRAMESIZE32	64

#else /* __powerpc64__ */

#define USER_REDZONE_SIZE	0
#define KERNEL_REDZONE_SIZE	0
#define STACK_FRAME_MIN_SIZE	16
#define STACK_FRAME_LR_SAVE	1	/* Location of LR in stack frame */
#define STACK_USER_INT_FRAME_SIZE	(sizeof(struct pt_regs) + STACK_FRAME_MIN_SIZE)
#define STACK_INT_FRAME_REGS	STACK_FRAME_MIN_SIZE
#define STACK_INT_FRAME_MARKER	(STACK_FRAME_MIN_SIZE - 8)
#define STACK_SWITCH_FRAME_SIZE	(sizeof(struct pt_regs) + STACK_FRAME_MIN_SIZE)
#define STACK_SWITCH_FRAME_REGS	STACK_FRAME_MIN_SIZE

/* Size of stack frame allocated when calling signal handler. */
#define __SIGNAL_FRAMESIZE	64

#endif /* __powerpc64__ */

#define STACK_INT_FRAME_SIZE	(KERNEL_REDZONE_SIZE + STACK_USER_INT_FRAME_SIZE)
#define STACK_INT_FRAME_MARKER_LONGS	(STACK_INT_FRAME_MARKER/sizeof(long))

#ifndef __ASSEMBLY__
#include <asm/paca.h>

#ifdef CONFIG_SMP
extern unsigned long profile_pc(struct pt_regs *regs);
#else
#define profile_pc(regs) instruction_pointer(regs)
#endif

long do_syscall_trace_enter(struct pt_regs *regs);
void do_syscall_trace_leave(struct pt_regs *regs);

static inline void set_return_regs_changed(void)
{
#ifdef CONFIG_PPC_BOOK3S_64
	WRITE_ONCE(local_paca->hsrr_valid, 0);
	WRITE_ONCE(local_paca->srr_valid, 0);
#endif
}

static inline void regs_set_return_ip(struct pt_regs *regs, unsigned long ip)
{
	regs->nip = ip;
	set_return_regs_changed();
}

static inline void regs_set_return_msr(struct pt_regs *regs, unsigned long msr)
{
	regs->msr = msr;
	set_return_regs_changed();
}

static inline void regs_add_return_ip(struct pt_regs *regs, long offset)
{
	regs_set_return_ip(regs, regs->nip + offset);
}
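
/*
 * Illustrative sketch only: instruction emulation code that skips the
 * trapped instruction would typically do
 *
 *	regs_add_return_ip(regs, 4);
 *
 * rather than writing regs->nip directly, so that any cached SRR0/SRR1
 * state is invalidated via set_return_regs_changed().
 */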

static inline unsigned long instruction_pointer(struct pt_regs *regs)
{
	return regs->nip;
}

static inline void instruction_pointer_set(struct pt_regs *regs,
		unsigned long val)
{
	regs_set_return_ip(regs, val);
}

static inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
	return regs->gpr[1];
}

static inline unsigned long frame_pointer(struct pt_regs *regs)
{
	return 0;
}

#define user_mode(regs) (((regs)->msr & MSR_PR) != 0)

#define force_successful_syscall_return()   \
	do { \
		set_thread_flag(TIF_NOERROR); \
	} while (0)

#define current_pt_regs() \
	((struct pt_regs *)((unsigned long)task_stack_page(current) + THREAD_SIZE) - 1)

/*
 * The 4 low bits (0xf) are available as flags to overload the trap word,
 * because interrupt vectors have minimum alignment of 0x10. TRAP_FLAGS_MASK
 * must cover the bits used as flags, including bit 0 which is used as the
 * "norestart" bit.
 */
#ifdef __powerpc64__
#define TRAP_FLAGS_MASK		0x1
#else
/*
 * On 4xx we use bit 1 in the trap word to indicate whether the exception
 * is a critical exception (1 means it is).
 */
#define TRAP_FLAGS_MASK		0xf
#define IS_CRITICAL_EXC(regs)	(((regs)->trap & 2) != 0)
#define IS_MCHECK_EXC(regs)	(((regs)->trap & 4) != 0)
#define IS_DEBUG_EXC(regs)	(((regs)->trap & 8) != 0)
#endif /* __powerpc64__ */
#define TRAP(regs)		((regs)->trap & ~TRAP_FLAGS_MASK)
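
/*
 * Worked example (editorial note): on 32-bit, a trap word of 0x204
 * decomposes as TRAP(regs) == 0x200 with flag bit 4 set, so
 * IS_MCHECK_EXC(regs) is true; on 64-bit, 0xc01 decomposes as
 * TRAP(regs) == 0xc00 with the norestart bit (bit 0) set.
 */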

static __always_inline void set_trap(struct pt_regs *regs, unsigned long val)
{
	regs->trap = (regs->trap & TRAP_FLAGS_MASK) | (val & ~TRAP_FLAGS_MASK);
}

static inline bool trap_is_scv(struct pt_regs *regs)
{
	return (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && TRAP(regs) == 0x3000);
}

static inline bool trap_is_unsupported_scv(struct pt_regs *regs)
{
	return IS_ENABLED(CONFIG_PPC_BOOK3S_64) && TRAP(regs) == 0x7ff0;
}

static inline bool trap_is_syscall(struct pt_regs *regs)
{
	return (trap_is_scv(regs) || TRAP(regs) == 0xc00);
}

static inline bool trap_norestart(struct pt_regs *regs)
{
	return regs->trap & 0x1;
}

static __always_inline void set_trap_norestart(struct pt_regs *regs)
{
	regs->trap |= 0x1;
}

#define kernel_stack_pointer(regs) ((regs)->gpr[1])

static inline int is_syscall_success(struct pt_regs *regs)
{
	if (trap_is_scv(regs))
		return !IS_ERR_VALUE((unsigned long)regs->gpr[3]);
	else
		return !(regs->ccr & 0x10000000);
}

static inline long regs_return_value(struct pt_regs *regs)
{
	if (trap_is_scv(regs))
		return regs->gpr[3];

	if (is_syscall_success(regs))
		return regs->gpr[3];
	else
		return -regs->gpr[3];
}
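
/*
 * Illustrative example of the two error conventions handled above: a
 * failing openat() via the sc instruction returns with CR0.SO set
 * (0x10000000 in the CCR image) and r3 = ENOENT, so regs_return_value()
 * yields -ENOENT; the same failure via scv returns r3 = -ENOENT directly
 * and CR0.SO is not used.
 */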

static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
{
	regs->gpr[3] = rc;
}

static inline bool cpu_has_msr_ri(void)
{
	return !IS_ENABLED(CONFIG_BOOKE_OR_40x);
}

static inline bool regs_is_unrecoverable(struct pt_regs *regs)
{
	return unlikely(cpu_has_msr_ri() && !(regs->msr & MSR_RI));
}

static inline void regs_set_recoverable(struct pt_regs *regs)
{
	if (cpu_has_msr_ri())
		regs_set_return_msr(regs, regs->msr | MSR_RI);
}

static inline void regs_set_unrecoverable(struct pt_regs *regs)
{
	if (cpu_has_msr_ri())
		regs_set_return_msr(regs, regs->msr & ~MSR_RI);
}

#define arch_has_single_step()	(1)
#define arch_has_block_step()	(true)
#define ARCH_HAS_USER_SINGLE_STEP_REPORT

/*
 * kprobe-based event tracer support
 */

#include <linux/stddef.h>
#include <linux/thread_info.h>
extern int regs_query_register_offset(const char *name);
extern const char *regs_query_register_name(unsigned int offset);
#define MAX_REG_OFFSET (offsetof(struct pt_regs, dsisr))

/**
 * regs_get_register() - get register value from its offset
 * @regs:	pt_regs from which the register value is retrieved
 * @offset:	offset of the register in struct pt_regs
 *
 * regs_get_register() returns the value of the register stored at @offset
 * within @regs.  If @offset is bigger than MAX_REG_OFFSET, this returns 0.
 */
static inline unsigned long regs_get_register(struct pt_regs *regs,
						unsigned int offset)
{
	if (unlikely(offset > MAX_REG_OFFSET))
		return 0;
	return *(unsigned long *)((unsigned long)regs + offset);
}
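
/*
 * Illustrative usage only: to read GPR3 from a pt_regs captured by a
 * probe handler one could write
 *
 *	unsigned long r3 = regs_get_register(regs,
 *				offsetof(struct pt_regs, gpr[3]));
 *
 * which is how regs_get_kernel_argument() below fetches arguments.
 */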

/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @addr:	address which is checked.
 *
 * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
 * If @addr is within the kernel stack, it returns true. If not, returns false.
 */
static inline bool regs_within_kernel_stack(struct pt_regs *regs,
						unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
						      unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}
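
/*
 * Illustrative usage only (assumes the frame at regs->gpr[1] follows the
 * normal stack-frame layout): the saved LR of that frame could be read
 * with
 *
 *	unsigned long lr = regs_get_kernel_stack_nth(regs,
 *						     STACK_FRAME_LR_SAVE);
 *
 * returning 0 if the slot falls outside the kernel stack.
 */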

/**
 * regs_get_kernel_argument() - get Nth function argument in kernel
 * @regs:	pt_regs of that context
 * @n:		function argument number (start from 0)
 *
 * We support up to 8 arguments and assume they are sent in through the GPRs.
 * This will fail for fp/vector arguments, but those aren't usually found in
 * kernel code. This is expected to be called from kprobes or ftrace with regs.
 */
static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs, unsigned int n)
{
#define NR_REG_ARGUMENTS 8
	if (n < NR_REG_ARGUMENTS)
		return regs_get_register(regs, offsetof(struct pt_regs, gpr[3 + n]));
	return 0;
}
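
/*
 * Illustrative usage only, with a hypothetical kprobe pre-handler: the
 * first argument of the probed function (r3 at entry) could be read as
 *
 *	static int handler(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		unsigned long arg0 = regs_get_kernel_argument(regs, 0);
 *		...
 *		return 0;
 *	}
 *
 * Note this is only reliable at function entry, before r3-r10 have been
 * clobbered.
 */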

#endif /* __ASSEMBLY__ */

#ifndef __powerpc64__
/* We need PT_SOFTE defined at all times to avoid #ifdefs */
#define PT_SOFTE PT_MQ
#else /* __powerpc64__ */
#define PT_FPSCR32 (PT_FPR0 + 2*32 + 1)	/* each FP reg occupies 2 32-bit userspace slots */
#define PT_VR0_32 164	/* each Vector reg occupies 4 slots in 32-bit */
#define PT_VSCR_32 (PT_VR0 + 32*4 + 3)
#define PT_VRSAVE_32 (PT_VR0 + 33*4)
#define PT_VSR0_32 300	/* each VSR reg occupies 4 slots in 32-bit */
#endif /* __powerpc64__ */
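
/*
 * Worked example (editorial note): with each 64-bit FP register exported
 * as two 32-bit slots, the 32 FP registers occupy slots PT_FPR0 through
 * PT_FPR0 + 63, so PT_FPSCR32 = PT_FPR0 + 65 selects the second 32-bit
 * word of the FPSCR doubleword that follows them.  Similarly PT_VSCR_32
 * and PT_VRSAVE_32 land just after the 32 four-slot vector registers, at
 * PT_VR0 + 131 and PT_VR0 + 132.
 */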
#endif /* _ASM_POWERPC_PTRACE_H */