/*	$OpenBSD: cpu.h,v 1.24 2024/06/11 16:02:35 jca Exp $	*/

/*
 * Copyright (c) 2019 Mike Larkin <mlarkin@openbsd.org>
 * Copyright (c) 2016 Dale Rahn <drahn@dalerahn.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _MACHINE_CPU_H_
#define _MACHINE_CPU_H_

/*
 * User-visible definitions
 */

/* CTL_MACHDEP definitions. */
#define	CPU_COMPATIBLE		1	/* compatible property */
#define	CPU_MAXID		2	/* number of valid machdep ids */

#define	CTL_MACHDEP_NAMES { \
	{ 0, 0 }, \
	{ "compatible", CTLTYPE_STRING }, \
}
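
/*
 * Illustrative sketch (not part of this header): userland can fetch the
 * "compatible" string with sysctl(2) using the MIB below.  The buffer
 * size is an arbitrary choice and error handling is omitted.
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int mib[2] = { CTL_MACHDEP, CPU_COMPATIBLE };
 *	char buf[128];
 *	size_t len = sizeof(buf);
 *
 *	if (sysctl(mib, 2, buf, &len, NULL, 0) == 0)
 *		printf("machdep.compatible=%s\n", buf);
 */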

#ifdef _KERNEL

/*
 * Kernel-only definitions
 */
#include <machine/intr.h>
#include <machine/frame.h>
#include <machine/riscvreg.h>

/* All the CLKF_* macros take a struct clockframe * as an argument. */

#define clockframe trapframe
/*
 * CLKF_USERMODE: Return TRUE/FALSE (1/0) depending on whether the
 * frame came from user mode or not.
 */
#define CLKF_USERMODE(frame)	(((frame)->tf_sstatus & SSTATUS_SPP) == 0)

/*
 * CLKF_INTR: True if we took the interrupt from inside another
 * interrupt handler.
 */
#define CLKF_INTR(frame)	(curcpu()->ci_idepth > 1)

/*
 * CLKF_PC: Extract the program counter from a clockframe.
 */
#define CLKF_PC(frame)		((frame)->tf_sepc)
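
/*
 * Illustrative sketch: a clock interrupt path might classify the
 * interrupted context roughly like this.  The handler name, the tick
 * counters and kernel_profile_hit() are hypothetical; only the CLKF_*
 * macros come from this header.
 *
 *	void
 *	example_statclock(struct clockframe *frame)
 *	{
 *		if (CLKF_USERMODE(frame))
 *			user_ticks++;
 *		else if (CLKF_INTR(frame))
 *			nested_intr_ticks++;
 *		else
 *			kernel_profile_hit(CLKF_PC(frame));
 *	}
 */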

/*
 * PROC_PC: Find out the program counter for the given process.
 */
#define PROC_PC(p)	((p)->p_addr->u_pcb.pcb_tf->tf_sepc)
#define PROC_STACK(p)	((p)->p_addr->u_pcb.pcb_tf->tf_sp)
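
/*
 * Illustrative sketch: PROC_PC and PROC_STACK read the saved user
 * program counter and stack pointer from the trapframe hanging off the
 * process' PCB.  The helper below is hypothetical.
 *
 *	void
 *	example_show_proc(struct proc *p)
 *	{
 *		printf("pc=%#lx sp=%#lx\n", PROC_PC(p), PROC_STACK(p));
 *	}
 */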

#include <sys/clockintr.h>
#include <sys/device.h>
#include <sys/sched.h>
#include <sys/srp.h>
#include <uvm/uvm_percpu.h>

struct cpu_info {
	struct device		*ci_dev; /* Device corresponding to this CPU */
	struct cpu_info		*ci_next;
	struct schedstate_percpu ci_schedstate; /* scheduler state */

	u_int32_t		ci_cpuid;
	uint64_t		ci_hartid;
	int			ci_node;
	struct cpu_info		*ci_self;

	struct proc		*ci_curproc;
	struct pmap		*ci_curpm;
	u_int32_t		ci_randseed;

	struct pcb		*ci_curpcb;
	struct pcb		*ci_idle_pcb;

	struct clockqueue	ci_queue;
	volatile int		ci_timer_deferred;

	uint32_t		ci_cpl;
	uint32_t		ci_ipending;
	uint32_t		ci_idepth;
#ifdef DIAGNOSTIC
	int			ci_mutex_level;
#endif
	int			ci_want_resched;

	struct opp_table	*ci_opp_table;
	volatile int		ci_opp_idx;
	volatile int		ci_opp_max;
	uint32_t		ci_cpu_supply;

#ifdef MULTIPROCESSOR
	struct srp_hazard	ci_srp_hazards[SRP_HAZARD_NUM];
#define	__HAVE_UVM_PERCPU
	struct uvm_pmr_cache	ci_uvm;
	volatile int		ci_flags;
	uint64_t		ci_satp;
	vaddr_t			ci_initstack_end;
	int			ci_ipi_reason;

	volatile int		ci_ddb_paused;
#define CI_DDB_RUNNING		0
#define CI_DDB_SHOULDSTOP	1
#define CI_DDB_STOPPED		2
#define CI_DDB_ENTERDDB		3
#define CI_DDB_INDDB		4

#endif

#ifdef GPROF
	struct gmonparam	*ci_gmon;
	struct clockintr	ci_gmonclock;
#endif

	char			ci_panicbuf[512];
};

#define CPUF_PRIMARY		(1<<0)
#define CPUF_AP			(1<<1)
#define CPUF_IDENTIFY		(1<<2)
#define CPUF_IDENTIFIED		(1<<3)
#define CPUF_PRESENT		(1<<4)
#define CPUF_GO			(1<<5)
#define CPUF_RUNNING		(1<<6)

static inline struct cpu_info *
curcpu(void)
{
	struct cpu_info *__ci = NULL;
	__asm volatile("mv %0, tp" : "=&r"(__ci));
	return (__ci);
}
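
/*
 * curcpu() works because the kernel keeps the current CPU's cpu_info
 * pointer in the tp (thread pointer) register while in supervisor
 * mode.  Illustrative sketch of typical use (the printf is only an
 * example):
 *
 *	struct cpu_info *ci = curcpu();
 *
 *	if (CPU_IS_PRIMARY(ci))
 *		printf("on the boot CPU, hart %llu\n",
 *		    (unsigned long long)ci->ci_hartid);
 */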

extern uint32_t boot_hart;	/* The hart we booted on. */
extern struct cpu_info cpu_info_primary;
extern struct cpu_info *cpu_info_list;

#ifndef MULTIPROCESSOR

#define cpu_number()	0
#define CPU_IS_PRIMARY(ci)	1
#define CPU_IS_RUNNING(ci)	1
#define CPU_INFO_ITERATOR	int
#define CPU_INFO_FOREACH(cii, ci) \
	for (cii = 0, ci = curcpu(); ci != NULL; ci = NULL)
#define CPU_INFO_UNIT(ci)	0
#define MAXCPUS	1
#define cpu_unidle(ci)

#else

#define cpu_number()		(curcpu()->ci_cpuid)
#define CPU_IS_PRIMARY(ci)	((ci) == &cpu_info_primary)
#define CPU_IS_RUNNING(ci)	((ci)->ci_flags & CPUF_RUNNING)
#define CPU_INFO_ITERATOR		int
#define CPU_INFO_FOREACH(cii, ci)	for (cii = 0, ci = cpu_info_list; \
					    ci != NULL; ci = ci->ci_next)
#define CPU_INFO_UNIT(ci)	((ci)->ci_dev ? (ci)->ci_dev->dv_unit : 0)
#define MAXCPUS	32

extern struct cpu_info *cpu_info[MAXCPUS];

void	cpu_boot_secondary_processors(void);

#endif /* !MULTIPROCESSOR */
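
/*
 * Illustrative sketch: CPU_INFO_FOREACH visits every attached CPU in
 * both the UP and MP configurations; the iterator variable is just a
 * cookie.  The printf is only an example.
 *
 *	CPU_INFO_ITERATOR cii;
 *	struct cpu_info *ci;
 *
 *	CPU_INFO_FOREACH(cii, ci) {
 *		if (!CPU_IS_RUNNING(ci))
 *			continue;
 *		printf("cpu%d: hart %llu\n", CPU_INFO_UNIT(ci),
 *		    (unsigned long long)ci->ci_hartid);
 *	}
 */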

/* Zihintpause ratified extension */
#define CPU_BUSY_CYCLE()	__asm volatile(".long 0x0100000f" ::: "memory")
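
/*
 * 0x0100000f is the encoding of the "pause" hint from Zihintpause,
 * emitted as a raw word so that assemblers without Zihintpause support
 * can still build it.  Illustrative sketch of a spin-wait (the flag
 * variable is hypothetical):
 *
 *	volatile int flag;
 *
 *	while (flag == 0)
 *		CPU_BUSY_CYCLE();
 */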

#define curpcb		curcpu()->ci_curpcb

static inline unsigned int
cpu_rnd_messybits(void)
{
	/* Should also XOR in a bit-reversed copy of csr_read(time). */
	return csr_read(time);
}

/*
 * Scheduling glue
 */
#define aston(p)	((p)->p_md.md_astpending = 1)
#define	setsoftast()	aston(curcpu()->ci_curproc)

/*
 * Notify the given process (p) that it has a signal pending, to be
 * processed as soon as possible.
 */

#ifdef MULTIPROCESSOR
void cpu_unidle(struct cpu_info *ci);
#define signotify(p)	(aston(p), cpu_unidle((p)->p_cpu))
void cpu_kick(struct cpu_info *);
#else
#define cpu_kick(ci)
#define cpu_unidle(ci)
#define signotify(p)	setsoftast()
#endif

/*
 * Preempt the current process if in interrupt from user mode,
 * or after the current trap/syscall if in system mode.
 */
void need_resched(struct cpu_info *);
#define clear_resched(ci)	((ci)->ci_want_resched = 0)

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  Request an AST to send the process
 * through trap(), marking the proc as needing a profiling tick.
 */
#define	need_proftick(p)	aston(p)

/* asm code to start new kernel contexts. */
void	proc_trampoline(void);

/*
 * Random cruft
 */
void	dumpconf(void);

static inline void
intr_enable(void)
{
	__asm volatile("csrsi sstatus, %0" :: "i" (SSTATUS_SIE));
}

static inline u_long
intr_disable(void)
{
	uint64_t ret;

	__asm volatile(
	    "csrrci %0, sstatus, %1"
	    : "=&r" (ret) : "i" (SSTATUS_SIE)
	);

	return (ret & (SSTATUS_SIE));
}

static inline void
intr_restore(u_long s)
{
	__asm volatile("csrs sstatus, %0" :: "r" (s));
}
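
/*
 * Illustrative sketch: the usual pattern saves the previous SSTATUS_SIE
 * state across a critical section.  intr_disable() returns only the SIE
 * bit, and intr_restore() sets it again if (and only if) it was set.
 *
 *	u_long s;
 *
 *	s = intr_disable();
 *	... code that must not be interrupted ...
 *	intr_restore(s);
 */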

void	delay (unsigned);
#define	DELAY(x)	delay(x)

extern void (*cpu_startclock_fcn)(void);

void fpu_save(struct proc *, struct trapframe *);
void fpu_load(struct proc *);

extern int cpu_errata_sifive_cip_1200;

#define	cpu_idle_enter()	do { /* nothing */ } while (0)
#define	cpu_idle_leave()	do { /* nothing */ } while (0)

#endif /* _KERNEL */

#ifdef MULTIPROCESSOR
#include <sys/mplock.h>
#endif /* MULTIPROCESSOR */

#endif /* !_MACHINE_CPU_H_ */