/*	$OpenBSD: cpu.h,v 1.125 2018/12/04 16:24:13 visa Exp $	*/

/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Ralph Campbell and Rick Macklem.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	Copyright (C) 1989 Digital Equipment Corporation.
 *	Permission to use, copy, modify, and distribute this software and
 *	its documentation for any purpose and without fee is hereby granted,
 *	provided that the above copyright notice appears in all copies.
 *	Digital Equipment Corporation makes no representations about the
 *	suitability of this software for any purpose.  It is provided "as is"
 *	without express or implied warranty.
 *
 *	from: @(#)cpu.h	8.4 (Berkeley) 1/4/94
 */

#ifndef _MIPS64_CPU_H_
#define	_MIPS64_CPU_H_

#ifndef _LOCORE

/*
 * MIPS32-style segment definitions.
 * They only cover the first 512MB of physical addresses.
 */
#define	CKSEG0_BASE		0xffffffff80000000UL
#define	CKSEG1_BASE		0xffffffffa0000000UL
#define	CKSSEG_BASE		0xffffffffc0000000UL
#define	CKSEG3_BASE		0xffffffffe0000000UL
#define	CKSEG_SIZE		0x0000000020000000UL

#define	CKSEG0_TO_PHYS(x)	((u_long)(x) & (CKSEG_SIZE - 1))
#define	CKSEG1_TO_PHYS(x)	((u_long)(x) & (CKSEG_SIZE - 1))
#define	PHYS_TO_CKSEG0(x)	((u_long)(x) | CKSEG0_BASE)
#define	PHYS_TO_CKSEG1(x)	((u_long)(x) | CKSEG1_BASE)

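/*
 * Illustrative sketch (not part of the original header): CKSEG1 gives an
 * uncached view of the first 512MB of physical space, e.g. for a device
 * register.  The physical address below is a placeholder.
 *
 *	volatile uint32_t *reg =
 *	    (volatile uint32_t *)PHYS_TO_CKSEG1(0x1fc00000UL);
 *	uint32_t val = *reg;
 */
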
/*
 * MIPS64-style segment definitions.
 * These allow for 36 bits of addressable physical memory, thus 64GB.
 */

/*
 * Cache Coherency Attributes.
 */
/* r8k only */
#define	CCA_NC_COPROCESSOR	0UL	/* uncached, coprocessor ordered */
/* common to r4k, r5k, r8k and r1xk */
#define	CCA_NC			2UL	/* uncached, write-around */
#define	CCA_NONCOHERENT		3UL	/* cached, non-coherent, write-back */
/* r8k, r1xk only */
#define	CCA_COHERENT_EXCL	4UL	/* cached, coherent, exclusive */
#define	CCA_COHERENT_EXCLWRITE	5UL	/* cached, coherent, exclusive write */
/* r4k only */
#define	CCA_COHERENT_UPDWRITE	6UL	/* cached, coherent, update on write */
/* r1xk only */
#define	CCA_NC_ACCELERATED	7UL	/* uncached accelerated */

#ifdef TGT_COHERENT
#define	CCA_CACHED		CCA_COHERENT_EXCLWRITE
#else
#define	CCA_CACHED		CCA_NONCOHERENT
#endif

/*
 * Uncached spaces.
 * R1x000 processors use bits 58:57 of uncached virtual addresses (CCA_NC)
 * to select different spaces. Unfortunately, other processors need these
 * bits to be zero, so uncached addresses have to be decided at runtime.
 */
#define	SP_HUB			0UL	/* Hub space */
#define	SP_IO			1UL	/* I/O space */
#define	SP_SPECIAL		2UL	/* Memory Special space */
#define	SP_NC			3UL	/* Memory Uncached space */

#define	XKSSSEG_BASE		0x4000000000000000UL
#define	XKPHYS_BASE		0x8000000000000000UL
#define	XKSSEG_BASE		0xc000000000000000UL

#define	XKPHYS_TO_PHYS(x)	((paddr_t)(x) & 0x0000000fffffffffUL)
#define	PHYS_TO_XKPHYS(x,c)	((paddr_t)(x) | XKPHYS_BASE | ((c) << 59))
#define	PHYS_TO_XKPHYS_UNCACHED(x,s) \
	(PHYS_TO_XKPHYS(x, CCA_NC) | ((s) << 57))
#define	IS_XKPHYS(va)		(((va) >> 62) == 2)
#define	XKPHYS_TO_CCA(x)	(((x) >> 59) & 0x07)
#define	XKPHYS_TO_SP(x)		(((x) >> 57) & 0x03)

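/*
 * Illustrative sketch (not part of the original header): a cached,
 * direct-mapped XKPHYS view of a physical address `pa', and recovery of
 * the original address and CCA bits from the resulting virtual address.
 *
 *	vaddr_t va = PHYS_TO_XKPHYS(pa, CCA_CACHED);
 *	if (IS_XKPHYS(va)) {
 *		paddr_t pa2 = XKPHYS_TO_PHYS(va);	yields pa again
 *		u_long cca = XKPHYS_TO_CCA(va);		yields CCA_CACHED
 *	}
 */
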
#endif	/* _LOCORE */

/*
 * Exported definitions unique to mips cpu support.
 */

#if defined(_KERNEL) && !defined(_LOCORE)

#include <sys/device.h>
#include <machine/intr.h>
#include <sys/sched.h>

struct cpu_hwinfo {
	uint32_t	c0prid;
	uint32_t	c1prid;
	uint32_t	clock;	/* Hz */
	uint32_t	tlbsize;
	uint		type;
	uint32_t	l2size;
};

/*
 * Cache memory configuration. One struct per cache.
 */
struct cache_info {
	uint		size;		/* total cache size */
	uint		linesize;	/* line size */
	uint		setsize;	/* set size */
	uint		sets;		/* number of sets */
};

struct cpu_info {
	struct device	*ci_dev;	/* our device */
	struct cpu_info	*ci_self;	/* pointer to this structure */
	struct cpu_info	*ci_next;	/* next cpu */
	struct proc	*ci_curproc;
	struct user	*ci_curprocpaddr;
	struct proc	*ci_fpuproc;	/* pointer to last proc to use FP */
	uint32_t	 ci_delayconst;
	struct cpu_hwinfo
			ci_hw;

#if defined(MULTIPROCESSOR)
	struct srp_hazard ci_srp_hazards[SRP_HAZARD_NUM];
#endif

	/* cache information and pending flush state */
	uint		ci_cacheconfiguration;
	uint64_t	ci_cachepending_l1i;
	struct cache_info
			ci_l1inst,
			ci_l1data,
			ci_l2,
			ci_l3;

	/* function pointers for the cache handling routines */
	void		(*ci_SyncCache)(struct cpu_info *);
	void		(*ci_InvalidateICache)(struct cpu_info *, vaddr_t,
			    size_t);
	void		(*ci_InvalidateICachePage)(struct cpu_info *, vaddr_t);
	void		(*ci_SyncICache)(struct cpu_info *);
	void		(*ci_SyncDCachePage)(struct cpu_info *, vaddr_t,
			    paddr_t);
	void		(*ci_HitSyncDCachePage)(struct cpu_info *, vaddr_t,
			    paddr_t);
	void		(*ci_HitSyncDCache)(struct cpu_info *, vaddr_t, size_t);
	void		(*ci_HitInvalidateDCache)(struct cpu_info *, vaddr_t,
			    size_t);
	void		(*ci_IOSyncDCache)(struct cpu_info *, vaddr_t, size_t,
			    int);

	struct schedstate_percpu
			ci_schedstate;
	int		ci_want_resched;	/* need_resched() invoked */
	cpuid_t		ci_cpuid;		/* our CPU ID */
	uint32_t	ci_randseed;		/* per cpu random seed */
	volatile int	ci_ipl;			/* software IPL */
	uint32_t	ci_softpending;		/* pending soft interrupts */
	int		ci_clock_started;
	u_int32_t	ci_cpu_counter_last;	/* last compare value loaded */
	u_int32_t	ci_cpu_counter_interval; /* # of counter ticks/tick */

	u_int32_t	ci_pendingticks;

#ifdef TGT_ORIGIN
	u_int16_t	ci_nasid;
	u_int16_t	ci_slice;
#endif

	struct pmap	*ci_curpmap;
	uint		ci_intrdepth;		/* interrupt depth */
#ifdef MULTIPROCESSOR
	u_long		ci_flags;		/* flags; see below */
#endif
	volatile int    ci_ddb;
#define	CI_DDB_RUNNING		0
#define	CI_DDB_SHOULDSTOP	1
#define	CI_DDB_STOPPED		2
#define	CI_DDB_ENTERDDB		3
#define	CI_DDB_INDDB		4

#ifdef DIAGNOSTIC
	int	ci_mutex_level;
#endif
#ifdef GPROF
	struct gmonparam *ci_gmon;
#endif
};

#define	CPUF_PRIMARY	0x01		/* CPU is primary CPU */
#define	CPUF_PRESENT	0x02		/* CPU is present */
#define	CPUF_RUNNING	0x04		/* CPU is running */

extern struct cpu_info cpu_info_primary;
extern struct cpu_info *cpu_info_list;
#define CPU_INFO_ITERATOR		int
#define	CPU_INFO_FOREACH(cii, ci)	for (cii = 0, ci = cpu_info_list; \
					    ci != NULL; ci = ci->ci_next)

#define CPU_INFO_UNIT(ci)		((ci)->ci_dev ? (ci)->ci_dev->dv_unit : 0)

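/*
 * Typical iteration over all attached CPUs (illustrative sketch, not part
 * of the original header):
 *
 *	struct cpu_info *ci;
 *	CPU_INFO_ITERATOR cii;
 *
 *	CPU_INFO_FOREACH(cii, ci)
 *		printf("cpu%d\n", CPU_INFO_UNIT(ci));
 */
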
extern void (*cpu_idle_cycle_func)(void);
#define cpu_idle_cycle()		(*cpu_idle_cycle_func)()

#ifdef MULTIPROCESSOR
#define getcurcpu()			hw_getcurcpu()
#define setcurcpu(ci)			hw_setcurcpu(ci)
extern struct cpu_info *get_cpu_info(int);
#define curcpu() getcurcpu()
#define	CPU_IS_PRIMARY(ci)		((ci)->ci_flags & CPUF_PRIMARY)
#define cpu_number()			(curcpu()->ci_cpuid)

extern struct cpuset cpus_running;
void cpu_unidle(struct cpu_info *);
void cpu_boot_secondary_processors(void);
#define cpu_boot_secondary(ci)		hw_cpu_boot_secondary(ci)
#define cpu_hatch(ci)			hw_cpu_hatch(ci)

vaddr_t alloc_contiguous_pages(size_t);

#define MIPS64_IPI_NOP		0x00000001
#define MIPS64_IPI_RENDEZVOUS	0x00000002
#define MIPS64_IPI_DDB		0x00000004
#define MIPS64_NIPIS		3	/* must not exceed 32 */

void	mips64_ipi_init(void);
void	mips64_send_ipi(unsigned int, unsigned int);
void	smp_rendezvous_cpus(unsigned long, void (*)(void *), void *arg);

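/*
 * Illustrative sketch only; the argument meanings are inferred from the
 * declarations above and are not spelled out in the original header.
 * Assuming the cpu mask is a bitmask of cpuids and mips64_send_ipi()
 * takes a target cpuid followed by a MIPS64_IPI_* mask:
 *
 *	smp_rendezvous_cpus(cpumask, do_work, NULL);	placeholders
 *	mips64_send_ipi(ci->ci_cpuid, MIPS64_IPI_NOP);
 */
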
#include <sys/mplock.h>
#else
#define MAXCPUS				1
#define curcpu()			(&cpu_info_primary)
#define	CPU_IS_PRIMARY(ci)		1
#define cpu_number()			0
#define cpu_unidle(ci)
#define get_cpu_info(i)			(&cpu_info_primary)
#endif

#define CPU_BUSY_CYCLE()	do {} while (0)

extern void (*md_startclock)(struct cpu_info *);
void	cp0_calibrate(struct cpu_info *);

#include <machine/frame.h>

/*
 * Arguments to hardclock encapsulate the previous machine state in
 * an opaque clockframe.
 */
#define	clockframe trapframe	/* Use normal trap frame */

#define	SR_KSU_USER		0x00000010
#define	CLKF_USERMODE(framep)	((framep)->sr & SR_KSU_USER)
#define	CLKF_PC(framep)		((framep)->pc)
#define	CLKF_INTR(framep)	(curcpu()->ci_intrdepth > 1)	/* XXX */

/*
 * This is used during profiling to integrate system time.
 */
#define	PROC_PC(p)	((p)->p_md.md_regs->pc)
#define	PROC_STACK(p)	((p)->p_md.md_regs->sp)

/*
 * Preempt the current process if in interrupt from user mode,
 * or after the current trap/syscall if in system mode.
 */
#define	need_resched(ci) \
	do { \
		(ci)->ci_want_resched = 1; \
		if ((ci)->ci_curproc != NULL) \
			aston((ci)->ci_curproc); \
	} while(0)
#define	clear_resched(ci)	(ci)->ci_want_resched = 0

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  On MIPS designs, request an ast to send us
 * through trap, marking the proc as needing a profiling tick.
 */
#define	need_proftick(p)	aston(p)

/*
 * Notify the current process (p) that it has a signal pending,
 * to be processed as soon as possible.
 */
#ifdef MULTIPROCESSOR
#define	signotify(p)		(aston(p), cpu_unidle((p)->p_cpu))
#else
#define	signotify(p)		aston(p)
#endif

#define	aston(p)		((p)->p_md.md_astpending = 1)

#ifdef CPU_R8000
#define	mips_sync()		__asm__ volatile ("lw $0, 0(%0)" :: \
				    "r" (PHYS_TO_XKPHYS(0, CCA_NC)) : "memory")
#else
#define	mips_sync()		__asm__ volatile ("sync" ::: "memory")
#endif
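
/*
 * Illustrative sketch (not part of the original header): mips_sync()
 * acts as a memory barrier, e.g. to make a data store visible before a
 * flag store that another processor polls.  `shared' is a placeholder.
 *
 *	shared->data = value;
 *	mips_sync();
 *	shared->ready = 1;
 */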

#endif /* _KERNEL && !_LOCORE */

#ifdef _KERNEL
/*
 * Values for the code field in a break instruction.
 */
#define	BREAK_INSTR		0x0000000d
#define	BREAK_VAL_MASK		0x03ff0000
#define	BREAK_VAL_SHIFT		16
#define	BREAK_KDB_VAL		512
#define	BREAK_SSTEP_VAL		513
#define	BREAK_BRKPT_VAL		514
#define	BREAK_SOVER_VAL		515
#define	BREAK_DDB_VAL		516
#define	BREAK_FPUEMUL_VAL	517
#define	BREAK_KDB	(BREAK_INSTR | (BREAK_KDB_VAL << BREAK_VAL_SHIFT))
#define	BREAK_SSTEP	(BREAK_INSTR | (BREAK_SSTEP_VAL << BREAK_VAL_SHIFT))
#define	BREAK_BRKPT	(BREAK_INSTR | (BREAK_BRKPT_VAL << BREAK_VAL_SHIFT))
#define	BREAK_SOVER	(BREAK_INSTR | (BREAK_SOVER_VAL << BREAK_VAL_SHIFT))
#define	BREAK_DDB	(BREAK_INSTR | (BREAK_DDB_VAL << BREAK_VAL_SHIFT))
#define	BREAK_FPUEMUL	(BREAK_INSTR | (BREAK_FPUEMUL_VAL << BREAK_VAL_SHIFT))

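/*
 * Illustrative sketch (not part of the original header): extracting the
 * code field from a faulting break instruction word `insn'.
 *
 *	u_int code = (insn & BREAK_VAL_MASK) >> BREAK_VAL_SHIFT;
 *	if (code == BREAK_SSTEP_VAL)
 *		... handle a single-step breakpoint ...
 */
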
#endif /* _KERNEL */

/*
 * CTL_MACHDEP definitions.
 */
#define	CPU_ALLOWAPERTURE	1	/* allow mmap of /dev/xf86 */
		/*		2	   formerly: keyboard reset */
		/*		3	   formerly: CPU_LIDSUSPEND */
#define CPU_LIDACTION		4	/* action caused by lid close */
#define	CPU_MAXID		5	/* number of valid machdep ids */

#define	CTL_MACHDEP_NAMES {			\
	{ 0, 0 },				\
	{ "allowaperture", CTLTYPE_INT },	\
	{ 0, 0 },				\
	{ 0, 0 },				\
	{ "lidaction", CTLTYPE_INT },		\
}

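/*
 * Userland sketch (illustrative only, not part of the original header):
 * reading machdep.allowaperture with sysctl(2), given the usual
 * <sys/types.h>, <sys/sysctl.h> and <err.h> includes.
 *
 *	int mib[2] = { CTL_MACHDEP, CPU_ALLOWAPERTURE };
 *	int value;
 *	size_t len = sizeof(value);
 *	if (sysctl(mib, 2, &value, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 */
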
/*
 * MIPS CPU types (cp_imp).
 */
#define	MIPS_R2000	0x01	/* MIPS R2000 CPU		ISA I   */
#define	MIPS_R3000	0x02	/* MIPS R3000 CPU		ISA I   */
#define	MIPS_R6000	0x03	/* MIPS R6000 CPU		ISA II	*/
#define	MIPS_R4000	0x04	/* MIPS R4000/4400 CPU		ISA III	*/
#define	MIPS_R3LSI	0x05	/* LSI Logic R3000 derivative	ISA I	*/
#define	MIPS_R6000A	0x06	/* MIPS R6000A CPU		ISA II	*/
#define	MIPS_CN50XX	0x06	/* Cavium OCTEON CN50xx		MIPS64R2*/
#define	MIPS_R3IDT	0x07	/* IDT R3000 derivative		ISA I	*/
#define	MIPS_R10000	0x09	/* MIPS R10000/T5 CPU		ISA IV  */
#define	MIPS_R4200	0x0a	/* MIPS R4200 CPU (ICE)		ISA III */
#define	MIPS_R4300	0x0b	/* NEC VR4300 CPU		ISA III */
#define	MIPS_R4100	0x0c	/* NEC VR41xx CPU MIPS-16	ISA III */
#define	MIPS_R12000	0x0e	/* MIPS R12000			ISA IV  */
#define	MIPS_R14000	0x0f	/* MIPS R14000			ISA IV  */
#define	MIPS_R8000	0x10	/* MIPS R8000 Blackbird/TFP	ISA IV  */
#define	MIPS_R4600	0x20	/* PMCS R4600 Orion		ISA III */
#define	MIPS_R4700	0x21	/* PMCS R4700 Orion		ISA III */
#define	MIPS_R3TOSH	0x22	/* Toshiba R3000 based CPU	ISA I	*/
#define	MIPS_R5000	0x23	/* MIPS R5000 CPU		ISA IV  */
#define	MIPS_RM7000	0x27	/* PMCS RM7000 CPU		ISA IV  */
#define	MIPS_RM52X0	0x28	/* PMCS RM52X0 CPU		ISA IV  */
#define	MIPS_RM9000	0x34	/* PMCS RM9000 CPU		ISA IV  */
#define	MIPS_LOONGSON	0x42	/* STC LoongSon CPU		ISA III */
#define	MIPS_VR5400	0x54	/* NEC Vr5400 CPU		ISA IV+ */
#define	MIPS_LOONGSON2	0x63	/* STC LoongSon2/3 CPU		ISA III+ */
#define	MIPS_CN63XX	0x90	/* Cavium OCTEON II CN6[23]xx	MIPS64R2 */
#define	MIPS_CN68XX	0x91	/* Cavium OCTEON II CN68xx	MIPS64R2 */
#define	MIPS_CN66XX	0x92	/* Cavium OCTEON II CN66xx	MIPS64R2 */
#define	MIPS_CN61XX	0x93	/* Cavium OCTEON II CN6[01]xx	MIPS64R2 */
#define	MIPS_CN78XX	0x95	/* Cavium OCTEON III CN7[678]xx	MIPS64R2 */
#define	MIPS_CN71XX	0x96	/* Cavium OCTEON III CN7[01]xx	MIPS64R2 */
#define	MIPS_CN73XX	0x97	/* Cavium OCTEON III CN7[23]xx	MIPS64R2 */

/*
 * MIPS FPU types. Only the software emulation type is listed here; the
 * others match the CPU type.
 */
#define	MIPS_SOFT	0x00	/* Software emulation		ISA I   */


#if defined(_KERNEL) && !defined(_LOCORE)

extern register_t protosr;
extern int cpu_has_userlocal;

#ifdef FPUEMUL
#define	CPU_HAS_FPU(ci)	((ci)->ci_hw.c1prid != 0)
#else
#define	CPU_HAS_FPU(ci)	1
#endif

struct exec_package;
struct user;

void	tlb_asid_wrap(struct cpu_info *);
void	tlb_flush(int);
void	tlb_flush_addr(vaddr_t);
void	tlb_init(unsigned int);
int64_t	tlb_probe(vaddr_t);
void	tlb_set_gbase(vaddr_t, vsize_t);
void	tlb_set_page_mask(uint32_t);
void	tlb_set_pid(u_int);
void	tlb_set_wired(uint32_t);
int	tlb_update(vaddr_t, register_t);
void	tlb_update_indexed(vaddr_t, register_t, register_t, uint);

void	build_trampoline(vaddr_t, vaddr_t);
void	cpu_switchto_asm(struct proc *, struct proc *);
int	exec_md_map(struct proc *, struct exec_package *);
void	savectx(struct user *, int);

void	enable_fpu(struct proc *);
void	save_fpu(void);
int	fpe_branch_emulate(struct proc *, struct trapframe *, uint32_t,
	    vaddr_t);
void	MipsSaveCurFPState(struct proc *);
void	MipsSaveCurFPState16(struct proc *);
void	MipsSwitchFPState(struct proc *, struct trapframe *);
void	MipsSwitchFPState16(struct proc *, struct trapframe *);

int	guarded_read_1(paddr_t, uint8_t *);
int	guarded_read_2(paddr_t, uint16_t *);
int	guarded_read_4(paddr_t, uint32_t *);
int	guarded_write_4(paddr_t, uint32_t);

void	MipsFPTrap(struct trapframe *);
register_t MipsEmulateBranch(struct trapframe *, vaddr_t, uint32_t, uint32_t);

int	classify_insn(uint32_t);
#define	INSNCLASS_NEUTRAL	0
#define	INSNCLASS_CALL		1
#define	INSNCLASS_BRANCH	2

/*
 * R4000 end-of-page errata workaround routines
 */

extern int r4000_errata;
u_int	eop_page_check(paddr_t);
void	eop_tlb_flush_addr(struct pmap *, vaddr_t, u_long);
int	eop_tlb_miss_handler(struct trapframe *, struct cpu_info *,
	    struct proc *);
void	eop_cleanup(struct trapframe *, struct proc *);

/*
 * Low level access routines to CPU registers
 */

void	setsoftintr0(void);
void	clearsoftintr0(void);
void	setsoftintr1(void);
void	clearsoftintr1(void);
register_t enableintr(void);
register_t disableintr(void);
register_t getsr(void);
register_t setsr(register_t);

u_int	cp0_get_count(void);
register_t cp0_get_config(void);
uint32_t cp0_get_config_1(void);
uint32_t cp0_get_config_2(void);
uint32_t cp0_get_config_3(void);
uint32_t cp0_get_config_4(void);
uint32_t cp0_get_pagegrain(void);
register_t cp0_get_prid(void);
void	cp0_reset_cause(register_t);
void	cp0_set_compare(u_int);
void	cp0_set_config(register_t);
void	cp0_set_pagegrain(uint32_t);
void	cp0_set_trapbase(register_t);
u_int	cp1_get_prid(void);

static inline uint32_t
cp0_get_hwrena(void)
{
	uint32_t value;
	__asm__ volatile ("mfc0 %0, $7" : "=r" (value));
	return value;
}

static inline void
cp0_set_hwrena(uint32_t value)
{
	__asm__ volatile ("mtc0 %0, $7" : : "r" (value));
}

static inline void
cp0_set_userlocal(void *value)
{
	__asm__ volatile (
	"	.set	push\n"
	"	.set	mips64r2\n"
	"	dmtc0	%0, $4, 2\n"
	"	.set	pop\n"
	: : "r" (value));
}

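/*
 * Illustrative sketch (not part of the original header): on CPUs with
 * UserLocal support, the kernel typically enables user-mode rdhwr access
 * to the register (bit 29 of HWREna on MIPS64r2) and loads the current
 * thread's TCB pointer into it on context switch.  `tcb' is a placeholder.
 *
 *	if (cpu_has_userlocal) {
 *		cp0_set_hwrena(cp0_get_hwrena() | (1U << 29));
 *		cp0_set_userlocal(tcb);
 *	}
 */
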
static inline u_long
intr_disable(void)
{
	return disableintr();
}

static inline void
intr_restore(u_long sr)
{
	setsr(sr);
}

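/*
 * Typical critical-section pattern around these helpers (illustrative
 * sketch, not part of the original header):
 *
 *	u_long s = intr_disable();
 *	... code that must run with interrupts disabled ...
 *	intr_restore(s);
 */
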
/*
 * Cache routines (may be overridden)
 */

#ifndef	Mips_SyncCache
#define	Mips_SyncCache(ci) \
	((ci)->ci_SyncCache)(ci)
#endif
#ifndef	Mips_InvalidateICache
#define	Mips_InvalidateICache(ci, va, l) \
	((ci)->ci_InvalidateICache)(ci, va, l)
#endif
#ifndef	Mips_InvalidateICachePage
#define	Mips_InvalidateICachePage(ci, va) \
	((ci)->ci_InvalidateICachePage)(ci, va)
#endif
#ifndef	Mips_SyncICache
#define	Mips_SyncICache(ci) \
	((ci)->ci_SyncICache)(ci)
#endif
#ifndef	Mips_SyncDCachePage
#define	Mips_SyncDCachePage(ci, va, pa) \
	((ci)->ci_SyncDCachePage)(ci, va, pa)
#endif
#ifndef	Mips_HitSyncDCachePage
#define	Mips_HitSyncDCachePage(ci, va, pa) \
	((ci)->ci_HitSyncDCachePage)(ci, va, pa)
#endif
#ifndef	Mips_HitSyncDCache
#define	Mips_HitSyncDCache(ci, va, l) \
	((ci)->ci_HitSyncDCache)(ci, va, l)
#endif
#ifndef	Mips_HitInvalidateDCache
#define	Mips_HitInvalidateDCache(ci, va, l) \
	((ci)->ci_HitInvalidateDCache)(ci, va, l)
#endif
#ifndef	Mips_IOSyncDCache
#define	Mips_IOSyncDCache(ci, va, l, h) \
	((ci)->ci_IOSyncDCache)(ci, va, l, h)
#endif

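/*
 * Illustrative sketch (not part of the original header): after the kernel
 * writes instructions into memory (e.g. a breakpoint or a copied-out
 * trampoline), the data cache is written back and the instruction cache
 * invalidated before the code is executed.  `va' and `len' are placeholders.
 *
 *	struct cpu_info *ci = curcpu();
 *	Mips_HitSyncDCache(ci, va, len);
 *	Mips_InvalidateICache(ci, va, len);
 */
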
#endif /* _KERNEL && !_LOCORE */
#endif /* !_MIPS64_CPU_H_ */