/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 2001 Benno Rice
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *	$NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/powerpc/aim/aim_machdep.c 368823 2020-12-30 01:10:59Z bdragon $");

#include "opt_compat.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_platform.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <net/netisr.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#include <machine/altivec.h>
#ifndef __powerpc64__
#include <machine/bat.h>
#endif
#include <machine/cpu.h>
#include <machine/elf.h>
#include <machine/fpu.h>
#include <machine/hid.h>
#include <machine/kdb.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/mmuvar.h>
#include <machine/pcb.h>
#include <machine/reg.h>
#include <machine/sigframe.h>
#include <machine/spr.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/ofw_machdep.h>

#include <ddb/ddb.h>

#include <dev/ofw/openfirm.h>

#ifdef __powerpc64__
extern int n_slbs;
#endif

#ifndef __powerpc64__
struct bat	battable[16];
#endif

#ifndef __powerpc64__
/* Bits for running on 64-bit systems in 32-bit mode. */
extern void	*testppc64, *testppc64size;
extern void	*restorebridge, *restorebridgesize;
extern void	*rfid_patch, *rfi_patch1, *rfi_patch2;
extern void	*trapcode64;

extern Elf_Addr	_GLOBAL_OFFSET_TABLE_[];
#endif

extern void	*rstcode, *rstcodeend;
extern void	*trapcode, *trapcodeend;
extern void	*generictrap, *generictrap64;
extern void	*slbtrap, *slbtrapend;
extern void	*alitrap, *aliend;
extern void	*dsitrap, *dsiend;
extern void	*decrint, *decrsize;
extern void	*extint, *extsize;
extern void	*dblow, *dbend;
extern void	*imisstrap, *imisssize;
extern void	*dlmisstrap, *dlmisssize;
extern void	*dsmisstrap, *dsmisssize;

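/*
 * Per-CPU data pointer consumed by the early assembly entry point for
 * secondary CPUs; cpu_sleep() below also sets it, apparently so a wakeup
 * can come back up through the same path.
 */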
extern void *ap_pcpu;

void aim_cpu_init(vm_offset_t toc);

void
aim_cpu_init(vm_offset_t toc)
{
	size_t		trap_offset, trapsize;
	vm_offset_t	trap;
	register_t	msr, scratch;
	uint8_t		*cache_check;
	int		cacheline_warn;
	#ifndef __powerpc64__
	int		ppc64;
	#endif

	trap_offset = 0;
	cacheline_warn = 0;

	/* Various very early CPU fixups */
	switch (mfpvr() >> 16) {
		/*
		 * PowerPC 970 CPUs have a misfeature requested by Apple that
		 * makes them pretend they have a 32-byte cacheline. Turn this
		 * off before we measure the cacheline size.
		 */
		case IBM970:
		case IBM970FX:
		case IBM970MP:
		case IBM970GX:
			scratch = mfspr(SPR_HID5);
			scratch &= ~HID5_970_DCBZ_SIZE_HI;
			mtspr(SPR_HID5, scratch);
			break;
	#ifdef __powerpc64__
		case IBMPOWER7:
		case IBMPOWER7PLUS:
		case IBMPOWER8:
		case IBMPOWER8E:
			/* XXX: get from ibm,slb-size in device tree */
			n_slbs = 32;
			break;
	#endif
	}

	/*
	 * Initialize the interrupt tables and figure out our cache line
	 * size and whether or not we need the 64-bit bridge code.
	 */

	/*
	 * Disable translation in case the vector area hasn't been
	 * mapped (G5). Note that no OFW calls can be made until
	 * translation is re-enabled.
	 */

	msr = mfmsr();
	mtmsr((msr & ~(PSL_IR | PSL_DR)) | PSL_RI);

	/*
	 * Measure the cacheline size using dcbz.
	 *
	 * Use EXC_PGM as a playground. We are about to overwrite it
	 * anyway, we know it exists, and we know it is cache-aligned.
	 */

	cache_check = (void *)EXC_PGM;

	for (cacheline_size = 0; cacheline_size < 0x100; cacheline_size++)
		cache_check[cacheline_size] = 0xff;

	__asm __volatile("dcbz 0,%0":: "r" (cache_check) : "memory");

	/* Find the first byte dcbz did not zero to get the cache line size */
	for (cacheline_size = 0; cacheline_size < 0x100 &&
	    cache_check[cacheline_size] == 0; cacheline_size++);

	/* Work around psim bug */
	if (cacheline_size == 0) {
		cacheline_warn = 1;
		cacheline_size = 32;
	}

	#ifndef __powerpc64__
	/*
	 * Figure out whether we need to use the 64-bit PMAP. This works by
	 * executing an instruction that is only legal on 64-bit PPC (mtmsrd),
	 * and setting ppc64 = 0 if that causes a trap.
	 */

	ppc64 = 1;

	bcopy(&testppc64, (void *)EXC_PGM, (size_t)&testppc64size);
	__syncicache((void *)EXC_PGM, (size_t)&testppc64size);

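	/*
	 * If mtmsrd traps, the test stub installed at EXC_PGM above is
	 * presumably expected to clear SPRG2 and resume past the faulting
	 * instruction, so reading SPRG2 back yields 0 on 32-bit-only parts.
	 */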
	__asm __volatile("\
		mfmsr %0;	\
		mtsprg2 %1;	\
				\
		mtmsrd %0;	\
		mfsprg2 %1;"
	    : "=r"(scratch), "=r"(ppc64));

	if (ppc64)
		cpu_features |= PPC_FEATURE_64;

	/*
	 * Now copy restorebridge into all the handlers, if necessary,
	 * and set up the trap tables.
	 */

	if (cpu_features & PPC_FEATURE_64) {
		/* Patch the two instances of rfi -> rfid */
		bcopy(&rfid_patch, &rfi_patch1, 4);
	#ifdef KDB
		/* rfi_patch2 is at the end of dbleave */
		bcopy(&rfid_patch, &rfi_patch2, 4);
	#endif
	}
	#else /* powerpc64 */
	cpu_features |= PPC_FEATURE_64;
	#endif

	trapsize = (size_t)&trapcodeend - (size_t)&trapcode;

	/*
	 * Copy generic handler into every possible trap. Special cases will get
	 * different ones in a minute.
	 */
	for (trap = EXC_RST; trap < EXC_LAST; trap += 0x20)
		bcopy(&trapcode, (void *)trap, trapsize);

	#ifndef __powerpc64__
	if (cpu_features & PPC_FEATURE_64) {
		/*
		 * Copy a code snippet to restore 32-bit bridge mode
		 * to the top of every non-generic trap handler.
		 */

		trap_offset += (size_t)&restorebridgesize;
		bcopy(&restorebridge, (void *)EXC_RST, trap_offset);
		bcopy(&restorebridge, (void *)EXC_DSI, trap_offset);
		bcopy(&restorebridge, (void *)EXC_ALI, trap_offset);
		bcopy(&restorebridge, (void *)EXC_PGM, trap_offset);
		bcopy(&restorebridge, (void *)EXC_MCHK, trap_offset);
		bcopy(&restorebridge, (void *)EXC_TRC, trap_offset);
		bcopy(&restorebridge, (void *)EXC_BPT, trap_offset);
	} else {
		/*
		 * Use an IBAT and a DBAT to map the bottom 256M segment.
		 *
		 * It is very important to do it *now* to avoid taking a
		 * fault in .text / .data before the MMU is bootstrapped,
		 * because until then, the translation data has not been
		 * copied over from OpenFirmware, so our DSI/ISI will fail
		 * to find a match.
		 */

		battable[0x0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
		battable[0x0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);

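		/*
		 * Load the new entry into IBAT0/DBAT0. The .balign keeps the
		 * whole mtbat/isync sequence within one cache line, presumably
		 * as a precaution for the moment the new mapping takes effect.
		 */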
		__asm (".balign 32; \n"
		    "mtibatu 0,%0; mtibatl 0,%1; isync; \n"
		    "mtdbatu 0,%0; mtdbatl 0,%1; isync"
		    :: "r"(battable[0].batu), "r"(battable[0].batl));
	}
	#endif

	bcopy(&rstcode, (void *)(EXC_RST + trap_offset), (size_t)&rstcodeend -
	    (size_t)&rstcode);

#ifdef KDB
	bcopy(&dblow, (void *)(EXC_MCHK + trap_offset), (size_t)&dbend -
	    (size_t)&dblow);
	bcopy(&dblow, (void *)(EXC_PGM + trap_offset), (size_t)&dbend -
	    (size_t)&dblow);
	bcopy(&dblow, (void *)(EXC_TRC + trap_offset), (size_t)&dbend -
	    (size_t)&dblow);
	bcopy(&dblow, (void *)(EXC_BPT + trap_offset), (size_t)&dbend -
	    (size_t)&dblow);
#endif
	bcopy(&alitrap, (void *)(EXC_ALI + trap_offset), (size_t)&aliend -
	    (size_t)&alitrap);
	bcopy(&dsitrap, (void *)(EXC_DSI + trap_offset), (size_t)&dsiend -
	    (size_t)&dsitrap);

	#ifdef __powerpc64__
	/* Set TOC base so that the interrupt code can get at it */
	*((void **)TRAP_GENTRAP) = &generictrap;
	*((register_t *)TRAP_TOCBASE) = toc;

	bcopy(&slbtrap, (void *)EXC_DSE, (size_t)&slbtrapend - (size_t)&slbtrap);
	bcopy(&slbtrap, (void *)EXC_ISE, (size_t)&slbtrapend - (size_t)&slbtrap);
	#else
	/* Set branch address for trap code */
	if (cpu_features & PPC_FEATURE_64)
		*((void **)TRAP_GENTRAP) = &generictrap64;
	else
		*((void **)TRAP_GENTRAP) = &generictrap;
	*((void **)TRAP_TOCBASE) = _GLOBAL_OFFSET_TABLE_;

	/* G2-specific TLB miss helper handlers */
	bcopy(&imisstrap, (void *)EXC_IMISS, (size_t)&imisssize);
	bcopy(&dlmisstrap, (void *)EXC_DLMISS, (size_t)&dlmisssize);
	bcopy(&dsmisstrap, (void *)EXC_DSMISS, (size_t)&dsmisssize);
	#endif
	__syncicache((void *)EXC_RSVD, EXC_LAST - EXC_RSVD);

	/*
	 * Restore MSR
	 */
	mtmsr(msr);

	/* Warn if cacheline size was not determined */
	if (cacheline_warn == 1) {
		printf("WARNING: cacheline size undetermined, setting to 32\n");
	}

	/*
	 * Initialise virtual memory. Use BUS_PROBE_GENERIC priority
	 * in case the platform module had a better idea of what we
	 * should do.
	 */
	if (cpu_features & PPC_FEATURE_64)
		pmap_mmu_install(MMU_TYPE_G5, BUS_PROBE_GENERIC);
	else
		pmap_mmu_install(MMU_TYPE_OEA, BUS_PROBE_GENERIC);
}

/*
 * Shut down the CPU as much as possible.
 */
void
cpu_halt(void)
{

	OF_exit();
}

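/*
 * ptrace(PT_STEP) support: PSL_SE in the saved SRR1 arms single-step
 * tracing, so the thread takes a trace exception after executing one
 * instruction once it returns to user mode.
 */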
int
ptrace_single_step(struct thread *td)
{
	struct trapframe *tf;

	tf = td->td_frame;
	tf->srr1 |= PSL_SE;

	return (0);
}

int
ptrace_clear_single_step(struct thread *td)
{
	struct trapframe *tf;

	tf = td->td_frame;
	tf->srr1 &= ~PSL_SE;

	return (0);
}

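/* The KDB variants toggle the same bit in the debugger's trapframe. */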
void
kdb_cpu_clear_singlestep(void)
{

	kdb_frame->srr1 &= ~PSL_SE;
}

void
kdb_cpu_set_singlestep(void)
{

	kdb_frame->srr1 |= PSL_SE;
}

/*
 * Initialise a struct pcpu.
 */
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t sz)
{
#ifdef __powerpc64__
	/* Copy the SLB contents from the current CPU */
	memcpy(pcpu->pc_slb, PCPU_GET(slb), sizeof(pcpu->pc_slb));
#endif
}

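/*
 * On 32-bit AIM the VSID for an address lives in one of the 16 segment
 * registers, selected by the top four bits of the effective address.
 */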
#ifndef __powerpc64__
uint64_t
va_to_vsid(pmap_t pm, vm_offset_t va)
{
	return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK);
}
#endif

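/*
 * Early boot I/O runs with translation off (or 1:1 mapped), so a physical
 * address can be handed back unchanged here.
 */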
vm_offset_t
pmap_early_io_map(vm_paddr_t pa, vm_size_t size)
{

	return (pa);
}

/* From p3-53 of the MPC7450 RISC Microprocessor Family Reference Manual */
void
flush_disable_caches(void)
{
	register_t msr;
	register_t msscr0;
	register_t cache_reg;
	volatile uint32_t *memp;
	uint32_t temp;
	int i;
	int x;

	msr = mfmsr();
	powerpc_sync();
	mtmsr(msr & ~(PSL_EE | PSL_DR));
	msscr0 = mfspr(SPR_MSSCR0);
	msscr0 &= ~MSSCR0_L2PFE;
	mtspr(SPR_MSSCR0, msscr0);
	powerpc_sync();
	isync();
	__asm__ __volatile__("dssall; sync");
	powerpc_sync();
	isync();
	__asm__ __volatile__("dcbf 0,%0" :: "r"(0));
	__asm__ __volatile__("dcbf 0,%0" :: "r"(0));
	__asm__ __volatile__("dcbf 0,%0" :: "r"(0));

	/* Lock the L1 Data cache. */
	mtspr(SPR_LDSTCR, mfspr(SPR_LDSTCR) | 0xFF);
	powerpc_sync();
	isync();

	mtspr(SPR_LDSTCR, 0);

	/*
	 * Perform this in two stages: Flush the cache starting in RAM, then
	 * do it from ROM.
	 */
	memp = (volatile uint32_t *)0x00000000;
	for (i = 0; i < 128 * 1024; i++) {
		temp = *memp;
		__asm__ __volatile__("dcbf 0,%0" :: "r"(memp));
		memp += 32/sizeof(*memp);
	}

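	/*
	 * Second pass: read from ROM space while stepping the LDSTCR way-lock
	 * mask, so every way of the L1 gets displaced and flushed in turn.
	 */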
	memp = (volatile uint32_t *)0xfff00000;
	x = 0xfe;

	for (; x != 0xff;) {
		mtspr(SPR_LDSTCR, x);
		for (i = 0; i < 128; i++) {
			temp = *memp;
			__asm__ __volatile__("dcbf 0,%0" :: "r"(memp));
			memp += 32/sizeof(*memp);
		}
		x = ((x << 1) | 1) & 0xff;
	}
	mtspr(SPR_LDSTCR, 0);

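	/*
	 * Flush the L2 with the hardware flush assist (L2HWF), then disable
	 * and invalidate it; the L3, if enabled, gets the same treatment
	 * below.
	 */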
	cache_reg = mfspr(SPR_L2CR);
	if (cache_reg & L2CR_L2E) {
		cache_reg &= ~(L2CR_L2IO_7450 | L2CR_L2DO_7450);
		mtspr(SPR_L2CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L2CR, cache_reg | L2CR_L2HWF);
		while (mfspr(SPR_L2CR) & L2CR_L2HWF)
			; /* Busy wait for cache to flush */
		powerpc_sync();
		cache_reg &= ~L2CR_L2E;
		mtspr(SPR_L2CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L2CR, cache_reg | L2CR_L2I);
		powerpc_sync();
		while (mfspr(SPR_L2CR) & L2CR_L2I)
			; /* Busy wait for L2 cache invalidate */
		powerpc_sync();
	}

	cache_reg = mfspr(SPR_L3CR);
	if (cache_reg & L3CR_L3E) {
		cache_reg &= ~(L3CR_L3IO | L3CR_L3DO);
		mtspr(SPR_L3CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L3CR, cache_reg | L3CR_L3HWF);
		while (mfspr(SPR_L3CR) & L3CR_L3HWF)
			; /* Busy wait for cache to flush */
		powerpc_sync();
		cache_reg &= ~L3CR_L3E;
		mtspr(SPR_L3CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L3CR, cache_reg | L3CR_L3I);
		powerpc_sync();
		while (mfspr(SPR_L3CR) & L3CR_L3I)
			; /* Busy wait for L3 cache invalidate */
		powerpc_sync();
	}

	mtspr(SPR_HID0, mfspr(SPR_HID0) & ~HID0_DCE);
	powerpc_sync();
	isync();

	mtmsr(msr);
}

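/*
 * Put a 7450-class CPU into SLEEP: save the timebase and volatile SPRs,
 * flush and disable the caches, then set HID0[SLEEP] and wait with
 * MSR[POW]. The wakeup path presumably re-enters via the reset vector and
 * longjmp()s back through the jump buffer registered in the PCPU.
 */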
void
cpu_sleep(void)
{
	static u_quad_t timebase = 0;
	static register_t sprgs[4];
	static register_t srrs[2];

	jmp_buf resetjb;
	struct thread *fputd;
	struct thread *vectd;
	register_t hid0;
	register_t msr;
	register_t saved_msr;

	ap_pcpu = pcpup;

	PCPU_SET(restore, &resetjb);

	saved_msr = mfmsr();
	fputd = PCPU_GET(fputhread);
	vectd = PCPU_GET(vecthread);
	if (fputd != NULL)
		save_fpu(fputd);
	if (vectd != NULL)
		save_vec(vectd);
	if (setjmp(resetjb) == 0) {
		sprgs[0] = mfspr(SPR_SPRG0);
		sprgs[1] = mfspr(SPR_SPRG1);
		sprgs[2] = mfspr(SPR_SPRG2);
		sprgs[3] = mfspr(SPR_SPRG3);
		srrs[0] = mfspr(SPR_SRR0);
		srrs[1] = mfspr(SPR_SRR1);
		timebase = mftb();
		powerpc_sync();
		flush_disable_caches();
		hid0 = mfspr(SPR_HID0);
		hid0 = (hid0 & ~(HID0_DOZE | HID0_NAP)) | HID0_SLEEP;
		powerpc_sync();
		isync();
		msr = mfmsr() | PSL_POW;
		mtspr(SPR_HID0, hid0);
		powerpc_sync();

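		/*
		 * Writing PSL_POW with HID0[SLEEP] set stops the core; loop
		 * in case of a spurious wakeup before the real reset.
		 */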
		while (1)
			mtmsr(msr);
	}
	mttb(timebase);
	PCPU_SET(curthread, curthread);
	PCPU_SET(curpcb, curthread->td_pcb);
	pmap_activate(curthread);
	powerpc_sync();
	mtspr(SPR_SPRG0, sprgs[0]);
	mtspr(SPR_SPRG1, sprgs[1]);
	mtspr(SPR_SPRG2, sprgs[2]);
	mtspr(SPR_SPRG3, sprgs[3]);
	mtspr(SPR_SRR0, srrs[0]);
	mtspr(SPR_SRR1, srrs[1]);
	mtmsr(saved_msr);
	if (fputd == curthread)
		enable_fpu(curthread);
	if (vectd == curthread)
		enable_vec(curthread);
	powerpc_sync();
}