machdep.c revision 217688
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 2001 Benno Rice
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *	$NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/aim/machdep.c 217688 2011-01-21 10:26:26Z pluknet $");

#include "opt_compat.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <net/netisr.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#include <machine/altivec.h>
#ifndef __powerpc64__
#include <machine/bat.h>
#endif
#include <machine/cpu.h>
#include <machine/elf.h>
#include <machine/fpu.h>
#include <machine/hid.h>
#include <machine/kdb.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/mmuvar.h>
#include <machine/pcb.h>
#include <machine/reg.h>
#include <machine/sigframe.h>
#include <machine/spr.h>
#include <machine/trap.h>
#include <machine/vmparam.h>

#include <ddb/ddb.h>

#include <dev/ofw/openfirm.h>

#ifdef DDB
extern vm_offset_t ksym_start, ksym_end;
#endif

int cold = 1;
#ifdef __powerpc64__
int cacheline_size = 128;
#else
int cacheline_size = 32;
#endif
int hw_direct_map = 1;

struct pcpu __pcpu[MAXCPU];

static struct trapframe frame0;

char		machine[] = "powerpc";
SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, "");

static void	cpu_startup(void *);
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

SYSCTL_INT(_machdep, CPU_CACHELINE, cacheline_size,
	   CTLFLAG_RD, &cacheline_size, 0, "");

uintptr_t	powerpc_init(vm_offset_t, vm_offset_t, vm_offset_t, void *);

int		setfault(faultbuf);		/* defined in locore.S */

long		Maxmem = 0;
long		realmem = 0;

#ifndef __powerpc64__
struct bat	battable[16];
#endif

struct kva_md_info kmi;

static void
cpu_startup(void *dummy)
{

	/*
	 * Initialise the decrementer-based clock.
	 */
	decr_init();

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	cpu_setup(PCPU_GET(cpuid));

#ifdef PERFMON
	perfmon_init();
#endif
	printf("real memory  = %ld (%ld MB)\n", ptoa(physmem),
	    ptoa(physmem) / 1048576);
	realmem = physmem;

	if (bootverbose)
		printf("available KVA = %zd (%zd MB)\n",
		    virtual_end - virtual_avail,
		    (virtual_end - virtual_avail) / 1048576);

	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			vm_offset_t size1 =
			    phys_avail[indx + 1] - phys_avail[indx];

			#ifdef __powerpc64__
			printf("0x%16lx - 0x%16lx, %ld bytes (%ld pages)\n",
			#else
			printf("0x%08x - 0x%08x, %d bytes (%ld pages)\n",
			#endif
			    phys_avail[indx], phys_avail[indx + 1] - 1, size1,
			    size1 / PAGE_SIZE);
		}
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ld (%ld MB)\n", ptoa(cnt.v_free_count),
	    ptoa(cnt.v_free_count) / 1048576);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();
}

extern char	kernel_text[], _end[];

#ifndef __powerpc64__
/* Bits for running on 64-bit systems in 32-bit mode. */
extern void	*testppc64, *testppc64size;
extern void	*restorebridge, *restorebridgesize;
extern void	*rfid_patch, *rfi_patch1, *rfi_patch2;
extern void	*trapcode64;
#endif

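/*
 * Low-level trap handler entry stubs and their sizes, provided by the
 * assembler trap code; powerpc_init() below copies them to the fixed
 * exception vectors.
 */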
#ifdef SMP
extern void	*rstcode, *rstsize;
#endif
extern void	*trapcode, *trapsize;
extern void	*alitrap, *alisize;
extern void	*dsitrap, *dsisize;
extern void	*decrint, *decrsize;
extern void	*extint, *extsize;
extern void	*dblow, *dbsize;
extern void	*imisstrap, *imisssize;
extern void	*dlmisstrap, *dlmisssize;
extern void	*dsmisstrap, *dsmisssize;

uintptr_t
powerpc_init(vm_offset_t startkernel, vm_offset_t endkernel,
    vm_offset_t basekernel, void *mdp)
{
	struct		pcpu *pc;
	vm_offset_t	end;
	void		*generictrap;
	size_t		trap_offset;
	void		*kmdp;
	char		*env;
	register_t	msr, scratch;
	uint8_t		*cache_check;
	int		cacheline_warn;
	#ifndef __powerpc64__
	int		ppc64;
	#endif

	end = 0;
	kmdp = NULL;
	trap_offset = 0;
	cacheline_warn = 0;

	/*
	 * Parse metadata if present and fetch parameters.  Must be done
	 * before the console is initialized so cninit gets the right value
	 * of boothowto.
	 */
	if (mdp != NULL) {
		preload_metadata = mdp;
		kmdp = preload_search_by_type("elf kernel");
		if (kmdp != NULL) {
			boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
			kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
			end = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);
#ifdef DDB
			ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
			ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
#endif
		}
	}

	/*
	 * Init params/tunables that can be overridden by the loader
	 */
	init_param1();

	/*
	 * Start initializing proc0 and thread0.
	 */
	proc_linkup0(&proc0, &thread0);
	thread0.td_frame = &frame0;

	/*
	 * Set up per-cpu data.
	 */
	pc = __pcpu;
	pcpu_init(pc, 0, sizeof(struct pcpu));
	pc->pc_curthread = &thread0;
	pc->pc_cpuid = 0;

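	/* Stash the pcpu pointer in SPRG0 so the PCPU_*() accessors work. */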
	__asm __volatile("mtsprg 0, %0" :: "r"(pc));

	/*
	 * Init mutexes, which we use heavily in PMAP
	 */

	mutex_init();

	/*
	 * Install the OF client interface
	 */

	OF_bootstrap();

	/*
	 * Initialize the console before printing anything.
	 */
	cninit();

	/*
	 * Complain if there is no metadata.
	 */
	if (mdp == NULL || kmdp == NULL) {
		printf("powerpc_init: no loader metadata.\n");
	}

	/*
	 * Init KDB
	 */

	kdb_init();

	/*
	 * PowerPC 970 CPUs have a misfeature requested by Apple that makes
	 * them pretend they have a 32-byte cacheline. Turn this off
	 * before we measure the cacheline size.
	 */

	switch (mfpvr() >> 16) {
		case IBM970:
		case IBM970FX:
		case IBM970MP:
		case IBM970GX:
			scratch = mfspr(SPR_HID5);
			scratch &= ~HID5_970_DCBZ_SIZE_HI;
			mtspr(SPR_HID5, scratch);
			break;
	}

	/*
	 * Initialize the interrupt tables and figure out our cache line
	 * size and whether or not we need the 64-bit bridge code.
	 */

	/*
	 * Disable translation in case the vector area hasn't been
	 * mapped (G5). Note that no OFW calls can be made until
	 * translation is re-enabled.
	 */

	msr = mfmsr();
	mtmsr((msr & ~(PSL_IR | PSL_DR)) | PSL_RI);
	isync();

	/*
	 * Measure the cacheline size using dcbz
	 *
	 * Use EXC_PGM as a playground. We are about to overwrite it
	 * anyway, we know it exists, and we know it is cache-aligned.
	 */

	cache_check = (void *)EXC_PGM;

	for (cacheline_size = 0; cacheline_size < 0x100; cacheline_size++)
		cache_check[cacheline_size] = 0xff;

	__asm __volatile("dcbz 0,%0":: "r" (cache_check) : "memory");

	/* Find the first byte dcbz did not zero to get the cache line size */
	for (cacheline_size = 0; cacheline_size < 0x100 &&
	    cache_check[cacheline_size] == 0; cacheline_size++);

	/* Work around psim bug */
	if (cacheline_size == 0) {
		cacheline_warn = 1;
		cacheline_size = 32;
	}

	#ifndef __powerpc64__
	/*
	 * Figure out whether we need to use the 64-bit PMAP. This works by
	 * executing an instruction that is only legal on 64-bit PPC (mtmsrd),
	 * and setting ppc64 = 0 if that causes a trap.
	 */

	ppc64 = 1;

	bcopy(&testppc64, (void *)EXC_PGM,  (size_t)&testppc64size);
	__syncicache((void *)EXC_PGM, (size_t)&testppc64size);

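	/*
	 * If mtmsrd is illegal (32-bit CPU), it traps to EXC_PGM, where the
	 * testppc64 stub installed above clears SPRG2 and resumes past the
	 * instruction, so ppc64 reads back as zero.
	 */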
	__asm __volatile("\
		mfmsr %0;	\
		mtsprg2 %1;	\
				\
		mtmsrd %0;	\
		mfsprg2 %1;"
	    : "=r"(scratch), "=r"(ppc64));

	if (ppc64)
		cpu_features |= PPC_FEATURE_64;

	/*
	 * Now copy restorebridge into all the handlers, if necessary,
	 * and set up the trap tables.
	 */

	if (cpu_features & PPC_FEATURE_64) {
		/* Patch the two instances of rfi -> rfid */
		bcopy(&rfid_patch, &rfi_patch1, 4);
	#ifdef KDB
		/* rfi_patch2 is at the end of dbleave */
		bcopy(&rfid_patch, &rfi_patch2, 4);
	#endif

		/*
		 * Copy a code snippet to restore 32-bit bridge mode
		 * to the top of every non-generic trap handler
		 */

		trap_offset += (size_t)&restorebridgesize;
		bcopy(&restorebridge, (void *)EXC_RST, trap_offset);
		bcopy(&restorebridge, (void *)EXC_DSI, trap_offset);
		bcopy(&restorebridge, (void *)EXC_ALI, trap_offset);
		bcopy(&restorebridge, (void *)EXC_PGM, trap_offset);
		bcopy(&restorebridge, (void *)EXC_MCHK, trap_offset);
		bcopy(&restorebridge, (void *)EXC_TRC, trap_offset);
		bcopy(&restorebridge, (void *)EXC_BPT, trap_offset);

		/*
		 * Set the common trap entry point to the one that
		 * knows to restore 32-bit operation on execution.
		 */

		generictrap = &trapcode64;
	} else {
		generictrap = &trapcode;
	}

	#else /* powerpc64 */
	cpu_features |= PPC_FEATURE_64;
	generictrap = &trapcode;
	#endif

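	/*
	 * Install the trap handlers at their architecturally fixed vector
	 * addresses.  On 32-bit bridge systems trap_offset skips over the
	 * restorebridge prologue copied in above.
	 */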
#ifdef SMP
	bcopy(&rstcode, (void *)(EXC_RST + trap_offset),  (size_t)&rstsize);
#else
	bcopy(generictrap, (void *)EXC_RST,  (size_t)&trapsize);
#endif

#ifdef KDB
	bcopy(&dblow,	(void *)(EXC_MCHK + trap_offset), (size_t)&dbsize);
	bcopy(&dblow,   (void *)(EXC_PGM + trap_offset),  (size_t)&dbsize);
	bcopy(&dblow,   (void *)(EXC_TRC + trap_offset),  (size_t)&dbsize);
	bcopy(&dblow,   (void *)(EXC_BPT + trap_offset),  (size_t)&dbsize);
#else
	bcopy(generictrap, (void *)EXC_MCHK, (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_PGM,  (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_TRC,  (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_BPT,  (size_t)&trapsize);
#endif
	bcopy(&alitrap,  (void *)(EXC_ALI + trap_offset),  (size_t)&alisize);
	bcopy(&dsitrap,  (void *)(EXC_DSI + trap_offset),  (size_t)&dsisize);
	bcopy(generictrap, (void *)EXC_ISI,  (size_t)&trapsize);
	#ifdef __powerpc64__
	bcopy(generictrap, (void *)EXC_DSE,  (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_ISE,  (size_t)&trapsize);
	#endif
	bcopy(generictrap, (void *)EXC_EXI,  (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_FPU,  (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_DECR, (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_SC,   (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_FPA,  (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_VEC,  (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_VECAST_G4, (size_t)&trapsize);
	bcopy(generictrap, (void *)EXC_VECAST_G5, (size_t)&trapsize);
	#ifndef __powerpc64__
	/* G2-specific TLB miss helper handlers */
	bcopy(&imisstrap, (void *)EXC_IMISS,  (size_t)&imisssize);
	bcopy(&dlmisstrap, (void *)EXC_DLMISS,  (size_t)&dlmisssize);
	bcopy(&dsmisstrap, (void *)EXC_DSMISS,  (size_t)&dsmisssize);
	#endif
	__syncicache((void *)EXC_RSVD, EXC_LAST - EXC_RSVD);

	/*
	 * Restore MSR
	 */
	mtmsr(msr);
	isync();

	/* Warn if cacheline size was not determined */
	if (cacheline_warn == 1) {
		printf("WARNING: cacheline size undetermined, setting to 32\n");
	}

	/*
	 * Choose a platform module so we can get the physical memory map.
	 */

	platform_probe_and_attach();

	/*
	 * Initialise virtual memory. Use BUS_PROBE_GENERIC priority
	 * in case the platform module had a better idea of what we
	 * should do.
	 */
	if (cpu_features & PPC_FEATURE_64)
		pmap_mmu_install(MMU_TYPE_G5, BUS_PROBE_GENERIC);
	else
		pmap_mmu_install(MMU_TYPE_OEA, BUS_PROBE_GENERIC);

	pmap_bootstrap(startkernel, endkernel);
	mtmsr(mfmsr() | PSL_IR|PSL_DR|PSL_ME|PSL_RI);
	isync();

	/*
	 * Initialize params/tunables that are derived from memsize
	 */
	init_param2(physmem);

	/*
	 * Grab booted kernel's name
	 */
	env = getenv("kernelname");
	if (env != NULL) {
		strlcpy(kernelname, env, sizeof(kernelname));
		freeenv(env);
	}

	/*
	 * Finish setting up thread0.
	 */
	thread0.td_pcb = (struct pcb *)
	    ((thread0.td_kstack + thread0.td_kstack_pages * PAGE_SIZE -
	    sizeof(struct pcb)) & ~15UL);
	bzero((void *)thread0.td_pcb, sizeof(struct pcb));
	pc->pc_curpcb = thread0.td_pcb;

	/* Initialise the message buffer. */
	msgbufinit(msgbufp, msgbufsize);

#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS,
		    "Boot flags requested debugger");
#endif

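	/*
	 * Return the initial stack pointer for thread0: just below its PCB,
	 * 16-byte aligned, with room left for a minimal call frame.
	 */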
	return (((uintptr_t)thread0.td_pcb -
	    (sizeof(struct callframe) - 3*sizeof(register_t))) & ~15UL);
}

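/*
 * Word-at-a-time bzero: zero bytes until the pointer is word-aligned,
 * clear eight words per loop iteration, then finish with single words
 * and bytes.
 */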
void
bzero(void *buf, size_t len)
{
	caddr_t	p;

	p = buf;

	while (((vm_offset_t) p & (sizeof(u_long) - 1)) && len) {
		*p++ = 0;
		len--;
	}

	while (len >= sizeof(u_long) * 8) {
		*(u_long*) p = 0;
		*((u_long*) p + 1) = 0;
		*((u_long*) p + 2) = 0;
		*((u_long*) p + 3) = 0;
		*((u_long*) p + 4) = 0;
		*((u_long*) p + 5) = 0;
		*((u_long*) p + 6) = 0;
		*((u_long*) p + 7) = 0;
		len -= sizeof(u_long) * 8;
		p += sizeof(u_long) * 8;
	}

	while (len >= sizeof(u_long)) {
		*(u_long*) p = 0;
		len -= sizeof(u_long);
		p += sizeof(u_long);
	}

	while (len) {
		*p++ = 0;
		len--;
	}
}

void
cpu_boot(int howto)
{
}

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{
	/* TBD */
}

void
cpu_initclocks(void)
{

	decr_tc_init();
	cpu_initclocks_bsp();
}

/*
 * Shutdown the CPU as much as possible.
 */
void
cpu_halt(void)
{

	OF_exit();
}

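/*
 * SRR0 in the trap frame holds the PC at which the thread will resume;
 * the PSL_SE bit in SRR1 enables single-step trace exceptions.
 */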
int
ptrace_set_pc(struct thread *td, unsigned long addr)
{
	struct trapframe *tf;

	tf = td->td_frame;
	tf->srr0 = (register_t)addr;

	return (0);
}

int
ptrace_single_step(struct thread *td)
{
	struct trapframe *tf;

	tf = td->td_frame;
	tf->srr1 |= PSL_SE;

	return (0);
}

int
ptrace_clear_single_step(struct thread *td)
{
	struct trapframe *tf;

	tf = td->td_frame;
	tf->srr1 &= ~PSL_SE;

	return (0);
}

void
kdb_cpu_clear_singlestep(void)
{

	kdb_frame->srr1 &= ~PSL_SE;
}

void
kdb_cpu_set_singlestep(void)
{

	kdb_frame->srr1 |= PSL_SE;
}

/*
 * Initialise a struct pcpu.
 */
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t sz)
{
#ifdef __powerpc64__
	/* Copy the SLB contents from the current CPU */
	memcpy(pcpu->pc_slb, PCPU_GET(slb), sizeof(pcpu->pc_slb));
#endif
}

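/*
 * Spinlock context management: interrupts are disabled only for the
 * outermost spinlock held by a thread; the saved MSR is restored when
 * the nesting count drops back to zero.
 */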
void
spinlock_enter(void)
{
	struct thread *td;
	register_t msr;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		msr = intr_disable();
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_msr = msr;
	} else
		td->td_md.md_spinlock_count++;
	critical_enter();
}

void
spinlock_exit(void)
{
	struct thread *td;
	register_t msr;

	td = curthread;
	critical_exit();
	msr = td->td_md.md_saved_msr;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		intr_restore(msr);
}

/*
 * kcopy(const void *src, void *dst, size_t len);
 *
 * Copy len bytes from src to dst, aborting if we encounter a fatal
 * page fault.
 *
 * kcopy() _must_ save and restore the old fault handler since it is
 * called by uiomove(), which may be in the path of servicing a non-fatal
 * page fault.
 */
int
kcopy(const void *src, void *dst, size_t len)
{
	struct thread	*td;
	faultbuf	env, *oldfault;
	int		rv;

	td = PCPU_GET(curthread);
	oldfault = td->td_pcb->pcb_onfault;
	if ((rv = setfault(env)) != 0) {
		td->td_pcb->pcb_onfault = oldfault;
		return (rv);
	}

	memcpy(dst, src, len);

	td->td_pcb->pcb_onfault = oldfault;
	return (0);
}

int db_trap_glue(struct trapframe *);		/* Called from trap_subr.S */

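/*
 * Kernel-mode entry into the debugger.  For a program exception, SRR1 bit
 * 0x20000 distinguishes a trap instruction (breakpoint) from other program
 * exception causes.
 */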
int
db_trap_glue(struct trapframe *frame)
{
	if (!(frame->srr1 & PSL_PR)
	    && (frame->exc == EXC_TRC || frame->exc == EXC_RUNMODETRC
		|| (frame->exc == EXC_PGM
		    && (frame->srr1 & 0x20000))
		|| frame->exc == EXC_BPT
		|| frame->exc == EXC_DSI)) {
		int type = frame->exc;
		if (type == EXC_PGM && (frame->srr1 & 0x20000)) {
			type = T_BREAKPOINT;
		}
		return (kdb_trap(type, 0, frame));
	}

	return (0);
}

#ifndef __powerpc64__

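/*
 * 32-bit (OEA) only: extract the VSID for a virtual address from the
 * pmap's segment register array.
 */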
uint64_t
va_to_vsid(pmap_t pm, vm_offset_t va)
{
	return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK);
}

#endif