/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/processor_info.h>

#include <kern/kalloc.h>
#include <kern/kern_types.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/timer_queue.h>
#include <kern/processor.h>
#include <kern/pms.h>

#include <vm/pmap.h>
#include <IOKit/IOHibernatePrivate.h>

#include <ppc/proc_reg.h>
#include <ppc/misc_protos.h>
#include <ppc/fpu_protos.h>
#include <ppc/machine_routines.h>
#include <ppc/cpu_internal.h>
#include <ppc/exception.h>
#include <ppc/asm.h>
#include <ppc/hw_perfmon.h>
#include <pexpert/pexpert.h>
#include <kern/cpu_data.h>
#include <ppc/mappings.h>
#include <ppc/Diagnostics.h>
#include <ppc/trap.h>
#include <ppc/machine_cpu.h>
#include <ppc/rtclock.h>

decl_mutex_data(static,ppt_lock);

unsigned int		real_ncpus = 1;
unsigned int		max_ncpus  = MAX_CPUS;

decl_simple_lock_data(static,rht_lock);

static unsigned int	rht_state = 0;
#define RHT_WAIT	0x01
#define RHT_BUSY	0x02

decl_simple_lock_data(static,SignalReadyLock);

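/*
 *	Handshake area used to hand a timebase value from one processor to another:
 *	the requester spins on "avail", loads "abstime" into its timebase registers,
 *	then sets "ready"; the responder sets "done" once it has re-enabled its own
 *	timebase.  See cpu_sync_timebase() and cpu_timebase_signal_handler() below.
 */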
struct SIGtimebase {
	volatile boolean_t	avail;
	volatile boolean_t	ready;
	volatile boolean_t	done;
	uint64_t	abstime;
};

perfCallback	   	perfCpuSigHook;			/* Pointer to CHUD cpu signal hook routine */

extern uint32_t			debugger_sync;

/*
 * Forward definitions
 */

void	cpu_sync_timebase(
			void);

void	cpu_timebase_signal_handler(
			struct per_proc_info    *proc_info,
			struct SIGtimebase		*timebaseAddr);

/*
 *	Routine:	cpu_bootstrap
 *	Function:
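 *		Initialize the locks used by this module (rht_lock,
 *		SignalReadyLock and ppt_lock).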
 */
void
cpu_bootstrap(
	void)
{
	simple_lock_init(&rht_lock,0);
	simple_lock_init(&SignalReadyLock,0);
	mutex_init(&ppt_lock,0);
}


/*
 *	Routine:	cpu_init
 *	Function:
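 *		Per-processor initialization at start or wake: restore the
 *		timebase saved by cpu_sleep(), reset the decrementer pop and
 *		resync timer deadlines, then record the processor type and
 *		mark the processor running.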
 */
void
cpu_init(
	void)
{
	struct per_proc_info *proc_info;

	proc_info = getPerProc();

	/*
	 * Restore the TBR.
	 */
	if (proc_info->save_tbu != 0 || proc_info->save_tbl != 0) {
		mttb(0);
		mttbu(proc_info->save_tbu);
		mttb(proc_info->save_tbl);
	}

	proc_info->rtcPop = EndOfAllTime;			/* forget any existing decrementer setting */
	etimer_resync_deadlines();				/* Now that the time base is sort of correct, request the next timer pop */

	proc_info->cpu_type = CPU_TYPE_POWERPC;
	proc_info->cpu_subtype = (cpu_subtype_t)proc_info->pf.rptdProc;
	proc_info->cpu_threadtype = CPU_THREADTYPE_NONE;
	proc_info->running = TRUE;

}

/*
 *	Routine:	cpu_machine_init
 *	Function:
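 *		Finish bringing a processor up: release the reset-handler lock
 *		for secondary processors, call the platform expert, handle wake
 *		from hibernation, synchronize the timebase with the master, and
 *		mark the processor BootDone and SignalReady.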
 */
void
cpu_machine_init(
	void)
{
	struct per_proc_info			*proc_info;
	volatile struct per_proc_info	*mproc_info;


	proc_info = getPerProc();
	mproc_info = PerProcTable[master_cpu].ppe_vaddr;

	if (proc_info != mproc_info) {
		simple_lock(&rht_lock);
		if (rht_state & RHT_WAIT)
			thread_wakeup(&rht_state);
		rht_state &= ~(RHT_BUSY|RHT_WAIT);
		simple_unlock(&rht_lock);
	}

	PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));

	if (proc_info->hibernate) {
		uint32_t	tbu, tbl;

		do {
			tbu = mftbu();
			tbl = mftb();
		} while (mftbu() != tbu);

		proc_info->hibernate = 0;
		hibernate_machine_init();

		// hibernate_machine_init() could take minutes and we don't want timeouts
		// to fire as soon as scheduling starts. Reset timebase so it appears
		// no time has elapsed, as it would for regular sleep.
		mttb(0);
		mttbu(tbu);
		mttb(tbl);
	}

	if (proc_info != mproc_info) {
		while (!((mproc_info->cpu_flags) & SignalReady))
			continue;
		cpu_sync_timebase();
	}

	ml_init_interrupt();
	if (proc_info != mproc_info)
		simple_lock(&SignalReadyLock);
	proc_info->cpu_flags |= BootDone|SignalReady;
	if (proc_info != mproc_info) {
		if (proc_info->ppXFlags & SignalReadyWait) {
			(void)hw_atomic_and(&proc_info->ppXFlags, ~SignalReadyWait);
			thread_wakeup(&proc_info->cpu_flags);
		}
		simple_unlock(&SignalReadyLock);
		pmsPark();						/* Timers should be cool now, park the power management stepper */
	}
}


/*
 *	Routine:	cpu_per_proc_alloc
 *	Function:
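 *		Allocate and initialize a per_proc_info structure along with its
 *		interrupt and debugger stacks.  Returns NULL if any allocation fails.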
 */
struct per_proc_info *
cpu_per_proc_alloc(
		void)
{
	struct per_proc_info	*proc_info = NULL;
	void			*interrupt_stack = NULL;
	void			*debugger_stack = NULL;

	if ((proc_info = (struct per_proc_info*)kalloc(sizeof(struct per_proc_info))) == (struct per_proc_info*)0)
		return (struct per_proc_info *)NULL;
	if ((interrupt_stack = kalloc(INTSTACK_SIZE)) == 0) {
		kfree(proc_info, sizeof(struct per_proc_info));
		return (struct per_proc_info *)NULL;
	}

	if ((debugger_stack = kalloc(KERNEL_STACK_SIZE)) == 0) {
		kfree(proc_info, sizeof(struct per_proc_info));
		kfree(interrupt_stack, INTSTACK_SIZE);
		return (struct per_proc_info *)NULL;
	}

	bzero((void *)proc_info, sizeof(struct per_proc_info));

	/* Set physical address of the second page */
	proc_info->pp2ndPage = (addr64_t)pmap_find_phys(kernel_pmap,
				((addr64_t)(unsigned int)proc_info) + 0x1000)
			       << PAGE_SHIFT;
	proc_info->next_savearea = (uint64_t)save_get_init();
	proc_info->pf = BootProcInfo.pf;
	proc_info->istackptr = (vm_offset_t)interrupt_stack + INTSTACK_SIZE - FM_SIZE;
	proc_info->intstack_top_ss = proc_info->istackptr;
	proc_info->debstackptr = (vm_offset_t)debugger_stack + KERNEL_STACK_SIZE - FM_SIZE;
	proc_info->debstack_top_ss = proc_info->debstackptr;

	queue_init(&proc_info->rtclock_timer.queue);
	proc_info->rtclock_timer.deadline = EndOfAllTime;

	return proc_info;

}


/*
 *	Routine:	cpu_per_proc_free
 *	Function:
 */
void
cpu_per_proc_free(
	struct per_proc_info	*proc_info
)
{
	if (proc_info->cpu_number == master_cpu)
		return;
	kfree((void *)(proc_info->intstack_top_ss - INTSTACK_SIZE + FM_SIZE), INTSTACK_SIZE);
	kfree((void *)(proc_info->debstack_top_ss - KERNEL_STACK_SIZE + FM_SIZE), KERNEL_STACK_SIZE);
	kfree((void *)proc_info, sizeof(struct per_proc_info));			/* Release the per_proc */
}


/*
 *	Routine:	cpu_per_proc_register
 *	Function:
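 *		Enter a per_proc_info structure into PerProcTable and assign it
 *		the next cpu number.  Fails if max_ncpus processors are already
 *		registered.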
 */
kern_return_t
cpu_per_proc_register(
	struct per_proc_info	*proc_info
)
{
	int						cpu;

	mutex_lock(&ppt_lock);
	if (real_ncpus >= max_ncpus) {
		mutex_unlock(&ppt_lock);
		return KERN_FAILURE;
	}
	cpu = real_ncpus;
	proc_info->cpu_number = cpu;
	PerProcTable[cpu].ppe_vaddr = proc_info;
	PerProcTable[cpu].ppe_paddr = (addr64_t)pmap_find_phys(kernel_pmap, (addr64_t)(unsigned int)proc_info) << PAGE_SHIFT;
	eieio();
	real_ncpus++;
	mutex_unlock(&ppt_lock);
	return KERN_SUCCESS;
}


/*
 *	Routine:	cpu_start
 *	Function:
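 *		Start the given processor.  For the currently running processor
 *		this just finishes initialization; for another processor we reset
 *		its per_proc state, install the reset handler if needed, and wait
 *		until it reports SignalReady.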
 */
kern_return_t
cpu_start(
	int cpu)
{
	struct per_proc_info	*proc_info;
	kern_return_t			ret;
	mapping_t				*mp;

	proc_info = PerProcTable[cpu].ppe_vaddr;

	if (cpu == cpu_number()) {
		PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));
		ml_init_interrupt();
		proc_info->cpu_flags |= BootDone|SignalReady;

		return KERN_SUCCESS;
	} else {
		proc_info->cpu_flags &= BootDone;
		proc_info->interrupts_enabled = 0;
		proc_info->pending_ast = AST_NONE;
		proc_info->istackptr = proc_info->intstack_top_ss;
		proc_info->rtcPop = EndOfAllTime;
		proc_info->FPU_owner = NULL;
		proc_info->VMX_owner = NULL;
		proc_info->pms.pmsStamp = 0;									/* Dummy transition time */
		proc_info->pms.pmsPop = EndOfAllTime;							/* Set the pop way into the future */
		proc_info->pms.pmsState = pmsParked;							/* Park the stepper */
		proc_info->pms.pmsCSetCmd = pmsCInit;							/* Set dummy initial hardware state */
		mp = (mapping_t *)(&proc_info->ppUMWmp);
		mp->mpFlags = 0x01000000 | mpLinkage | mpPerm | 1;
		mp->mpSpace = invalSpace;

		if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {

			simple_lock(&rht_lock);
			while (rht_state & RHT_BUSY) {
				rht_state |= RHT_WAIT;
				thread_sleep_usimple_lock((event_t)&rht_state,
						    &rht_lock, THREAD_UNINT);
			}
			rht_state |= RHT_BUSY;
			simple_unlock(&rht_lock);

			ml_phys_write((vm_offset_t)&ResetHandler + 0,
					  RESET_HANDLER_START);
			ml_phys_write((vm_offset_t)&ResetHandler + 4,
					  (vm_offset_t)_start_cpu);
			ml_phys_write((vm_offset_t)&ResetHandler + 8,
					  (vm_offset_t)&PerProcTable[cpu]);
		}
/*
 *		Note: we pass the current time to the other processor here. He will load it
 *		as early as possible so that there is a chance that it is close to accurate.
 *		After the machine is up a while, we will officially resync the clocks so
 *		that all processors are the same.  This is just to get close.
 */

		ml_get_timebase((unsigned long long *)&proc_info->ruptStamp);

		__asm__ volatile("sync");				/* Commit to storage */
		__asm__ volatile("isync");				/* Wait a second */
		ret = PE_cpu_start(proc_info->cpu_id,
						   proc_info->start_paddr, (vm_offset_t)proc_info);

		if (ret != KERN_SUCCESS) {
			if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
				simple_lock(&rht_lock);
				if (rht_state & RHT_WAIT)
					thread_wakeup(&rht_state);
				rht_state &= ~(RHT_BUSY|RHT_WAIT);
				simple_unlock(&rht_lock);
			}
		} else {
			simple_lock(&SignalReadyLock);
			if (!((*(volatile short *)&proc_info->cpu_flags) & SignalReady)) {
				(void)hw_atomic_or(&proc_info->ppXFlags, SignalReadyWait);
				thread_sleep_simple_lock((event_t)&proc_info->cpu_flags,
				                          &SignalReadyLock, THREAD_UNINT);
			}
			simple_unlock(&SignalReadyLock);

		}
		return(ret);
	}
}

/*
 *	Routine:	cpu_exit_wait
 *	Function:
 */
void
cpu_exit_wait(
	int	cpu)
{
	struct per_proc_info	*tpproc;

	if (cpu != master_cpu) {
		tpproc = PerProcTable[cpu].ppe_vaddr;
		while (!((*(volatile short *)&tpproc->cpu_flags) & SleepState)) {};
	}
}


/*
 *	Routine:	cpu_doshutdown
 *	Function:
 */
void
cpu_doshutdown(
	void)
{
	enable_preemption();
	processor_offline(current_processor());
}


/*
 *	Routine:	cpu_sleep
 *	Function:
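 *		Prepare this processor for sleep: shut down its timer queue
 *		(non-master), save any live FPU/vector context, and, on the
 *		master, install the reset handler and wait for all other
 *		processors to reach SleepState before saving the timebase and
 *		quiescing.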
 */
void
cpu_sleep(
	void)
{
	struct per_proc_info	*proc_info;
	unsigned int			i;
	unsigned int			wait_ncpus_sleep, ncpus_sleep;
	facility_context		*fowner;

	proc_info = getPerProc();

	proc_info->running = FALSE;

	if (proc_info->cpu_number != master_cpu) {
		timer_queue_shutdown(&proc_info->rtclock_timer.queue);
		proc_info->rtclock_timer.deadline = EndOfAllTime;
	}

	fowner = proc_info->FPU_owner;					/* Cache this */
	if(fowner) /* If anyone owns FPU, save it */
		fpu_save(fowner);
	proc_info->FPU_owner = NULL;						/* Set no fpu owner now */

	fowner = proc_info->VMX_owner;					/* Cache this */
	if(fowner) vec_save(fowner);					/* If anyone owns vectors, save it */
	proc_info->VMX_owner = NULL;						/* Set no vector owner now */

	if (proc_info->cpu_number == master_cpu) {
		proc_info->cpu_flags &= BootDone;
		proc_info->interrupts_enabled = 0;
		proc_info->pending_ast = AST_NONE;

		if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
			ml_phys_write((vm_offset_t)&ResetHandler + 0,
					  RESET_HANDLER_START);
			ml_phys_write((vm_offset_t)&ResetHandler + 4,
					  (vm_offset_t)_start_cpu);
			ml_phys_write((vm_offset_t)&ResetHandler + 8,
					  (vm_offset_t)&PerProcTable[master_cpu]);

			__asm__ volatile("sync");
			__asm__ volatile("isync");
		}

		wait_ncpus_sleep = real_ncpus-1;
		ncpus_sleep = 0;
		while (wait_ncpus_sleep != ncpus_sleep) {
			ncpus_sleep = 0;
			for(i=1; i < real_ncpus ; i++) {
				if ((*(volatile short *)&(PerProcTable[i].ppe_vaddr->cpu_flags)) & SleepState)
					ncpus_sleep++;
			}
		}

	}

	/*
	 * Save the TBR before stopping.
	 */
	do {
		proc_info->save_tbu = mftbu();
		proc_info->save_tbl = mftb();
	} while (mftbu() != proc_info->save_tbu);

	PE_cpu_machine_quiesce(proc_info->cpu_id);
}


/*
 *	Routine:	cpu_signal
 *	Function:
 *	Here is where we send a message to another processor.  So far we only have two:
 *	SIGPast and SIGPdebug.  SIGPast is used to preempt and kick off threads (this is
 *	currently disabled). SIGPdebug is used to enter the debugger.
 *
 *	We set up the SIGP function to indicate that this is a simple message and set the
 *	order code (MPsigpParm0) to SIGPast or SIGPdebug. After finding the per_proc
 *	block for the target, we lock the message block. Then we set the parameter(s).
 *	Next we change the lock (also called "busy") to "passing" and finally signal
 *	the other processor. Note that we only wait about half a millisecond to get the
 *	message lock.  If we time out, we return failure to our caller. It is their
 *	responsibility to recover.
 */
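/*
 *	For example, cpu_signal(target, SIGPast, 0, 0) asks the target processor to
 *	run an AST check; cpu_signal_handler() below is the receiving side of this
 *	protocol.
 */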
kern_return_t
cpu_signal(
	int target,
	int signal,
	unsigned int p1,
	unsigned int p2)
{

	unsigned int				holdStat;
	struct per_proc_info		*tpproc, *mpproc;
	int							busybitset=0;

#if DEBUG
	if(((unsigned int)target) >= MAX_CPUS) panic("cpu_signal: invalid target CPU - %08X\n", target);
#endif

	mpproc = getPerProc();							/* Point to our block */
	tpproc = PerProcTable[target].ppe_vaddr;		/* Point to the target's block */
	if(mpproc == tpproc) return KERN_FAILURE;		/* Cannot signal ourselves */

	if(!tpproc->running) return KERN_FAILURE;

	if (!(tpproc->cpu_flags & SignalReady)) return KERN_FAILURE;

	if((tpproc->MPsigpStat & MPsigpMsgp) == MPsigpMsgp) {	/* Is there an unreceived message already pending? */

		if(signal == SIGPwake) {					/* SIGPwake can merge into all others... */
			mpproc->hwCtr.numSIGPmwake++;			/* Account for merged wakes */
			return KERN_SUCCESS;
		}

		if((signal == SIGPast) && (tpproc->MPsigpParm0 == SIGPast)) {	/* We can merge ASTs */
			mpproc->hwCtr.numSIGPmast++;			/* Account for merged ASTs */
			return KERN_SUCCESS;					/* Don't bother to send this one... */
		}

		if (tpproc->MPsigpParm0 == SIGPwake) {
			if (hw_lock_mbits(&tpproc->MPsigpStat, (MPsigpMsgp | MPsigpAck),
			                  (MPsigpBusy | MPsigpPass ), MPsigpBusy, 0)) {
				busybitset = 1;
				mpproc->hwCtr.numSIGPmwake++;
			}
		}
	}

	if((busybitset == 0) &&
	   (!hw_lock_mbits(&tpproc->MPsigpStat, MPsigpMsgp, 0, MPsigpBusy,
	   (gPEClockFrequencyInfo.timebase_frequency_hz >> 11)))) {	/* Try to lock the message block with a .5ms timeout */
		mpproc->hwCtr.numSIGPtimo++;				/* Account for timeouts */
		return KERN_FAILURE;						/* Timed out, take your ball and go home... */
	}

	holdStat = MPsigpBusy | MPsigpPass | (MPsigpSigp << 8) | mpproc->cpu_number;	/* Set up the signal status word */
	tpproc->MPsigpParm0 = signal;					/* Set message order */
	tpproc->MPsigpParm1 = p1;						/* Set additional parm */
	tpproc->MPsigpParm2 = p2;						/* Set additional parm */

	__asm__ volatile("sync");						/* Make sure it's all there */

	tpproc->MPsigpStat = holdStat;					/* Set status and pass the lock */
	__asm__ volatile("eieio");						/* I'm a paranoid freak */

	if (busybitset == 0)
		PE_cpu_signal(mpproc->cpu_id, tpproc->cpu_id);	/* Kick the other processor */

	return KERN_SUCCESS;							/* All is goodness and rainbows... */
}


/*
 *	Routine:	cpu_signal_handler
 *	Function:
 *	Here is where we implement the receiver of the signaling protocol.
 *	We wait for the signal status area to be passed to us. Then we snarf
 *	up the status, the sender, and the 3 potential parms. Next we release
 *	the lock and signal the other guy.
 */
void
cpu_signal_handler(void)
{
	unsigned int holdStat, holdParm0, holdParm1, holdParm2;
	unsigned int *parmAddr;
	struct per_proc_info	*proc_info;
	int cpu;
	broadcastFunc xfunc;
	cpu = cpu_number();								/* Get the CPU number */

	proc_info = getPerProc();

/*
 *	Since we've been signaled, wait about 31 ms for the signal lock to pass
 */
	if(!hw_lock_mbits(&proc_info->MPsigpStat, (MPsigpMsgp | MPsigpAck), (MPsigpBusy | MPsigpPass),
	  (MPsigpBusy | MPsigpPass | MPsigpAck), (gPEClockFrequencyInfo.timebase_frequency_hz >> 5))) {
		panic("cpu_signal_handler: Lock pass timed out\n");
	}

	holdStat = proc_info->MPsigpStat;				/* Snarf stat word */
	holdParm0 = proc_info->MPsigpParm0;				/* Snarf parameter */
	holdParm1 = proc_info->MPsigpParm1;				/* Snarf parameter */
	holdParm2 = proc_info->MPsigpParm2;				/* Snarf parameter */

	__asm__ volatile("isync");						/* Make sure we don't unlock until memory is in */

	proc_info->MPsigpStat = holdStat & ~(MPsigpMsgp | MPsigpAck | MPsigpFunc);	/* Release lock */

	switch ((holdStat & MPsigpFunc) >> 8) {			/* Decode function code */

		case MPsigpIdle:							/* Was function cancelled? */
			return;									/* Yup... */

		case MPsigpSigp:							/* Signal Processor message? */

			switch (holdParm0) {					/* Decode SIGP message order */

				case SIGPast:						/* Should we do an AST? */
					proc_info->hwCtr.numSIGPast++;		/* Count this one */
#if 0
					kprintf("cpu_signal_handler: AST check on cpu %x\n", cpu_number());
#endif
					ast_check((processor_t)proc_info->processor);
					return;							/* All done... */

				case SIGPcpureq:					/* CPU specific function? */

					proc_info->hwCtr.numSIGPcpureq++;	/* Count this one */
					switch (holdParm1) {			/* Select specific function */

						case CPRQtimebase:

							cpu_timebase_signal_handler(proc_info, (struct SIGtimebase *)holdParm2);
							return;

						case CPRQsegload:
							return;

						case CPRQchud:
							parmAddr = (unsigned int *)holdParm2;	/* Get the destination address */
							if(perfCpuSigHook) {
								struct savearea *ssp = current_thread()->machine.pcb;
								if(ssp) {
									(perfCpuSigHook)(parmAddr[1] /* request */, ssp, 0, 0);
								}
							}
							parmAddr[1] = 0;
							parmAddr[0] = 0;		/* Show we're done */
							return;

						case CPRQscom:
							if(((scomcomm *)holdParm2)->scomfunc) {	/* Are we writing? */
								((scomcomm *)holdParm2)->scomstat = ml_scom_write(((scomcomm *)holdParm2)->scomreg, ((scomcomm *)holdParm2)->scomdata);	/* Write scom */
							}
							else {					/* No, reading... */
								((scomcomm *)holdParm2)->scomstat = ml_scom_read(((scomcomm *)holdParm2)->scomreg, &((scomcomm *)holdParm2)->scomdata);	/* Read scom */
							}
							return;

						case CPRQsps:
						{
							ml_set_processor_speed_slave(holdParm2);
							return;
						}
						default:
							panic("cpu_signal_handler: unknown CPU request - %08X\n", holdParm1);
							return;
					}


				case SIGPdebug:						/* Enter the debugger? */

					proc_info->hwCtr.numSIGPdebug++;	/* Count this one */
					proc_info->debugger_is_slave++;		/* Bump up the count to show we're here */
					(void)hw_atomic_sub(&debugger_sync, 1);	/* Show we've received the 'rupt */
					__asm__ volatile("tw 4,r3,r3");	/* Enter the debugger */
					return;							/* All done now... */

				case SIGPwake:						/* Wake up CPU */
					proc_info->hwCtr.numSIGPwake++;		/* Count this one */
					return;							/* No need to do anything, the interrupt does it all... */

				case SIGPcall:						/* Call function on CPU */
					proc_info->hwCtr.numSIGPcall++;	/* Count this one */
					xfunc = (broadcastFunc)holdParm1;				/* Do this since I can't seem to figure C out */
					xfunc(holdParm2);				/* Call the passed function */
					return;							/* Done... */

				default:
					panic("cpu_signal_handler: unknown SIGP message order - %08X\n", holdParm0);
					return;

			}

		default:
			panic("cpu_signal_handler: unknown SIGP function - %08X\n", (holdStat & MPsigpFunc) >> 8);
			return;

	}
	panic("cpu_signal_handler: we should never get here\n");
}


/*
 *	Routine:	cpu_sync_timebase
 *	Function:
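 *		Requesting side of timebase synchronization: ask the master (via
 *		SIGPcpureq/CPRQtimebase) for its timebase, load it, and complete
 *		the SIGtimebase handshake before resyncing timer deadlines.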
 */
void
cpu_sync_timebase(
	void)
{
	natural_t tbu, tbl;
	boolean_t	intr;
	struct SIGtimebase	syncClkSpot;

	intr = ml_set_interrupts_enabled(FALSE);		/* No interruptions in here */

	syncClkSpot.avail = FALSE;
	syncClkSpot.ready = FALSE;
	syncClkSpot.done = FALSE;

	while (cpu_signal(master_cpu, SIGPcpureq, CPRQtimebase,
							(unsigned int)&syncClkSpot) != KERN_SUCCESS)
		continue;

	while (syncClkSpot.avail == FALSE)
		continue;

	isync();

	/*
	 * Split the 64-bit timebase into halves here to keep the compiler from
	 * generating extra code in the timebase-set sequence below.
	 */
	tbu = syncClkSpot.abstime >> 32;
	tbl = (uint32_t)syncClkSpot.abstime;

	mttb(0);
	mttbu(tbu);
	mttb(tbl);

	syncClkSpot.ready = TRUE;

	while (syncClkSpot.done == FALSE)
		continue;

	etimer_resync_deadlines();									/* Start the timer */
	(void)ml_set_interrupts_enabled(intr);
}


/*
 *	Routine:	cpu_timebase_signal_handler
 *	Function:
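 *		Responding side of timebase synchronization (normally the master):
 *		disable the timebase via the platform hook if one exists, publish
 *		the current timebase value through the SIGtimebase area, and wait
 *		for the requester to load it before re-enabling.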
 */
void
cpu_timebase_signal_handler(
	struct per_proc_info    *proc_info,
	struct SIGtimebase		*timebaseAddr)
{
	unsigned int		tbu, tbu2, tbl;

	if(proc_info->time_base_enable !=  (void(*)(cpu_id_t, boolean_t ))NULL)
		proc_info->time_base_enable(proc_info->cpu_id, FALSE);

	timebaseAddr->abstime = 0;	/* Touch to force into cache */
	sync();

	do {
		asm volatile("	mftbu %0" : "=r" (tbu));
		asm volatile("	mftb %0" : "=r" (tbl));
		asm volatile("	mftbu %0" : "=r" (tbu2));
	} while (tbu != tbu2);

	timebaseAddr->abstime = ((uint64_t)tbu << 32) | tbl;
	sync();					/* Force order */

	timebaseAddr->avail = TRUE;

	while (timebaseAddr->ready == FALSE)
		continue;

	if(proc_info->time_base_enable !=  (void(*)(cpu_id_t, boolean_t ))NULL)
		proc_info->time_base_enable(proc_info->cpu_id, TRUE);

	timebaseAddr->done = TRUE;
}


/*
 *	Routine:	cpu_control
 *	Function:
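 *		Set performance monitor state on the given processor slot (clear
 *		PMCs, load PMC/MMCR values) after acquiring the perfmon facility
 *		for the calling task.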
 */
kern_return_t
cpu_control(
	int			slot_num,
	processor_info_t	info,
	unsigned int    	count)
{
	struct per_proc_info	*proc_info;
	cpu_type_t		tcpu_type;
	cpu_subtype_t		tcpu_subtype;
	processor_pm_regs_t	perf_regs;
	processor_control_cmd_t	cmd;
	boolean_t		oldlevel;
#define MMCR0_SUPPORT_MASK	0xf83f1fff
#define MMCR1_SUPPORT_MASK	0xffc00000
#define MMCR2_SUPPORT_MASK	0x80000000

	proc_info = PerProcTable[slot_num].ppe_vaddr;
	tcpu_type = proc_info->cpu_type;
	tcpu_subtype = proc_info->cpu_subtype;
	cmd = (processor_control_cmd_t) info;

	if (count < PROCESSOR_CONTROL_CMD_COUNT)
	  return(KERN_FAILURE);

	if ( tcpu_type != cmd->cmd_cpu_type ||
	     tcpu_subtype != cmd->cmd_cpu_subtype)
	  return(KERN_FAILURE);

	if (perfmon_acquire_facility(current_task()) != KERN_SUCCESS) {
		return(KERN_RESOURCE_SHORTAGE); /* cpu performance facility in use by another task */
	}

	switch (cmd->cmd_op)
	  {
	  case PROCESSOR_PM_CLR_PMC:       /* Clear Performance Monitor Counters */
	    switch (tcpu_subtype)
	      {
	      case CPU_SUBTYPE_POWERPC_750:
	      case CPU_SUBTYPE_POWERPC_7400:
	      case CPU_SUBTYPE_POWERPC_7450:
		{
		  oldlevel = ml_set_interrupts_enabled(FALSE);    /* disable interrupts */
		  mtpmc1(0x0);
		  mtpmc2(0x0);
		  mtpmc3(0x0);
		  mtpmc4(0x0);
		  ml_set_interrupts_enabled(oldlevel);     /* enable interrupts */
		  return(KERN_SUCCESS);
		}
	      default:
		return(KERN_FAILURE);
	      } /* tcpu_subtype */
	  case PROCESSOR_PM_SET_REGS:      /* Set Performance Monitor Registers */
	    switch (tcpu_subtype)
	      {
	      case CPU_SUBTYPE_POWERPC_750:
		if (count <  (PROCESSOR_CONTROL_CMD_COUNT +
		       PROCESSOR_PM_REGS_COUNT_POWERPC_750))
		  return(KERN_FAILURE);
		else
		  {
		    perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
		    oldlevel = ml_set_interrupts_enabled(FALSE);    /* disable interrupts */
		    mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
		    mtpmc1(PERFMON_PMC1(perf_regs));
		    mtpmc2(PERFMON_PMC2(perf_regs));
		    mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
		    mtpmc3(PERFMON_PMC3(perf_regs));
		    mtpmc4(PERFMON_PMC4(perf_regs));
		    ml_set_interrupts_enabled(oldlevel);     /* enable interrupts */
		    return(KERN_SUCCESS);
		  }
	      case CPU_SUBTYPE_POWERPC_7400:
	      case CPU_SUBTYPE_POWERPC_7450:
		if (count <  (PROCESSOR_CONTROL_CMD_COUNT +
		       PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
		  return(KERN_FAILURE);
		else
		  {
		    perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
		    oldlevel = ml_set_interrupts_enabled(FALSE);    /* disable interrupts */
		    mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
		    mtpmc1(PERFMON_PMC1(perf_regs));
		    mtpmc2(PERFMON_PMC2(perf_regs));
		    mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
		    mtpmc3(PERFMON_PMC3(perf_regs));
		    mtpmc4(PERFMON_PMC4(perf_regs));
		    mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
		    ml_set_interrupts_enabled(oldlevel);     /* enable interrupts */
		    return(KERN_SUCCESS);
		  }
	      default:
		return(KERN_FAILURE);
	      } /* switch tcpu_subtype */
	  case PROCESSOR_PM_SET_MMCR:
	    switch (tcpu_subtype)
	      {
	      case CPU_SUBTYPE_POWERPC_750:
		if (count < (PROCESSOR_CONTROL_CMD_COUNT +
		      PROCESSOR_PM_REGS_COUNT_POWERPC_750))
		  return(KERN_FAILURE);
		else
		  {
		    perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
		    oldlevel = ml_set_interrupts_enabled(FALSE);    /* disable interrupts */
		    mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
		    mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
		    ml_set_interrupts_enabled(oldlevel);     /* enable interrupts */
		    return(KERN_SUCCESS);
		  }
	      case CPU_SUBTYPE_POWERPC_7400:
	      case CPU_SUBTYPE_POWERPC_7450:
		if (count < (PROCESSOR_CONTROL_CMD_COUNT +
		      PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
		  return(KERN_FAILURE);
		else
		  {
		    perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
		    oldlevel = ml_set_interrupts_enabled(FALSE);    /* disable interrupts */
		    mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
		    mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
		    mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
		    ml_set_interrupts_enabled(oldlevel);     /* enable interrupts */
		    return(KERN_SUCCESS);
		  }
	      default:
		return(KERN_FAILURE);
	      } /* tcpu_subtype */
	  default:
	    return(KERN_FAILURE);
	  } /* switch cmd_op */
}


/*
 *	Routine:	cpu_info_count
 *	Function:
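 *		Return the size of the info structure for the given processor_info
 *		flavor, based on the master processor's cpu subtype.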
 */
kern_return_t
cpu_info_count(
	processor_flavor_t	flavor,
	unsigned int    	*count)
{
	cpu_subtype_t     tcpu_subtype;

	/*
	 * For now, we just assume that all CPUs are of the same type
	 */
	tcpu_subtype = PerProcTable[master_cpu].ppe_vaddr->cpu_subtype;
	switch (flavor) {
		case PROCESSOR_PM_REGS_INFO:
			switch (tcpu_subtype) {
				case CPU_SUBTYPE_POWERPC_750:

					*count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
					return(KERN_SUCCESS);

				case CPU_SUBTYPE_POWERPC_7400:
				case CPU_SUBTYPE_POWERPC_7450:

					*count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
					return(KERN_SUCCESS);

				default:
					*count = 0;
					return(KERN_INVALID_ARGUMENT);
			} /* switch tcpu_subtype */

		case PROCESSOR_TEMPERATURE:
			*count = PROCESSOR_TEMPERATURE_COUNT;
			return (KERN_SUCCESS);

		default:
			*count = 0;
			return(KERN_INVALID_ARGUMENT);

	}
}


/*
 *	Routine:	cpu_info
 *	Function:
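 *		Return processor information for the given flavor and slot; for
 *		PROCESSOR_PM_REGS_INFO this reads the performance monitor registers.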
 */
kern_return_t
cpu_info(
	processor_flavor_t	flavor,
	int			slot_num,
	processor_info_t	info,
	unsigned int    	*count)
{
	cpu_subtype_t     tcpu_subtype;
	processor_pm_regs_t  perf_regs;
	boolean_t oldlevel;

	tcpu_subtype = PerProcTable[slot_num].ppe_vaddr->cpu_subtype;

	switch (flavor) {
		case PROCESSOR_PM_REGS_INFO:

			perf_regs = (processor_pm_regs_t) info;

			switch (tcpu_subtype) {
				case CPU_SUBTYPE_POWERPC_750:

					if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_750)
					  return(KERN_FAILURE);

					oldlevel = ml_set_interrupts_enabled(FALSE);    /* disable interrupts */
					PERFMON_MMCR0(perf_regs) = mfmmcr0();
					PERFMON_PMC1(perf_regs)  = mfpmc1();
					PERFMON_PMC2(perf_regs)  = mfpmc2();
					PERFMON_MMCR1(perf_regs) = mfmmcr1();
					PERFMON_PMC3(perf_regs)  = mfpmc3();
					PERFMON_PMC4(perf_regs)  = mfpmc4();
					ml_set_interrupts_enabled(oldlevel);     /* enable interrupts */

					*count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
					return(KERN_SUCCESS);

				case CPU_SUBTYPE_POWERPC_7400:
				case CPU_SUBTYPE_POWERPC_7450:

					if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_7400)
					  return(KERN_FAILURE);

					oldlevel = ml_set_interrupts_enabled(FALSE);    /* disable interrupts */
					PERFMON_MMCR0(perf_regs) = mfmmcr0();
					PERFMON_PMC1(perf_regs)  = mfpmc1();
					PERFMON_PMC2(perf_regs)  = mfpmc2();
					PERFMON_MMCR1(perf_regs) = mfmmcr1();
					PERFMON_PMC3(perf_regs)  = mfpmc3();
					PERFMON_PMC4(perf_regs)  = mfpmc4();
					PERFMON_MMCR2(perf_regs) = mfmmcr2();
					ml_set_interrupts_enabled(oldlevel);     /* enable interrupts */

					*count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
					return(KERN_SUCCESS);

				default:
					return(KERN_FAILURE);
			} /* switch tcpu_subtype */

		case PROCESSOR_TEMPERATURE:					/* Get the temperature of a processor */

			*info = -1;								/* Temperature reporting is not supported */
			return(KERN_FAILURE);

		default:
			return(KERN_INVALID_ARGUMENT);

	} /* flavor */
}


/*
 *	Routine:	cpu_to_processor
 *	Function:
 */
processor_t
cpu_to_processor(
	int			cpu)
{
	return ((processor_t)PerProcTable[cpu].ppe_vaddr->processor);
}


/*
 *	Routine:	slot_type
 *	Function:
 */
cpu_type_t
slot_type(
	int		slot_num)
{
	return (PerProcTable[slot_num].ppe_vaddr->cpu_type);
}


/*
 *	Routine:	slot_subtype
 *	Function:
 */
cpu_subtype_t
slot_subtype(
	int		slot_num)
{
	return (PerProcTable[slot_num].ppe_vaddr->cpu_subtype);
}


/*
 *	Routine:	slot_threadtype
 *	Function:
 */
cpu_threadtype_t
slot_threadtype(
	int		slot_num)
{
	return (PerProcTable[slot_num].ppe_vaddr->cpu_threadtype);
}


/*
 *	Routine:	cpu_type
 *	Function:
 */
cpu_type_t
cpu_type(void)
{
	return (getPerProc()->cpu_type);
}


/*
 *	Routine:	cpu_subtype
 *	Function:
 */
cpu_subtype_t
cpu_subtype(void)
{
	return (getPerProc()->cpu_subtype);
}


/*
 *	Routine:	cpu_threadtype
 *	Function:
 */
cpu_threadtype_t
cpu_threadtype(void)
{
	return (getPerProc()->cpu_threadtype);
}

/*
 *	Call a function on all running processors.
 *
 *	Note that the synch parameter is used to wait until all functions are complete.
 *	It is not passed to the other processor and must be known by the called function.
 *	The called function must do a thread_wakeup on the synch if it decrements the
 *	synch count to 0.
 *
 *	We start by initializing the synchronizer to the number of possible cpus.
 *	Then we signal each possible processor.
 *	If the signal fails, we count it.  We also skip our own.
 *	When we are finished signaling, we adjust the synchronizer count down by the number of failed signals.
 *	Because the signaled processors are also decrementing the synchronizer count, the adjustment may result in 0.
 *	If this happens, all other processors are finished with the function.
 *	If so, we clear the wait and continue.
 *	Otherwise, we block waiting for the other processor(s) to finish.
 *
 *	Meanwhile, the other processors are decrementing the synchronizer when they are done.
 *	If it goes to zero, thread_wakeup is called to run the broadcaster.
 *
 *	Note that because we account for the broadcaster in the synchronization count, we will not get any
 *	premature wakeup calls.
 *
 *	Also note that when we do the adjustment of the synchronization count, if the result is 0, it means that
 *	all of the other processors are finished.  Otherwise, we know that there is at least one more.
 *	When that thread decrements the synchronizer to zero, it will do a thread_wakeup.
 *
 */
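/*
 *	For example (hypothetical caller): given "uint32_t sync;", a broadcaster could
 *	call cpu_broadcast(&sync, someFunc, 0), where someFunc() decrements sync with
 *	hw_atomic_sub() and does a thread_wakeup() on it when the count reaches zero.
 */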

int32_t
cpu_broadcast(uint32_t *synch, broadcastFunc func, uint32_t parm)
{
	int failsig;
	unsigned int cpu, ocpu;

	cpu = cpu_number();						/* Who are we? */
	failsig = 0;							/* Clear the failed signal count */

	if(real_ncpus > 1) {						/* Are we just a uni? */

		*synch = real_ncpus;					/* Set how many we are going to try */
		assert_wait((event_t)synch, THREAD_UNINT);		/* If more than one processor, we may have to wait */

		for(ocpu = 0; ocpu < real_ncpus; ocpu++) {		/* Tell everyone to call */

			if(ocpu == cpu)	continue;			/* If we talk to ourselves, people will wonder... */

			if(KERN_SUCCESS != cpu_signal(ocpu, SIGPcall, (uint32_t)func, parm)) {	/* Call the function on the other processor */
				failsig++;				/* Count failed signals */
			}
		}

		if (hw_atomic_sub(synch, failsig + 1) == 0)
			clear_wait(current_thread(), THREAD_AWAKENED);	/* Clear wait if we never signalled or all of the others finished */
		else
			thread_block(THREAD_CONTINUE_NULL);		/* Wait for everyone to get into step... */
	}

	return (real_ncpus - failsig - 1);				/* Return the number of guys actually signalled... */
}