/*
 *	linux/arch/alpha/kernel/smp.c
 *
 *      2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com)
 *            Renamed the modified smp_call_function to smp_call_function_on_cpu()
 *            Created a function that conforms to the old calling convention
 *            of smp_call_function().
 *
 *            This is helpful for DCPI.
 *
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/bitops.h>

#include <asm/hwrpb.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"


#define DEBUG_SMP 0
#if DEBUG_SMP
#define DBGS(args)	printk args
#else
#define DBGS(args)
#endif

/* A collection of per-processor data.  */
struct cpuinfo_alpha cpu_data[NR_CPUS];
EXPORT_SYMBOL(cpu_data);

/* A collection of single-bit IPI messages.  */
static struct {
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;

enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};

/* Boot handshake: the boot CPU sets this to -1 to hold a starting
   secondary, then to 0 when the secondary may run calibrate_delay;
   the secondary sets it to 1 once it is fully alive.  */
static int smp_secondary_alive __initdata = 0;

/* Which cpu ids came online.  */
cpumask_t cpu_online_map;

EXPORT_SYMBOL(cpu_online_map);

int smp_num_probed;		/* Internal processor count */
int smp_num_cpus = 1;		/* Number that came online.  */
EXPORT_SYMBOL(smp_num_cpus);

extern void calibrate_delay(void);


/*
 * Called by both boot and secondaries to move global data into
 *  per-processor storage.
 */
static inline void __init
smp_store_cpu_info(int cpuid)
{
	cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
	cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
	cpu_data[cpuid].need_new_asn = 0;
	cpu_data[cpuid].asn_lock = 0;
}

/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __init
smp_setup_percpu_timer(int cpuid)
{
	cpu_data[cpuid].prof_counter = 1;
	cpu_data[cpuid].prof_multiplier = 1;
}

static void __init
wait_boot_cpu_to_stop(int cpuid)
{
	unsigned long stop = jiffies + 10*HZ;

	while (time_before(jiffies, stop)) {
		if (!smp_secondary_alive)
			return;
		barrier();
	}

	printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid);
	for (;;)
		barrier();
}
121
122/*
123 * Where secondaries begin a life of C.
124 */
125void __init
126smp_callin(void)
127{
128	int cpuid = hard_smp_processor_id();
129
130	if (cpu_test_and_set(cpuid, cpu_online_map)) {
131		printk("??, cpu 0x%x already present??\n", cpuid);
132		BUG();
133	}
134
135	/* Turn on machine checks.  */
136	wrmces(7);
137
138	/* Set trap vectors.  */
139	trap_init();
140
141	/* Set interrupt vector.  */
142	wrent(entInt, 0);
143
144	/* Get our local ticker going. */
145	smp_setup_percpu_timer(cpuid);
146
147	/* Call platform-specific callin, if specified */
148	if (alpha_mv.smp_callin) alpha_mv.smp_callin();
149
150	/* All kernel threads share the same mm context.  */
151	atomic_inc(&init_mm.mm_count);
152	current->active_mm = &init_mm;
153
154	/* Must have completely accurate bogos.  */
155	local_irq_enable();
156
157	/* Wait boot CPU to stop with irq enabled before running
158	   calibrate_delay. */
159	wait_boot_cpu_to_stop(cpuid);
160	mb();
161	calibrate_delay();
162
163	smp_store_cpu_info(cpuid);
164	/* Allow master to continue only after we written loops_per_jiffy.  */
165	wmb();
166	smp_secondary_alive = 1;
167
168	DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
169	      cpuid, current, current->active_mm));
170
171	/* Do nothing.  */
172	cpu_idle();
173}

/* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
static int __init
wait_for_txrdy (unsigned long cpumask)
{
	unsigned long timeout;

	if (!(hwrpb->txrdy & cpumask))
		return 0;

	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (!(hwrpb->txrdy & cpumask))
			return 0;
		udelay(10);
		barrier();
	}

	return -1;
}

/*
 * Send a message to a secondary's console.  "START" is one such
 * interesting message.  ;-)
 */
static void __init
send_secondary_console_msg(char *str, int cpuid)
{
	struct percpu_struct *cpu;
	register char *cp1, *cp2;
	unsigned long cpumask;
	size_t len;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);

	cpumask = (1UL << cpuid);
	if (wait_for_txrdy(cpumask))
		goto timeout;

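	/* The message goes out through this CPU's console IPC buffer:
	   the length in the first quadword, the text in the following
	   quadwords.  */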
	cp2 = str;
	len = strlen(cp2);
	*(unsigned int *)&cpu->ipc_buffer[0] = len;
	cp1 = (char *) &cpu->ipc_buffer[1];
	memcpy(cp1, cp2, len);

	/* atomic test and set */
	wmb();
	set_bit(cpuid, &hwrpb->rxrdy);

	if (wait_for_txrdy(cpumask))
		goto timeout;
	return;

 timeout:
	printk("Processor %x not ready\n", cpuid);
}

/*
 * A secondary console wants to send a message.  Receive it.
 */
static void
recv_secondary_console_msg(void)
{
	int mycpu, i, cnt;
	unsigned long txrdy = hwrpb->txrdy;
	char *cp1, *cp2, buf[80];
	struct percpu_struct *cpu;

	DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));

	mycpu = hard_smp_processor_id();

	for (i = 0; i < NR_CPUS; i++) {
		if (!(txrdy & (1UL << i)))
			continue;

		DBGS(("recv_secondary_console_msg: "
		      "TXRDY contains CPU %d.\n", i));

		cpu = (struct percpu_struct *)
		  ((char*)hwrpb
		   + hwrpb->processor_offset
		   + i * hwrpb->processor_size);

		DBGS(("recv_secondary_console_msg: on %d from %d"
		      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
		      mycpu, i, cpu->halt_reason, cpu->flags));

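		/* The console puts the message length in the upper 32
		   bits of the first IPC buffer quadword.  */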
		cnt = cpu->ipc_buffer[0] >> 32;
		if (cnt <= 0 || cnt >= 80)
			strcpy(buf, "<<< BOGUS MSG >>>");
		else {
			cp1 = (char *) &cpu->ipc_buffer[11];
			cp2 = buf;
			strcpy(cp2, cp1);

			while ((cp2 = strchr(cp2, '\r')) != 0) {
				*cp2 = ' ';
				if (cp2[1] == '\n')
					cp2[1] = ' ';
			}
		}

		DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
		      "message is '%s'\n", mycpu, buf));
	}

	hwrpb->txrdy = 0;
}

/*
 * Convince the console to have a secondary cpu begin execution.
 */
static int __init
secondary_cpu_start(int cpuid, struct task_struct *idle)
{
	struct percpu_struct *cpu;
	struct pcb_struct *hwpcb, *ipcb;
	unsigned long timeout;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);
	hwpcb = (struct pcb_struct *) cpu->hwpcb;
	ipcb = &task_thread_info(idle)->pcb;

	/* Initialize the CPU's HWPCB to something just good enough for
	   us to get started.  Immediately after starting, we'll swpctx
	   to the target idle task's pcb.  Reuse the stack in the mean
	   time.  Precalculate the target PCBB.  */
	hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
	hwpcb->usp = 0;
	hwpcb->ptbr = ipcb->ptbr;
	hwpcb->pcc = 0;
	hwpcb->asn = 0;
	hwpcb->unique = virt_to_phys(ipcb);
	hwpcb->flags = ipcb->flags;
	hwpcb->res1 = hwpcb->res2 = 0;

	DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
	      cpuid, idle->state, ipcb->flags));

	/* Set up the HWRPB fields that SRM uses to activate the secondary CPU */
	hwrpb->CPU_restart = __smp_callin;
	hwrpb->CPU_restart_data = (unsigned long) __smp_callin;

	/* Recalculate and update the HWRPB checksum */
	hwrpb_update_checksum(hwrpb);

	/*
	 * Send a "start" command to the specified processor.
	 */

	/* SRM III 3.4.1.3 */
	cpu->flags |= 0x22;	/* turn on Context Valid and Restart Capable */
	cpu->flags &= ~1;	/* turn off Bootstrap In Progress */
	wmb();

	send_secondary_console_msg("START\r\n", cpuid);

	/* Wait 10 seconds for an ACK from the console.  */
	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu->flags & 1)
			goto started;
		udelay(10);
		barrier();
	}
	printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
	return -1;

 started:
	DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
	return 0;
}


/*
 * Bring one cpu online.
 */
static int __init
smp_boot_one_cpu(int cpuid)
{
	struct task_struct *idle;
	unsigned long timeout;

	/* Cook up an idler for this guy.  Note that the address at
	   which it would normally begin is irrelevant -- it's going to
	   start where HWRPB.CPU_restart says to start.  But this gets
	   all the other task-y sort of data structures set up like we
	   wish.  We use fork_idle rather than kernel_thread since we
	   must avoid rescheduling the child.  */
	idle = fork_idle(cpuid);
	if (IS_ERR(idle))
		panic("failed fork for CPU %d", cpuid);

	DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
	      cpuid, idle->state, idle->flags));

	/* Signal the secondary to wait a moment.  */
	smp_secondary_alive = -1;

	/* Whirrr, whirrr, whirrrrrrrrr... */
	if (secondary_cpu_start(cpuid, idle))
		return -1;

	/* Notify the secondary CPU it can run calibrate_delay.  */
	mb();
	smp_secondary_alive = 0;

	/* We've been acked by the console; wait one second for
	   the task to start up for real.  */
	timeout = jiffies + 1*HZ;
	while (time_before(jiffies, timeout)) {
		if (smp_secondary_alive == 1)
			goto alive;
		udelay(10);
		barrier();
	}

	/* We failed to boot the CPU.  */

	printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
	return -1;

 alive:
	/* Another "Red Snapper". */
	return 0;
}

/*
 * Called from setup_arch.  Detect an SMP system and which processors
 * are present.
 */
void __init
setup_smp(void)
{
	struct percpu_struct *cpubase, *cpu;
	unsigned long i;

	if (boot_cpuid != 0) {
		printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
		       boot_cpuid);
	}

	if (hwrpb->nr_processors > 1) {
		int boot_cpu_palrev;

		DBGS(("setup_smp: nr_processors %ld\n",
		      hwrpb->nr_processors));

		cpubase = (struct percpu_struct *)
			((char*)hwrpb + hwrpb->processor_offset);
		boot_cpu_palrev = cpubase->pal_revision;

		for (i = 0; i < hwrpb->nr_processors; i++) {
			cpu = (struct percpu_struct *)
				((char *)cpubase + i*hwrpb->processor_size);
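			/* The 0x1cc mask checks the SRM per-CPU state
			   flags -- only count processors the console
			   reports as present and usable.  */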
			if ((cpu->flags & 0x1cc) == 0x1cc) {
				smp_num_probed++;
				/* Assume here that "whami" == index */
				cpu_set(i, cpu_present_map);
				cpu->pal_revision = boot_cpu_palrev;
			}

			DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
			      i, cpu->flags, cpu->type));
			DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
			      i, cpu->pal_revision));
		}
	} else {
		smp_num_probed = 1;
	}

	printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_map = %lx\n",
	       smp_num_probed, cpu_present_map.bits[0]);
}

/*
 * Called by smp_init to prepare the secondaries.
 */
void __init
smp_prepare_cpus(unsigned int max_cpus)
{
	/* Take care of some initial bookkeeping.  */
	memset(ipi_data, 0, sizeof(ipi_data));

	current_thread_info()->cpu = boot_cpuid;

	smp_store_cpu_info(boot_cpuid);
	smp_setup_percpu_timer(boot_cpuid);

	/* Nothing to do on a UP box, or when told not to.  */
	if (smp_num_probed == 1 || max_cpus == 0) {
		cpu_present_map = cpumask_of_cpu(boot_cpuid);
		printk(KERN_INFO "SMP mode deactivated.\n");
		return;
	}

	printk(KERN_INFO "SMP starting up secondaries.\n");

	smp_num_cpus = smp_num_probed;
}

void __devinit
smp_prepare_boot_cpu(void)
{
}

int __devinit
__cpu_up(unsigned int cpu)
{
	smp_boot_one_cpu(cpu);

	return cpu_online(cpu) ? 0 : -ENOSYS;
}

void __init
smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_online(cpu))
			bogosum += cpu_data[cpu].loops_per_jiffy;

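	/* loops_per_jiffy * HZ / 500000 gives BogoMIPS; print it with
	   two decimal places.  */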
	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       (bogosum + 2500) / (500000/HZ),
	       ((bogosum + 2500) / (5000/HZ)) % 100);
}


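/*
 * Per-CPU timer tick.  Profile on every tick; charge process time
 * every prof_counter ticks (normally every tick).
 */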
void
smp_percpu_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	int cpu = smp_processor_id();
	unsigned long user = user_mode(regs);
	struct cpuinfo_alpha *data = &cpu_data[cpu];

	old_regs = set_irq_regs(regs);

	/* Record kernel PC.  */
	profile_tick(CPU_PROFILING);

	if (!--data->prof_counter) {
		/* We need to make like a normal interrupt -- otherwise
		   timer interrupts ignore the global interrupt lock,
		   which would be a Bad Thing.  */
		irq_enter();

		update_process_times(user);

		data->prof_counter = data->prof_multiplier;

		irq_exit();
	}
	set_irq_regs(old_regs);
}

int __init
setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}


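/*
 * Post the message bits for each target CPU, then send the actual
 * interprocessor interrupts.  The first mb() orders the caller's data
 * against the message bits; the second orders the bits against the
 * wripir.
 */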
static void
send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation)
{
	int i;

	mb();
	for_each_cpu_mask(i, to_whom)
		set_bit(operation, &ipi_data[i].bits);

	mb();
	for_each_cpu_mask(i, to_whom)
		wripir(i);
}

/* Structure and data for smp_call_function.  This is designed to
   minimize static memory requirements.  Plus it looks cleaner.  */

struct smp_call_struct {
	void (*func) (void *info);
	void *info;
	long wait;
	atomic_t unstarted_count;
	atomic_t unfinished_count;
};

static struct smp_call_struct *smp_call_function_data;

/* Atomically drop data into a shared pointer.  The pointer is free if
   it is initially NULL.  If retry, spin until free.  */

static int
pointer_lock (void *lock, void *data, int retry)
{
	void *old, *tmp;

	mb();
 again:
	/* Compare and swap with zero.  */
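	/* ldq_l/stq_c: take the lock only if the word is still zero;
	   retry from 1: if the store-conditional fails, bail to 2: if
	   someone else already holds it.  */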
	asm volatile (
	"1:	ldq_l	%0,%1\n"
	"	mov	%3,%2\n"
	"	bne	%0,2f\n"
	"	stq_c	%2,%1\n"
	"	beq	%2,1b\n"
	"2:"
	: "=&r"(old), "=m"(*(void **)lock), "=&r"(tmp)
	: "r"(data)
	: "memory");

	if (old == 0)
		return 0;
	if (! retry)
		return -EBUSY;

	while (*(void **)lock)
		barrier();
	goto again;
}

void
handle_ipi(struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
	unsigned long ops;

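	/* Grab and clear all pending message bits atomically; anything
	   that arrives while we work is picked up on the next pass of
	   the outer loop.  */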
	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
	  mb();	/* Order bit clearing and data access. */
	  do {
		unsigned long which;

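		/* Pull off the lowest set message bit and convert it
		   to its message number.  */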
		which = ops & -ops;
		ops &= ~which;
		which = __ffs(which);

		switch (which) {
		case IPI_RESCHEDULE:
			/* Reschedule callback.  Everything to be done
			   is done by the interrupt return path.  */
			break;

		case IPI_CALL_FUNC:
		    {
			struct smp_call_struct *data;
			void (*func)(void *info);
			void *info;
			int wait;

			data = smp_call_function_data;
			func = data->func;
			info = data->info;
			wait = data->wait;

			/* Notify the sending CPU that the data has been
			   received, and execution is about to begin.  */
			mb();
			atomic_dec (&data->unstarted_count);

			/* At this point the structure may be gone unless
			   wait is true.  */
			(*func)(info);

			/* Notify the sending CPU that the task is done.  */
			mb();
			if (wait) atomic_dec (&data->unfinished_count);
			break;
		    }

		case IPI_CPU_STOP:
			halt();

		default:
			printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
			       this_cpu, which);
			break;
		}
	  } while (ops);

	  mb();	/* Order data access and bit testing. */
	}

	cpu_data[this_cpu].ipi_count++;

	if (hwrpb->txrdy)
		recv_secondary_console_msg();
}

void
smp_send_reschedule(int cpu)
{
#ifdef DEBUG_IPI_MSG
	if (cpu == hard_smp_processor_id())
		printk(KERN_WARNING
		       "smp_send_reschedule: Sending IPI to self.\n");
#endif
	send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
}

void
smp_send_stop(void)
{
	cpumask_t to_whom = cpu_possible_map;
	cpu_clear(smp_processor_id(), to_whom);
#ifdef DEBUG_IPI_MSG
	if (hard_smp_processor_id() != boot_cpu_id)
		printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
	send_ipi_message(to_whom, IPI_CPU_STOP);
}

/*
 * Run a function on all other CPUs.
 *  <func>	The function to run. This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <retry>	If true, keep retrying until ready.
 *  <wait>	If true, wait until function has completed on other CPUs.
 *  [RETURNS]   0 on success, else a negative status code.
 *
 * Does not return until the remote CPUs are nearly ready to execute
 * <func> or have already executed it.
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */

int
smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry,
			  int wait, cpumask_t to_whom)
{
	struct smp_call_struct data;
	unsigned long timeout;
	int num_cpus_to_call;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	data.wait = wait;

	cpu_clear(smp_processor_id(), to_whom);
	num_cpus_to_call = cpus_weight(to_whom);

	atomic_set(&data.unstarted_count, num_cpus_to_call);
	atomic_set(&data.unfinished_count, num_cpus_to_call);

	/* Acquire the smp_call_function_data mutex.  */
	if (pointer_lock(&smp_call_function_data, &data, retry))
		return -EBUSY;

	/* Send a message to the requested CPUs.  */
	send_ipi_message(to_whom, IPI_CALL_FUNC);

	/* Wait for a minimal response.  */
	timeout = jiffies + HZ;
	while (atomic_read (&data.unstarted_count) > 0
	       && time_before (jiffies, timeout))
		barrier();

	/* If there's no response yet, log a message but allow a longer
	 * timeout period -- if we get a response this time, log
	 * a message saying when we got it.
	 */
	if (atomic_read(&data.unstarted_count) > 0) {
		long start_time = jiffies;
		printk(KERN_ERR "%s: initial timeout -- trying long wait\n",
		       __FUNCTION__);
		timeout = jiffies + 30 * HZ;
		while (atomic_read(&data.unstarted_count) > 0
		       && time_before(jiffies, timeout))
			barrier();
		if (atomic_read(&data.unstarted_count) <= 0) {
			long delta = jiffies - start_time;
			printk(KERN_ERR
			       "%s: response %ld.%ld seconds into long wait\n",
			       __FUNCTION__, delta / HZ,
			       (100 * (delta - ((delta / HZ) * HZ))) / HZ);
		}
	}

	/* We either got one or timed out -- clear the lock. */
	mb();
	smp_call_function_data = NULL;

	/*
	 * If after both the initial and long timeout periods we still don't
	 * have a response, something is very wrong...
	 */
	BUG_ON(atomic_read (&data.unstarted_count) > 0);

	/* Wait for a complete response, if needed.  */
	if (wait) {
		while (atomic_read (&data.unfinished_count) > 0)
			barrier();
	}

	return 0;
}
EXPORT_SYMBOL(smp_call_function_on_cpu);

int
smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
{
	return smp_call_function_on_cpu (func, info, retry, wait,
					 cpu_online_map);
}
EXPORT_SYMBOL(smp_call_function);

static void
ipi_imb(void *ignored)
{
	imb();
}

void
smp_imb(void)
{
	/* We must wait for the other processors to flush their icaches
	   before continuing.  */
	if (on_each_cpu(ipi_imb, NULL, 1, 1))
		printk(KERN_CRIT "smp_imb: timed out\n");
}
EXPORT_SYMBOL(smp_imb);

static void
ipi_flush_tlb_all(void *ignored)
{
	tbia();
}

void
flush_tlb_all(void)
{
	/* Although we don't have any data to pass, we do want to
	   synchronize with the other processors.  */
	if (on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_all: timed out\n");
	}
}

#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)

static void
ipi_flush_tlb_mm(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
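			/* The only user of this mm is running here, so no
			   other CPU needs a flush; just make them forget
			   any ASN they have for it so a fresh one is
			   allocated the next time it is used there.  */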
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
	}

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

struct flush_tlb_page_struct {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
};

static void
ipi_flush_tlb_page(void *x)
{
	struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
	struct mm_struct * mm = data->mm;

	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current_page(mm, data->vma, data->addr);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_tlb_page_struct data;
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current_page(mm, vma, addr);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	data.vma = vma;
	data.mm = mm;
	data.addr = addr;

	if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_page: timed out\n");
	}

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	/* On the Alpha we always flush the whole user tlb.  */
	flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);


static void
ipi_flush_icache_page(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		__load_new_mm_context(mm);
	else
		flush_tlb_other(mm);
}

void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			unsigned long addr, int len)
{
	struct mm_struct *mm = vma->vm_mm;

	if ((vma->vm_flags & VM_EXEC) == 0)
		return;

	preempt_disable();

	if (mm == current->active_mm) {
		__load_new_mm_context(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) {
		printk(KERN_CRIT "flush_icache_page: timed out\n");
	}

	preempt_enable();
}
