1/*
2 *  linux/arch/m32r/kernel/smp.c
3 *
4 *  M32R SMP support routines.
5 *
6 *  Copyright (c) 2001, 2002  Hitoshi Yamamoto
7 *
8 *  Taken from i386 version.
9 *    (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
10 *    (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
11 *
12 *  This code is released under the GNU General Public License version 2 or
13 *  later.
14 */
15
16#undef DEBUG_SMP
17
18#include <linux/irq.h>
19#include <linux/interrupt.h>
20#include <linux/spinlock.h>
21#include <linux/mm.h>
22#include <linux/smp.h>
23#include <linux/profile.h>
24#include <linux/cpu.h>
25
26#include <asm/cacheflush.h>
27#include <asm/pgalloc.h>
28#include <asm/atomic.h>
29#include <asm/io.h>
30#include <asm/mmu_context.h>
31#include <asm/m32r.h>
32
33/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
34/* Data structures and variables                                             */
35/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
36
37/*
38 * Structure and data for smp_call_function(). This is designed to minimise
39 * static memory requirements. It also looks cleaner.
40 */
/* Serializes smp_call_function(): only one cross-call may be in flight. */
static DEFINE_SPINLOCK(call_lock);

/* One instance lives on the caller's stack for the duration of a
 * smp_call_function(); cache-line aligned to keep it off shared lines. */
struct call_data_struct {
	void (*func) (void *info);	/* function to run on each target CPU */
	void *info;			/* opaque argument handed to func */
	atomic_t started;		/* # of CPUs that have picked up the call */
	atomic_t finished;		/* # of CPUs done running func (if wait) */
	int wait;			/* nonzero: initiator waits for 'finished' */
} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));

/* Points at the active call descriptor while a CALL_FUNCTION_IPI is in flight. */
static struct call_data_struct *call_data;

/*
 * For flush_cache_all()
 */
static DEFINE_SPINLOCK(flushcache_lock);
/* Bitmask of CPUs that still owe a cache-flush acknowledgement. */
static volatile unsigned long flushcache_cpumask = 0;

/*
 * For flush_tlb_others(): request parameters published to the IPI
 * receivers; stable while any bit in flush_cpumask is set, under
 * tlbstate_lock on the initiator side.
 */
static volatile cpumask_t flush_cpumask;
static struct mm_struct *flush_mm;
static struct vm_area_struct *flush_vma;
static volatile unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);
#define FLUSH_ALL 0xffffffff	/* sentinel va: flush the whole context */

/* Per-CPU profiling counters; defined elsewhere in this port. */
DECLARE_PER_CPU(int, prof_multiplier);
DECLARE_PER_CPU(int, prof_old_multiplier);
DECLARE_PER_CPU(int, prof_counter);

/* Per-IPI-number locks guarding the IPICR registers; defined elsewhere. */
extern spinlock_t ipi_lock[];
74
75/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
76/* Function Prototypes                                                       */
77/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
78
/* Forward declarations.  The non-static *_interrupt() functions are
 * entry points invoked from the low-level IPI dispatch code outside
 * this file. */
void smp_send_reschedule(int);
void smp_reschedule_interrupt(void);

void smp_flush_cache_all(void);
void smp_flush_cache_all_interrupt(void);

void smp_flush_tlb_all(void);
static void flush_tlb_all_ipi(void *);

void smp_flush_tlb_mm(struct mm_struct *);
void smp_flush_tlb_range(struct vm_area_struct *, unsigned long, \
	unsigned long);
void smp_flush_tlb_page(struct vm_area_struct *, unsigned long);
static void flush_tlb_others(cpumask_t, struct mm_struct *,
	struct vm_area_struct *, unsigned long);
void smp_invalidate_interrupt(void);

void smp_send_stop(void);
static void stop_this_cpu(void *);

int smp_call_function(void (*) (void *), void *, int, int);
void smp_call_function_interrupt(void);

void smp_send_timer(void);
void smp_ipi_timer_interrupt(struct pt_regs *);
void smp_local_timer_interrupt(void);

void send_IPI_allbutself(int, int);
static void send_IPI_mask(cpumask_t, int, int);
unsigned long send_IPI_mask_phys(cpumask_t, int, int);
109
110/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
111/* Rescheduling request Routines                                             */
112/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
113
114/*==========================================================================*
115 * Name:         smp_send_reschedule
116 *
117 * Description:  This routine requests other CPU to execute rescheduling.
118 *               1.Send 'RESCHEDULE_IPI' to other CPU.
119 *                 Request other CPU to execute 'smp_reschedule_interrupt()'.
120 *
121 * Born on Date: 2002.02.05
122 *
123 * Arguments:    cpu_id - Target CPU ID
124 *
125 * Returns:      void (cannot fail)
126 *
127 * Modification log:
128 * Date       Who Description
129 * ---------- --- --------------------------------------------------------
130 *
131 *==========================================================================*/
void smp_send_reschedule(int cpu_id)
{
	WARN_ON(cpu_is_offline(cpu_id));
	/* try=1: fine to drop if the target has a reschedule IPI pending
	 * already — rescheduling happens on interrupt exit either way. */
	send_IPI_mask(cpumask_of_cpu(cpu_id), RESCHEDULE_IPI, 1);
}
137
138/*==========================================================================*
139 * Name:         smp_reschedule_interrupt
140 *
141 * Description:  This routine executes on CPU which received
142 *               'RESCHEDULE_IPI'.
143 *               Rescheduling is processed at the exit of interrupt
144 *               operation.
145 *
146 * Born on Date: 2002.02.05
147 *
148 * Arguments:    NONE
149 *
150 * Returns:      void (cannot fail)
151 *
152 * Modification log:
153 * Date       Who Description
154 * ---------- --- --------------------------------------------------------
155 *
156 *==========================================================================*/
void smp_reschedule_interrupt(void)
{
	/* Nothing to do here: the resched itself is handled on the
	 * interrupt-return path (see the header comment above). */
}
161
162/*==========================================================================*
163 * Name:         smp_flush_cache_all
164 *
165 * Description:  This routine sends a 'INVALIDATE_CACHE_IPI' to all other
166 *               CPUs in the system.
167 *
168 * Born on Date: 2003-05-28
169 *
170 * Arguments:    NONE
171 *
172 * Returns:      void (cannot fail)
173 *
174 * Modification log:
175 * Date       Who Description
176 * ---------- --- --------------------------------------------------------
177 *
178 *==========================================================================*/
void smp_flush_cache_all(void)
{
	cpumask_t cpumask;
	unsigned long *mask;

	/* No migration between snapshotting cpu_online_map and waiting. */
	preempt_disable();
	cpumask = cpu_online_map;
	cpu_clear(smp_processor_id(), cpumask);
	spin_lock(&flushcache_lock);
	mask=cpus_addr(cpumask);
	/* Mark every target pending; each clears its own bit in
	 * smp_flush_cache_all_interrupt() once its local flush is done. */
	atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
	send_IPI_mask(cpumask, INVALIDATE_CACHE_IPI, 0);
	/* Flush our own cache while the other CPUs flush theirs. */
	_flush_cache_copyback_all();
	/* Busy-wait until all responders have acknowledged. */
	while (flushcache_cpumask)
		mb();
	spin_unlock(&flushcache_lock);
	preempt_enable();
}
197
/* INVALIDATE_CACHE_IPI handler: flush the local cache, then clear our
 * bit so the initiator in smp_flush_cache_all() can stop spinning. */
void smp_flush_cache_all_interrupt(void)
{
	_flush_cache_copyback_all();
	clear_bit(smp_processor_id(), &flushcache_cpumask);
}
203
204/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* TLB flush request Routines                                                */
206/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
207
208/*==========================================================================*
209 * Name:         smp_flush_tlb_all
210 *
211 * Description:  This routine flushes all processes TLBs.
212 *               1.Request other CPU to execute 'flush_tlb_all_ipi()'.
213 *               2.Execute 'do_flush_tlb_all_local()'.
214 *
215 * Born on Date: 2002.02.05
216 *
217 * Arguments:    NONE
218 *
219 * Returns:      void (cannot fail)
220 *
221 * Modification log:
222 * Date       Who Description
223 * ---------- --- --------------------------------------------------------
224 *
225 *==========================================================================*/
void smp_flush_tlb_all(void)
{
	unsigned long flags;

	preempt_disable();
	/* Flush our own TLB with interrupts off ... */
	local_irq_save(flags);
	__flush_tlb_all();
	local_irq_restore(flags);
	/* ... then have every other CPU do the same, and wait (wait=1). */
	smp_call_function(flush_tlb_all_ipi, NULL, 1, 1);
	preempt_enable();
}
237
238/*==========================================================================*
239 * Name:         flush_tlb_all_ipi
240 *
241 * Description:  This routine flushes all local TLBs.
242 *               1.Execute 'do_flush_tlb_all_local()'.
243 *
244 * Born on Date: 2002.02.05
245 *
246 * Arguments:    *info - not used
247 *
248 * Returns:      void (cannot fail)
249 *
250 * Modification log:
251 * Date       Who Description
252 * ---------- --- --------------------------------------------------------
253 *
254 *==========================================================================*/
/* smp_call_function() callback: flush this CPU's entire TLB.
 * @info is unused. */
static void flush_tlb_all_ipi(void *info)
{
	__flush_tlb_all();
}
259
260/*==========================================================================*
261 * Name:         smp_flush_tlb_mm
262 *
263 * Description:  This routine flushes the specified mm context TLB's.
264 *
265 * Born on Date: 2002.02.05
266 *
267 * Arguments:    *mm - a pointer to the mm struct for flush TLB
268 *
269 * Returns:      void (cannot fail)
270 *
271 * Modification log:
272 * Date       Who Description
273 * ---------- --- --------------------------------------------------------
274 *
275 *==========================================================================*/
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu_id;
	cpumask_t cpu_mask;
	unsigned long *mmc;
	unsigned long flags;

	preempt_disable();
	cpu_id = smp_processor_id();
	mmc = &mm->context[cpu_id];		/* this CPU's ASID slot for mm */
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(cpu_id, cpu_mask);		/* others to notify via IPI */

	if (*mmc != NO_CONTEXT) {
		local_irq_save(flags);
		/* Invalidate our context; reacquire an ASID immediately if
		 * mm is what we are running, otherwise just drop out of
		 * its cpu_vm_mask. */
		*mmc = NO_CONTEXT;
		if (mm == current->mm)
			activate_context(mm);
		else
			cpu_clear(cpu_id, mm->cpu_vm_mask);
		local_irq_restore(flags);
	}
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, NULL, FLUSH_ALL);

	preempt_enable();
}
303
304/*==========================================================================*
305 * Name:         smp_flush_tlb_range
306 *
307 * Description:  This routine flushes a range of pages.
308 *
309 * Born on Date: 2002.02.05
310 *
311 * Arguments:    *mm - a pointer to the mm struct for flush TLB
312 *               start - not used
313 *               end - not used
314 *
315 * Returns:      void (cannot fail)
316 *
317 * Modification log:
318 * Date       Who Description
319 * ---------- --- --------------------------------------------------------
320 *
321 *==========================================================================*/
/* Range flush is implemented as a full-mm flush on this port; start/end
 * are intentionally ignored (see the header comment above). */
void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	smp_flush_tlb_mm(vma->vm_mm);
}
327
328/*==========================================================================*
329 * Name:         smp_flush_tlb_page
330 *
331 * Description:  This routine flushes one page.
332 *
333 * Born on Date: 2002.02.05
334 *
335 * Arguments:    *vma - a pointer to the vma struct include va
336 *               va - virtual address for flush TLB
337 *
338 * Returns:      void (cannot fail)
339 *
340 * Modification log:
341 * Date       Who Description
342 * ---------- --- --------------------------------------------------------
343 *
344 *==========================================================================*/
345void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
346{
347	struct mm_struct *mm = vma->vm_mm;
348	int cpu_id;
349	cpumask_t cpu_mask;
350	unsigned long *mmc;
351	unsigned long flags;
352
353	preempt_disable();
354	cpu_id = smp_processor_id();
355	mmc = &mm->context[cpu_id];
356	cpu_mask = mm->cpu_vm_mask;
357	cpu_clear(cpu_id, cpu_mask);
358
359#ifdef DEBUG_SMP
360	if (!mm)
361		BUG();
362#endif
363
364	if (*mmc != NO_CONTEXT) {
365		local_irq_save(flags);
366		va &= PAGE_MASK;
367		va |= (*mmc & MMU_CONTEXT_ASID_MASK);
368		__flush_tlb_page(va);
369		local_irq_restore(flags);
370	}
371	if (!cpus_empty(cpu_mask))
372		flush_tlb_others(cpu_mask, mm, vma, va);
373
374	preempt_enable();
375}
376
377/*==========================================================================*
378 * Name:         flush_tlb_others
379 *
380 * Description:  This routine requests other CPU to execute flush TLB.
 *               1.Set up parameters.
382 *               2.Send 'INVALIDATE_TLB_IPI' to other CPU.
383 *                 Request other CPU to execute 'smp_invalidate_interrupt()'.
384 *               3.Wait for other CPUs operation finished.
385 *
386 * Born on Date: 2002.02.05
387 *
388 * Arguments:    cpumask - bitmap of target CPUs
389 *               *mm -  a pointer to the mm struct for flush TLB
390 *               *vma -  a pointer to the vma struct include va
391 *               va - virtual address for flush TLB
392 *
393 * Returns:      void (cannot fail)
394 *
395 * Modification log:
396 * Date       Who Description
397 * ---------- --- --------------------------------------------------------
398 *
399 *==========================================================================*/
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
	struct vm_area_struct *vma, unsigned long va)
{
	unsigned long *mask;
#ifdef DEBUG_SMP
	unsigned long flags;
	__save_flags(flags);
	if (!(flags & 0x0040))	/* Interrupt Disable NONONO */
		BUG();
#endif /* DEBUG_SMP */

	/*
	 * A couple of (to be removed) sanity checks:
	 *
	 * - we do not send IPIs to not-yet booted CPUs.
	 * - current CPU must not be in mask
	 * - mask must exist :)
	 */
	BUG_ON(cpus_empty(cpumask));

	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
	BUG_ON(!mm);

	/* If a CPU which we ran on has gone down, OK. */
	cpus_and(cpumask, cpumask, cpu_online_map);
	if (cpus_empty(cpumask))
		return;

	/*
	 * i'm not happy about this global shared spinlock in the
	 * MM hot path, but we'll see how contended it is.
	 * Temporarily this turns IRQs off, so that lockups are
	 * detected by the NMI watchdog.
	 */
	spin_lock(&tlbstate_lock);

	/* Publish the request for smp_invalidate_interrupt(); the globals
	 * stay valid until flush_cpumask drains to empty below. */
	flush_mm = mm;
	flush_vma = vma;
	flush_va = va;
	mask=cpus_addr(cpumask);
	atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);

	/*
	 * We have to send the IPI only to
	 * CPUs affected.
	 */
	send_IPI_mask(cpumask, INVALIDATE_TLB_IPI, 0);

	/* Each target clears its own bit when its local flush is done. */
	while (!cpus_empty(flush_cpumask)) {
		/* nothing. lockup detection does not belong here */
		mb();
	}

	flush_mm = NULL;
	flush_vma = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}
458
459/*==========================================================================*
460 * Name:         smp_invalidate_interrupt
461 *
462 * Description:  This routine executes on CPU which received
463 *               'INVALIDATE_TLB_IPI'.
464 *               1.Flush local TLB.
465 *               2.Report flush TLB process was finished.
466 *
467 * Born on Date: 2002.02.05
468 *
469 * Arguments:    NONE
470 *
471 * Returns:      void (cannot fail)
472 *
473 * Modification log:
474 * Date       Who Description
475 * ---------- --- --------------------------------------------------------
476 *
477 *==========================================================================*/
void smp_invalidate_interrupt(void)
{
	int cpu_id = smp_processor_id();
	/* flush_mm was published by flush_tlb_others() before the IPI and
	 * remains valid until every flush_cpumask bit is cleared. */
	unsigned long *mmc = &flush_mm->context[cpu_id];

	/* Spurious IPI: we are not among the requested targets. */
	if (!cpu_isset(cpu_id, flush_cpumask))
		return;

	if (flush_va == FLUSH_ALL) {
		/* Whole-context flush: drop our ASID; reacquire one right
		 * away if the mm is currently active on this CPU. */
		*mmc = NO_CONTEXT;
		if (flush_mm == current->active_mm)
			activate_context(flush_mm);
		else
			cpu_clear(cpu_id, flush_mm->cpu_vm_mask);
	} else {
		unsigned long va = flush_va;

		if (*mmc != NO_CONTEXT) {
			/* Tag the page with our local ASID, as the
			 * initiator's ASID for this mm may differ. */
			va &= PAGE_MASK;
			va |= (*mmc & MMU_CONTEXT_ASID_MASK);
			__flush_tlb_page(va);
		}
	}
	/* Acknowledge: lets the initiator stop spinning on us. */
	cpu_clear(cpu_id, flush_cpumask);
}
503
504/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Stop CPU request Routines                                                */
506/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
507
508/*==========================================================================*
509 * Name:         smp_send_stop
510 *
511 * Description:  This routine requests stop all CPUs.
512 *               1.Request other CPU to execute 'stop_this_cpu()'.
513 *
514 * Born on Date: 2002.02.05
515 *
516 * Arguments:    NONE
517 *
518 * Returns:      void (cannot fail)
519 *
520 * Modification log:
521 * Date       Who Description
522 * ---------- --- --------------------------------------------------------
523 *
524 *==========================================================================*/
/* Ask every other CPU to run stop_this_cpu(); wait=0 since the targets
 * never return from it. */
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 1, 0);
}
529
530/*==========================================================================*
531 * Name:         stop_this_cpu
532 *
533 * Description:  This routine halt CPU.
534 *
535 * Born on Date: 2002.02.05
536 *
537 * Arguments:    NONE
538 *
539 * Returns:      void (cannot fail)
540 *
541 * Modification log:
542 * Date       Who Description
543 * ---------- --- --------------------------------------------------------
544 *
545 *==========================================================================*/
static void stop_this_cpu(void *dummy)
{
	int cpu_id = smp_processor_id();

	/*
	 * Remove this CPU:
	 */
	cpu_clear(cpu_id, cpu_online_map);

	/*
	 * PSW IE = 1;
	 * IMASK = 0;
	 * goto SLEEP
	 */
	local_irq_disable();
	/* Mask every interrupt source at the ICU, then re-enable the PSW
	 * IE bit: no interrupt can actually be delivered any more. */
	outl(0, M32R_ICU_IMASK_PORTL);
	inl(M32R_ICU_IMASK_PORTL);	/* dummy read */
	local_irq_enable();

	/* Spin forever; this CPU is now effectively halted. */
	for ( ; ; );
}
567
568/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Call function Routines                                                    */
570/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
571
572/*==========================================================================*
573 * Name:         smp_call_function
574 *
575 * Description:  This routine sends a 'CALL_FUNCTION_IPI' to all other CPUs
576 *               in the system.
577 *
578 * Born on Date: 2002.02.05
579 *
580 * Arguments:    *func - The function to run. This must be fast and
581 *                       non-blocking.
582 *               *info - An arbitrary pointer to pass to the function.
583 *               nonatomic - currently unused.
584 *               wait - If true, wait (atomically) until function has
585 *                      completed on other CPUs.
586 *
587 * Returns:      0 on success, else a negative status code. Does not return
588 *               until remote CPUs are nearly ready to execute <<func>> or
589 *               are or have executed.
590 *
591 * Cautions:     You must not call this function with disabled interrupts or
592 *               from a hardware interrupt handler, you may call it from a
593 *               bottom half handler.
594 *
595 * Modification log:
596 * Date       Who Description
597 * ---------- --- --------------------------------------------------------
598 *
599 *==========================================================================*/
int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
	int wait)
{
	/* Descriptor lives on our stack; safe because we always wait at
	 * least for 'started' to reach the target count before returning. */
	struct call_data_struct data;
	int cpus;

#ifdef DEBUG_SMP
	unsigned long flags;
	__save_flags(flags);
	if (!(flags & 0x0040))	/* Interrupt Disable NONONO */
		BUG();
#endif /* DEBUG_SMP */

	/* Holding any lock stops cpus from going down. */
	spin_lock(&call_lock);
	cpus = num_online_cpus() - 1;

	if (!cpus) {
		spin_unlock(&call_lock);
		return 0;
	}

	/* Can deadlock when called with interrupts disabled */
	/* NOTE(review): this warning fires only on the multi-CPU path and
	 * after call_lock is taken — confirm that is intended. */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	mb();	/* make call_data contents visible before the IPI lands */

	/* Send a message to all other CPUs and wait for them to respond */
	send_IPI_allbutself(CALL_FUNCTION_IPI, 0);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		barrier();

	/* If requested, also wait until func has finished everywhere. */
	if (wait)
		while (atomic_read(&data.finished) != cpus)
			barrier();
	spin_unlock(&call_lock);

	return 0;
}
649
650/*==========================================================================*
651 * Name:         smp_call_function_interrupt
652 *
653 * Description:  This routine executes on CPU which received
654 *               'CALL_FUNCTION_IPI'.
655 *
656 * Born on Date: 2002.02.05
657 *
658 * Arguments:    NONE
659 *
660 * Returns:      void (cannot fail)
661 *
662 * Modification log:
663 * Date       Who Description
664 * ---------- --- --------------------------------------------------------
665 *
666 *==========================================================================*/
void smp_call_function_interrupt(void)
{
	/* Snapshot the call descriptor before acknowledging 'started';
	 * after that the initiator may let it go out of scope. */
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function
	 */
	mb();
	atomic_inc(&call_data->started);
	/*
	 * At this point the info structure may be out of scope unless wait==1
	 */
	irq_enter();
	(*func)(info);
	irq_exit();

	if (wait) {
		mb();	/* order func's side effects before the ack */
		atomic_inc(&call_data->finished);
	}
}
691
692/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Timer Routines                                                            */
694/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
695
696/*==========================================================================*
697 * Name:         smp_send_timer
698 *
699 * Description:  This routine sends a 'LOCAL_TIMER_IPI' to all other CPUs
700 *               in the system.
701 *
702 * Born on Date: 2002.02.05
703 *
704 * Arguments:    NONE
705 *
706 * Returns:      void (cannot fail)
707 *
708 * Modification log:
709 * Date       Who Description
710 * ---------- --- --------------------------------------------------------
711 *
712 *==========================================================================*/
/* Broadcast a local-timer tick to all other CPUs (try=1: skip a target
 * that still has the previous timer IPI pending). */
void smp_send_timer(void)
{
	send_IPI_allbutself(LOCAL_TIMER_IPI, 1);
}
717
718/*==========================================================================*
 * Name:         smp_ipi_timer_interrupt
720 *
721 * Description:  This routine executes on CPU which received
722 *               'LOCAL_TIMER_IPI'.
723 *
724 * Born on Date: 2002.02.05
725 *
 * Arguments:    *regs - a pointer to the saved register info
727 *
728 * Returns:      void (cannot fail)
729 *
730 * Modification log:
731 * Date       Who Description
732 * ---------- --- --------------------------------------------------------
733 *
734 *==========================================================================*/
/* LOCAL_TIMER_IPI handler: run the local timer work in interrupt
 * context with the saved registers made available via get_irq_regs(). */
void smp_ipi_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	old_regs = set_irq_regs(regs);
	irq_enter();
	smp_local_timer_interrupt();
	irq_exit();
	set_irq_regs(old_regs);
}
744
745/*==========================================================================*
746 * Name:         smp_local_timer_interrupt
747 *
748 * Description:  Local timer interrupt handler. It does both profiling and
749 *               process statistics/rescheduling.
750 *               We do profiling in every local tick, statistics/rescheduling
751 *               happen only every 'profiling multiplier' ticks. The default
752 *               multiplier is 1 and it can be changed by writing the new
753 *               multiplier value into /proc/profile.
754 *
755 * Born on Date: 2002.02.05
756 *
 * Arguments:    *regs - a pointer to the saved register info
758 *
759 * Returns:      void (cannot fail)
760 *
761 * Original:     arch/i386/kernel/apic.c
762 *
763 * Modification log:
764 * Date       Who Description
765 * ---------- --- --------------------------------------------------------
766 * 2003-06-24 hy  use per_cpu structure.
767 *==========================================================================*/
void smp_local_timer_interrupt(void)
{
	/* Were we interrupted out of user mode?  (feeds the user/system
	 * accounting split in update_process_times) */
	int user = user_mode(get_irq_regs());
	int cpu_id = smp_processor_id();

	/*
	 * The profiling function is SMP safe. (nothing can mess
	 * around with "current", and the profiling counters are
	 * updated with atomic operations). This is especially
	 * useful with a profiling multiplier != 1
	 */

	profile_tick(CPU_PROFILING);

	/* Statistics/rescheduling run once every prof_multiplier ticks. */
	if (--per_cpu(prof_counter, cpu_id) <= 0) {
		/*
		 * The multiplier may have changed since the last time we got
		 * to this point as a result of the user writing to
		 * /proc/profile. In this case we need to adjust the APIC
		 * timer accordingly.
		 *
		 * Interrupts are already masked off at this point.
		 */
		per_cpu(prof_counter, cpu_id)
			= per_cpu(prof_multiplier, cpu_id);
		if (per_cpu(prof_counter, cpu_id)
			!= per_cpu(prof_old_multiplier, cpu_id))
		{
			/* Remember the multiplier we just switched to. */
			per_cpu(prof_old_multiplier, cpu_id)
				= per_cpu(prof_counter, cpu_id);
		}

		update_process_times(user);
	}
}
803
804/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Send IPI Routines                                                         */
806/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
807
808/*==========================================================================*
809 * Name:         send_IPI_allbutself
810 *
811 * Description:  This routine sends a IPI to all other CPUs in the system.
812 *
813 * Born on Date: 2002.02.05
814 *
815 * Arguments:    ipi_num - Number of IPI
816 *               try -  0 : Send IPI certainly.
 *                     !0 : The IPI is not sent if the target CPU has not
 *                          yet received the previous IPI.
819 *
820 * Returns:      void (cannot fail)
821 *
822 * Modification log:
823 * Date       Who Description
824 * ---------- --- --------------------------------------------------------
825 *
826 *==========================================================================*/
void send_IPI_allbutself(int ipi_num, int try)
{
	cpumask_t cpumask;

	/* All online CPUs minus ourselves. */
	cpumask = cpu_online_map;
	cpu_clear(smp_processor_id(), cpumask);

	send_IPI_mask(cpumask, ipi_num, try);
}
836
837/*==========================================================================*
838 * Name:         send_IPI_mask
839 *
840 * Description:  This routine sends a IPI to CPUs in the system.
841 *
842 * Born on Date: 2002.02.05
843 *
844 * Arguments:    cpu_mask - Bitmap of target CPUs logical ID
845 *               ipi_num - Number of IPI
846 *               try -  0 : Send IPI certainly.
 *                     !0 : The IPI is not sent if the target CPU has not
 *                          yet received the previous IPI.
849 *
850 * Returns:      void (cannot fail)
851 *
852 * Modification log:
853 * Date       Who Description
854 * ---------- --- --------------------------------------------------------
855 *
856 *==========================================================================*/
static void send_IPI_mask(cpumask_t cpumask, int ipi_num, int try)
{
	cpumask_t physid_mask, tmp;
	int cpu_id, phys_id;
	int num_cpus = num_online_cpus();

	if (num_cpus <= 1)	/* NO MP */
		return;

	/* Every requested CPU must be online. */
	cpus_and(tmp, cpumask, cpu_online_map);
	BUG_ON(!cpus_equal(cpumask, tmp));

	/* Translate logical CPU ids to physical ids for the hardware. */
	physid_mask = CPU_MASK_NONE;
	for_each_cpu_mask(cpu_id, cpumask){
		if ((phys_id = cpu_to_physid(cpu_id)) != -1)
			cpu_set(phys_id, physid_mask);
	}

	send_IPI_mask_phys(physid_mask, ipi_num, try);
}
877
878/*==========================================================================*
879 * Name:         send_IPI_mask_phys
880 *
881 * Description:  This routine sends a IPI to other CPUs in the system.
882 *
883 * Born on Date: 2002.02.05
884 *
885 * Arguments:    cpu_mask - Bitmap of target CPUs physical ID
886 *               ipi_num - Number of IPI
887 *               try -  0 : Send IPI certainly.
 *                     !0 : The IPI is not sent if the target CPU has not
 *                          yet received the previous IPI.
 *
 * Returns:      IPICRi register value.
892 *
893 * Modification log:
894 * Date       Who Description
895 * ---------- --- --------------------------------------------------------
896 *
897 *==========================================================================*/
unsigned long send_IPI_mask_phys(cpumask_t physid_mask, int ipi_num,
	int try)
{
	spinlock_t *ipilock;
	volatile unsigned long *ipicr_addr;
	unsigned long ipicr_val;
	unsigned long my_physid_mask;
	unsigned long mask = cpus_addr(physid_mask)[0];


	/* Only physically present CPUs, and only known IPI numbers. */
	if (mask & ~physids_coerce(phys_cpu_present_map))
		BUG();
	if (ipi_num >= NR_IPIS)
		BUG();

	mask <<= IPI_SHIFT;
	ipilock = &ipi_lock[ipi_num];
	/* IPICR registers are laid out at 4-byte stride per IPI number. */
	ipicr_addr = (volatile unsigned long *)(M32R_ICU_IPICR_ADDR
		+ (ipi_num << 2));
	/* NOTE(review): built from the logical id (smp_processor_id());
	 * presumably logical == physical on this port — confirm against
	 * cpu_to_physid(). */
	my_physid_mask = ~(1 << smp_processor_id());

	/*
	 * lock ipi_lock[i]
	 * check IPICRi == 0
	 * write IPICRi (send IPIi)
	 * unlock ipi_lock[i]
	 */
	spin_lock(ipilock);
	/* Spin until no other CPU's request bit is pending in IPICRi,
	 * then store the target mask to fire the IPI.  If 'try' is
	 * nonzero, give up instead of spinning (branch to 3:). */
	__asm__ __volatile__ (
		";; CHECK IPICRi == 0		\n\t"
		".fillinsn			\n"
		"1:				\n\t"
		"ld	%0, @%1			\n\t"
		"and	%0, %4			\n\t"
		"beqz	%0, 2f			\n\t"
		"bnez	%3, 3f			\n\t"
		"bra	1b			\n\t"
		";; WRITE IPICRi (send IPIi)	\n\t"
		".fillinsn			\n"
		"2:				\n\t"
		"st	%2, @%1			\n\t"
		".fillinsn			\n"
		"3:				\n\t"
		: "=&r"(ipicr_val)
		: "r"(ipicr_addr), "r"(mask), "r"(try), "r"(my_physid_mask)
		: "memory"
	);
	spin_unlock(ipilock);

	/* Last IPICRi value observed (nonzero means the send was skipped). */
	return ipicr_val;
}
949