/* smp.c: Sparc SMP support.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 2004 Keith M Wesolowski (wesolows@foobazco.org)
 */

#include <asm/head.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/delay.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/cpudata.h>
#include <asm/leon.h>

#include "irq.h"

volatile unsigned long cpu_callin_map[NR_CPUS] __cpuinitdata = {0,};
unsigned char boot_cpu_id = 0;
unsigned char boot_cpu_id4 = 0; /* boot_cpu_id << 2 */

cpumask_t smp_commenced_mask = CPU_MASK_NONE;

/* The only guaranteed locking primitive available on all Sparc
 * processors is 'ldstub [%reg + immediate], %dest_reg', which atomically
 * loads the current byte at the effective address into dest_reg and
 * stores 0xff there afterwards.  A pretty lame locking primitive
 * compared to the Alpha or Intel ones, no?  Most Sparcs have the 'swap'
 * instruction, which is much better...
 */
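/* For illustration only (nothing below references this): a byte lock
 * built on ldstub spins roughly like the following, and is released by
 * a plain zero-byte store:
 *
 *	1:	ldstub	[%o0], %g2	! fetch lock byte, store 0xff atomically
 *		orcc	%g2, 0x0, %g0	! was it already non-zero (held)?
 *		bne	1b		! yes -> keep spinning
 *		 nop			! branch delay slot
 */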

/* Record this cpu's delay-loop calibration, clock frequency, PROM node
 * and hardware MID.
 */
void __cpuinit smp_store_cpu_info(int id)
{
	int cpu_node;

	cpu_data(id).udelay_val = loops_per_jiffy;

	cpu_find_by_mid(id, &cpu_node);
	cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
						     "clock-frequency", 0);
	cpu_data(id).prom_node = cpu_node;
	cpu_data(id).mid = cpu_get_hwmid(cpu_node);

	if (cpu_data(id).mid < 0)
		panic("No MID found for CPU%d at node 0x%08x", id, cpu_node);
}

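/* Called once all cpus have been brought up: report the aggregate
 * BogoMIPS figure and let the platform-specific SMP code finish its
 * own setup.
 */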
void __init smp_cpus_done(unsigned int max_cpus)
{
	extern void smp4m_smp_done(void);
	extern void smp4d_smp_done(void);
	unsigned long bogosum = 0;
	int cpu, num = 0;

	for_each_online_cpu(cpu) {
		num++;
		bogosum += cpu_data(cpu).udelay_val;
	}

	printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		num, bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);

	switch (sparc_cpu_model) {
	case sun4:
		printk("SUN4\n");
		BUG();
		break;
	case sun4c:
		printk("SUN4C\n");
		BUG();
		break;
	case sun4m:
		smp4m_smp_done();
		break;
	case sun4d:
		smp4d_smp_done();
		break;
	case sparc_leon:
		leon_smp_done();
		break;
	case sun4e:
		printk("SUN4E\n");
		BUG();
		break;
	case sun4u:
		printk("SUN4U\n");
		BUG();
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
		break;
	}
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}

struct linux_prom_registers smp_penguin_ctable __cpuinitdata = { 0 };

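/* In this kernel the cross-cpu reschedule and stop notifications are not
 * wired up; the stubs below are intentionally empty (see the sparc64 code
 * for a full implementation).
 */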
void smp_send_reschedule(int cpu)
{
	/* See sparc64 */
}

void smp_send_stop(void)
{
}

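/* The smp_flush_*() routines below all follow the same pattern: broadcast
 * the corresponding local_flush_*() operation to the other cpus via a
 * cross-call (xc0..xc3 carry zero to three arguments), then perform the
 * flush locally.  Where an mm is involved, the calling cpu is dropped
 * from the mask first and the cross-call is skipped entirely if no other
 * cpu is using that mm.
 */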
void smp_flush_cache_all(void)
{
	xc0((smpfunc_t) BTFIXUP_CALL(local_flush_cache_all));
	local_flush_cache_all();
}

void smp_flush_tlb_all(void)
{
	xc0((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_all));
	local_flush_tlb_all();
}

void smp_flush_cache_mm(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask = *mm_cpumask(mm);
		cpu_clear(smp_processor_id(), cpu_mask);
		if (!cpus_empty(cpu_mask))
			xc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm), (unsigned long) mm);
		local_flush_cache_mm(mm);
	}
}

void smp_flush_tlb_mm(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask = *mm_cpumask(mm);
		cpu_clear(smp_processor_id(), cpu_mask);
		if (!cpus_empty(cpu_mask)) {
			xc1((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_mm), (unsigned long) mm);
			/* If we are the sole user of this mm, shrink its
			 * cpumask back to just this cpu so later flushes
			 * can stay local.
			 */
			if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
				cpumask_copy(mm_cpumask(mm),
					     cpumask_of(smp_processor_id()));
		}
		local_flush_tlb_mm(mm);
	}
}

void smp_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask = *mm_cpumask(mm);
		cpu_clear(smp_processor_id(), cpu_mask);
		if (!cpus_empty(cpu_mask))
			xc3((smpfunc_t) BTFIXUP_CALL(local_flush_cache_range), (unsigned long) vma, start, end);
		local_flush_cache_range(vma, start, end);
	}
}

void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			 unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask = *mm_cpumask(mm);
		cpu_clear(smp_processor_id(), cpu_mask);
		if (!cpus_empty(cpu_mask))
			xc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range), (unsigned long) vma, start, end);
		local_flush_tlb_range(vma, start, end);
	}
}

void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask = *mm_cpumask(mm);
		cpu_clear(smp_processor_id(), cpu_mask);
		if (!cpus_empty(cpu_mask))
			xc2((smpfunc_t) BTFIXUP_CALL(local_flush_cache_page), (unsigned long) vma, page);
		local_flush_cache_page(vma, page);
	}
}

void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask = *mm_cpumask(mm);
		cpu_clear(smp_processor_id(), cpu_mask);
		if (!cpus_empty(cpu_mask))
			xc2((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_page), (unsigned long) vma, page);
		local_flush_tlb_page(vma, page);
	}
}

void smp_reschedule_irq(void)
{
	set_need_resched();
}

void smp_flush_page_to_ram(unsigned long page)
{
	xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_to_ram), page);
	local_flush_page_to_ram(page);
}

void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
	cpumask_t cpu_mask = *mm_cpumask(mm);
	cpu_clear(smp_processor_id(), cpu_mask);
	if (!cpus_empty(cpu_mask))
		xc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns), (unsigned long) mm, insn_addr);
	local_flush_sig_insns(mm, insn_addr);
}

extern unsigned int lvl14_resolution;

/* /proc/profile writes can call this, don't __init it please. */
static DEFINE_SPINLOCK(prof_setup_lock);

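/* Reprogram the level-14 profiling timer on every cpu so it fires
 * 'multiplier' times faster, refusing values that would flood the
 * machine with level-14 interrupts.
 */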
int setup_profiling_timer(unsigned int multiplier)
{
	int i;
	unsigned long flags;

	/* Prevent level14 ticker IRQ flooding. */
	if (!multiplier || (lvl14_resolution / multiplier) < 500)
		return -EINVAL;

	spin_lock_irqsave(&prof_setup_lock, flags);
	for_each_possible_cpu(i) {
		load_profile_irq(i, lvl14_resolution / multiplier);
		prof_multiplier(i) = multiplier;
	}
	spin_unlock_irqrestore(&prof_setup_lock, flags);

	return 0;
}

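/* Count the cpus the PROM knows about, warn if NR_CPUS is too small to
 * hold them all, record the boot cpu's data and hand off to the
 * platform-specific boot code.
 */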
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	extern void __init smp4m_boot_cpus(void);
	extern void __init smp4d_boot_cpus(void);
	int i, cpuid, extra;

	printk("Entering SMP Mode...\n");

	extra = 0;
	for (i = 0; !cpu_find_by_instance(i, NULL, &cpuid); i++) {
		if (cpuid >= NR_CPUS)
			extra++;
	}
	/* i = number of cpus */
	if (extra && max_cpus > i - extra)
		printk("Warning: NR_CPUS is too low to start all cpus\n");

	smp_store_cpu_info(boot_cpu_id);

	switch (sparc_cpu_model) {
	case sun4:
		printk("SUN4\n");
		BUG();
		break;
	case sun4c:
		printk("SUN4C\n");
		BUG();
		break;
	case sun4m:
		smp4m_boot_cpus();
		break;
	case sun4d:
		smp4d_boot_cpus();
		break;
	case sparc_leon:
		leon_boot_cpus();
		break;
	case sun4e:
		printk("SUN4E\n");
		BUG();
		break;
	case sun4u:
		printk("SUN4U\n");
		BUG();
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
		break;
	}
}

/* Set this up early so that things like the scheduler can init
 * properly.  We use the same cpu mask for both the present and
 * possible cpu map.
 */
void __init smp_setup_cpu_possible_map(void)
{
	int instance, mid;

	instance = 0;
	while (!cpu_find_by_instance(instance, NULL, &mid)) {
		if (mid < NR_CPUS) {
			set_cpu_possible(mid, true);
			set_cpu_present(mid, true);
		}
		instance++;
	}
}

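/* Sanity-check the boot cpu's id and mark it online and possible. */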
void __init smp_prepare_boot_cpu(void)
{
	int cpuid = hard_smp_processor_id();

	if (cpuid >= NR_CPUS) {
		prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
		prom_halt();
	}
	if (cpuid != 0)
		printk("boot cpu id != 0, this could work but is untested\n");

	current_thread_info()->cpu = cpuid;
	set_cpu_online(cpuid, true);
	set_cpu_possible(cpuid, true);
}

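/* Bring one secondary cpu up through the platform-specific boot routine,
 * then wait for it to appear in the online map before returning.
 */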
int __cpuinit __cpu_up(unsigned int cpu)
{
	extern int __cpuinit smp4m_boot_one_cpu(int);
	extern int __cpuinit smp4d_boot_one_cpu(int);
	int ret = 0;

	switch (sparc_cpu_model) {
	case sun4:
		printk("SUN4\n");
		BUG();
		break;
	case sun4c:
		printk("SUN4C\n");
		BUG();
		break;
	case sun4m:
		ret = smp4m_boot_one_cpu(cpu);
		break;
	case sun4d:
		ret = smp4d_boot_one_cpu(cpu);
		break;
	case sparc_leon:
		ret = leon_boot_one_cpu(cpu);
		break;
	case sun4e:
		printk("SUN4E\n");
		BUG();
		break;
	case sun4u:
		printk("SUN4U\n");
		BUG();
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
		break;
	}

	if (!ret) {
		cpu_set(cpu, smp_commenced_mask);
		while (!cpu_online(cpu))
			mb();
	}
	return ret;
}

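/* /proc/cpuinfo helpers: per-cpu BogoMIPS figures and online state. */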
void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i) {
		seq_printf(m,
			   "Cpu%dBogo\t: %lu.%02lu\n",
			   i,
			   cpu_data(i).udelay_val/(500000/HZ),
			   (cpu_data(i).udelay_val/(5000/HZ))%100);
	}
}

void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d\t\t: online\n", i);
}