/*
 * arch/sh/kernel/smp.c
 *
 * SMP support for the SuperH processors.
 *
 * Copyright (C) 2002 - 2010 Paul Mundt
 * Copyright (C) 2006 - 2007 Akio Idehara
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/err.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <asm/atomic.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>

int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

struct plat_smp_ops *mp_ops = NULL;

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

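/*
 * Register the platform's SMP operations.  Warns if a set of ops has
 * already been registered, then installs the new one.
 */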
void __cpuinit register_smp_ops(struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}

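/*
 * Populate this CPU's cpu_data entry from the boot CPU's data and
 * record the locally calibrated loops_per_jiffy.
 */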
static inline void __cpuinit smp_store_cpu_info(unsigned int cpu)
{
	struct sh_cpuinfo *c = cpu_data + cpu;

	memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));

	c->loops_per_jiffy = loops_per_jiffy;
}

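/*
 * Set up the boot CPU's mm context and hand off to the platform code
 * to prepare the secondary CPUs for bring-up.
 */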
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu = smp_processor_id();

	init_new_context(current, &init_mm);
	current_thread_info()->cpu = cpu;
	mp_ops->prepare_cpus(max_cpus);

#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(&cpu_possible_map);
#endif
}

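/*
 * Record the boot CPU in the physical<->logical CPU maps and mark it
 * online, possible, and CPU_ONLINE.
 */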
void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	__cpu_number_map[0] = cpu;
	__cpu_logical_map[0] = cpu;

	set_cpu_online(cpu, true);
	set_cpu_possible(cpu, true);

	per_cpu(cpu_state, cpu) = CPU_ONLINE;
}

#ifdef CONFIG_HOTPLUG_CPU
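/*
 * Wait up to one second (10 x 100ms) for the dying CPU to mark its
 * cpu_state as CPU_DEAD; complain if it never does.
 */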
void native_cpu_die(unsigned int cpu)
{
	unsigned int i;

	for (i = 0; i < 10; i++) {
		smp_rmb();
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			if (system_state == SYSTEM_RUNNING)
				pr_info("CPU %u is now offline\n", cpu);

			return;
		}

		msleep(100);
	}

	pr_err("CPU %u didn't die...\n", cpu);
}

int native_cpu_disable(unsigned int cpu)
{
	return cpu == 0 ? -EPERM : 0;
}

void play_dead_common(void)
{
	idle_task_exit();
	irq_ctx_exit(raw_smp_processor_id());
	mb();

	__get_cpu_var(cpu_state) = CPU_DEAD;
	local_irq_disable();
}

void native_play_dead(void)
{
	play_dead_common();
}

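/*
 * Take the current CPU offline: ask the platform ops for permission,
 * mark it !online, migrate its IRQs, stop its local timer, flush
 * caches/TLBs and drop it from every mm's cpumask.
 */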
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	struct task_struct *p;
	int ret;

	ret = mp_ops->cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	local_timer_stop(cpu);

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
	local_flush_tlb_all();

	read_lock(&tasklist_lock);
	for_each_process(p)
		if (p->mm)
			cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
	read_unlock(&tasklist_lock);

	return 0;
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(unsigned int cpu)
{
	return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}

void native_play_dead(void)
{
	BUG();
}
#endif

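/*
 * Entry point for secondary CPUs: attach to init_mm, initialise traps
 * and the local timer, calibrate the delay loop, then mark the CPU
 * online and enter the idle loop.
 */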
asmlinkage void __cpuinit start_secondary(void)
{
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = &init_mm;

	enable_mmu();
	atomic_inc(&mm->mm_count);
	atomic_inc(&mm->mm_users);
	current->active_mm = mm;
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	per_cpu_trap_init();

	preempt_disable();

	notify_cpu_starting(cpu);

	local_irq_enable();

	/* Enable local timers */
	local_timer_setup(cpu);
	calibrate_delay();

	smp_store_cpu_info(cpu);

	set_cpu_online(cpu, true);
	per_cpu(cpu_state, cpu) = CPU_ONLINE;

	cpu_idle();
}

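/*
 * Boot parameter block shared with the secondary startup code in
 * head.S; __cpu_up() fills it in before releasing a secondary CPU.
 */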
extern struct {
	unsigned long sp;
	unsigned long bss_start;
	unsigned long bss_end;
	void *start_kernel_fn;
	void *cpu_init_fn;
	void *thread_info;
} stack_start;

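/*
 * Bring a secondary CPU up: allocate (or reuse) its idle task, fill in
 * stack_start for head.S, kick the CPU via the platform ops and wait
 * up to one second for it to come online.
 */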
int __cpuinit __cpu_up(unsigned int cpu)
{
	struct task_struct *tsk;
	unsigned long timeout;

	tsk = cpu_data[cpu].idle;
	if (!tsk) {
		tsk = fork_idle(cpu);
		if (IS_ERR(tsk)) {
			pr_err("Failed forking idle task for cpu %d\n", cpu);
			return PTR_ERR(tsk);
		}

		cpu_data[cpu].idle = tsk;
	}

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* Fill in data in head.S for secondary cpus */
	stack_start.sp = tsk->thread.sp;
	stack_start.thread_info = tsk->stack;
	stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
	stack_start.start_kernel_fn = start_secondary;

	flush_icache_range((unsigned long)&stack_start,
			   (unsigned long)&stack_start + sizeof(stack_start));
	wmb();

	mp_ops->start_cpu(cpu, (unsigned long)_stext);

	timeout = jiffies + HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu_online(cpu))
			break;

		udelay(10);
		barrier();
	}

	if (cpu_online(cpu))
		return 0;

	return -ENOENT;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	int cpu;

	for_each_online_cpu(cpu)
		bogosum += cpu_data[cpu].loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n", num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

void smp_send_reschedule(int cpu)
{
	mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, 0, 0);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
}

void arch_send_call_function_single_ipi(int cpu)
{
	mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
}

void smp_timer_broadcast(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
}

static void ipi_timer(void)
{
	irq_enter();
	local_timer_interrupt();
	irq_exit();
}

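/*
 * Dispatch an incoming IPI to the generic SMP call-function handlers,
 * the local timer tick, or a warning for unknown message types.
 */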
void smp_message_recv(unsigned int msg)
{
	switch (msg) {
	case SMP_MSG_FUNCTION:
		generic_smp_call_function_interrupt();
		break;
	case SMP_MSG_RESCHEDULE:
		break;
	case SMP_MSG_FUNCTION_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;
	case SMP_MSG_TIMER:
		ipi_timer();
		break;
	default:
		printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",
		       smp_processor_id(), __func__, msg);
		break;
	}
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, 0, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

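/* Argument block passed to the cross-CPU TLB flush IPI handlers below. */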
struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd;

	fd.addr1 = start;
	fd.addr2 = end;
	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
	    (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = page;
		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
	local_flush_tlb_one(fd->addr1, fd->addr2);
}

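/*
 * Flush a single (ASID, vaddr) translation on all CPUs.
 */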
void flush_tlb_one(unsigned long asid, unsigned long vaddr)
{
	struct flush_tlb_data fd;

	fd.addr1 = asid;
	fd.addr2 = vaddr;

	smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
	local_flush_tlb_one(asid, vaddr);
}
