• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6/arch/um/kernel/
1/*
2 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6#include "linux/percpu.h"
7#include "asm/pgalloc.h"
8#include "asm/tlb.h"
9
/*
 * For some reason, mmu_gathers are referenced when CONFIG_SMP is off.
 * NOTE(review): presumably required by generic mm code even on UP —
 * confirm before making this conditional.
 */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
12
13#ifdef CONFIG_SMP
14
15#include "linux/sched.h"
16#include "linux/module.h"
17#include "linux/threads.h"
18#include "linux/interrupt.h"
19#include "linux/err.h"
20#include "linux/hardirq.h"
21#include "asm/smp.h"
22#include "asm/processor.h"
23#include "asm/spinlock.h"
24#include "kern.h"
25#include "irq_user.h"
26#include "os.h"
27
28/* Per CPU bogomips and other parameters
29 * The only piece used here is the ipi pipe, which is set before SMP is
30 * started and never changed.
31 */
struct cpuinfo_um cpu_data[NR_CPUS];

/* A statistic, can be a little off: incremented without any locking. */
int num_reschedules_sent = 0;

/* Not changed after boot: one idle task per CPU, filled by idle_thread(). */
struct task_struct *idle_threads[NR_CPUS];
39
40void smp_send_reschedule(int cpu)
41{
42	os_write_file(cpu_data[cpu].ipi_pipe[1], "R", 1);
43	num_reschedules_sent++;
44}
45
46void smp_send_stop(void)
47{
48	int i;
49
50	printk(KERN_INFO "Stopping all CPUs...");
51	for (i = 0; i < num_online_cpus(); i++) {
52		if (i == current_thread->cpu)
53			continue;
54		os_write_file(cpu_data[i].ipi_pipe[1], "S", 1);
55	}
56	printk(KERN_CONT "done\n");
57}
58
/* CPUs released by the boot processor via __cpu_up(). */
static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
/* CPUs that have created their IPI pipe and checked in (see idle_proc()). */
static cpumask_t cpu_callin_map = CPU_MASK_NONE;
61
62static int idle_proc(void *cpup)
63{
64	int cpu = (int) cpup, err;
65
66	err = os_pipe(cpu_data[cpu].ipi_pipe, 1, 1);
67	if (err < 0)
68		panic("CPU#%d failed to create IPI pipe, err = %d", cpu, -err);
69
70	os_set_fd_async(cpu_data[cpu].ipi_pipe[0]);
71
72	wmb();
73	if (cpu_test_and_set(cpu, cpu_callin_map)) {
74		printk(KERN_ERR "huh, CPU#%d already present??\n", cpu);
75		BUG();
76	}
77
78	while (!cpu_isset(cpu, smp_commenced_mask))
79		cpu_relax();
80
81	notify_cpu_starting(cpu);
82	cpu_set(cpu, cpu_online_map);
83	default_idle();
84	return 0;
85}
86
87static struct task_struct *idle_thread(int cpu)
88{
89	struct task_struct *new_task;
90
91	current->thread.request.u.thread.proc = idle_proc;
92	current->thread.request.u.thread.arg = (void *) cpu;
93	new_task = fork_idle(cpu);
94	if (IS_ERR(new_task))
95		panic("copy_process failed in idle_thread, error = %ld",
96		      PTR_ERR(new_task));
97
98	cpu_tasks[cpu] = ((struct cpu_task)
99		          { .pid = 	new_task->thread.mode.tt.extern_pid,
100			    .task = 	new_task } );
101	idle_threads[cpu] = new_task;
102	panic("skas mode doesn't support SMP");
103	return new_task;
104}
105
106void smp_prepare_cpus(unsigned int maxcpus)
107{
108	struct task_struct *idle;
109	unsigned long waittime;
110	int err, cpu, me = smp_processor_id();
111	int i;
112
113	for (i = 0; i < ncpus; ++i)
114		set_cpu_possible(i, true);
115
116	cpu_clear(me, cpu_online_map);
117	cpu_set(me, cpu_online_map);
118	cpu_set(me, cpu_callin_map);
119
120	err = os_pipe(cpu_data[me].ipi_pipe, 1, 1);
121	if (err < 0)
122		panic("CPU#0 failed to create IPI pipe, errno = %d", -err);
123
124	os_set_fd_async(cpu_data[me].ipi_pipe[0]);
125
126	for (cpu = 1; cpu < ncpus; cpu++) {
127		printk(KERN_INFO "Booting processor %d...\n", cpu);
128
129		idle = idle_thread(cpu);
130
131		init_idle(idle, cpu);
132
133		waittime = 200000000;
134		while (waittime-- && !cpu_isset(cpu, cpu_callin_map))
135			cpu_relax();
136
137		printk(KERN_INFO "%s\n",
138		       cpu_isset(cpu, cpu_calling_map) ? "done" : "failed");
139	}
140}
141
142void smp_prepare_boot_cpu(void)
143{
144	cpu_set(smp_processor_id(), cpu_online_map);
145}
146
147int __cpu_up(unsigned int cpu)
148{
149	cpu_set(cpu, smp_commenced_mask);
150	while (!cpu_isset(cpu, cpu_online_map))
151		mb();
152	return 0;
153}
154
/* Profiling-timer multiplier changes are not supported; just log the call. */
int setup_profiling_timer(unsigned int multiplier)
{
	printk(KERN_INFO "setup_profiling_timer\n");
	return 0;
}
160
161void smp_call_function_slave(int cpu);
162
163void IPI_handler(int cpu)
164{
165	unsigned char c;
166	int fd;
167
168	fd = cpu_data[cpu].ipi_pipe[0];
169	while (os_read_file(fd, &c, 1) == 1) {
170		switch (c) {
171		case 'C':
172			smp_call_function_slave(cpu);
173			break;
174
175		case 'R':
176			set_tsk_need_resched(current);
177			break;
178
179		case 'S':
180			printk(KERN_INFO "CPU#%d stopping\n", cpu);
181			while (1)
182				pause();
183			break;
184
185		default:
186			printk(KERN_ERR "CPU#%d received unknown IPI [%c]!\n",
187			       cpu, c);
188			break;
189		}
190	}
191}
192
/* Map our host process id back to a virtual CPU number. */
int hard_smp_processor_id(void)
{
	int pid = os_getpid();

	return pid_to_processor_id(pid);
}
197
/* Serializes initiators of smp_call_function(). */
static DEFINE_SPINLOCK(call_lock);
/* Counts of slaves that have started / finished the current cross-call. */
static atomic_t scf_started;
static atomic_t scf_finished;
/* Function and argument of the cross-call currently in flight. */
static void (*func)(void *info);
static void *info;
203
/*
 * Run the pending cross-call on this CPU.  scf_started is bumped
 * before the call and scf_finished after it, so the initiator in
 * smp_call_function() can wait on either phase.
 */
void smp_call_function_slave(int cpu)
{
	atomic_inc(&scf_started);
	(*func)(info);
	atomic_inc(&scf_finished);
}
210
/*
 * Run _func(_info) on the other online CPUs by writing a 'C' byte to
 * their IPI pipes; each receiver runs smp_call_function_slave().
 * Spins until all of them have started the call and, if @wait is set,
 * until they have all finished it.  Always returns 0.
 */
int smp_call_function(void (*_func)(void *info), void *_info, int wait)
{
	int cpus = num_online_cpus() - 1;	/* everyone but us */
	int i;

	if (!cpus)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	spin_lock_bh(&call_lock);
	atomic_set(&scf_started, 0);
	atomic_set(&scf_finished, 0);
	func = _func;
	info = _info;

	/*
	 * NOTE(review): this also writes 'C' to the current CPU's own
	 * pipe while the waits below expect only cpus (= online - 1)
	 * responders - confirm the self-IPI is intentional.
	 */
	for_each_online_cpu(i)
		os_write_file(cpu_data[i].ipi_pipe[1], "C", 1);

	while (atomic_read(&scf_started) != cpus)
		barrier();

	if (wait)
		while (atomic_read(&scf_finished) != cpus)
			barrier();

	spin_unlock_bh(&call_lock);
	return 0;
}
241
242#endif
243