// SPDX-License-Identifier: GPL-2.0-or-later
/*
** SMP Support
**
** Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
** Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
** Copyright (C) 2001,2004 Grant Grundler <grundler@parisc-linux.org>
**
** Lots of stuff stolen from arch/alpha/kernel/smp.c
** ...and then parisc stole from arch/ia64/kernel/smp.c. Thanks David! :^)
**
** Thanks to John Curry and Ullas Ponnadi. I learned a lot from their work.
** -grant (1/12/2001)
**
*/
#include <linux/types.h>
#include <linux/spinlock.h>

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched/mm.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/kgdb.h>
#include <linux/sched/hotplug.h>

#include <linux/atomic.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/tlbflush.h>

#include <asm/io.h>
#include <asm/irq.h>		/* for CPU_IRQ_REGION and friends */
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>
#undef DEBUG_SMP
#ifdef DEBUG_SMP
static int smp_debug_lvl = 0;
/* Wrapped in do/while so the macro is safe inside if/else bodies. */
#define smp_debug(lvl, printargs...)		\
	do {					\
		if (lvl >= smp_debug_lvl)	\
			printk(printargs);	\
	} while (0)
#else
#define smp_debug(lvl, ...)	do { } while (0)
#endif /* DEBUG_SMP */

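/*
 * Set by smp_boot_one_cpu() before a slave is released; the slave
 * bootstrap code reads it to find its idle task (and, through it,
 * its initial kernel stack).
 */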
volatile struct task_struct *smp_init_current_idle_task;

/* track which CPU is booting */
static volatile int cpu_now_booting;

static DEFINE_PER_CPU(spinlock_t, ipi_lock);

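/*
 * Each IPI message type is a single bit in the per-CPU
 * cpu_data.pending_ipi word; see ipi_send() and ipi_interrupt().
 */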
enum ipi_message_type {
	IPI_NOP = 0,
	IPI_RESCHEDULE = 1,
	IPI_CALL_FUNC,
	IPI_CPU_START,
	IPI_CPU_STOP,
	IPI_CPU_TEST,
#ifdef CONFIG_KGDB
	IPI_ENTER_KGDB,
#endif
};


/********** SMP inter processor interrupt and communication routines */

#undef PER_CPU_IRQ_REGION
#ifdef PER_CPU_IRQ_REGION
/* XXX REVISIT Ignore for now.
**    *May* need this "hook" to register IPI handler
**    once we have perCPU ExtIntr switch tables.
*/
static void
ipi_init(int cpuid)
{
#error verify IRQ_OFFSET(IPI_IRQ) is ipi_interrupt() in new IRQ region

	if (cpu_online(cpuid))
		switch_to_idle_task(current);
}
#endif


/*
** Yoink this CPU from the runnable list...
**
*/
static void
halt_processor(void)
{
	/* REVISIT : redirect I/O Interrupts to another CPU? */
	/* REVISIT : does PM *know* this CPU isn't available? */
	set_cpu_online(smp_processor_id(), false);
	local_irq_disable();
	__pdc_cpu_rendezvous();
	for (;;)
		;
}

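/*
 * Per-CPU IPI handler: atomically snapshot and clear this CPU's
 * pending_ipi mask under the per-CPU IPI lock, then service each
 * message bit in turn.  Loop until no new IPIs have been posted.
 */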
irqreturn_t __irq_entry
ipi_interrupt(int irq, void *dev_id)
{
	int this_cpu = smp_processor_id();
	struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu);
	unsigned long ops;
	unsigned long flags;

	for (;;) {
		spinlock_t *lock = &per_cpu(ipi_lock, this_cpu);
		spin_lock_irqsave(lock, flags);
		ops = p->pending_ipi;
		p->pending_ipi = 0;
		spin_unlock_irqrestore(lock, flags);

		mb(); /* Order bit clearing and data access. */

		if (!ops)
			break;

		while (ops) {
			unsigned long which = ffz(~ops);

			ops &= ~(1 << which);

			switch (which) {
			case IPI_NOP:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_NOP\n", this_cpu);
				break;

			case IPI_RESCHEDULE:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu);
				inc_irq_stat(irq_resched_count);
				scheduler_ipi();
				break;

			case IPI_CALL_FUNC:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu);
				inc_irq_stat(irq_call_count);
				generic_smp_call_function_interrupt();
				break;

			case IPI_CPU_START:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu);
				break;

			case IPI_CPU_STOP:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_STOP\n", this_cpu);
				halt_processor();
				break;

			case IPI_CPU_TEST:
				smp_debug(100, KERN_DEBUG "CPU%d is alive!\n", this_cpu);
				break;
#ifdef CONFIG_KGDB
			case IPI_ENTER_KGDB:
				smp_debug(100, KERN_DEBUG "CPU%d ENTER_KGDB\n", this_cpu);
				kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
				break;
#endif
			default:
				printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n",
					this_cpu, which);
				return IRQ_NONE;
			} /* Switch */

			/* before doing more, let in any pending interrupts */
			if (ops) {
				local_irq_enable();
				local_irq_disable();
			}
		} /* while (ops) */
	}
	return IRQ_HANDLED;
}

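/*
 * Post one IPI message: set its bit in the target CPU's pending_ipi
 * mask, then write the IPI vector number to the target CPU's HPA to
 * raise an external interrupt there.
 */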
static inline void
ipi_send(int cpu, enum ipi_message_type op)
{
	struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu);
	spinlock_t *lock = &per_cpu(ipi_lock, cpu);
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	p->pending_ipi |= 1 << op;
	gsc_writel(IPI_IRQ - CPU_IRQ_BASE, p->hpa);
	spin_unlock_irqrestore(lock, flags);
}

static void
send_IPI_mask(const struct cpumask *mask, enum ipi_message_type op)
{
	int cpu;

	for_each_cpu(cpu, mask)
		ipi_send(cpu, op);
}

static inline void
send_IPI_single(int dest_cpu, enum ipi_message_type op)
{
	BUG_ON(dest_cpu == NO_PROC_ID);

	ipi_send(dest_cpu, op);
}

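/*
 * Preemption is disabled so that smp_processor_id() stays stable
 * while we IPI every online CPU except ourselves.
 */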
static inline void
send_IPI_allbutself(enum ipi_message_type op)
{
	int i;

	preempt_disable();
	for_each_online_cpu(i) {
		if (i != smp_processor_id())
			send_IPI_single(i, op);
	}
	preempt_enable();
}

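/* Round up all other CPUs into the debugger via IPI. */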
#ifdef CONFIG_KGDB
void kgdb_roundup_cpus(void)
{
	send_IPI_allbutself(IPI_ENTER_KGDB);
}
#endif

inline void
smp_send_stop(void)
{
	send_IPI_allbutself(IPI_CPU_STOP);
}

void
arch_smp_send_reschedule(int cpu)
{
	send_IPI_single(cpu, IPI_RESCHEDULE);
}

void
smp_send_all_nop(void)
{
	send_IPI_allbutself(IPI_NOP);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_IPI_mask(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_IPI_single(cpu, IPI_CALL_FUNC);
}

/*
 * Called by secondaries to update state and initialize CPU registers.
 */
static void
smp_cpu_init(int cpunum)
{
	/* Set modes and enable the floating point coprocessor */
	init_per_cpu(cpunum);

	disable_sr_hashing();

	mb();

	/* Well, support the 2.4 Linux scheme as well. */
	if (cpu_online(cpunum)) {
		extern void machine_halt(void); /* arch/parisc.../process.c */

		printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum);
		machine_halt();
	}

	notify_cpu_starting(cpunum);

	set_cpu_online(cpunum, true);

	/* Initialise the idle task for this CPU */
	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	init_IRQ();   /* make sure no IRQs are enabled or pending */
	start_cpu_itimer();
}


/*
 * Slaves start using C here. Indirectly called from smp_slave_stext.
 * Do what start_kernel() and main() do for the boot strap processor (aka monarch).
 */
void smp_callin(unsigned long pdce_proc)
{
	int slave_id = cpu_now_booting;

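	/*
	 * Sanity-check: the PDC entry point handed to us by the slave
	 * boot path should match the one recorded in PAGE0.
	 */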
#ifdef CONFIG_64BIT
	WARN_ON(((unsigned long)(PAGE0->mem_pdc_hi) << 32
			| PAGE0->mem_pdc) != pdce_proc);
#endif

	smp_cpu_init(slave_id);

	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local(NULL);

	local_irq_enable();  /* Interrupts have been off until now */

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

	/* NOTREACHED */
	panic("smp_callin() AAAAaaaaahhhh....\n");
}

/*
 * Bring one cpu online.
 */
static int smp_boot_one_cpu(int cpuid, struct task_struct *idle)
{
	const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);
	long timeout;

#ifdef CONFIG_HOTPLUG_CPU
	int i;

	/* reset irq statistics for this CPU */
	memset(&per_cpu(irq_stat, cpuid), 0, sizeof(irq_cpustat_t));
	for (i = 0; i < NR_IRQS; i++) {
		struct irq_desc *desc = irq_to_desc(i);

		if (desc && desc->kstat_irqs)
			*per_cpu_ptr(desc->kstat_irqs, cpuid) = 0;
	}
#endif

	/* wait until the last booting CPU has started. */
	while (cpu_now_booting)
		;

	/* Let _start know what logical CPU we're booting
	** (offset into init_tasks[], cpu_data[])
	*/
	cpu_now_booting = cpuid;

	/*
	** The boot strap code needs to know the task address since
	** it also contains the process stack.
	*/
	smp_init_current_idle_task = idle;
	mb();

	printk(KERN_INFO "Releasing cpu %d now, hpa=%lx\n", cpuid, p->hpa);

	/*
	** This gets PDC to release the CPU from a very tight loop.
	**
	** From the PA-RISC 2.0 Firmware Architecture Reference Specification:
	** "The MEM_RENDEZ vector specifies the location of OS_RENDEZ which
	** is executed after receiving the rendezvous signal (an interrupt to
	** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the
	** contents of memory are valid."
	*/
	gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, p->hpa);
	mb();

	/*
	 * OK, wait a bit for that CPU to finish staggering about.
	 * The slave will mark itself online in smp_cpu_init().
	 * Once the "monarch CPU" sees that, it can move on.
	 */
	for (timeout = 0; timeout < 10000; timeout++) {
		if (cpu_online(cpuid)) {
			/* Which implies the slave has started up */
			cpu_now_booting = 0;
			goto alive;
		}
		udelay(100);
		barrier();
	}
	printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
	return -1;

alive:
	/* Remember the slave data */
	smp_debug(100, KERN_DEBUG "SMP: CPU:%d came alive after %ld us\n",
		cpuid, timeout * 100);
	return 0;
}

void __init smp_prepare_boot_cpu(void)
{
	pr_info("SMP: bootstrap CPU ID is 0\n");
}


/*
** inventory.c:do_inventory() hasn't yet been run and thus we
** don't 'discover' the additional CPUs until later.
*/
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int cpu;

	for_each_possible_cpu(cpu)
		spin_lock_init(&per_cpu(ipi_lock, cpu));

	init_cpu_present(cpumask_of(0));
}


void __init smp_cpus_done(unsigned int cpu_max)
{
}

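/*
 * Bring a secondary CPU online.  Returns 0 if the CPU came up (or
 * already was up), -EIO if it failed to appear online in time.
 */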
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	if (cpu_online(cpu))
		return 0;

	if (num_online_cpus() < nr_cpu_ids &&
		num_online_cpus() < setup_max_cpus &&
		smp_boot_one_cpu(cpu, tidle))
		return -EIO;

	return cpu_online(cpu) ? 0 : -EIO;
}

/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpu_disable(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	unsigned int cpu = smp_processor_id();

	remove_cpu_topology(cpu);

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/* Find a new timesync master */
	if (cpu == time_keeper_id) {
		time_keeper_id = cpumask_first(cpu_online_mask);
		pr_info("CPU %d is now promoted to time-keeper master\n", time_keeper_id);
	}

	disable_percpu_irq(IPI_IRQ);

	irq_migrate_all_off_this_cpu();

	flush_cache_all_local();
	flush_tlb_all_local(NULL);

	/* disable all irqs, including the timer irq */
	local_irq_disable();

	/* wait for the next timer irq ... */
	mdelay(1000/HZ+100);

	/* ... and then clear all pending external irqs; EIRR bits are
	 * cleared by writing ones to them */
	set_eiem(0);
	mtctl(~0UL, CR_EIRR);
	mfctl(CR_EIRR);
	mtctl(0, CR_EIRR);
#endif
	return 0;
}

/*
 * Called from the thread that asked for this CPU to be shut down;
 * waits until the shutdown has completed or times out.
 */
void __cpu_die(unsigned int cpu)
{
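	/*
	 * Take the PDC rendezvous lock; arch_cpuhp_cleanup_dead_cpu()
	 * releases it once the dead CPU has had time to reach firmware
	 * rendezvous.
	 */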
	pdc_cpu_rendezvous_lock();
}

void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
{
	pr_info("CPU%u is shutting down\n", cpu);

	/* set the task's state to interruptible sleep */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule_timeout((IS_ENABLED(CONFIG_64BIT) ? 8 : 2) * HZ);

	pdc_cpu_rendezvous_unlock();
}