// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/arch/alpha/kernel/smp.c
 *
 *      2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com)
 *            Renamed modified smp_call_function to smp_call_function_on_cpu()
 *            Created a function that conforms to the old calling convention
 *            of smp_call_function().
 *
 *            This is helpful for DCPI.
 *
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched/mm.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/cpu.h>

#include <asm/hwrpb.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"


#define DEBUG_SMP 0
#if DEBUG_SMP
#define DBGS(args)	printk args
#else
#define DBGS(args)
#endif
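
/*
 * Note the double parentheses in the DBGS idiom above: the macro takes
 * one parenthesized printk argument list, so a (hypothetical) call is
 * written as
 *
 *	DBGS(("smp_callin: cpu %d alive\n", cpuid));
 *
 * which expands to the printk only when DEBUG_SMP is non-zero and
 * compiles away entirely otherwise.
 */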

/* A collection of per-processor data.  */
struct cpuinfo_alpha cpu_data[NR_CPUS];
EXPORT_SYMBOL(cpu_data);

/* A collection of single bit ipi messages.  Each CPU's word gets its
   own cache line to avoid false sharing between the senders and the
   receiving CPU.  */
static struct {
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;

enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};
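
/*
 * Each message type is a bit index into ipi_data[cpu].bits: the sender
 * does set_bit(operation, &ipi_data[cpu].bits), so several pending
 * messages can coexist in one word, e.g.
 *
 *	bits = (1UL << IPI_RESCHEDULE) | (1UL << IPI_CALL_FUNC);
 *
 * and handle_ipi() claims the whole word with a single xchg.
 */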

/* Set to a secondary's cpuid when it comes online.  */
static int smp_secondary_alive = 0;

int smp_num_probed;		/* Internal processor count */
int smp_num_cpus = 1;		/* Number that came online.  */
EXPORT_SYMBOL(smp_num_cpus);

/*
 * Called by both the boot CPU and the secondaries to move global
 * data into per-processor storage.
 */
static inline void __init
smp_store_cpu_info(int cpuid)
{
	cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
	cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
	cpu_data[cpuid].need_new_asn = 0;
	cpu_data[cpuid].asn_lock = 0;
}

/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __init
smp_setup_percpu_timer(int cpuid)
{
	cpu_data[cpuid].prof_counter = 1;
	cpu_data[cpuid].prof_multiplier = 1;
}

static void __init
wait_boot_cpu_to_stop(int cpuid)
{
	unsigned long stop = jiffies + 10*HZ;

	while (time_before(jiffies, stop)) {
		if (!smp_secondary_alive)
			return;
		barrier();
	}

	printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid);
	for (;;)
		barrier();
}
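
/*
 * Boot-time handshake, as implemented below: the boot CPU sets
 * smp_secondary_alive to -1 before starting a secondary; the secondary
 * spins in wait_boot_cpu_to_stop() until the boot CPU drops it to 0,
 * then calibrates its delay loop and announces itself by setting it
 * to 1, which smp_boot_one_cpu() is in turn waiting for.
 */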

/*
 * Where secondaries begin a life of C.
 */
void __init
smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	if (cpu_online(cpuid)) {
		printk("??, cpu 0x%x already present??\n", cpuid);
		BUG();
	}
	set_cpu_online(cpuid, true);

	/* Turn on machine checks.  */
	wrmces(7);

	/* Set trap vectors.  */
	trap_init();

	/* Set interrupt vector.  */
	wrent(entInt, 0);

	/* Get our local ticker going. */
	smp_setup_percpu_timer(cpuid);
	init_clockevent();

	/* Call platform-specific callin, if specified */
	if (alpha_mv.smp_callin)
		alpha_mv.smp_callin();

	/* All kernel threads share the same mm context.  */
	mmgrab(&init_mm);
	current->active_mm = &init_mm;

	/* inform the notifiers about the new cpu */
	notify_cpu_starting(cpuid);

	/* Must have completely accurate bogos.  */
	local_irq_enable();

	/* Wait for the boot CPU to stop, with irqs enabled, before
	   running calibrate_delay.  */
	wait_boot_cpu_to_stop(cpuid);
	mb();
	calibrate_delay();

	smp_store_cpu_info(cpuid);
	/* Allow the master to continue only after we've written
	   loops_per_jiffy.  */
	wmb();
	smp_secondary_alive = 1;

	DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
	      cpuid, current, current->active_mm));

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

/* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
static int
wait_for_txrdy(unsigned long cpumask)
{
	unsigned long timeout;

	if (!(hwrpb->txrdy & cpumask))
		return 0;

	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (!(hwrpb->txrdy & cpumask))
			return 0;
		udelay(10);
		barrier();
	}

	return -1;
}
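
/*
 * Console message passing, as used below: each per-CPU slot in the
 * HWRPB has an ipc_buffer; the sender stores the message length in the
 * first quadword and the text after it, then sets the target's bit in
 * hwrpb->rxrdy.  Replies arrive the same way, flagged in hwrpb->txrdy.
 * (Note the asymmetry visible in the code: we store the outgoing
 * length in the low 32 bits, while incoming lengths are read from the
 * high 32 bits of the first quadword.)
 */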

/*
 * Send a message to a secondary's console.  "START" is one such
 * interesting message.  ;-)
 */
static void
send_secondary_console_msg(char *str, int cpuid)
{
	struct percpu_struct *cpu;
	register char *cp1, *cp2;
	unsigned long cpumask;
	size_t len;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);

	cpumask = (1UL << cpuid);
	if (wait_for_txrdy(cpumask))
		goto timeout;

	cp2 = str;
	len = strlen(cp2);
	*(unsigned int *)&cpu->ipc_buffer[0] = len;
	cp1 = (char *) &cpu->ipc_buffer[1];
	memcpy(cp1, cp2, len);

	/* Announce the message; the wmb orders the buffer writes before
	   the console can see our rxrdy bit.  */
	wmb();
	set_bit(cpuid, &hwrpb->rxrdy);

	if (wait_for_txrdy(cpumask))
		goto timeout;
	return;

 timeout:
	printk("Processor %x not ready\n", cpuid);
}

/*
 * A secondary console wants to send a message.  Receive it.
 */
static void
recv_secondary_console_msg(void)
{
	int mycpu, i, cnt;
	unsigned long txrdy = hwrpb->txrdy;
	char *cp1, *cp2, buf[80];
	struct percpu_struct *cpu;

	DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));

	mycpu = hard_smp_processor_id();

	for (i = 0; i < NR_CPUS; i++) {
		if (!(txrdy & (1UL << i)))
			continue;

		DBGS(("recv_secondary_console_msg: "
		      "TXRDY contains CPU %d.\n", i));

		cpu = (struct percpu_struct *)
		  ((char*)hwrpb
		   + hwrpb->processor_offset
		   + i * hwrpb->processor_size);

		DBGS(("recv_secondary_console_msg: on %d from %d"
		      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
		      mycpu, i, cpu->halt_reason, cpu->flags));

		cnt = cpu->ipc_buffer[0] >> 32;
		if (cnt <= 0 || cnt >= 80)
			strcpy(buf, "<<< BOGUS MSG >>>");
		else {
			cp1 = (char *) &cpu->ipc_buffer[1];
			cp2 = buf;
			memcpy(cp2, cp1, cnt);
			cp2[cnt] = '\0';

			while ((cp2 = strchr(cp2, '\r')) != 0) {
				*cp2 = ' ';
				if (cp2[1] == '\n')
					cp2[1] = ' ';
			}
		}

		DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
		      "message is '%s'\n", mycpu, buf));
	}

	hwrpb->txrdy = 0;
}

/*
 * Convince the console to have a secondary cpu begin execution.
 */
static int
secondary_cpu_start(int cpuid, struct task_struct *idle)
{
	struct percpu_struct *cpu;
	struct pcb_struct *hwpcb, *ipcb;
	unsigned long timeout;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);
	hwpcb = (struct pcb_struct *) cpu->hwpcb;
	ipcb = &task_thread_info(idle)->pcb;

	/* Initialize the CPU's HWPCB to something just good enough for
	   us to get started.  Immediately after starting, we'll swpctx
	   to the target idle task's pcb.  Reuse the stack in the mean
	   time.  Precalculate the target PCBB.  */
	hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
	hwpcb->usp = 0;
	hwpcb->ptbr = ipcb->ptbr;
	hwpcb->pcc = 0;
	hwpcb->asn = 0;
	hwpcb->unique = virt_to_phys(ipcb);
	hwpcb->flags = ipcb->flags;
	hwpcb->res1 = hwpcb->res2 = 0;

#if 0
	DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
	      hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
#endif
	DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
	      cpuid, idle->state, ipcb->flags));

	/* Setup HWRPB fields that SRM uses to activate secondary CPU */
	hwrpb->CPU_restart = __smp_callin;
	hwrpb->CPU_restart_data = (unsigned long) __smp_callin;

	/* Recalculate and update the HWRPB checksum */
	hwrpb_update_checksum(hwrpb);

	/*
	 * Send a "start" command to the specified processor.
	 */

	/* SRM III 3.4.1.3 */
	cpu->flags |= 0x22;	/* turn on Context Valid and Restart Capable */
	cpu->flags &= ~1;	/* turn off Bootstrap In Progress */
	wmb();

	send_secondary_console_msg("START\r\n", cpuid);

	/* Wait 10 seconds for an ACK from the console.  */
	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu->flags & 1)
			goto started;
		udelay(10);
		barrier();
	}
	printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
	return -1;

 started:
	DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
	return 0;
}

/*
 * Bring one cpu online.
 */
static int
smp_boot_one_cpu(int cpuid, struct task_struct *idle)
{
	unsigned long timeout;

	/* Signal the secondary to wait a moment.  */
	smp_secondary_alive = -1;

	/* Whirrr, whirrr, whirrrrrrrrr... */
	if (secondary_cpu_start(cpuid, idle))
		return -1;

	/* Notify the secondary CPU it can run calibrate_delay.  */
	mb();
	smp_secondary_alive = 0;

	/* We've been acked by the console; wait one second for
	   the task to start up for real.  */
	timeout = jiffies + 1*HZ;
	while (time_before(jiffies, timeout)) {
		if (smp_secondary_alive == 1)
			goto alive;
		udelay(10);
		barrier();
	}

	/* We failed to boot the CPU.  */

	printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
	return -1;

 alive:
	/* Another "Red Snapper". */
	return 0;
}

/*
 * Called from setup_arch.  Detect an SMP system and which processors
 * are present.
 */
void __init
setup_smp(void)
{
	struct percpu_struct *cpubase, *cpu;
	unsigned long i;

	if (boot_cpuid != 0) {
		printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
		       boot_cpuid);
	}

	if (hwrpb->nr_processors > 1) {
		int boot_cpu_palrev;

		DBGS(("setup_smp: nr_processors %ld\n",
		      hwrpb->nr_processors));

		cpubase = (struct percpu_struct *)
			((char*)hwrpb + hwrpb->processor_offset);
		boot_cpu_palrev = cpubase->pal_revision;

		for (i = 0; i < hwrpb->nr_processors; i++) {
			cpu = (struct percpu_struct *)
				((char *)cpubase + i*hwrpb->processor_size);
			/* Present, available, and with valid, loaded
			   PALcode (the PP, PA, PV, PMV and PL flags in
			   the SRM per-CPU slot)?  */
			if ((cpu->flags & 0x1cc) == 0x1cc) {
				smp_num_probed++;
				set_cpu_possible(i, true);
				set_cpu_present(i, true);
				cpu->pal_revision = boot_cpu_palrev;
			}

			DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
			      i, cpu->flags, cpu->type));
			DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
			      i, cpu->pal_revision));
		}
	} else {
		smp_num_probed = 1;
	}

	printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
	       smp_num_probed, cpumask_bits(cpu_present_mask)[0]);
}

/*
 * Called by smp_init to prepare the secondaries.
 */
void __init
smp_prepare_cpus(unsigned int max_cpus)
{
	/* Take care of some initial bookkeeping.  */
	memset(ipi_data, 0, sizeof(ipi_data));

	current_thread_info()->cpu = boot_cpuid;

	smp_store_cpu_info(boot_cpuid);
	smp_setup_percpu_timer(boot_cpuid);

	/* Nothing to do on a UP box, or when told not to.  */
	if (smp_num_probed == 1 || max_cpus == 0) {
		init_cpu_possible(cpumask_of(boot_cpuid));
		init_cpu_present(cpumask_of(boot_cpuid));
		printk(KERN_INFO "SMP mode deactivated.\n");
		return;
	}

	printk(KERN_INFO "SMP starting up secondaries.\n");

	smp_num_cpus = smp_num_probed;
}

int
__cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	smp_boot_one_cpu(cpu, tidle);

	return cpu_online(cpu) ? 0 : -ENOSYS;
}

void __init
smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_online(cpu))
			bogosum += cpu_data[cpu].loops_per_jiffy;

	/* BogoMIPS = loops_per_jiffy * HZ / 500000; print the integer
	   part and two rounded decimal places.  */
	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       (bogosum + 2500) / (500000/HZ),
	       ((bogosum + 2500) / (5000/HZ)) % 100);
}

static void
send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
{
	int i;

	mb();	/* Order earlier stores before the message bits. */
	for_each_cpu(i, to_whom)
		set_bit(operation, &ipi_data[i].bits);

	mb();	/* Make the bits visible before raising the interrupts. */
	for_each_cpu(i, to_whom)
		wripir(i);
}
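
/*
 * All pending message bits for this CPU are claimed at once with the
 * xchg below; a bit set after that xchg belongs to a later pass of the
 * outer loop (or to the next interrupt).  The inner loop then peels
 * off and dispatches one message bit at a time.
 */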

void
handle_ipi(struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
	unsigned long ops;

#if 0
	DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
	      this_cpu, *pending_ipis, regs->pc));
#endif

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
		mb();	/* Order bit clearing and data access. */
		do {
			unsigned long which;

			which = ops & -ops;	/* Isolate the lowest set bit...  */
			ops &= ~which;
			which = __ffs(which);	/* ...and turn it into a bit number.  */

			switch (which) {
			case IPI_RESCHEDULE:
				scheduler_ipi();
				break;

			case IPI_CALL_FUNC:
				generic_smp_call_function_interrupt();
				break;

			case IPI_CPU_STOP:
				halt();	/* Never returns.  */

			default:
				printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
				       this_cpu, which);
				break;
			}
		} while (ops);

		mb();	/* Order data access and bit testing. */
	}

	cpu_data[this_cpu].ipi_count++;

	if (hwrpb->txrdy)
		recv_secondary_console_msg();
}

void
arch_smp_send_reschedule(int cpu)
{
#ifdef DEBUG_IPI_MSG
	if (cpu == hard_smp_processor_id())
		printk(KERN_WARNING
		       "smp_send_reschedule: Sending IPI to self.\n");
#endif
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

void
smp_send_stop(void)
{
	cpumask_t to_whom;
	cpumask_copy(&to_whom, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &to_whom);
#ifdef DEBUG_IPI_MSG
	if (hard_smp_processor_id() != boot_cpu_id)
		printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
	send_ipi_message(&to_whom, IPI_CPU_STOP);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}

static void
ipi_imb(void *ignored)
{
	imb();
}

void
smp_imb(void)
{
	/* Must wait for the other processors to flush their icaches
	   before continuing.  */
	on_each_cpu(ipi_imb, NULL, 1);
}
EXPORT_SYMBOL(smp_imb);

static void
ipi_flush_tlb_all(void *ignored)
{
	tbia();
}

void
flush_tlb_all(void)
{
	/* Although we don't have any data to pass, we do want to
	   synchronize with the other processors.  */
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)
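
/*
 * The flush helpers below distinguish two cases: if the mm is live on
 * this CPU (and the ASN isn't locked), flush the current context
 * directly; otherwise flush_tlb_other() merely invalidates the mm's
 * per-CPU context, so a fresh ASN is allocated the next time the mm
 * runs there, which flushes the stale translations implicitly.
 */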

static void
ipi_flush_tlb_mm(void *x)
{
	struct mm_struct *mm = x;
	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			/* No other users: rather than IPI everyone,
			   invalidate the mm's context on the other
			   CPUs so a new ASN is taken if it ever
			   migrates there.  */
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	smp_call_function(ipi_flush_tlb_mm, mm, 1);

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

struct flush_tlb_page_struct {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
};

static void
ipi_flush_tlb_page(void *x)
{
	struct flush_tlb_page_struct *data = x;
	struct mm_struct *mm = data->mm;

	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current_page(mm, data->vma, data->addr);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_tlb_page_struct data;
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current_page(mm, vma, addr);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	data.vma = vma;
	data.mm = mm;
	data.addr = addr;

	smp_call_function(ipi_flush_tlb_page, &data, 1);

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	/* On the Alpha we always flush the whole user tlb.  */
	flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);

static void
ipi_flush_icache_page(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		__load_new_mm_context(mm);
	else
		flush_tlb_other(mm);
}

void
flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
			unsigned long addr, int len)
{
	struct mm_struct *mm = vma->vm_mm;

	if ((vma->vm_flags & VM_EXEC) == 0)
		return;

	preempt_disable();

	if (mm == current->active_mm) {
		__load_new_mm_context(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	smp_call_function(ipi_flush_icache_page, mm, 1);

	preempt_enable();
}