// SPDX-License-Identifier: GPL-2.0-only
/*
 * SMP support for Hexagon
 *
 * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
 */

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched/mm.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/mm_types.h>

#include <asm/time.h>    /*  timer_interrupt  */
#include <asm/hexagon_vm.h>

#define BASE_IPI_IRQ 26
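/*
 * Each CPU is signalled on its own VM interrupt line, BASE_IPI_IRQ + cpu;
 * see send_ipi() and the request_irq() calls below.
 */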

/*
 * cpu_possible_mask needs to be filled out prior to setup_per_cpu_areas()
 * (which runs before any of our smp_prepare_cpus() work), so that the
 * per_cpu areas can be set up; smp_start_cpus() below takes care of it.
 */

struct ipi_data {
	unsigned long bits;
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data);

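/*
 * Dispatch every IPI message recorded in *ops.  find_next_bit() starts
 * its scan above bit 0, presumably because message 0 is a no-op.
 */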
static inline void __handle_ipi(unsigned long *ops, struct ipi_data *ipi,
				int cpu)
{
	unsigned long msg = 0;
	do {
		msg = find_next_bit(ops, BITS_PER_LONG, msg+1);

		switch (msg) {

		case IPI_TIMER:
			ipi_timer();
			break;

		case IPI_CALL_FUNC:
			generic_smp_call_function_interrupt();
			break;

		case IPI_CPU_STOP:
			/*
			 * call vmstop()
			 */
			__vmstop();
			break;

		case IPI_RESCHEDULE:
			scheduler_ipi();
			break;
		}
	} while (msg < BITS_PER_LONG);
}

/*  Used for IPI calls from other CPUs to unmask an interrupt  */
void smp_vm_unmask_irq(void *info)
{
	__vmintop_locen((long) info);
}


/*
 * This is based on Alpha's IPI handling.
 * The handler takes (int, void *) as arguments: the first is the irq,
 * the second is the dev_id cookie passed to request_irq()
 * (NULL here, so it is unused).
 */

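/*
 * Drain this CPU's pending-IPI word atomically with xchg() so that bits
 * set by other CPUs while we handle the current batch are not lost.
 */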
static irqreturn_t handle_ipi(int irq, void *desc)
{
	int cpu = smp_processor_id();
	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
	unsigned long ops;

	while ((ops = xchg(&ipi->bits, 0)) != 0)
		__handle_ipi(&ops, ipi, cpu);
	return IRQ_HANDLED;
}

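/*
 * Record the message in each target CPU's ipi_data, then post that CPU's
 * IPI interrupt through the hypervisor (__vmintop_post()).
 */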
void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
{
	unsigned long flags;
	unsigned long cpu;
	unsigned long retval;

	local_irq_save(flags);

	for_each_cpu(cpu, cpumask) {
		struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

		set_bit(msg, &ipi->bits);
		/*  Possible barrier here  */
		retval = __vmintop_post(BASE_IPI_IRQ+cpu);

		if (retval != 0) {
			printk(KERN_ERR "interrupt %lu not configured?\n",
				BASE_IPI_IRQ+cpu);
		}
	}

	local_irq_restore(flags);
}

/*
 * interrupts should already be disabled from the VM
 * SP should already be correct; need to set THREADINFO_REG
 * to point to current thread info
 */

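/*
 * Entry point for secondary CPUs, reached via __vmstart() from __cpu_up():
 * derive thread_info from SP, adopt init_mm, request this CPU's IPI line,
 * register the per-cpu clock device, mark the CPU online and enter the
 * idle loop.
 */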
static void start_secondary(void)
{
	unsigned long thread_ptr;
	unsigned int cpu, irq;

	/*  Calculate thread_info pointer from stack pointer  */
	__asm__ __volatile__(
		"%0 = SP;\n"
		: "=r" (thread_ptr)
	);

	thread_ptr = thread_ptr & ~(THREAD_SIZE-1);

	__asm__ __volatile__(
		QUOTED_THREADINFO_REG " = %0;\n"
		:
		: "r" (thread_ptr)
	);

	/*  Set the memory struct  */
	mmgrab(&init_mm);
	current->active_mm = &init_mm;

	cpu = smp_processor_id();

	irq = BASE_IPI_IRQ + cpu;
	if (request_irq(irq, handle_ipi, IRQF_TRIGGER_RISING, "ipi_handler",
			NULL))
		pr_err("Failed to request irq %u (ipi_handler)\n", irq);

	/*  Register the clock_event dummy  */
	setup_percpu_clockdev();

	printk(KERN_INFO "%s cpu %d\n", __func__, current_thread_info()->cpu);

	notify_cpu_starting(cpu);

	set_cpu_online(cpu, true);

	local_irq_enable();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}


/*
 * Called once for each present CPU: start the secondary running and
 * busy-wait here until cpu_online(cpu) becomes true.
 */

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	struct thread_info *thread = (struct thread_info *)idle->stack;
	void *stack_start;

	thread->cpu = cpu;

	/*  Boot to the head.  */
	stack_start =  ((void *) thread) + THREAD_SIZE;
	__vmstart(start_secondary, stack_start);

	while (!cpu_online(cpu))
		barrier();

	return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

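/*
 * Mark the CPUs we intend to bring up as present and request the boot
 * CPU's IPI interrupt (secondaries request theirs in start_secondary()).
 */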
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int i, irq = BASE_IPI_IRQ;

	/*
	 * should eventually have some sort of machine
	 * descriptor that has this stuff
	 */

	/*  Right now, let's just fake it. */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);

	/*  Also need to register the interrupts for IPI  */
	if (max_cpus > 1) {
		if (request_irq(irq, handle_ipi, IRQF_TRIGGER_RISING,
				"ipi_handler", NULL))
			pr_err("Failed to request irq %d (ipi_handler)\n", irq);
	}
}

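/*  Entry points used by the generic SMP code; each sends the matching IPI.  */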
void arch_smp_send_reschedule(int cpu)
{
	send_ipi(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	struct cpumask targets;
	cpumask_copy(&targets, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &targets);
	send_ipi(&targets, IPI_CPU_STOP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi(cpumask_of(cpu), IPI_CALL_FUNC);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_ipi(mask, IPI_CALL_FUNC);
}

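/*
 * Fill cpu_possible_mask for all NR_CPUS; per the comment at the top of
 * this file, this must happen before setup_per_cpu_areas().
 */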
void smp_start_cpus(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		set_cpu_possible(i, true);
}