/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>

static struct {
	struct list_head	queue;
	raw_spinlock_t		lock;
} call_function __cacheline_aligned_in_smp =
	{
		.queue		= LIST_HEAD_INIT(call_function.queue),
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(call_function.lock),
	};

enum {
	CSD_FLAG_LOCK		= 0x01,
};

struct call_function_data {
	struct call_single_data	csd;
	atomic_t		refs;
	cpumask_var_t		cpumask;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

struct call_single_queue {
	struct list_head	list;
	raw_spinlock_t		lock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_queue, call_single_queue);

static int
hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				cpu_to_node(cpu)))
			return notifier_from_errno(-ENOMEM);
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:

	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		free_cpumask_var(cfd->cpumask);
		break;
#endif
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
	.notifier_call		= hotplug_cfd,
};

static int __cpuinit init_call_single_data(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int i;

	for_each_possible_cpu(i) {
		struct call_single_queue *q = &per_cpu(call_single_queue, i);

		raw_spin_lock_init(&q->lock);
		INIT_LIST_HEAD(&q->list);
	}

	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
	register_cpu_notifier(&hotplug_cfd_notifier);

	return 0;
}
early_initcall(init_call_single_data);

/*
 * csd_lock/csd_unlock are used to serialize access to per-cpu csd resources.
 *
 * For non-synchronous ipi calls the csd can still be in use by a
 * previous function call. For multi-cpu calls it's even more interesting
 * as we have to ensure no other cpu is still observing our csd.
 */
static void csd_lock_wait(struct call_single_data *data)
{
	while (data->flags & CSD_FLAG_LOCK)
		cpu_relax();
}

static void csd_lock(struct call_single_data *data)
{
	csd_lock_wait(data);
	data->flags = CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data structure:
	 */
	smp_mb();
}

static void csd_unlock(struct call_single_data *data)
{
	WARN_ON(!(data->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_mb();

	data->flags &= ~CSD_FLAG_LOCK;
}

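/*
 * A minimal sketch of the intended csd lifecycle, assuming a caller on
 * CPU A targeting CPU B (illustrative comment only; 'csd', 'my_func'
 * and 'my_info' are hypothetical names):
 *
 *	csd_lock(csd);			// A: waits out any previous user,
 *					//    then takes ownership
 *	csd->func = my_func;		// A: ordered after ->flags by the
 *	csd->info = my_info;		//    smp_mb() in csd_lock()
 *	generic_exec_single(B, csd, 0);	// A: queues csd, sends the IPI
 *					// B: runs my_func(my_info), then
 *					//    csd_unlock(csd) lets the next
 *					//    user in
 */
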
/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static void generic_exec_single(int cpu, struct call_single_data *data, int wait)
{
	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
	unsigned long flags;
	int ipi;

	raw_spin_lock_irqsave(&dst->lock, flags);
	ipi = list_empty(&dst->list);
	list_add_tail(&data->list, &dst->list);
	raw_spin_unlock_irqrestore(&dst->lock, flags);

	/*
	 * The list addition must be visible by the time the IPI handler
	 * locks the list to pull the entry off it; the normal cache
	 * coherency rules implied by spinlocks give us that ordering.
	 *
	 * If IPIs can go out of order with respect to the cache coherency
	 * protocol on an architecture, sufficient synchronisation should
	 * be added to arch code to make it appear to obey cache coherency
	 * WRT locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (ipi)
		arch_send_call_function_single_ipi(cpu);

	if (wait)
		csd_lock_wait(data);
}

/*
 * Invoked by arch to handle an IPI for call function. Must be called with
 * interrupts disabled.
 */
void generic_smp_call_function_interrupt(void)
{
	struct call_function_data *data;
	int cpu = smp_processor_id();

	/*
	 * Shouldn't receive this interrupt on a cpu that is not yet online.
	 */
	WARN_ON_ONCE(!cpu_online(cpu));

	/*
	 * Ensure entry is visible on call_function_queue after we have
	 * entered the IPI. See comment in smp_call_function_many.
	 * If we don't have this, then we may miss an entry on the list
	 * and never get another IPI to process it.
	 */
	smp_mb();

	/*
	 * It's ok to use list_for_each_entry_rcu() here even though we may
	 * delete 'data', since list_del_rcu() doesn't clear ->next
	 */
	list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
		int refs;

		/*
		 * Since we walk the list without any locks, we might
		 * see an entry that was completed, removed from the
		 * list and is in the process of being reused.
		 *
		 * We must check that the cpu is in the cpumask before
		 * checking the refs, and both must be set before
		 * executing the callback on this cpu.
		 */

		if (!cpumask_test_cpu(cpu, data->cpumask))
			continue;

		smp_rmb();

		if (atomic_read(&data->refs) == 0)
			continue;

		if (!cpumask_test_and_clear_cpu(cpu, data->cpumask))
			continue;

		data->csd.func(data->csd.info);

		refs = atomic_dec_return(&data->refs);
		WARN_ON(refs < 0);
		if (refs)
			continue;

		/* We are the last one to drop a reference: clean up. */
		WARN_ON(!cpumask_empty(data->cpumask));

		raw_spin_lock(&call_function.lock);
		list_del_rcu(&data->csd.list);
		raw_spin_unlock(&call_function.lock);

		csd_unlock(&data->csd);
	}
}

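/*
 * A rough sketch of how an architecture might wire this up: the
 * function-call IPI vector handler invokes the generic handler with
 * interrupts still disabled. arch_handle_call_function_ipi() and
 * arch_ack_ipi() below are hypothetical names, not a real arch API:
 *
 *	void arch_handle_call_function_ipi(void)
 *	{
 *		arch_ack_ipi();
 *		irq_enter();
 *		generic_smp_call_function_interrupt();
 *		irq_exit();
 *	}
 */
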
/*
 * Invoked by arch to handle an IPI for call function single. Must be
 * called from the arch with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
	unsigned int data_flags;
	LIST_HEAD(list);

	/*
	 * Shouldn't receive this interrupt on a cpu that is not yet online.
	 */
	WARN_ON_ONCE(!cpu_online(smp_processor_id()));

	raw_spin_lock(&q->lock);
	list_replace_init(&q->list, &list);
	raw_spin_unlock(&q->lock);

	while (!list_empty(&list)) {
		struct call_single_data *data;

		data = list_entry(list.next, struct call_single_data, list);
		list_del(&data->list);

		/*
		 * 'data' can be invalid after the function call below
		 * if flags == 0 (when queued through generic_exec_single()),
		 * so save the flags before making the call:
		 */
		data_flags = data->flags;

		data->func(data->info);

		/*
		 * Unlocked CSDs are valid through generic_exec_single():
		 */
		if (data_flags & CSD_FLAG_LOCK)
			csd_unlock(data);
	}
}

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);

/**
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on the specified CPU.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int wait)
{
	struct call_single_data d = {
		.flags = 0,
	};
	unsigned long flags;
	int this_cpu;
	int err = 0;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	if (cpu == this_cpu) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	} else {
		if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
			struct call_single_data *data = &d;

			if (!wait)
				data = &__get_cpu_var(csd_data);

			csd_lock(data);

			data->func = func;
			data->info = info;
			generic_exec_single(cpu, data, wait);
		} else {
			err = -ENXIO;	/* CPU not online */
		}
	}

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);

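/*
 * Example sketch (illustrative only, not part of this file): run a
 * fast, non-blocking callback on one specific CPU and wait for it to
 * finish.  'remote_read' and 'some_per_cpu_value' are hypothetical:
 *
 *	static void remote_read(void *info)
 *	{
 *		*(unsigned long *)info = some_per_cpu_value();
 *	}
 *
 *	unsigned long val;
 *	if (smp_call_function_single(cpu, remote_read, &val, 1))
 *		;	// cpu was offline: -ENXIO
 */
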
/**
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 * Note that @wait will be implicitly turned on in case of allocation failures,
 * since we fall back to on-stack allocation.
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  void (*func)(void *info), void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);

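/*
 * Example sketch: run a callback on whichever CPU in a device's
 * affinity mask is cheapest to reach, waiting for completion
 * ('dev->cpus' and 'reprogram' are hypothetical):
 *
 *	err = smp_call_function_any(dev->cpus, reprogram, dev, 1);
 *	if (err)
 *		;	// no cpu in the mask was online
 */
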
/**
 * __smp_call_function_single(): Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @data: Pre-allocated and setup data structure
 * @wait: If true, wait until function has completed on specified CPU.
 *
 * Like smp_call_function_single(), but allow caller to pass in a
 * pre-allocated data structure. Useful for embedding @data inside
 * other structures, for instance.
 */
void __smp_call_function_single(int cpu, struct call_single_data *data,
				int wait)
{
	unsigned int this_cpu;
	unsigned long flags;

	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && wait && irqs_disabled()
		     && !oops_in_progress);

	if (cpu == this_cpu) {
		local_irq_save(flags);
		data->func(data->info);
		local_irq_restore(flags);
	} else {
		csd_lock(data);
		generic_exec_single(cpu, data, wait);
	}
	put_cpu();
}

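/*
 * Example sketch of the "embedding" use mentioned above: a longer-lived
 * object carries its own call_single_data, so no per-cpu csd slot is
 * consumed and the caller need not wait ('struct my_work' and
 * 'my_work_func' are hypothetical):
 *
 *	struct my_work {
 *		struct call_single_data	csd;
 *		int			payload;
 *	};
 *
 *	static void my_work_func(void *info)
 *	{
 *		struct my_work *w = info;
 *		...
 *	}
 *
 *	w->csd.func = my_work_func;
 *	w->csd.info = w;
 *	w->csd.flags = 0;
 *	__smp_call_function_single(cpu, &w->csd, 0);
 */
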
/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    void (*func)(void *), void *info, bool wait)
{
	struct call_function_data *data;
	unsigned long flags;
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	/* So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus?  We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	data = &__get_cpu_var(cfd_data);
	csd_lock(&data->csd);
	BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask));

	data->csd.func = func;
	data->csd.info = info;
	cpumask_and(data->cpumask, mask, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, data->cpumask);

	/*
	 * To ensure the interrupt handler gets a complete view
	 * we order the cpumask and refs writes and order the read
	 * of them in the interrupt handler.  In addition we may
	 * only clear our own cpu bit from the mask.
	 */
	smp_wmb();

	atomic_set(&data->refs, cpumask_weight(data->cpumask));

	raw_spin_lock_irqsave(&call_function.lock, flags);
	/*
	 * Place entry at the _HEAD_ of the list, so that any cpu still
	 * observing the entry in generic_smp_call_function_interrupt()
	 * will not miss any other list entries:
	 */
	list_add_rcu(&data->csd.list, &call_function.queue);
	raw_spin_unlock_irqrestore(&call_function.lock, flags);

	/*
	 * Make the list addition visible before sending the ipi.
	 * (IPIs must obey or appear to obey normal Linux cache
	 * coherency rules -- see comment in generic_exec_single).
	 */
	smp_mb();

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(data->cpumask);

	/* Optionally wait for the CPUs to complete */
	if (wait)
		csd_lock_wait(&data->csd);
}
EXPORT_SYMBOL(smp_call_function_many);

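/*
 * Example sketch: flush some per-cpu state on every online CPU in a
 * caller-supplied mask without waiting ('flush_fn' and 'mask' are
 * hypothetical; note that preemption must be disabled across the call):
 *
 *	preempt_disable();
 *	smp_call_function_many(mask, flush_fn, NULL, false);
 *	preempt_enable();
 */
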
/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *), void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();

	return 0;
}
EXPORT_SYMBOL(smp_call_function);

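/*
 * Example sketch: run 'do_sync' (hypothetical) on every other online
 * CPU, wait for all of them, then run it locally:
 *
 *	smp_call_function(do_sync, NULL, 1);
 *	do_sync(NULL);
 *
 * (on_each_cpu() packages essentially this pattern.)
 */

/*
 * The helpers below let arch code hold off manipulation of the global
 * call_function queue, e.g. around bringing a cpu online.
 */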
void ipi_call_lock(void)
{
	raw_spin_lock(&call_function.lock);
}

void ipi_call_unlock(void)
{
	raw_spin_unlock(&call_function.lock);
}

void ipi_call_lock_irq(void)
{
	raw_spin_lock_irq(&call_function.lock);
}

void ipi_call_unlock_irq(void)
{
	raw_spin_unlock_irq(&call_function.lock);
}