/*
 * arch/powerpc/platforms/pseries/xics.c
 *
 * Copyright 2000 IBM Corporation.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/types.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/signal.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/radix-tree.h>
#include <linux/cpu.h>

#include <asm/firmware.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/rtas.h>
#include <asm/hvcall.h>
#include <asm/machdep.h>
#include <asm/i8259.h>

#include "xics.h"
#include "plpar_wrappers.h"

#define XICS_IPI		2
#define XICS_IRQ_SPURIOUS	0

/* Want a priority other than 0.  Various HW issues require this. */
#define	DEFAULT_PRIORITY	5

/*
 * Mark IPIs as higher priority so we can take them inside interrupts
 * that aren't marked IRQF_DISABLED.
 */
#define IPI_PRIORITY		4

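/*
 * Per-cpu XICS presentation register block, following the PAPR XICS
 * layout: XIRR poll at offset 0, XIRR at offset 4, a reserved word,
 * then QIRR at offset 12.  The CPPR (current processor priority) is
 * the most significant byte of the XIRR, which is why the CPPR
 * accessor below stores a single byte into xirr.bytes[0].
 */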
struct xics_ipl {
	union {
		u32 word;
		u8 bytes[4];
	} xirr_poll;
	union {
		u32 word;
		u8 bytes[4];
	} xirr;
	u32 dummy;
	union {
		u32 word;
		u8 bytes[4];
	} qirr;
};

static struct xics_ipl __iomem *xics_per_cpu[NR_CPUS];

static unsigned int default_server = 0xFF;
static unsigned int default_distrib_server = 0;
static unsigned int interrupt_server_size = 8;

static struct irq_host *xics_host;

/*
 * XICS only has a single IPI, so encode the messages per CPU
 */
struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned;

/* RTAS service tokens */
static int ibm_get_xive;
static int ibm_set_xive;
static int ibm_int_on;
static int ibm_int_off;


/* Direct HW low level accessors */

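/*
 * In the XICS model, a load from the XIRR accepts the most favored
 * pending interrupt and returns CPPR | XISR; a store to the XIRR
 * signals EOI and restores the CPPR from the byte written.  A store
 * to the QIRR posts an IPI request at the written priority; writing
 * 0xff (least favored) clears it.
 */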
static inline unsigned int direct_xirr_info_get(int n_cpu)
{
	return in_be32(&xics_per_cpu[n_cpu]->xirr.word);
}

static inline void direct_xirr_info_set(int n_cpu, int value)
{
	out_be32(&xics_per_cpu[n_cpu]->xirr.word, value);
}

static inline void direct_cppr_info(int n_cpu, u8 value)
{
	out_8(&xics_per_cpu[n_cpu]->xirr.bytes[0], value);
}

static inline void direct_qirr_info(int n_cpu, u8 value)
{
	out_8(&xics_per_cpu[n_cpu]->qirr.bytes[0], value);
}


/* LPAR low level accessors */

static inline unsigned int lpar_xirr_info_get(int n_cpu)
{
	unsigned long lpar_rc;
	unsigned long return_value;

	lpar_rc = plpar_xirr(&return_value);
	if (lpar_rc != H_SUCCESS)
		panic("bad return code xirr - rc = %lx\n", lpar_rc);
	return (unsigned int)return_value;
}

static inline void lpar_xirr_info_set(int n_cpu, int value)
{
	unsigned long lpar_rc;
	unsigned long val64 = value & 0xffffffff;

	lpar_rc = plpar_eoi(val64);
	if (lpar_rc != H_SUCCESS)
		panic("bad return code EOI - rc = %ld, value=%lx\n", lpar_rc,
		      val64);
}

static inline void lpar_cppr_info(int n_cpu, u8 value)
{
	unsigned long lpar_rc;

	lpar_rc = plpar_cppr(value);
	if (lpar_rc != H_SUCCESS)
		panic("bad return code cppr - rc = %lx\n", lpar_rc);
}

static inline void lpar_qirr_info(int n_cpu, u8 value)
{
	unsigned long lpar_rc;

	lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu), value);
	if (lpar_rc != H_SUCCESS)
		panic("bad return code qirr - rc = %lx\n", lpar_rc);
}


/* High level handlers and init code */

#ifdef CONFIG_SMP
static int get_irq_server(unsigned int virq)
{
	unsigned int server;
	/* For the moment only implement delivery to all cpus or one cpu */
	cpumask_t cpumask = irq_desc[virq].affinity;
	cpumask_t tmp = CPU_MASK_NONE;

	if (!distribute_irqs)
		return default_server;

	if (cpus_equal(cpumask, CPU_MASK_ALL)) {
		server = default_distrib_server;
	} else {
		cpus_and(tmp, cpu_online_map, cpumask);

		if (cpus_empty(tmp))
			server = default_distrib_server;
		else
			server = get_hard_smp_processor_id(first_cpu(tmp));
	}

	return server;
}
#else
static int get_irq_server(unsigned int virq)
{
	return default_server;
}
#endif

static void xics_unmask_irq(unsigned int virq)
{
	unsigned int irq;
	int call_status;
	unsigned int server;

	pr_debug("xics: unmask virq %d\n", virq);

	irq = (unsigned int)irq_map[virq].hwirq;
	pr_debug(" -> map to hwirq 0x%x\n", irq);
	if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
		return;

	server = get_irq_server(virq);

	call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
				DEFAULT_PRIORITY);
	if (call_status != 0) {
		printk(KERN_ERR "xics_unmask_irq: irq=%u: ibm_set_xive "
		       "returned %d\n", irq, call_status);
		printk("set_xive %x, server %x\n", ibm_set_xive, server);
		return;
	}

	/* Now unmask the interrupt (often a no-op) */
	call_status = rtas_call(ibm_int_on, 1, 1, NULL, irq);
	if (call_status != 0) {
		printk(KERN_ERR "xics_unmask_irq: irq=%u: ibm_int_on "
		       "returned %d\n", irq, call_status);
		return;
	}
}

static void xics_mask_real_irq(unsigned int irq)
{
	int call_status;

	if (irq == XICS_IPI)
		return;

	call_status = rtas_call(ibm_int_off, 1, 1, NULL, irq);
	if (call_status != 0) {
		printk(KERN_ERR "xics_mask_real_irq: irq=%u: "
		       "ibm_int_off returned %d\n", irq, call_status);
		return;
	}

	/* Have to set XIVE to 0xff to be able to remove a slot */
	call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq,
				default_server, 0xff);
	if (call_status != 0) {
		printk(KERN_ERR "xics_mask_real_irq: irq=%u: ibm_set_xive(0xff)"
		       " returned %d\n", irq, call_status);
		return;
	}
}

static void xics_mask_irq(unsigned int virq)
{
	unsigned int irq;

	pr_debug("xics: mask virq %d\n", virq);

	irq = (unsigned int)irq_map[virq].hwirq;
	if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
		return;
	xics_mask_real_irq(irq);
}

static unsigned int xics_startup(unsigned int virq)
{
	unsigned int irq;

	/* force a reverse mapping of the interrupt so it gets in the cache */
	irq = (unsigned int)irq_map[virq].hwirq;
	irq_radix_revmap(xics_host, irq);

	/* unmask it */
	xics_unmask_irq(virq);
	return 0;
}

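/*
 * EOI: write the XIRR back with the source number in the low 24 bits
 * and 0xff in the CPPR byte, signalling end-of-interrupt and opening
 * the processor back up to all priorities.
 */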
static void xics_eoi_direct(unsigned int virq)
{
	int cpu = smp_processor_id();
	unsigned int irq = (unsigned int)irq_map[virq].hwirq;

	iosync();
	direct_xirr_info_set(cpu, (0xff << 24) | irq);
}

static void xics_eoi_lpar(unsigned int virq)
{
	int cpu = smp_processor_id();
	unsigned int irq = (unsigned int)irq_map[virq].hwirq;

	iosync();
	lpar_xirr_info_set(cpu, (0xff << 24) | irq);
}

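/*
 * The low 24 bits of the XIRR are the XISR source number; the top
 * byte is the CPPR, which is masked off below.
 */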
static inline unsigned int xics_remap_irq(unsigned int vec)
{
	unsigned int irq;

	vec &= 0x00ffffff;

	if (vec == XICS_IRQ_SPURIOUS)
		return NO_IRQ;
	irq = irq_radix_revmap(xics_host, vec);
	if (likely(irq != NO_IRQ))
		return irq;

	printk(KERN_ERR "Interrupt %u (real) is invalid,"
	       " disabling it.\n", vec);
	xics_mask_real_irq(vec);
	return NO_IRQ;
}

static unsigned int xics_get_irq_direct(void)
{
	unsigned int cpu = smp_processor_id();

	return xics_remap_irq(direct_xirr_info_get(cpu));
}

static unsigned int xics_get_irq_lpar(void)
{
	unsigned int cpu = smp_processor_id();

	return xics_remap_irq(lpar_xirr_info_get(cpu));
}

#ifdef CONFIG_SMP

static irqreturn_t xics_ipi_dispatch(int cpu)
{
	WARN_ON(cpu_is_offline(cpu));

	while (xics_ipi_message[cpu].value) {
		if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION,
				       &xics_ipi_message[cpu].value)) {
			mb();
			smp_message_recv(PPC_MSG_CALL_FUNCTION);
		}
		if (test_and_clear_bit(PPC_MSG_RESCHEDULE,
				       &xics_ipi_message[cpu].value)) {
			mb();
			smp_message_recv(PPC_MSG_RESCHEDULE);
		}
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
		if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK,
				       &xics_ipi_message[cpu].value)) {
			mb();
			smp_message_recv(PPC_MSG_DEBUGGER_BREAK);
		}
#endif
	}
	return IRQ_HANDLED;
}

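/*
 * The IPI actions first write 0xff to the QIRR to clear the pending
 * IPI request before dispatching, so that a message posted while we
 * are dispatching raises a fresh interrupt rather than being lost.
 */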
static irqreturn_t xics_ipi_action_direct(int irq, void *dev_id)
{
	int cpu = smp_processor_id();

	direct_qirr_info(cpu, 0xff);

	return xics_ipi_dispatch(cpu);
}

static irqreturn_t xics_ipi_action_lpar(int irq, void *dev_id)
{
	int cpu = smp_processor_id();

	lpar_qirr_info(cpu, 0xff);

	return xics_ipi_dispatch(cpu);
}

void xics_cause_IPI(int cpu)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		lpar_qirr_info(cpu, IPI_PRIORITY);
	else
		direct_qirr_info(cpu, IPI_PRIORITY);
}

#endif /* CONFIG_SMP */

static void xics_set_cpu_priority(int cpu, unsigned char cppr)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		lpar_cppr_info(cpu, cppr);
	else
		direct_cppr_info(cpu, cppr);
	iosync();
}

static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
{
	unsigned int irq;
	int status;
	int xics_status[2];
	unsigned long newmask;
	cpumask_t tmp = CPU_MASK_NONE;

	irq = (unsigned int)irq_map[virq].hwirq;
	if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
		return;

	status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);

	if (status) {
		printk(KERN_ERR "xics_set_affinity: irq=%u ibm,get-xive "
		       "returns %d\n", irq, status);
		return;
	}

	/* For the moment only implement delivery to all cpus or one cpu */
	if (cpus_equal(cpumask, CPU_MASK_ALL)) {
		newmask = default_distrib_server;
	} else {
		cpus_and(tmp, cpu_online_map, cpumask);
		if (cpus_empty(tmp))
			return;
		newmask = get_hard_smp_processor_id(first_cpu(tmp));
	}

	status = rtas_call(ibm_set_xive, 3, 1, NULL,
				irq, newmask, xics_status[1]);

	if (status) {
		printk(KERN_ERR "xics_set_affinity: irq=%u ibm,set-xive "
		       "returns %d\n", irq, status);
		return;
	}
}

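/*
 * Open this cpu's priority fully and join the global interrupt
 * queue, making the cpu eligible for globally distributed
 * interrupts.  The GIQ indicator index is derived from the default
 * distribution server number.
 */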
void xics_setup_cpu(void)
{
	int cpu = smp_processor_id();

	xics_set_cpu_priority(cpu, 0xff);

	rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE,
		(1UL << interrupt_server_size) - 1 - default_distrib_server, 1);
}

static struct irq_chip xics_pic_direct = {
	.typename = " XICS     ",
	.startup = xics_startup,
	.mask = xics_mask_irq,
	.unmask = xics_unmask_irq,
	.eoi = xics_eoi_direct,
	.set_affinity = xics_set_affinity
};

static struct irq_chip xics_pic_lpar = {
	.typename = " XICS     ",
	.startup = xics_startup,
	.mask = xics_mask_irq,
	.unmask = xics_unmask_irq,
	.eoi = xics_eoi_lpar,
	.set_affinity = xics_set_affinity
};

static int xics_host_match(struct irq_host *h, struct device_node *node)
{
	/* IBM machines have interrupt parents of various funky types for
	 * things like vdevices, events, etc...  The trick we use here is
	 * to match everything except the legacy 8259, which is compatible
	 * with "chrp,iic".
	 */
	return !of_device_is_compatible(node, "chrp,iic");
}

static int xics_host_map_direct(struct irq_host *h, unsigned int virq,
				irq_hw_number_t hw)
{
	pr_debug("xics: map_direct virq %d, hwirq 0x%lx\n", virq, hw);

	get_irq_desc(virq)->status |= IRQ_LEVEL;
	set_irq_chip_and_handler(virq, &xics_pic_direct, handle_fasteoi_irq);
	return 0;
}

static int xics_host_map_lpar(struct irq_host *h, unsigned int virq,
			      irq_hw_number_t hw)
{
	pr_debug("xics: map_lpar virq %d, hwirq 0x%lx\n", virq, hw);

	get_irq_desc(virq)->status |= IRQ_LEVEL;
	set_irq_chip_and_handler(virq, &xics_pic_lpar, handle_fasteoi_irq);
	return 0;
}

static int xics_host_xlate(struct irq_host *h, struct device_node *ct,
			   u32 *intspec, unsigned int intsize,
			   irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
	/* The current xics implementation translates everything to level.
	 * That is not technically right for MSIs, but it is irrelevant at
	 * this point.  We might get smarter in the future.
	 */
	*out_hwirq = intspec[0];
	*out_flags = IRQ_TYPE_LEVEL_LOW;

	return 0;
}

static struct irq_host_ops xics_host_direct_ops = {
	.match = xics_host_match,
	.map = xics_host_map_direct,
	.xlate = xics_host_xlate,
};

static struct irq_host_ops xics_host_lpar_ops = {
	.match = xics_host_match,
	.map = xics_host_map_lpar,
	.xlate = xics_host_xlate,
};

static void __init xics_init_host(void)
{
	struct irq_host_ops *ops;

	if (firmware_has_feature(FW_FEATURE_LPAR))
		ops = &xics_host_lpar_ops;
	else
		ops = &xics_host_direct_ops;
	xics_host = irq_alloc_host(IRQ_HOST_MAP_TREE, 0, ops,
				   XICS_IRQ_SPURIOUS);
	BUG_ON(xics_host == NULL);
	irq_set_default_host(xics_host);
}

static void __init xics_map_one_cpu(int hw_id, unsigned long addr,
				     unsigned long size)
{
#ifdef CONFIG_SMP
	int i;

	/* This may look gross but it's good enough for now; we don't quite
	 * have a hard -> linux processor id mapping.
	 */
	for_each_possible_cpu(i) {
		if (!cpu_present(i))
			continue;
		if (hw_id == get_hard_smp_processor_id(i)) {
			xics_per_cpu[i] = ioremap(addr, size);
			return;
		}
	}
#else
	if (hw_id != 0)
		return;
	xics_per_cpu[0] = ioremap(addr, size);
#endif /* CONFIG_SMP */
}

static void __init xics_init_one_node(struct device_node *np,
				      unsigned int *indx)
{
	unsigned int ilen;
	const u32 *ireg;

	/* This code makes the theoretically broken assumption that the
	 * interrupt server numbers are the same as the hard CPU numbers.
	 * This happens to be the case so far, but we are playing with
	 * fire...  should be fixed one of these days. -BenH.
	 */
	ireg = of_get_property(np, "ibm,interrupt-server-ranges", NULL);

	/* Does that ever happen? We'll know soon enough... but even
	 * good old f80 does have that property.
	 */
	WARN_ON(ireg == NULL);
	if (ireg) {
		/*
		 * set node starting index for this node
		 */
		*indx = *ireg;
	}
	ireg = of_get_property(np, "reg", &ilen);
	if (!ireg)
		panic("xics_init_IRQ: can't find interrupt reg property");
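
	/* Each "reg" entry is a 64-bit (addr, size) pair, i.e. four u32
	 * cells, reassembled 32 bits at a time below. */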
	while (ilen >= (4 * sizeof(u32))) {
		unsigned long addr, size;

		addr = (unsigned long)*ireg++ << 32;
		ilen -= sizeof(u32);
		addr |= *ireg++;
		ilen -= sizeof(u32);
		size = (unsigned long)*ireg++ << 32;
		ilen -= sizeof(u32);
		size |= *ireg++;
		ilen -= sizeof(u32);
		xics_map_one_cpu(*indx, addr, size);
		(*indx)++;
	}
}

static void __init xics_setup_8259_cascade(void)
{
	struct device_node *np, *old, *found = NULL;
	int cascade, naddr;
	const u32 *addrp;
	unsigned long intack = 0;

	for_each_node_by_type(np, "interrupt-controller")
		if (of_device_is_compatible(np, "chrp,iic")) {
			found = np;
			break;
		}
	if (found == NULL) {
		printk(KERN_DEBUG "xics: no ISA interrupt controller\n");
		return;
	}
	cascade = irq_of_parse_and_map(found, 0);
	if (cascade == NO_IRQ) {
		printk(KERN_ERR "xics: failed to map cascade interrupt\n");
		return;
	}
	pr_debug("xics: cascade mapped to irq %d\n", cascade);

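	/* Walk up from the 8259 node looking for the PCI host bridge
	 * that carries the 8259 interrupt-acknowledge address. */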
	for (old = of_node_get(found); old != NULL; old = np) {
		np = of_get_parent(old);
		of_node_put(old);
		if (np == NULL)
			break;
		if (strcmp(np->name, "pci") != 0)
			continue;
		addrp = of_get_property(np, "8259-interrupt-acknowledge", NULL);
		if (addrp == NULL)
			continue;
		naddr = of_n_addr_cells(np);
		intack = addrp[naddr-1];
		if (naddr > 1)
			intack |= ((unsigned long)addrp[naddr-2]) << 32;
	}
	if (intack)
		printk(KERN_DEBUG "xics: PCI 8259 intack at 0x%016lx\n", intack);
	i8259_init(found, intack);
	of_node_put(found);
	set_irq_chained_handler(cascade, pseries_8259_cascade);
}

static struct device_node *cpuid_to_of_node(int cpu)
{
	struct device_node *np;
	u32 hcpuid = get_hard_smp_processor_id(cpu);

	for_each_node_by_type(np, "cpu") {
		int i, len;
		const u32 *intserv;

		intserv = of_get_property(np, "ibm,ppc-interrupt-server#s",
					&len);

		if (!intserv)
			intserv = of_get_property(np, "reg", &len);

		i = len / sizeof(u32);

		while (i--)
			if (intserv[i] == hcpuid)
				return np;
	}

	return NULL;
}

void __init xics_init_IRQ(void)
{
	int i, j;
	struct device_node *np;
	u32 ilen, indx = 0;
	const u32 *ireg, *isize;
	int found = 0;
	u32 hcpuid;

	ppc64_boot_msg(0x20, "XICS Init");

	ibm_get_xive = rtas_token("ibm,get-xive");
	ibm_set_xive = rtas_token("ibm,set-xive");
	ibm_int_on  = rtas_token("ibm,int-on");
	ibm_int_off = rtas_token("ibm,int-off");

	for_each_node_by_type(np, "PowerPC-External-Interrupt-Presentation") {
		found = 1;
		if (firmware_has_feature(FW_FEATURE_LPAR))
			break;
		xics_init_one_node(np, &indx);
	}
	if (found == 0)
		return;

	xics_init_host();

	/* Find the server numbers for the boot cpu. */
	np = cpuid_to_of_node(boot_cpuid);
	BUG_ON(!np);
	ireg = of_get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen);
	if (!ireg)
		goto skip_gserver_check;
	i = ilen / sizeof(int);
	hcpuid = get_hard_smp_processor_id(boot_cpuid);

	/* The global interrupt distribution server is specified in the
	 * "ibm,ppc-interrupt-gserver#s" property, which is a list of
	 * (server, global-server) pairs; hence the stride of 2 below.
	 * Find the entry for the current boot cpu id and use its global
	 * server as the default distribution server.
	 */
	for (j = 0; j < i; j += 2) {
		if (ireg[j] == hcpuid) {
			default_server = hcpuid;
			default_distrib_server = ireg[j+1];

			isize = of_get_property(np,
					"ibm,interrupt-server#-size", NULL);
			if (isize)
				interrupt_server_size = *isize;
		}
	}
skip_gserver_check:
	of_node_put(np);

	if (firmware_has_feature(FW_FEATURE_LPAR))
		ppc_md.get_irq = xics_get_irq_lpar;
	else
		ppc_md.get_irq = xics_get_irq_direct;

	xics_setup_cpu();

	xics_setup_8259_cascade();

	ppc64_boot_msg(0x21, "XICS Done");
}

#ifdef CONFIG_SMP
void xics_request_IPIs(void)
{
	unsigned int ipi;
	int rc;

	ipi = irq_create_mapping(xics_host, XICS_IPI);
	BUG_ON(ipi == NO_IRQ);

	/*
	 * IPIs are marked IRQF_DISABLED as they must run with irqs
	 * disabled
	 */
	set_irq_handler(ipi, handle_percpu_irq);
	if (firmware_has_feature(FW_FEATURE_LPAR))
		rc = request_irq(ipi, xics_ipi_action_lpar, IRQF_DISABLED,
				"IPI", NULL);
	else
		rc = request_irq(ipi, xics_ipi_action_direct, IRQF_DISABLED,
				"IPI", NULL);
	BUG_ON(rc);
}
#endif /* CONFIG_SMP */

void xics_teardown_cpu(int secondary)
{
	int cpu = smp_processor_id();
	unsigned int ipi;
	struct irq_desc *desc;

	xics_set_cpu_priority(cpu, 0);

	/*
	 * Clear any pending IPI
	 */
	if (firmware_has_feature(FW_FEATURE_LPAR))
		lpar_qirr_info(cpu, 0xff);
	else
		direct_qirr_info(cpu, 0xff);

	/*
	 * We need to EOI the IPI if we got here from a kexec "down" IPI.
	 *
	 * We probably need to check all the other interrupts too;
	 * should we be flagging the idle loop instead, or creating
	 * some task to be scheduled?
	 */
	ipi = irq_find_mapping(xics_host, XICS_IPI);
	if (ipi == XICS_IRQ_SPURIOUS)
		return;
	desc = get_irq_desc(ipi);
	if (desc->chip && desc->chip->eoi)
		desc->chip->eoi(ipi);

	/*
	 * Some machines need to have at least one cpu in the GIQ,
	 * so leave the master cpu in the group.
	 */
	if (secondary)
		rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE,
				   (1UL << interrupt_server_size) - 1 -
				   default_distrib_server, 0);
}

#ifdef CONFIG_HOTPLUG_CPU

/* Interrupts are disabled. */
void xics_migrate_irqs_away(void)
{
	int status;
	unsigned int irq, virq, cpu = smp_processor_id();

	/* Reject any interrupt that was queued to us... */
	xics_set_cpu_priority(cpu, 0);

	/* remove ourselves from the global interrupt queue */
	status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE,
		(1UL << interrupt_server_size) - 1 - default_distrib_server, 0);
	WARN_ON(status < 0);

	/* Allow IPIs again... */
	xics_set_cpu_priority(cpu, DEFAULT_PRIORITY);

	for_each_irq(virq) {
		struct irq_desc *desc;
		int xics_status[2];
		unsigned long flags;

		/* We can't set affinity on ISA interrupts */
		if (virq < NUM_ISA_INTERRUPTS)
			continue;
		if (irq_map[virq].host != xics_host)
			continue;
		irq = (unsigned int)irq_map[virq].hwirq;
		/* We still need to receive IPIs. */
		if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
			continue;
		desc = get_irq_desc(virq);

		/* We only need to migrate enabled IRQs */
		if (desc == NULL || desc->chip == NULL
		    || desc->action == NULL
		    || desc->chip->set_affinity == NULL)
			continue;

		spin_lock_irqsave(&desc->lock, flags);

		status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
		if (status) {
			printk(KERN_ERR "migrate_irqs_away: irq=%u "
					"ibm,get-xive returns %d\n",
					virq, status);
			goto unlock;
		}

		/*
		 * We only support delivery to all cpus or to one cpu.
		 * The irq has to be migrated only in the single cpu
		 * case.
		 */
		if (xics_status[0] != get_hard_smp_processor_id(cpu))
			goto unlock;

		printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n",
		       virq, cpu);

		/* Reset affinity to all cpus (irq_desc is indexed by virq,
		 * not by the hardware irq number) */
		desc->chip->set_affinity(virq, CPU_MASK_ALL);
		irq_desc[virq].affinity = CPU_MASK_ALL;
unlock:
		spin_unlock_irqrestore(&desc->lock, flags);
	}
}
#endif