1/* sun4v_ivec.S: Sun4v interrupt vector handling.
2 *
3 * Copyright (C) 2006 <davem@davemloft.net>
4 */
5
6#include <asm/cpudata.h>
7#include <asm/intr_queue.h>
8#include <asm/pil.h>
9
10	.text
11	.align	32
12
sun4v_cpu_mondo:
	/* CPU mondo (cross-call) interrupt handler, entered at trap level
	 * with only the %g registers available.
	 *
	 * Head offset in %g2, tail offset in %g4 (byte offsets into the
	 * per-cpu mondo queue, read via the ASI_QUEUE registers).
	 * If they are the same, the queue is empty and there is no work.
	 */
	mov	INTRQ_CPU_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_CPU_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_cpu_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g4.  The scratchpad
	 * register holds the address of this cpu's fault_info member, so
	 * subtracting that member's offset yields the trap_block base.
	 */
	ldxa	[%g0] ASI_SCRATCHPAD, %g4
	sub	%g4, TRAP_PER_CPU_FAULT_INFO, %g4

	/* Get CPU mondo queue base phys address into %g7.  */
	ldx	[%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7

	/* Now get the cross-call arguments and handler PC, same
	 * layout as sun4u:
	 *
	 * 1st 64-bit word: low half is 32-bit PC, put into %g3 and jmpl to it
	 *                  high half is context arg to MMU flushes, into %g5
	 * 2nd 64-bit word: 64-bit arg, load into %g1
	 * 3rd 64-bit word: 64-bit arg, load into %g7
	 *
	 * %g2 walks through the entry; entries are 0x40 bytes apart, so
	 * after consuming three words (0x18) the final add advances %g2
	 * to the start of the next entry (0x40 total from entry start).
	 */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g3
	add	%g2, 0x8, %g2
	srlx	%g3, 32, %g5		/* context arg from high half */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	add	%g2, 0x8, %g2
	srl	%g3, 0, %g3		/* zero-extend the 32-bit PC */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g7
	add	%g2, 0x40 - 0x8 - 0x8, %g2

	/* Update queue head pointer; the queue size is a power of two,
	 * so ANDing with the per-cpu qmask wraps the offset in place.
	 */
	lduw	[%g4 + TRAP_PER_CPU_CPU_MONDO_QMASK], %g4
	and	%g2, %g4, %g2

	mov	INTRQ_CPU_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Dispatch to the cross-call handler; it is responsible for
	 * returning from the trap (args in %g1/%g5/%g7 as above).
	 */
	jmpl	%g3, %g0
	 nop

sun4v_cpu_mondo_queue_empty:
	retry
62
sun4v_dev_mondo:
	/* Device mondo interrupt handler: dequeue one entry, chain its
	 * ivector_table bucket onto this cpu's __irq_work list, and raise
	 * the device-IRQ softint so the real work runs at PIL_DEVICE_IRQ.
	 *
	 * Head offset in %g2, tail offset in %g4; equal means empty.
	 */
	mov	INTRQ_DEVICE_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_DEVICE_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_dev_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g4 (scratchpad holds
	 * &trap_block[cpu].fault_info; subtract the member offset).
	 */
	ldxa	[%g0] ASI_SCRATCHPAD, %g4
	sub	%g4, TRAP_PER_CPU_FAULT_INFO, %g4

	/* Get DEV mondo queue base phys address into %g5.  */
	ldx	[%g4 + TRAP_PER_CPU_DEV_MONDO_PA], %g5

	/* Load IVEC into %g3: only the first 64-bit word of the 0x40-byte
	 * entry is needed; advance %g2 past the whole entry.
	 */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	add	%g2, 0x40, %g2


	/* Update queue head pointer (qmask wraps the power-of-two queue);
	 * this frees up some registers.
	 */
	lduw	[%g4 + TRAP_PER_CPU_DEV_MONDO_QMASK], %g4
	and	%g2, %g4, %g2

	mov	INTRQ_DEVICE_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Get &__irq_work[smp_processor_id()] into %g1 (%g4 is scratch).  */
	TRAP_LOAD_IRQ_WORK(%g1, %g4)

	/* Get &ivector_table[IVEC] into %g4; entries are 8 bytes each.  */
	sethi	%hi(ivector_table), %g4
	sllx	%g3, 3, %g3
	or	%g4, %lo(ivector_table), %g4
	add	%g4, %g3, %g4

	/* Insert ivector_table[] entry into __irq_work[] queue (singly
	 * linked via 32-bit values; NOTE(review): the stw accesses assume
	 * the chain field lives at bucket offset 0 and that these
	 * pointers fit in 32 bits — matches the irq bucket layout
	 * elsewhere in this kernel, confirm against arch/sparc64 irq.c).
	 */
	lduw	[%g1], %g2		/* g2 = irq_work(cpu) */
	stw	%g2, [%g4 + 0x00]	/* bucket->irq_chain = g2 */
	stw	%g4, [%g1]		/* irq_work(cpu) = bucket */

	/* Signal the interrupt by setting (1 << pil) in %softint, then
	 * fall through to retry; the softint fires once PIL allows it.
	 */
	wr	%g0, 1 << PIL_DEVICE_IRQ, %set_softint

sun4v_dev_mondo_queue_empty:
	retry
112
sun4v_res_mondo:
	/* Resumable error mondo handler: copy the 64-byte error report
	 * out of the hypervisor queue into a kernel buffer, then call C
	 * code (sun4v_resum_error) to log it.
	 *
	 * Head offset in %g2, tail offset in %g4; equal means empty.
	 */
	mov	INTRQ_RESUM_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_RESUM_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_res_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g3 (scratchpad holds
	 * &trap_block[cpu].fault_info; subtract the member offset).
	 */
	ldxa	[%g0] ASI_SCRATCHPAD, %g3
	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3

	/* Get RES mondo queue base phys address into %g5.  */
	ldx	[%g3 + TRAP_PER_CPU_RESUM_MONDO_PA], %g5

	/* Get RES kernel buffer base phys address into %g7.  */
	ldx	[%g3 + TRAP_PER_CPU_RESUM_KBUF_PA], %g7

	/* If the first word of the kernel-buffer slot is non-zero, C code
	 * has not consumed it yet: the kernel buffer is full.
	 */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	brnz,pn	%g1, sun4v_res_mondo_queue_full
	 nop

	/* Queue wrap mask, used after the copy advances %g2.  */
	lduw	[%g3 + TRAP_PER_CPU_RESUM_QMASK], %g4

	/* Remember this entry's offset in %g1 (passed to C code later).  */
	mov	%g2, %g1

	/* Copy 64-byte queue entry into kernel buffer, eight 64-bit
	 * words at a time; %g2 advances by 0x08 per word so it ends up
	 * one full entry (0x40) past where it started.
	 */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2

	/* Update queue head pointer (wrap via qmask).  */
	and	%g2, %g4, %g2

	mov	INTRQ_RESUM_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Disable interrupts and save register state so we can call
	 * C code.  The etrap handling will leave %g4 in %l4 for us
	 * when it's done.  etrap_irq resumes execution just past the
	 * 'rd %pc' delay slot.
	 */
	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil
	mov	%g1, %g4		/* %g4 = entry offset, becomes %l4 */
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call		trace_hardirqs_off
	 nop
#endif
	/* Log the event: sun4v_resum_error(regs, entry_offset).  */
	add	%sp, PTREGS_OFF, %o0
	call	sun4v_resum_error
	 mov	%l4, %o1

	/* Return from trap.  */
	ba,pt	%xcc, rtrap_irq
	 nop

sun4v_res_mondo_queue_empty:
	retry
200
sun4v_res_mondo_queue_full:
	/* The queue is full, consolidate our damage by setting
	 * the head equal to the tail (%g4 still holds the tail offset
	 * read at sun4v_res_mondo entry).  We'll just trap again
	 * otherwise.  Call C code to log the event.
	 */
	mov	INTRQ_RESUM_MONDO_HEAD, %g2
	stxa	%g4, [%g2] ASI_QUEUE
	membar	#Sync

	/* Raise PIL and save state; etrap_irq resumes just past the
	 * 'rd %pc' delay slot so C code can be called.
	 */
	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call		trace_hardirqs_off
	 nop
#endif
	/* sun4v_resum_overflow(regs) logs the lost-entries condition.  */
	call	sun4v_resum_overflow
	 add	%sp, PTREGS_OFF, %o0

	ba,pt	%xcc, rtrap_irq
	 nop
223
sun4v_nonres_mondo:
	/* Non-resumable error mondo handler: same structure as
	 * sun4v_res_mondo but for the NONRESUM queue/buffer, calling
	 * sun4v_nonresum_error to log the event.
	 *
	 * Head offset in %g2, tail offset in %g4; equal means empty.
	 */
	mov	INTRQ_NONRESUM_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_NONRESUM_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_nonres_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g3 (scratchpad holds
	 * &trap_block[cpu].fault_info; subtract the member offset).
	 */
	ldxa	[%g0] ASI_SCRATCHPAD, %g3
	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3

	/* Get NONRES mondo queue base phys address into %g5.  */
	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_MONDO_PA], %g5

	/* Get NONRES kernel buffer base phys address into %g7.  */
	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_KBUF_PA], %g7

	/* If the first word of the kernel-buffer slot is non-zero, C code
	 * has not consumed it yet: the kernel buffer is full.
	 */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	brnz,pn	%g1, sun4v_nonres_mondo_queue_full
	 nop

	/* Queue wrap mask, used after the copy advances %g2.  */
	lduw	[%g3 + TRAP_PER_CPU_NONRESUM_QMASK], %g4

	/* Remember this entry's offset in %g1 (passed to C code later).  */
	mov	%g2, %g1

	/* Copy 64-byte queue entry into kernel buffer, eight 64-bit
	 * words at a time; %g2 ends one full entry (0x40) further on.
	 */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2

	/* Update queue head pointer (wrap via qmask).  */
	and	%g2, %g4, %g2

	mov	INTRQ_NONRESUM_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Disable interrupts and save register state so we can call
	 * C code.  The etrap handling will leave %g4 in %l4 for us
	 * when it's done.  etrap_irq resumes execution just past the
	 * 'rd %pc' delay slot.
	 */
	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil
	mov	%g1, %g4		/* %g4 = entry offset, becomes %l4 */
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call		trace_hardirqs_off
	 nop
#endif
	/* Log the event: sun4v_nonresum_error(regs, entry_offset).  */
	add	%sp, PTREGS_OFF, %o0
	call	sun4v_nonresum_error
	 mov	%l4, %o1

	/* Return from trap.  */
	ba,pt	%xcc, rtrap_irq
	 nop

sun4v_nonres_mondo_queue_empty:
	retry
311
sun4v_nonres_mondo_queue_full:
	/* The queue is full, consolidate our damage by setting
	 * the head equal to the tail (%g4 still holds the tail offset
	 * read at sun4v_nonres_mondo entry).  We'll just trap again
	 * otherwise.  Call C code to log the event.
	 */
	mov	INTRQ_NONRESUM_MONDO_HEAD, %g2
	stxa	%g4, [%g2] ASI_QUEUE
	membar	#Sync

	/* Raise PIL and save state; etrap_irq resumes just past the
	 * 'rd %pc' delay slot so C code can be called.
	 */
	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call		trace_hardirqs_off
	 nop
#endif
	/* sun4v_nonresum_overflow(regs) logs the lost-entries condition.  */
	call	sun4v_nonresum_overflow
	 add	%sp, PTREGS_OFF, %o0

	ba,pt	%xcc, rtrap_irq
	 nop
334