/* sun4v_ivec.S: Sun4v interrupt vector handling.
 *
 * Copyright (C) 2006 <davem@davemloft.net>
 *
 * Handlers for the four sun4v hypervisor mondo queues (CPU cross-call,
 * device interrupt, resumable error, non-resumable error).  Each queue
 * lives in physical memory; the hypervisor maintains the tail pointer
 * and the guest consumes entries and advances the head pointer via
 * ASI_QUEUE register accesses.  All handlers run at trap level with
 * only the alternate %g registers available.
 */

#include <asm/cpudata.h>
#include <asm/intr_queue.h>
#include <asm/pil.h>

	.text
	.align	32

sun4v_cpu_mondo:
	/* Head offset in %g2, tail offset in %g4.
	 * If they are the same, the queue is empty and there is no work.
	 */
	mov	INTRQ_CPU_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_CPU_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_cpu_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g4.  The scratchpad
	 * register holds the per-cpu fault-info pointer, which sits
	 * TRAP_PER_CPU_FAULT_INFO bytes into the trap_block entry.
	 */
	ldxa	[%g0] ASI_SCRATCHPAD, %g4
	sub	%g4, TRAP_PER_CPU_FAULT_INFO, %g4

	/* Get CPU mondo queue base phys address into %g7. */
	ldx	[%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7

	/* Now get the cross-call arguments and handler PC, same
	 * layout as sun4u:
	 *
	 * 1st 64-bit word: low half is 32-bit PC, put into %g3 and jmpl to it
	 *                  high half is context arg to MMU flushes, into %g5
	 * 2nd 64-bit word: 64-bit arg, load into %g1
	 * 3rd 64-bit word: 64-bit arg, load into %g7
	 */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g3
	add	%g2, 0x8, %g2
	srlx	%g3, 32, %g5			/* %g5 = high half (MMU context arg) */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	add	%g2, 0x8, %g2
	srl	%g3, 0, %g3			/* zero-extend: %g3 = 32-bit handler PC */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g7
	/* Advance past the rest of the 64-byte queue entry (we have
	 * consumed 2 of the 8 words so far; this add completes 0x40).
	 */
	add	%g2, 0x40 - 0x8 - 0x8, %g2

	/* Update queue head pointer; the queue is a power-of-two ring,
	 * so wrap-around is a simple AND with the size mask.
	 */
	lduw	[%g4 + TRAP_PER_CPU_CPU_MONDO_QMASK], %g4
	and	%g2, %g4, %g2

	mov	INTRQ_CPU_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Tail-call the cross-call handler; it terminates the trap. */
	jmpl	%g3, %g0
	 nop

sun4v_cpu_mondo_queue_empty:
	retry

sun4v_dev_mondo:
	/* Head offset in %g2, tail offset in %g4; equal means empty. */
	mov	INTRQ_DEVICE_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_DEVICE_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_dev_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g4. */
	ldxa	[%g0] ASI_SCRATCHPAD, %g4
	sub	%g4, TRAP_PER_CPU_FAULT_INFO, %g4

	/* Get DEV mondo queue base phys address into %g5. */
	ldx	[%g4 + TRAP_PER_CPU_DEV_MONDO_PA], %g5

	/* Load IVEC (interrupt vector / cookie) into %g3.  Only the
	 * first word of the entry is used.
	 */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	add	%g2, 0x40, %g2			/* skip whole 64-byte entry */

	/* Update queue head pointer, this frees up some registers. */
	lduw	[%g4 + TRAP_PER_CPU_DEV_MONDO_QMASK], %g4
	and	%g2, %g4, %g2			/* ring-buffer wrap via size mask */

	mov	INTRQ_DEVICE_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* %g1 = __pa(&this_cpu's irq_work list head) */
	TRAP_LOAD_IRQ_WORK_PA(%g1, %g4)

	/* For VIRQs, cookie is encoded as ~bucket_phys_addr, so a
	 * negative %g3 means "cookie": xnor with zero recovers the
	 * bucket physical address directly into %g4 (delay slot
	 * executes whether or not the branch is taken; the fall-through
	 * path simply overwrites %g4 below).
	 */
	brlz,pt	%g3, 1f
	 xnor	%g3, %g0, %g4

	/* Otherwise %g3 is a plain vector number: compute
	 * __pa(&ivector_table[IVEC]) into %g4 (entries are 16 bytes).
	 */
	sethi	%hi(ivector_table_pa), %g4
	ldx	[%g4 + %lo(ivector_table_pa)], %g4
	sllx	%g3, 4, %g3
	add	%g4, %g3, %g4

	/* Push the bucket onto the per-cpu irq work list:
	 * bucket->next = *list_head; *list_head = bucket.
	 */
1:	ldx	[%g1], %g2
	stxa	%g2, [%g4] ASI_PHYS_USE_EC
	stx	%g4, [%g1]

	/* Signal the interrupt by setting (1 << pil) in %softint. */
	wr	%g0, 1 << PIL_DEVICE_IRQ, %set_softint

sun4v_dev_mondo_queue_empty:
	retry

sun4v_res_mondo:
	/* Head offset in %g2, tail offset in %g4; equal means empty. */
	mov	INTRQ_RESUM_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_RESUM_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_res_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g3. */
	ldxa	[%g0] ASI_SCRATCHPAD, %g3
	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3

	/* Get RES mondo queue base phys address into %g5. */
	ldx	[%g3 + TRAP_PER_CPU_RESUM_MONDO_PA], %g5

	/* Get RES kernel buffer base phys address into %g7. */
	ldx	[%g3 + TRAP_PER_CPU_RESUM_KBUF_PA], %g7

	/* If the first word of the kernel-buffer slot is non-zero, the
	 * C code has not consumed it yet: the shadow queue is full.
	 */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	brnz,pn	%g1, sun4v_res_mondo_queue_full
	 nop

	lduw	[%g3 + TRAP_PER_CPU_RESUM_QMASK], %g4

	/* Remember this entry's offset in %g1 (passed to C code). */
	mov	%g2, %g1

	/* Copy the 64-byte queue entry into the kernel buffer,
	 * one doubleword at a time; %g2 advances by 0x40 total.
	 */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2

	/* Update queue head pointer (wrap with the size mask). */
	and	%g2, %g4, %g2

	mov	INTRQ_RESUM_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Disable interrupts and save register state so we can call
	 * C code.  The etrap handling will leave %g4 in %l4 for us
	 * when it's done.
	 */
	rdpr	%pil, %g2
	wrpr	%g0, PIL_NORMAL_MAX, %pil
	mov	%g1, %g4			/* %g4 = entry offset, -> %l4 */
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif
	/* Log the event: sun4v_resum_error(regs, entry_offset). */
	add	%sp, PTREGS_OFF, %o0
	call	sun4v_resum_error
	 mov	%l4, %o1

	/* Return from trap. */
	ba,pt	%xcc, rtrap_irq
	 nop

sun4v_res_mondo_queue_empty:
	retry

sun4v_res_mondo_queue_full:
	/* The queue is full, consolidate our damage by setting
	 * the head equal to the tail.  We'll just trap again otherwise.
	 * Call C code to log the event.
	 */
	mov	INTRQ_RESUM_MONDO_HEAD, %g2
	stxa	%g4, [%g2] ASI_QUEUE		/* head := tail (discard backlog) */
	membar	#Sync

	rdpr	%pil, %g2
	wrpr	%g0, PIL_NORMAL_MAX, %pil
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif
	call	sun4v_resum_overflow
	 add	%sp, PTREGS_OFF, %o0

	ba,pt	%xcc, rtrap_irq
	 nop

sun4v_nonres_mondo:
	/* Head offset in %g2, tail offset in %g4; equal means empty. */
	mov	INTRQ_NONRESUM_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_NONRESUM_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_nonres_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g3. */
	ldxa	[%g0] ASI_SCRATCHPAD, %g3
	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3

	/* Get NONRESUM mondo queue base phys address into %g5. */
	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_MONDO_PA], %g5

	/* Get NONRESUM kernel buffer base phys address into %g7. */
	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_KBUF_PA], %g7

	/* If the first word of the kernel-buffer slot is non-zero, the
	 * shadow queue is full (C code has not consumed it yet).
	 */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	brnz,pn	%g1, sun4v_nonres_mondo_queue_full
	 nop

	lduw	[%g3 + TRAP_PER_CPU_NONRESUM_QMASK], %g4

	/* Remember this entry's offset in %g1 (passed to C code). */
	mov	%g2, %g1

	/* Copy the 64-byte queue entry into the kernel buffer,
	 * one doubleword at a time; %g2 advances by 0x40 total.
	 */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2

	/* Update queue head pointer (wrap with the size mask). */
	and	%g2, %g4, %g2

	mov	INTRQ_NONRESUM_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Disable interrupts and save register state so we can call
	 * C code.  The etrap handling will leave %g4 in %l4 for us
	 * when it's done.
	 */
	rdpr	%pil, %g2
	wrpr	%g0, PIL_NORMAL_MAX, %pil
	mov	%g1, %g4			/* %g4 = entry offset, -> %l4 */
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif
	/* Log the event: sun4v_nonresum_error(regs, entry_offset). */
	add	%sp, PTREGS_OFF, %o0
	call	sun4v_nonresum_error
	 mov	%l4, %o1

	/* Return from trap. */
	ba,pt	%xcc, rtrap_irq
	 nop

sun4v_nonres_mondo_queue_empty:
	retry

sun4v_nonres_mondo_queue_full:
	/* The queue is full, consolidate our damage by setting
	 * the head equal to the tail.  We'll just trap again otherwise.
	 * Call C code to log the event.
	 */
	mov	INTRQ_NONRESUM_MONDO_HEAD, %g2
	stxa	%g4, [%g2] ASI_QUEUE		/* head := tail (discard backlog) */
	membar	#Sync

	rdpr	%pil, %g2
	wrpr	%g0, PIL_NORMAL_MAX, %pil
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif
	call	sun4v_nonresum_overflow
	 add	%sp, PTREGS_OFF, %o0

	ba,pt	%xcc, rtrap_irq
	 nop