1/***********************license start***************
2 * Copyright (c) 2003-2010  Cavium Inc. (support@cavium.com). All rights
3 * reserved.
4 *
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 *   * Redistributions of source code must retain the above copyright
11 *     notice, this list of conditions and the following disclaimer.
12 *
13 *   * Redistributions in binary form must reproduce the above
14 *     copyright notice, this list of conditions and the following
15 *     disclaimer in the documentation and/or other materials provided
16 *     with the distribution.
17
18 *   * Neither the name of Cavium Inc. nor the names of
19 *     its contributors may be used to endorse or promote products
20 *     derived from this software without specific prior written
21 *     permission.
22
23 * This Software, including technical data, may be subject to U.S. export  control
24 * laws, including the U.S. Export Administration Act and its  associated
25 * regulations, and may be subject to export or import  regulations in other
26 * countries.
27
28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE  RISK ARISING OUT OF USE OR
37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/
39
40/**
41 * @file
42 *
 * Interface to the MIPS interrupts.
44 *
45 * <hr>$Revision: 70030 $<hr>
46 */
47#ifndef __U_BOOT__
48#if __GNUC__ >= 4
49/* Backtrace is only available with the new toolchain.  */
50#include <execinfo.h>
51#endif
52#endif  /* __U_BOOT__ */
53#include "cvmx-config.h"
54#include "cvmx.h"
55#include "cvmx-interrupt.h"
56#include "cvmx-sysinfo.h"
57#include "cvmx-uart.h"
58#include "cvmx-pow.h"
59#include "cvmx-ebt3000.h"
60#include "cvmx-coremask.h"
61#include "cvmx-spinlock.h"
62#include "cvmx-atomic.h"
63#include "cvmx-app-init.h"
64#include "cvmx-error.h"
65#include "cvmx-app-hotplug.h"
66#include "cvmx-profiler.h"
67#ifndef __U_BOOT__
68# include <octeon_mem_map.h>
69#else
70# include <asm/arch/octeon_mem_map.h>
71#endif
/* Entry points implemented in assembly (stage-1 stubs referenced by the
   exception vectors; see the vector-offset decode in cvmx_interrupt_do_irq) */
EXTERN_ASM void cvmx_interrupt_stage1(void);
EXTERN_ASM void cvmx_debug_handler_stage1(void);
EXTERN_ASM void cvmx_interrupt_cache_error(void);

/* NOTE(review): never written in this file -- presumably maintained by the
   assembly stubs above; confirm before relying on its value. */
int cvmx_interrupt_in_isr = 0;
77
struct __cvmx_interrupt_handler {
    cvmx_interrupt_func_t handler;      /**< One function to call per interrupt */
    void *data;                         /**< User data per interrupt */
    int handler_data;                   /**< Used internally: (en << 6) | bit, the
                                             CIU enable-register/bit position set by
                                             __cvmx_interrupt_set_mapping() */
};

/**
 * Internal state of the interrupt registration
 */
typedef struct
{
    struct __cvmx_interrupt_handler handlers[CVMX_IRQ_MAX];  /**< Per-IRQ dispatch table */
    cvmx_interrupt_exception_t exception_handler;            /**< Handler for non-interrupt exceptions */
} cvmx_interrupt_state_t;
92
/**
 * Internal state for the interrupt registration
 */
#ifndef __U_BOOT__
static CVMX_SHARED cvmx_interrupt_state_t cvmx_interrupt_state;
static CVMX_SHARED cvmx_spinlock_t cvmx_interrupt_default_lock;
/* Incremented once first core processing is finished. */
static CVMX_SHARED int32_t cvmx_interrupt_initialize_flag;
#endif  /* __U_BOOT__ */

#define ULL unsigned long long

/* Split a 64-bit value into printable 32-bit halves (cvmx_safe_printf has no
   64-bit format support in this context). */
#define HI32(data64)    ((uint32_t)(data64 >> 32))
#define LO32(data64)    ((uint32_t)(data64 & 0xFFFFFFFF))

/* MIPS o32/n32 ABI names for GPRs $0..$31, indexed by register number */
static const char reg_names[][32] = { "r0","at","v0","v1","a0","a1","a2","a3",
                                      "t0","t1","t2","t3","t4","t5","t6","t7",
                                      "s0","s1","s2","s3","s4","s5", "s6","s7",
                                      "t8","t9", "k0","k1","gp","sp","s8","ra" };
112
113/**
114 * version of printf that works better in exception context.
115 *
116 * @param format
117 */
118void cvmx_safe_printf(const char *format, ...)
119{
120    char buffer[256];
121    char *ptr = buffer;
122    int count;
123    va_list args;
124
125    va_start(args, format);
126#ifndef __U_BOOT__
127    count = vsnprintf(buffer, sizeof(buffer), format, args);
128#else
129    count = vsprintf(buffer, format, args);
130#endif
131    va_end(args);
132
133    while (count-- > 0)
134    {
135        cvmx_uart_lsr_t lsrval;
136
137        /* Spin until there is room */
138        do
139        {
140            lsrval.u64 = cvmx_read_csr(CVMX_MIO_UARTX_LSR(0));
141#if !defined(CONFIG_OCTEON_SIM_SPEED)
142            if (lsrval.s.temt == 0)
143                cvmx_wait(10000);   /* Just to reduce the load on the system */
144#endif
145        }
146        while (lsrval.s.temt == 0);
147
148        if (*ptr == '\n')
149            cvmx_write_csr(CVMX_MIO_UARTX_THR(0), '\r');
150        cvmx_write_csr(CVMX_MIO_UARTX_THR(0), *ptr++);
151    }
152}
153
/* Textual descriptions of cause codes, indexed by the ExcCode field of the
   COP0 CAUSE register ((cause >> 2) & 0x1f).  Empty strings mark reserved
   codes and are reported as "Reserved exception cause" by the caller. */
static const char cause_names[][128] = {
        /*  0 */ "Interrupt",
        /*  1 */ "TLB modification",
        /*  2 */ "tlb load/fetch",
        /*  3 */ "tlb store",
        /*  4 */ "address exc, load/fetch",
        /*  5 */ "address exc, store",
        /*  6 */ "bus error, instruction fetch",
        /*  7 */ "bus error, load/store",
        /*  8 */ "syscall",
        /*  9 */ "breakpoint",
        /* 10 */ "reserved instruction",
        /* 11 */ "cop unusable",
        /* 12 */ "arithmetic overflow",
        /* 13 */ "trap",
        /* 14 */ "",
        /* 15 */ "floating point exc",
        /* 16 */ "",
        /* 17 */ "",
        /* 18 */ "cop2 exception",
        /* 19 */ "",
        /* 20 */ "",
        /* 21 */ "",
        /* 22 */ "mdmx unusable",
        /* 23 */ "watch",
        /* 24 */ "machine check",
        /* 25 */ "",
        /* 26 */ "",
        /* 27 */ "",
        /* 28 */ "",
        /* 29 */ "",
        /* 30 */ "cache error",
        /* 31 */ ""
};
189
/**
 * @INTERNAL
 * Print one named 64-bit value as two zero-padded 32-bit halves.
 *
 * @param name   Label printed (right-aligned, 16 wide) before the value
 * @param reg    64-bit value to print
 */
static inline void print_reg64(const char *name, uint64_t reg)
{
    unsigned int hi = (unsigned int)HI32(reg);
    unsigned int lo = (unsigned int)LO32(reg);

    cvmx_safe_printf("%16s: 0x%08x%08x\n", name, hi, lo);
}
200
/**
 * @INTERNAL
 * Dump all useful registers to the console: the 32 saved GPRs in two
 * columns, followed by the COP0 CAUSE, STATUS, BADVADDR and EPC registers.
 *
 * @param registers Saved CPU registers to dump (indexed 0..31 as in reg_names)
 */
static void __cvmx_interrupt_dump_registers(uint64_t *registers)
{
    uint64_t r1, r2;
    int reg;
    /* Two columns: register N on the left, register N+16 on the right */
    for (reg=0; reg<16; reg++)
    {
        r1 = registers[reg]; r2 = registers[reg+16];
        cvmx_safe_printf("%3s ($%02d): 0x%08x%08x \t %3s ($%02d): 0x%08x%08x\n",
                           reg_names[reg], reg, (unsigned int)HI32(r1), (unsigned int)LO32(r1),
                           reg_names[reg+16], reg+16, (unsigned int)HI32(r2), (unsigned int)LO32(r2));
    }
    /* Read and print the key COP0 registers at the time of the dump */
    CVMX_MF_COP0 (r1, COP0_CAUSE);
    print_reg64 ("COP0_CAUSE", r1);
    CVMX_MF_COP0 (r2, COP0_STATUS);
    print_reg64 ("COP0_STATUS", r2);
    CVMX_MF_COP0 (r1, COP0_BADVADDR);
    print_reg64 ("COP0_BADVADDR", r1);
    CVMX_MF_COP0 (r2, COP0_EPC);
    print_reg64 ("COP0_EPC", r2);
}
227
/**
 * @INTERNAL
 * Default exception handler. Prints out the exception
 * cause decode and all relevant registers.
 *
 * Outside of U-Boot builds this function never returns: after dumping
 * state it spins polling the debug uart, pulsing MCD0 whenever a
 * character arrives so an attached debugger can take control.
 *
 * @param registers Registers at time of the exception
 */
#ifndef __U_BOOT__
static
#endif  /* __U_BOOT__ */
void __cvmx_interrupt_default_exception_handler(uint64_t *registers)
{
    uint64_t trap_print_cause;
    const char *str;
#ifndef __U_BOOT__
    int modified_zero_pc = 0;

    /* Show "Trap" on the board LED display and serialize output across cores */
    ebt3000_str_write("Trap");
    cvmx_spinlock_lock(&cvmx_interrupt_default_lock);
#endif
    /* ExcCode is CAUSE bits 6:2; use it to index the textual decode table */
    CVMX_MF_COP0 (trap_print_cause, COP0_CAUSE);
    str = cause_names [(trap_print_cause >> 2) & 0x1f];
    cvmx_safe_printf("Core %d: Unhandled Exception. Cause register decodes to:\n%s\n", (int)cvmx_get_core_num(), str && *str ? str : "Reserved exception cause");
    cvmx_safe_printf("******************************************************************\n");
    __cvmx_interrupt_dump_registers(registers);

#ifndef __U_BOOT__

    cvmx_safe_printf("******************************************************************\n");
#if __GNUC__ >= 4 && !defined(OCTEON_DISABLE_BACKTRACE)
    cvmx_safe_printf("Backtrace:\n\n");
    /* registers[35] holds the saved PC, registers[31] the saved $ra */
    if (registers[35] == 0) {
	modified_zero_pc = 1;
	/* If PC is zero we probably did jalr $zero, in which case $31 - 8 is the call site. */
	registers[35] = registers[31] - 8;
    }
    __octeon_print_backtrace_func ((__octeon_backtrace_printf_t)cvmx_safe_printf);
    if (modified_zero_pc)
	registers[35] = 0;
    cvmx_safe_printf("******************************************************************\n");
#endif

    cvmx_spinlock_unlock(&cvmx_interrupt_default_lock);

    /* Under the simulator, drop straight into the debugger */
    if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
        CVMX_BREAK;

    while (1)
    {
        /* Interrupts are suppressed when we are in the exception
           handler (because of SR[EXL]).  Spin and poll the uart
           status and see if the debugger is trying to stop us. */
        cvmx_uart_lsr_t lsrval;
        lsrval.u64 = cvmx_read_csr(CVMX_MIO_UARTX_LSR(cvmx_debug_uart));
        if (lsrval.s.dr)
        {
            uint64_t tmp;
            /* Pulse the MCD0 signal.  COP0 $22 is the Cavium-specific
               multi-core debug register; bit 4 asserts MCD0. */
            asm volatile (
            ".set push\n"
            ".set noreorder\n"
            ".set mips64\n"
            "dmfc0 %0, $22\n"
            "ori   %0, %0, 0x10\n"
            "dmtc0 %0, $22\n"
            ".set pop\n"
            : "=r" (tmp));
        }
    }
#endif /* __U_BOOT__ */
}
299
300#ifndef __U_BOOT__
/**
 * @INTERNAL
 * Default interrupt handler if the user doesn't register one.
 * Reports the unexpected interrupt number and dumps the saved
 * register state; the interrupt itself is not acknowledged here.
 *
 * @param irq_number IRQ that caused this interrupt
 * @param registers  Register at the time of the interrupt
 * @param user_arg   Unused optional user data
 */
static void __cvmx_interrupt_default(int irq_number, uint64_t *registers, void *user_arg)
{
    cvmx_safe_printf("cvmx_interrupt_default: Received interrupt %d\n", irq_number);
    __cvmx_interrupt_dump_registers(registers);
}
314
/**
 * Map a ciu bit to an irq number.  0xff for invalid.
 * 0-63 for en0.
 * 64-127 for en1.
 *
 * On CIU-based chips rows 0/1 are the EN0/EN1 summaries; on CIU2-based
 * chips (see the cvmx_ciu2_* aliases below) each row is one of the eight
 * per-category summary registers.
 */

static CVMX_SHARED uint8_t cvmx_ciu_to_irq[8][64];
#define cvmx_ciu_en0_to_irq cvmx_ciu_to_irq[0]
#define cvmx_ciu_en1_to_irq cvmx_ciu_to_irq[1]
#define cvmx_ciu2_wrkq_to_irq cvmx_ciu_to_irq[0]
#define cvmx_ciu2_wdog_to_irq cvmx_ciu_to_irq[1]
#define cvmx_ciu2_rml_to_irq cvmx_ciu_to_irq[2]
#define cvmx_ciu2_mio_to_irq cvmx_ciu_to_irq[3]
#define cvmx_ciu2_io_to_irq cvmx_ciu_to_irq[4]
#define cvmx_ciu2_mem_to_irq cvmx_ciu_to_irq[5]
#define cvmx_ciu2_eth_to_irq cvmx_ciu_to_irq[6]
#define cvmx_ciu2_gpio_to_irq cvmx_ciu_to_irq[7]

/* Separate maps for CIU2 mailbox bits (en == 9) and the CN61XX/CN66XX
   SUM2 timer bits (en == 8); see __cvmx_interrupt_set_mapping() */
static CVMX_SHARED uint8_t cvmx_ciu2_mbox_to_irq[64];
static CVMX_SHARED uint8_t cvmx_ciu_61xx_timer_to_irq[64];
335
336static void __cvmx_interrupt_set_mapping(int irq, unsigned int en, unsigned int bit)
337{
338    cvmx_interrupt_state.handlers[irq].handler_data = (en << 6) | bit;
339    if (en <= 7)
340        cvmx_ciu_to_irq[en][bit] = irq;
341    else if (en == 8)
342        cvmx_ciu_61xx_timer_to_irq[bit] = irq;
343    else
344        cvmx_ciu2_mbox_to_irq[bit] = irq;
345}
346
/* Software mirrors of the CIU enable registers, kept in sync with every
   hardware write so mask/unmask can read-modify-write without a CSR read.
   NOTE(review): unlike the maps above these are not CVMX_SHARED --
   presumably each core keeps its own copy of its enables; confirm. */
static uint64_t cvmx_interrupt_ciu_en0_mirror;
static uint64_t cvmx_interrupt_ciu_en1_mirror;
static uint64_t cvmx_interrupt_ciu_61xx_timer_mirror;
350
351/**
352 * @INTERNAL
353 * Called for all Performance Counter interrupts. Handler for
354 * interrupt line 6
355 *
356 * @param irq_number Interrupt number that we're being called for
357 * @param registers  Registers at the time of the interrupt
358 * @param user_arg   Unused user argument*
359 */
360static void __cvmx_interrupt_perf(int irq_number, uint64_t *registers, void *user_arg)
361{
362    uint64_t perf_counter;
363    CVMX_MF_COP0(perf_counter, COP0_PERFVALUE0);
364    if (perf_counter & (1ull << 63))
365        cvmx_collect_sample();
366}
367
/**
 * @INTERNAL
 * Handler for interrupt lines 2 and 3. These are directly tied
 * to the CIU. The handler queries the status of the CIU and
 * calls the secondary handler for the CIU interrupt that
 * occurred.  Only the highest-numbered pending-and-enabled source
 * is dispatched per invocation; sources with no registered mapping
 * (0xff) are masked off so they cannot fire again.
 *
 * @param irq_number Interrupt number that fired (2 or 3)
 * @param registers  Registers at the time of the interrupt
 * @param user_arg   Unused user argument
 */
static void __cvmx_interrupt_ciu(int irq_number, uint64_t *registers, void *user_arg)
{
    int ciu_offset;
    uint64_t irq_mask;
    uint64_t irq;
    int bit;
    int core = cvmx_get_core_num();

    if (irq_number == CVMX_IRQ_MIPS2) {
        /* Handle EN0 sources */
        ciu_offset = core * 2;
        /* Pending sources = summary register AND'ed with our enable mirror */
        irq_mask = cvmx_read_csr(CVMX_CIU_INTX_SUM0(ciu_offset)) & cvmx_interrupt_ciu_en0_mirror;
        /* Find the most-significant set bit (DCLZ counts leading zeros) */
        CVMX_DCLZ(bit, irq_mask);
        bit = 63 - bit;
        /* If ciu_int_sum1<sum2> is set, means its a timer interrupt */
        if (bit == 51 && (OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_2))) {
            /* NOTE(review): these deliberately shadow the outer irq_mask/bit;
               the outer values are still needed if this path falls through */
            uint64_t irq_mask;
            int bit;
            irq_mask = cvmx_read_csr(CVMX_CIU_SUM2_PPX_IP2(core)) & cvmx_interrupt_ciu_61xx_timer_mirror;
            CVMX_DCLZ(bit, irq_mask);
            bit = 63 - bit;
            /* Handle TIMER(4..9) interrupts */
            if (bit <= 9 && bit >= 4) {
                uint64_t irq = cvmx_ciu_61xx_timer_to_irq[bit];
                if (cvmx_unlikely(irq == 0xff)) {
                    /* No mapping */
                    cvmx_interrupt_ciu_61xx_timer_mirror &= ~(1ull << bit);
                    cvmx_write_csr(CVMX_CIU_EN2_PPX_IP2(core), cvmx_interrupt_ciu_61xx_timer_mirror);
                    return;
                }
                struct __cvmx_interrupt_handler *h = cvmx_interrupt_state.handlers + irq;
                h->handler(irq, registers, h->data);
                return;
            }
        }

        if (bit >= 0) {
            irq = cvmx_ciu_en0_to_irq[bit];
            if (cvmx_unlikely(irq == 0xff)) {
                /* No mapping. */
                cvmx_interrupt_ciu_en0_mirror &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN0(ciu_offset), cvmx_interrupt_ciu_en0_mirror);
                return;
            }
            struct __cvmx_interrupt_handler *h = cvmx_interrupt_state.handlers + irq;
            h->handler(irq, registers, h->data);
            return;
        }
    } else {
        /* Handle EN1 sources */
        ciu_offset = cvmx_get_core_num() * 2 + 1;
        irq_mask = cvmx_read_csr(CVMX_CIU_INT_SUM1) & cvmx_interrupt_ciu_en1_mirror;
        CVMX_DCLZ(bit, irq_mask);
        bit = 63 - bit;
        if (bit >= 0) {
            irq = cvmx_ciu_en1_to_irq[bit];
            if (cvmx_unlikely(irq == 0xff)) {
                /* No mapping. */
                cvmx_interrupt_ciu_en1_mirror &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN1(ciu_offset), cvmx_interrupt_ciu_en1_mirror);
                return;
            }
            struct __cvmx_interrupt_handler *h = cvmx_interrupt_state.handlers + irq;
            h->handler(irq, registers, h->data);
            return;
        }
    }
}
447
448/**
449 * @INTERNAL
450 * Handler for interrupt line 3, the DPI_DMA will have different value
451 * per core, all other fields values are identical for different cores.
452 *  These are directly tied to the CIU. The handler queries the status of
453 * the CIU and calls the secondary handler for the CIU interrupt that
454 * occurred.
455 *
456 * @param irq_number Interrupt number that fired (2 or 3)
457 * @param registers  Registers at the time of the interrupt
458 * @param user_arg   Unused user argument
459 */
460static void __cvmx_interrupt_ciu_cn61xx(int irq_number, uint64_t *registers, void *user_arg)
461{
462    /* Handle EN1 sources */
463    int core = cvmx_get_core_num();
464    int ciu_offset;
465    uint64_t irq_mask;
466    uint64_t irq;
467    int bit;
468
469    ciu_offset = core * 2 + 1;
470    irq_mask = cvmx_read_csr(CVMX_CIU_SUM1_PPX_IP3(core)) & cvmx_interrupt_ciu_en1_mirror;
471    CVMX_DCLZ(bit, irq_mask);
472    bit = 63 - bit;
473    if (bit >= 0) {
474        irq = cvmx_ciu_en1_to_irq[bit];
475        if (cvmx_unlikely(irq == 0xff)) {
476            /* No mapping. */
477            cvmx_interrupt_ciu_en1_mirror &= ~(1ull << bit);
478            cvmx_write_csr(CVMX_CIU_INTX_EN1(ciu_offset), cvmx_interrupt_ciu_en1_mirror);
479            return;
480        }
481        struct __cvmx_interrupt_handler *h = cvmx_interrupt_state.handlers + irq;
482        h->handler(irq, registers, h->data);
483        return;
484    }
485}
486
/**
 * @INTERNAL
 * Handler for interrupt line 2 on 68XX. These are directly tied
 * to the CIU2. The handler queries the status of the CIU and
 * calls the secondary handler for the CIU interrupt that
 * occurred.  The top-level summary has mailbox bits at 63:60 and
 * eight per-category sub-summaries at bits 7:0; each category's
 * source register is a fixed 0x1000 stride from the WRKQ one.
 *
 * @param irq_number Interrupt number that fired (2 or 3)
 * @param registers  Registers at the time of the interrupt
 * @param user_arg   Unused user argument
 */
static void __cvmx_interrupt_ciu2(int irq_number, uint64_t *registers, void *user_arg)
{
    int sum_bit, src_bit;
    uint64_t irq;
    uint64_t src_reg, src_val;
    struct __cvmx_interrupt_handler *h;
    int core = cvmx_get_core_num();
    uint64_t sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(core));

    /* Find the most-significant pending summary bit */
    CVMX_DCLZ(sum_bit, sum);
    sum_bit = 63 - sum_bit;

    if (sum_bit >= 0) {
        switch (sum_bit) {
        case 63:
        case 62:
        case 61:
        case 60:
            /* Mailbox interrupts: bits 63:60 map to mbox 3..0 */
            irq = cvmx_ciu2_mbox_to_irq[sum_bit - 60];
            if (cvmx_unlikely(irq == 0xff)) {
                /* No mapping: W1C the enable so it cannot fire again */
                uint64_t mask_reg = CVMX_CIU2_EN_PPX_IP2_MBOX_W1C(core);
                cvmx_write_csr(mask_reg, 1ull << (sum_bit - 60));
                break;
            }
            h = cvmx_interrupt_state.handlers + irq;
            h->handler(irq, registers, h->data);
            break;

        case 7:
        case 6:
        case 5:
        case 4:
        case 3:
        case 2:
        case 1:
        case 0:
            /* Category sub-summary: read the matching SRC register */
            src_reg = CVMX_CIU2_SRC_PPX_IP2_WRKQ(core) + (0x1000 * sum_bit);
            src_val = cvmx_read_csr(src_reg);
            if (!src_val)
                break;
            CVMX_DCLZ(src_bit, src_val);
            src_bit = 63 - src_bit;
            irq = cvmx_ciu_to_irq[sum_bit][src_bit];
            if (cvmx_unlikely(irq == 0xff)) {
                /* No mapping. */
                uint64_t mask_reg = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(core) + (0x1000 * sum_bit);
                cvmx_write_csr(mask_reg, 1ull << src_bit);
                break;
            }
            h = cvmx_interrupt_state.handlers + irq;
            h->handler(irq, registers, h->data);
            break;

        default:
            cvmx_safe_printf("Unknown CIU2 bit: %d\n", sum_bit);
            break;
        }
    }
    /* Clear the source to reduce the chance for spurious interrupts.  */

    /* CN68XX has an CIU-15786 errata that accessing the ACK registers
     * can stop interrupts from propagating
     */

    if (OCTEON_IS_MODEL(OCTEON_CN68XX))
        cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
    else
        cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP2(core));
}
568
569
/**
 * @INTERNAL
 * Called for all RML interrupts. This is usually an ECC error.
 * Delegates entirely to the error-reporting subsystem's poller.
 *
 * @param irq_number Interrupt number that we're being called for
 * @param registers  Registers at the time of the interrupt
 * @param user_arg   Unused user argument
 */
static void __cvmx_interrupt_ecc(int irq_number, uint64_t *registers, void *user_arg)
{
    cvmx_error_poll();
}
582
583
/**
 * Process an interrupt request
 *
 * Second-stage entry called from the assembly stubs.  Decodes CAUSE/STATUS,
 * reports cache and bus errors, routes true exceptions (ExcCode != 0) to the
 * registered exception handler, and dispatches the lowest pending enabled
 * interrupt line (0-7) to its handler.
 *
 * @param registers Registers at time of interrupt / exception
 * Registers 0-31 are standard MIPS, others specific to this routine
 * (27 = saved k1/vector address, 34 = saved DCACHE_ERR copy, 35 = PC)
 * @return
 */
void cvmx_interrupt_do_irq(uint64_t *registers);
void cvmx_interrupt_do_irq(uint64_t *registers)
{
    uint64_t        mask;
    uint64_t        cause;
    uint64_t        status;
    uint64_t        cache_err;
    int             i;
    uint32_t exc_vec;
    /* Determine the cause of the interrupt */
    asm volatile ("dmfc0 %0,$13,0" : "=r" (cause));   /* COP0 CAUSE */
    asm volatile ("dmfc0 %0,$12,0" : "=r" (status));  /* COP0 STATUS */
    /* In case of exception, clear all interrupts to avoid recursive interrupts.
       Also clear EXL bit to display the correct PC value.
       (cause & 0x7c) == 0 means ExcCode == 0, i.e. a real interrupt. */
    if ((cause & 0x7c) == 0)
    {
        asm volatile ("dmtc0 %0, $12, 0" : : "r" (status & ~(0xff02)));
    }
    /* The assembly stub at each exception vector saves its address in k1 when
    ** it calls the stage 2 handler.  We use this to compute the exception vector
    ** that brought us here */
    exc_vec = (uint32_t)(registers[27] & 0x780);  /* Mask off bits we need to ignore */

    /* Check for cache errors.  The cache errors go to a separate exception vector,
    ** so we will only check these if we got here from a cache error exception, and
    ** the ERL (error level) bit is set. */
    i = cvmx_get_core_num();
    if (exc_vec == 0x100 && (status & 0x4))
    {
        CVMX_MF_CACHE_ERR(cache_err);

        /* Use copy of DCACHE_ERR register that early exception stub read */
        if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
        {
            if (registers[34] & 0x1)
                cvmx_safe_printf("Dcache error detected: core: %d, way: %d, va 7:3: 0x%x\n", i, (int)(registers[34] >> 8) & 0x3f, (int)(registers[34] >> 3) & 0x1f);
            else if (cache_err & 0x1)
                cvmx_safe_printf("Icache error detected: core: %d, set: %d, way : %d, va 6:3 = 0x%x\n", i, (int)(cache_err >> 5) & 0x3f, (int)(cache_err >> 3) & 0x3, (int)(cache_err >> 11) & 0xf);
            else
                cvmx_safe_printf("Cache error exception: core %d\n", i);
        }
        else
        {
            if (registers[34] & 0x1)
                cvmx_safe_printf("Dcache error detected: core: %d, way: %d, va 9:7: 0x%x\n", i, (int)(registers[34] >> 10) & 0x1f, (int)(registers[34] >> 7) & 0x3);
            else if (cache_err & 0x1)
                cvmx_safe_printf("Icache error detected: core: %d, way : %d, va 9:3 = 0x%x\n", i, (int)(cache_err >> 10) & 0x3f, (int)(cache_err >> 3) & 0x7f);
            else
                cvmx_safe_printf("Cache error exception: core %d\n", i);
        }
        /* Write-1-to-clear the error indications */
        CVMX_MT_DCACHE_ERR(1);
        CVMX_MT_CACHE_ERR(0);
    }

    /* The bus error exceptions can occur due to DID timeout or write buffer,
       check by reading COP0_CACHEERRD */
    if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
    {
        i = cvmx_get_core_num();
        if (registers[34] & 0x4)
        {
            cvmx_safe_printf("Bus error detected due to DID timeout: core: %d\n", i);
            CVMX_MT_DCACHE_ERR(4);
        }
        else if (registers[34] & 0x2)
        {
            cvmx_safe_printf("Bus error detected due to write buffer parity: core: %d\n", i);
            CVMX_MT_DCACHE_ERR(2);
        }
    }

    /* Non-zero ExcCode: a true exception, not an interrupt */
    if ((cause & 0x7c) != 0)
    {
        cvmx_interrupt_state.exception_handler(registers);
        goto return_from_interrupt;
    }

    /* Convert the cause into an active mask: IP bits pending AND enabled */
    mask = ((cause & status) >> 8) & 0xff;
    if (mask == 0)
    {
        goto return_from_interrupt; /* Spurious interrupt */
    }

    /* Dispatch the lowest-numbered pending line; one line per invocation */
    for (i=0; i<8; i++)
    {
        if (mask & (1<<i))
        {
            struct __cvmx_interrupt_handler *h = cvmx_interrupt_state.handlers + i;
            h->handler(i, registers, h->data);
            goto return_from_interrupt;
        }
    }

    /* We should never get here */
    __cvmx_interrupt_default_exception_handler(registers);

return_from_interrupt:
    /* Restore Status register before returning from exception. */
    asm volatile ("dmtc0 %0, $12, 0" : : "r" (status));
}
692
/* Indirect mask/unmask entry points; pointed at the CIU or CIU2
   implementations below depending on the chip model. */
void (*cvmx_interrupt_mask_irq)(int irq_number);
void (*cvmx_interrupt_unmask_irq)(int irq_number);

/* Clear bit(s) M in V when O is nonzero (mask), otherwise set them (unmask).
   GCC statement expression; arguments are evaluated more than once. */
#define CLEAR_OR_MASK(V,M,O) ({\
            if (O)             \
                (V) &= ~(M);   \
            else               \
                (V) |= (M);    \
        })
702
/**
 * @INTERNAL
 * Mask (op != 0) or unmask (op == 0) an IRQ on CIU2-based chips.
 * MIPS lines 0-7 are toggled in the STATUS IM bits; everything else
 * uses the W1C/W1S enable registers located via handler_data
 * ((en << 6) | bit, set by __cvmx_interrupt_set_mapping()).
 *
 * @param irq_number IRQ to mask or unmask
 * @param op         1 to mask (disable), 0 to unmask (enable)
 */
static void __cvmx_interrupt_ciu2_mask_unmask_irq(int irq_number, int op)
{

    if (irq_number < 0 || irq_number >= CVMX_IRQ_MAX)
        return;

    if (irq_number <=  CVMX_IRQ_MIPS7) {
        uint32_t flags, mask;

        /* Toggle the IM bit for this line in COP0 STATUS */
        flags = cvmx_interrupt_disable_save();
        asm volatile ("mfc0 %0,$12,0" : "=r" (mask));
        CLEAR_OR_MASK(mask, 1 << (8 + irq_number), op);
        asm volatile ("mtc0 %0,$12,0" : : "r" (mask));
        cvmx_interrupt_restore(flags);
    } else {
        int idx;
        uint64_t reg;
        int core = cvmx_get_core_num();

        int bit = cvmx_interrupt_state.handlers[irq_number].handler_data;

        if (bit < 0)
            return;

        /* handler_data encodes (en << 6) | bit */
        idx = bit >> 6;
        bit &= 0x3f;
        if (idx > 7) {
            /* MBOX */
            if (op)
                reg = CVMX_CIU2_EN_PPX_IP2_MBOX_W1C(core);
            else
                reg = CVMX_CIU2_EN_PPX_IP2_MBOX_W1S(core);
        } else {
            /* Category enables are a fixed 0x1000 stride from WRKQ */
            if (op)
                reg = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(core) + (0x1000 * idx);
            else
                reg = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(core) + (0x1000 * idx);
        }
        cvmx_write_csr(reg, 1ull << bit);
    }
}
744
/* Disable (mask) an IRQ on CIU2-based chips */
static void __cvmx_interrupt_ciu2_mask_irq(int irq_number)
{
    __cvmx_interrupt_ciu2_mask_unmask_irq(irq_number, 1);
}
749
/* Enable (unmask) an IRQ on CIU2-based chips */
static void __cvmx_interrupt_ciu2_unmask_irq(int irq_number)
{
    __cvmx_interrupt_ciu2_mask_unmask_irq(irq_number, 0);
}
754
/**
 * @INTERNAL
 * Mask (op != 0) or unmask (op == 0) an IRQ on CIU-based chips.
 * MIPS lines 0-7 are toggled in the STATUS IM bits; CIU sources are
 * toggled in the software enable mirrors and written back to the
 * EN0/EN1/EN2 registers.  handler_data encodes (en << 6) | bit.
 *
 * @param irq_number IRQ to mask or unmask
 * @param op         1 to mask (disable), 0 to unmask (enable)
 */
static void __cvmx_interrupt_ciu_mask_unmask_irq(int irq_number, int op)
{
    uint32_t flags;

    if (irq_number < 0 || irq_number >= CVMX_IRQ_MAX)
        return;

    flags = cvmx_interrupt_disable_save();
    if (irq_number <=  CVMX_IRQ_MIPS7) {
        /* Toggle the IM bit for this line in COP0 STATUS */
        uint32_t mask;
        asm volatile ("mfc0 %0,$12,0" : "=r" (mask));
        CLEAR_OR_MASK(mask, 1 << (8 + irq_number), op);
        asm volatile ("mtc0 %0,$12,0" : : "r" (mask));
    } else {
        int ciu_bit, ciu_offset;
        int bit = cvmx_interrupt_state.handlers[irq_number].handler_data;
        int is_timer_intr = bit >> 6;   /* en field; 8 = CN61XX timer (SUM2) */
        int core = cvmx_get_core_num();

        if (bit < 0)
            goto out;

        ciu_bit = bit & 0x3f;
        ciu_offset = core * 2;

        if (is_timer_intr == 8)
        {
            CLEAR_OR_MASK(cvmx_interrupt_ciu_61xx_timer_mirror, 1ull << ciu_bit, op);
            CLEAR_OR_MASK(cvmx_interrupt_ciu_en0_mirror, 1ull << 51, op); // SUM2 bit
            /* NOTE(review): only EN2 is written here -- the en0_mirror SUM2
               bit change is not flushed to CVMX_CIU_INTX_EN0; confirm the
               hardware picks it up elsewhere before relying on unmask. */
            cvmx_write_csr(CVMX_CIU_EN2_PPX_IP2(core), cvmx_interrupt_ciu_61xx_timer_mirror);
        }
        else if (bit & 0x40) {
            /* EN1 */
            ciu_offset += 1;
            CLEAR_OR_MASK(cvmx_interrupt_ciu_en1_mirror, 1ull << ciu_bit, op);
            cvmx_write_csr(CVMX_CIU_INTX_EN1(ciu_offset), cvmx_interrupt_ciu_en1_mirror);
        } else {
            /* EN0 */
            CLEAR_OR_MASK(cvmx_interrupt_ciu_en0_mirror, 1ull << ciu_bit, op);
            cvmx_write_csr(CVMX_CIU_INTX_EN0(ciu_offset), cvmx_interrupt_ciu_en0_mirror);
        }
    }
out:
    cvmx_interrupt_restore(flags);
}
800
/* Disable (mask) an IRQ on CIU-based chips */
static void __cvmx_interrupt_ciu_mask_irq(int irq_number)
{
    __cvmx_interrupt_ciu_mask_unmask_irq(irq_number, 1);
}
805
/* Enable (unmask) an IRQ on CIU-based chips */
static void __cvmx_interrupt_ciu_unmask_irq(int irq_number)
{
    __cvmx_interrupt_ciu_mask_unmask_irq(irq_number, 0);
}
810
811/**
812 * Register an interrupt handler for the specified interrupt number.
813 *
814 * @param irq_number Interrupt number to register for See
815 *                   cvmx-interrupt.h for enumeration and description of sources.
816 * @param func       Function to call on interrupt.
817 * @param user_arg   User data to pass to the interrupt handler
818 */
819void cvmx_interrupt_register(int irq_number, cvmx_interrupt_func_t func, void *user_arg)
820{
821    if (irq_number >= CVMX_IRQ_MAX || irq_number < 0) {
822        cvmx_warn("cvmx_interrupt_register: Illegal irq_number %d\n", irq_number);
823        return;
824    }
825    cvmx_interrupt_state.handlers[irq_number].handler = func;
826    cvmx_interrupt_state.handlers[irq_number].data = user_arg;
827    CVMX_SYNCWS;
828}
829
830
831static void cvmx_interrupt_ciu_initialize(cvmx_sysinfo_t *sys_info_ptr)
832{
833    int i;
834    int core = cvmx_get_core_num();
835
836    /* Disable all CIU interrupts by default */
837    cvmx_interrupt_ciu_en0_mirror = 0;
838    cvmx_interrupt_ciu_en1_mirror = 0;
839    cvmx_interrupt_ciu_61xx_timer_mirror = 0;
840    cvmx_write_csr(CVMX_CIU_INTX_EN0(core * 2), cvmx_interrupt_ciu_en0_mirror);
841    cvmx_write_csr(CVMX_CIU_INTX_EN0((core * 2)+1), cvmx_interrupt_ciu_en0_mirror);
842    cvmx_write_csr(CVMX_CIU_INTX_EN1(core * 2), cvmx_interrupt_ciu_en1_mirror);
843    cvmx_write_csr(CVMX_CIU_INTX_EN1((core * 2)+1), cvmx_interrupt_ciu_en1_mirror);
844    if (OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_2))
845        cvmx_write_csr(CVMX_CIU_EN2_PPX_IP2(cvmx_get_core_num()), cvmx_interrupt_ciu_61xx_timer_mirror);
846
847    if (!cvmx_coremask_first_core(sys_info_ptr->core_mask)|| is_core_being_hot_plugged())
848        return;
849
850    /* On the first core, set up the maps */
851    for (i = 0; i < 64; i++) {
852        cvmx_ciu_en0_to_irq[i] = 0xff;
853        cvmx_ciu_en1_to_irq[i] = 0xff;
854        cvmx_ciu_61xx_timer_to_irq[i] = 0xff;
855    }
856
857    /* WORKQ */
858    for (i = 0; i < 16; i++)
859        __cvmx_interrupt_set_mapping(CVMX_IRQ_WORKQ0 + i, 0, i);
860    /* GPIO */
861    for (i = 0; i < 16; i++)
862        __cvmx_interrupt_set_mapping(CVMX_IRQ_GPIO0 + i, 0, i + 16);
863
864    /* MBOX */
865    for (i = 0; i < 2; i++)
866        __cvmx_interrupt_set_mapping(CVMX_IRQ_MBOX0 + i, 0, i + 32);
867
868    /* UART */
869    __cvmx_interrupt_set_mapping(CVMX_IRQ_UART0 + 0, 0, 34);
870    __cvmx_interrupt_set_mapping(CVMX_IRQ_UART0 + 1, 0, 35);
871    __cvmx_interrupt_set_mapping(CVMX_IRQ_UART0 + 2, 1, 16);
872
873    /* PCI */
874    for (i = 0; i < 4; i++)
875        __cvmx_interrupt_set_mapping(CVMX_IRQ_PCI_INT0 + i, 0, i + 36);
876
877    /* MSI */
878    for (i = 0; i < 4; i++)
879        __cvmx_interrupt_set_mapping(CVMX_IRQ_PCI_MSI0 + i, 0, i + 40);
880
881    /* TWSI */
882    __cvmx_interrupt_set_mapping(CVMX_IRQ_TWSI0 + 0, 0, 45);
883    __cvmx_interrupt_set_mapping(CVMX_IRQ_TWSI0 + 1, 0, 59);
884
885    /* other */
886    __cvmx_interrupt_set_mapping(CVMX_IRQ_RML, 0, 46);
887    __cvmx_interrupt_set_mapping(CVMX_IRQ_TRACE0, 0, 47);
888
889    /* GMX_DRP */
890    for (i = 0; i < 2; i++)
891        __cvmx_interrupt_set_mapping(CVMX_IRQ_GMX_DRP0 + i, 0, i + 48);
892
893    __cvmx_interrupt_set_mapping(CVMX_IRQ_IPD_DRP, 0, 50);
894    __cvmx_interrupt_set_mapping(CVMX_IRQ_KEY_ZERO, 0, 51);
895
896    /* TIMER0 */
897    for (i = 0; i < 4; i++)
898        __cvmx_interrupt_set_mapping(CVMX_IRQ_TIMER0 + i, 0, i + 52);
899
900    /* TIMER4..9 */
901    for(i = 0; i < 6; i++)
902        __cvmx_interrupt_set_mapping(CVMX_IRQ_TIMER4 + i, 8, i + 4);
903
904    __cvmx_interrupt_set_mapping(CVMX_IRQ_USB0 + 0, 0, 56);
905    __cvmx_interrupt_set_mapping(CVMX_IRQ_USB0 + 1, 1, 17);
906    __cvmx_interrupt_set_mapping(CVMX_IRQ_PCM, 0, 57);
907    __cvmx_interrupt_set_mapping(CVMX_IRQ_MPI, 0, 58);
908    __cvmx_interrupt_set_mapping(CVMX_IRQ_POWIQ, 0, 60);
909    __cvmx_interrupt_set_mapping(CVMX_IRQ_IPDPPTHR, 0, 61);
910    __cvmx_interrupt_set_mapping(CVMX_IRQ_MII0 + 0, 0, 62);
911    __cvmx_interrupt_set_mapping(CVMX_IRQ_MII0 + 1, 1, 18);
912    __cvmx_interrupt_set_mapping(CVMX_IRQ_BOOTDMA, 0, 63);
913
914    /* WDOG */
915    for (i = 0; i < 16; i++)
916        __cvmx_interrupt_set_mapping(CVMX_IRQ_WDOG0 + i, 1, i);
917
918    __cvmx_interrupt_set_mapping(CVMX_IRQ_NAND, 1, 19);
919    __cvmx_interrupt_set_mapping(CVMX_IRQ_MIO, 1, 20);
920    __cvmx_interrupt_set_mapping(CVMX_IRQ_IOB, 1, 21);
921    __cvmx_interrupt_set_mapping(CVMX_IRQ_FPA, 1, 22);
922    __cvmx_interrupt_set_mapping(CVMX_IRQ_POW, 1, 23);
923    __cvmx_interrupt_set_mapping(CVMX_IRQ_L2C, 1, 24);
924    __cvmx_interrupt_set_mapping(CVMX_IRQ_IPD, 1, 25);
925    __cvmx_interrupt_set_mapping(CVMX_IRQ_PIP, 1, 26);
926    __cvmx_interrupt_set_mapping(CVMX_IRQ_PKO, 1, 27);
927    __cvmx_interrupt_set_mapping(CVMX_IRQ_ZIP, 1, 28);
928    __cvmx_interrupt_set_mapping(CVMX_IRQ_TIM, 1, 29);
929    __cvmx_interrupt_set_mapping(CVMX_IRQ_RAD, 1, 30);
930    __cvmx_interrupt_set_mapping(CVMX_IRQ_KEY, 1, 31);
931    __cvmx_interrupt_set_mapping(CVMX_IRQ_DFA, 1, 32);
932    __cvmx_interrupt_set_mapping(CVMX_IRQ_USBCTL, 1, 33);
933    __cvmx_interrupt_set_mapping(CVMX_IRQ_SLI, 1, 34);
934    __cvmx_interrupt_set_mapping(CVMX_IRQ_DPI, 1, 35);
935    __cvmx_interrupt_set_mapping(CVMX_IRQ_AGX0, 1, 36);
936    __cvmx_interrupt_set_mapping(CVMX_IRQ_AGX0 + 1, 1, 37);
937    __cvmx_interrupt_set_mapping(CVMX_IRQ_DPI_DMA, 1, 40);
938    __cvmx_interrupt_set_mapping(CVMX_IRQ_AGL, 1, 46);
939    __cvmx_interrupt_set_mapping(CVMX_IRQ_PTP, 1, 47);
940    __cvmx_interrupt_set_mapping(CVMX_IRQ_PEM0, 1, 48);
941    __cvmx_interrupt_set_mapping(CVMX_IRQ_PEM1, 1, 49);
942    __cvmx_interrupt_set_mapping(CVMX_IRQ_SRIO0, 1, 50);
943    __cvmx_interrupt_set_mapping(CVMX_IRQ_SRIO1, 1, 51);
944    __cvmx_interrupt_set_mapping(CVMX_IRQ_LMC0, 1, 52);
945    __cvmx_interrupt_set_mapping(CVMX_IRQ_DFM, 1, 56);
946    __cvmx_interrupt_set_mapping(CVMX_IRQ_SRIO2, 1, 60);
947    __cvmx_interrupt_set_mapping(CVMX_IRQ_RST, 1, 63);
948}
949
950static void cvmx_interrupt_ciu2_initialize(cvmx_sysinfo_t *sys_info_ptr)
951{
952    int i;
953
954    /* Disable all CIU2 interrupts by default */
955
956    cvmx_write_csr(CVMX_CIU2_EN_PPX_IP2_WRKQ(cvmx_get_core_num()), 0);
957    cvmx_write_csr(CVMX_CIU2_EN_PPX_IP3_WRKQ(cvmx_get_core_num()), 0);
958    cvmx_write_csr(CVMX_CIU2_EN_PPX_IP4_WRKQ(cvmx_get_core_num()), 0);
959    cvmx_write_csr(CVMX_CIU2_EN_PPX_IP2_WDOG(cvmx_get_core_num()), 0);
960    cvmx_write_csr(CVMX_CIU2_EN_PPX_IP3_WDOG(cvmx_get_core_num()), 0);
961    cvmx_write_csr(CVMX_CIU2_EN_PPX_IP4_WDOG(cvmx_get_core_num()), 0);
962    cvmx_write_csr(CVMX_CIU2_EN_PPX_IP2_RML(cvmx_get_core_num()), 0);
963    cvmx_write_csr(CVMX_CIU2_EN_PPX_IP3_RML(cvmx_get_core_num()), 0);
964    cvmx_write_csr(CVMX_CIU2_EN_PPX_IP4_RML(cvmx_get_core_num()), 0);
965    cvmx_write_csr(CVMX_CIU2_EN_PPX_IP2_MIO(cvmx_get_core_num()), 0);
966    cvmx_write_csr(CVMX_CIU2_EN_PPX_IP3_MIO(cvmx_get_core_num()), 0);
967    cvmx_write_csr(CVMX_CIU2_EN_PPX_IP4_MIO(cvmx_get_core_num()), 0);
968    cvmx_write_csr(CVMX_CIU2_EN_PPX_IP2_IO(cvmx_get_core_num()), 0);
969    cvmx_write_csr(CVMX_CIU2_EN_PPX_IP3_IO(cvmx_get_core_num()), 0);
970    cvmx_write_csr(CVMX_CIU2_EN_PPX_IP4_IO(cvmx_get_core_num()), 0);
971    cvmx_write_csr(CVMX_CIU2_EN_PPX_IP2_MEM(cvmx_get_core_num()), 0);
972    cvmx_write_csr(CVMX_CIU2_EN_PPX_IP3_MEM(cvmx_get_core_num()), 0);
973    cvmx_write_csr(CVMX_CIU2_EN_PPX_IP4_MEM(cvmx_get_core_num()), 0);
974    cvmx_write_csr(CVMX_CIU2_EN_PPX_IP2_PKT(cvmx_get_core_num()), 0);
975    cvmx_write_csr(CVMX_CIU2_EN_PPX_IP3_PKT(cvmx_get_core_num()), 0);
976    cvmx_write_csr(CVMX_CIU2_EN_PPX_IP4_PKT(cvmx_get_core_num()), 0);
977    cvmx_write_csr(CVMX_CIU2_EN_PPX_IP2_GPIO(cvmx_get_core_num()), 0);
978    cvmx_write_csr(CVMX_CIU2_EN_PPX_IP3_GPIO(cvmx_get_core_num()), 0);
979    cvmx_write_csr(CVMX_CIU2_EN_PPX_IP4_GPIO(cvmx_get_core_num()), 0);
980    cvmx_write_csr(CVMX_CIU2_EN_PPX_IP2_MBOX(cvmx_get_core_num()), 0);
981    cvmx_write_csr(CVMX_CIU2_EN_PPX_IP3_MBOX(cvmx_get_core_num()), 0);
982    cvmx_write_csr(CVMX_CIU2_EN_PPX_IP4_MBOX(cvmx_get_core_num()), 0);
983
984    if (!cvmx_coremask_first_core(sys_info_ptr->core_mask) || is_core_being_hot_plugged())
985        return;
986
987    /* On the first core, set up the maps */
988    for (i = 0; i < 64; i++) {
989        cvmx_ciu2_wrkq_to_irq[i] = 0xff;
990        cvmx_ciu2_wdog_to_irq[i] = 0xff;
991        cvmx_ciu2_rml_to_irq[i] = 0xff;
992        cvmx_ciu2_mio_to_irq[i] = 0xff;
993        cvmx_ciu2_io_to_irq[i] = 0xff;
994        cvmx_ciu2_mem_to_irq[i] = 0xff;
995        cvmx_ciu2_eth_to_irq[i] = 0xff;
996        cvmx_ciu2_gpio_to_irq[i] = 0xff;
997        cvmx_ciu2_mbox_to_irq[i] = 0xff;
998    }
999
1000    /* WORKQ */
1001    for (i = 0; i < 64; i++)
1002        __cvmx_interrupt_set_mapping(CVMX_IRQ_WORKQ0 + i, 0, i);
1003
1004    /* GPIO */
1005    for (i = 0; i < 16; i++)
1006        __cvmx_interrupt_set_mapping(CVMX_IRQ_GPIO0 + i, 7, i);
1007
1008    /* MBOX */
1009    for (i = 0; i < 4; i++)
1010        __cvmx_interrupt_set_mapping(CVMX_IRQ_MBOX0 + i, 60, i);
1011
1012    /* UART */
1013    for (i = 0; i < 2; i++)
1014        __cvmx_interrupt_set_mapping(CVMX_IRQ_UART0 + i, 3, 36 + i);
1015
1016    /* PCI */
1017    for (i = 0; i < 4; i++)
1018        __cvmx_interrupt_set_mapping(CVMX_IRQ_PCI_INT0 + i, 4, 16 + i);
1019
1020    /* MSI */
1021    for (i = 0; i < 4; i++)
1022        __cvmx_interrupt_set_mapping(CVMX_IRQ_PCI_MSI0 + i, 4, 8 + i);
1023
1024    /* TWSI */
1025    for (i = 0; i < 2; i++)
1026        __cvmx_interrupt_set_mapping(CVMX_IRQ_TWSI0 + i, 3, 32 + i);
1027
1028    /* TRACE */
1029    for (i = 0; i < 4; i++)
1030        __cvmx_interrupt_set_mapping(CVMX_IRQ_TRACE0 + i, 2, 52 + i);
1031
1032    /* GMX_DRP */
1033    for (i = 0; i < 5; i++)
1034        __cvmx_interrupt_set_mapping(CVMX_IRQ_GMX_DRP0 + i, 6, 8 + i);
1035
1036    __cvmx_interrupt_set_mapping(CVMX_IRQ_IPD_DRP, 3, 2);
1037
1038    /* TIMER0 */
1039    for (i = 0; i < 4; i++)
1040        __cvmx_interrupt_set_mapping(CVMX_IRQ_TIMER0 + i, 3, 8 + i);
1041
1042    __cvmx_interrupt_set_mapping(CVMX_IRQ_USB0, 3, 44);
1043    __cvmx_interrupt_set_mapping(CVMX_IRQ_IPDPPTHR, 3, 0);
1044    __cvmx_interrupt_set_mapping(CVMX_IRQ_MII0, 6, 40);
1045    __cvmx_interrupt_set_mapping(CVMX_IRQ_BOOTDMA, 3, 18);
1046
1047    /* WDOG */
1048    for (i = 0; i < 32; i++)
1049        __cvmx_interrupt_set_mapping(CVMX_IRQ_WDOG0 + i, 1, i);
1050
1051    __cvmx_interrupt_set_mapping(CVMX_IRQ_NAND, 3, 16);
1052    __cvmx_interrupt_set_mapping(CVMX_IRQ_MIO, 3, 17);
1053    __cvmx_interrupt_set_mapping(CVMX_IRQ_IOB, 2, 0);
1054    __cvmx_interrupt_set_mapping(CVMX_IRQ_FPA, 2, 4);
1055    __cvmx_interrupt_set_mapping(CVMX_IRQ_POW, 2, 16);
1056    __cvmx_interrupt_set_mapping(CVMX_IRQ_L2C, 2, 48);
1057    __cvmx_interrupt_set_mapping(CVMX_IRQ_IPD, 2, 5);
1058    __cvmx_interrupt_set_mapping(CVMX_IRQ_PIP, 2, 6);
1059    __cvmx_interrupt_set_mapping(CVMX_IRQ_PKO, 2, 7);
1060    __cvmx_interrupt_set_mapping(CVMX_IRQ_ZIP, 2, 24);
1061    __cvmx_interrupt_set_mapping(CVMX_IRQ_TIM, 2, 28);
1062    __cvmx_interrupt_set_mapping(CVMX_IRQ_RAD, 2, 29);
1063    __cvmx_interrupt_set_mapping(CVMX_IRQ_KEY, 2, 30);
1064    __cvmx_interrupt_set_mapping(CVMX_IRQ_DFA, 2, 40);
1065    __cvmx_interrupt_set_mapping(CVMX_IRQ_USBCTL, 3, 40);
1066    __cvmx_interrupt_set_mapping(CVMX_IRQ_SLI, 2, 32);
1067    __cvmx_interrupt_set_mapping(CVMX_IRQ_DPI, 2, 33);
1068    __cvmx_interrupt_set_mapping(CVMX_IRQ_DPI_DMA, 2, 36);
1069
1070    /* AGX */
1071    for (i = 0; i < 5; i++)
1072        __cvmx_interrupt_set_mapping(CVMX_IRQ_AGX0 + i, 6, i);
1073
1074    __cvmx_interrupt_set_mapping(CVMX_IRQ_AGL, 6, 32);
1075    __cvmx_interrupt_set_mapping(CVMX_IRQ_PTP, 3, 48);
1076    __cvmx_interrupt_set_mapping(CVMX_IRQ_PEM0, 4, 32);
1077    __cvmx_interrupt_set_mapping(CVMX_IRQ_PEM1, 4, 32);
1078
1079    /* LMC */
1080    for (i = 0; i < 4; i++)
1081        __cvmx_interrupt_set_mapping(CVMX_IRQ_LMC0 + i, 5, i);
1082
1083    __cvmx_interrupt_set_mapping(CVMX_IRQ_RST, 3, 63);
1084    __cvmx_interrupt_set_mapping(CVMX_IRQ_ILK, 6, 48);
1085}
1086
1087/**
1088 * Initialize the interrupt routine and copy the low level
1089 * stub into the correct interrupt vector. This is called
1090 * automatically during application startup.
1091 */
1092void cvmx_interrupt_initialize(void)
1093{
1094    void *low_level_loc;
1095    cvmx_sysinfo_t *sys_info_ptr = cvmx_sysinfo_get();
1096    int i;
1097
1098    if (cvmx_coremask_first_core(sys_info_ptr->core_mask) && !is_core_being_hot_plugged()) {
1099#ifndef CVMX_ENABLE_CSR_ADDRESS_CHECKING
1100        /* We assume this relationship between the registers. */
1101        CVMX_BUILD_ASSERT(CVMX_CIU2_SRC_PPX_IP2_WRKQ(0) + 0x1000 == CVMX_CIU2_SRC_PPX_IP2_WDOG(0));
1102        CVMX_BUILD_ASSERT(CVMX_CIU2_SRC_PPX_IP2_WRKQ(0) + 0x2000 == CVMX_CIU2_SRC_PPX_IP2_RML(0));
1103        CVMX_BUILD_ASSERT(CVMX_CIU2_SRC_PPX_IP2_WRKQ(0) + 0x3000 == CVMX_CIU2_SRC_PPX_IP2_MIO(0));
1104        CVMX_BUILD_ASSERT(CVMX_CIU2_SRC_PPX_IP2_WRKQ(0) + 0x4000 == CVMX_CIU2_SRC_PPX_IP2_IO(0));
1105        CVMX_BUILD_ASSERT(CVMX_CIU2_SRC_PPX_IP2_WRKQ(0) + 0x5000 == CVMX_CIU2_SRC_PPX_IP2_MEM(0));
1106        CVMX_BUILD_ASSERT(CVMX_CIU2_SRC_PPX_IP2_WRKQ(0) + 0x6000 == CVMX_CIU2_SRC_PPX_IP2_PKT(0));
1107        CVMX_BUILD_ASSERT(CVMX_CIU2_SRC_PPX_IP2_WRKQ(0) + 0x7000 == CVMX_CIU2_SRC_PPX_IP2_GPIO(0));
1108        CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(0) + 0x1000 == CVMX_CIU2_EN_PPX_IP2_WDOG_W1C(0));
1109        CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(0) + 0x2000 == CVMX_CIU2_EN_PPX_IP2_RML_W1C(0));
1110        CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(0) + 0x3000 == CVMX_CIU2_EN_PPX_IP2_MIO_W1C(0));
1111        CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(0) + 0x4000 == CVMX_CIU2_EN_PPX_IP2_IO_W1C(0));
1112        CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(0) + 0x5000 == CVMX_CIU2_EN_PPX_IP2_MEM_W1C(0));
1113        CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(0) + 0x6000 == CVMX_CIU2_EN_PPX_IP2_PKT_W1C(0));
1114        CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(0) + 0x7000 == CVMX_CIU2_EN_PPX_IP2_GPIO_W1C(0));
1115        CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(0) + 0x1000 == CVMX_CIU2_EN_PPX_IP2_WDOG_W1S(0));
1116        CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(0) + 0x2000 == CVMX_CIU2_EN_PPX_IP2_RML_W1S(0));
1117        CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(0) + 0x3000 == CVMX_CIU2_EN_PPX_IP2_MIO_W1S(0));
1118        CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(0) + 0x4000 == CVMX_CIU2_EN_PPX_IP2_IO_W1S(0));
1119        CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(0) + 0x5000 == CVMX_CIU2_EN_PPX_IP2_MEM_W1S(0));
1120        CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(0) + 0x6000 == CVMX_CIU2_EN_PPX_IP2_PKT_W1S(0));
1121        CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(0) + 0x7000 == CVMX_CIU2_EN_PPX_IP2_GPIO_W1S(0));
1122#endif /* !CVMX_ENABLE_CSR_ADDRESS_CHECKING */
1123
1124        for (i = 0; i < CVMX_IRQ_MAX; i++) {
1125            cvmx_interrupt_state.handlers[i].handler = __cvmx_interrupt_default;
1126            cvmx_interrupt_state.handlers[i].data = NULL;
1127            cvmx_interrupt_state.handlers[i].handler_data = -1;
1128        }
1129    }
1130
1131    if (OCTEON_IS_MODEL(OCTEON_CN68XX))
1132    {
1133        cvmx_interrupt_mask_irq = __cvmx_interrupt_ciu2_mask_irq;
1134        cvmx_interrupt_unmask_irq = __cvmx_interrupt_ciu2_unmask_irq;
1135        cvmx_interrupt_ciu2_initialize(sys_info_ptr);
1136        /* Add an interrupt handlers for chained CIU interrupt */
1137        cvmx_interrupt_register(CVMX_IRQ_MIPS2, __cvmx_interrupt_ciu2, NULL);
1138    }
1139    else if (OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_2))
1140    {
1141        cvmx_interrupt_mask_irq = __cvmx_interrupt_ciu_mask_irq;
1142        cvmx_interrupt_unmask_irq = __cvmx_interrupt_ciu_unmask_irq;
1143        cvmx_interrupt_ciu_initialize(sys_info_ptr);
1144
1145        /* Add an interrupt handlers for chained CIU interrupts */
1146        cvmx_interrupt_register(CVMX_IRQ_MIPS2, __cvmx_interrupt_ciu, NULL);
1147        cvmx_interrupt_register(CVMX_IRQ_MIPS3, __cvmx_interrupt_ciu_cn61xx, NULL);
1148    }
1149    else
1150    {
1151        cvmx_interrupt_mask_irq = __cvmx_interrupt_ciu_mask_irq;
1152        cvmx_interrupt_unmask_irq = __cvmx_interrupt_ciu_unmask_irq;
1153        cvmx_interrupt_ciu_initialize(sys_info_ptr);
1154
1155        /* Add an interrupt handlers for chained CIU interrupts */
1156        cvmx_interrupt_register(CVMX_IRQ_MIPS2, __cvmx_interrupt_ciu, NULL);
1157        cvmx_interrupt_register(CVMX_IRQ_MIPS3, __cvmx_interrupt_ciu, NULL);
1158    }
1159
1160    /* Move performance counter interrupts to IRQ 6*/
1161    cvmx_update_perfcnt_irq();
1162
1163    /* Add an interrupt handler for Perf counter interrupts */
1164    cvmx_interrupt_register(CVMX_IRQ_MIPS6, __cvmx_interrupt_perf, NULL);
1165
1166    if (cvmx_coremask_first_core(sys_info_ptr->core_mask) && !is_core_being_hot_plugged())
1167    {
1168        cvmx_interrupt_state.exception_handler = __cvmx_interrupt_default_exception_handler;
1169
1170        low_level_loc = CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0,sys_info_ptr->exception_base_addr));
1171        memcpy(low_level_loc + 0x80, (void*)cvmx_interrupt_stage1, 0x80);
1172        memcpy(low_level_loc + 0x100, (void*)cvmx_interrupt_cache_error, 0x80);
1173        memcpy(low_level_loc + 0x180, (void*)cvmx_interrupt_stage1, 0x80);
1174        memcpy(low_level_loc + 0x200, (void*)cvmx_interrupt_stage1, 0x80);
1175
1176        /* Make sure the locations used to count Icache and Dcache exceptions
1177            starts out as zero */
1178        cvmx_write64_uint64(CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, 8), 0);
1179        cvmx_write64_uint64(CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, 16), 0);
1180        cvmx_write64_uint64(CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, 24), 0);
1181        CVMX_SYNC;
1182
1183        /* Add an interrupt handler for ECC failures */
1184        if (cvmx_error_initialize(0 /* || CVMX_ERROR_FLAGS_ECC_SINGLE_BIT */))
1185            cvmx_warn("cvmx_error_initialize() failed\n");
1186
1187        /* Enable PIP/IPD, POW, PKO, FPA, NAND, KEY, RAD, L2C, LMC, GMX, AGL,
1188           DFM, DFA, error handling interrupts. */
1189        if (OCTEON_IS_MODEL(OCTEON_CN68XX))
1190        {
1191            int i;
1192
1193            for (i = 0; i < 5; i++)
1194            {
1195                cvmx_interrupt_register(CVMX_IRQ_AGX0+i, __cvmx_interrupt_ecc, NULL);
1196                cvmx_interrupt_unmask_irq(CVMX_IRQ_AGX0+i);
1197            }
1198            cvmx_interrupt_register(CVMX_IRQ_NAND, __cvmx_interrupt_ecc, NULL);
1199            cvmx_interrupt_unmask_irq(CVMX_IRQ_NAND);
1200            cvmx_interrupt_register(CVMX_IRQ_MIO, __cvmx_interrupt_ecc, NULL);
1201            cvmx_interrupt_unmask_irq(CVMX_IRQ_MIO);
1202            cvmx_interrupt_register(CVMX_IRQ_FPA, __cvmx_interrupt_ecc, NULL);
1203            cvmx_interrupt_unmask_irq(CVMX_IRQ_FPA);
1204            cvmx_interrupt_register(CVMX_IRQ_IPD, __cvmx_interrupt_ecc, NULL);
1205            cvmx_interrupt_unmask_irq(CVMX_IRQ_IPD);
1206            cvmx_interrupt_register(CVMX_IRQ_PIP, __cvmx_interrupt_ecc, NULL);
1207            cvmx_interrupt_unmask_irq(CVMX_IRQ_PIP);
1208            cvmx_interrupt_register(CVMX_IRQ_POW, __cvmx_interrupt_ecc, NULL);
1209            cvmx_interrupt_unmask_irq(CVMX_IRQ_POW);
1210            cvmx_interrupt_register(CVMX_IRQ_L2C, __cvmx_interrupt_ecc, NULL);
1211            cvmx_interrupt_unmask_irq(CVMX_IRQ_L2C);
1212            cvmx_interrupt_register(CVMX_IRQ_PKO, __cvmx_interrupt_ecc, NULL);
1213            cvmx_interrupt_unmask_irq(CVMX_IRQ_PKO);
1214            cvmx_interrupt_register(CVMX_IRQ_ZIP, __cvmx_interrupt_ecc, NULL);
1215            cvmx_interrupt_unmask_irq(CVMX_IRQ_ZIP);
1216            cvmx_interrupt_register(CVMX_IRQ_RAD, __cvmx_interrupt_ecc, NULL);
1217            cvmx_interrupt_unmask_irq(CVMX_IRQ_RAD);
1218            cvmx_interrupt_register(CVMX_IRQ_KEY, __cvmx_interrupt_ecc, NULL);
1219            cvmx_interrupt_unmask_irq(CVMX_IRQ_KEY);
1220            /* Before enabling SLI interrupt clear any RML_TO interrupt */
1221            if (cvmx_read_csr(CVMX_PEXP_SLI_INT_SUM) & 0x1)
1222            {
1223                cvmx_safe_printf("clearing pending SLI_INT_SUM[RML_TO] interrupt (ignore)\n");
1224                cvmx_write_csr(CVMX_PEXP_SLI_INT_SUM, 1);
1225            }
1226            cvmx_interrupt_register(CVMX_IRQ_SLI, __cvmx_interrupt_ecc, NULL);
1227            cvmx_interrupt_unmask_irq(CVMX_IRQ_SLI);
1228            cvmx_interrupt_register(CVMX_IRQ_DPI, __cvmx_interrupt_ecc, NULL);
1229            cvmx_interrupt_unmask_irq(CVMX_IRQ_DPI);
1230            cvmx_interrupt_register(CVMX_IRQ_DFA, __cvmx_interrupt_ecc, NULL);
1231            cvmx_interrupt_unmask_irq(CVMX_IRQ_DFA);
1232            cvmx_interrupt_register(CVMX_IRQ_AGL, __cvmx_interrupt_ecc, NULL);
1233            cvmx_interrupt_unmask_irq(CVMX_IRQ_AGL);
1234            for (i = 0; i < 4; i++)
1235            {
1236                cvmx_interrupt_register(CVMX_IRQ_LMC0+i, __cvmx_interrupt_ecc, NULL);
1237                cvmx_interrupt_unmask_irq(CVMX_IRQ_LMC0+i);
1238            }
1239            cvmx_interrupt_register(CVMX_IRQ_DFM, __cvmx_interrupt_ecc, NULL);
1240            cvmx_interrupt_unmask_irq(CVMX_IRQ_DFM);
1241            cvmx_interrupt_register(CVMX_IRQ_RST, __cvmx_interrupt_ecc, NULL);
1242            cvmx_interrupt_unmask_irq(CVMX_IRQ_RST);
1243            cvmx_interrupt_register(CVMX_IRQ_ILK, __cvmx_interrupt_ecc, NULL);
1244            cvmx_interrupt_unmask_irq(CVMX_IRQ_ILK);
1245        }
1246        else
1247        {
1248            cvmx_interrupt_register(CVMX_IRQ_RML, __cvmx_interrupt_ecc, NULL);
1249            cvmx_interrupt_unmask_irq(CVMX_IRQ_RML);
1250        }
1251
1252        cvmx_atomic_set32(&cvmx_interrupt_initialize_flag, 1);
1253    }
1254
1255    while (!cvmx_atomic_get32(&cvmx_interrupt_initialize_flag))
1256        ; /* Wait for first core to finish above. */
1257
1258    if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
1259        cvmx_interrupt_unmask_irq(CVMX_IRQ_MIPS2);
1260    } else {
1261        cvmx_interrupt_unmask_irq(CVMX_IRQ_MIPS2);
1262        cvmx_interrupt_unmask_irq(CVMX_IRQ_MIPS3);
1263    }
1264
1265    CVMX_ICACHE_INVALIDATE;
1266
1267    /* Enable interrupts for each core (bit0 of COP0 Status) */
1268    cvmx_interrupt_restore(1);
1269}
1270
1271
1272
1273/**
1274 * Set the exception handler for all non interrupt sources.
1275 *
1276 * @param handler New exception handler
1277 * @return Old exception handler
1278 */
1279cvmx_interrupt_exception_t cvmx_interrupt_set_exception(cvmx_interrupt_exception_t handler)
1280{
1281    cvmx_interrupt_exception_t result = cvmx_interrupt_state.exception_handler;
1282    cvmx_interrupt_state.exception_handler = handler;
1283    CVMX_SYNCWS;
1284    return result;
1285}
1286#endif /* !__U_BOOT__ */
1287
1288
1289