cvmx-interrupt.c revision 210284
/***********************license start***************
 *  Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
 *  reserved.
 *
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are
 *  met:
 *
 *      * Redistributions of source code must retain the above copyright
 *        notice, this list of conditions and the following disclaimer.
 *
 *      * Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials provided
 *        with the distribution.
 *
 *      * Neither the name of Cavium Networks nor the names of
 *        its contributors may be used to endorse or promote products
 *        derived from this software without specific prior written
 *        permission.
 *
 *  TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 *  AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
 *  OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
 *  RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
 *  REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
 *  DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
 *  OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
 *  PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
 *  POSSESSION OR CORRESPONDENCE TO DESCRIPTION.  THE ENTIRE RISK ARISING OUT
 *  OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
 *
 *
 *  For any questions regarding licensing please contact marketing@caviumnetworks.com
 *
 ***********************license end**************************************/


/**
 * @file
 *
 * Interface to the MIPS interrupts.
 *
 * <hr>$Revision: 42264 $<hr>
 */
#if __GNUC__ >= 4
/* Backtrace is only available with the new toolchain.  */
#include <execinfo.h>
#endif
#include "cvmx-config.h"
#include "cvmx.h"
#include "cvmx-interrupt.h"
#include "cvmx-sysinfo.h"
#include "cvmx-uart.h"
#include "cvmx-pow.h"
#include "cvmx-ebt3000.h"
#include "cvmx-coremask.h"
#include "cvmx-spinlock.h"
#include "cvmx-app-init.h"

EXTERN_ASM void cvmx_interrupt_stage1(void);
EXTERN_ASM void cvmx_interrupt_cache_error(void);
/**
 * Internal state of the interrupt registration
 */
typedef struct
{
    cvmx_interrupt_func_t handlers[256];  /**< One function to call per interrupt */
    void *                data[256];      /**< User data per interrupt */
    cvmx_interrupt_exception_t exception_handler;
} cvmx_interrupt_state_t;

/**
 * Internal state of the interrupt registration
 */
static CVMX_SHARED cvmx_interrupt_state_t cvmx_interrupt_state;
static CVMX_SHARED cvmx_spinlock_t cvmx_interrupt_default_lock;

#define COP0_CAUSE      "$13,0"
#define COP0_STATUS     "$12,0"
#define COP0_BADVADDR   "$8,0"
#define COP0_EPC        "$14,0"
#define READ_COP0(dest, R) asm volatile ("dmfc0 %[rt]," R : [rt] "=r" (dest))
#define ULL unsigned long long


/**
 * @INTERNAL
 * Dump all useful registers to the console
 *
 * @param registers CPU registers to dump
 */
static void __cvmx_interrupt_dump_registers(uint64_t registers[32])
{
    static const char *name[32] = {"r0","at","v0","v1","a0","a1","a2","a3",
        "t0","t1","t2","t3","t4","t5","t6","t7","s0","s1","s2","s3","s4","s5",
        "s6","s7", "t8","t9", "k0","k1","gp","sp","s8","ra"};
    uint64_t reg;
    for (reg=0; reg<16; reg++)
    {
        cvmx_safe_printf("%3s ($%02d): 0x%016llx \t %3s ($%02d): 0x%016llx\n",
               name[reg], (int)reg, (ULL)registers[reg], name[reg+16], (int)reg+16, (ULL)registers[reg+16]);
    }
    READ_COP0(reg, COP0_CAUSE);
    cvmx_safe_printf("%16s: 0x%016llx\n", "COP0_CAUSE", (ULL)reg);
    READ_COP0(reg, COP0_STATUS);
    cvmx_safe_printf("%16s: 0x%016llx\n", "COP0_STATUS", (ULL)reg);
    READ_COP0(reg, COP0_BADVADDR);
    cvmx_safe_printf("%16s: 0x%016llx\n", "COP0_BADVADDR", (ULL)reg);
    READ_COP0(reg, COP0_EPC);
    cvmx_safe_printf("%16s: 0x%016llx\n", "COP0_EPC", (ULL)reg);
}


/**
 * @INTERNAL
 * Default exception handler. Prints out the exception
 * cause decode and all relevant registers.
 *
 * @param registers Registers at time of the exception
 */
static void __cvmx_interrupt_default_exception_handler(uint64_t registers[32])
{
    uint64_t trap_print_cause;

    ebt3000_str_write("Trap");
    cvmx_spinlock_lock(&cvmx_interrupt_default_lock);
    cvmx_safe_printf("******************************************************************\n");
    cvmx_safe_printf("Core %d: Unhandled Exception. Cause register decodes to:\n", (int)cvmx_get_core_num());
    READ_COP0(trap_print_cause, COP0_CAUSE);
    switch ((trap_print_cause >> 2) & 0x1f)
    {
        case 0x0:
            cvmx_safe_printf("Interrupt\n");
            break;
        case 0x1:
            cvmx_safe_printf("TLB Mod\n");
            break;
        case 0x2:
            cvmx_safe_printf("tlb load/fetch\n");
            break;
        case 0x3:
            cvmx_safe_printf("tlb store\n");
            break;
        case 0x4:
            cvmx_safe_printf("address exc, load/fetch\n");
            break;
        case 0x5:
            cvmx_safe_printf("address exc, store\n");
            break;
        case 0x6:
            cvmx_safe_printf("bus error, inst. fetch\n");
            break;
        case 0x7:
            cvmx_safe_printf("bus error, load/store\n");
            break;
        case 0x8:
            cvmx_safe_printf("syscall\n");
            break;
        case 0x9:
            cvmx_safe_printf("breakpoint\n");
            break;
        case 0xa:
            cvmx_safe_printf("reserved instruction\n");
            break;
        case 0xb:
            cvmx_safe_printf("cop unusable\n");
            break;
        case 0xc:
            cvmx_safe_printf("arithmetic overflow\n");
            break;
        case 0xd:
            cvmx_safe_printf("trap\n");
            break;
        case 0xf:
            cvmx_safe_printf("floating point exc\n");
            break;
        case 0x12:
            cvmx_safe_printf("cop2 exception\n");
            break;
        case 0x16:
            cvmx_safe_printf("mdmx unusable\n");
            break;
        case 0x17:
            cvmx_safe_printf("watch\n");
            break;
        case 0x18:
            cvmx_safe_printf("machine check\n");
            break;
        case 0x1e:
            cvmx_safe_printf("cache error\n");
            break;
        default:
            cvmx_safe_printf("Reserved exception cause.\n");
            break;
    }

    cvmx_safe_printf("******************************************************************\n");
    __cvmx_interrupt_dump_registers(registers);
    cvmx_safe_printf("******************************************************************\n");

#if __GNUC__ >= 4 && !defined(OCTEON_DISABLE_BACKTRACE)
    cvmx_safe_printf("Backtrace:\n\n");
    __octeon_print_backtrace_func ((__octeon_backtrace_printf_t)cvmx_safe_printf);
    cvmx_safe_printf("******************************************************************\n");
#endif

    cvmx_spinlock_unlock(&cvmx_interrupt_default_lock);

    if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
        CVMX_BREAK;

    while (1)
    {
        /* Interrupts are suppressed when we are in the exception
           handler (because of SR[EXL]).  Spin and poll the uart
           status and see if the debugger is trying to stop us. */
        cvmx_uart_lsr_t lsrval;
        lsrval.u64 = cvmx_read_csr(CVMX_MIO_UARTX_LSR(cvmx_debug_uart));
        if (lsrval.s.dr)
        {
            uint64_t tmp;
            /* Pulse the MCD0 signal. */
            asm volatile (
                ".set push\n"
                ".set noreorder\n"
                ".set mips64\n"
                "dmfc0 %0, $22\n"
                "ori   %0, %0, 0x10\n"
                "dmtc0 %0, $22\n"
                ".set pop\n"
                : "=r" (tmp));
        }
    }
}


/**
 * @INTERNAL
 * Default interrupt handler if the user doesn't register one.
 *
 * @param irq_number IRQ that caused this interrupt
 * @param registers  Registers at the time of the interrupt
 * @param user_arg   Unused optional user data
 */
static void __cvmx_interrupt_default(int irq_number, uint64_t registers[32], void *user_arg)
{
    cvmx_safe_printf("cvmx_interrupt_default: Received interrupt %d\n", irq_number);
    __cvmx_interrupt_dump_registers(registers);
}


/**
 * @INTERNAL
 * Handler for interrupt lines 2 and 3. These are directly tied
 * to the CIU. The handler queries the status of the CIU and
 * calls the secondary handler for the CIU interrupt that
 * occurred.
 *
 * @param irq_number Interrupt number that fired (2 or 3)
 * @param registers  Registers at the time of the interrupt
 * @param user_arg   Unused user argument
 */
static void __cvmx_interrupt_ciu(int irq_number, uint64_t registers[32], void *user_arg)
{
    int ciu_offset = cvmx_get_core_num() * 2 + irq_number - 2;
    uint64_t irq_mask = cvmx_read_csr(CVMX_CIU_INTX_SUM0(ciu_offset)) & cvmx_read_csr(CVMX_CIU_INTX_EN0(ciu_offset));
    int irq = 8;

    /* Handle EN0 sources */
    while (irq_mask)
    {
        if (irq_mask&1)
        {
            cvmx_interrupt_state.handlers[irq](irq, registers, cvmx_interrupt_state.data[irq]);
            return;
        }
        irq_mask = irq_mask >> 1;
        irq++;
    }

    /* Handle EN1 sources */
    irq_mask = cvmx_read_csr(CVMX_CIU_INT_SUM1) & cvmx_read_csr(CVMX_CIU_INTX_EN1(ciu_offset));
    irq = 8 + 64;
    while (irq_mask)
    {
        if (irq_mask&1)
        {
            cvmx_interrupt_state.handlers[irq](irq, registers, cvmx_interrupt_state.data[irq]);
            return;
        }
        irq_mask = irq_mask >> 1;
        irq++;
    }
}
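
/*
 * Illustrative sketch (not part of the SDK source itself): the dispatch above
 * implies a simple numbering convention for the chained CIU sources.  Bit n of
 * CIU_INTX_SUM0/EN0 is delivered as IRQ number 8 + n, and bit n of
 * CIU_INT_SUM1/EN1 as IRQ number 8 + 64 + n.  The helper name below is
 * hypothetical and the block is not compiled.
 */
#if 0
static inline int example_ciu_bit_to_irq(int bank, int bit)
{
    /* bank 0 = CIU_INTX_SUM0/EN0, bank 1 = CIU_INT_SUM1/EN1 */
    return 8 + bank * 64 + bit;
}
#endif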


/**
 * @INTERNAL
 * Called for all RML interrupts. This is usually an ECC error
 *
 * @param irq_number Interrupt number that we're being called for
 * @param registers  Registers at the time of the interrupt
 * @param user_arg   Unused user argument
 */
static void __cvmx_interrupt_ecc(int irq_number, uint64_t registers[32], void *user_arg)
{
    cvmx_interrupt_rsl_decode();
}


/**
 * Process an interrupt request
 *
 * @param registers Registers at time of interrupt / exception.
 *                  Registers 0-31 are standard MIPS, others specific to this routine
 */
EXTERN_ASM void cvmx_interrupt_do_irq(uint64_t registers[35]);
void cvmx_interrupt_do_irq(uint64_t registers[35])
{
    uint64_t        mask;
    uint64_t        cause;
    uint64_t        status;
    uint64_t        cache_err;
    int             i;
    uint32_t exc_vec;

    /* Determine the cause of the interrupt */
    asm volatile ("dmfc0 %0,$13,0" : "=r" (cause));
    asm volatile ("dmfc0 %0,$12,0" : "=r" (status));

    /* The assembly stub at each exception vector saves its address in k1 when
    ** it calls the stage 2 handler.  We use this to compute the exception vector
    ** that brought us here */
    exc_vec = (uint32_t)(registers[27] & 0x780);  /* Mask off bits we need to ignore */

    /* Check for cache errors.  The cache errors go to a separate exception vector,
    ** so we will only check these if we got here from a cache error exception, and
    ** the ERL (error level) bit is set. */
    if (exc_vec == 0x100 && (status & 0x4))
    {
        i = cvmx_get_core_num();
        CVMX_MF_CACHE_ERR(cache_err);

        /* Use copy of DCACHE_ERR register that early exception stub read */
        if (registers[34] & 0x1)
        {
            cvmx_safe_printf("Dcache error detected: core: %d, set: %d, va 6:3: 0x%x\n", i, (int)(cache_err >> 3) & 0x3, (int)(cache_err >> 3) & 0xf);
            uint64_t dcache_err = 0;
            CVMX_MT_DCACHE_ERR(dcache_err);
        }
        else if (cache_err & 0x1)
        {
            cvmx_safe_printf("Icache error detected: core: %d, set: %d, way : %d\n", i, (int)(cache_err >> 5) & 0x3f, (int)(cache_err >> 7) & 0x3);
            cache_err = 0;
            CVMX_MT_CACHE_ERR(cache_err);
        }
        else
            cvmx_safe_printf("Cache error exception: core %d\n", i);
    }

    if ((cause & 0x7c) != 0)
    {
        cvmx_interrupt_state.exception_handler(registers);
        return;
    }

    /* Convert the cause into an active mask */
    mask = ((cause & status) >> 8) & 0xff;
    if (mask == 0)
        return; /* Spurious interrupt */

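    /* IRQ numbers 0-7 correspond directly to the Cause[IP0..IP7] bits: 0 and 1
       are the MIPS software interrupts, 2 and 3 are the lines driven by the
       CIU (dispatched by __cvmx_interrupt_ciu above), and 7 is normally the
       CP0 count/compare timer. */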
    for (i=0; i<8; i++)
    {
        if (mask & (1<<i))
        {
            cvmx_interrupt_state.handlers[i](i, registers, cvmx_interrupt_state.data[i]);
            return;
        }
    }

    /* We should never get here */
    __cvmx_interrupt_default_exception_handler(registers);
}


/**
 * Initialize the interrupt routine and copy the low level
 * stub into the correct interrupt vector. This is called
 * automatically during application startup.
 */
void cvmx_interrupt_initialize(void)
{
    void *low_level_loc;
    cvmx_sysinfo_t *sys_info_ptr = cvmx_sysinfo_get();
    int i;

    /* Disable all CIU interrupts by default */
    cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num()*2), 0);
    cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num()*2+1), 0);
    cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num()*2), 0);
    cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num()*2+1), 0);

    if (cvmx_coremask_first_core(sys_info_ptr->core_mask))
    {
        cvmx_interrupt_state.exception_handler = __cvmx_interrupt_default_exception_handler;

        for (i=0; i<256; i++)
        {
            cvmx_interrupt_state.handlers[i] = __cvmx_interrupt_default;
            cvmx_interrupt_state.data[i] = NULL;
        }

        low_level_loc = CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0,sys_info_ptr->exception_base_addr));
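        /* These offsets are the standard MIPS64 exception vectors relative to
           the exception base: 0x80 (XTLB refill), 0x180 (general exception)
           and 0x200 (interrupt, when Cause[IV] is set) all receive the common
           stage1 stub, while 0x100 receives the cache error stub. */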
        memcpy(low_level_loc + 0x80, (void*)cvmx_interrupt_stage1, 0x80);
        memcpy(low_level_loc + 0x100, (void*)cvmx_interrupt_cache_error, 0x80);
        memcpy(low_level_loc + 0x180, (void*)cvmx_interrupt_stage1, 0x80);
        memcpy(low_level_loc + 0x200, (void*)cvmx_interrupt_stage1, 0x80);
        /* Make sure the locations used to count Icache and Dcache exceptions
           start out as zero */
        cvmx_write64_uint64(CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, 8), 0);
        cvmx_write64_uint64(CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, 16), 0);
        cvmx_write64_uint64(CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, 24), 0);
        CVMX_SYNC;

        /* Add interrupt handlers for the chained CIU interrupts */
        cvmx_interrupt_register(CVMX_IRQ_CIU0, __cvmx_interrupt_ciu, NULL);
        cvmx_interrupt_register(CVMX_IRQ_CIU1, __cvmx_interrupt_ciu, NULL);

        /* Add an interrupt handler for ECC failures */
        cvmx_interrupt_register(CVMX_IRQ_RML, __cvmx_interrupt_ecc, NULL);

        cvmx_interrupt_rsl_enable();
        cvmx_interrupt_unmask_irq(CVMX_IRQ_RML);
    }

    cvmx_interrupt_unmask_irq(CVMX_IRQ_CIU0);
    cvmx_interrupt_unmask_irq(CVMX_IRQ_CIU1);
    CVMX_ICACHE_INVALIDATE;

    /* Enable interrupts for each core (bit0 of COP0 Status) */
    uint32_t mask;
    asm volatile (
        "mfc0   %0,$12,0\n"
        "ori    %0, %0, 1\n"
        "mtc0   %0,$12,0\n"
        : "=r" (mask));
}


/**
 * Register an interrupt handler for the specified interrupt number.
 *
 * @param irq_number Interrupt number to register for (0-135). See
 *                   cvmx-interrupt.h for enumeration and description of sources.
 * @param func       Function to call on interrupt.
 * @param user_arg   User data to pass to the interrupt handler
 */
void cvmx_interrupt_register(cvmx_irq_t irq_number, cvmx_interrupt_func_t func, void *user_arg)
{
    cvmx_interrupt_state.handlers[irq_number] = func;
    cvmx_interrupt_state.data[irq_number] = user_arg;
    CVMX_SYNCWS;
}
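
/*
 * Example usage (illustrative sketch, not part of the SDK source itself):
 * register a handler and then unmask its source.  The IRQ number
 * CVMX_IRQ_GPIO0 and the example_ functions below are assumptions chosen for
 * illustration only; see cvmx-interrupt.h for the real list of sources.
 */
#if 0
static void example_gpio_handler(int irq_number, uint64_t registers[32], void *user_arg)
{
    /* Clear the device's interrupt condition here, then do the real work */
    cvmx_safe_printf("Received interrupt %d\n", irq_number);
}

static void example_setup(void)
{
    cvmx_interrupt_register(CVMX_IRQ_GPIO0, example_gpio_handler, NULL);
    cvmx_interrupt_unmask_irq(CVMX_IRQ_GPIO0);
}
#endif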


/**
 * Set the exception handler for all non-interrupt sources.
 *
 * @param handler New exception handler
 * @return Old exception handler
 */
cvmx_interrupt_exception_t cvmx_interrupt_set_exception(cvmx_interrupt_exception_t handler)
{
    cvmx_interrupt_exception_t result = cvmx_interrupt_state.exception_handler;
    cvmx_interrupt_state.exception_handler = handler;
    CVMX_SYNCWS;
    return result;
}
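
/*
 * Example usage (illustrative sketch, not part of the SDK source itself):
 * install an application exception handler that chains back to the previous
 * one.  All names prefixed with example_ are hypothetical.
 */
#if 0
static CVMX_SHARED cvmx_interrupt_exception_t example_previous_handler;

static void example_exception_handler(uint64_t registers[32])
{
    cvmx_safe_printf("Application exception hook on core %d\n", (int)cvmx_get_core_num());
    example_previous_handler(registers);    /* fall back to the old handler */
}

static void example_install(void)
{
    example_previous_handler = cvmx_interrupt_set_exception(example_exception_handler);
}
#endif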


/**
 * Version of printf that works better in exception context.
 *
 * @param format Printf style format string
 */
void cvmx_safe_printf(const char *format, ...)
{
    static char buffer[256];
    va_list args;
    va_start(args, format);
    int count = vsnprintf(buffer, sizeof(buffer), format, args);
    va_end(args);

    /* vsnprintf() returns the length the full output would have had; clamp it
       to what actually fits in the buffer */
    if (count > (int)sizeof(buffer) - 1)
        count = sizeof(buffer) - 1;

    char *ptr = buffer;
    while (count-- > 0)
    {
        cvmx_uart_lsr_t lsrval;

        /* Spin until there is room */
        do
        {
            lsrval.u64 = cvmx_read_csr(CVMX_MIO_UARTX_LSR(0));
            if (lsrval.s.temt == 0)
                cvmx_wait(10000);   /* Just to reduce the load on the system */
        }
        while (lsrval.s.temt == 0);

        if (*ptr == '\n')
            cvmx_write_csr(CVMX_MIO_UARTX_THR(0), '\r');
        cvmx_write_csr(CVMX_MIO_UARTX_THR(0), *ptr++);
    }
}