/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/uaccess.h>
#include <hv/drv_pcie_rc_intf.h>
#include <arch/spr_def.h>
#include <asm/traps.h>

/* Bit-flag stored in irq_desc->chip_data to indicate HW-cleared irqs. */
#define IS_HW_CLEARED 1

/*
 * The set of interrupts we enable for raw_local_irq_enable().
 * This is initialized to contain just a single sentinel interrupt
 * that the kernel doesn't actually use.  During kernel init,
 * interrupts are added as the kernel gets prepared to support them.
 * NOTE: we could probably initialize them all statically up front.
 */
DEFINE_PER_CPU(unsigned long long, interrupts_enabled_mask) =
	INITIAL_INTERRUPTS_ENABLED;
EXPORT_PER_CPU_SYMBOL(interrupts_enabled_mask);

/* Define per-tile device interrupt statistics state. */
DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
EXPORT_PER_CPU_SYMBOL(irq_stat);

/*
 * Define per-tile irq disable mask; the hardware/HV only has a single
 * mask that we use to implement both masking and disabling.
 */
static DEFINE_PER_CPU(unsigned long, irq_disable_mask)
	____cacheline_internodealigned_in_smp;

/*
 * Per-tile IRQ nesting depth.  Used to make sure we enable newly
 * enabled IRQs before exiting the outermost interrupt.
 */
static DEFINE_PER_CPU(int, irq_depth);

/* State for allocating IRQs on Gx. */
#if CHIP_HAS_IPI()
static unsigned long available_irqs = ~(1UL << IRQ_RESCHEDULE);
static DEFINE_SPINLOCK(available_irqs_lock);
#endif

#if CHIP_HAS_IPI()
/* Use SPRs to manipulate device interrupts. */
#define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_1, irq_mask)
#define unmask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_RESET_1, irq_mask)
#define clear_irqs(irq_mask) __insn_mtspr(SPR_IPI_EVENT_RESET_1, irq_mask)
#else
/* Use HV to manipulate device interrupts. */
#define mask_irqs(irq_mask) hv_disable_intr(irq_mask)
#define unmask_irqs(irq_mask) hv_enable_intr(irq_mask)
#define clear_irqs(irq_mask) hv_clear_intr(irq_mask)
#endif
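/*
 * Note on the masking model (an explanatory sketch inferred from the
 * code below, not authoritative documentation): the single per-tile
 * hardware/HV mask does double duty.  Bits in irq_disable_mask mark
 * interrupts logically disabled via disable_percpu_irq(), while
 * tile_dev_intr() transiently masks whatever it is currently
 * handling so that nested interrupts can't re-deliver them.  The
 * outermost return path then restores delivery for everything not
 * logically disabled, e.g.:
 *
 *	unmask_irqs(~__get_cpu_var(irq_disable_mask));
 */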
/*
 * The interrupt handling path, implemented in terms of HV interrupt
 * emulation on TILE64 and TILEPro, and IPI hardware on TILE-Gx.
 */
void tile_dev_intr(struct pt_regs *regs, int intnum)
{
	int depth = __get_cpu_var(irq_depth)++;
	unsigned long original_irqs;
	unsigned long remaining_irqs;
	struct pt_regs *old_regs;

#if CHIP_HAS_IPI()
	/*
	 * Pending interrupts are listed in an SPR.  We might be
	 * nested, so be sure to only handle irqs that weren't already
	 * masked by a previous interrupt.  Then, mask out the ones
	 * we're going to handle.
	 */
	unsigned long masked = __insn_mfspr(SPR_IPI_MASK_1);
	original_irqs = __insn_mfspr(SPR_IPI_EVENT_1) & ~masked;
	__insn_mtspr(SPR_IPI_MASK_SET_1, original_irqs);
#else
	/*
	 * Hypervisor performs the equivalent of the Gx code above and
	 * then puts the pending interrupt mask into a system save reg
	 * for us to find.
	 */
	original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_1_3);
#endif
	remaining_irqs = original_irqs;

	/* Track time spent here in an interrupt context. */
	old_regs = set_irq_regs(regs);
	irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: less than 1/8th stack free? */
	{
		long sp = stack_pointer - (long) current_thread_info();
		if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
			pr_emerg("tile_dev_intr: "
			       "stack overflow: %ld\n",
			       sp - sizeof(struct thread_info));
			dump_stack();
		}
	}
#endif
	while (remaining_irqs) {
		unsigned long irq = __ffs(remaining_irqs);
		remaining_irqs &= ~(1UL << irq);

		/* Count device irqs; Linux IPIs are counted elsewhere. */
		if (irq != IRQ_RESCHEDULE)
			__get_cpu_var(irq_stat).irq_dev_intr_count++;

		generic_handle_irq(irq);
	}

	/*
	 * If we weren't nested, turn on all enabled interrupts,
	 * including any that were reenabled during interrupt
	 * handling.
	 */
	if (depth == 0)
		unmask_irqs(~__get_cpu_var(irq_disable_mask));

	__get_cpu_var(irq_depth)--;

	/*
	 * Track time spent against the current process again and
	 * process any softirqs if they are waiting.
	 */
	irq_exit();
	set_irq_regs(old_regs);
}


/*
 * Remove an irq from the disabled mask.  If we're in an interrupt
 * context, defer enabling the HW interrupt until we leave.
 */
void enable_percpu_irq(unsigned int irq)
{
	get_cpu_var(irq_disable_mask) &= ~(1UL << irq);
	if (__get_cpu_var(irq_depth) == 0)
		unmask_irqs(1UL << irq);
	put_cpu_var(irq_disable_mask);
}
EXPORT_SYMBOL(enable_percpu_irq);

/*
 * Add an irq to the disabled mask.  We disable the HW interrupt
 * immediately so that there's no possibility of it firing.  If we're
 * in an interrupt context, the return path is careful to avoid
 * unmasking a newly disabled interrupt.
 */
void disable_percpu_irq(unsigned int irq)
{
	get_cpu_var(irq_disable_mask) |= (1UL << irq);
	mask_irqs(1UL << irq);
	put_cpu_var(irq_disable_mask);
}
EXPORT_SYMBOL(disable_percpu_irq);

/* Mask an interrupt. */
static void tile_irq_chip_mask(unsigned int irq)
{
	mask_irqs(1UL << irq);
}

/* Unmask an interrupt. */
static void tile_irq_chip_unmask(unsigned int irq)
{
	unmask_irqs(1UL << irq);
}

/*
 * Clear an interrupt before processing it so that any new assertions
 * will trigger another irq.
 */
static void tile_irq_chip_ack(unsigned int irq)
{
	if ((unsigned long)get_irq_chip_data(irq) != IS_HW_CLEARED)
		clear_irqs(1UL << irq);
}

/*
 * For per-cpu interrupts, we need to avoid unmasking any interrupts
 * that we disabled via disable_percpu_irq().
 */
static void tile_irq_chip_eoi(unsigned int irq)
{
	if (!(__get_cpu_var(irq_disable_mask) & (1UL << irq)))
		unmask_irqs(1UL << irq);
}

static struct irq_chip tile_irq_chip = {
	.typename = "tile_irq_chip",
	.ack = tile_irq_chip_ack,
	.eoi = tile_irq_chip_eoi,
	.mask = tile_irq_chip_mask,
	.unmask = tile_irq_chip_unmask,
};
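/*
 * How these callbacks get invoked (a sketch based on the generic
 * flow handlers of this kernel generation; see kernel/irq/chip.c
 * for the authoritative sequence): handle_level_irq() masks and
 * acks the interrupt before running the action and unmasks it
 * afterwards, while handle_percpu_irq() calls ->ack() before and
 * ->eoi() after the action.  That is why tile_irq_chip_eoi()
 * re-checks irq_disable_mask: a per-cpu interrupt disabled from
 * within its own handler must stay masked when the handler returns.
 */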
void __init init_IRQ(void)
{
	ipi_init();
}

void __cpuinit setup_irq_regs(void)
{
	/* Enable interrupt delivery. */
	unmask_irqs(~0UL);
#if CHIP_HAS_IPI()
	raw_local_irq_unmask(INT_IPI_1);
#endif
}

void tile_irq_activate(unsigned int irq, int tile_irq_type)
{
	/*
	 * We use handle_level_irq() by default because the pending
	 * interrupt vector (whether modeled by the HV on TILE64 and
	 * TILEPro or implemented in hardware on TILE-Gx) has
	 * level-style semantics for each bit.  An interrupt fires
	 * whenever a bit is high, not just at edges.
	 */
	irq_flow_handler_t handle = handle_level_irq;
	if (tile_irq_type == TILE_IRQ_PERCPU)
		handle = handle_percpu_irq;
	set_irq_chip_and_handler(irq, &tile_irq_chip, handle);

	/*
	 * Flag interrupts that are hardware-cleared so that ack()
	 * won't clear them.
	 */
	if (tile_irq_type == TILE_IRQ_HW_CLEAR)
		set_irq_chip_data(irq, (void *)IS_HW_CLEARED);
}
EXPORT_SYMBOL(tile_irq_activate);


void ack_bad_irq(unsigned int irq)
{
	pr_err("unexpected IRQ trap at vector %02x\n", irq);
}

/*
 * Generic, controller-independent functions:
 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
		seq_printf(p, " %14s", irq_desc[i].chip->typename);
		seq_printf(p, "  %s", action->name);

		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	}
	return 0;
}

#if CHIP_HAS_IPI()
int create_irq(void)
{
	unsigned long flags;
	int result;

	spin_lock_irqsave(&available_irqs_lock, flags);
	if (available_irqs == 0)
		result = -ENOMEM;
	else {
		result = __ffs(available_irqs);
		available_irqs &= ~(1UL << result);
		dynamic_irq_init(result);
	}
	spin_unlock_irqrestore(&available_irqs_lock, flags);

	return result;
}
EXPORT_SYMBOL(create_irq);

void destroy_irq(unsigned int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&available_irqs_lock, flags);
	available_irqs |= (1UL << irq);
	dynamic_irq_cleanup(irq);
	spin_unlock_irqrestore(&available_irqs_lock, flags);
}
EXPORT_SYMBOL(destroy_irq);
#endif
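/*
 * Illustrative usage (a hypothetical driver sketch, not part of this
 * file; my_handler and my_dev are placeholders): on TILE-Gx a driver
 * would typically pair create_irq() with tile_irq_activate() and a
 * normal request_irq() call:
 *
 *	int irq = create_irq();
 *	if (irq < 0)
 *		return irq;
 *	tile_irq_activate(irq, TILE_IRQ_PERCPU);
 *	err = request_irq(irq, my_handler, 0, "my_dev", my_dev);
 *	if (err) {
 *		destroy_irq(irq);
 *		return err;
 *	}
 */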