/*-
 * Copyright (c) 2003 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Machine-dependent interrupt code for x86.  For x86, we have to deal
 * with different PICs, so we use the passed-in vector to look up an
 * interrupt source associated with that vector.  The interrupt source
 * describes which PIC the source belongs to and includes methods to
 * handle that source.
 */

#include "opt_atpic.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <machine/clock.h>
#include <machine/intr_machdep.h>
#include <machine/smp.h>
#ifdef DDB
#include <ddb/ddb.h>
#endif

#ifndef DEV_ATPIC
#include <machine/segments.h>
#include <machine/frame.h>
#include <dev/ic/i8259.h>
#include <x86/isa/icu.h>
#ifdef PC98
#include <pc98/cbus/cbus.h>
#else
#include <x86/isa/isa.h>
#endif
#endif

#define	MAX_STRAY_LOG	5

typedef void (*mask_fn)(void *);

static int intrcnt_index;
static struct intsrc *interrupt_sources[NUM_IO_INTS];
static struct mtx intr_table_lock;
static struct mtx intrcnt_lock;
static TAILQ_HEAD(pics_head, pic) pics;

#ifdef SMP
static int assign_cpu;
#endif

u_long intrcnt[INTRCNT_COUNT];
char intrnames[INTRCNT_COUNT * (MAXCOMLEN + 1)];
size_t sintrcnt = sizeof(intrcnt);
size_t sintrnames = sizeof(intrnames);

static int	intr_assign_cpu(void *arg, u_char cpu);
static void	intr_disable_src(void *arg);
static void	intr_init(void *__dummy);
static int	intr_pic_registered(struct pic *pic);
static void	intrcnt_setname(const char *name, int index);
static void	intrcnt_updatename(struct intsrc *is);
static void	intrcnt_register(struct intsrc *is);

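/*
 * Return non-zero if the given PIC has already been registered via
 * intr_register_pic().
 */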
static int
intr_pic_registered(struct pic *pic)
{
	struct pic *p;

	TAILQ_FOREACH(p, &pics, pics) {
		if (p == pic)
			return (1);
	}
	return (0);
}

/*
 * Register a new interrupt controller (PIC).  This is to support suspend
 * and resume where we suspend/resume controllers rather than individual
 * sources.  This also allows controllers with no active sources (such as
 * 8259As in a system using the APICs) to participate in suspend and resume.
 */
int
intr_register_pic(struct pic *pic)
{
	int error;

	mtx_lock(&intr_table_lock);
	if (intr_pic_registered(pic))
		error = EBUSY;
	else {
		TAILQ_INSERT_TAIL(&pics, pic, pics);
		error = 0;
	}
	mtx_unlock(&intr_table_lock);
	return (error);
}

/*
 * Register a new interrupt source with the global interrupt system.
 * Interrupts must be disabled globally when this function is called.
 */
int
intr_register_source(struct intsrc *isrc)
{
	int error, vector;

	KASSERT(intr_pic_registered(isrc->is_pic), ("unregistered PIC"));
	vector = isrc->is_pic->pic_vector(isrc);
	if (interrupt_sources[vector] != NULL)
		return (EEXIST);
	error = intr_event_create(&isrc->is_event, isrc, 0, vector,
	    intr_disable_src, (mask_fn)isrc->is_pic->pic_enable_source,
	    (mask_fn)isrc->is_pic->pic_eoi_source, intr_assign_cpu, "irq%d:",
	    vector);
	if (error)
		return (error);
	mtx_lock(&intr_table_lock);
	if (interrupt_sources[vector] != NULL) {
		mtx_unlock(&intr_table_lock);
		intr_event_destroy(isrc->is_event);
		return (EEXIST);
	}
	intrcnt_register(isrc);
	interrupt_sources[vector] = isrc;
	isrc->is_handlers = 0;
	mtx_unlock(&intr_table_lock);
	return (0);
}

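/*
 * Map an interrupt vector to its registered interrupt source.  Returns
 * NULL if no source has been registered for that vector.
 */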
struct intsrc *
intr_lookup_source(int vector)
{

	return (interrupt_sources[vector]);
}

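/*
 * Add a filter and/or threaded handler to the interrupt event for the
 * given vector.  When the first handler is attached to a source, the
 * interrupt is enabled and unmasked at its PIC.  Most drivers reach
 * this indirectly through bus_setup_intr(9) rather than calling it
 * directly; a direct call might look roughly like the sketch below
 * (the "foo" names are purely illustrative):
 *
 *	void *cookie;
 *	error = intr_add_handler("foo", vector, NULL, foo_intr, sc,
 *	    INTR_TYPE_MISC | INTR_MPSAFE, &cookie);
 */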
int
intr_add_handler(const char *name, int vector, driver_filter_t filter,
    driver_intr_t handler, void *arg, enum intr_type flags, void **cookiep)
{
	struct intsrc *isrc;
	int error;

	isrc = intr_lookup_source(vector);
	if (isrc == NULL)
		return (EINVAL);
	error = intr_event_add_handler(isrc->is_event, name, filter, handler,
	    arg, intr_priority(flags), flags, cookiep);
	if (error == 0) {
		mtx_lock(&intr_table_lock);
		intrcnt_updatename(isrc);
		isrc->is_handlers++;
		if (isrc->is_handlers == 1) {
			isrc->is_pic->pic_enable_intr(isrc);
			isrc->is_pic->pic_enable_source(isrc);
		}
		mtx_unlock(&intr_table_lock);
	}
	return (error);
}

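/*
 * Remove a handler previously installed by intr_add_handler().  When the
 * last handler for a source is removed, the source is masked and the
 * interrupt is disabled at its PIC.
 */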
int
intr_remove_handler(void *cookie)
{
	struct intsrc *isrc;
	int error;

	isrc = intr_handler_source(cookie);
	error = intr_event_remove_handler(cookie);
	if (error == 0) {
		mtx_lock(&intr_table_lock);
		isrc->is_handlers--;
		if (isrc->is_handlers == 0) {
			isrc->is_pic->pic_disable_source(isrc, PIC_NO_EOI);
			isrc->is_pic->pic_disable_intr(isrc);
		}
		intrcnt_updatename(isrc);
		mtx_unlock(&intr_table_lock);
	}
	return (error);
}

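/*
 * Pass a trigger mode and polarity change for a vector down to the PIC
 * that owns its interrupt source.
 */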
int
intr_config_intr(int vector, enum intr_trigger trig, enum intr_polarity pol)
{
	struct intsrc *isrc;

	isrc = intr_lookup_source(vector);
	if (isrc == NULL)
		return (EINVAL);
	return (isrc->is_pic->pic_config_intr(isrc, trig, pol));
}

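/*
 * Pre-ithread hook passed to intr_event_create(): mask the source and
 * EOI it so that it cannot re-trigger while its threaded handlers run.
 */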
static void
intr_disable_src(void *arg)
{
	struct intsrc *isrc;

	isrc = arg;
	isrc->is_pic->pic_disable_source(isrc, PIC_EOI);
}

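/*
 * Dispatch an interrupt: bump the counters and hand the source's event
 * off to the generic intr_event code.  Called from the low-level
 * interrupt entry code with the source that fired and the trap frame.
 */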
void
intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame)
{
	struct intr_event *ie;
	int vector;

	/*
	 * We count software interrupts when we process them.  The
	 * code here follows previous practice, but there's an
	 * argument for counting hardware interrupts when they're
	 * processed too.
	 */
	(*isrc->is_count)++;
	PCPU_INC(cnt.v_intr);

	ie = isrc->is_event;

	/*
	 * XXX: We assume that IRQ 0 is only used for the ISA timer
	 * device (clk).
	 */
	vector = isrc->is_pic->pic_vector(isrc);
	if (vector == 0)
		clkintr_pending = 1;

	/*
	 * For stray interrupts, mask and EOI the source, bump the
	 * stray count, and log the condition.
	 */
	if (intr_event_handle(ie, frame) != 0) {
		isrc->is_pic->pic_disable_source(isrc, PIC_EOI);
		(*isrc->is_straycount)++;
		if (*isrc->is_straycount < MAX_STRAY_LOG)
			log(LOG_ERR, "stray irq%d\n", vector);
		else if (*isrc->is_straycount == MAX_STRAY_LOG)
			log(LOG_CRIT,
			    "too many stray irq %d's: not logging anymore\n",
			    vector);
	}
}

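/*
 * Resume all registered interrupt controllers in registration order.
 * If the kernel was built without atpic(4), the 8259As are still reset
 * here so they stay quiet after a resume.
 */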
void
intr_resume(void)
{
	struct pic *pic;

#ifndef DEV_ATPIC
	atpic_reset();
#endif
	mtx_lock(&intr_table_lock);
	TAILQ_FOREACH(pic, &pics, pics) {
		if (pic->pic_resume != NULL)
			pic->pic_resume(pic);
	}
	mtx_unlock(&intr_table_lock);
}

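/*
 * Suspend all registered interrupt controllers in reverse registration
 * order, mirroring intr_resume().
 */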
void
intr_suspend(void)
{
	struct pic *pic;

	mtx_lock(&intr_table_lock);
	TAILQ_FOREACH_REVERSE(pic, &pics, pics_head, pics) {
		if (pic->pic_suspend != NULL)
			pic->pic_suspend(pic);
	}
	mtx_unlock(&intr_table_lock);
}

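/*
 * Callback handed to intr_event_create() for binding a source to a CPU:
 * translate the logical CPU number into a local APIC ID and let the PIC
 * reroute the source.
 */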
static int
intr_assign_cpu(void *arg, u_char cpu)
{
#ifdef SMP
	struct intsrc *isrc;
	int error;

	/*
	 * Don't do anything during early boot.  We will pick up the
	 * assignment once the APs are started.
	 */
	if (assign_cpu && cpu != NOCPU) {
		isrc = arg;
		mtx_lock(&intr_table_lock);
		error = isrc->is_pic->pic_assign_cpu(isrc, cpu_apic_ids[cpu]);
		mtx_unlock(&intr_table_lock);
	} else
		error = 0;
	return (error);
#else
	return (EOPNOTSUPP);
#endif
}

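/*
 * Store a name in the intrnames[] slot for the given index, left-justified
 * and padded to MAXCOMLEN characters so that userland tools such as
 * vmstat(8) see fixed-width entries.
 */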
static void
intrcnt_setname(const char *name, int index)
{

	snprintf(intrnames + (MAXCOMLEN + 1) * index, MAXCOMLEN + 1, "%-*s",
	    MAXCOMLEN, name);
}

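/*
 * Refresh a source's intrnames[] entry from its event's full name, e.g.
 * after a handler has been added, removed, or described.
 */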
static void
intrcnt_updatename(struct intsrc *is)
{

	intrcnt_setname(is->is_event->ie_fullname, is->is_index);
}

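/*
 * Assign a new interrupt source a pair of intrcnt[]/intrnames[] slots:
 * one for normally handled interrupts and one for strays.
 */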
static void
intrcnt_register(struct intsrc *is)
{
	char straystr[MAXCOMLEN + 1];

	KASSERT(is->is_event != NULL, ("%s: isrc with no event", __func__));
	mtx_lock_spin(&intrcnt_lock);
	is->is_index = intrcnt_index;
	intrcnt_index += 2;
	snprintf(straystr, MAXCOMLEN + 1, "stray irq%d",
	    is->is_pic->pic_vector(is));
	intrcnt_updatename(is);
	is->is_count = &intrcnt[is->is_index];
	intrcnt_setname(straystr, is->is_index + 1);
	is->is_straycount = &intrcnt[is->is_index + 1];
	mtx_unlock_spin(&intrcnt_lock);
}

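/*
 * Hand out a single named statistics counter that is not tied to an
 * interrupt source (used, for example, for IPI counters).
 */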
void
intrcnt_add(const char *name, u_long **countp)
{

	mtx_lock_spin(&intrcnt_lock);
	*countp = &intrcnt[intrcnt_index];
	intrcnt_setname(name, intrcnt_index);
	intrcnt_index++;
	mtx_unlock_spin(&intrcnt_lock);
}

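/*
 * Early initialization run via SYSINIT at SI_SUB_INTR: reserve counter
 * slot 0 for unidentified interrupts and set up the PIC list and locks.
 */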
static void
intr_init(void *dummy __unused)
{

	intrcnt_setname("???", 0);
	intrcnt_index = 1;
	TAILQ_INIT(&pics);
	mtx_init(&intr_table_lock, "intr sources", NULL, MTX_DEF);
	mtx_init(&intrcnt_lock, "intrcnt", NULL, MTX_SPIN);
}
SYSINIT(intr_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_init, NULL);

#ifndef DEV_ATPIC
/* Initialize the two 8259A's to a known-good shutdown state. */
void
atpic_reset(void)
{

	outb(IO_ICU1, ICW1_RESET | ICW1_IC4);
	outb(IO_ICU1 + ICU_IMR_OFFSET, IDT_IO_INTS);
	outb(IO_ICU1 + ICU_IMR_OFFSET, IRQ_MASK(ICU_SLAVEID));
	outb(IO_ICU1 + ICU_IMR_OFFSET, MASTER_MODE);
	outb(IO_ICU1 + ICU_IMR_OFFSET, 0xff);
	outb(IO_ICU1, OCW3_SEL | OCW3_RR);

	outb(IO_ICU2, ICW1_RESET | ICW1_IC4);
	outb(IO_ICU2 + ICU_IMR_OFFSET, IDT_IO_INTS + 8);
	outb(IO_ICU2 + ICU_IMR_OFFSET, ICU_SLAVEID);
	outb(IO_ICU2 + ICU_IMR_OFFSET, SLAVE_MODE);
	outb(IO_ICU2 + ICU_IMR_OFFSET, 0xff);
	outb(IO_ICU2, OCW3_SEL | OCW3_RR);
}
#endif

/* Add a description to an active interrupt handler. */
int
intr_describe(u_int vector, void *ih, const char *descr)
{
	struct intsrc *isrc;
	int error;

	isrc = intr_lookup_source(vector);
	if (isrc == NULL)
		return (EINVAL);
	error = intr_event_describe_handler(isrc->is_event, ih, descr);
	if (error)
		return (error);
	intrcnt_updatename(isrc);
	return (0);
}

#ifdef DDB
/*
 * Dump data about interrupt handlers
 */
DB_SHOW_COMMAND(irqs, db_show_irqs)
{
	struct intsrc **isrc;
	int i, verbose;

	if (strcmp(modif, "v") == 0)
		verbose = 1;
	else
		verbose = 0;
	isrc = interrupt_sources;
	for (i = 0; i < NUM_IO_INTS && !db_pager_quit; i++, isrc++)
		if (*isrc != NULL)
			db_dump_intr_event((*isrc)->is_event, verbose);
}
#endif


#ifdef SMP
/*
 * Support for balancing interrupt sources across CPUs.  For now we just
 * allocate CPUs round-robin.
 */

static cpuset_t intr_cpus;
static int current_cpu;

/*
 * Return the CPU that the next interrupt source should use.  For now
 * this just returns the next local APIC according to round-robin.
 */
u_int
intr_next_cpu(void)
{
	u_int apic_id;

	/* Leave all interrupts on the BSP during boot. */
	if (!assign_cpu)
		return (PCPU_GET(apic_id));

	mtx_lock_spin(&icu_lock);
	apic_id = cpu_apic_ids[current_cpu];
	do {
		current_cpu++;
		if (current_cpu > mp_maxid)
			current_cpu = 0;
	} while (!CPU_ISSET(current_cpu, &intr_cpus));
	mtx_unlock_spin(&icu_lock);
	return (apic_id);
}

/* Attempt to bind the specified IRQ to the specified CPU. */
int
intr_bind(u_int vector, u_char cpu)
{
	struct intsrc *isrc;

	isrc = intr_lookup_source(vector);
	if (isrc == NULL)
		return (EINVAL);
	return (intr_event_bind(isrc->is_event, cpu));
}

/*
 * Add a CPU to our mask of valid CPUs that can be destinations of
 * interrupts.
 */
void
intr_add_cpu(u_int cpu)
{

	if (cpu >= MAXCPU)
		panic("%s: Invalid CPU ID", __func__);
	if (bootverbose)
		printf("INTR: Adding local APIC %d as a target\n",
		    cpu_apic_ids[cpu]);

	CPU_SET(cpu, &intr_cpus);
}

/*
 * Distribute all the interrupt sources among the available CPUs once the
 * APs have been launched.
 */
static void
intr_shuffle_irqs(void *arg __unused)
{
	struct intsrc *isrc;
	int i;

#ifdef XEN
	/*
	 * Doesn't work yet
	 */
	return;
#endif

	/* Don't bother on UP. */
	if (mp_ncpus == 1)
		return;

	/* Round-robin assign a CPU to each enabled source. */
	mtx_lock(&intr_table_lock);
	assign_cpu = 1;
	for (i = 0; i < NUM_IO_INTS; i++) {
		isrc = interrupt_sources[i];
		if (isrc != NULL && isrc->is_handlers > 0) {
			/*
			 * If this event is already bound to a CPU,
			 * then assign the source to that CPU instead
			 * of picking one via round-robin.  Note that
			 * this is careful to only advance the
			 * round-robin if the CPU assignment succeeds.
			 */
			if (isrc->is_event->ie_cpu != NOCPU)
				(void)isrc->is_pic->pic_assign_cpu(isrc,
				    cpu_apic_ids[isrc->is_event->ie_cpu]);
			else if (isrc->is_pic->pic_assign_cpu(isrc,
				cpu_apic_ids[current_cpu]) == 0)
				(void)intr_next_cpu();

		}
	}
	mtx_unlock(&intr_table_lock);
}
SYSINIT(intr_shuffle_irqs, SI_SUB_SMP, SI_ORDER_SECOND, intr_shuffle_irqs,
    NULL);
#else
/*
 * Always route interrupts to the current processor in the UP case.
 */
u_int
intr_next_cpu(void)
{

	return (PCPU_GET(apic_id));
}

/* Use an empty stub for compatibility. */
void
intr_add_cpu(u_int cpu __unused)
{

}
#endif