/*
 * Copyright 2011, Michael Lotz, mmlr@mlotz.ch.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include <int.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <arch/debug_console.h>
#include <arch/int.h>
#include <boot/kernel_args.h>
#include <elf.h>
#include <util/AutoLock.h>
#include <util/kqueue.h>
#include <smp.h>

#include "kernel_debug_config.h"


//#define TRACE_INT
#ifdef TRACE_INT
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


struct io_handler {
	struct io_handler	*next;
	interrupt_handler	func;
	void				*data;
	bool				use_enable_counter;
	bool				no_handled_info;
#if DEBUG_INTERRUPTS
	int64				handled_count;
#endif
};

struct io_vector {
	struct io_handler	*handler_list;
	spinlock			vector_lock;
	int32				enable_count;
	bool				no_lock_vector;
#if DEBUG_INTERRUPTS
	int64				handled_count;
	int64				unhandled_count;
	int					trigger_count;
	int					ignored_count;
#endif
};

static struct io_vector sVectors[NUM_IO_VECTORS];
static bool sAllocatedIOInterruptVectors[NUM_IO_VECTORS];
static mutex sIOInterruptVectorAllocationLock
	= MUTEX_INITIALIZER("io_interrupt_vector_allocation");


#if DEBUG_INTERRUPTS
static int
dump_int_statistics(int argc, char **argv)
{
	int i;
	for (i = 0; i < NUM_IO_VECTORS; i++) {
		struct io_handler *io;

		if (!B_SPINLOCK_IS_LOCKED(&sVectors[i].vector_lock)
			&& sVectors[i].enable_count == 0
			&& sVectors[i].handled_count == 0
			&& sVectors[i].unhandled_count == 0
			&& sVectors[i].handler_list == NULL)
			continue;

		kprintf("int %3d, enabled %" B_PRId32 ", handled %8" B_PRId64 ", "
			"unhandled %8" B_PRId64 "%s%s\n", i, sVectors[i].enable_count,
			sVectors[i].handled_count, sVectors[i].unhandled_count,
			B_SPINLOCK_IS_LOCKED(&sVectors[i].vector_lock) ? ", ACTIVE" : "",
			sVectors[i].handler_list == NULL ? ", no handler" : "");

		for (io = sVectors[i].handler_list; io != NULL; io = io->next) {
			const char *symbol, *imageName;
			bool exactMatch;

			status_t error = elf_debug_lookup_symbol_address((addr_t)io->func,
				NULL, &symbol, &imageName, &exactMatch);
			if (error == B_OK && exactMatch) {
				if (strchr(imageName, '/') != NULL)
					imageName = strrchr(imageName, '/') + 1;

				int length = 4 + strlen(imageName);
				kprintf("   %s:%-*s (%p)", imageName, 45 - length, symbol,
					io->func);
			} else
				kprintf("\t\t\t\t\t   func %p", io->func);

			kprintf(", data %p, handled ", io->data);
			if (io->no_handled_info)
				kprintf("<unknown>\n");
			else
				kprintf("%8" B_PRId64 "\n", io->handled_count);
		}

		kprintf("\n");
	}
	return 0;
}
#endif


//	#pragma mark - private kernel API


bool
interrupts_enabled(void)
{
	return arch_int_are_interrupts_enabled();
}


status_t
int_init(kernel_args* args)
{
	TRACE(("init_int_handlers: entry\n"));

	return arch_int_init(args);
}


status_t
int_init_post_vm(kernel_args* args)
{
	int i;

	/* initialize the vector list */
	for (i = 0; i < NUM_IO_VECTORS; i++) {
		B_INITIALIZE_SPINLOCK(&sVectors[i].vector_lock);
		sVectors[i].enable_count = 0;
		sVectors[i].no_lock_vector = false;
#if DEBUG_INTERRUPTS
		sVectors[i].handled_count = 0;
		sVectors[i].unhandled_count = 0;
		sVectors[i].trigger_count = 0;
		sVectors[i].ignored_count = 0;
#endif
		sVectors[i].handler_list = NULL;
	}

#if DEBUG_INTERRUPTS
	add_debugger_command("ints", &dump_int_statistics,
		"list interrupt statistics");
#endif

	return arch_int_init_post_vm(args);
}


status_t
int_init_io(kernel_args* args)
{
	return arch_int_init_io(args);
}


status_t
int_init_post_device_manager(kernel_args* args)
{
	arch_debug_install_interrupt_handlers();

	return arch_int_init_post_device_manager(args);
}


/*!	Actually process an interrupt via the handlers registered for that
	vector (IRQ).
*/
int
int_io_interrupt_handler(int vector, bool levelTriggered)
{
	int status = B_UNHANDLED_INTERRUPT;
	struct io_handler* io;
	bool handled = false;

	if (!sVectors[vector].no_lock_vector)
		acquire_spinlock(&sVectors[vector].vector_lock);

#if !DEBUG_INTERRUPTS
	// The list can be empty at this point
	if (sVectors[vector].handler_list == NULL) {
		dprintf("unhandled io interrupt %d\n", vector);
		if (!sVectors[vector].no_lock_vector)
			release_spinlock(&sVectors[vector].vector_lock);
		return B_UNHANDLED_INTERRUPT;
	}
#endif

	// For level-triggered interrupts, we actually evaluate the return
	// value (i.e. B_HANDLED_INTERRUPT) to decide whether or not we
	// want to call another interrupt handler.
	// For edge-triggered interrupts, however, we always need to call
	// all handlers, as multiple interrupts cannot be identified. We
	// still make sure the return code of this function reflects
	// whatever the drivers reported.

	for (io = sVectors[vector].handler_list; io != NULL; io = io->next) {
		status = io->func(io->data);

#if DEBUG_INTERRUPTS
		if (status != B_UNHANDLED_INTERRUPT)
			io->handled_count++;
#endif
		if (levelTriggered && status != B_UNHANDLED_INTERRUPT)
			break;

		if (status == B_HANDLED_INTERRUPT || status == B_INVOKE_SCHEDULER)
			handled = true;
	}

#if DEBUG_INTERRUPTS
	sVectors[vector].trigger_count++;
	if (status != B_UNHANDLED_INTERRUPT || handled) {
		sVectors[vector].handled_count++;
	} else {
		sVectors[vector].unhandled_count++;
		sVectors[vector].ignored_count++;
	}

	if (sVectors[vector].trigger_count > 10000) {
		if (sVectors[vector].ignored_count > 9900) {
			struct io_handler *last = sVectors[vector].handler_list;
			while (last && last->next)
				last = last->next;

			if (last != NULL && last->no_handled_info) {
				// We have an interrupt handler installed that does not
				// know whether or not it has actually handled the interrupt,
				// so this unhandled count is inaccurate and we can't just
				// disable the vector.
			} else {
				if (sVectors[vector].handler_list == NULL
					|| sVectors[vector].handler_list->next == NULL) {
					// this interrupt vector is not shared, disable it
					sVectors[vector].enable_count = -100;
					arch_int_disable_io_interrupt(vector);
					dprintf("Disabling unhandled io interrupt %d\n", vector);
				} else {
					// this is a shared interrupt vector, we cannot just disable it
					dprintf("More than 99%% of the interrupts on vector %d "
						"are unhandled\n", vector);
				}
			}
		}

		sVectors[vector].trigger_count = 0;
		sVectors[vector].ignored_count = 0;
	}
#endif

	if (!sVectors[vector].no_lock_vector)
		release_spinlock(&sVectors[vector].vector_lock);

	if (levelTriggered)
		return status;

	// edge triggered return value

	if (handled)
		return B_HANDLED_INTERRUPT;

	return B_UNHANDLED_INTERRUPT;
}
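
/*	To illustrate the dispatch logic above: a handler on a shared,
	level-triggered vector is expected to check whether its own device
	actually raised the interrupt and to return B_UNHANDLED_INTERRUPT
	otherwise, so that the loop can move on to the next handler. This is a
	minimal sketch only; the device structure, its status register and the
	helper functions are hypothetical and not part of this file.

		static int32
		my_device_interrupt(void *data)
		{
			struct my_device *device = (struct my_device *)data;

			// hypothetical device-specific check: not our interrupt, so let
			// the other handlers on this vector have a look
			if ((read_device_status(device) & MY_DEVICE_IRQ_PENDING) == 0)
				return B_UNHANDLED_INTERRUPT;

			// acknowledge the interrupt and do the minimal amount of work;
			// return B_INVOKE_SCHEDULER instead if a waiting thread was
			// unblocked
			acknowledge_device_interrupt(device);
			return B_HANDLED_INTERRUPT;
		}
*/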


//	#pragma mark - public API


#undef disable_interrupts
#undef restore_interrupts


cpu_status
disable_interrupts(void)
{
	return arch_int_disable_interrupts();
}


void
restore_interrupts(cpu_status status)
{
	arch_int_restore_interrupts(status);
}
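
/*	The canonical pattern for using the two calls above, which this file
	itself follows further down: save the previous interrupt state, do the
	critical work (usually with a spinlock held), then restore exactly what
	was saved instead of unconditionally re-enabling interrupts. A minimal
	sketch, where "lock" stands for whatever spinlock protects the shared
	data:

		cpu_status state = disable_interrupts();
		acquire_spinlock(&lock);

		// ... touch data that interrupt handlers may access ...

		release_spinlock(&lock);
		restore_interrupts(state);

	Restoring the saved state keeps nested critical sections correct, since
	interrupts may already have been disabled by a caller.
*/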


/*!	Install a handler to be called when an interrupt is triggered
	for the given interrupt number, with \a data passed as its argument.
*/
status_t
install_io_interrupt_handler(long vector, interrupt_handler handler, void *data,
	ulong flags)
{
	struct io_handler *io = NULL;
	cpu_status state;

	if (vector < 0 || vector >= NUM_IO_VECTORS)
		return B_BAD_VALUE;

	io = (struct io_handler *)malloc(sizeof(struct io_handler));
	if (io == NULL)
		return B_NO_MEMORY;

	arch_debug_remove_interrupt_handler(vector);
		// There might be a temporary debug interrupt installed on this
		// vector that should be removed now.

	io->func = handler;
	io->data = data;
	io->use_enable_counter = (flags & B_NO_ENABLE_COUNTER) == 0;
	io->no_handled_info = (flags & B_NO_HANDLED_INFO) != 0;
#if DEBUG_INTERRUPTS
	io->handled_count = 0LL;
#endif

	// Disable interrupts, acquire the spinlock for this vector only,
	// and then insert the handler
	state = disable_interrupts();
	acquire_spinlock(&sVectors[vector].vector_lock);

	if ((flags & B_NO_HANDLED_INFO) != 0
		&& sVectors[vector].handler_list != NULL) {
		// The driver registering this interrupt handler doesn't know
		// whether or not it actually handled the interrupt after the
		// handler returns. This is incompatible with shared interrupts,
		// as we'd potentially steal interrupts from other handlers,
		// resulting in interrupt storms. Therefore we enqueue this interrupt
		// handler as the very last one, meaning all other handlers will
		// get a chance to handle the interrupt first.
		struct io_handler *last = sVectors[vector].handler_list;
		while (last->next)
			last = last->next;

		io->next = NULL;
		last->next = io;
	} else {
		// A normal interrupt handler, just add it to the head of the list.
		io->next = sVectors[vector].handler_list;
		sVectors[vector].handler_list = io;
	}

	// If B_NO_ENABLE_COUNTER is set, we're asked not to alter whether
	// the interrupt should be enabled or not
	if (io->use_enable_counter) {
		if (sVectors[vector].enable_count++ == 0)
			arch_int_enable_io_interrupt(vector);
	}

	// If B_NO_LOCK_VECTOR is specified, this is a vector that is not supposed
	// to have multiple handlers and does not require locking of the vector
	// when entering the handler. For example, this is used by internally
	// registered interrupt handlers like the ones handling local APIC
	// interrupts that may run concurrently on multiple CPUs. Locking with a
	// spinlock would in that case defeat the purpose, as it would serialize
	// calling the handlers in parallel on different CPUs.
	if (flags & B_NO_LOCK_VECTOR)
		sVectors[vector].no_lock_vector = true;

	release_spinlock(&sVectors[vector].vector_lock);
	restore_interrupts(state);

	return B_OK;
}
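
/*	A typical lifecycle, as a driver might use it: install the handler once
	the device is set up, and remove it again on teardown with the exact same
	vector, function and data, since remove_io_interrupt_handler() (below)
	matches on both function and data. This is an illustrative sketch only;
	"device", its irq_vector field and my_device_interrupt() are hypothetical
	and not defined in this file.

		status_t status = install_io_interrupt_handler(device->irq_vector,
			&my_device_interrupt, device, 0);
		if (status != B_OK)
			return status;

		// ... the device operates, my_device_interrupt() is called on its
		// interrupts ...

		remove_io_interrupt_handler(device->irq_vector, &my_device_interrupt,
			device);
*/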


/*!	Remove a previously installed interrupt handler */
status_t
remove_io_interrupt_handler(long vector, interrupt_handler handler, void *data)
{
	status_t status = B_BAD_VALUE;
	struct io_handler *io = NULL;
	struct io_handler *last = NULL;
	cpu_status state;

	if (vector < 0 || vector >= NUM_IO_VECTORS)
		return B_BAD_VALUE;

	/* lock the structures down so they are not modified while we search */
	state = disable_interrupts();
	acquire_spinlock(&sVectors[vector].vector_lock);

	/* Loop through the installed handlers and try to find a match.
	 * We go forward through the list, which means we start with the
	 * most recently added handlers.
	 */
	for (io = sVectors[vector].handler_list; io != NULL; io = io->next) {
		/* we have to match both function and data */
		if (io->func == handler && io->data == data) {
			if (last != NULL)
				last->next = io->next;
			else
				sVectors[vector].handler_list = io->next;

			// Check if we need to disable the interrupt
			if (io->use_enable_counter && --sVectors[vector].enable_count == 0)
				arch_int_disable_io_interrupt(vector);

			status = B_OK;
			break;
		}

		last = io;
	}

	release_spinlock(&sVectors[vector].vector_lock);
	restore_interrupts(state);

	// if the handler could be found and removed, we still have to free it
	if (status == B_OK)
		free(io);

	return status;
}


/*!	Mark \a count contiguous interrupts starting at \a startVector as in use.
	This will prevent them from being allocated by others. Only use this when
	the reserved range is hardwired to the given vectors, otherwise allocate
	vectors using allocate_io_interrupt_vectors() instead.
*/
status_t
reserve_io_interrupt_vectors(long count, long startVector)
{
	MutexLocker locker(&sIOInterruptVectorAllocationLock);

	for (long i = 0; i < count; i++) {
		if (sAllocatedIOInterruptVectors[startVector + i]) {
			panic("reserved interrupt vector range %ld-%ld overlaps already "
				"allocated vector %ld", startVector, startVector + count - 1,
				startVector + i);
			free_io_interrupt_vectors(i, startVector);
			return B_BUSY;
		}

		sAllocatedIOInterruptVectors[startVector + i] = true;
	}

	dprintf("reserve_io_interrupt_vectors: reserved %ld vectors starting "
		"from %ld\n", count, startVector);
	return B_OK;
}
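
/*	For example, a driver whose hardware is hardwired to fixed vectors might
	reserve them during early initialization so that later calls to
	allocate_io_interrupt_vectors() can never hand them out. The vector
	numbers used here are arbitrary placeholders, not values defined by this
	file:

		// the device is wired to vectors 9 through 11
		status_t status = reserve_io_interrupt_vectors(3, 9);
		if (status != B_OK)
			return status;
*/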


/*!	Allocate \a count contiguous interrupt vectors. The vectors are picked
	from the range that has not been reserved or allocated yet, so that they
	do not overlap with any other reserved vectors. The first vector to be
	used is returned in \a startVector on success.
*/
status_t
allocate_io_interrupt_vectors(long count, long *startVector)
{
	MutexLocker locker(&sIOInterruptVectorAllocationLock);

	long vector = 0;
	bool runFound = false;
	for (long i = 0; i < NUM_IO_VECTORS - (count - 1); i++) {
		if (sAllocatedIOInterruptVectors[i])
			continue;

		vector = i;
		runFound = true;
		for (uint16 j = 1; j < count; j++) {
			if (sAllocatedIOInterruptVectors[i + j]) {
				runFound = false;
				i += j;
				break;
			}
		}

		if (runFound)
			break;
	}

	if (!runFound) {
		dprintf("found no free vectors to allocate %ld io interrupts\n", count);
		return B_NO_MEMORY;
	}

	for (long i = 0; i < count; i++)
		sAllocatedIOInterruptVectors[vector + i] = true;

	*startVector = vector;
	dprintf("allocate_io_interrupt_vectors: allocated %ld vectors starting "
		"from %ld\n", count, vector);
	return B_OK;
}
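
/*	Typical usage by a bus manager that needs a run of freely assignable
	vectors (message-signaled interrupts are one example): allocate the run,
	install handlers and point the hardware at the resulting vectors, and
	later release the same range again with free_io_interrupt_vectors(). The
	count of 8 is an arbitrary placeholder:

		long startVector;
		status_t status = allocate_io_interrupt_vectors(8, &startVector);
		if (status != B_OK)
			return status;

		// ... install handlers on startVector .. startVector + 7 and
		// configure the hardware to raise them ...

		free_io_interrupt_vectors(8, startVector);
*/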


/*!	Free/unreserve interrupt vectors previously allocated with the
	{reserve|allocate}_io_interrupt_vectors() functions. The \a count and
	\a startVector do not have to match the original calls exactly, so a
	vector range can also be freed partially.
*/
void
free_io_interrupt_vectors(long count, long startVector)
{
	if (startVector + count > NUM_IO_VECTORS) {
		panic("invalid start vector %ld or count %ld supplied to "
			"free_io_interrupt_vectors\n", startVector, count);
		return;
	}

	dprintf("free_io_interrupt_vectors: freeing %ld vectors starting "
		"from %ld\n", count, startVector);

	MutexLocker locker(sIOInterruptVectorAllocationLock);
	for (long i = 0; i < count; i++) {
		if (!sAllocatedIOInterruptVectors[startVector + i]) {
			panic("io interrupt vector %ld was not allocated\n",
				startVector + i);
		}

		sAllocatedIOInterruptVectors[startVector + i] = false;
	}
}