/**
 * \file
 * \brief x86-32 interrupt/exception handling utility functions
 */

/*
 * Copyright (c) 2007, 2008, 2009, 2010, 2011, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
 */

/*********************************************************************
 *
 * Copyright (C) 2003-2004,  Karlsruhe University
 *
 * File path:     glue/v4-amd64/hwirq.h
 * Description:   Macros to define interrupt handler stubs for AMD64
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: hwirq.h,v 1.3 2006/10/19 22:57:35 ud3 Exp $
 *
 ********************************************************************/

#include <kernel.h>
#include <stdio.h>
#include <string.h>
#include <irq.h>
#include <gdb_stub.h>
#include <x86.h>
#include <dispatch.h>
#include <wakeup.h>
#include <arch/x86/pic.h>
#include <arch/x86/apic.h>
#include <barrelfish_kpi/dispatcher_shared_target.h>
#include <asmoffsets.h>
#include <trace/trace.h>
#include <trace_definitions/trace_defs.h>
#include <exec.h>
#include <arch/x86/ipi_notify.h>
#include <arch/x86/timing.h>
#include <arch/x86/syscall.h>
#include <barrelfish_kpi/cpu_arch.h>
#include <kcb.h>
#include <mdb/mdb_tree.h>
/**
 * \brief Define IRQ handler number 'num'.
 *
 * This defines an interrupt handler for vector #num. The way this is done is
 * quite tricky: A block of assembly is emitted, with a label pointing to
 * the beginning of that block. The label is made known as a symbol by
 * having a C function _declaration_ directly in front of the block. The
 * symbol has to be defined extern, so it is global, but its ELF visibility
 * is set to "hidden", so that the symbol does not end up in the GOT. This is
 * very important for keeping the code position-independent.
 *
 * The NOERR/ERR variants depend on whether the hardware delivers an error code.
 */
#define HW_EXCEPTION_NOERR(num)                                         \
    void __attribute__ ((visibility ("hidden"))) hwexc_##num(void);     \
    __asm (                                                             \
           "\t.text                                        \n\t"        \
           "\t.type hwexc_"#num",@function                 \n\t"        \
           "hwexc_"#num":                                  \n\t"        \
           "pushl $0                /* dummy error code */ \n\t"        \
           "pushl $"#num"           /* vector number */    \n\t"        \
           "jmp    hwexc_common     /* common stuff */     \n\t"        \
                                                                        )
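
// For illustration, HW_EXCEPTION_NOERR(0) expands (roughly) to:
//
//   void __attribute__ ((visibility ("hidden"))) hwexc_0(void);
//   __asm (".text\n\t"
//          ".type hwexc_0,@function\n\t"
//          "hwexc_0:\n\t"
//          "pushl $0       /* dummy error code */\n\t"
//          "pushl $0       /* vector number */\n\t"
//          "jmp hwexc_common\n\t");
//
// i.e. a per-vector stub that pushes a fake error code plus its vector
// number and then branches to the shared handler path.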

#define HW_EXCEPTION_ERR(num)                                           \
    void __attribute__ ((visibility ("hidden"))) hwexc_##num(void);     \
    __asm (                                                             \
           "\t.text                                        \n\t"        \
           "\t.type hwexc_"#num",@function                 \n\t"        \
           "hwexc_"#num":                                  \n\t"        \
           "pushl $"#num"           /* vector number */    \n\t"        \
           "jmp    hwexc_common     /* common stuff */     \n\t"        \
                                                                        )

#define XHW_IRQ(num)                                                    \
    void __attribute__ ((visibility ("hidden"))) hwirq_##num(void);     \
    __asm (                                                             \
           "\t.text                                        \n\t"        \
           "\t.type hwirq_"#num",@function                 \n\t"        \
           "hwirq_"#num":                                  \n\t"        \
           "pushl $"#num"           /* vector number */    \n\t"        \
           "jmp    hwirq_common     /* common stuff */     \n\t"        \
                                                                        )
/// Noop wrapper for HW_IRQ to deal with CPP stringification problems
#define HW_IRQ(num) XHW_IRQ(num)

#define STR(x) #x
#define XTR(x) STR(x)
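
// Standard two-step stringification: STR stringifies its argument verbatim,
// while XTR expands macros first. For example, if OFFSETOF_DCB_DISP were
// defined as 8 (an illustrative value; the real offsets come from
// asmoffsets.h), STR(OFFSETOF_DCB_DISP) would yield "OFFSETOF_DCB_DISP",
// whereas XTR(OFFSETOF_DCB_DISP) yields "8" -- the form needed when
// splicing offsets into the assembly strings below.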
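
/* A sketch of the stack on entry to the common stubs below, derived from
 * the x86-32 interrupt-gate semantics plus the pushes done by the stubs
 * above (for traps from user mode the CPU additionally stacks ESP and SS
 * below EFLAGS):
 *
 *   hwexc_common:            hwirq_common:
 *      0(%esp)  vector          0(%esp)  vector
 *      4(%esp)  error code      4(%esp)  EIP
 *      8(%esp)  EIP             8(%esp)  CS
 *     12(%esp)  CS             12(%esp)  EFLAGS
 *     16(%esp)  EFLAGS
 *
 * which is why the CPL tests below read CS at 12(%esp) and 8(%esp),
 * respectively.
 */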
__asm (
    ".text                                              \n\t"
    "   .type hwexc_common ,@function                   \n\t"
    "hwexc_common:                                      \n\t"
    "testb $3, 12(%esp) /* if CS.CPL == 0 */            \n\t"
    "jz kernel_fault                                    \n\t"

    /* User exception: save full state and return to the user.
     * This path could be optimized by only saving the non-volatile
     * registers (since the kernel's C path will maintain them), and
     * having the user-mode code save them if needed. Since the
     * current user code always does need them, we just save the full
     * set here. */

    /* decide where to save the state; the options are:
     *    pagefault and enabled -> enabled save area
     *    pagefault while disabled or any other trap -> trap save area
     */
    "pushl %ecx                                         \n\t"
    "call __i686.get_pc_thunk.cx                        \n\t"
    "addl $_GLOBAL_OFFSET_TABLE_, %ecx                  \n\t"
    "movl dcb_current@GOTOFF(%ecx), %ecx /* ecx = dcb_current */       \n\t"
    "movl "XTR(OFFSETOF_DCB_DISP)"(%ecx), %ecx /* ecx = dcb_current->disp */\n\t"
    "cmpl $14, 4(%esp)       /* is pagefault? */        \n\t"
    "jne save_trap                                      \n\t"
    "cmpl $0, "XTR(OFFSETOF_DISP_DISABLED)"(%ecx) /* disp->disabled ? */\n\t"
    "jne save_trap                                      \n\t"
    "pushl %ebx                                         \n\t"
    "movl 4*4(%esp), %ebx     /* ebx = faulting IP */   \n\t"
    "cmpl "XTR(OFFSETOF_DISP_X86_32_CRIT_PC_LOW)"(%ecx), %ebx /* crit_pc_low <= eip? */\n\t"
    "jae disabled_test                                  \n\t"
    "\nsave_enabled:                                    \n\t"
    "popl %ebx                                          \n\t"
    "addl $"XTR(OFFSETOF_DISP_X86_32_ENABLED_AREA)", %ecx /* ecx = enabled_save_area */\n\t"
    "jmp do_save                                        \n\t"
    "\ndisabled_test:                                   \n\t"
    "cmpl "XTR(OFFSETOF_DISP_X86_32_CRIT_PC_HIGH)"(%ecx), %ebx /* crit_pc_high > eip? */\n\t"
    "jae save_enabled                                   \n\t"
    "popl %ebx                                          \n\t"
    "\nsave_trap:                                       \n\t"
    "addl $"XTR(OFFSETOF_DISP_X86_32_TRAP_AREA)", %ecx /* trap_save_area */\n\t"

    /* save to the save area. at this point: ecx = save area ptr,
     * esp+4 = exception vector, esp+8 = error code,
     * esp+12 = CPU-stacked registers */
    "\ndo_save:                                         \n\t"
    "movl %eax,  0*4(%ecx)                              \n\t"
    "popl %eax                    /* original ecx */    \n\t"
    "movl %ebx,  1*4(%ecx)                              \n\t"
    "movl %eax,  2*4(%ecx)                              \n\t"
    "movl %edx,  3*4(%ecx)                              \n\t"
    "movl %esi,  4*4(%ecx)                              \n\t"
    "movl %edi,  5*4(%ecx)                              \n\t"
    "movl %ebp,  6*4(%ecx)                              \n\t"
    "mov %fs, "XTR(OFFSETOF_FS_REG)"(%ecx)              \n\t"
    "mov %gs, "XTR(OFFSETOF_GS_REG)"(%ecx)              \n\t"
    "pushl %ecx                                         \n\t"
    "lea 3*4(%esp), %ecx                                \n\t"
    "pushl %ecx                                         \n\t"
    "calll generic_handle_user_exception                \n\t"
    /* Get all the function arguments off the stack again */
    "addl $4*4, %esp                                    \n\t"
    // Load disp->udisp into EDI
    "call __i686.get_pc_thunk.di                        \n\t"
    "addl $_GLOBAL_OFFSET_TABLE_, %edi                  \n\t"
    "movl dcb_current@GOTOFF(%edi), %edi /* edi = dcb_current */       \n\t"
    "movl "XTR(OFFSETOF_DCB_DISP)"(%edi), %edi /* edi = dcb_current->disp */\n\t"
    "movl "XTR(OFFSETOF_DISP_UDISP)"(%edi), %edi /* edi = disp->udisp */\n\t"
    "iretl                                              \n\t"

    /* a kernel fault means something bad happened, so we stack
     * everything for the debugger to use, in the GDB frame format */
    "\nkernel_fault:                                    \n\t"
    "pushl $0x10     /* SS */                           \n\t"
    "pushl 4*4(%esp) /* CS */                           \n\t"
    "pushl 6*4(%esp) /* EFLAGS */                       \n\t"
    "pushl 5*4(%esp) /* EIP */                          \n\t"
    "pushl %esp      /* ESP */                          \n\t"
    "pushl %ebp                                         \n\t"
    "pushl %edi                                         \n\t"
    "pushl %esi                                         \n\t"
    "pushl %edx                                         \n\t"
    "pushl %ecx                                         \n\t"
    "pushl %ebx                                         \n\t"
    "pushl %eax                                         \n\t"
    "pushl %esp              /* save area ptr */        \n\t"
    "pushl 14*4(%esp)        /* error code */           \n\t"
    "pushl 14*4(%esp)        /* vector number */        \n\t"
    "call generic_handle_kernel_exception               \n\t"

    /* (Device) interrupt. */
    "   .type hwirq_common ,@function                   \n\t"
    "hwirq_common:                                      \n\t"
    /* If it happened in kernel mode, simply make userspace runnable */
    "testb $3, 8(%esp)  /* if CS.CPL == 0 */            \n\t"
    "jz call_handle_irq                                 \n\t"

    /* It happened in user mode:
     * we need to save everything to the dispatcher. */
    /* decide where to save the state, either enabled or disabled save areas */
    "pushl %edx                                         \n\t"
    "call __i686.get_pc_thunk.dx                        \n\t"
    "addl $_GLOBAL_OFFSET_TABLE_, %edx                  \n\t"
    "movl dcb_current@GOTOFF(%edx), %edx /* edx = dcb_current */       \n\t"
    "movl "XTR(OFFSETOF_DCB_DISP)"(%edx), %edx /* edx = dcb_current->disp */\n\t"
    "cmpl $0, "XTR(OFFSETOF_DISP_DISABLED)"(%edx) /* disp->disabled ? */\n\t"
    "jne irq_save_disabled                              \n\t"
    "pushl %ebx                                         \n\t"
    "movl 3*4(%esp), %ebx     /* ebx = faulting IP */   \n\t"
    "cmpl "XTR(OFFSETOF_DISP_X86_32_CRIT_PC_LOW)"(%edx), %ebx /* crit_pc_low <= eip? */\n\t"
    "jae irq_disabled_test                              \n\t"
    "\nirq_save_enabled:                                \n\t"
    "popl %ebx                                          \n\t"
    "addl $"XTR(OFFSETOF_DISP_X86_32_ENABLED_AREA)", %edx /* edx = enabled_save_area */\n\t"
    "jmp irq_do_save                                    \n\t"
    "\nirq_disabled_test:                               \n\t"
    "cmpl "XTR(OFFSETOF_DISP_X86_32_CRIT_PC_HIGH)"(%edx), %ebx /* crit_pc_high > eip? */\n\t"
    "jae irq_save_enabled                               \n\t"
    "popl %ebx                                          \n\t"
    "\nirq_save_disabled:                               \n\t"
    "addl $"XTR(OFFSETOF_DISP_X86_32_DISABLED_AREA)", %edx /* disabled_save_area */\n\t"

    /* save to the save area. at this point: edx = save area ptr,
     * esp+4 = vector number, esp+8 = CPU-stacked registers */
    "\nirq_do_save:                                     \n\t"
    "movl %eax,  0*4(%edx)                              \n\t"
    "movl %ebx,  1*4(%edx)                              \n\t"
    "movl %ecx,  2*4(%edx)                              \n\t"
    "popl %eax                    /* original edx */    \n\t"
    "movl %eax,  3*4(%edx)                              \n\t"
    "movl %esi,  4*4(%edx)                              \n\t"
    "movl %edi,  5*4(%edx)                              \n\t"
    "movl %ebp,  6*4(%edx)                              \n\t"
    "mov %fs, "XTR(OFFSETOF_FS_REG)"(%edx)              \n\t"
    "mov %gs, "XTR(OFFSETOF_GS_REG)"(%edx)              \n\t"
    "lea 4(%esp), %esi            /* CPU save area */   \n\t"
    "pushl %esi                                         \n\t"
    "pushl %edx                                         \n\t"
    "calll generic_handle_irq /* NB: edx = disp save ptr */\n\t"

    "\ncall_handle_irq:                                 \n\t"
    "calll handle_irq                                   \n\t"
);

// CPU exceptions
HW_EXCEPTION_NOERR(0);
HW_EXCEPTION_NOERR(1);
HW_EXCEPTION_NOERR(2);
HW_EXCEPTION_NOERR(3);
HW_EXCEPTION_NOERR(4);
HW_EXCEPTION_NOERR(5);
HW_EXCEPTION_NOERR(6);
HW_EXCEPTION_NOERR(7);
HW_EXCEPTION_ERR(8);
HW_EXCEPTION_NOERR(9);
HW_EXCEPTION_ERR(10);
HW_EXCEPTION_ERR(11);
HW_EXCEPTION_ERR(12);
HW_EXCEPTION_ERR(13);
HW_EXCEPTION_ERR(14);
HW_EXCEPTION_NOERR(16);
HW_EXCEPTION_ERR(17);
HW_EXCEPTION_NOERR(18);
HW_EXCEPTION_NOERR(19);

// Classic PIC interrupts
HW_IRQ(32);
HW_IRQ(33);
HW_IRQ(34);
HW_IRQ(35);
HW_IRQ(36);
HW_IRQ(37);
HW_IRQ(38);
HW_IRQ(39);
HW_IRQ(40);
HW_IRQ(41);
HW_IRQ(42);
HW_IRQ(43);
HW_IRQ(44);
HW_IRQ(45);
HW_IRQ(46);
HW_IRQ(47);

// Generic interrupts
HW_IRQ(48);
HW_IRQ(49);
HW_IRQ(50);
HW_IRQ(51);
HW_IRQ(52);
HW_IRQ(53);
HW_IRQ(54);
HW_IRQ(55);
HW_IRQ(56);
HW_IRQ(57);
HW_IRQ(58);
HW_IRQ(59);
HW_IRQ(60);
HW_IRQ(61);

// Trace IPIs
HW_IRQ(62);
HW_IRQ(63);

// Local APIC interrupts
HW_IRQ(249);
HW_IRQ(250);
HW_IRQ(251);
HW_IRQ(252);
HW_IRQ(253);
HW_IRQ(254);

// Reserved as "unhandled exception" handler
HW_EXCEPTION_NOERR(666);

#define ERR_PF_PRESENT          (1 << 0)
#define ERR_PF_READ_WRITE       (1 << 1)
#define ERR_PF_USER_SUPERVISOR  (1 << 2)
#define ERR_PF_RESERVED         (1 << 3)
#define ERR_PF_INSTRUCTION      (1 << 4)
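
// Example decoding: a page-fault error code of 0x6 (ERR_PF_READ_WRITE |
// ERR_PF_USER_SUPERVISOR, with ERR_PF_PRESENT clear) means a user-mode
// write to a page that was not present.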

/**
 * \brief Interrupt Descriptor Table (IDT) for the processor this kernel is
 * running on.
 */
static struct gate_descriptor idt[NIDT] __attribute__ ((aligned (16)));

/// System call entry point
void syscall_entry(void);

static inline bool bitmap_get(uint8_t *bitmap, int idx){
    return (bitmap[idx / 8] >> (idx % 8)) & 1;
}

static inline void bitmap_set_true(uint8_t *bitmap, int idx){
    bitmap[idx / 8] |= (1 << (idx % 8));
}
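
// Example: for idx == 10, both helpers address bit 2 of bitmap[1], i.e.
// bitmap_set_true(b, 10) ORs 0x04 into b[1], and bitmap_get(b, 10) then
// returns true.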

/**
 * \brief Send interrupt notification to user-space listener.
 *
 * Sends an interrupt notification IDC to a local endpoint that
 * listens for IRQ notifications.
 *
 * \param irq   IRQ# to send in notification.
 */
static void send_user_interrupt(int irq)
{
    assert(irq >= 0 && irq < NDISPATCH);

    // Find the right cap
    struct kcb *k = kcb_current;
    do {
        if (k->irq_dispatch[irq].cap.type == ObjType_EndPoint) {
            break;
        }
        k = k->next;
    } while (k && k != kcb_current);
    // if k == NULL we don't need to switch, as we only have a single kcb
    if (k) {
        switch_kcb(k);
    }
    // from here: kcb_current is the kcb for which the interrupt was intended
    struct capability *cap = &kcb_current->irq_dispatch[irq].cap;

    // Return on null cap (unhandled interrupt)
    if (cap->type == ObjType_Null) {
        printk(LOG_WARN, "unhandled IRQ %d\n", irq);
        return;
    }

    // Otherwise, cap needs to be an endpoint
    assert(cap->type == ObjType_EndPoint);
    errval_t err = lmp_deliver_notification(cap);
    if (err_is_fail(err)) {
        if (err_no(err) == SYS_ERR_LMP_BUF_OVERFLOW) {
            struct dispatcher_shared_generic *disp =
                get_dispatcher_shared_generic(cap->u.endpoint.listener->disp);
            printk(LOG_DEBUG, "%.*s: IRQ message buffer overflow\n",
                   DISP_NAME_LEN, disp->name);
        } else {
            printk(LOG_ERR, "Unexpected error delivering IRQ\n");
        }
    }

#ifdef SCHEDULER_RR
    /* XXX: run the handler dispatcher immediately
     * we shouldn't do this (we should let the scheduler decide), but because
     * our default scheduler is braindead, this is a quick hack to make sure
     * that mostly-sane things happen
     */
    dispatch(cap->u.endpoint.listener);
#else
    dispatch(schedule());
#endif
}

errval_t irq_table_alloc(int *outvec)
{
    assert(outvec);
    // XXX: this is O(#kcb * NDISPATCH)
    int i;
    for (i = 0; i < NDISPATCH; i++) {
        struct kcb *k = kcb_current;
        bool found_free = true;
        do {
            if (k->irq_dispatch[i].cap.type == ObjType_EndPoint) {
                found_free = false;
                break;
            }
            k = k->next ? k->next : k;
        } while (k != kcb_current);
        if (found_free) {
            break;
        }
    }
    if (i == NDISPATCH) {
        *outvec = -1;
        return SYS_ERR_IRQ_NO_FREE_VECTOR;
    } else {
        *outvec = i;
        return SYS_ERR_OK;
    }
}

errval_t irq_debug_create_src_cap(uint8_t dcn_vbits, capaddr_t dcn,
        capaddr_t out_cap_addr, uint64_t start, uint64_t end)
{
    // This method is a hack to forge an IRQ source cap for the given GSI,
    // targeting the ioapic
    errval_t err;
    struct cte out_cap;
    memset(&out_cap, 0, sizeof(struct cte));

    out_cap.cap.type = ObjType_IRQSrc;
    out_cap.cap.u.irqsrc.vec_start = start;
    out_cap.cap.u.irqsrc.vec_end = end;

    struct cte *cn;
    err = caps_lookup_slot(&dcb_current->cspace.cap, dcn, dcn_vbits, &cn,
                           CAPRIGHTS_WRITE);
    if (err_is_fail(err)) {
        return err;
    }
    err = caps_copy_to_cnode(cn, out_cap_addr, &out_cap, 0, 0, 0);
    if (err_is_fail(err)) {
        return err;
    }

    return SYS_ERR_OK;
}

errval_t irq_table_alloc_dest_cap(uint8_t dcn_vbits, capaddr_t dcn, capaddr_t out_cap_addr)
{
    errval_t err;

    int i;
    bool i_usable = false;
    for (i = 0; i < NDISPATCH; i++) {
        i_usable = true;
        // Iterate over all kcbs
        struct kcb *k = kcb_current;
        do {
            if (bitmap_get(k->irq_in_use, i)) {
                i_usable = false;
                break;
            }
            k = k->next;
        } while (k && k != kcb_current);
        if (i_usable) break; // Skip increment
    }

    if (i == NDISPATCH) {
        return SYS_ERR_IRQ_NO_FREE_VECTOR;
    } else {
        struct cte out_cap;
        memset(&out_cap, 0, sizeof(struct cte));
        bitmap_set_true(kcb_current->irq_in_use, i);

        out_cap.cap.type = ObjType_IRQDest;
        out_cap.cap.u.irqdest.controller = my_core_id;
        out_cap.cap.u.irqdest.vector = i;

        struct cte *cn;
        err = caps_lookup_slot(&dcb_current->cspace.cap, dcn, dcn_vbits, &cn,
                               CAPRIGHTS_WRITE);
        if (err_is_fail(err)) {
            return err;
        }

        err = caps_copy_to_cnode(cn, out_cap_addr, &out_cap, 0, 0, 0);
        if (err_is_fail(err)) {
            return err;
        }
        //printk(LOG_NOTE, "irq: Allocated cap for vec: %d\n", i);
        return SYS_ERR_OK;
    }
}

errval_t irq_connect(struct capability *dest_cap, capaddr_t endpoint_adr)
{
    errval_t err;
    struct cte *endpoint;

    // Lookup & check message endpoint cap
    err = caps_lookup_slot(&dcb_current->cspace.cap, endpoint_adr,
                           CPTR_BITS, &endpoint, CAPRIGHTS_WRITE);
    if (err_is_fail(err)) {
        return err_push(err, SYS_ERR_IRQ_LOOKUP_EP);
    }

    assert(endpoint != NULL);

    // Return w/error if cap is not an endpoint
    if (endpoint->cap.type != ObjType_EndPoint) {
        return SYS_ERR_IRQ_NOT_ENDPOINT;
    }

    // Return w/error if no listener on endpoint
    if (endpoint->cap.u.endpoint.listener == NULL) {
        return SYS_ERR_IRQ_NO_LISTENER;
    }

    assert(dest_cap->type == ObjType_IRQDest);
    if (dest_cap->u.irqdest.controller != my_core_id) {
        return SYS_ERR_IRQ_WRONG_CONTROLLER;
    }

    uint64_t dest_vec = dest_cap->u.irqdest.vector;
    assert(kcb_current->irq_dispatch[dest_vec].cap.type == ObjType_Null);
    caps_copy_to_cte(&kcb_current->irq_dispatch[dest_vec],
            endpoint, 0, 0, 0);

    //printk(LOG_NOTE, "irq: connected vec: %"PRIu64"\n", dest_vec);
    return SYS_ERR_OK;
}

/**
 * Deprecated. Use capabilities.
 */
errval_t irq_table_set(unsigned int nidt, capaddr_t endpoint)
{
    printk(LOG_ERR, "Used deprecated irq_table_set. Not setting interrupt\n");
    return SYS_ERR_IRQ_INVALID;
}

errval_t irq_table_delete(unsigned int nidt)
{
    printk(LOG_ERR, "Used deprecated irq_table_delete. Not deleting interrupt\n");
    return SYS_ERR_IRQ_INVALID;
}

errval_t irq_table_notify_domains(struct kcb *kcb)
{
    uintptr_t msg[] = { 1 };
    for (int i = 0; i < NDISPATCH; i++) {
        if (kcb->irq_dispatch[i].cap.type == ObjType_EndPoint) {
            struct capability *cap = &kcb->irq_dispatch[i].cap;
            // 1-word message as notification
            errval_t err = lmp_deliver_payload(cap, NULL, msg, 1, false);
            if (err_is_fail(err)) {
                if (err_no(err) == SYS_ERR_LMP_BUF_OVERFLOW) {
                    struct dispatcher_shared_generic *disp =
                        get_dispatcher_shared_generic(cap->u.endpoint.listener->disp);
                    printk(LOG_DEBUG, "%.*s: IRQ message buffer overflow\n",
                            DISP_NAME_LEN, disp->name);
                } else {
                    printk(LOG_ERR, "Unexpected error delivering IRQ\n");
                }
            }
        }
        kcb->irq_dispatch[i].cap.type = ObjType_Null;
    }
    return SYS_ERR_OK;
}

/**
 * \brief Handles kernel exceptions
 *
 * \param vec   Vector number of exception
 * \param error Error code from CPU, or 0 for an exception without an error code
 * \param save_area Pointer to save area for registers stacked by trap handler
 */
static __attribute__ ((used,noreturn))
    void generic_handle_kernel_exception(int vec, uint32_t error,
                            struct registers_x86_32 *save_area)
{
    uintptr_t eip = save_area->eip;
    lvaddr_t fault_address;

    if (vec == 666) {
        panic("unhandled kernel exception");
    }

    assert(vec < NEXCEPTIONS);
    // CS.CPL == 0, i.e. a kernel-mode fault
    assert((save_area->cs & 0x3) == 0);

    printk(LOG_PANIC, "exception %d (error code 0x%"PRIx32"): ", vec, error);

    switch (vec) {
    case 0:     // Divide Error (#DE)
        printf("divide error\n");
        break;
    case 1:     // Debug Exception (#DB)
        printf("debug exception\n");
        break;
    case 2:     // NMI Interrupt
        printf("NMI Interrupt\n");
        break;
    case 3:     // Breakpoint (#BP)
        printf("breakpoint\n");
        break;
    case 4:     // Overflow (#OF)
        printf("overflow\n");
        break;
    case 5:     // BOUND Range Exceeded (#BR)
        printf("BOUND Range Exceeded\n");
        break;
    case 6:     // Invalid Opcode (#UD)
        printf("invalid opcode\n");
        break;
    case 7:     // Device Not Available (#NM)
        printf("device not available\n");
        break;
    case 8:     // Double Fault (#DF)
        printf("double fault\n");
        break;
    case 9:     // Coprocessor Segment Overrun
        printf("coprocessor segment overrun\n");
        break;
    case 10:    // Invalid TSS (#TS)
        printf("invalid TSS\n");
        break;
    case 11:    // Segment Not Present (#NP)
        printf("segment not present\n");
        break;
    case 12:    // Stack Fault (#SS)
        printf("stack fault\n");
        break;
    case 13:    // General Protection Fault (#GP)
        printf("general protection fault\n");
        break;
    case 14:    // Page Fault (#PF)
        printf("%s page fault due to %s%s, while in %s mode%s\n",
               error & ERR_PF_READ_WRITE ? "write" : "read",
               error & ERR_PF_PRESENT ? "access violation" : "page not present",
               error & ERR_PF_RESERVED ? ", reserved bits set in page table"
               : "",
               error & ERR_PF_USER_SUPERVISOR ? "user" : "supervisor",
               error & ERR_PF_INSTRUCTION ? ", by instruction fetch" : "");

        __asm volatile("mov %%cr2, %[fault_address]"
                       : [fault_address] "=r" (fault_address));
        printf("Address that caused the fault: 0x%"PRIxLVADDR"\n", fault_address);
        break;
    case 17:    // Alignment Check Exception (#AC)
        printf("alignment check exception\n");
        break;

    default:
        printf("unhandled exception!\n");
        break;
    }

    if (dcb_current != NULL) {
        dispatcher_handle_t handle = dcb_current->disp;
        struct dispatcher_shared_generic *disp =
            get_dispatcher_shared_generic(handle);

        printf("On behalf of: %.*s\n", DISP_NAME_LEN, disp->name);
    } else {
        printf("No active process\n");
    }

    // Print faulting instruction pointer
    printf("Faulting instruction pointer (or following instruction): "
           "0x%"PRIxPTR" (0x%"PRIxPTR" in binary)\n", eip,
           eip - (uintptr_t)&_start_kernel + X86_32_START_KERNEL_PHYS);

    // Print some important registers
    printf("EAX 0x%"PRIx32" EBX 0x%"PRIx32" ECX 0x%"PRIx32
          " EDX 0x%"PRIx32" ESP 0x%"PRIx32"\n",
           save_area->eax, save_area->ebx,
           save_area->ecx, save_area->edx,
           save_area->esp);

    // Print the top 20 stack words
    printf("Top o' stack:\n");
    for (int i = 0; i < 20; i++) {
        unsigned long *p = (unsigned long *)save_area->esp + i;
        printf("0x%lx ", *p);
    }
    printf("\n");

    // Drop to the debugger
    gdb_handle_exception(vec, (uintptr_t*)save_area);
    panic("gdb_handle_exception returned");
}

/**
 * \brief Copies CPU-stacked registers to a dispatcher save area
 */
static void copy_cpu_frame_to_dispatcher(
    uintptr_t * NONNULL COUNT(X86_SAVE_AREA_SIZE) cpu_save_area,
    struct registers_x86_32 *disp_save_area)
{
    // sanity checks
    assert(cpu_save_area[X86_SAVE_SS] == USER_SS);
    assert(cpu_save_area[X86_SAVE_CS] == USER_CS);
    assert((cpu_save_area[X86_SAVE_EFLAGS] & USER_EFLAGS) == USER_EFLAGS);

    disp_save_area->ss = cpu_save_area[X86_SAVE_SS];
    disp_save_area->esp = cpu_save_area[X86_SAVE_ESP];
    disp_save_area->eflags = cpu_save_area[X86_SAVE_EFLAGS];
    disp_save_area->cs = cpu_save_area[X86_SAVE_CS];
    disp_save_area->eip = cpu_save_area[X86_SAVE_EIP];
}

/**
 * \brief Handles user-mode exceptions
 *
 * \param cpu_save_area  Pointer to save area for registers stacked by CPU
 * \param disp_save_area Pointer to save area in dispatcher
 * \param vec   Vector number of exception
 * \param error Error code from CPU, or 0 for an exception without an error code
 */
static __attribute__ ((used))
void generic_handle_user_exception(uintptr_t *cpu_save_area,
                                   struct registers_x86_32 *disp_save_area,
                                   int vec, uint32_t error)
{
    assert(dcb_current->disp_cte.cap.type == ObjType_Frame);
    dispatcher_handle_t handle = dcb_current->disp;
    struct dispatcher_shared_generic *disp =
        get_dispatcher_shared_generic(handle);
    uintptr_t eip = cpu_save_area[X86_SAVE_EIP];
    lvaddr_t fault_address, handler, param;

    assert(vec < NEXCEPTIONS);
    assert((cpu_save_area[X86_SAVE_CS] & 0x3) != 0); // CS.CPL > 0

    copy_cpu_frame_to_dispatcher(cpu_save_area, disp_save_area);

    bool disabled = dcb_current->disabled =
        dispatcher_is_disabled_ip(handle, eip);

    // Store FPU state if it's used
    // Do this for every trap when the current domain used the FPU
    // Do it for FPU-not-available traps in any case (to save the last FPU user)
    // XXX: Need to reset fpu_dcb when that DCB is deleted
    if (fpu_dcb != NULL &&
        (fpu_dcb == dcb_current || vec == IDT_NM)) {
        struct dispatcher_shared_generic *dst =
            get_dispatcher_shared_generic(fpu_dcb->disp);

        // Turn FPU trap off temporarily for saving its state
        bool trap = fpu_trap_get();
        fpu_trap_off();

        if (fpu_dcb->disabled) {
            fpu_save(dispatcher_get_disabled_fpu_save_area(fpu_dcb->disp));
            dst->fpu_used = 1;
        } else {
            assert(!fpu_dcb->disabled);
            fpu_save(dispatcher_get_enabled_fpu_save_area(fpu_dcb->disp));
            dst->fpu_used = 2;
        }

        if (trap) {
            fpu_trap_on();
        }
    }

    if (vec == IDT_PF) { // Page fault
        // Get fault address
        __asm volatile("mov %%cr2, %[fault_address]"
                       : [fault_address] "=r" (fault_address));

        printk(LOG_WARN, "user page fault%s in '%.*s': addr %"PRIxLVADDR
                         " IP %"PRIxPTR"  error %"PRIx32"\n",
               disabled ? " WHILE DISABLED" : "", DISP_NAME_LEN,
               disp->name, fault_address, eip, error);

        /* sanity-check that the trap handler saved in the right place */
        assert((disabled && disp_save_area == dispatcher_get_trap_save_area(handle))
               || (!disabled && disp_save_area == dispatcher_get_enabled_save_area(handle)));
        if (disabled) {
            dcb_current->faults_taken++;
            handler = disp->dispatcher_pagefault_disabled;
        } else {
            handler = disp->dispatcher_pagefault;
        }
        param = fault_address;
    } else if (vec == IDT_NM) {
        debug(SUBSYS_DISPATCH, "FPU trap in %.*s (%p) at 0x%" PRIxPTR ", %s\n",
              DISP_NAME_LEN, disp->name, dcb_current, eip, disabled ? "DISABLED" : "ENABLED");

        /* Intel system programming part 1: 2.3.1, 2.5, 11, 12.5.1
         * clear the TS flag (the flag that says the FPU is not available)
         */
        clts();

        // Remember FPU-using DCB
        fpu_dcb = dcb_current;

        // Wipe FPU for protection and to initialize it in case we trapped while
        // disabled
        fpu_init();

        if (disabled) {
            // Initialize FPU (done earlier) and ignore trap
            dispatch(dcb_current);
        } else {
            // defer trap to user-space
            // FPUs are switched eagerly while disabled, so there should be no trap
            assert(disp_save_area == dispatcher_get_trap_save_area(handle));
            handler = disp->dispatcher_trap;
            param = vec;
        }
    } else if (vec == IDT_NMI) {
        printk(LOG_WARN, "NMI - ignoring\n");
        dispatch(schedule());
    } else if (vec == IDT_MF) {
        uint16_t fpu_status;

        __asm volatile("fnstsw %0" : "=a" (fpu_status));

        printk(LOG_WARN, "FPU error%s in '%.*s': IP %"PRIxPTR" FPU status %x\n",
               disabled ? " WHILE DISABLED" : "", DISP_NAME_LEN,
               disp->name, eip, fpu_status);

        handler = disp->dispatcher_trap;
        param = vec;
    } else { // All other traps
        printk(LOG_WARN, "user trap #%d in '%.*s': IP %"PRIxPTR
                         ", error %"PRIu32"\n",
               vec, DISP_NAME_LEN, disp->name, eip, error);
        assert(disp_save_area == dispatcher_get_trap_save_area(handle));
        if (vec != 1) { // don't count the debug exception (#DB) as a fault
            dcb_current->faults_taken++;
        }

        handler = disp->dispatcher_trap;
        param = vec;
    }

    // Make unrunnable if it has taken too many faults
    if (dcb_current->faults_taken > 2) {
        printk(LOG_WARN, "generic_handle_user_exception: too many faults, "
               "making domain unrunnable\n");
        dcb_current->faults_taken = 0; // just in case it gets restarted
        scheduler_remove(dcb_current);
        dispatch(schedule());
    }

    /* resume user to save area */
    disp->disabled = 1;
    cpu_save_area[X86_SAVE_EIP] = handler;
    cpu_save_area[X86_SAVE_EFLAGS] = USER_EFLAGS;

    /* XXX: get GCC to load up the argument registers before returning */
    register uintptr_t arg0 __asm ("%eax") = param;
    register uintptr_t arg1 __asm ("%ecx") = error;
    register uintptr_t arg2 __asm ("%edx") = eip;
    __asm volatile("" :: "r" (arg0), "r" (arg1), "r" (arg2));
}

/// Handle an IRQ that arrived, either while in user or kernel mode (HLT)
static __attribute__ ((used)) void handle_irq(int vector)
{
    debug(SUBSYS_DISPATCH, "IRQ vector %d while %s\n", vector,
          dcb_current ? (dcb_current->disabled ? "disabled": "enabled") : "in kernel");

    int irq = vector - NEXCEPTIONS;

    // if we were in wait_for_interrupt(), unmask timer before running userspace
    if (dcb_current == NULL && kernel_ticks_enabled) {
        apic_unmask_timer();
    }

    // APIC timer interrupt: handle in kernel and reschedule
    if (vector == APIC_TIMER_INTERRUPT_VECTOR) {
        apic_eoi();
        assert(kernel_ticks_enabled);
        // Ignore the timeslice if it happens too closely (less than half
        // of the TSC ticks that are supposed to pass) to the last one.
        // In that case we have just synced timers and see a spurious
        // APIC timer interrupt.
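        // Worked example with made-up numbers: if kernel_timeslice is 10 (ms)
        // and timing_get_tsc_per_ms() reports 1,000,000 TSC ticks per ms, a
        // timer tick arriving 5,000,000 ticks or fewer after the previous one
        // is treated as spurious and does not advance kernel_now.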
        uint64_t tsc_now = rdtsc();
        if (tsc_now - tsc_lasttime >
            (kernel_timeslice * timing_get_tsc_per_ms()) / 2) {
            kernel_now += kernel_timeslice;
        }
        tsc_lasttime = tsc_now;
        trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_TIMER, kernel_now);
        wakeup_check(kernel_now);
    } else if (vector == APIC_ERROR_INTERRUPT_VECTOR) {
        printk(LOG_ERR, "APIC error interrupt fired!\n"); // XXX: do something?
        apic_eoi();
    } else if (vector == APIC_INTER_CORE_VECTOR) {
        apic_eoi();
        ipi_handle_notify();
    }
#if 0
    else if (irq >= 0 && irq <= 15) { // classic PIC device interrupt
        printk(LOG_NOTE, "got interrupt %d!\n", irq);

        apic_eoi();

        // only handle PIC interrupts on the BSP core
        if (apic_is_bsp()) {
            if (pic_have_interrupt(irq)) {
                pic_eoi(irq);
                send_user_interrupt(irq);
            } else { // no interrupt pending, check for a different one (!)
                irq = pic_pending_interrupt();
                if (irq == -1) { // really nothing pending
                    printk(LOG_NOTE, "spurious interrupt (IRQ %d)\n", irq);
                } else { // why does this happen?! -AB
                    printk(LOG_NOTE, "IRQ %d reported on wrong vector (%d)\n",
                           irq, vector - NEXCEPTIONS);
                    pic_eoi(irq);
                    send_user_interrupt(irq);
                }
            }
        }
    }
#endif
    else { // APIC device interrupt (or IPI)
        //printk(LOG_NOTE, "interrupt %d vector %d!\n", irq, vector);
        apic_eoi();
        send_user_interrupt(irq);
    }

    // reschedule (because the runnable processes may have changed) and dispatch
    /* FIXME: the round-robin scheduler doesn't do the best thing here:
     * it always picks the next task, but we only really want to do that on
     * a timer tick
     */
    dispatch(schedule());
    panic("dispatch() returned");
}

/**
 * \brief Handles device interrupts that arrive while in user mode
 *
 * \param disp_save_area Pointer to save area in dispatcher
 * \param cpu_save_area  Pointer to save area for registers stacked by CPU
 * \param vector    Vector number
 */
static __attribute__ ((used)) void
generic_handle_irq(struct registers_x86_32 *disp_save_area, uintptr_t *cpu_save_area,
                   int vector)
{
    assert(dcb_current->disp_cte.cap.type == ObjType_Frame);
    dispatcher_handle_t handle = dcb_current->disp;
    uintptr_t eip = cpu_save_area[X86_SAVE_EIP];
    assert(vector < NIDT && vector >= NEXCEPTIONS);

    // Copy CPU-saved registers to dispatcher save area
    copy_cpu_frame_to_dispatcher(cpu_save_area, disp_save_area);

    /* sanity-check that the handler saved in the right place,
     * and update the disabled flag in the DCB */
    if (disp_save_area == dispatcher_get_disabled_save_area(handle)) {
        assert(dispatcher_is_disabled_ip(handle, eip));
        dcb_current->disabled = 1;
    } else {
        assert(disp_save_area == dispatcher_get_enabled_save_area(handle));
        assert(!dispatcher_is_disabled_ip(handle, eip));
        dcb_current->disabled = 0;
    }

    handle_irq(vector);
}

/* Utility function for code below; initialises a gate_descriptor */
static void setgd(struct gate_descriptor *gd, void (* handler)(void),
                  int type, int dpl, int selector)
{
    memset(gd, 0, sizeof(struct gate_descriptor));
    gd->gd_looffset = (uintptr_t)handler & ((1UL << 16) - 1);
    gd->gd_hioffset = (uintptr_t)handler >> 16;
    gd->gd_selector = selector;
    gd->gd_type = type;
    gd->gd_dpl = dpl;
    gd->gd_p = 1;
}
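
/* Example of the offset split above: for a handler at address 0x00123456,
 * setgd stores gd_looffset = 0x3456 and gd_hioffset = 0x0012; the CPU
 * reassembles these into the 32-bit entry point when the gate is taken. */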

/**
 * \brief Sets up the default IDT for the current CPU.
 */
void setup_default_idt(void)
{
    struct region_descriptor region = {         // set default IDT
        .rd_limit = NIDT * sizeof(idt[0]) - 1,
        .rd_base = (uint32_t)&idt
    };
    int i;

    // reset IDT
    memset((void *)&idt, 0, NIDT * sizeof(idt[0]));

    // initialize IDT with default generic handlers
    for (i = 0; i < NIDT; i++)
        setgd(&idt[i], hwexc_666, SDT_SYSIGT, SEL_KPL,
              GSEL(KCODE_SEL, SEL_KPL));

    /* Setup exception handlers */
    setgd(&idt[0], hwexc_0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[1], hwexc_1, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[2], hwexc_2, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[3], hwexc_3, SDT_SYSIGT, SEL_UPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[4], hwexc_4, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[5], hwexc_5, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[6], hwexc_6, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[7], hwexc_7, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[8], hwexc_8, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[9], hwexc_9, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[10], hwexc_10, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[11], hwexc_11, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[12], hwexc_12, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[13], hwexc_13, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[14], hwexc_14, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    // Interrupt 15 is undefined
    setgd(&idt[16], hwexc_16, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[17], hwexc_17, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[18], hwexc_18, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[19], hwexc_19, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    // Interrupts 20 - 31 are reserved

    /* Setup classic PIC interrupt handlers */
    setgd(&idt[32], hwirq_32, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[33], hwirq_33, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[34], hwirq_34, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[35], hwirq_35, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[36], hwirq_36, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[37], hwirq_37, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[38], hwirq_38, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[39], hwirq_39, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[40], hwirq_40, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[41], hwirq_41, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[42], hwirq_42, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[43], hwirq_43, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[44], hwirq_44, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[45], hwirq_45, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[46], hwirq_46, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[47], hwirq_47, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));

    // Setup generic interrupt handlers
    setgd(&idt[48], hwirq_48, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[49], hwirq_49, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[50], hwirq_50, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[51], hwirq_51, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[52], hwirq_52, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[53], hwirq_53, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[54], hwirq_54, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[55], hwirq_55, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[56], hwirq_56, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[57], hwirq_57, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[58], hwirq_58, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[59], hwirq_59, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[60], hwirq_60, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[61], hwirq_61, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));

    // XXX Interrupts used for TRACE IPIs
    setgd(&idt[62], hwirq_62, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[63], hwirq_63, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));

    // Setup local APIC interrupt handlers
    setgd(&idt[249], hwirq_249, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[250], hwirq_250, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[251], hwirq_251, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[252], hwirq_252, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[253], hwirq_253, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[254], hwirq_254, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));

    // Setup system call interrupt
    setgd(&idt[255], syscall_entry, SDT_SYSIGT, SEL_UPL,
          GSEL(KCODE_SEL, SEL_KPL));

    /* Load IDT register */
    __asm volatile("lidt %0" :: "m" (region));
}