/*
 * Copyright 2013, winocm. <winocm@icloud.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 *   Redistributions of source code must retain the above copyright notice, this
 *   list of conditions and the following disclaimer.
 *
 *   Redistributions in binary form must reproduce the above copyright notice, this
 *   list of conditions and the following disclaimer in the documentation and/or
 *   other materials provided with the distribution.
 *
 *   If you are going to use this software in any form that does not involve
 *   releasing the source to this project or improving it, let me know beforehand.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * ARM trap handlers.
 */

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/thread_status.h>
#include <mach_assert.h>
#include <mach_kdp.h>
#include <kern/thread.h>
#include <kern/kalloc.h>
#include <stdarg.h>
#include <vm/vm_kern.h>         /* For kernel_map */
#include <vm/pmap.h>
#include <machine/machine_routines.h>
#include <arm/misc_protos.h>
#include <pexpert/pexpert.h>
#include <pexpert/arm/boot.h>
#include <pexpert/arm/protos.h>
#include <vm/vm_fault.h>
#include <libkern/OSByteOrder.h>
#include <arm/armops.h>

#define ANSI_COLOR_RED     "\x1b[31m"
#define ANSI_COLOR_GREEN   "\x1b[32m"
#define ANSI_COLOR_YELLOW  "\x1b[33m"
#define ANSI_COLOR_BLUE    "\x1b[34m"
#define ANSI_COLOR_MAGENTA "\x1b[35m"
#define ANSI_COLOR_CYAN    "\x1b[36m"
#define ANSI_COLOR_RESET   "\x1b[0m"

typedef enum {
    SLEH_ABORT_TYPE_PREFETCH_ABORT = 3,
    SLEH_ABORT_TYPE_DATA_ABORT = 4,
} sleh_abort_reasons;
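/*
 * Note: these values follow the ARM exception vector order (vector index 3 =
 * prefetch abort, 4 = data abort), which is presumably how the low-level
 * vector stubs tag the abort before calling sleh_abort().
 */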

void doexception(int exc, mach_exception_code_t code,
                 mach_exception_subcode_t sub)
{
    mach_exception_data_type_t codes[EXCEPTION_CODE_MAX];

    codes[0] = code;
    codes[1] = sub;
    exception_triage(exc, codes, 2);
}
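/*
 * exception_triage() hands the (code, subcode) pair to the Mach exception
 * machinery, which delivers it to the thread, task, or host exception port;
 * this is how EXC_BAD_ACCESS and friends reach debuggers and crash reporters.
 */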

/**
 * __arm_get_dfsr
 *
 * Get the current data fault status register.
 */
static inline uint32_t __arm_get_dfsr(void)
{
    uint32_t arm_register;
    __asm__ __volatile__("mrc    p15, 0, %0, c5, c0, 0":"=r"(arm_register));
    return arm_register;
}

/**
 * __arm_get_dfar
 *
 * Get the current data fault address register.
 */
static inline uint32_t __arm_get_dfar(void)
{
    uint32_t arm_register;
    __asm__ __volatile__("mrc    p15, 0, %0, c6, c0, 0":"=r"(arm_register));
    return arm_register;
}

/**
 * __arm_get_ifsr
 *
 * Get the current instruction fault status register.
 */
static inline uint32_t __arm_get_ifsr(void)
{
    uint32_t arm_register;
    __asm__ __volatile__("mrc    p15, 0, %0, c5, c0, 1":"=r"(arm_register));
    return arm_register;
}
/**
 * __arm_get_ifar
 *
 * Get the current instruction fault address register.
 */
static inline uint32_t __arm_get_ifar(void)
{
    uint32_t arm_register;
    __asm__ __volatile__("mrc    p15, 0, %0, c6, c0, 1":"=r"(arm_register));
    return arm_register;
}
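/*
 * CP15 fault register encodings used above (ARMv7 VMSA):
 *   DFSR = c5, c0, 0   DFAR = c6, c0, 0
 *   IFSR = c5, c0, 1   IFAR = c6, c0, 1
 */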

/**
 * update_arm_exception_state
 *
 * Update the exception state upon an exception.
 */
static inline void update_arm_exception_state(abort_information_context_t *arm_ctx, uint32_t exception_type)
{
    thread_t thread = current_thread();

    thread->machine.es.fsr = arm_ctx->fsr;
    thread->machine.es.far = arm_ctx->far;
    thread->machine.es.exception = exception_type;
}

/**
 * ifsr_to_human
 *
 * Return a human readable representation of the IFSR bits.
 */
static char *ifsr_to_human(uint32_t ifsr)
{
    switch ((ifsr & 0xF)) {
    case 0:
        return "No function, reset value";
    case 1:
        return "Alignment fault";
    case 2:
        return "Debug event fault";
    case 3:
        return "Access flag fault on section";
    case 4:
        return "No function";
    case 5:
        return "Translation fault on section";
    case 6:
        return "Access flag fault on page";
    case 7:
        return "Translation fault on page";
    case 8:
        return "Precise external abort";
    case 9:
        return "Domain fault on section";
    case 10:
        return "No function";
    case 11:
        return "Domain fault on page";
    case 12:
        return "External abort on translation, level one";
    case 13:
        return "Permission fault on section";
    case 14:
        return "External abort on translation, level two";
    case 15:
        return "Permission fault on page";
    default:
        return "Unknown";
    }
    return "Unknown";
}
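/*
 * The strings above decode FSR[3:0] of the ARMv7 short-descriptor format;
 * FS[4] (bit 10) and the WnR/domain fields are ignored here. The same
 * decoding is reused for DFSR values in the data-abort paths below.
 */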

/**
 * sleh_fatal_exception
 */
void sleh_fatal_exception(abort_information_context_t * arm_ctx, char *message)
{
    debug_mode = TRUE;
    printf("Fatal exception: %s\n", message);
    printf("ARM register state: (saved state %p)\n"
           "  r0: 0x%08x  r1: 0x%08x  r2: 0x%08x  r3: 0x%08x\n"
           "  r4: 0x%08x  r5: 0x%08x  r6: 0x%08x  r7: 0x%08x\n"
           "  r8: 0x%08x  r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
           " r12: 0x%08x  sp: 0x%08x  lr: 0x%08x  pc: 0x%08x\n"
           "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n", arm_ctx,
           arm_ctx->r[0], arm_ctx->r[1], arm_ctx->r[2], arm_ctx->r[3],
           arm_ctx->r[4], arm_ctx->r[5], arm_ctx->r[6], arm_ctx->r[7],
           arm_ctx->r[8], arm_ctx->r[9], arm_ctx->r[10], arm_ctx->r[11],
           arm_ctx->r[12], arm_ctx->sp, arm_ctx->lr, arm_ctx->pc, arm_ctx->cpsr,
           arm_ctx->fsr, arm_ctx->far);
    printf("Current thread: %p\n", current_thread());

    uint32_t ttbcr, ttbr0, ttbr1;
    __asm__ __volatile__("mrc p15, 0, %0, c2, c0, 0":"=r"(ttbr0));
    __asm__ __volatile__("mrc p15, 0, %0, c2, c0, 1":"=r"(ttbr1));
    __asm__ __volatile__("mrc p15, 0, %0, c2, c0, 2":"=r"(ttbcr));

    printf("Control registers:\n"
           "  ttbcr: 0x%08x  ttbr0:  0x%08x  ttbr1:  0x%08x\n",
           ttbcr, ttbr0, ttbr1);
    Debugger("fatal exception");
    printf("We are hanging here ...\n");

    Halt_system();
}

/**
 * sleh_abort
 *
 * Handle prefetch and data aborts.
 */
static int __abort_count = 0;
void sleh_abort(void *context, int reason)
{
    uint32_t dfsr = 0, dfar = 0, ifsr = 0, ifar = 0, cpsr;
    uint32_t exception_type = 0, exception_subcode = 0;
    abort_information_context_t *arm_ctx =
        (abort_information_context_t *) context;
    thread_t thread = current_thread();

    /*
     * Pick up the fault status and address for this abort type from the
     * saved context.
     */
#if 0
    kprintf("sleh_abort: pc %x lr %x far %x fsr %x psr %x\n", arm_ctx->pc, arm_ctx->lr, arm_ctx->far, arm_ctx->fsr, arm_ctx->cpsr);
#endif
    if (reason == SLEH_ABORT_TYPE_DATA_ABORT) {
        dfsr = arm_ctx->fsr;
        dfar = arm_ctx->far;
    } else if (reason == SLEH_ABORT_TYPE_PREFETCH_ABORT) {
        ifsr = arm_ctx->fsr;
        ifar = arm_ctx->far;
    } else {
        sleh_fatal_exception(arm_ctx, "sleh_abort: weird abort");
    }

    /*
     * We do not want anything entering sleh_abort recursively.
     */
    if (__abort_count != 0) {
        sleh_fatal_exception(arm_ctx, "sleh_abort: recursive abort");
    }
    __abort_count++;

    /*
     * Panic if it's an alignment fault.
     */
    if ((ifsr == 1) || (dfsr == 1)) {
        sleh_fatal_exception(arm_ctx, "sleh_abort: alignment fault");
    }

    if (!kernel_map) {
        sleh_fatal_exception(arm_ctx,
                             "sleh_abort: kernel map is NULL, probably a fault before vm_bootstrap?");
    }

    if (!thread) {
        sleh_fatal_exception(arm_ctx, "sleh_abort: current thread is null?");
    }

    if (ml_at_interrupt_context()) {
        sleh_fatal_exception(arm_ctx, "sleh_abort: Abort in interrupt handler");
    }

    /*
     * See if the abort was in kernel or user mode.
     */
    cpsr = arm_ctx->cpsr & 0x1F;
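    /*
     * ARM mode field encodings: 0x10 = User, 0x13 = Supervisor (SVC),
     * 0x17 = Abort, 0x1F = System.
     */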

    /*
     * Kernel mode. (ARM Supervisor)
     */
    if (cpsr == 0x13) {
        switch (reason) {
            /*
             * Prefetch aborts always include the IFSR and IFAR.
             */
        case SLEH_ABORT_TYPE_PREFETCH_ABORT:{
                /*
                 * Die in a fire.
                 */
                vm_map_t map;
                kern_return_t code;

                /*
                 * Get the kernel map.
                 */
                map = kernel_map;

                /*
                 * Attempt to fault the page.
                 */
                __abort_count--;
                code =
                    vm_fault(map, vm_map_trunc_page(arm_ctx->pc),
                             (VM_PROT_EXECUTE | VM_PROT_READ), FALSE,
                             THREAD_UNINT, NULL, vm_map_trunc_page(0));

                if (code != KERN_SUCCESS) {

                    if (current_debugger) {
                        if (kdp_raise_exception(EXC_BREAKPOINT, 0, 0, &arm_ctx))
                            return;
                    }

                    /*
                     * Still, die in a fire.
                     */
                    panic_context(0, (void *) arm_ctx,
                                  "Kernel prefetch abort. (faulting address: 0x%08x, saved state 0x%08x)\n"
                                  "r0: 0x%08x  r1: 0x%08x  r2: 0x%08x  r3: 0x%08x\n"
                                  "r4: 0x%08x  r5: 0x%08x  r6: 0x%08x  r7: 0x%08x\n"
                                  "r8: 0x%08x  r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
                                  "r12: 0x%08x  sp: 0x%08x  lr: 0x%08x  pc: 0x%08x\n"
                                  "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
                                  ifar, arm_ctx, arm_ctx->r[0], arm_ctx->r[1],
                                  arm_ctx->r[2], arm_ctx->r[3], arm_ctx->r[4],
                                  arm_ctx->r[5], arm_ctx->r[6], arm_ctx->r[7],
                                  arm_ctx->r[8], arm_ctx->r[9], arm_ctx->r[10],
                                  arm_ctx->r[11], arm_ctx->r[12], arm_ctx->sp,
                                  arm_ctx->lr, arm_ctx->pc, arm_ctx->cpsr, ifsr,
                                  ifar);
                }
                return;
            }
        case SLEH_ABORT_TYPE_DATA_ABORT:{
                vm_map_t map;
                kern_return_t code;

                /*
                 * Get the current thread map.
                 */
                map = thread->map;

                /*
                 * Attempt to fault the page.
                 */
                __abort_count--;
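                /*
                 * DFSR bit 11 (WnR) is set for a write access on ARMv7, so
                 * write permission is requested only when the faulting access
                 * was a store; the same test is used in the user-mode path.
                 */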
                code =
                    vm_fault(map, vm_map_trunc_page(dfar),
                             (dfsr & 0x800) ? (VM_PROT_READ | VM_PROT_WRITE)
                             : (VM_PROT_READ), FALSE, THREAD_UNINT, NULL,
                             vm_map_trunc_page(0));

                if (code != KERN_SUCCESS) {
                    /*
                     * Attempt to fault the page against the kernel map.
                     */
                    code =
                        vm_fault(kernel_map, vm_map_trunc_page(dfar),
                                 (dfsr & 0x800) ? (VM_PROT_READ | VM_PROT_WRITE)
                                 : (VM_PROT_READ), FALSE, THREAD_UNINT, NULL,
                                 vm_map_trunc_page(0));
                    if (code != KERN_SUCCESS) {
                        /*
                         * Still, die in a fire, unless a recovery routine
                         * has been installed for this thread.
                         */
                        if (!thread->recover) {
                            panic_context(0, (void *) arm_ctx,
                                          "Kernel data abort. (faulting address: 0x%08x, saved state 0x%08x)\n"
                                          "r0: 0x%08x  r1: 0x%08x  r2: 0x%08x  r3: 0x%08x\n"
                                          "r4: 0x%08x  r5: 0x%08x  r6: 0x%08x  r7: 0x%08x\n"
                                          "r8: 0x%08x  r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
                                          "r12: 0x%08x  sp: 0x%08x  lr: 0x%08x  pc: 0x%08x\n"
                                          "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
                                          dfar, arm_ctx, arm_ctx->r[0], arm_ctx->r[1],
                                          arm_ctx->r[2], arm_ctx->r[3],
                                          arm_ctx->r[4], arm_ctx->r[5],
                                          arm_ctx->r[6], arm_ctx->r[7],
                                          arm_ctx->r[8], arm_ctx->r[9],
                                          arm_ctx->r[10], arm_ctx->r[11],
                                          arm_ctx->r[12], arm_ctx->sp,
                                          arm_ctx->lr, arm_ctx->pc,
                                          arm_ctx->cpsr, dfsr, dfar);
                        } else {
                            /*
                             * If there's a recovery routine, use it.
                             */
                            if (thread->map == kernel_map)
                                panic("Attempting to use a recovery routine on a kernel map thread");

                            if (!thread->map)
                                sleh_fatal_exception(arm_ctx,
                                                     "Current thread has no thread map, what?");

                            arm_ctx->pc = thread->recover;
                            arm_ctx->cpsr &= ~(1 << 5);
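                            /*
                             * Note: the line above clears CPSR bit 5 (the
                             * Thumb bit), so the recovery routine is entered
                             * in ARM state.
                             */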
                            thread->recover = NULL;
                            return;
                        }
                    }
                }
                return;
            }
        default:
            panic("sleh_abort: unknown kernel mode abort, type %d\n", reason);
        }
        /*
         * User mode (ARM User)
         */
    } else if (cpsr == 0x10) {
        switch (reason) {
            /*
             * User prefetch abort
             */
        case SLEH_ABORT_TYPE_PREFETCH_ABORT:{
                /*
                 * Attempt to fault it. Same as data except the address comes from the IFAR.
                 */
                vm_map_t map;
                kern_return_t code;

                /*
                 * Get the current thread map.
                 */
                map = thread->map;
                /*
                 * Attempt to fault the page.
                 */
                assert(get_preemption_level() == 0);
                __abort_count--;
                code =
                    vm_fault(map, vm_map_trunc_page(arm_ctx->pc),
                             (VM_PROT_EXECUTE | VM_PROT_READ), FALSE,
                             THREAD_UNINT, NULL, vm_map_trunc_page(0));

                /*
                 * Additionally, see if we can fault one page higher, as the
                 * instruction may sit near a page boundary; libobjc appears
                 * to require this.
                 *
                 * Prefaulting the following page keeps the prefetch mechanism
                 * from aborting when it crosses the boundary.
                 */
                if ((arm_ctx->pc & 0xfff) >= 0xff0)
                    vm_fault(map, vm_map_trunc_page(arm_ctx->pc) + PAGE_SIZE,
                             (VM_PROT_EXECUTE | VM_PROT_READ), FALSE,
                             THREAD_UNINT, NULL, vm_map_trunc_page(0));

                if ((code != KERN_SUCCESS) && (code != KERN_ABORTED)) {
                    exception_type = EXC_BAD_ACCESS;
                    exception_subcode = 0;
                    update_arm_exception_state(arm_ctx, exception_type);

                    /*
                     * Debug only.
                     */
                    printf
                        (ANSI_COLOR_RED "%s[%d]: " ANSI_COLOR_YELLOW "usermode prefetch abort, EXC_BAD_ACCESS at 0x%08x in map %p (pmap %p) (%s)" ANSI_COLOR_RESET "\n",
                         proc_name_address(thread->task->bsd_info),
                         proc_pid(thread->task->bsd_info), arm_ctx->pc, map,
                         map->pmap, ifsr_to_human(ifsr));
                    printf("Thread has ARM register state:\n"
                           "    r0: 0x%08x  r1: 0x%08x  r2: 0x%08x  r3: 0x%08x\n"
                           "    r4: 0x%08x  r5: 0x%08x  r6: 0x%08x  r7: 0x%08x\n"
                           "    r8: 0x%08x  r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
                           "   r12: 0x%08x  sp: 0x%08x  lr: 0x%08x  pc: 0x%08x\n"
                           "  cpsr: 0x%08x\n", arm_ctx->r[0], arm_ctx->r[1],
                           arm_ctx->r[2], arm_ctx->r[3], arm_ctx->r[4],
                           arm_ctx->r[5], arm_ctx->r[6], arm_ctx->r[7],
                           arm_ctx->r[8], arm_ctx->r[9], arm_ctx->r[10],
                           arm_ctx->r[11], arm_ctx->r[12], arm_ctx->sp,
                           arm_ctx->lr, arm_ctx->pc, arm_ctx->cpsr);
                    printf("dyld_all_image_info_addr: 0x%08x   dyld_all_image_info_size: 0x%08x\n",
                            thread->task->all_image_info_addr, thread->task->all_image_info_size);
                } else {
                    /*
                     * Retry execution of the instruction.
                     */
                    ml_set_interrupts_enabled(TRUE);
                    return;
                }
                break;
            }
            /*
             * User Data Abort
             */
        case SLEH_ABORT_TYPE_DATA_ABORT:{
                /*
                 * Attempt to fault it. Same as instruction except address comes from DFAR.
                 */
                vm_map_t map;
                kern_return_t code;

                /*
                 * Get the current thread map.
                 */
                map = thread->map;

                /*
                 * Attempt to fault the page.
                 */
                assert(get_preemption_level() == 0);
                __abort_count--;
                code =
                    vm_fault(map, vm_map_trunc_page(dfar),
                             (dfsr & 0x800) ? (VM_PROT_READ | VM_PROT_WRITE)
                             : (VM_PROT_READ), FALSE, THREAD_UNINT, NULL,
                             vm_map_trunc_page(0));
                if ((code != KERN_SUCCESS) && (code != KERN_ABORTED)) {
                    exception_type = EXC_BAD_ACCESS;
                    exception_subcode = 0;
                    update_arm_exception_state(arm_ctx, exception_type);

                    /*
                     * Only for debug.
                     */
                    printf
                        (ANSI_COLOR_RED "%s[%d]: " ANSI_COLOR_BLUE "usermode data abort, EXC_BAD_ACCESS at 0x%08x in map %p (pmap %p) (%s)" ANSI_COLOR_RESET "\n",
                         proc_name_address(thread->task->bsd_info),
                         proc_pid(thread->task->bsd_info), dfar, map, map->pmap,
                         ifsr_to_human(dfsr));
                    printf("Thread has ARM register state:\n"
                           "    r0: 0x%08x  r1: 0x%08x  r2: 0x%08x  r3: 0x%08x\n"
                           "    r4: 0x%08x  r5: 0x%08x  r6: 0x%08x  r7: 0x%08x\n"
                           "    r8: 0x%08x  r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
                           "   r12: 0x%08x  sp: 0x%08x  lr: 0x%08x  pc: 0x%08x\n"
                           "  cpsr: 0x%08x\n", arm_ctx->r[0], arm_ctx->r[1],
                           arm_ctx->r[2], arm_ctx->r[3], arm_ctx->r[4],
                           arm_ctx->r[5], arm_ctx->r[6], arm_ctx->r[7],
                           arm_ctx->r[8], arm_ctx->r[9], arm_ctx->r[10],
                           arm_ctx->r[11], arm_ctx->r[12], arm_ctx->sp,
                           arm_ctx->lr, arm_ctx->pc, arm_ctx->cpsr);
                    printf("dyld_all_image_info_addr: 0x%08x   dyld_all_image_info_size: 0x%08x\n",
                            thread->task->all_image_info_addr, thread->task->all_image_info_size);
                } else {
                    /*
                     * Retry execution of instruction.
                     */
                    ml_set_interrupts_enabled(TRUE);
                    return;
                }
                break;
            }
        default:
            exception_type = EXC_BREAKPOINT;
            exception_subcode = 0;
            update_arm_exception_state(arm_ctx, exception_type);
            break;
        }
        /*
         * Unknown mode.
         */
    } else {
        panic("sleh_abort: Abort in unknown mode, cpsr: 0x%08x\n", cpsr);
    }

    /*
     * If there was a user exception, handle it.
     */
    if (exception_type) {
        ml_set_interrupts_enabled(TRUE);
        doexception(exception_type, exception_subcode, 0);
    }

    /*
     * Done.
     */
    return;
}

/**
 * irq_handler
 *
 * Handle IRQs and pass them over to the platform expert.
 */
boolean_t irq_handler(void *context)
{
    /*
     * Raise the CPU interrupt level for the duration of the handler.
     */
    cpu_data_t *datap = current_cpu_datap();
    assert(datap);
    datap->cpu_interrupt_level++;
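    /*
     * cpu_interrupt_level is presumably what ml_at_interrupt_context()
     * consults; sleh_abort() treats an abort taken at interrupt context
     * as fatal.
     */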

    /*
     * Disable system preemption, dispatch the interrupt and go.
     */
    __disable_preemption();

    /*
     * Dispatch the interrupt.
     */
    boolean_t ret = pe_arm_dispatch_interrupt(context);

    /*
     * Drop the interrupt level and re-enable preemption.
     */
    datap->cpu_interrupt_level--;

    __enable_preemption();

    return ret;
}

void irq_iokit_dispatch(uint32_t irq)
{
    cpu_data_t *datap = current_cpu_datap();
    if (datap->handler) {
        datap->handler(datap->target, NULL, datap->nub, irq);
    }
}

/**
 * sleh_undef
 *
 * Handle undefined instructions and VFP usage.
 */
void sleh_undef(arm_saved_state_t * state)
{
    uint32_t cpsr, exception_type = 0, exception_subcode = 0;
    arm_saved_state_t *arm_ctx = (arm_saved_state_t *) state;
    thread_t thread = current_thread();

    if (!thread) {
        panic("sleh_undef: current thread is NULL\n");
    }

    /*
     * See if the exception was taken in kernel or user mode.
     */
    cpsr = arm_ctx->cpsr & 0x1F;

    /*
     * Kernel mode. (ARM Supervisor)
     */
    if (cpsr == 0x13) {
        /*
         * Fall through to bad kernel handler.
         */
        panic_context(0, (void *) arm_ctx,
                      "Kernel undefined instruction. (saved state 0x%08x)\n"
                      "r0: 0x%08x  r1: 0x%08x  r2: 0x%08x  r3: 0x%08x\n"
                      "r4: 0x%08x  r5: 0x%08x  r6: 0x%08x  r7: 0x%08x\n"
                      "r8: 0x%08x  r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
                      "r12: 0x%08x  sp: 0x%08x  lr: 0x%08x  pc: 0x%08x\n"
                      "cpsr: 0x%08x\n", arm_ctx, arm_ctx->r[0], arm_ctx->r[1],
                      arm_ctx->r[2], arm_ctx->r[3], arm_ctx->r[4],
                      arm_ctx->r[5], arm_ctx->r[6], arm_ctx->r[7],
                      arm_ctx->r[8], arm_ctx->r[9], arm_ctx->r[10],
                      arm_ctx->r[11], arm_ctx->r[12], arm_ctx->sp, arm_ctx->lr,
                      arm_ctx->pc, arm_ctx->cpsr);
    } else if (cpsr == 0x10) {
        vm_map_t map;
        uint32_t instruction, thumb_offset;
        /*
         * Get the current thread map.
         */
        map = thread->map;

        /*
         * Get the current instruction. Do not let the pmaps change.
         */
        spl_t spl = splhigh();
        thumb_offset = (arm_ctx->cpsr & (1 << 5)) ? 1 : 0;
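        /*
         * CPSR bit 5 (T) set means the faulting instruction was executed
         * in Thumb state.
         */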
        copyin((uint8_t *) (arm_ctx->pc + thumb_offset), &instruction,
               sizeof(uint32_t));
        splx(spl);

        /* I should really fix this properly... */

        /*
         * Check the instruction encoding to see if it's a coprocessor instruction.
         */
        instruction = OSSwapInt32(instruction);

        /*
         * dyld's faulting instruction. I really just need to redo all of the VFP
         * detection code, which will happen one day... I just hate myself for this.
         */
        if (instruction != 0xfedeffe7)
        {
            /*
             * Treat it as a VFP/NEON instruction.
             */
            thread->machine.vfp_dirty = 0;
            if (!thread->machine.vfp_enable) {
                /*
                 * Enable VFP.
                 */
                vfp_enable_exception(TRUE);
                vfp_context_load(&thread->machine.vfp_regs);
                /*
                 * Continue user execution.
                 */
                thread->machine.vfp_enable = TRUE;
            }
            return;
        }

        printf
            (ANSI_COLOR_RED "%s[%d]: " ANSI_COLOR_GREEN "usermode undefined instruction, EXC_BAD_INSTRUCTION at 0x%08x in map %p (pmap %p)" ANSI_COLOR_RESET "\n",
             proc_name_address(thread->task->bsd_info),
             proc_pid(thread->task->bsd_info), arm_ctx->pc, map, map->pmap);
        printf("Thread has ARM register state:\n"
               "    r0: 0x%08x  r1: 0x%08x  r2: 0x%08x  r3: 0x%08x\n"
               "    r4: 0x%08x  r5: 0x%08x  r6: 0x%08x  r7: 0x%08x\n"
               "    r8: 0x%08x  r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
               "   r12: 0x%08x  sp: 0x%08x  lr: 0x%08x  pc: 0x%08x\n"
               "  cpsr: 0x%08x\n", arm_ctx->r[0], arm_ctx->r[1], arm_ctx->r[2],
               arm_ctx->r[3], arm_ctx->r[4], arm_ctx->r[5], arm_ctx->r[6],
               arm_ctx->r[7], arm_ctx->r[8], arm_ctx->r[9], arm_ctx->r[10],
               arm_ctx->r[11], arm_ctx->r[12], arm_ctx->sp, arm_ctx->lr,
               arm_ctx->pc, arm_ctx->cpsr);
        printf("dyld_all_image_info_addr: 0x%08x   dyld_all_image_info_size: 0x%08x\n",
            thread->task->all_image_info_addr, thread->task->all_image_info_size);

        /*
         * xxx gate
         */
        exception_type = EXC_BAD_INSTRUCTION;
        exception_subcode = 0;
        update_arm_exception_state(arm_ctx, exception_type);
    } else if (cpsr == 0x17) {
        panic("sleh_undef: undefined instruction in abort mode");
    }

    /*
     * If there was a user exception, handle it.
     */
    if (exception_type) {
        ml_set_interrupts_enabled(TRUE);
        doexception(exception_type, exception_subcode, 0);
    }

    /*
     * Done.
     */
    return;
}