/*
 * linux/arch/m68knommu/platform/5307/entry.S
 *
 * Copyright (C) 1999-2007, Greg Ungerer (gerg@snapgear.com)
 * Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>,
 *                    Kenneth Albanowski <kjahds@kjahds.com>,
 * Copyright (C) 2000 Lineo Inc. (www.lineo.com)
 * Copyright (C) 2004-2006 Macq Electronique SA. (www.macqel.com)
 *
 * Based on:
 *
 * linux/arch/m68k/kernel/entry.S
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file README.legal in the main directory of this archive
 * for more details.
 *
 * Linux/m68k support by Hamish Macdonald
 *
 * 68060 fixes by Jesper Skov
 * ColdFire support by Greg Ungerer (gerg@snapgear.com)
 * 5307 fixes by David W. Miller
 * linux 2.4 support David McCullough <davidm@snapgear.com>
 * Bug, speed and maintainability fixes by Philippe De Muyter <phdm@macqel.be>
 */

#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>

.bss

sw_ksp:					/* saved kernel stack pointer (written in Lreturn) */
.long	0

sw_usp:					/* saved user stack pointer (read/written around rte) */
.long	0

.text

.globl system_call
.globl resume
.globl ret_from_exception
.globl ret_from_signal
.globl sys_call_table
.globl ret_from_interrupt
.globl inthandler
.globl fasthandler

/*
 * Out-of-range or unassigned syscall number: substitute sys_ni_syscall
 * as the handler and rejoin the common dispatch path below.
 */
enosys:
	mov.l	#sys_ni_syscall,%d3
	bra	1f

/*
 * System call entry point.  On entry %d0 holds the syscall number.
 * The handler address is looked up in sys_call_table and left in %d3;
 * the dispatch below picks the plain or the traced (strace) call path
 * depending on TIF_SYSCALL_TRACE.
 */
ENTRY(system_call)
	SAVE_ALL
	move	#0x2000,%sr		/* enable intrs again */

	cmpl	#NR_syscalls,%d0	/* number out of range? */
	jcc	enosys
	lea	sys_call_table,%a0
	lsll	#2,%d0			/* movel %a0@(%d0:l:4),%d3 */
	movel	%a0@(%d0),%d3
	jeq	enosys			/* NULL table slot -> not implemented */

1:
	movel	%sp,%d2			/* get thread_info pointer */
	andl	#-THREAD_SIZE,%d2	/* at start of kernel stack */
	movel	%d2,%a0
	movel	%a0@,%a1		/* save top of frame */
	movel	%sp,%a1@(TASK_THREAD+THREAD_ESP0)
	/* test TIF_SYSCALL_TRACE in the thread_info flags byte that holds it */
	btst	#(TIF_SYSCALL_TRACE%8),%a0@(TI_FLAGS+(31-TIF_SYSCALL_TRACE)/8)
	bnes	1f			/* traced task -> syscall_trace path */

	movel	%d3,%a0
	jbsr	%a0@			/* call the syscall handler */
	movel	%d0,%sp@(PT_OFF_D0)	/* save the return value */
	jra	ret_from_exception
1:
	movel	#-ENOSYS,%d2		/* strace needs -ENOSYS in PT_OFF_D0 */
	movel	%d2,PT_OFF_D0(%sp)	/* on syscall entry */
	subql	#4,%sp			/* dummy return address for the switch stack */
	SAVE_SWITCH_STACK
	jbsr	syscall_trace		/* notify tracer: syscall entry */
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	movel	%d3,%a0
	jbsr	%a0@			/* call the syscall handler */
	movel	%d0,%sp@(PT_OFF_D0)	/* save the return value */
	subql	#4,%sp			/* dummy return address */
	SAVE_SWITCH_STACK
	jbsr	syscall_trace		/* notify tracer: syscall exit;
					 * falls through into ret_from_signal */

ret_from_signal:
	RESTORE_SWITCH_STACK
	addql	#4,%sp			/* drop dummy return address */

ret_from_exception:
	move	#0x2700,%sr		/* disable intrs */
	btst	#5,%sp@(PT_OFF_SR)	/* test supervisor bit of saved SR */
	jeq	Luser_return		/* bit clear -> returning to user space:
					 * check resched/signals; otherwise fall
					 * through to the kernel return path */

#ifdef CONFIG_PREEMPT
	movel	%sp,%d1			/* get thread_info pointer */
	andl	#-THREAD_SIZE,%d1	/* at base of kernel stack */
	movel	%d1,%a0
	movel	%a0@(TI_FLAGS),%d1	/* get thread_info->flags */
	andl	#_TIF_NEED_RESCHED,%d1
	jeq	Lkernel_return		/* no resched pending */

	movel	%a0@(TI_PREEMPTCOUNT),%d1
	cmpl	#0,%d1			/* preemption disabled? */
	jne	Lkernel_return

	pea	Lkernel_return		/* return here after preemption */
	jmp	preempt_schedule_irq	/* preempt the kernel */
#endif

Lkernel_return:
	/* unwind the pt_regs frame saved by SAVE_ALL and return */
	moveml	%sp@,%d1-%d5/%a0-%a2
	lea	%sp@(32),%sp		/* space for 8 regs */
	movel	%sp@+,%d0
	addql	#4,%sp			/* orig d0 */
	addl	%sp@+,%sp		/* stk adj */
	rte

Luser_return:
	movel	%sp,%d1			/* get thread_info pointer */
	andl	#-THREAD_SIZE,%d1	/* at base of kernel stack */
	movel	%d1,%a0
	movel	%a0@(TI_FLAGS),%d1	/* get thread_info->flags */
	andl	#_TIF_WORK_MASK,%d1
	jne	Lwork_to_do		/* still work to do */

Lreturn:
	/*
	 * Return to user space: copy the hardware exception frame (PC and
	 * format/vector/SR) onto the user stack, unwind the pt_regs frame,
	 * record the kernel stack pointer and switch to the user stack
	 * before executing rte.
	 */
	move	#0x2700,%sr		/* disable intrs */
	movel	sw_usp,%a0		/* get usp */
	movel	%sp@(PT_OFF_PC),%a0@-	/* copy exception program counter */
	movel	%sp@(PT_OFF_FORMATVEC),%a0@- /* copy exception format/vector/sr */
	moveml	%sp@,%d1-%d5/%a0-%a2
	lea	%sp@(32),%sp		/* space for 8 regs */
	movel	%sp@+,%d0
	addql	#4,%sp			/* orig d0 */
	addl	%sp@+,%sp		/* stk adj */
	addql	#8,%sp			/* remove exception */
	movel	%sp,sw_ksp		/* save ksp */
	subql	#8,sw_usp		/* set exception */
	movel	sw_usp,%sp		/* restore usp */
	rte

Lwork_to_do:
	movel	%a0@(TI_FLAGS),%d1	/* get thread_info->flags */
	move	#0x2000,%sr		/* enable intrs again */
	btst	#TIF_NEED_RESCHED,%d1
	jne	reschedule

	/* GERG: do we need something here for TRACEing?? */

Lsignal_return:
	subql	#4,%sp			/* dummy return address */
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)	/* arg 2: regs */
	clrl	%sp@-			/* arg 1: oldset = NULL */
	jsr	do_signal
	addql	#8,%sp			/* pop the two args */
	RESTORE_SWITCH_STACK
	addql	#4,%sp			/* drop dummy return address */
	jmp	Lreturn

/*
 * This is the generic interrupt handler (for all hardware interrupt
 * sources). Calls upto high level code to do all the work.
 */
ENTRY(inthandler)
	SAVE_ALL
	moveq	#-1,%d0			/* not a syscall: orig_d0 = -1 */
	movel	%d0,%sp@(PT_OFF_ORIG_D0)

	movew	%sp@(PT_OFF_FORMATVEC),%d0 /* put exception # in d0 */
	andl	#0x03fc,%d0		/* mask out vector only */

	movel	%sp,%sp@-		/* push regs arg */
	lsrl	#2,%d0			/* calculate real vector # */
	movel	%d0,%sp@-		/* push vector number */
	jbsr	do_IRQ			/* call high level irq handler */
	lea	%sp@(8),%sp		/* pop args off stack */

	bra	ret_from_interrupt	/* this was fallthrough */

/*
 * This is the fast interrupt handler (for certain hardware interrupt
 * sources). Unlike the normal interrupt handler it just uses the
 * current stack (doesn't care if it is user or kernel). It also
 * doesn't bother doing the bottom half handlers.
 */
ENTRY(fasthandler)
	SAVE_LOCAL

	movew	%sp@(PT_OFF_FORMATVEC),%d0 /* put exception # in d0 */
	andl	#0x03fc,%d0		/* mask out vector only */

	movel	%sp,%sp@-		/* push regs arg */
	lsrl	#2,%d0			/* calculate real vector # */
	movel	%d0,%sp@-		/* push vector number */
	jbsr	do_IRQ			/* call high level irq handler */
	lea	%sp@(8),%sp		/* pop args off stack */

	RESTORE_LOCAL			/* falls through into ret_from_interrupt */

ENTRY(ret_from_interrupt)
	/* the fasthandler is confusing me, haven't seen any user */
	jmp	ret_from_exception

/*
 * Beware - when entering resume, prev (the current task) is
 * in a0, next (the new task) is in a1,so don't change these
 * registers until their contents are no longer needed.
 * This is always called in supervisor mode, so don't bother to save
 * and restore sr; user's process sr is actually in the stack.
 */
ENTRY(resume)
	movel	%a0, %d1		/* get prev thread in d1 */

	movel	sw_usp,%d0		/* save usp */
	movel	%d0,%a0@(TASK_THREAD+THREAD_USP)

	SAVE_SWITCH_STACK
	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new thread stack */
	RESTORE_SWITCH_STACK

	movel	%a1@(TASK_THREAD+THREAD_USP),%a0 /* restore thread user stack */
	movel	%a0, sw_usp
	rts