/*
 * arch/xtensa/kernel/align.S
 *
 * Handle unalignment exceptions in kernel space.
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file "COPYING" in the main directory of
 * this archive for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica, Inc.
 *
 * Rewritten by Chris Zankel <chris@zankel.net>
 *
 * Based on work from Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 * and Marc Gauthier <marc@tensilica.com, marc@alimni.uwaterloo.ca>
 */

#include <linux/linkage.h>
#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>

#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION

/* First-level exception handler for unaligned exceptions.
 *
 * Note: This handler works only for kernel exceptions.  Unaligned user
 *       access should get a seg fault.
 */

/* Big and little endian 16-bit values are located in
 * different halves of a register.  HWORD_START helps to
 * abstract the notion of extracting a 16-bit value from a
 * register.
 * We also have to define new shifting instructions because
 * lsb and msb are on 'opposite' ends in a register for
 * different endian machines.
 *
 * Assume a memory region in ascending address:
 *	0 1 2 3|4 5 6 7
 *
 * When loading one word into a register, the content of that register is:
 *  LE	3 2 1 0, 7 6 5 4
 *  BE	0 1 2 3, 4 5 6 7
 *
 * Masking the bits of the higher/lower address means:
 *  LE	X X 0 0, 0 0 X X
 *  BE	0 0 X X, X X 0 0
 *
 * Shifting to higher/lower addresses means:
 *  LE	shift left / shift right
 *  BE	shift right / shift left
 *
 * Extracting 16 bits from a 32 bit reg. value to higher/lower address means:
 *  LE	mask 0 0 X X / shift left
 *  BE	shift left / mask 0 0 X X
 */

#define UNALIGNED_USER_EXCEPTION

#if XCHAL_HAVE_BE

#define HWORD_START	16
#define INSN_OP0	28
#define INSN_T		24
#define INSN_OP1	16

.macro __src_b	r, w0, w1;	src	\r, \w0, \w1;	.endm
.macro __ssa8	r;		ssa8b	\r;		.endm
.macro __ssa8r	r;		ssa8l	\r;		.endm
.macro __sh	r, s;		srl	\r, \s;		.endm
.macro __sl	r, s;		sll	\r, \s;		.endm
.macro __exth	r, s;		extui	\r, \s, 0, 16;	.endm
.macro __extl	r, s;		slli	\r, \s, 16;	.endm

#else

#define HWORD_START	0
#define INSN_OP0	0
#define INSN_T		4
#define INSN_OP1	12

.macro __src_b	r, w0, w1;	src	\r, \w1, \w0;	.endm
.macro __ssa8	r;		ssa8l	\r;		.endm
.macro __ssa8r	r;		ssa8b	\r;		.endm
.macro __sh	r, s;		sll	\r, \s;		.endm
.macro __sl	r, s;		srl	\r, \s;		.endm
.macro __exth	r, s;		slli	\r, \s, 16;	.endm
.macro __extl	r, s;		extui	\r, \s, 0, 16;	.endm

#endif


#define OP0_L32I_N	0x8		/* load immediate narrow */
#define OP0_S32I_N	0x9		/* store immediate narrow */
#define OP1_SI_MASK	0x4		/* OP1 bit set for stores */
#define OP1_SI_BIT	2		/* OP1 bit number for stores */

#define OP1_L32I	0x2
#define OP1_L16UI	0x1
#define OP1_L16SI	0x9
#define OP1_L32AI	0xb

#define OP1_S32I	0x6
#define OP1_S16I	0x5
#define OP1_S32RI	0xf

/*
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */


ENTRY(fast_unaligned)
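
	/* Handler outline: fetch the faulting instruction from the two
	 * aligned words around EPC_1, decode its OP0/OP1 fields to tell
	 * loads from stores and 16-bit from 32-bit accesses, emulate the
	 * access with two aligned word accesses around EXCVADDR, advance
	 * EPC_1 past the instruction (honoring zero-overhead loops), and
	 * return with rfe.  Source and target registers are reached
	 * through the 8-byte-per-entry jump tables further down.
	 */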
	/* Note: We don't expect the address to be aligned on a word
	 *       boundary.  After all, the processor generated that exception
	 *       and it would be a hardware fault.
	 */

	/* Save some working registers */

	s32i	a4, a2, PT_AREG4
	s32i	a5, a2, PT_AREG5
	s32i	a6, a2, PT_AREG6
	s32i	a7, a2, PT_AREG7
	s32i	a8, a2, PT_AREG8

	rsr	a0, DEPC
	xsr	a3, EXCSAVE_1
	s32i	a0, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3

	/* Keep value of SAR in a0 */

	rsr	a0, SAR
	rsr	a8, EXCVADDR		# load unaligned memory address

	/* Now, identify one of the following load/store instructions.
	 *
	 * The only possible danger of a double exception on the
	 * following l32i instructions is kernel code in vmalloc
	 * memory.  The processor was just executing at the EPC_1
	 * address, and indeed, already fetched the instruction.  That
	 * guarantees a TLB mapping, which hasn't been replaced by
	 * this unaligned exception handler that uses only static TLB
	 * mappings.  However, high-level interrupt handlers might
	 * modify TLB entries, so for the generic case, we register a
	 * TABLE_FIXUP handler here, too.
	 */

	/* a3...a6 saved on stack, a2 = SP */

	/* Extract the instruction that caused the unaligned access. */

	rsr	a7, EPC_1		# load exception address
	movi	a3, ~3
	and	a3, a3, a7		# mask lower bits

	l32i	a4, a3, 0		# load 2 words
	l32i	a5, a3, 4

	__ssa8	a7
	__src_b	a4, a4, a5		# a4 has the instruction

	/* Analyze the instruction (load or store?). */

	extui	a5, a4, INSN_OP0, 4	# get insn.op0 nibble

#if XCHAL_HAVE_DENSITY
	_beqi	a5, OP0_L32I_N, .Lload	# L32I.N, jump
	addi	a6, a5, -OP0_S32I_N
	_beqz	a6, .Lstore		# S32I.N, do a store
#endif
	/* 'store indicator bit' not set, jump */
	_bbci.l	a4, OP1_SI_BIT + INSN_OP1, .Lload

	/* Store: Jump to table entry to get the value in the source register. */

.Lstore:movi	a5, .Lstore_table	# table
	extui	a6, a4, INSN_T, 4	# get source register
	addx8	a5, a6, a5
	jx	a5			# jump into table

	/* Invalid instruction, CRITICAL! */
.Linvalid_instruction_load:
	j	.Linvalid_instruction

	/* Load: Load memory address. */

.Lload:	movi	a3, ~3
	and	a3, a3, a8		# align memory address

	__ssa8	a8
#ifdef UNALIGNED_USER_EXCEPTION
	addi	a3, a3, 8
	l32e	a5, a3, -8
	l32e	a6, a3, -4
#else
	l32i	a5, a3, 0
	l32i	a6, a3, 4
#endif
	__src_b	a3, a5, a6		# a3 has the data word

#if XCHAL_HAVE_DENSITY
	addi	a7, a7, 2		# increment PC (assume 16-bit insn)

	extui	a5, a4, INSN_OP0, 4
	_beqi	a5, OP0_L32I_N, 1f	# l32i.n: jump

	addi	a7, a7, 1
#else
	addi	a7, a7, 3
#endif

	extui	a5, a4, INSN_OP1, 4
	_beqi	a5, OP1_L32I, 1f	# l32i: jump

	extui	a3, a3, 0, 16		# extract lower 16 bits
	_beqi	a5, OP1_L16UI, 1f
	addi	a5, a5, -OP1_L16SI
	_bnez	a5, .Linvalid_instruction_load

	/* sign extend value */

	slli	a3, a3, 16
	srai	a3, a3, 16
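
	/* Example: for an l16si of the halfword 0x8001 (-32767), a3 is
	 * 0x00008001 here, 0x80010000 after the slli and 0xffff8001
	 * after the srai, i.e. the value sign-extended to 32 bits.
	 */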
	/* Set target register. */

1:

#if XCHAL_HAVE_LOOPS
	rsr	a5, LEND		# check if we reached LEND
	bne	a7, a5, 1f
	rsr	a5, LCOUNT		# and LCOUNT != 0
	beqz	a5, 1f
	addi	a5, a5, -1		# decrement LCOUNT and set
	rsr	a7, LBEG		# set PC to LBEGIN
	wsr	a5, LCOUNT
#endif

1:	wsr	a7, EPC_1		# skip load instruction
	extui	a4, a4, INSN_T, 4	# extract target register
	movi	a5, .Lload_table
	addx8	a4, a4, a5
	jx	a4			# jump to entry for target register

	.align	8
.Lload_table:
	s32i	a3, a2, PT_AREG0;	_j .Lexit;	.align 8
	mov	a1, a3;			_j .Lexit;	.align 8 # fishy??
	s32i	a3, a2, PT_AREG2;	_j .Lexit;	.align 8
	s32i	a3, a2, PT_AREG3;	_j .Lexit;	.align 8
	s32i	a3, a2, PT_AREG4;	_j .Lexit;	.align 8
	s32i	a3, a2, PT_AREG5;	_j .Lexit;	.align 8
	s32i	a3, a2, PT_AREG6;	_j .Lexit;	.align 8
	s32i	a3, a2, PT_AREG7;	_j .Lexit;	.align 8
	s32i	a3, a2, PT_AREG8;	_j .Lexit;	.align 8
	mov	a9, a3		;	_j .Lexit;	.align 8
	mov	a10, a3		;	_j .Lexit;	.align 8
	mov	a11, a3		;	_j .Lexit;	.align 8
	mov	a12, a3		;	_j .Lexit;	.align 8
	mov	a13, a3		;	_j .Lexit;	.align 8
	mov	a14, a3		;	_j .Lexit;	.align 8
	mov	a15, a3		;	_j .Lexit;	.align 8

.Lstore_table:
	l32i	a3, a2, PT_AREG0;	_j 1f;	.align 8
	mov	a3, a1;			_j 1f;	.align 8 # fishy??
	l32i	a3, a2, PT_AREG2;	_j 1f;	.align 8
	l32i	a3, a2, PT_AREG3;	_j 1f;	.align 8
	l32i	a3, a2, PT_AREG4;	_j 1f;	.align 8
	l32i	a3, a2, PT_AREG5;	_j 1f;	.align 8
	l32i	a3, a2, PT_AREG6;	_j 1f;	.align 8
	l32i	a3, a2, PT_AREG7;	_j 1f;	.align 8
	l32i	a3, a2, PT_AREG8;	_j 1f;	.align 8
	mov	a3, a9	;	_j 1f;	.align 8
	mov	a3, a10	;	_j 1f;	.align 8
	mov	a3, a11	;	_j 1f;	.align 8
	mov	a3, a12	;	_j 1f;	.align 8
	mov	a3, a13	;	_j 1f;	.align 8
	mov	a3, a14	;	_j 1f;	.align 8
	mov	a3, a15	;	_j 1f;	.align 8
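
	/* The value to be stored is now in a3.  The code below advances
	 * the PC past the store instruction, builds the 16- or 32-bit
	 * mask in a6, and merges the value into the two aligned words
	 * that span the unaligned address.
	 */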
1:	# a7: instruction pointer, a4: instruction, a3: value

	movi	a6, 0			# mask: ffffffff:00000000

#if XCHAL_HAVE_DENSITY
	addi	a7, a7, 2		# incr. PC, assume 16-bit instruction

	extui	a5, a4, INSN_OP0, 4	# extract OP0
	addi	a5, a5, -OP0_S32I_N
	_beqz	a5, 1f			# s32i.n: jump

	addi	a7, a7, 1		# increment PC, 32-bit instruction
#else
	addi	a7, a7, 3		# increment PC, 32-bit instruction
#endif

	extui	a5, a4, INSN_OP1, 4	# extract OP1
	_beqi	a5, OP1_S32I, 1f	# jump if 32 bit store
	_bnei	a5, OP1_S16I, .Linvalid_instruction_store

	movi	a5, -1
	__extl	a3, a3			# get 16-bit value
	__exth	a6, a5			# get 16-bit mask ffffffff:ffff0000

	/* Get memory address */

1:
#if XCHAL_HAVE_LOOPS
	rsr	a4, LEND		# check if we reached LEND
	bne	a7, a4, 1f
	rsr	a4, LCOUNT		# and LCOUNT != 0
	beqz	a4, 1f
	addi	a4, a4, -1		# decrement LCOUNT and set
	rsr	a7, LBEG		# set PC to LBEGIN
	wsr	a4, LCOUNT
#endif

1:	wsr	a7, EPC_1		# skip store instruction
	movi	a4, ~3
	and	a4, a4, a8		# align memory address

	/* Insert value into memory */

	movi	a5, -1			# mask: ffffffff:XXXX0000
#ifdef UNALIGNED_USER_EXCEPTION
	addi	a4, a4, 8
#endif

	__ssa8r	a8
	__src_b	a7, a5, a6		# lo-mask  F..F0..0 (BE) 0..0F..F (LE)
	__src_b	a6, a6, a5		# hi-mask  0..0F..F (BE) F..F0..0 (LE)
#ifdef UNALIGNED_USER_EXCEPTION
	l32e	a5, a4, -8
#else
	l32i	a5, a4, 0		# load lower address word
#endif
	and	a5, a5, a7		# mask
	__sh	a7, a3			# shift value
	or	a5, a5, a7		# or with original value
#ifdef UNALIGNED_USER_EXCEPTION
	s32e	a5, a4, -8
	l32e	a7, a4, -4
#else
	s32i	a5, a4, 0		# store
	l32i	a7, a4, 4		# same for upper address word
#endif
	__sl	a5, a3
	and	a6, a7, a6
	or	a6, a6, a5
#ifdef UNALIGNED_USER_EXCEPTION
	s32e	a6, a4, -4
#else
	s32i	a6, a4, 4
#endif

	/* Done.  Restore stack and return. */

.Lexit:
	movi	a4, 0
	rsr	a3, EXCSAVE_1
	s32i	a4, a3, EXC_TABLE_FIXUP

	/* Restore working registers */

	l32i	a8, a2, PT_AREG8
	l32i	a7, a2, PT_AREG7
	l32i	a6, a2, PT_AREG6
	l32i	a5, a2, PT_AREG5
	l32i	a4, a2, PT_AREG4
	l32i	a3, a2, PT_AREG3

	/* Restore SAR and return */

	wsr	a0, SAR
	l32i	a0, a2, PT_AREG0
	l32i	a2, a2, PT_AREG2
	rfe

	/* We cannot handle this exception. */

	.extern _kernel_exception
.Linvalid_instruction_store:
.Linvalid_instruction:

	/* Restore a4...a8 and SAR, set SP, and jump to default exception. */

	l32i	a8, a2, PT_AREG8
	l32i	a7, a2, PT_AREG7
	l32i	a6, a2, PT_AREG6
	l32i	a5, a2, PT_AREG5
	l32i	a4, a2, PT_AREG4
	wsr	a0, SAR
	mov	a1, a2

	rsr	a0, PS
	bbsi.l	a0, PS_UM_BIT, 1f	# jump if user mode (PS is in a0)

	movi	a0, _kernel_exception
	jx	a0

1:	movi	a0, _user_exception
	jx	a0


#endif /* XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION */