#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef __KERNEL__

#define CPU_ARCH_UNKNOWN	0
#define CPU_ARCH_ARMv3		1
#define CPU_ARCH_ARMv4		2
#define CPU_ARCH_ARMv4T		3
#define CPU_ARCH_ARMv5		4
#define CPU_ARCH_ARMv5T		5
#define CPU_ARCH_ARMv5TE	6
#define CPU_ARCH_ARMv5TEJ	7
#define CPU_ARCH_ARMv6		8
#define CPU_ARCH_ARMv7		9

/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M	(1 << 0)	/* MMU enable				*/
#define CR_A	(1 << 1)	/* Alignment abort enable		*/
#define CR_C	(1 << 2)	/* Dcache enable			*/
#define CR_W	(1 << 3)	/* Write buffer enable			*/
#define CR_P	(1 << 4)	/* 32-bit exception handler		*/
#define CR_D	(1 << 5)	/* 32-bit data address range		*/
#define CR_L	(1 << 6)	/* Implementation defined		*/
#define CR_B	(1 << 7)	/* Big endian				*/
#define CR_S	(1 << 8)	/* System MMU protection		*/
#define CR_R	(1 << 9)	/* ROM MMU protection			*/
#define CR_F	(1 << 10)	/* Implementation defined		*/
#define CR_Z	(1 << 11)	/* Implementation defined		*/
#define CR_I	(1 << 12)	/* Icache enable			*/
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/
#define CR_L4	(1 << 15)	/* LDR pc can set T bit			*/
#define CR_DT	(1 << 16)
#define CR_IT	(1 << 18)
#define CR_ST	(1 << 19)
#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode)	*/
#define CR_U	(1 << 22)	/* Unaligned access operation		*/
#define CR_XP	(1 << 23)	/* Extended page tables			*/
#define CR_VE	(1 << 24)	/* Vectored interrupts			*/
#define CR_EE	(1 << 25)	/* Exception (Big) Endian		*/
#define CR_TRE	(1 << 28)	/* TEX remap enable			*/
#define CR_AFE	(1 << 29)	/* Access flag enable			*/
#define CR_TE	(1 << 30)	/* Thumb exception enable		*/

#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"

#ifndef __ASSEMBLY__

#include <linux/linkage.h>
#include <linux/irqflags.h>

#include <asm/outercache.h>

#if defined(CONFIG_BUZZZ_FUNC)
#ifndef __always_inline__
#define __always_inline__	inline __attribute__((always_inline)) __attribute__((no_instrument_function))
#endif
#else	/* !CONFIG_BUZZZ_FUNC */
#ifndef __always_inline__
#define __always_inline__	inline
#endif
#endif	/* !CONFIG_BUZZZ_FUNC */

#define __exception	__attribute__((section(".exception.text")))

struct thread_info;
struct task_struct;

/* information about the system we're running on */
extern unsigned int system_rev;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;

struct pt_regs;

void die(const char *msg, struct pt_regs *regs, int err);

struct siginfo;
void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
		unsigned long err, unsigned long trap);

void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
					struct pt_regs *),
		     int sig, int code, const char *name);

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
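/*
 * Illustrative note (not part of the original header): xchg() atomically
 * stores a new value into *ptr and returns the previous contents, with
 * full barrier semantics supplied by __xchg() below.  A hypothetical
 * flag word could be claimed with, e.g.:
 *
 *	static unsigned long flag;
 *	...
 *	unsigned long was_set = xchg(&flag, 1UL);
 *
 * Only 1- and 4-byte operands are supported; any other size ends up in
 * the __bad_xchg() link-time error path.
 */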
extern asmlinkage void __backtrace(void);
extern asmlinkage void c_backtrace(unsigned long fp, int pmode);

struct mm_struct;
extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);

extern int cpu_architecture(void);
extern void cpu_init(void);

void arm_machine_restart(char mode, const char *cmd);
extern void (*arm_pm_restart)(char str, const char *cmd);

#define UDBG_UNDEFINED	(1 << 0)
#define UDBG_SYSCALL	(1 << 1)
#define UDBG_BADABORT	(1 << 2)
#define UDBG_SEGV	(1 << 3)
#define UDBG_BUS	(1 << 4)

extern unsigned int user_debug;

#if __LINUX_ARM_ARCH__ >= 4
#define vectors_high()	(cr_alignment & CR_V)
#else
#define vectors_high()	(0)
#endif

#if __LINUX_ARM_ARCH__ >= 7
#define isb() __asm__ __volatile__ ("isb" : : : "memory")
#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
				    : : "r" (0) : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
				    : : "r" (0) : "memory")
#elif defined(CONFIG_CPU_FA526)
#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
				    : : "r" (0) : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#else
#define isb() __asm__ __volatile__ ("" : : : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#endif

#ifdef CONFIG_ARCH_HAS_BARRIERS
#include <mach/barriers.h>
#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
#define mb()		do { dsb(); outer_sync(); } while (0)
#define rmb()		dmb()
#define wmb()		mb()
#else
#define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define wmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#endif

#ifndef CONFIG_SMP
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#else
#define smp_mb()	dmb()
#define smp_rmb()	dmb()
#define smp_wmb()	dmb()
#endif

#define read_barrier_depends()		do { } while (0)
#define smp_read_barrier_depends()	do { } while (0)

#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
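/*
 * Illustrative note (not part of the original header): the smp_*()
 * barriers only order accesses as seen by other CPUs and collapse to
 * plain compiler barriers on UP builds.  A typical producer/consumer
 * pairing with hypothetical variables "data" and "ready" would be:
 *
 *	CPU 0				CPU 1
 *	data = 42;			if (ready) {
 *	smp_wmb();				smp_rmb();
 *	ready = 1;				use(data);
 *					}
 *
 * The writer's smp_wmb() must pair with the reader's smp_rmb() for the
 * ordering guarantee to hold.
 */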
extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
extern unsigned long cr_alignment;	/* defined in entry-armv.S */

static __always_inline__ unsigned int get_cr(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
	return val;
}

static __always_inline__ void set_cr(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR"
	  : : "r" (val) : "cc");
	isb();
}

#ifndef CONFIG_SMP
extern void adjust_cr(unsigned long mask, unsigned long set);
#endif

#define CPACC_FULL(n)		(3 << (n * 2))
#define CPACC_SVC(n)		(1 << (n * 2))
#define CPACC_DISABLE(n)	(0 << (n * 2))

static __always_inline__ unsigned int get_copro_access(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
	  : "=r" (val) : : "cc");
	return val;
}

static __always_inline__ void set_copro_access(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
	  : : "r" (val) : "cc");
	isb();
}
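/*
 * Illustrative note (not part of the original header): the CPACC_*
 * masks are intended to be combined with a read-modify-write of the
 * coprocessor access register.  Granting full access to coprocessors
 * 10 and 11 (VFP), for example, could look like:
 *
 *	unsigned int access = get_copro_access();
 *	set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
 *
 * set_copro_access() already performs the isb() needed before the new
 * access rights are guaranteed to apply to following instructions.
 */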
/*
 * switch_mm() may do a full cache flush over the context switch,
 * so enable interrupts over the context switch to avoid high
 * latency.
 */
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW

/*
 * switch_to(prev, next) should switch from task `prev' to `next';
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last) \
do { \
	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
} while (0)

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif

static __always_inline__ unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif

	smp_mb();

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
	case 1:
		asm volatile("@ __xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@ __xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;

	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@ __xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@ __xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		__bad_xchg(ptr, size), ret = 0;
		break;
	}
	smp_mb();

	return ret;
}

extern void disable_hlt(void);
extern void enable_hlt(void);

#include <asm-generic/cmpxchg-local.h>

#if __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_SMP
#error "SMP is not supported on this platform"
#endif

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n) \
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o), \
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif

#else	/* __LINUX_ARM_ARCH__ >= 6 */

extern void __bad_cmpxchg(volatile void *ptr, int size);

/*
 * cmpxchg only supports 32-bit operands on ARMv6.
 */

static __always_inline__ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long oldval, res;

	switch (size) {
#ifdef CONFIG_CPU_32v6K
	case 1:
		do {
			asm volatile("@ __cmpxchg1\n"
			"	ldrexb	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexbeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	case 2:
		do {
			asm volatile("@ __cmpxchg2\n"
			"	ldrexh	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexheq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
#endif /* CONFIG_CPU_32v6K */
	case 4:
		do {
			asm volatile("@ __cmpxchg4\n"
			"	ldrex	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	default:
		__bad_cmpxchg(ptr, size);
		oldval = 0;
	}

	return oldval;
}

static __always_inline__ unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
					 unsigned long new, int size)
{
	unsigned long ret;

	smp_mb();
	ret = __cmpxchg(ptr, old, new, size);
	smp_mb();

	return ret;
}

#define cmpxchg(ptr,o,n) \
	((__typeof__(*(ptr)))__cmpxchg_mb((ptr), \
					  (unsigned long)(o), \
					  (unsigned long)(n), \
					  sizeof(*(ptr))))
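/*
 * Illustrative note (not part of the original header): cmpxchg() only
 * stores the new value if *ptr still holds the expected old value, and
 * always returns what *ptr contained at the time.  A lock-free update
 * of a hypothetical counter would retry until the compare succeeds:
 *
 *	unsigned long old, new;
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 *
 * __cmpxchg_mb() above supplies the full barriers; cmpxchg_local()
 * below is the variant that is only atomic with respect to the
 * current CPU.
 */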
static __always_inline__ unsigned long __cmpxchg_local(volatile void *ptr,
				      unsigned long old,
				      unsigned long new, int size)
{
	unsigned long ret;

	switch (size) {
#ifndef CONFIG_CPU_32v6K
	case 1:
	case 2:
		ret = __cmpxchg_local_generic(ptr, old, new, size);
		break;
#endif	/* !CONFIG_CPU_32v6K */
	default:
		ret = __cmpxchg(ptr, old, new, size);
	}

	return ret;
}

#define cmpxchg_local(ptr,o,n) \
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), \
				       (unsigned long)(o), \
				       (unsigned long)(n), \
				       sizeof(*(ptr))))

#ifdef CONFIG_CPU_32v6K

/*
 * Note: ARMv7-M (currently unsupported by Linux) does not support
 * ldrexd/strexd. If ARMv7-M is ever supported by the Linux kernel, it should
 * not be allowed to use __cmpxchg64.
 */
static __always_inline__ unsigned long long __cmpxchg64(volatile void *ptr,
							 unsigned long long old,
							 unsigned long long new)
{
	register unsigned long long oldval asm("r0");
	register unsigned long long __old asm("r2") = old;
	register unsigned long long __new asm("r4") = new;
	unsigned long res;

	do {
		asm volatile(
		"	@ __cmpxchg8\n"
		"	ldrexd	%1, %H1, [%2]\n"
		"	mov	%0, #0\n"
		"	teq	%1, %3\n"
		"	teqeq	%H1, %H3\n"
		"	strexdeq %0, %4, %H4, [%2]\n"
			: "=&r" (res), "=&r" (oldval)
			: "r" (ptr), "Ir" (__old), "r" (__new)
			: "memory", "cc");
	} while (res);

	return oldval;
}

static __always_inline__ unsigned long long __cmpxchg64_mb(volatile void *ptr,
							    unsigned long long old,
							    unsigned long long new)
{
	unsigned long long ret;

	smp_mb();
	ret = __cmpxchg64(ptr, old, new);
	smp_mb();

	return ret;
}

#define cmpxchg64(ptr,o,n) \
	((__typeof__(*(ptr)))__cmpxchg64_mb((ptr), \
					(unsigned long long)(o), \
					(unsigned long long)(n)))

#define cmpxchg64_local(ptr,o,n) \
	((__typeof__(*(ptr)))__cmpxchg64((ptr), \
					(unsigned long long)(o), \
					(unsigned long long)(n)))

#else /* !CONFIG_CPU_32v6K */

#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#endif	/* CONFIG_CPU_32v6K */

#endif	/* __LINUX_ARM_ARCH__ >= 6 */

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif