tc-arm.c revision 272519
1/* tc-arm.c -- Assemble for the ARM 2 Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 3 2004, 2005, 2006 4 Free Software Foundation, Inc. 5 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org) 6 Modified by David Taylor (dtaylor@armltd.co.uk) 7 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com) 8 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com) 9 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com) 10 11 This file is part of GAS, the GNU Assembler. 12 13 GAS is free software; you can redistribute it and/or modify 14 it under the terms of the GNU General Public License as published by 15 the Free Software Foundation; either version 2, or (at your option) 16 any later version. 17 18 GAS is distributed in the hope that it will be useful, 19 but WITHOUT ANY WARRANTY; without even the implied warranty of 20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 21 GNU General Public License for more details. 22 23 You should have received a copy of the GNU General Public License 24 along with GAS; see the file COPYING. If not, write to the Free 25 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA 26 02110-1301, USA. */ 27 28#include <limits.h> 29#include <stdarg.h> 30#define NO_RELOC 0 31#include "as.h" 32#include "safe-ctype.h" 33#include "subsegs.h" 34#include "obstack.h" 35 36#include "opcode/arm.h" 37 38#ifdef OBJ_ELF 39#include "elf/arm.h" 40#include "dw2gencfi.h" 41#endif 42 43#include "dwarf2dbg.h" 44 45#define WARN_DEPRECATED 1 46 47#ifdef OBJ_ELF 48/* Must be at least the size of the largest unwind opcode (currently two). */ 49#define ARM_OPCODE_CHUNK_SIZE 8 50 51/* This structure holds the unwinding state. */ 52 53static struct 54{ 55 symbolS * proc_start; 56 symbolS * table_entry; 57 symbolS * personality_routine; 58 int personality_index; 59 /* The segment containing the function. 
*/ 60 segT saved_seg; 61 subsegT saved_subseg; 62 /* Opcodes generated from this function. */ 63 unsigned char * opcodes; 64 int opcode_count; 65 int opcode_alloc; 66 /* The number of bytes pushed to the stack. */ 67 offsetT frame_size; 68 /* We don't add stack adjustment opcodes immediately so that we can merge 69 multiple adjustments. We can also omit the final adjustment 70 when using a frame pointer. */ 71 offsetT pending_offset; 72 /* These two fields are set by both unwind_movsp and unwind_setfp. They 73 hold the reg+offset to use when restoring sp from a frame pointer. */ 74 offsetT fp_offset; 75 int fp_reg; 76 /* Nonzero if an unwind_setfp directive has been seen. */ 77 unsigned fp_used:1; 78 /* Nonzero if the last opcode restores sp from fp_reg. */ 79 unsigned sp_restored:1; 80} unwind; 81 82/* Bit N indicates that an R_ARM_NONE relocation has been output for 83 __aeabi_unwind_cpp_prN already if set. This enables dependencies to be 84 emitted only once per section, to save unnecessary bloat. */ 85static unsigned int marked_pr_dependency = 0; 86 87#endif /* OBJ_ELF */ 88 89/* Results from operand parsing worker functions. */ 90 91typedef enum 92{ 93 PARSE_OPERAND_SUCCESS, 94 PARSE_OPERAND_FAIL, 95 PARSE_OPERAND_FAIL_NO_BACKTRACK 96} parse_operand_result; 97 98enum arm_float_abi 99{ 100 ARM_FLOAT_ABI_HARD, 101 ARM_FLOAT_ABI_SOFTFP, 102 ARM_FLOAT_ABI_SOFT 103}; 104 105/* Types of processor to assemble for. */ 106#ifndef CPU_DEFAULT 107#if defined __XSCALE__ 108#define CPU_DEFAULT ARM_ARCH_XSCALE 109#else 110#if defined __thumb__ 111#define CPU_DEFAULT ARM_ARCH_V5T 112#endif 113#endif 114#endif 115 116#ifndef FPU_DEFAULT 117# ifdef TE_LINUX 118# define FPU_DEFAULT FPU_ARCH_FPA 119# elif defined (TE_NetBSD) 120# ifdef OBJ_ELF 121# define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */ 122# else 123 /* Legacy a.out format. */ 124# define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. 
*/ 125# endif 126# elif defined (TE_VXWORKS) 127# define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */ 128# else 129 /* For backwards compatibility, default to FPA. */ 130# define FPU_DEFAULT FPU_ARCH_FPA 131# endif 132#endif /* ifndef FPU_DEFAULT */ 133 134#define streq(a, b) (strcmp (a, b) == 0) 135 136static arm_feature_set cpu_variant; 137static arm_feature_set arm_arch_used; 138static arm_feature_set thumb_arch_used; 139 140/* Flags stored in private area of BFD structure. */ 141static int uses_apcs_26 = FALSE; 142static int atpcs = FALSE; 143static int support_interwork = FALSE; 144static int uses_apcs_float = FALSE; 145static int pic_code = FALSE; 146 147/* Variables that we set while parsing command-line options. Once all 148 options have been read we re-process these values to set the real 149 assembly flags. */ 150static const arm_feature_set *legacy_cpu = NULL; 151static const arm_feature_set *legacy_fpu = NULL; 152 153static const arm_feature_set *mcpu_cpu_opt = NULL; 154static const arm_feature_set *mcpu_fpu_opt = NULL; 155static const arm_feature_set *march_cpu_opt = NULL; 156static const arm_feature_set *march_fpu_opt = NULL; 157static const arm_feature_set *mfpu_opt = NULL; 158static const arm_feature_set *object_arch = NULL; 159 160/* Constants for known architecture features. 
*/ 161static const arm_feature_set fpu_default = FPU_DEFAULT; 162static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1; 163static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2; 164static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3; 165static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1; 166static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA; 167static const arm_feature_set fpu_any_hard = FPU_ANY_HARD; 168static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK; 169static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE; 170 171#ifdef CPU_DEFAULT 172static const arm_feature_set cpu_default = CPU_DEFAULT; 173#endif 174 175static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0); 176static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V1, 0); 177static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0); 178static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0); 179static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0); 180static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0); 181static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0); 182static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0); 183static const arm_feature_set arm_ext_v4t_5 = 184 ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0); 185static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0); 186static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0); 187static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0); 188static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0); 189static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0); 190static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0); 191static const arm_feature_set arm_ext_v6z = ARM_FEATURE (ARM_EXT_V6Z, 0); 192static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 
0); 193static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0); 194static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0); 195static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0); 196static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0); 197static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0); 198static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0); 199 200static const arm_feature_set arm_arch_any = ARM_ANY; 201static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1); 202static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2; 203static const arm_feature_set arm_arch_none = ARM_ARCH_NONE; 204 205static const arm_feature_set arm_cext_iwmmxt2 = 206 ARM_FEATURE (0, ARM_CEXT_IWMMXT2); 207static const arm_feature_set arm_cext_iwmmxt = 208 ARM_FEATURE (0, ARM_CEXT_IWMMXT); 209static const arm_feature_set arm_cext_xscale = 210 ARM_FEATURE (0, ARM_CEXT_XSCALE); 211static const arm_feature_set arm_cext_maverick = 212 ARM_FEATURE (0, ARM_CEXT_MAVERICK); 213static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1); 214static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2); 215static const arm_feature_set fpu_vfp_ext_v1xd = 216 ARM_FEATURE (0, FPU_VFP_EXT_V1xD); 217static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1); 218static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2); 219static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3); 220static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1); 221static const arm_feature_set fpu_vfp_v3_or_neon_ext = 222 ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3); 223 224static int mfloat_abi_opt = -1; 225/* Record user cpu selection for object attributes. */ 226static arm_feature_set selected_cpu = ARM_ARCH_NONE; 227/* Must be long enough to hold any of the names in arm_cpus. 
*/ 228static char selected_cpu_name[16]; 229#ifdef OBJ_ELF 230# ifdef EABI_DEFAULT 231static int meabi_flags = EABI_DEFAULT; 232# else 233static int meabi_flags = EF_ARM_EABI_UNKNOWN; 234# endif 235 236bfd_boolean 237arm_is_eabi(void) 238{ 239 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4); 240} 241#endif 242 243#ifdef OBJ_ELF 244/* Pre-defined "_GLOBAL_OFFSET_TABLE_" */ 245symbolS * GOT_symbol; 246#endif 247 248/* 0: assemble for ARM, 249 1: assemble for Thumb, 250 2: assemble for Thumb even though target CPU does not support thumb 251 instructions. */ 252static int thumb_mode = 0; 253 254/* If unified_syntax is true, we are processing the new unified 255 ARM/Thumb syntax. Important differences from the old ARM mode: 256 257 - Immediate operands do not require a # prefix. 258 - Conditional affixes always appear at the end of the 259 instruction. (For backward compatibility, those instructions 260 that formerly had them in the middle, continue to accept them 261 there.) 262 - The IT instruction may appear, and if it does is validated 263 against subsequent conditional affixes. It does not generate 264 machine code. 265 266 Important differences from the old Thumb mode: 267 268 - Immediate operands do not require a # prefix. 269 - Most of the V6T2 instructions are only available in unified mode. 270 - The .N and .W suffixes are recognized and honored (it is an error 271 if they cannot be honored). 272 - All instructions set the flags if and only if they have an 's' affix. 273 - Conditional affixes may be used. They are validated against 274 preceding IT instructions. Unlike ARM mode, you cannot use a 275 conditional affix except in the scope of an IT instruction. 
 */

static bfd_boolean unified_syntax = FALSE;

/* Element types for Neon type specifiers (e.g. ".s16", ".f32").  */
enum neon_el_type
{
  NT_invtype,
  NT_untyped,
  NT_integer,
  NT_float,
  NT_poly,
  NT_signed,
  NT_unsigned
};

/* One parsed type element: kind plus bit width.  */
struct neon_type_el
{
  enum neon_el_type type;
  unsigned size;
};

/* Maximum number of type elements in one specifier (see parse_neon_type).  */
#define NEON_MAX_TYPE_ELS 4

/* A full parsed Neon type specifier, e.g. ".i32.i32.s16" -> 3 elements.  */
struct neon_type
{
  struct neon_type_el el[NEON_MAX_TYPE_ELS];
  unsigned elems;
};

/* Scratch record describing the instruction currently being assembled.
   The single global instance `inst' (below) is filled in by the parsers
   and read by the encoders; `error' holds the first diagnostic raised
   (see first_error and my_get_expression).  */
struct arm_it
{
  const char * error;
  unsigned long instruction;
  int size;
  int size_req;
  int cond;
  /* "uncond_value" is set to the value in place of the conditional field in
     unconditional versions of the instruction, or -1 if nothing is
     appropriate.  */
  int uncond_value;
  struct neon_type vectype;
  /* Set to the opcode if the instruction needs relaxation.
     Zero if the instruction is not relaxed.  */
  unsigned long relax;
  /* Relocation to emit for this instruction, if any.  */
  struct
  {
    bfd_reloc_code_real_type type;
    expressionS exp;
    int pc_rel;
  } reloc;

  /* Up to six parsed operands; flag bits record how each one was written.  */
  struct
  {
    unsigned reg;
    signed int imm;
    struct neon_type_el vectype;
    unsigned present : 1;      /* Operand present.  */
    unsigned isreg : 1;        /* Operand was a register.  */
    unsigned immisreg : 1;     /* .imm field is a second register.  */
    unsigned isscalar : 1;     /* Operand is a (Neon) scalar.  */
    unsigned immisalign : 1;   /* Immediate is an alignment specifier.  */
    unsigned immisfloat : 1;   /* Immediate was parsed as a float.  */
    /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
       instructions. This allows us to disambiguate ARM <-> vector insns.  */
    unsigned regisimm : 1;     /* 64-bit immediate, reg forms high 32 bits.  */
    unsigned isvec : 1;        /* Is a single, double or quad VFP/Neon reg.  */
    unsigned isquad : 1;       /* Operand is Neon quad-precision register.  */
    unsigned issingle : 1;     /* Operand is VFP single-precision register.  */
    unsigned hasreloc : 1;     /* Operand has relocation suffix.  */
    unsigned writeback : 1;    /* Operand has trailing !  */
    unsigned preind : 1;       /* Preindexed address.  */
    unsigned postind : 1;      /* Postindexed address.  */
    unsigned negative : 1;     /* Index register was negated.  */
    unsigned shifted : 1;      /* Shift applied to operation.  */
    unsigned shift_kind : 3;   /* Shift operation (enum shift_kind).  */
  } operands[6];
};

static struct arm_it inst;

#define NUM_FLOAT_VALS 8

/* FP constants encodable as FPA immediates; NULL-terminated, and the
   count of real entries matches NUM_FLOAT_VALS above.  */
const char * fp_const[] =
{
  "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
};

/* Number of littlenums required to hold an extended precision number.  */
#define MAX_LITTLENUMS 6

/* Pre-converted littlenum forms of the fp_const[] strings.  */
LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];

/* Generic success/failure results used throughout the parsers.  */
#define FAIL    (-1)
#define SUCCESS (0)

#define SUFF_S 1
#define SUFF_D 2
#define SUFF_E 3
#define SUFF_P 4

#define CP_T_X 0x00008000
#define CP_T_Y 0x00400000

#define CONDS_BIT 0x00100000
#define LOAD_BIT  0x00100000

#define DOUBLE_LOAD_FLAG 0x00000001

/* Condition-code mnemonic and its 4-bit encoding.  */
struct asm_cond
{
  const char * template;
  unsigned long value;
};

#define COND_ALWAYS 0xE

/* PSR name and its field mask.  */
struct asm_psr
{
  const char *template;
  unsigned long field;
};

/* Barrier option name (for DMB/DSB-style operands) and its encoding.  */
struct asm_barrier_opt
{
  const char *template;
  unsigned long value;
};

/* The bit that distinguishes CPSR and SPSR.  */
#define SPSR_BIT   (1 << 22)

/* The individual PSR flag bits.  */
#define PSR_c (1 << 16)
#define PSR_x (1 << 17)
#define PSR_s (1 << 18)
#define PSR_f (1 << 19)

/* Maps a relocation suffix name to its BFD reloc code.  */
struct reloc_entry
{
  char *name;
  bfd_reloc_code_real_type reloc;
};

/* Position of a VFP register field within an instruction word.  */
enum vfp_reg_pos
{
  VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
  VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
};

/* Addressing variants for VFP load/store multiple.  */
enum vfp_ldstm_type
{
  VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
};

/* Bits for DEFINED field in neon_typed_alias.
 */
#define NTA_HASTYPE  1
#define NTA_HASINDEX 2

/* Extra information attached to a register alias created with .dn/.qn:
   an element type and/or a scalar index (see DEFINED bits above).  */
struct neon_typed_alias
{
  unsigned char defined;
  unsigned char index;
  struct neon_type_el eltype;
};

/* ARM register categories.  This includes coprocessor numbers and various
   architecture extensions' registers.
   NOTE: the order here must match reg_expected_msgs[] below, which is
   indexed by these values.  */
enum arm_reg_type
{
  REG_TYPE_RN,
  REG_TYPE_CP,
  REG_TYPE_CN,
  REG_TYPE_FN,
  REG_TYPE_VFS,
  REG_TYPE_VFD,
  REG_TYPE_NQ,
  REG_TYPE_VFSD,
  REG_TYPE_NDQ,
  REG_TYPE_NSDQ,
  REG_TYPE_VFC,
  REG_TYPE_MVF,
  REG_TYPE_MVD,
  REG_TYPE_MVFX,
  REG_TYPE_MVDX,
  REG_TYPE_MVAX,
  REG_TYPE_DSPSC,
  REG_TYPE_MMXWR,
  REG_TYPE_MMXWC,
  REG_TYPE_MMXWCG,
  REG_TYPE_XSCALE,
};

/* Structure for a hash table entry for a register.
   If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
   information which states whether a vector type or index is specified (for a
   register alias created with .dn or .qn). Otherwise NEON should be NULL.  */
struct reg_entry
{
  const char *name;
  unsigned char number;
  unsigned char type;
  unsigned char builtin;
  struct neon_typed_alias *neon;
};

/* Diagnostics used when we don't get a register of the expected type.
   Indexed by enum arm_reg_type, in the same order.  */
const char *const reg_expected_msgs[] =
{
  N_("ARM register expected"),
  N_("bad or missing co-processor number"),
  N_("co-processor register expected"),
  N_("FPA register expected"),
  N_("VFP single precision register expected"),
  N_("VFP/Neon double precision register expected"),
  N_("Neon quad precision register expected"),
  N_("VFP single or double precision register expected"),
  N_("Neon double or quad precision register expected"),
  N_("VFP single, double or Neon quad precision register expected"),
  N_("VFP system register expected"),
  N_("Maverick MVF register expected"),
  N_("Maverick MVD register expected"),
  N_("Maverick MVFX register expected"),
  N_("Maverick MVDX register expected"),
  N_("Maverick MVAX register expected"),
  N_("Maverick DSPSC register expected"),
  N_("iWMMXt data register expected"),
  N_("iWMMXt control register expected"),
  N_("iWMMXt scalar register expected"),
  N_("XScale accumulator register expected"),
};

/* Some well known registers that we refer to directly elsewhere.  */
#define REG_SP 13
#define REG_LR 14
#define REG_PC 15

/* ARM instructions take 4bytes in the object file, Thumb instructions
   take 2:  */
#define INSN_SIZE 4

/* One entry in the opcode table: how to recognise a mnemonic and how to
   encode it in ARM and/or Thumb form.  */
struct asm_opcode
{
  /* Basic string to match.  */
  const char *template;

  /* Parameters to instruction.  */
  unsigned char operands[8];

  /* Conditional tag - see opcode_lookup.  */
  unsigned int tag : 4;

  /* Basic instruction code.  */
  unsigned int avalue : 28;

  /* Thumb-format instruction code.  */
  unsigned int tvalue;

  /* Which architecture variant provides this instruction.  */
  const arm_feature_set *avariant;
  const arm_feature_set *tvariant;

  /* Function to call to encode instruction in ARM format.  */
  void (* aencode) (void);

  /* Function to call to encode instruction in Thumb format.
*/ 540 void (* tencode) (void); 541}; 542 543/* Defines for various bits that we will want to toggle. */ 544#define INST_IMMEDIATE 0x02000000 545#define OFFSET_REG 0x02000000 546#define HWOFFSET_IMM 0x00400000 547#define SHIFT_BY_REG 0x00000010 548#define PRE_INDEX 0x01000000 549#define INDEX_UP 0x00800000 550#define WRITE_BACK 0x00200000 551#define LDM_TYPE_2_OR_3 0x00400000 552#define CPSI_MMOD 0x00020000 553 554#define LITERAL_MASK 0xf000f000 555#define OPCODE_MASK 0xfe1fffff 556#define V4_STR_BIT 0x00000020 557 558#define T2_SUBS_PC_LR 0xf3de8f00 559 560#define DATA_OP_SHIFT 21 561 562#define T2_OPCODE_MASK 0xfe1fffff 563#define T2_DATA_OP_SHIFT 21 564 565/* Codes to distinguish the arithmetic instructions. */ 566#define OPCODE_AND 0 567#define OPCODE_EOR 1 568#define OPCODE_SUB 2 569#define OPCODE_RSB 3 570#define OPCODE_ADD 4 571#define OPCODE_ADC 5 572#define OPCODE_SBC 6 573#define OPCODE_RSC 7 574#define OPCODE_TST 8 575#define OPCODE_TEQ 9 576#define OPCODE_CMP 10 577#define OPCODE_CMN 11 578#define OPCODE_ORR 12 579#define OPCODE_MOV 13 580#define OPCODE_BIC 14 581#define OPCODE_MVN 15 582 583#define T2_OPCODE_AND 0 584#define T2_OPCODE_BIC 1 585#define T2_OPCODE_ORR 2 586#define T2_OPCODE_ORN 3 587#define T2_OPCODE_EOR 4 588#define T2_OPCODE_ADD 8 589#define T2_OPCODE_ADC 10 590#define T2_OPCODE_SBC 11 591#define T2_OPCODE_SUB 13 592#define T2_OPCODE_RSB 14 593 594#define T_OPCODE_MUL 0x4340 595#define T_OPCODE_TST 0x4200 596#define T_OPCODE_CMN 0x42c0 597#define T_OPCODE_NEG 0x4240 598#define T_OPCODE_MVN 0x43c0 599 600#define T_OPCODE_ADD_R3 0x1800 601#define T_OPCODE_SUB_R3 0x1a00 602#define T_OPCODE_ADD_HI 0x4400 603#define T_OPCODE_ADD_ST 0xb000 604#define T_OPCODE_SUB_ST 0xb080 605#define T_OPCODE_ADD_SP 0xa800 606#define T_OPCODE_ADD_PC 0xa000 607#define T_OPCODE_ADD_I8 0x3000 608#define T_OPCODE_SUB_I8 0x3800 609#define T_OPCODE_ADD_I3 0x1c00 610#define T_OPCODE_SUB_I3 0x1e00 611 612#define T_OPCODE_ASR_R 0x4100 613#define T_OPCODE_LSL_R 0x4080 
614#define T_OPCODE_LSR_R 0x40c0 615#define T_OPCODE_ROR_R 0x41c0 616#define T_OPCODE_ASR_I 0x1000 617#define T_OPCODE_LSL_I 0x0000 618#define T_OPCODE_LSR_I 0x0800 619 620#define T_OPCODE_MOV_I8 0x2000 621#define T_OPCODE_CMP_I8 0x2800 622#define T_OPCODE_CMP_LR 0x4280 623#define T_OPCODE_MOV_HR 0x4600 624#define T_OPCODE_CMP_HR 0x4500 625 626#define T_OPCODE_LDR_PC 0x4800 627#define T_OPCODE_LDR_SP 0x9800 628#define T_OPCODE_STR_SP 0x9000 629#define T_OPCODE_LDR_IW 0x6800 630#define T_OPCODE_STR_IW 0x6000 631#define T_OPCODE_LDR_IH 0x8800 632#define T_OPCODE_STR_IH 0x8000 633#define T_OPCODE_LDR_IB 0x7800 634#define T_OPCODE_STR_IB 0x7000 635#define T_OPCODE_LDR_RW 0x5800 636#define T_OPCODE_STR_RW 0x5000 637#define T_OPCODE_LDR_RH 0x5a00 638#define T_OPCODE_STR_RH 0x5200 639#define T_OPCODE_LDR_RB 0x5c00 640#define T_OPCODE_STR_RB 0x5400 641 642#define T_OPCODE_PUSH 0xb400 643#define T_OPCODE_POP 0xbc00 644 645#define T_OPCODE_BRANCH 0xe000 646 647#define THUMB_SIZE 2 /* Size of thumb instruction. 
*/ 648#define THUMB_PP_PC_LR 0x0100 649#define THUMB_LOAD_BIT 0x0800 650#define THUMB2_LOAD_BIT 0x00100000 651 652#define BAD_ARGS _("bad arguments to instruction") 653#define BAD_PC _("r15 not allowed here") 654#define BAD_SP _("r13 not allowed here") 655#define BAD_COND _("instruction cannot be conditional") 656#define BAD_OVERLAP _("registers may not be the same") 657#define BAD_HIREG _("lo register required") 658#define BAD_THUMB32 _("instruction not supported in Thumb16 mode") 659#define BAD_ADDR_MODE _("instruction does not accept this addressing mode"); 660#define BAD_BRANCH _("branch must be last instruction in IT block") 661#define BAD_NOT_IT _("instruction not allowed in IT block") 662#define BAD_FPU _("selected FPU does not support instruction") 663#define BAD_VMRS _("APSR_nzcv may only be used with fpscr") 664 665static struct hash_control *arm_ops_hsh; 666static struct hash_control *arm_cond_hsh; 667static struct hash_control *arm_shift_hsh; 668static struct hash_control *arm_psr_hsh; 669static struct hash_control *arm_v7m_psr_hsh; 670static struct hash_control *arm_reg_hsh; 671static struct hash_control *arm_reloc_hsh; 672static struct hash_control *arm_barrier_opt_hsh; 673 674/* Stuff needed to resolve the label ambiguity 675 As: 676 ... 677 label: <insn> 678 may differ from: 679 ... 680 label: 681 <insn> 682*/ 683 684symbolS * last_label_seen; 685static int label_is_thumb_function_name = FALSE; 686 687/* Literal pool structure. Held on a per-section 688 and per-sub-section basis. */ 689 690#define MAX_LITERAL_POOL_SIZE 1024 691typedef struct literal_pool 692{ 693 expressionS literals [MAX_LITERAL_POOL_SIZE]; 694 unsigned int next_free_entry; 695 unsigned int id; 696 symbolS * symbol; 697 segT section; 698 subsegT sub_section; 699 struct literal_pool * next; 700} literal_pool; 701 702/* Pointer to a linked list of literal pools. */ 703literal_pool * list_of_pools = NULL; 704 705/* State variables for IT block handling. 
 */
static bfd_boolean current_it_mask = 0;
static int current_cc;


/* Pure syntax.  */

/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.  */
const char comment_chars[] = "@";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.  */
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

const char line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.  */
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
/* As in 0f12.456  */
/* or    0d1.2345e12  */

const char FLT_CHARS[] = "rRsSfFdDxXeEpP";

/* Prefix characters that indicate the start of an immediate
   value.  */
#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')

/* Separator character handling.  */

/* Note: consumes at most ONE space; operand text is expected to be
   already tokenised by the time these helpers run.  */
#define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)

/* If *STR points at character C, advance *STR past it and return SUCCESS;
   otherwise leave *STR unchanged and return FAIL.  */
static inline int
skip_past_char (char ** str, char c)
{
  if (**str == c)
    {
      (*str)++;
      return SUCCESS;
    }
  else
    return FAIL;
}
#define skip_past_comma(str) skip_past_char (str, ',')

/* Arithmetic expressions (possibly involving symbols).  */

/* Return TRUE if anything in the expression is a bignum.
 */

/* Recursively walk the expression rooted at symbol SP and return nonzero
   if any sub-expression has X_op == O_big (i.e. involves a bignum),
   descending through both the add-symbol and op-symbol branches.  */
static int
walk_no_bignums (symbolS * sp)
{
  if (symbol_get_value_expression (sp)->X_op == O_big)
    return 1;

  if (symbol_get_value_expression (sp)->X_add_symbol)
    {
      return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
	      || (symbol_get_value_expression (sp)->X_op_symbol
		  && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
    }

  return 0;
}

/* Nonzero while my_get_expression is running; md_operand uses this to
   flag bad operands back to us (see md_operand below).  */
static int in_my_get_expression = 0;

/* Third argument to my_get_expression.  */
#define GE_NO_PREFIX 0
#define GE_IMM_PREFIX 1
#define GE_OPT_PREFIX 2
/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
#define GE_OPT_PREFIX_BIG 3

/* Parse an expression at *STR into EP, handling an optional or mandatory
   '#'/'$' immediate prefix according to PREFIX_MODE, and advance *STR past
   the parsed text.  Temporarily redirects input_line_pointer to *STR for
   the generic `expression' machinery and always restores it on exit.
   Returns 0 on success and nonzero on failure, setting inst.error.
   NOTE(review): the failure paths are inconsistent — the missing-prefix
   case returns FAIL (-1) while all others return 1.  Callers appear to
   test only for nonzero, so both work; confirm before normalizing.  */
static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;
  segT	 seg;   /* Only examined for OBJ_AOUT below.  */

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
                  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default: abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* Point the generic expression parser at our operand text.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = 1;
  seg = expression (ep);
  in_my_get_expression = 0;

  if (ep->X_op == O_illegal)
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = _("bad expression");
      return 1;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      inst.error = _("bad segment");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }
#endif

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.  Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return 0;
}

/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.

   Note that fp constants aren't represent in the normal way on the ARM.
   In big endian mode, things are as expected.  However, in little endian
   mode fp constants are big-endian word-wise, and little-endian byte-wise
   within the words.  For example, (double) 1.1 in big endian mode is
   the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
   the byte sequence 99 99 f1 3f 9a 99 99 99.

   ??? The format of 12 byte floats is uncertain according to gcc's arm.h.
*/ 884 885char * 886md_atof (int type, char * litP, int * sizeP) 887{ 888 int prec; 889 LITTLENUM_TYPE words[MAX_LITTLENUMS]; 890 char *t; 891 int i; 892 893 switch (type) 894 { 895 case 'f': 896 case 'F': 897 case 's': 898 case 'S': 899 prec = 2; 900 break; 901 902 case 'd': 903 case 'D': 904 case 'r': 905 case 'R': 906 prec = 4; 907 break; 908 909 case 'x': 910 case 'X': 911 prec = 6; 912 break; 913 914 case 'p': 915 case 'P': 916 prec = 6; 917 break; 918 919 default: 920 *sizeP = 0; 921 return _("bad call to MD_ATOF()"); 922 } 923 924 t = atof_ieee (input_line_pointer, type, words); 925 if (t) 926 input_line_pointer = t; 927 *sizeP = prec * 2; 928 929 if (target_big_endian) 930 { 931 for (i = 0; i < prec; i++) 932 { 933 md_number_to_chars (litP, (valueT) words[i], 2); 934 litP += 2; 935 } 936 } 937 else 938 { 939 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure)) 940 for (i = prec - 1; i >= 0; i--) 941 { 942 md_number_to_chars (litP, (valueT) words[i], 2); 943 litP += 2; 944 } 945 else 946 /* For a 4 byte float the order of elements in `words' is 1 0. 947 For an 8 byte float the order is 1 0 3 2. */ 948 for (i = 0; i < prec; i += 2) 949 { 950 md_number_to_chars (litP, (valueT) words[i + 1], 2); 951 md_number_to_chars (litP + 2, (valueT) words[i], 2); 952 litP += 4; 953 } 954 } 955 956 return 0; 957} 958 959/* We handle all bad expressions here, so that we can report the faulty 960 instruction in the error message. */ 961void 962md_operand (expressionS * expr) 963{ 964 if (in_my_get_expression) 965 expr->X_op = O_illegal; 966} 967 968/* Immediate values. */ 969 970/* Generic immediate-value read function for use in directives. 971 Accepts anything that 'expression' can fold to a constant. 972 *val receives the number. 
*/ 973#ifdef OBJ_ELF 974static int 975immediate_for_directive (int *val) 976{ 977 expressionS exp; 978 exp.X_op = O_illegal; 979 980 if (is_immediate_prefix (*input_line_pointer)) 981 { 982 input_line_pointer++; 983 expression (&exp); 984 } 985 986 if (exp.X_op != O_constant) 987 { 988 as_bad (_("expected #constant")); 989 ignore_rest_of_line (); 990 return FAIL; 991 } 992 *val = exp.X_add_number; 993 return SUCCESS; 994} 995#endif 996 997/* Register parsing. */ 998 999/* Generic register parser. CCP points to what should be the 1000 beginning of a register name. If it is indeed a valid register 1001 name, advance CCP over it and return the reg_entry structure; 1002 otherwise return NULL. Does not issue diagnostics. */ 1003 1004static struct reg_entry * 1005arm_reg_parse_multi (char **ccp) 1006{ 1007 char *start = *ccp; 1008 char *p; 1009 struct reg_entry *reg; 1010 1011#ifdef REGISTER_PREFIX 1012 if (*start != REGISTER_PREFIX) 1013 return NULL; 1014 start++; 1015#endif 1016#ifdef OPTIONAL_REGISTER_PREFIX 1017 if (*start == OPTIONAL_REGISTER_PREFIX) 1018 start++; 1019#endif 1020 1021 p = start; 1022 if (!ISALPHA (*p) || !is_name_beginner (*p)) 1023 return NULL; 1024 1025 do 1026 p++; 1027 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_'); 1028 1029 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start); 1030 1031 if (!reg) 1032 return NULL; 1033 1034 *ccp = p; 1035 return reg; 1036} 1037 1038static int 1039arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg, 1040 enum arm_reg_type type) 1041{ 1042 /* Alternative syntaxes are accepted for a few register classes. */ 1043 switch (type) 1044 { 1045 case REG_TYPE_MVF: 1046 case REG_TYPE_MVD: 1047 case REG_TYPE_MVFX: 1048 case REG_TYPE_MVDX: 1049 /* Generic coprocessor register names are allowed for these. */ 1050 if (reg && reg->type == REG_TYPE_CN) 1051 return reg->number; 1052 break; 1053 1054 case REG_TYPE_CP: 1055 /* For backward compatibility, a bare number is valid here. 
*/ 1056 { 1057 unsigned long processor = strtoul (start, ccp, 10); 1058 if (*ccp != start && processor <= 15) 1059 return processor; 1060 } 1061 1062 case REG_TYPE_MMXWC: 1063 /* WC includes WCG. ??? I'm not sure this is true for all 1064 instructions that take WC registers. */ 1065 if (reg && reg->type == REG_TYPE_MMXWCG) 1066 return reg->number; 1067 break; 1068 1069 default: 1070 break; 1071 } 1072 1073 return FAIL; 1074} 1075 1076/* As arm_reg_parse_multi, but the register must be of type TYPE, and the 1077 return value is the register number or FAIL. */ 1078 1079static int 1080arm_reg_parse (char **ccp, enum arm_reg_type type) 1081{ 1082 char *start = *ccp; 1083 struct reg_entry *reg = arm_reg_parse_multi (ccp); 1084 int ret; 1085 1086 /* Do not allow a scalar (reg+index) to parse as a register. */ 1087 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX)) 1088 return FAIL; 1089 1090 if (reg && reg->type == type) 1091 return reg->number; 1092 1093 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL) 1094 return ret; 1095 1096 *ccp = start; 1097 return FAIL; 1098} 1099 1100/* Parse a Neon type specifier. *STR should point at the leading '.' 1101 character. Does no verification at this stage that the type fits the opcode 1102 properly. E.g., 1103 1104 .i32.i32.s16 1105 .s32.f32 1106 .u16 1107 1108 Can all be legally parsed by this function. 1109 1110 Fills in neon_type struct pointer with parsed information, and updates STR 1111 to point after the parsed type specifier. Returns SUCCESS if this was a legal 1112 type, FAIL if not. */ 1113 1114static int 1115parse_neon_type (struct neon_type *type, char **str) 1116{ 1117 char *ptr = *str; 1118 1119 if (type) 1120 type->elems = 0; 1121 1122 while (type->elems < NEON_MAX_TYPE_ELS) 1123 { 1124 enum neon_el_type thistype = NT_untyped; 1125 unsigned thissize = -1u; 1126 1127 if (*ptr != '.') 1128 break; 1129 1130 ptr++; 1131 1132 /* Just a size without an explicit type. 
*/ 1133 if (ISDIGIT (*ptr)) 1134 goto parsesize; 1135 1136 switch (TOLOWER (*ptr)) 1137 { 1138 case 'i': thistype = NT_integer; break; 1139 case 'f': thistype = NT_float; break; 1140 case 'p': thistype = NT_poly; break; 1141 case 's': thistype = NT_signed; break; 1142 case 'u': thistype = NT_unsigned; break; 1143 case 'd': 1144 thistype = NT_float; 1145 thissize = 64; 1146 ptr++; 1147 goto done; 1148 default: 1149 as_bad (_("unexpected character `%c' in type specifier"), *ptr); 1150 return FAIL; 1151 } 1152 1153 ptr++; 1154 1155 /* .f is an abbreviation for .f32. */ 1156 if (thistype == NT_float && !ISDIGIT (*ptr)) 1157 thissize = 32; 1158 else 1159 { 1160 parsesize: 1161 thissize = strtoul (ptr, &ptr, 10); 1162 1163 if (thissize != 8 && thissize != 16 && thissize != 32 1164 && thissize != 64) 1165 { 1166 as_bad (_("bad size %d in type specifier"), thissize); 1167 return FAIL; 1168 } 1169 } 1170 1171 done: 1172 if (type) 1173 { 1174 type->el[type->elems].type = thistype; 1175 type->el[type->elems].size = thissize; 1176 type->elems++; 1177 } 1178 } 1179 1180 /* Empty/missing type is not a successful parse. */ 1181 if (type->elems == 0) 1182 return FAIL; 1183 1184 *str = ptr; 1185 1186 return SUCCESS; 1187} 1188 1189/* Errors may be set multiple times during parsing or bit encoding 1190 (particularly in the Neon bits), but usually the earliest error which is set 1191 will be the most meaningful. Avoid overwriting it with later (cascading) 1192 errors by calling this function. */ 1193 1194static void 1195first_error (const char *err) 1196{ 1197 if (!inst.error) 1198 inst.error = err; 1199} 1200 1201/* Parse a single type, e.g. ".s32", leading period included. 
*/ 1202static int 1203parse_neon_operand_type (struct neon_type_el *vectype, char **ccp) 1204{ 1205 char *str = *ccp; 1206 struct neon_type optype; 1207 1208 if (*str == '.') 1209 { 1210 if (parse_neon_type (&optype, &str) == SUCCESS) 1211 { 1212 if (optype.elems == 1) 1213 *vectype = optype.el[0]; 1214 else 1215 { 1216 first_error (_("only one type should be specified for operand")); 1217 return FAIL; 1218 } 1219 } 1220 else 1221 { 1222 first_error (_("vector type expected")); 1223 return FAIL; 1224 } 1225 } 1226 else 1227 return FAIL; 1228 1229 *ccp = str; 1230 1231 return SUCCESS; 1232} 1233 1234/* Special meanings for indices (which have a range of 0-7), which will fit into 1235 a 4-bit integer. */ 1236 1237#define NEON_ALL_LANES 15 1238#define NEON_INTERLEAVE_LANES 14 1239 1240/* Parse either a register or a scalar, with an optional type. Return the 1241 register number, and optionally fill in the actual type of the register 1242 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and 1243 type/index information in *TYPEINFO. */ 1244 1245static int 1246parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type, 1247 enum arm_reg_type *rtype, 1248 struct neon_typed_alias *typeinfo) 1249{ 1250 char *str = *ccp; 1251 struct reg_entry *reg = arm_reg_parse_multi (&str); 1252 struct neon_typed_alias atype; 1253 struct neon_type_el parsetype; 1254 1255 atype.defined = 0; 1256 atype.index = -1; 1257 atype.eltype.type = NT_invtype; 1258 atype.eltype.size = -1; 1259 1260 /* Try alternate syntax for some types of register. Note these are mutually 1261 exclusive with the Neon syntax extensions. */ 1262 if (reg == NULL) 1263 { 1264 int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type); 1265 if (altreg != FAIL) 1266 *ccp = str; 1267 if (typeinfo) 1268 *typeinfo = atype; 1269 return altreg; 1270 } 1271 1272 /* Undo polymorphism when a set of register types may be accepted. 
*/ 1273 if ((type == REG_TYPE_NDQ 1274 && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD)) 1275 || (type == REG_TYPE_VFSD 1276 && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD)) 1277 || (type == REG_TYPE_NSDQ 1278 && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD 1279 || reg->type == REG_TYPE_NQ)) 1280 || (type == REG_TYPE_MMXWC 1281 && (reg->type == REG_TYPE_MMXWCG))) 1282 type = reg->type; 1283 1284 if (type != reg->type) 1285 return FAIL; 1286 1287 if (reg->neon) 1288 atype = *reg->neon; 1289 1290 if (parse_neon_operand_type (&parsetype, &str) == SUCCESS) 1291 { 1292 if ((atype.defined & NTA_HASTYPE) != 0) 1293 { 1294 first_error (_("can't redefine type for operand")); 1295 return FAIL; 1296 } 1297 atype.defined |= NTA_HASTYPE; 1298 atype.eltype = parsetype; 1299 } 1300 1301 if (skip_past_char (&str, '[') == SUCCESS) 1302 { 1303 if (type != REG_TYPE_VFD) 1304 { 1305 first_error (_("only D registers may be indexed")); 1306 return FAIL; 1307 } 1308 1309 if ((atype.defined & NTA_HASINDEX) != 0) 1310 { 1311 first_error (_("can't change index for operand")); 1312 return FAIL; 1313 } 1314 1315 atype.defined |= NTA_HASINDEX; 1316 1317 if (skip_past_char (&str, ']') == SUCCESS) 1318 atype.index = NEON_ALL_LANES; 1319 else 1320 { 1321 expressionS exp; 1322 1323 my_get_expression (&exp, &str, GE_NO_PREFIX); 1324 1325 if (exp.X_op != O_constant) 1326 { 1327 first_error (_("constant expression required")); 1328 return FAIL; 1329 } 1330 1331 if (skip_past_char (&str, ']') == FAIL) 1332 return FAIL; 1333 1334 atype.index = exp.X_add_number; 1335 } 1336 } 1337 1338 if (typeinfo) 1339 *typeinfo = atype; 1340 1341 if (rtype) 1342 *rtype = type; 1343 1344 *ccp = str; 1345 1346 return reg->number; 1347} 1348 1349/* Like arm_reg_parse, but allow allow the following extra features: 1350 - If RTYPE is non-zero, return the (possibly restricted) type of the 1351 register (e.g. Neon double or quad reg when either has been requested). 
1352 - If this is a Neon vector type with additional type information, fill 1353 in the struct pointed to by VECTYPE (if non-NULL). 1354 This function will fault on encountering a scalar. 1355*/ 1356 1357static int 1358arm_typed_reg_parse (char **ccp, enum arm_reg_type type, 1359 enum arm_reg_type *rtype, struct neon_type_el *vectype) 1360{ 1361 struct neon_typed_alias atype; 1362 char *str = *ccp; 1363 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype); 1364 1365 if (reg == FAIL) 1366 return FAIL; 1367 1368 /* Do not allow a scalar (reg+index) to parse as a register. */ 1369 if ((atype.defined & NTA_HASINDEX) != 0) 1370 { 1371 first_error (_("register operand expected, but got scalar")); 1372 return FAIL; 1373 } 1374 1375 if (vectype) 1376 *vectype = atype.eltype; 1377 1378 *ccp = str; 1379 1380 return reg; 1381} 1382 1383#define NEON_SCALAR_REG(X) ((X) >> 4) 1384#define NEON_SCALAR_INDEX(X) ((X) & 15) 1385 1386/* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't 1387 have enough information to be able to do a good job bounds-checking. So, we 1388 just do easy checks here, and do further checks later. */ 1389 1390static int 1391parse_scalar (char **ccp, int elsize, struct neon_type_el *type) 1392{ 1393 int reg; 1394 char *str = *ccp; 1395 struct neon_typed_alias atype; 1396 1397 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype); 1398 1399 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0) 1400 return FAIL; 1401 1402 if (atype.index == NEON_ALL_LANES) 1403 { 1404 first_error (_("scalar must have an index")); 1405 return FAIL; 1406 } 1407 else if (atype.index >= 64 / elsize) 1408 { 1409 first_error (_("scalar index out of range")); 1410 return FAIL; 1411 } 1412 1413 if (type) 1414 *type = atype.eltype; 1415 1416 *ccp = str; 1417 1418 return reg * 16 + atype.index; 1419} 1420 1421/* Parse an ARM register list. Returns the bitmask, or FAIL. 
*/ 1422static long 1423parse_reg_list (char ** strp) 1424{ 1425 char * str = * strp; 1426 long range = 0; 1427 int another_range; 1428 1429 /* We come back here if we get ranges concatenated by '+' or '|'. */ 1430 do 1431 { 1432 another_range = 0; 1433 1434 if (*str == '{') 1435 { 1436 int in_range = 0; 1437 int cur_reg = -1; 1438 1439 str++; 1440 do 1441 { 1442 int reg; 1443 1444 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL) 1445 { 1446 first_error (_(reg_expected_msgs[REG_TYPE_RN])); 1447 return FAIL; 1448 } 1449 1450 if (in_range) 1451 { 1452 int i; 1453 1454 if (reg <= cur_reg) 1455 { 1456 first_error (_("bad range in register list")); 1457 return FAIL; 1458 } 1459 1460 for (i = cur_reg + 1; i < reg; i++) 1461 { 1462 if (range & (1 << i)) 1463 as_tsktsk 1464 (_("Warning: duplicated register (r%d) in register list"), 1465 i); 1466 else 1467 range |= 1 << i; 1468 } 1469 in_range = 0; 1470 } 1471 1472 if (range & (1 << reg)) 1473 as_tsktsk (_("Warning: duplicated register (r%d) in register list"), 1474 reg); 1475 else if (reg <= cur_reg) 1476 as_tsktsk (_("Warning: register range not in ascending order")); 1477 1478 range |= 1 << reg; 1479 cur_reg = reg; 1480 } 1481 while (skip_past_comma (&str) != FAIL 1482 || (in_range = 1, *str++ == '-')); 1483 str--; 1484 1485 if (*str++ != '}') 1486 { 1487 first_error (_("missing `}'")); 1488 return FAIL; 1489 } 1490 } 1491 else 1492 { 1493 expressionS expr; 1494 1495 if (my_get_expression (&expr, &str, GE_NO_PREFIX)) 1496 return FAIL; 1497 1498 if (expr.X_op == O_constant) 1499 { 1500 if (expr.X_add_number 1501 != (expr.X_add_number & 0x0000ffff)) 1502 { 1503 inst.error = _("invalid register mask"); 1504 return FAIL; 1505 } 1506 1507 if ((range & expr.X_add_number) != 0) 1508 { 1509 int regno = range & expr.X_add_number; 1510 1511 regno &= -regno; 1512 regno = (1 << regno) - 1; 1513 as_tsktsk 1514 (_("Warning: duplicated register (r%d) in register list"), 1515 regno); 1516 } 1517 1518 range |= expr.X_add_number; 
1519 } 1520 else 1521 { 1522 if (inst.reloc.type != 0) 1523 { 1524 inst.error = _("expression too complex"); 1525 return FAIL; 1526 } 1527 1528 memcpy (&inst.reloc.exp, &expr, sizeof (expressionS)); 1529 inst.reloc.type = BFD_RELOC_ARM_MULTI; 1530 inst.reloc.pc_rel = 0; 1531 } 1532 } 1533 1534 if (*str == '|' || *str == '+') 1535 { 1536 str++; 1537 another_range = 1; 1538 } 1539 } 1540 while (another_range); 1541 1542 *strp = str; 1543 return range; 1544} 1545 1546/* Types of registers in a list. */ 1547 1548enum reg_list_els 1549{ 1550 REGLIST_VFP_S, 1551 REGLIST_VFP_D, 1552 REGLIST_NEON_D 1553}; 1554 1555/* Parse a VFP register list. If the string is invalid return FAIL. 1556 Otherwise return the number of registers, and set PBASE to the first 1557 register. Parses registers of type ETYPE. 1558 If REGLIST_NEON_D is used, several syntax enhancements are enabled: 1559 - Q registers can be used to specify pairs of D registers 1560 - { } can be omitted from around a singleton register list 1561 FIXME: This is not implemented, as it would require backtracking in 1562 some cases, e.g.: 1563 vtbl.8 d3,d4,d5 1564 This could be done (the meaning isn't really ambiguous), but doesn't 1565 fit in well with the current parsing framework. 1566 - 32 D registers may be used (also true for VFPv3). 1567 FIXME: Types are ignored in these register lists, which is probably a 1568 bug. 
*/ 1569 1570static int 1571parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype) 1572{ 1573 char *str = *ccp; 1574 int base_reg; 1575 int new_base; 1576 enum arm_reg_type regtype = 0; 1577 int max_regs = 0; 1578 int count = 0; 1579 int warned = 0; 1580 unsigned long mask = 0; 1581 int i; 1582 1583 if (*str != '{') 1584 { 1585 inst.error = _("expecting {"); 1586 return FAIL; 1587 } 1588 1589 str++; 1590 1591 switch (etype) 1592 { 1593 case REGLIST_VFP_S: 1594 regtype = REG_TYPE_VFS; 1595 max_regs = 32; 1596 break; 1597 1598 case REGLIST_VFP_D: 1599 regtype = REG_TYPE_VFD; 1600 break; 1601 1602 case REGLIST_NEON_D: 1603 regtype = REG_TYPE_NDQ; 1604 break; 1605 } 1606 1607 if (etype != REGLIST_VFP_S) 1608 { 1609 /* VFPv3 allows 32 D registers. */ 1610 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3)) 1611 { 1612 max_regs = 32; 1613 if (thumb_mode) 1614 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, 1615 fpu_vfp_ext_v3); 1616 else 1617 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, 1618 fpu_vfp_ext_v3); 1619 } 1620 else 1621 max_regs = 16; 1622 } 1623 1624 base_reg = max_regs; 1625 1626 do 1627 { 1628 int setmask = 1, addregs = 1; 1629 1630 new_base = arm_typed_reg_parse (&str, regtype, ®type, NULL); 1631 1632 if (new_base == FAIL) 1633 { 1634 first_error (_(reg_expected_msgs[regtype])); 1635 return FAIL; 1636 } 1637 1638 if (new_base >= max_regs) 1639 { 1640 first_error (_("register out of range in list")); 1641 return FAIL; 1642 } 1643 1644 /* Note: a value of 2 * n is returned for the register Q<n>. */ 1645 if (regtype == REG_TYPE_NQ) 1646 { 1647 setmask = 3; 1648 addregs = 2; 1649 } 1650 1651 if (new_base < base_reg) 1652 base_reg = new_base; 1653 1654 if (mask & (setmask << new_base)) 1655 { 1656 first_error (_("invalid register list")); 1657 return FAIL; 1658 } 1659 1660 if ((mask >> new_base) != 0 && ! 
warned) 1661 { 1662 as_tsktsk (_("register list not in ascending order")); 1663 warned = 1; 1664 } 1665 1666 mask |= setmask << new_base; 1667 count += addregs; 1668 1669 if (*str == '-') /* We have the start of a range expression */ 1670 { 1671 int high_range; 1672 1673 str++; 1674 1675 if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL)) 1676 == FAIL) 1677 { 1678 inst.error = gettext (reg_expected_msgs[regtype]); 1679 return FAIL; 1680 } 1681 1682 if (high_range >= max_regs) 1683 { 1684 first_error (_("register out of range in list")); 1685 return FAIL; 1686 } 1687 1688 if (regtype == REG_TYPE_NQ) 1689 high_range = high_range + 1; 1690 1691 if (high_range <= new_base) 1692 { 1693 inst.error = _("register range not in ascending order"); 1694 return FAIL; 1695 } 1696 1697 for (new_base += addregs; new_base <= high_range; new_base += addregs) 1698 { 1699 if (mask & (setmask << new_base)) 1700 { 1701 inst.error = _("invalid register list"); 1702 return FAIL; 1703 } 1704 1705 mask |= setmask << new_base; 1706 count += addregs; 1707 } 1708 } 1709 } 1710 while (skip_past_comma (&str) != FAIL); 1711 1712 str++; 1713 1714 /* Sanity check -- should have raised a parse error above. */ 1715 if (count == 0 || count > max_regs) 1716 abort (); 1717 1718 *pbase = base_reg; 1719 1720 /* Final test -- the registers must be consecutive. */ 1721 mask >>= base_reg; 1722 for (i = 0; i < count; i++) 1723 { 1724 if ((mask & (1u << i)) == 0) 1725 { 1726 inst.error = _("non-contiguous register range"); 1727 return FAIL; 1728 } 1729 } 1730 1731 *ccp = str; 1732 1733 return count; 1734} 1735 1736/* True if two alias types are the same. 
*/ 1737 1738static int 1739neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b) 1740{ 1741 if (!a && !b) 1742 return 1; 1743 1744 if (!a || !b) 1745 return 0; 1746 1747 if (a->defined != b->defined) 1748 return 0; 1749 1750 if ((a->defined & NTA_HASTYPE) != 0 1751 && (a->eltype.type != b->eltype.type 1752 || a->eltype.size != b->eltype.size)) 1753 return 0; 1754 1755 if ((a->defined & NTA_HASINDEX) != 0 1756 && (a->index != b->index)) 1757 return 0; 1758 1759 return 1; 1760} 1761 1762/* Parse element/structure lists for Neon VLD<n> and VST<n> instructions. 1763 The base register is put in *PBASE. 1764 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of 1765 the return value. 1766 The register stride (minus one) is put in bit 4 of the return value. 1767 Bits [6:5] encode the list length (minus one). 1768 The type of the list elements is put in *ELTYPE, if non-NULL. */ 1769 1770#define NEON_LANE(X) ((X) & 0xf) 1771#define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1) 1772#define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1) 1773 1774static int 1775parse_neon_el_struct_list (char **str, unsigned *pbase, 1776 struct neon_type_el *eltype) 1777{ 1778 char *ptr = *str; 1779 int base_reg = -1; 1780 int reg_incr = -1; 1781 int count = 0; 1782 int lane = -1; 1783 int leading_brace = 0; 1784 enum arm_reg_type rtype = REG_TYPE_NDQ; 1785 int addregs = 1; 1786 const char *const incr_error = "register stride must be 1 or 2"; 1787 const char *const type_error = "mismatched element/structure types in list"; 1788 struct neon_typed_alias firsttype; 1789 1790 if (skip_past_char (&ptr, '{') == SUCCESS) 1791 leading_brace = 1; 1792 1793 do 1794 { 1795 struct neon_typed_alias atype; 1796 int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype); 1797 1798 if (getreg == FAIL) 1799 { 1800 first_error (_(reg_expected_msgs[rtype])); 1801 return FAIL; 1802 } 1803 1804 if (base_reg == -1) 1805 { 1806 base_reg = getreg; 1807 if (rtype == 
REG_TYPE_NQ) 1808 { 1809 reg_incr = 1; 1810 addregs = 2; 1811 } 1812 firsttype = atype; 1813 } 1814 else if (reg_incr == -1) 1815 { 1816 reg_incr = getreg - base_reg; 1817 if (reg_incr < 1 || reg_incr > 2) 1818 { 1819 first_error (_(incr_error)); 1820 return FAIL; 1821 } 1822 } 1823 else if (getreg != base_reg + reg_incr * count) 1824 { 1825 first_error (_(incr_error)); 1826 return FAIL; 1827 } 1828 1829 if (!neon_alias_types_same (&atype, &firsttype)) 1830 { 1831 first_error (_(type_error)); 1832 return FAIL; 1833 } 1834 1835 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list 1836 modes. */ 1837 if (ptr[0] == '-') 1838 { 1839 struct neon_typed_alias htype; 1840 int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1; 1841 if (lane == -1) 1842 lane = NEON_INTERLEAVE_LANES; 1843 else if (lane != NEON_INTERLEAVE_LANES) 1844 { 1845 first_error (_(type_error)); 1846 return FAIL; 1847 } 1848 if (reg_incr == -1) 1849 reg_incr = 1; 1850 else if (reg_incr != 1) 1851 { 1852 first_error (_("don't use Rn-Rm syntax with non-unit stride")); 1853 return FAIL; 1854 } 1855 ptr++; 1856 hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype); 1857 if (hireg == FAIL) 1858 { 1859 first_error (_(reg_expected_msgs[rtype])); 1860 return FAIL; 1861 } 1862 if (!neon_alias_types_same (&htype, &firsttype)) 1863 { 1864 first_error (_(type_error)); 1865 return FAIL; 1866 } 1867 count += hireg + dregs - getreg; 1868 continue; 1869 } 1870 1871 /* If we're using Q registers, we can't use [] or [n] syntax. 
*/ 1872 if (rtype == REG_TYPE_NQ) 1873 { 1874 count += 2; 1875 continue; 1876 } 1877 1878 if ((atype.defined & NTA_HASINDEX) != 0) 1879 { 1880 if (lane == -1) 1881 lane = atype.index; 1882 else if (lane != atype.index) 1883 { 1884 first_error (_(type_error)); 1885 return FAIL; 1886 } 1887 } 1888 else if (lane == -1) 1889 lane = NEON_INTERLEAVE_LANES; 1890 else if (lane != NEON_INTERLEAVE_LANES) 1891 { 1892 first_error (_(type_error)); 1893 return FAIL; 1894 } 1895 count++; 1896 } 1897 while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL); 1898 1899 /* No lane set by [x]. We must be interleaving structures. */ 1900 if (lane == -1) 1901 lane = NEON_INTERLEAVE_LANES; 1902 1903 /* Sanity check. */ 1904 if (lane == -1 || base_reg == -1 || count < 1 || count > 4 1905 || (count > 1 && reg_incr == -1)) 1906 { 1907 first_error (_("error parsing element/structure list")); 1908 return FAIL; 1909 } 1910 1911 if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL) 1912 { 1913 first_error (_("expected }")); 1914 return FAIL; 1915 } 1916 1917 if (reg_incr == -1) 1918 reg_incr = 1; 1919 1920 if (eltype) 1921 *eltype = firsttype.eltype; 1922 1923 *pbase = base_reg; 1924 *str = ptr; 1925 1926 return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5); 1927} 1928 1929/* Parse an explicit relocation suffix on an expression. This is 1930 either nothing, or a word in parentheses. Note that if !OBJ_ELF, 1931 arm_reloc_hsh contains no entries, so this function can only 1932 succeed if there is no () after the word. Returns -1 on error, 1933 BFD_RELOC_UNUSED if there wasn't any suffix. 
*/ 1934static int 1935parse_reloc (char **str) 1936{ 1937 struct reloc_entry *r; 1938 char *p, *q; 1939 1940 if (**str != '(') 1941 return BFD_RELOC_UNUSED; 1942 1943 p = *str + 1; 1944 q = p; 1945 1946 while (*q && *q != ')' && *q != ',') 1947 q++; 1948 if (*q != ')') 1949 return -1; 1950 1951 if ((r = hash_find_n (arm_reloc_hsh, p, q - p)) == NULL) 1952 return -1; 1953 1954 *str = q + 1; 1955 return r->reloc; 1956} 1957 1958/* Directives: register aliases. */ 1959 1960static struct reg_entry * 1961insert_reg_alias (char *str, int number, int type) 1962{ 1963 struct reg_entry *new; 1964 const char *name; 1965 1966 if ((new = hash_find (arm_reg_hsh, str)) != 0) 1967 { 1968 if (new->builtin) 1969 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str); 1970 1971 /* Only warn about a redefinition if it's not defined as the 1972 same register. */ 1973 else if (new->number != number || new->type != type) 1974 as_warn (_("ignoring redefinition of register alias '%s'"), str); 1975 1976 return 0; 1977 } 1978 1979 name = xstrdup (str); 1980 new = xmalloc (sizeof (struct reg_entry)); 1981 1982 new->name = name; 1983 new->number = number; 1984 new->type = type; 1985 new->builtin = FALSE; 1986 new->neon = NULL; 1987 1988 if (hash_insert (arm_reg_hsh, name, (PTR) new)) 1989 abort (); 1990 1991 return new; 1992} 1993 1994static void 1995insert_neon_reg_alias (char *str, int number, int type, 1996 struct neon_typed_alias *atype) 1997{ 1998 struct reg_entry *reg = insert_reg_alias (str, number, type); 1999 2000 if (!reg) 2001 { 2002 first_error (_("attempt to redefine typed alias")); 2003 return; 2004 } 2005 2006 if (atype) 2007 { 2008 reg->neon = xmalloc (sizeof (struct neon_typed_alias)); 2009 *reg->neon = *atype; 2010 } 2011} 2012 2013/* Look for the .req directive. 
This is of the form: 2014 2015 new_register_name .req existing_register_name 2016 2017 If we find one, or if it looks sufficiently like one that we want to 2018 handle any error here, return non-zero. Otherwise return zero. */ 2019 2020static int 2021create_register_alias (char * newname, char *p) 2022{ 2023 struct reg_entry *old; 2024 char *oldname, *nbuf; 2025 size_t nlen; 2026 2027 /* The input scrubber ensures that whitespace after the mnemonic is 2028 collapsed to single spaces. */ 2029 oldname = p; 2030 if (strncmp (oldname, " .req ", 6) != 0) 2031 return 0; 2032 2033 oldname += 6; 2034 if (*oldname == '\0') 2035 return 0; 2036 2037 old = hash_find (arm_reg_hsh, oldname); 2038 if (!old) 2039 { 2040 as_warn (_("unknown register '%s' -- .req ignored"), oldname); 2041 return 1; 2042 } 2043 2044 /* If TC_CASE_SENSITIVE is defined, then newname already points to 2045 the desired alias name, and p points to its end. If not, then 2046 the desired alias name is in the global original_case_string. */ 2047#ifdef TC_CASE_SENSITIVE 2048 nlen = p - newname; 2049#else 2050 newname = original_case_string; 2051 nlen = strlen (newname); 2052#endif 2053 2054 nbuf = alloca (nlen + 1); 2055 memcpy (nbuf, newname, nlen); 2056 nbuf[nlen] = '\0'; 2057 2058 /* Create aliases under the new name as stated; an all-lowercase 2059 version of the new name; and an all-uppercase version of the new 2060 name. 
*/ 2061 insert_reg_alias (nbuf, old->number, old->type); 2062 2063 for (p = nbuf; *p; p++) 2064 *p = TOUPPER (*p); 2065 2066 if (strncmp (nbuf, newname, nlen)) 2067 insert_reg_alias (nbuf, old->number, old->type); 2068 2069 for (p = nbuf; *p; p++) 2070 *p = TOLOWER (*p); 2071 2072 if (strncmp (nbuf, newname, nlen)) 2073 insert_reg_alias (nbuf, old->number, old->type); 2074 2075 return 1; 2076} 2077 2078/* Create a Neon typed/indexed register alias using directives, e.g.: 2079 X .dn d5.s32[1] 2080 Y .qn 6.s16 2081 Z .dn d7 2082 T .dn Z[0] 2083 These typed registers can be used instead of the types specified after the 2084 Neon mnemonic, so long as all operands given have types. Types can also be 2085 specified directly, e.g.: 2086 vadd d0.s32, d1.s32, d2.s32 2087*/ 2088 2089static int 2090create_neon_reg_alias (char *newname, char *p) 2091{ 2092 enum arm_reg_type basetype; 2093 struct reg_entry *basereg; 2094 struct reg_entry mybasereg; 2095 struct neon_type ntype; 2096 struct neon_typed_alias typeinfo; 2097 char *namebuf, *nameend; 2098 int namelen; 2099 2100 typeinfo.defined = 0; 2101 typeinfo.eltype.type = NT_invtype; 2102 typeinfo.eltype.size = -1; 2103 typeinfo.index = -1; 2104 2105 nameend = p; 2106 2107 if (strncmp (p, " .dn ", 5) == 0) 2108 basetype = REG_TYPE_VFD; 2109 else if (strncmp (p, " .qn ", 5) == 0) 2110 basetype = REG_TYPE_NQ; 2111 else 2112 return 0; 2113 2114 p += 5; 2115 2116 if (*p == '\0') 2117 return 0; 2118 2119 basereg = arm_reg_parse_multi (&p); 2120 2121 if (basereg && basereg->type != basetype) 2122 { 2123 as_bad (_("bad type for register")); 2124 return 0; 2125 } 2126 2127 if (basereg == NULL) 2128 { 2129 expressionS exp; 2130 /* Try parsing as an integer. */ 2131 my_get_expression (&exp, &p, GE_NO_PREFIX); 2132 if (exp.X_op != O_constant) 2133 { 2134 as_bad (_("expression must be constant")); 2135 return 0; 2136 } 2137 basereg = &mybasereg; 2138 basereg->number = (basetype == REG_TYPE_NQ) ? 
exp.X_add_number * 2 2139 : exp.X_add_number; 2140 basereg->neon = 0; 2141 } 2142 2143 if (basereg->neon) 2144 typeinfo = *basereg->neon; 2145 2146 if (parse_neon_type (&ntype, &p) == SUCCESS) 2147 { 2148 /* We got a type. */ 2149 if (typeinfo.defined & NTA_HASTYPE) 2150 { 2151 as_bad (_("can't redefine the type of a register alias")); 2152 return 0; 2153 } 2154 2155 typeinfo.defined |= NTA_HASTYPE; 2156 if (ntype.elems != 1) 2157 { 2158 as_bad (_("you must specify a single type only")); 2159 return 0; 2160 } 2161 typeinfo.eltype = ntype.el[0]; 2162 } 2163 2164 if (skip_past_char (&p, '[') == SUCCESS) 2165 { 2166 expressionS exp; 2167 /* We got a scalar index. */ 2168 2169 if (typeinfo.defined & NTA_HASINDEX) 2170 { 2171 as_bad (_("can't redefine the index of a scalar alias")); 2172 return 0; 2173 } 2174 2175 my_get_expression (&exp, &p, GE_NO_PREFIX); 2176 2177 if (exp.X_op != O_constant) 2178 { 2179 as_bad (_("scalar index must be constant")); 2180 return 0; 2181 } 2182 2183 typeinfo.defined |= NTA_HASINDEX; 2184 typeinfo.index = exp.X_add_number; 2185 2186 if (skip_past_char (&p, ']') == FAIL) 2187 { 2188 as_bad (_("expecting ]")); 2189 return 0; 2190 } 2191 } 2192 2193 namelen = nameend - newname; 2194 namebuf = alloca (namelen + 1); 2195 strncpy (namebuf, newname, namelen); 2196 namebuf[namelen] = '\0'; 2197 2198 insert_neon_reg_alias (namebuf, basereg->number, basetype, 2199 typeinfo.defined != 0 ? &typeinfo : NULL); 2200 2201 /* Insert name in all uppercase. */ 2202 for (p = namebuf; *p; p++) 2203 *p = TOUPPER (*p); 2204 2205 if (strncmp (namebuf, newname, namelen)) 2206 insert_neon_reg_alias (namebuf, basereg->number, basetype, 2207 typeinfo.defined != 0 ? &typeinfo : NULL); 2208 2209 /* Insert name in all lowercase. */ 2210 for (p = namebuf; *p; p++) 2211 *p = TOLOWER (*p); 2212 2213 if (strncmp (namebuf, newname, namelen)) 2214 insert_neon_reg_alias (namebuf, basereg->number, basetype, 2215 typeinfo.defined != 0 ? 
&typeinfo : NULL); 2216 2217 return 1; 2218} 2219 2220/* Should never be called, as .req goes between the alias and the 2221 register name, not at the beginning of the line. */ 2222static void 2223s_req (int a ATTRIBUTE_UNUSED) 2224{ 2225 as_bad (_("invalid syntax for .req directive")); 2226} 2227 2228static void 2229s_dn (int a ATTRIBUTE_UNUSED) 2230{ 2231 as_bad (_("invalid syntax for .dn directive")); 2232} 2233 2234static void 2235s_qn (int a ATTRIBUTE_UNUSED) 2236{ 2237 as_bad (_("invalid syntax for .qn directive")); 2238} 2239 2240/* The .unreq directive deletes an alias which was previously defined 2241 by .req. For example: 2242 2243 my_alias .req r11 2244 .unreq my_alias */ 2245 2246static void 2247s_unreq (int a ATTRIBUTE_UNUSED) 2248{ 2249 char * name; 2250 char saved_char; 2251 2252 name = input_line_pointer; 2253 2254 while (*input_line_pointer != 0 2255 && *input_line_pointer != ' ' 2256 && *input_line_pointer != '\n') 2257 ++input_line_pointer; 2258 2259 saved_char = *input_line_pointer; 2260 *input_line_pointer = 0; 2261 2262 if (!*name) 2263 as_bad (_("invalid syntax for .unreq directive")); 2264 else 2265 { 2266 struct reg_entry *reg = hash_find (arm_reg_hsh, name); 2267 2268 if (!reg) 2269 as_bad (_("unknown register alias '%s'"), name); 2270 else if (reg->builtin) 2271 as_warn (_("ignoring attempt to undefine built-in register '%s'"), 2272 name); 2273 else 2274 { 2275 hash_delete (arm_reg_hsh, name); 2276 free ((char *) reg->name); 2277 if (reg->neon) 2278 free (reg->neon); 2279 free (reg); 2280 } 2281 } 2282 2283 *input_line_pointer = saved_char; 2284 demand_empty_rest_of_line (); 2285} 2286 2287/* Directives: Instruction set selection. */ 2288 2289#ifdef OBJ_ELF 2290/* This code is to handle mapping symbols as defined in the ARM ELF spec. 2291 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0). 2292 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag), 2293 and $d has type STT_OBJECT (BSF_OBJECT flag). 
Now all three are untyped. */ 2294 2295static enum mstate mapstate = MAP_UNDEFINED; 2296 2297void 2298mapping_state (enum mstate state) 2299{ 2300 symbolS * symbolP; 2301 const char * symname; 2302 int type; 2303 2304 if (mapstate == state) 2305 /* The mapping symbol has already been emitted. 2306 There is nothing else to do. */ 2307 return; 2308 2309 mapstate = state; 2310 2311 switch (state) 2312 { 2313 case MAP_DATA: 2314 symname = "$d"; 2315 type = BSF_NO_FLAGS; 2316 break; 2317 case MAP_ARM: 2318 symname = "$a"; 2319 type = BSF_NO_FLAGS; 2320 break; 2321 case MAP_THUMB: 2322 symname = "$t"; 2323 type = BSF_NO_FLAGS; 2324 break; 2325 case MAP_UNDEFINED: 2326 return; 2327 default: 2328 abort (); 2329 } 2330 2331 seg_info (now_seg)->tc_segment_info_data.mapstate = state; 2332 2333 symbolP = symbol_new (symname, now_seg, (valueT) frag_now_fix (), frag_now); 2334 symbol_table_insert (symbolP); 2335 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL; 2336 2337 switch (state) 2338 { 2339 case MAP_ARM: 2340 THUMB_SET_FUNC (symbolP, 0); 2341 ARM_SET_THUMB (symbolP, 0); 2342 ARM_SET_INTERWORK (symbolP, support_interwork); 2343 break; 2344 2345 case MAP_THUMB: 2346 THUMB_SET_FUNC (symbolP, 1); 2347 ARM_SET_THUMB (symbolP, 1); 2348 ARM_SET_INTERWORK (symbolP, support_interwork); 2349 break; 2350 2351 case MAP_DATA: 2352 default: 2353 return; 2354 } 2355} 2356#else 2357#define mapping_state(x) /* nothing */ 2358#endif 2359 2360/* Find the real, Thumb encoded start of a Thumb function. */ 2361 2362static symbolS * 2363find_real_start (symbolS * symbolP) 2364{ 2365 char * real_start; 2366 const char * name = S_GET_NAME (symbolP); 2367 symbolS * new_target; 2368 2369 /* This definition must agree with the one in gcc/config/arm/thumb.c. */ 2370#define STUB_NAME ".real_start_of" 2371 2372 if (name == NULL) 2373 abort (); 2374 2375 /* The compiler may generate BL instructions to local labels because 2376 it needs to perform a branch to a far away location. 
These labels 2377 do not have a corresponding ".real_start_of" label. We check 2378 both for S_IS_LOCAL and for a leading dot, to give a way to bypass 2379 the ".real_start_of" convention for nonlocal branches. */ 2380 if (S_IS_LOCAL (symbolP) || name[0] == '.') 2381 return symbolP; 2382 2383 real_start = ACONCAT ((STUB_NAME, name, NULL)); 2384 new_target = symbol_find (real_start); 2385 2386 if (new_target == NULL) 2387 { 2388 as_warn ("Failed to find real start of function: %s\n", name); 2389 new_target = symbolP; 2390 } 2391 2392 return new_target; 2393} 2394 2395static void 2396opcode_select (int width) 2397{ 2398 switch (width) 2399 { 2400 case 16: 2401 if (! thumb_mode) 2402 { 2403 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t)) 2404 as_bad (_("selected processor does not support THUMB opcodes")); 2405 2406 thumb_mode = 1; 2407 /* No need to force the alignment, since we will have been 2408 coming from ARM mode, which is word-aligned. */ 2409 record_alignment (now_seg, 1); 2410 } 2411 mapping_state (MAP_THUMB); 2412 break; 2413 2414 case 32: 2415 if (thumb_mode) 2416 { 2417 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)) 2418 as_bad (_("selected processor does not support ARM opcodes")); 2419 2420 thumb_mode = 0; 2421 2422 if (!need_pass_2) 2423 frag_align (2, 0, 0); 2424 2425 record_alignment (now_seg, 1); 2426 } 2427 mapping_state (MAP_ARM); 2428 break; 2429 2430 default: 2431 as_bad (_("invalid instruction size selected (%d)"), width); 2432 } 2433} 2434 2435static void 2436s_arm (int ignore ATTRIBUTE_UNUSED) 2437{ 2438 opcode_select (32); 2439 demand_empty_rest_of_line (); 2440} 2441 2442static void 2443s_thumb (int ignore ATTRIBUTE_UNUSED) 2444{ 2445 opcode_select (16); 2446 demand_empty_rest_of_line (); 2447} 2448 2449static void 2450s_code (int unused ATTRIBUTE_UNUSED) 2451{ 2452 int temp; 2453 2454 temp = get_absolute_expression (); 2455 switch (temp) 2456 { 2457 case 16: 2458 case 32: 2459 opcode_select (temp); 2460 break; 2461 2462 default: 
2463 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp); 2464 } 2465} 2466 2467static void 2468s_force_thumb (int ignore ATTRIBUTE_UNUSED) 2469{ 2470 /* If we are not already in thumb mode go into it, EVEN if 2471 the target processor does not support thumb instructions. 2472 This is used by gcc/config/arm/lib1funcs.asm for example 2473 to compile interworking support functions even if the 2474 target processor should not support interworking. */ 2475 if (! thumb_mode) 2476 { 2477 thumb_mode = 2; 2478 record_alignment (now_seg, 1); 2479 } 2480 2481 demand_empty_rest_of_line (); 2482} 2483 2484static void 2485s_thumb_func (int ignore ATTRIBUTE_UNUSED) 2486{ 2487 s_thumb (0); 2488 2489 /* The following label is the name/address of the start of a Thumb function. 2490 We need to know this for the interworking support. */ 2491 label_is_thumb_function_name = TRUE; 2492} 2493 2494/* Perform a .set directive, but also mark the alias as 2495 being a thumb function. */ 2496 2497static void 2498s_thumb_set (int equiv) 2499{ 2500 /* XXX the following is a duplicate of the code for s_set() in read.c 2501 We cannot just call that code as we need to get at the symbol that 2502 is created. */ 2503 char * name; 2504 char delim; 2505 char * end_name; 2506 symbolS * symbolP; 2507 2508 /* Especial apologies for the random logic: 2509 This just grew, and could be parsed much more simply! 2510 Dean - in haste. */ 2511 name = input_line_pointer; 2512 delim = get_symbol_end (); 2513 end_name = input_line_pointer; 2514 *end_name = delim; 2515 2516 if (*input_line_pointer != ',') 2517 { 2518 *end_name = 0; 2519 as_bad (_("expected comma after name \"%s\""), name); 2520 *end_name = delim; 2521 ignore_rest_of_line (); 2522 return; 2523 } 2524 2525 input_line_pointer++; 2526 *end_name = 0; 2527 2528 if (name[0] == '.' && name[1] == '\0') 2529 { 2530 /* XXX - this should not happen to .thumb_set. 
*/ 2531 abort (); 2532 } 2533 2534 if ((symbolP = symbol_find (name)) == NULL 2535 && (symbolP = md_undefined_symbol (name)) == NULL) 2536 { 2537#ifndef NO_LISTING 2538 /* When doing symbol listings, play games with dummy fragments living 2539 outside the normal fragment chain to record the file and line info 2540 for this symbol. */ 2541 if (listing & LISTING_SYMBOLS) 2542 { 2543 extern struct list_info_struct * listing_tail; 2544 fragS * dummy_frag = xmalloc (sizeof (fragS)); 2545 2546 memset (dummy_frag, 0, sizeof (fragS)); 2547 dummy_frag->fr_type = rs_fill; 2548 dummy_frag->line = listing_tail; 2549 symbolP = symbol_new (name, undefined_section, 0, dummy_frag); 2550 dummy_frag->fr_symbol = symbolP; 2551 } 2552 else 2553#endif 2554 symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag); 2555 2556#ifdef OBJ_COFF 2557 /* "set" symbols are local unless otherwise specified. */ 2558 SF_SET_LOCAL (symbolP); 2559#endif /* OBJ_COFF */ 2560 } /* Make a new symbol. */ 2561 2562 symbol_table_insert (symbolP); 2563 2564 * end_name = delim; 2565 2566 if (equiv 2567 && S_IS_DEFINED (symbolP) 2568 && S_GET_SEGMENT (symbolP) != reg_section) 2569 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP)); 2570 2571 pseudo_set (symbolP); 2572 2573 demand_empty_rest_of_line (); 2574 2575 /* XXX Now we come to the Thumb specific bit of code. */ 2576 2577 THUMB_SET_FUNC (symbolP, 1); 2578 ARM_SET_THUMB (symbolP, 1); 2579#if defined OBJ_ELF || defined OBJ_COFF 2580 ARM_SET_INTERWORK (symbolP, support_interwork); 2581#endif 2582} 2583 2584/* Directives: Mode selection. */ 2585 2586/* .syntax [unified|divided] - choose the new unified syntax 2587 (same for Arm and Thumb encoding, modulo slight differences in what 2588 can be represented) or the old divergent syntax for each mode. 
*/ 2589static void 2590s_syntax (int unused ATTRIBUTE_UNUSED) 2591{ 2592 char *name, delim; 2593 2594 name = input_line_pointer; 2595 delim = get_symbol_end (); 2596 2597 if (!strcasecmp (name, "unified")) 2598 unified_syntax = TRUE; 2599 else if (!strcasecmp (name, "divided")) 2600 unified_syntax = FALSE; 2601 else 2602 { 2603 as_bad (_("unrecognized syntax mode \"%s\""), name); 2604 return; 2605 } 2606 *input_line_pointer = delim; 2607 demand_empty_rest_of_line (); 2608} 2609 2610/* Directives: sectioning and alignment. */ 2611 2612/* Same as s_align_ptwo but align 0 => align 2. */ 2613 2614static void 2615s_align (int unused ATTRIBUTE_UNUSED) 2616{ 2617 int temp; 2618 bfd_boolean fill_p; 2619 long temp_fill; 2620 long max_alignment = 15; 2621 2622 temp = get_absolute_expression (); 2623 if (temp > max_alignment) 2624 as_bad (_("alignment too large: %d assumed"), temp = max_alignment); 2625 else if (temp < 0) 2626 { 2627 as_bad (_("alignment negative. 0 assumed.")); 2628 temp = 0; 2629 } 2630 2631 if (*input_line_pointer == ',') 2632 { 2633 input_line_pointer++; 2634 temp_fill = get_absolute_expression (); 2635 fill_p = TRUE; 2636 } 2637 else 2638 { 2639 fill_p = FALSE; 2640 temp_fill = 0; 2641 } 2642 2643 if (!temp) 2644 temp = 2; 2645 2646 /* Only make a frag if we HAVE to. */ 2647 if (temp && !need_pass_2) 2648 { 2649 if (!fill_p && subseg_text_p (now_seg)) 2650 frag_align_code (temp, 0); 2651 else 2652 frag_align (temp, (int) temp_fill, 0); 2653 } 2654 demand_empty_rest_of_line (); 2655 2656 record_alignment (now_seg, temp); 2657} 2658 2659static void 2660s_bss (int ignore ATTRIBUTE_UNUSED) 2661{ 2662 /* We don't support putting frags in the BSS segment, we fake it by 2663 marking in_bss, then looking at s_skip for clues. */ 2664 subseg_set (bss_section, 0); 2665 demand_empty_rest_of_line (); 2666 mapping_state (MAP_DATA); 2667} 2668 2669static void 2670s_even (int ignore ATTRIBUTE_UNUSED) 2671{ 2672 /* Never make frag if expect extra pass. 
*/ 2673 if (!need_pass_2) 2674 frag_align (1, 0, 0); 2675 2676 record_alignment (now_seg, 1); 2677 2678 demand_empty_rest_of_line (); 2679} 2680 2681/* Directives: Literal pools. */ 2682 2683static literal_pool * 2684find_literal_pool (void) 2685{ 2686 literal_pool * pool; 2687 2688 for (pool = list_of_pools; pool != NULL; pool = pool->next) 2689 { 2690 if (pool->section == now_seg 2691 && pool->sub_section == now_subseg) 2692 break; 2693 } 2694 2695 return pool; 2696} 2697 2698static literal_pool * 2699find_or_make_literal_pool (void) 2700{ 2701 /* Next literal pool ID number. */ 2702 static unsigned int latest_pool_num = 1; 2703 literal_pool * pool; 2704 2705 pool = find_literal_pool (); 2706 2707 if (pool == NULL) 2708 { 2709 /* Create a new pool. */ 2710 pool = xmalloc (sizeof (* pool)); 2711 if (! pool) 2712 return NULL; 2713 2714 pool->next_free_entry = 0; 2715 pool->section = now_seg; 2716 pool->sub_section = now_subseg; 2717 pool->next = list_of_pools; 2718 pool->symbol = NULL; 2719 2720 /* Add it to the list. */ 2721 list_of_pools = pool; 2722 } 2723 2724 /* New pools, and emptied pools, will have a NULL symbol. */ 2725 if (pool->symbol == NULL) 2726 { 2727 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section, 2728 (valueT) 0, &zero_address_frag); 2729 pool->id = latest_pool_num ++; 2730 } 2731 2732 /* Done. */ 2733 return pool; 2734} 2735 2736/* Add the literal in the global 'inst' 2737 structure to the relevent literal pool. */ 2738 2739static int 2740add_to_lit_pool (void) 2741{ 2742 literal_pool * pool; 2743 unsigned int entry; 2744 2745 pool = find_or_make_literal_pool (); 2746 2747 /* Check if this literal value is already in the pool. 
*/ 2748 for (entry = 0; entry < pool->next_free_entry; entry ++) 2749 { 2750 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op) 2751 && (inst.reloc.exp.X_op == O_constant) 2752 && (pool->literals[entry].X_add_number 2753 == inst.reloc.exp.X_add_number) 2754 && (pool->literals[entry].X_unsigned 2755 == inst.reloc.exp.X_unsigned)) 2756 break; 2757 2758 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op) 2759 && (inst.reloc.exp.X_op == O_symbol) 2760 && (pool->literals[entry].X_add_number 2761 == inst.reloc.exp.X_add_number) 2762 && (pool->literals[entry].X_add_symbol 2763 == inst.reloc.exp.X_add_symbol) 2764 && (pool->literals[entry].X_op_symbol 2765 == inst.reloc.exp.X_op_symbol)) 2766 break; 2767 } 2768 2769 /* Do we need to create a new entry? */ 2770 if (entry == pool->next_free_entry) 2771 { 2772 if (entry >= MAX_LITERAL_POOL_SIZE) 2773 { 2774 inst.error = _("literal pool overflow"); 2775 return FAIL; 2776 } 2777 2778 pool->literals[entry] = inst.reloc.exp; 2779 pool->next_free_entry += 1; 2780 } 2781 2782 inst.reloc.exp.X_op = O_symbol; 2783 inst.reloc.exp.X_add_number = ((int) entry) * 4; 2784 inst.reloc.exp.X_add_symbol = pool->symbol; 2785 2786 return SUCCESS; 2787} 2788 2789/* Can't use symbol_new here, so have to create a symbol and then at 2790 a later date assign it a value. Thats what these functions do. */ 2791 2792static void 2793symbol_locate (symbolS * symbolP, 2794 const char * name, /* It is copied, the caller can modify. */ 2795 segT segment, /* Segment identifier (SEG_<something>). */ 2796 valueT valu, /* Symbol value. */ 2797 fragS * frag) /* Associated fragment. */ 2798{ 2799 unsigned int name_length; 2800 char * preserved_copy_of_name; 2801 2802 name_length = strlen (name) + 1; /* +1 for \0. 
*/ 2803 obstack_grow (¬es, name, name_length); 2804 preserved_copy_of_name = obstack_finish (¬es); 2805 2806#ifdef tc_canonicalize_symbol_name 2807 preserved_copy_of_name = 2808 tc_canonicalize_symbol_name (preserved_copy_of_name); 2809#endif 2810 2811 S_SET_NAME (symbolP, preserved_copy_of_name); 2812 2813 S_SET_SEGMENT (symbolP, segment); 2814 S_SET_VALUE (symbolP, valu); 2815 symbol_clear_list_pointers (symbolP); 2816 2817 symbol_set_frag (symbolP, frag); 2818 2819 /* Link to end of symbol chain. */ 2820 { 2821 extern int symbol_table_frozen; 2822 2823 if (symbol_table_frozen) 2824 abort (); 2825 } 2826 2827 symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP); 2828 2829 obj_symbol_new_hook (symbolP); 2830 2831#ifdef tc_symbol_new_hook 2832 tc_symbol_new_hook (symbolP); 2833#endif 2834 2835#ifdef DEBUG_SYMS 2836 verify_symbol_chain (symbol_rootP, symbol_lastP); 2837#endif /* DEBUG_SYMS */ 2838} 2839 2840 2841static void 2842s_ltorg (int ignored ATTRIBUTE_UNUSED) 2843{ 2844 unsigned int entry; 2845 literal_pool * pool; 2846 char sym_name[20]; 2847 2848 pool = find_literal_pool (); 2849 if (pool == NULL 2850 || pool->symbol == NULL 2851 || pool->next_free_entry == 0) 2852 return; 2853 2854 mapping_state (MAP_DATA); 2855 2856 /* Align pool as you have word accesses. 2857 Only make a frag if we have to. */ 2858 if (!need_pass_2) 2859 frag_align (2, 0, 0); 2860 2861 record_alignment (now_seg, 2); 2862 2863 sprintf (sym_name, "$$lit_\002%x", pool->id); 2864 2865 symbol_locate (pool->symbol, sym_name, now_seg, 2866 (valueT) frag_now_fix (), frag_now); 2867 symbol_table_insert (pool->symbol); 2868 2869 ARM_SET_THUMB (pool->symbol, thumb_mode); 2870 2871#if defined OBJ_COFF || defined OBJ_ELF 2872 ARM_SET_INTERWORK (pool->symbol, support_interwork); 2873#endif 2874 2875 for (entry = 0; entry < pool->next_free_entry; entry ++) 2876 /* First output the expression in the instruction to the pool. 
*/ 2877 emit_expr (&(pool->literals[entry]), 4); /* .word */ 2878 2879 /* Mark the pool as empty. */ 2880 pool->next_free_entry = 0; 2881 pool->symbol = NULL; 2882} 2883 2884#ifdef OBJ_ELF 2885/* Forward declarations for functions below, in the MD interface 2886 section. */ 2887static void fix_new_arm (fragS *, int, short, expressionS *, int, int); 2888static valueT create_unwind_entry (int); 2889static void start_unwind_section (const segT, int); 2890static void add_unwind_opcode (valueT, int); 2891static void flush_pending_unwind (void); 2892 2893/* Directives: Data. */ 2894 2895static void 2896s_arm_elf_cons (int nbytes) 2897{ 2898 expressionS exp; 2899 2900#ifdef md_flush_pending_output 2901 md_flush_pending_output (); 2902#endif 2903 2904 if (is_it_end_of_statement ()) 2905 { 2906 demand_empty_rest_of_line (); 2907 return; 2908 } 2909 2910#ifdef md_cons_align 2911 md_cons_align (nbytes); 2912#endif 2913 2914 mapping_state (MAP_DATA); 2915 do 2916 { 2917 int reloc; 2918 char *base = input_line_pointer; 2919 2920 expression (& exp); 2921 2922 if (exp.X_op != O_symbol) 2923 emit_expr (&exp, (unsigned int) nbytes); 2924 else 2925 { 2926 char *before_reloc = input_line_pointer; 2927 reloc = parse_reloc (&input_line_pointer); 2928 if (reloc == -1) 2929 { 2930 as_bad (_("unrecognized relocation suffix")); 2931 ignore_rest_of_line (); 2932 return; 2933 } 2934 else if (reloc == BFD_RELOC_UNUSED) 2935 emit_expr (&exp, (unsigned int) nbytes); 2936 else 2937 { 2938 reloc_howto_type *howto = bfd_reloc_type_lookup (stdoutput, reloc); 2939 int size = bfd_get_reloc_size (howto); 2940 2941 if (reloc == BFD_RELOC_ARM_PLT32) 2942 { 2943 as_bad (_("(plt) is only valid on branch targets")); 2944 reloc = BFD_RELOC_UNUSED; 2945 size = 0; 2946 } 2947 2948 if (size > nbytes) 2949 as_bad (_("%s relocations do not fit in %d bytes"), 2950 howto->name, nbytes); 2951 else 2952 { 2953 /* We've parsed an expression stopping at O_symbol. 
2954 But there may be more expression left now that we 2955 have parsed the relocation marker. Parse it again. 2956 XXX Surely there is a cleaner way to do this. */ 2957 char *p = input_line_pointer; 2958 int offset; 2959 char *save_buf = alloca (input_line_pointer - base); 2960 memcpy (save_buf, base, input_line_pointer - base); 2961 memmove (base + (input_line_pointer - before_reloc), 2962 base, before_reloc - base); 2963 2964 input_line_pointer = base + (input_line_pointer-before_reloc); 2965 expression (&exp); 2966 memcpy (base, save_buf, p - base); 2967 2968 offset = nbytes - size; 2969 p = frag_more ((int) nbytes); 2970 fix_new_exp (frag_now, p - frag_now->fr_literal + offset, 2971 size, &exp, 0, reloc); 2972 } 2973 } 2974 } 2975 } 2976 while (*input_line_pointer++ == ','); 2977 2978 /* Put terminator back into stream. */ 2979 input_line_pointer --; 2980 demand_empty_rest_of_line (); 2981} 2982 2983 2984/* Parse a .rel31 directive. */ 2985 2986static void 2987s_arm_rel31 (int ignored ATTRIBUTE_UNUSED) 2988{ 2989 expressionS exp; 2990 char *p; 2991 valueT highbit; 2992 2993 highbit = 0; 2994 if (*input_line_pointer == '1') 2995 highbit = 0x80000000; 2996 else if (*input_line_pointer != '0') 2997 as_bad (_("expected 0 or 1")); 2998 2999 input_line_pointer++; 3000 if (*input_line_pointer != ',') 3001 as_bad (_("missing comma")); 3002 input_line_pointer++; 3003 3004#ifdef md_flush_pending_output 3005 md_flush_pending_output (); 3006#endif 3007 3008#ifdef md_cons_align 3009 md_cons_align (4); 3010#endif 3011 3012 mapping_state (MAP_DATA); 3013 3014 expression (&exp); 3015 3016 p = frag_more (4); 3017 md_number_to_chars (p, highbit, 4); 3018 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1, 3019 BFD_RELOC_ARM_PREL31); 3020 3021 demand_empty_rest_of_line (); 3022} 3023 3024/* Directives: AEABI stack-unwind tables. */ 3025 3026/* Parse an unwind_fnstart directive. Simply records the current location. 
*/ 3027 3028static void 3029s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED) 3030{ 3031 demand_empty_rest_of_line (); 3032 /* Mark the start of the function. */ 3033 unwind.proc_start = expr_build_dot (); 3034 3035 /* Reset the rest of the unwind info. */ 3036 unwind.opcode_count = 0; 3037 unwind.table_entry = NULL; 3038 unwind.personality_routine = NULL; 3039 unwind.personality_index = -1; 3040 unwind.frame_size = 0; 3041 unwind.fp_offset = 0; 3042 unwind.fp_reg = 13; 3043 unwind.fp_used = 0; 3044 unwind.sp_restored = 0; 3045} 3046 3047 3048/* Parse a handlerdata directive. Creates the exception handling table entry 3049 for the function. */ 3050 3051static void 3052s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED) 3053{ 3054 demand_empty_rest_of_line (); 3055 if (unwind.table_entry) 3056 as_bad (_("dupicate .handlerdata directive")); 3057 3058 create_unwind_entry (1); 3059} 3060 3061/* Parse an unwind_fnend directive. Generates the index table entry. */ 3062 3063static void 3064s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED) 3065{ 3066 long where; 3067 char *ptr; 3068 valueT val; 3069 3070 demand_empty_rest_of_line (); 3071 3072 /* Add eh table entry. */ 3073 if (unwind.table_entry == NULL) 3074 val = create_unwind_entry (0); 3075 else 3076 val = 0; 3077 3078 /* Add index table entry. This is two words. */ 3079 start_unwind_section (unwind.saved_seg, 1); 3080 frag_align (2, 0, 0); 3081 record_alignment (now_seg, 2); 3082 3083 ptr = frag_more (8); 3084 memset(ptr, 0, 8); 3085 where = frag_now_fix () - 8; 3086 3087 /* Self relative offset of the function start. */ 3088 fix_new (frag_now, where, 4, unwind.proc_start, 0, 1, 3089 BFD_RELOC_ARM_PREL31); 3090 3091 /* Indicate dependency on EHABI-defined personality routines to the 3092 linker, if it hasn't been done already. 
*/ 3093 if (unwind.personality_index >= 0 && unwind.personality_index < 3 3094 && !(marked_pr_dependency & (1 << unwind.personality_index))) 3095 { 3096 static const char *const name[] = { 3097 "__aeabi_unwind_cpp_pr0", 3098 "__aeabi_unwind_cpp_pr1", 3099 "__aeabi_unwind_cpp_pr2" 3100 }; 3101 symbolS *pr = symbol_find_or_make (name[unwind.personality_index]); 3102 fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE); 3103 marked_pr_dependency |= 1 << unwind.personality_index; 3104 seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency 3105 = marked_pr_dependency; 3106 } 3107 3108 if (val) 3109 /* Inline exception table entry. */ 3110 md_number_to_chars (ptr + 4, val, 4); 3111 else 3112 /* Self relative offset of the table entry. */ 3113 fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1, 3114 BFD_RELOC_ARM_PREL31); 3115 3116 /* Restore the original section. */ 3117 subseg_set (unwind.saved_seg, unwind.saved_subseg); 3118} 3119 3120 3121/* Parse an unwind_cantunwind directive. */ 3122 3123static void 3124s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED) 3125{ 3126 demand_empty_rest_of_line (); 3127 if (unwind.personality_routine || unwind.personality_index != -1) 3128 as_bad (_("personality routine specified for cantunwind frame")); 3129 3130 unwind.personality_index = -2; 3131} 3132 3133 3134/* Parse a personalityindex directive. 
*/ 3135 3136static void 3137s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED) 3138{ 3139 expressionS exp; 3140 3141 if (unwind.personality_routine || unwind.personality_index != -1) 3142 as_bad (_("duplicate .personalityindex directive")); 3143 3144 expression (&exp); 3145 3146 if (exp.X_op != O_constant 3147 || exp.X_add_number < 0 || exp.X_add_number > 15) 3148 { 3149 as_bad (_("bad personality routine number")); 3150 ignore_rest_of_line (); 3151 return; 3152 } 3153 3154 unwind.personality_index = exp.X_add_number; 3155 3156 demand_empty_rest_of_line (); 3157} 3158 3159 3160/* Parse a personality directive. */ 3161 3162static void 3163s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED) 3164{ 3165 char *name, *p, c; 3166 3167 if (unwind.personality_routine || unwind.personality_index != -1) 3168 as_bad (_("duplicate .personality directive")); 3169 3170 name = input_line_pointer; 3171 c = get_symbol_end (); 3172 p = input_line_pointer; 3173 unwind.personality_routine = symbol_find_or_make (name); 3174 *p = c; 3175 demand_empty_rest_of_line (); 3176} 3177 3178 3179/* Parse a directive saving core registers. */ 3180 3181static void 3182s_arm_unwind_save_core (void) 3183{ 3184 valueT op; 3185 long range; 3186 int n; 3187 3188 range = parse_reg_list (&input_line_pointer); 3189 if (range == FAIL) 3190 { 3191 as_bad (_("expected register list")); 3192 ignore_rest_of_line (); 3193 return; 3194 } 3195 3196 demand_empty_rest_of_line (); 3197 3198 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...} 3199 into .unwind_save {..., sp...}. We aren't bothered about the value of 3200 ip because it is clobbered by calls. */ 3201 if (unwind.sp_restored && unwind.fp_reg == 12 3202 && (range & 0x3000) == 0x1000) 3203 { 3204 unwind.opcode_count--; 3205 unwind.sp_restored = 0; 3206 range = (range | 0x2000) & ~0x1000; 3207 unwind.pending_offset = 0; 3208 } 3209 3210 /* Pop r4-r15. */ 3211 if (range & 0xfff0) 3212 { 3213 /* See if we can use the short opcodes. 
These pop a block of up to 8 3214 registers starting with r4, plus maybe r14. */ 3215 for (n = 0; n < 8; n++) 3216 { 3217 /* Break at the first non-saved register. */ 3218 if ((range & (1 << (n + 4))) == 0) 3219 break; 3220 } 3221 /* See if there are any other bits set. */ 3222 if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0) 3223 { 3224 /* Use the long form. */ 3225 op = 0x8000 | ((range >> 4) & 0xfff); 3226 add_unwind_opcode (op, 2); 3227 } 3228 else 3229 { 3230 /* Use the short form. */ 3231 if (range & 0x4000) 3232 op = 0xa8; /* Pop r14. */ 3233 else 3234 op = 0xa0; /* Do not pop r14. */ 3235 op |= (n - 1); 3236 add_unwind_opcode (op, 1); 3237 } 3238 } 3239 3240 /* Pop r0-r3. */ 3241 if (range & 0xf) 3242 { 3243 op = 0xb100 | (range & 0xf); 3244 add_unwind_opcode (op, 2); 3245 } 3246 3247 /* Record the number of bytes pushed. */ 3248 for (n = 0; n < 16; n++) 3249 { 3250 if (range & (1 << n)) 3251 unwind.frame_size += 4; 3252 } 3253} 3254 3255 3256/* Parse a directive saving FPA registers. */ 3257 3258static void 3259s_arm_unwind_save_fpa (int reg) 3260{ 3261 expressionS exp; 3262 int num_regs; 3263 valueT op; 3264 3265 /* Get Number of registers to transfer. */ 3266 if (skip_past_comma (&input_line_pointer) != FAIL) 3267 expression (&exp); 3268 else 3269 exp.X_op = O_illegal; 3270 3271 if (exp.X_op != O_constant) 3272 { 3273 as_bad (_("expected , <constant>")); 3274 ignore_rest_of_line (); 3275 return; 3276 } 3277 3278 num_regs = exp.X_add_number; 3279 3280 if (num_regs < 1 || num_regs > 4) 3281 { 3282 as_bad (_("number of registers must be in the range [1:4]")); 3283 ignore_rest_of_line (); 3284 return; 3285 } 3286 3287 demand_empty_rest_of_line (); 3288 3289 if (reg == 4) 3290 { 3291 /* Short form. */ 3292 op = 0xb4 | (num_regs - 1); 3293 add_unwind_opcode (op, 1); 3294 } 3295 else 3296 { 3297 /* Long form. 
*/ 3298 op = 0xc800 | (reg << 4) | (num_regs - 1); 3299 add_unwind_opcode (op, 2); 3300 } 3301 unwind.frame_size += num_regs * 12; 3302} 3303 3304 3305/* Parse a directive saving VFP registers for ARMv6 and above. */ 3306 3307static void 3308s_arm_unwind_save_vfp_armv6 (void) 3309{ 3310 int count; 3311 unsigned int start; 3312 valueT op; 3313 int num_vfpv3_regs = 0; 3314 int num_regs_below_16; 3315 3316 count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D); 3317 if (count == FAIL) 3318 { 3319 as_bad (_("expected register list")); 3320 ignore_rest_of_line (); 3321 return; 3322 } 3323 3324 demand_empty_rest_of_line (); 3325 3326 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather 3327 than FSTMX/FLDMX-style ones). */ 3328 3329 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */ 3330 if (start >= 16) 3331 num_vfpv3_regs = count; 3332 else if (start + count > 16) 3333 num_vfpv3_regs = start + count - 16; 3334 3335 if (num_vfpv3_regs > 0) 3336 { 3337 int start_offset = start > 16 ? start - 16 : 0; 3338 op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1); 3339 add_unwind_opcode (op, 2); 3340 } 3341 3342 /* Generate opcode for registers numbered in the range 0 .. 15. */ 3343 num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count; 3344 assert (num_regs_below_16 + num_vfpv3_regs == count); 3345 if (num_regs_below_16 > 0) 3346 { 3347 op = 0xc900 | (start << 4) | (num_regs_below_16 - 1); 3348 add_unwind_opcode (op, 2); 3349 } 3350 3351 unwind.frame_size += count * 8; 3352} 3353 3354 3355/* Parse a directive saving VFP registers for pre-ARMv6. 
*/ 3356 3357static void 3358s_arm_unwind_save_vfp (void) 3359{ 3360 int count; 3361 unsigned int reg; 3362 valueT op; 3363 3364 count = parse_vfp_reg_list (&input_line_pointer, ®, REGLIST_VFP_D); 3365 if (count == FAIL) 3366 { 3367 as_bad (_("expected register list")); 3368 ignore_rest_of_line (); 3369 return; 3370 } 3371 3372 demand_empty_rest_of_line (); 3373 3374 if (reg == 8) 3375 { 3376 /* Short form. */ 3377 op = 0xb8 | (count - 1); 3378 add_unwind_opcode (op, 1); 3379 } 3380 else 3381 { 3382 /* Long form. */ 3383 op = 0xb300 | (reg << 4) | (count - 1); 3384 add_unwind_opcode (op, 2); 3385 } 3386 unwind.frame_size += count * 8 + 4; 3387} 3388 3389 3390/* Parse a directive saving iWMMXt data registers. */ 3391 3392static void 3393s_arm_unwind_save_mmxwr (void) 3394{ 3395 int reg; 3396 int hi_reg; 3397 int i; 3398 unsigned mask = 0; 3399 valueT op; 3400 3401 if (*input_line_pointer == '{') 3402 input_line_pointer++; 3403 3404 do 3405 { 3406 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR); 3407 3408 if (reg == FAIL) 3409 { 3410 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR])); 3411 goto error; 3412 } 3413 3414 if (mask >> reg) 3415 as_tsktsk (_("register list not in ascending order")); 3416 mask |= 1 << reg; 3417 3418 if (*input_line_pointer == '-') 3419 { 3420 input_line_pointer++; 3421 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR); 3422 if (hi_reg == FAIL) 3423 { 3424 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR])); 3425 goto error; 3426 } 3427 else if (reg >= hi_reg) 3428 { 3429 as_bad (_("bad register range")); 3430 goto error; 3431 } 3432 for (; reg < hi_reg; reg++) 3433 mask |= 1 << reg; 3434 } 3435 } 3436 while (skip_past_comma (&input_line_pointer) != FAIL); 3437 3438 if (*input_line_pointer == '}') 3439 input_line_pointer++; 3440 3441 demand_empty_rest_of_line (); 3442 3443 /* Generate any deferred opcodes because we're going to be looking at 3444 the list. 
*/ 3445 flush_pending_unwind (); 3446 3447 for (i = 0; i < 16; i++) 3448 { 3449 if (mask & (1 << i)) 3450 unwind.frame_size += 8; 3451 } 3452 3453 /* Attempt to combine with a previous opcode. We do this because gcc 3454 likes to output separate unwind directives for a single block of 3455 registers. */ 3456 if (unwind.opcode_count > 0) 3457 { 3458 i = unwind.opcodes[unwind.opcode_count - 1]; 3459 if ((i & 0xf8) == 0xc0) 3460 { 3461 i &= 7; 3462 /* Only merge if the blocks are contiguous. */ 3463 if (i < 6) 3464 { 3465 if ((mask & 0xfe00) == (1 << 9)) 3466 { 3467 mask |= ((1 << (i + 11)) - 1) & 0xfc00; 3468 unwind.opcode_count--; 3469 } 3470 } 3471 else if (i == 6 && unwind.opcode_count >= 2) 3472 { 3473 i = unwind.opcodes[unwind.opcode_count - 2]; 3474 reg = i >> 4; 3475 i &= 0xf; 3476 3477 op = 0xffff << (reg - 1); 3478 if (reg > 0 3479 && ((mask & op) == (1u << (reg - 1)))) 3480 { 3481 op = (1 << (reg + i + 1)) - 1; 3482 op &= ~((1 << reg) - 1); 3483 mask |= op; 3484 unwind.opcode_count -= 2; 3485 } 3486 } 3487 } 3488 } 3489 3490 hi_reg = 15; 3491 /* We want to generate opcodes in the order the registers have been 3492 saved, ie. descending order. */ 3493 for (reg = 15; reg >= -1; reg--) 3494 { 3495 /* Save registers in blocks. */ 3496 if (reg < 0 3497 || !(mask & (1 << reg))) 3498 { 3499 /* We found an unsaved reg. Generate opcodes to save the 3500 preceeding block. */ 3501 if (reg != hi_reg) 3502 { 3503 if (reg == 9) 3504 { 3505 /* Short form. */ 3506 op = 0xc0 | (hi_reg - 10); 3507 add_unwind_opcode (op, 1); 3508 } 3509 else 3510 { 3511 /* Long form. 
*/ 3512 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1); 3513 add_unwind_opcode (op, 2); 3514 } 3515 } 3516 hi_reg = reg - 1; 3517 } 3518 } 3519 3520 return; 3521error: 3522 ignore_rest_of_line (); 3523} 3524 3525static void 3526s_arm_unwind_save_mmxwcg (void) 3527{ 3528 int reg; 3529 int hi_reg; 3530 unsigned mask = 0; 3531 valueT op; 3532 3533 if (*input_line_pointer == '{') 3534 input_line_pointer++; 3535 3536 do 3537 { 3538 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG); 3539 3540 if (reg == FAIL) 3541 { 3542 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG])); 3543 goto error; 3544 } 3545 3546 reg -= 8; 3547 if (mask >> reg) 3548 as_tsktsk (_("register list not in ascending order")); 3549 mask |= 1 << reg; 3550 3551 if (*input_line_pointer == '-') 3552 { 3553 input_line_pointer++; 3554 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG); 3555 if (hi_reg == FAIL) 3556 { 3557 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG])); 3558 goto error; 3559 } 3560 else if (reg >= hi_reg) 3561 { 3562 as_bad (_("bad register range")); 3563 goto error; 3564 } 3565 for (; reg < hi_reg; reg++) 3566 mask |= 1 << reg; 3567 } 3568 } 3569 while (skip_past_comma (&input_line_pointer) != FAIL); 3570 3571 if (*input_line_pointer == '}') 3572 input_line_pointer++; 3573 3574 demand_empty_rest_of_line (); 3575 3576 /* Generate any deferred opcodes because we're going to be looking at 3577 the list. */ 3578 flush_pending_unwind (); 3579 3580 for (reg = 0; reg < 16; reg++) 3581 { 3582 if (mask & (1 << reg)) 3583 unwind.frame_size += 4; 3584 } 3585 op = 0xc700 | mask; 3586 add_unwind_opcode (op, 2); 3587 return; 3588error: 3589 ignore_rest_of_line (); 3590} 3591 3592 3593/* Parse an unwind_save directive. 3594 If the argument is non-zero, this is a .vsave directive. */ 3595 3596static void 3597s_arm_unwind_save (int arch_v6) 3598{ 3599 char *peek; 3600 struct reg_entry *reg; 3601 bfd_boolean had_brace = FALSE; 3602 3603 /* Figure out what sort of save we have. 
*/ 3604 peek = input_line_pointer; 3605 3606 if (*peek == '{') 3607 { 3608 had_brace = TRUE; 3609 peek++; 3610 } 3611 3612 reg = arm_reg_parse_multi (&peek); 3613 3614 if (!reg) 3615 { 3616 as_bad (_("register expected")); 3617 ignore_rest_of_line (); 3618 return; 3619 } 3620 3621 switch (reg->type) 3622 { 3623 case REG_TYPE_FN: 3624 if (had_brace) 3625 { 3626 as_bad (_("FPA .unwind_save does not take a register list")); 3627 ignore_rest_of_line (); 3628 return; 3629 } 3630 s_arm_unwind_save_fpa (reg->number); 3631 return; 3632 3633 case REG_TYPE_RN: s_arm_unwind_save_core (); return; 3634 case REG_TYPE_VFD: 3635 if (arch_v6) 3636 s_arm_unwind_save_vfp_armv6 (); 3637 else 3638 s_arm_unwind_save_vfp (); 3639 return; 3640 case REG_TYPE_MMXWR: s_arm_unwind_save_mmxwr (); return; 3641 case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return; 3642 3643 default: 3644 as_bad (_(".unwind_save does not support this kind of register")); 3645 ignore_rest_of_line (); 3646 } 3647} 3648 3649 3650/* Parse an unwind_movsp directive. */ 3651 3652static void 3653s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED) 3654{ 3655 int reg; 3656 valueT op; 3657 int offset; 3658 3659 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN); 3660 if (reg == FAIL) 3661 { 3662 as_bad (_(reg_expected_msgs[REG_TYPE_RN])); 3663 ignore_rest_of_line (); 3664 return; 3665 } 3666 3667 /* Optional constant. */ 3668 if (skip_past_comma (&input_line_pointer) != FAIL) 3669 { 3670 if (immediate_for_directive (&offset) == FAIL) 3671 return; 3672 } 3673 else 3674 offset = 0; 3675 3676 demand_empty_rest_of_line (); 3677 3678 if (reg == REG_SP || reg == REG_PC) 3679 { 3680 as_bad (_("SP and PC not permitted in .unwind_movsp directive")); 3681 return; 3682 } 3683 3684 if (unwind.fp_reg != REG_SP) 3685 as_bad (_("unexpected .unwind_movsp directive")); 3686 3687 /* Generate opcode to restore the value. */ 3688 op = 0x90 | reg; 3689 add_unwind_opcode (op, 1); 3690 3691 /* Record the information for later. 
*/ 3692 unwind.fp_reg = reg; 3693 unwind.fp_offset = unwind.frame_size - offset; 3694 unwind.sp_restored = 1; 3695} 3696 3697/* Parse an unwind_pad directive. */ 3698 3699static void 3700s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED) 3701{ 3702 int offset; 3703 3704 if (immediate_for_directive (&offset) == FAIL) 3705 return; 3706 3707 if (offset & 3) 3708 { 3709 as_bad (_("stack increment must be multiple of 4")); 3710 ignore_rest_of_line (); 3711 return; 3712 } 3713 3714 /* Don't generate any opcodes, just record the details for later. */ 3715 unwind.frame_size += offset; 3716 unwind.pending_offset += offset; 3717 3718 demand_empty_rest_of_line (); 3719} 3720 3721/* Parse an unwind_setfp directive. */ 3722 3723static void 3724s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED) 3725{ 3726 int sp_reg; 3727 int fp_reg; 3728 int offset; 3729 3730 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN); 3731 if (skip_past_comma (&input_line_pointer) == FAIL) 3732 sp_reg = FAIL; 3733 else 3734 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN); 3735 3736 if (fp_reg == FAIL || sp_reg == FAIL) 3737 { 3738 as_bad (_("expected <reg>, <reg>")); 3739 ignore_rest_of_line (); 3740 return; 3741 } 3742 3743 /* Optional constant. */ 3744 if (skip_past_comma (&input_line_pointer) != FAIL) 3745 { 3746 if (immediate_for_directive (&offset) == FAIL) 3747 return; 3748 } 3749 else 3750 offset = 0; 3751 3752 demand_empty_rest_of_line (); 3753 3754 if (sp_reg != 13 && sp_reg != unwind.fp_reg) 3755 { 3756 as_bad (_("register must be either sp or set by a previous" 3757 "unwind_movsp directive")); 3758 return; 3759 } 3760 3761 /* Don't generate any opcodes, just record the information for later. */ 3762 unwind.fp_reg = fp_reg; 3763 unwind.fp_used = 1; 3764 if (sp_reg == 13) 3765 unwind.fp_offset = unwind.frame_size - offset; 3766 else 3767 unwind.fp_offset -= offset; 3768} 3769 3770/* Parse an unwind_raw directive. 
*/ 3771 3772static void 3773s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED) 3774{ 3775 expressionS exp; 3776 /* This is an arbitrary limit. */ 3777 unsigned char op[16]; 3778 int count; 3779 3780 expression (&exp); 3781 if (exp.X_op == O_constant 3782 && skip_past_comma (&input_line_pointer) != FAIL) 3783 { 3784 unwind.frame_size += exp.X_add_number; 3785 expression (&exp); 3786 } 3787 else 3788 exp.X_op = O_illegal; 3789 3790 if (exp.X_op != O_constant) 3791 { 3792 as_bad (_("expected <offset>, <opcode>")); 3793 ignore_rest_of_line (); 3794 return; 3795 } 3796 3797 count = 0; 3798 3799 /* Parse the opcode. */ 3800 for (;;) 3801 { 3802 if (count >= 16) 3803 { 3804 as_bad (_("unwind opcode too long")); 3805 ignore_rest_of_line (); 3806 } 3807 if (exp.X_op != O_constant || exp.X_add_number & ~0xff) 3808 { 3809 as_bad (_("invalid unwind opcode")); 3810 ignore_rest_of_line (); 3811 return; 3812 } 3813 op[count++] = exp.X_add_number; 3814 3815 /* Parse the next byte. */ 3816 if (skip_past_comma (&input_line_pointer) == FAIL) 3817 break; 3818 3819 expression (&exp); 3820 } 3821 3822 /* Add the opcode bytes in reverse order. */ 3823 while (count--) 3824 add_unwind_opcode (op[count], 1); 3825 3826 demand_empty_rest_of_line (); 3827} 3828 3829 3830/* Parse a .eabi_attribute directive. 
*/ 3831 3832static void 3833s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED) 3834{ 3835 s_vendor_attribute (OBJ_ATTR_PROC); 3836} 3837#endif /* OBJ_ELF */ 3838 3839static void s_arm_arch (int); 3840static void s_arm_arch_extension (int); 3841static void s_arm_object_arch (int); 3842static void s_arm_cpu (int); 3843static void s_arm_fpu (int); 3844 3845#ifdef TE_PE 3846 3847static void 3848pe_directive_secrel (int dummy ATTRIBUTE_UNUSED) 3849{ 3850 expressionS exp; 3851 3852 do 3853 { 3854 expression (&exp); 3855 if (exp.X_op == O_symbol) 3856 exp.X_op = O_secrel; 3857 3858 emit_expr (&exp, 4); 3859 } 3860 while (*input_line_pointer++ == ','); 3861 3862 input_line_pointer--; 3863 demand_empty_rest_of_line (); 3864} 3865#endif /* TE_PE */ 3866 3867/* This table describes all the machine specific pseudo-ops the assembler 3868 has to support. The fields are: 3869 pseudo-op name without dot 3870 function to call to execute this pseudo-op 3871 Integer arg to pass to the function. */ 3872 3873const pseudo_typeS md_pseudo_table[] = 3874{ 3875 /* Never called because '.req' does not start a line. */ 3876 { "req", s_req, 0 }, 3877 /* Following two are likewise never called. 
*/ 3878 { "dn", s_dn, 0 }, 3879 { "qn", s_qn, 0 }, 3880 { "unreq", s_unreq, 0 }, 3881 { "bss", s_bss, 0 }, 3882 { "align", s_align, 0 }, 3883 { "arm", s_arm, 0 }, 3884 { "thumb", s_thumb, 0 }, 3885 { "code", s_code, 0 }, 3886 { "force_thumb", s_force_thumb, 0 }, 3887 { "thumb_func", s_thumb_func, 0 }, 3888 { "thumb_set", s_thumb_set, 0 }, 3889 { "even", s_even, 0 }, 3890 { "ltorg", s_ltorg, 0 }, 3891 { "pool", s_ltorg, 0 }, 3892 { "syntax", s_syntax, 0 }, 3893 { "cpu", s_arm_cpu, 0 }, 3894 { "arch", s_arm_arch, 0 }, 3895 { "arch_extension", s_arm_arch_extension, 0 }, 3896 { "object_arch", s_arm_object_arch, 0 }, 3897 { "fpu", s_arm_fpu, 0 }, 3898#ifdef OBJ_ELF 3899 { "word", s_arm_elf_cons, 4 }, 3900 { "long", s_arm_elf_cons, 4 }, 3901 { "rel31", s_arm_rel31, 0 }, 3902 { "fnstart", s_arm_unwind_fnstart, 0 }, 3903 { "fnend", s_arm_unwind_fnend, 0 }, 3904 { "cantunwind", s_arm_unwind_cantunwind, 0 }, 3905 { "personality", s_arm_unwind_personality, 0 }, 3906 { "personalityindex", s_arm_unwind_personalityindex, 0 }, 3907 { "handlerdata", s_arm_unwind_handlerdata, 0 }, 3908 { "save", s_arm_unwind_save, 0 }, 3909 { "vsave", s_arm_unwind_save, 1 }, 3910 { "movsp", s_arm_unwind_movsp, 0 }, 3911 { "pad", s_arm_unwind_pad, 0 }, 3912 { "setfp", s_arm_unwind_setfp, 0 }, 3913 { "unwind_raw", s_arm_unwind_raw, 0 }, 3914 { "eabi_attribute", s_arm_eabi_attribute, 0 }, 3915#else 3916 { "word", cons, 4}, 3917 3918 /* These are used for dwarf. */ 3919 {"2byte", cons, 2}, 3920 {"4byte", cons, 4}, 3921 {"8byte", cons, 8}, 3922 /* These are used for dwarf2. 
     */
  { "file", (void (*) (int)) dwarf2_directive_file, 0 },
  { "loc",  dwarf2_directive_loc,  0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend",	 float_cons, 'x' },
  { "ldouble",	 float_cons, 'x' },
  { "packed",	 float_cons, 'p' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif
  { 0, 0, 0 }
};

/* Parser functions used exclusively in instruction operands.  */

/* Generic immediate-value read function for use in insn parsing.
   STR points to the beginning of the immediate (the leading #);
   VAL receives the value; if the value is outside [MIN, MAX]
   issue an error.  PREFIX_OPT is true if the immediate prefix is
   optional.  Returns SUCCESS or FAIL; on FAIL inst.error is set and
   *VAL is left unchanged.  */

static int
parse_immediate (char **str, int *val, int min, int max,
		 bfd_boolean prefix_opt)
{
  expressionS exp;
  my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
  /* Only fully-resolved constants are acceptable; symbolic or
     not-yet-known expressions are rejected.  */
  if (exp.X_op != O_constant)
    {
      inst.error = _("constant expression required");
      return FAIL;
    }

  if (exp.X_add_number < min || exp.X_add_number > max)
    {
      inst.error = _("immediate value out of range");
      return FAIL;
    }

  *val = exp.X_add_number;
  return SUCCESS;
}

/* Less-generic immediate-value read function with the possibility of loading a
   big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
   instructions.  Puts the result directly in inst.operands[i].  */

static int
parse_big_immediate (char **str, int i)
{
  expressionS exp;
  char *ptr = *str;

  my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);

  if (exp.X_op == O_constant)
    {
      /* Low 32 bits of the constant go in .imm.  */
      inst.operands[i].imm = exp.X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.
*/ 3984 if ((exp.X_add_number & ~0xffffffffl) != 0) 3985 { 3986 /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4. */ 3987 inst.operands[i].reg = ((exp.X_add_number >> 16) >> 16) & 0xffffffff; 3988 inst.operands[i].regisimm = 1; 3989 } 3990 } 3991 else if (exp.X_op == O_big 3992 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32 3993 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number <= 64) 3994 { 3995 unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0; 3996 /* Bignums have their least significant bits in 3997 generic_bignum[0]. Make sure we put 32 bits in imm and 3998 32 bits in reg, in a (hopefully) portable way. */ 3999 assert (parts != 0); 4000 inst.operands[i].imm = 0; 4001 for (j = 0; j < parts; j++, idx++) 4002 inst.operands[i].imm |= generic_bignum[idx] 4003 << (LITTLENUM_NUMBER_OF_BITS * j); 4004 inst.operands[i].reg = 0; 4005 for (j = 0; j < parts; j++, idx++) 4006 inst.operands[i].reg |= generic_bignum[idx] 4007 << (LITTLENUM_NUMBER_OF_BITS * j); 4008 inst.operands[i].regisimm = 1; 4009 } 4010 else 4011 return FAIL; 4012 4013 *str = ptr; 4014 4015 return SUCCESS; 4016} 4017 4018/* Returns the pseudo-register number of an FPA immediate constant, 4019 or FAIL if there isn't a valid constant here. */ 4020 4021static int 4022parse_fpa_immediate (char ** str) 4023{ 4024 LITTLENUM_TYPE words[MAX_LITTLENUMS]; 4025 char * save_in; 4026 expressionS exp; 4027 int i; 4028 int j; 4029 4030 /* First try and match exact strings, this is to guarantee 4031 that some formats will work even for cross assembly. */ 4032 4033 for (i = 0; fp_const[i]; i++) 4034 { 4035 if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0) 4036 { 4037 char *start = *str; 4038 4039 *str += strlen (fp_const[i]); 4040 if (is_end_of_line[(unsigned char) **str]) 4041 return i + 8; 4042 *str = start; 4043 } 4044 } 4045 4046 /* Just because we didn't get a match doesn't mean that the constant 4047 isn't valid, just that it is in a format that we don't 4048 automatically recognize. 
Try parsing it with the standard 4049 expression routines. */ 4050 4051 memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE)); 4052 4053 /* Look for a raw floating point number. */ 4054 if ((save_in = atof_ieee (*str, 'x', words)) != NULL 4055 && is_end_of_line[(unsigned char) *save_in]) 4056 { 4057 for (i = 0; i < NUM_FLOAT_VALS; i++) 4058 { 4059 for (j = 0; j < MAX_LITTLENUMS; j++) 4060 { 4061 if (words[j] != fp_values[i][j]) 4062 break; 4063 } 4064 4065 if (j == MAX_LITTLENUMS) 4066 { 4067 *str = save_in; 4068 return i + 8; 4069 } 4070 } 4071 } 4072 4073 /* Try and parse a more complex expression, this will probably fail 4074 unless the code uses a floating point prefix (eg "0f"). */ 4075 save_in = input_line_pointer; 4076 input_line_pointer = *str; 4077 if (expression (&exp) == absolute_section 4078 && exp.X_op == O_big 4079 && exp.X_add_number < 0) 4080 { 4081 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it. 4082 Ditto for 15. */ 4083 if (gen_to_words (words, 5, (long) 15) == 0) 4084 { 4085 for (i = 0; i < NUM_FLOAT_VALS; i++) 4086 { 4087 for (j = 0; j < MAX_LITTLENUMS; j++) 4088 { 4089 if (words[j] != fp_values[i][j]) 4090 break; 4091 } 4092 4093 if (j == MAX_LITTLENUMS) 4094 { 4095 *str = input_line_pointer; 4096 input_line_pointer = save_in; 4097 return i + 8; 4098 } 4099 } 4100 } 4101 } 4102 4103 *str = input_line_pointer; 4104 input_line_pointer = save_in; 4105 inst.error = _("invalid FPA immediate expression"); 4106 return FAIL; 4107} 4108 4109/* Returns 1 if a number has "quarter-precision" float format 4110 0baBbbbbbc defgh000 00000000 00000000. */ 4111 4112static int 4113is_quarter_float (unsigned imm) 4114{ 4115 int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000; 4116 return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0; 4117} 4118 4119/* Parse an 8-bit "quarter-precision" floating point number of the form: 4120 0baBbbbbbc defgh000 00000000 00000000. 
4121 The zero and minus-zero cases need special handling, since they can't be 4122 encoded in the "quarter-precision" float format, but can nonetheless be 4123 loaded as integer constants. */ 4124 4125static unsigned 4126parse_qfloat_immediate (char **ccp, int *immed) 4127{ 4128 char *str = *ccp; 4129 char *fpnum; 4130 LITTLENUM_TYPE words[MAX_LITTLENUMS]; 4131 int found_fpchar = 0; 4132 4133 skip_past_char (&str, '#'); 4134 4135 /* We must not accidentally parse an integer as a floating-point number. Make 4136 sure that the value we parse is not an integer by checking for special 4137 characters '.' or 'e'. 4138 FIXME: This is a horrible hack, but doing better is tricky because type 4139 information isn't in a very usable state at parse time. */ 4140 fpnum = str; 4141 skip_whitespace (fpnum); 4142 4143 if (strncmp (fpnum, "0x", 2) == 0) 4144 return FAIL; 4145 else 4146 { 4147 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++) 4148 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E') 4149 { 4150 found_fpchar = 1; 4151 break; 4152 } 4153 4154 if (!found_fpchar) 4155 return FAIL; 4156 } 4157 4158 if ((str = atof_ieee (str, 's', words)) != NULL) 4159 { 4160 unsigned fpword = 0; 4161 int i; 4162 4163 /* Our FP word must be 32 bits (single-precision FP). */ 4164 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++) 4165 { 4166 fpword <<= LITTLENUM_NUMBER_OF_BITS; 4167 fpword |= words[i]; 4168 } 4169 4170 if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0) 4171 *immed = fpword; 4172 else 4173 return FAIL; 4174 4175 *ccp = str; 4176 4177 return SUCCESS; 4178 } 4179 4180 return FAIL; 4181} 4182 4183/* Shift operands. */ 4184enum shift_kind 4185{ 4186 SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX 4187}; 4188 4189struct asm_shift_name 4190{ 4191 const char *name; 4192 enum shift_kind kind; 4193}; 4194 4195/* Third argument to parse_shift. */ 4196enum parse_shift_mode 4197{ 4198 NO_SHIFT_RESTRICT, /* Any kind of shift is accepted. 
*/ 4199 SHIFT_IMMEDIATE, /* Shift operand must be an immediate. */ 4200 SHIFT_LSL_OR_ASR_IMMEDIATE, /* Shift must be LSL or ASR immediate. */ 4201 SHIFT_ASR_IMMEDIATE, /* Shift must be ASR immediate. */ 4202 SHIFT_LSL_IMMEDIATE, /* Shift must be LSL immediate. */ 4203}; 4204 4205/* Parse a <shift> specifier on an ARM data processing instruction. 4206 This has three forms: 4207 4208 (LSL|LSR|ASL|ASR|ROR) Rs 4209 (LSL|LSR|ASL|ASR|ROR) #imm 4210 RRX 4211 4212 Note that ASL is assimilated to LSL in the instruction encoding, and 4213 RRX to ROR #0 (which cannot be written as such). */ 4214 4215static int 4216parse_shift (char **str, int i, enum parse_shift_mode mode) 4217{ 4218 const struct asm_shift_name *shift_name; 4219 enum shift_kind shift; 4220 char *s = *str; 4221 char *p = s; 4222 int reg; 4223 4224 for (p = *str; ISALPHA (*p); p++) 4225 ; 4226 4227 if (p == *str) 4228 { 4229 inst.error = _("shift expression expected"); 4230 return FAIL; 4231 } 4232 4233 shift_name = hash_find_n (arm_shift_hsh, *str, p - *str); 4234 4235 if (shift_name == NULL) 4236 { 4237 inst.error = _("shift expression expected"); 4238 return FAIL; 4239 } 4240 4241 shift = shift_name->kind; 4242 4243 switch (mode) 4244 { 4245 case NO_SHIFT_RESTRICT: 4246 case SHIFT_IMMEDIATE: break; 4247 4248 case SHIFT_LSL_OR_ASR_IMMEDIATE: 4249 if (shift != SHIFT_LSL && shift != SHIFT_ASR) 4250 { 4251 inst.error = _("'LSL' or 'ASR' required"); 4252 return FAIL; 4253 } 4254 break; 4255 4256 case SHIFT_LSL_IMMEDIATE: 4257 if (shift != SHIFT_LSL) 4258 { 4259 inst.error = _("'LSL' required"); 4260 return FAIL; 4261 } 4262 break; 4263 4264 case SHIFT_ASR_IMMEDIATE: 4265 if (shift != SHIFT_ASR) 4266 { 4267 inst.error = _("'ASR' required"); 4268 return FAIL; 4269 } 4270 break; 4271 4272 default: abort (); 4273 } 4274 4275 if (shift != SHIFT_RRX) 4276 { 4277 /* Whitespace can appear here if the next thing is a bare digit. 
*/ 4278 skip_whitespace (p); 4279 4280 if (mode == NO_SHIFT_RESTRICT 4281 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL) 4282 { 4283 inst.operands[i].imm = reg; 4284 inst.operands[i].immisreg = 1; 4285 } 4286 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX)) 4287 return FAIL; 4288 } 4289 inst.operands[i].shift_kind = shift; 4290 inst.operands[i].shifted = 1; 4291 *str = p; 4292 return SUCCESS; 4293} 4294 4295/* Parse a <shifter_operand> for an ARM data processing instruction: 4296 4297 #<immediate> 4298 #<immediate>, <rotate> 4299 <Rm> 4300 <Rm>, <shift> 4301 4302 where <shift> is defined by parse_shift above, and <rotate> is a 4303 multiple of 2 between 0 and 30. Validation of immediate operands 4304 is deferred to md_apply_fix. */ 4305 4306static int 4307parse_shifter_operand (char **str, int i) 4308{ 4309 int value; 4310 expressionS expr; 4311 4312 if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL) 4313 { 4314 inst.operands[i].reg = value; 4315 inst.operands[i].isreg = 1; 4316 4317 /* parse_shift will override this if appropriate */ 4318 inst.reloc.exp.X_op = O_constant; 4319 inst.reloc.exp.X_add_number = 0; 4320 4321 if (skip_past_comma (str) == FAIL) 4322 return SUCCESS; 4323 4324 /* Shift operation on register. */ 4325 return parse_shift (str, i, NO_SHIFT_RESTRICT); 4326 } 4327 4328 if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX)) 4329 return FAIL; 4330 4331 if (skip_past_comma (str) == SUCCESS) 4332 { 4333 /* #x, y -- ie explicit rotation by Y. 
*/ 4334 if (my_get_expression (&expr, str, GE_NO_PREFIX)) 4335 return FAIL; 4336 4337 if (expr.X_op != O_constant || inst.reloc.exp.X_op != O_constant) 4338 { 4339 inst.error = _("constant expression expected"); 4340 return FAIL; 4341 } 4342 4343 value = expr.X_add_number; 4344 if (value < 0 || value > 30 || value % 2 != 0) 4345 { 4346 inst.error = _("invalid rotation"); 4347 return FAIL; 4348 } 4349 if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255) 4350 { 4351 inst.error = _("invalid constant"); 4352 return FAIL; 4353 } 4354 4355 /* Convert to decoded value. md_apply_fix will put it back. */ 4356 inst.reloc.exp.X_add_number 4357 = (((inst.reloc.exp.X_add_number << (32 - value)) 4358 | (inst.reloc.exp.X_add_number >> value)) & 0xffffffff); 4359 } 4360 4361 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE; 4362 inst.reloc.pc_rel = 0; 4363 return SUCCESS; 4364} 4365 4366/* Group relocation information. Each entry in the table contains the 4367 textual name of the relocation as may appear in assembler source 4368 and must end with a colon. 4369 Along with this textual name are the relocation codes to be used if 4370 the corresponding instruction is an ALU instruction (ADD or SUB only), 4371 an LDR, an LDRS, or an LDC. */ 4372 4373struct group_reloc_table_entry 4374{ 4375 const char *name; 4376 int alu_code; 4377 int ldr_code; 4378 int ldrs_code; 4379 int ldc_code; 4380}; 4381 4382typedef enum 4383{ 4384 /* Varieties of non-ALU group relocation. 
*/ 4385 4386 GROUP_LDR, 4387 GROUP_LDRS, 4388 GROUP_LDC 4389} group_reloc_type; 4390 4391static struct group_reloc_table_entry group_reloc_table[] = 4392 { /* Program counter relative: */ 4393 { "pc_g0_nc", 4394 BFD_RELOC_ARM_ALU_PC_G0_NC, /* ALU */ 4395 0, /* LDR */ 4396 0, /* LDRS */ 4397 0 }, /* LDC */ 4398 { "pc_g0", 4399 BFD_RELOC_ARM_ALU_PC_G0, /* ALU */ 4400 BFD_RELOC_ARM_LDR_PC_G0, /* LDR */ 4401 BFD_RELOC_ARM_LDRS_PC_G0, /* LDRS */ 4402 BFD_RELOC_ARM_LDC_PC_G0 }, /* LDC */ 4403 { "pc_g1_nc", 4404 BFD_RELOC_ARM_ALU_PC_G1_NC, /* ALU */ 4405 0, /* LDR */ 4406 0, /* LDRS */ 4407 0 }, /* LDC */ 4408 { "pc_g1", 4409 BFD_RELOC_ARM_ALU_PC_G1, /* ALU */ 4410 BFD_RELOC_ARM_LDR_PC_G1, /* LDR */ 4411 BFD_RELOC_ARM_LDRS_PC_G1, /* LDRS */ 4412 BFD_RELOC_ARM_LDC_PC_G1 }, /* LDC */ 4413 { "pc_g2", 4414 BFD_RELOC_ARM_ALU_PC_G2, /* ALU */ 4415 BFD_RELOC_ARM_LDR_PC_G2, /* LDR */ 4416 BFD_RELOC_ARM_LDRS_PC_G2, /* LDRS */ 4417 BFD_RELOC_ARM_LDC_PC_G2 }, /* LDC */ 4418 /* Section base relative */ 4419 { "sb_g0_nc", 4420 BFD_RELOC_ARM_ALU_SB_G0_NC, /* ALU */ 4421 0, /* LDR */ 4422 0, /* LDRS */ 4423 0 }, /* LDC */ 4424 { "sb_g0", 4425 BFD_RELOC_ARM_ALU_SB_G0, /* ALU */ 4426 BFD_RELOC_ARM_LDR_SB_G0, /* LDR */ 4427 BFD_RELOC_ARM_LDRS_SB_G0, /* LDRS */ 4428 BFD_RELOC_ARM_LDC_SB_G0 }, /* LDC */ 4429 { "sb_g1_nc", 4430 BFD_RELOC_ARM_ALU_SB_G1_NC, /* ALU */ 4431 0, /* LDR */ 4432 0, /* LDRS */ 4433 0 }, /* LDC */ 4434 { "sb_g1", 4435 BFD_RELOC_ARM_ALU_SB_G1, /* ALU */ 4436 BFD_RELOC_ARM_LDR_SB_G1, /* LDR */ 4437 BFD_RELOC_ARM_LDRS_SB_G1, /* LDRS */ 4438 BFD_RELOC_ARM_LDC_SB_G1 }, /* LDC */ 4439 { "sb_g2", 4440 BFD_RELOC_ARM_ALU_SB_G2, /* ALU */ 4441 BFD_RELOC_ARM_LDR_SB_G2, /* LDR */ 4442 BFD_RELOC_ARM_LDRS_SB_G2, /* LDRS */ 4443 BFD_RELOC_ARM_LDC_SB_G2 } }; /* LDC */ 4444 4445/* Given the address of a pointer pointing to the textual name of a group 4446 relocation as may appear in assembler source, attempt to find its details 4447 in group_reloc_table. 
The pointer will be updated to the character after 4448 the trailing colon. On failure, FAIL will be returned; SUCCESS 4449 otherwise. On success, *entry will be updated to point at the relevant 4450 group_reloc_table entry. */ 4451 4452static int 4453find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out) 4454{ 4455 unsigned int i; 4456 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++) 4457 { 4458 int length = strlen (group_reloc_table[i].name); 4459 4460 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0 && 4461 (*str)[length] == ':') 4462 { 4463 *out = &group_reloc_table[i]; 4464 *str += (length + 1); 4465 return SUCCESS; 4466 } 4467 } 4468 4469 return FAIL; 4470} 4471 4472/* Parse a <shifter_operand> for an ARM data processing instruction 4473 (as for parse_shifter_operand) where group relocations are allowed: 4474 4475 #<immediate> 4476 #<immediate>, <rotate> 4477 #:<group_reloc>:<expression> 4478 <Rm> 4479 <Rm>, <shift> 4480 4481 where <group_reloc> is one of the strings defined in group_reloc_table. 4482 The hashes are optional. 4483 4484 Everything else is as for parse_shifter_operand. */ 4485 4486static parse_operand_result 4487parse_shifter_operand_group_reloc (char **str, int i) 4488{ 4489 /* Determine if we have the sequence of characters #: or just : 4490 coming next. If we do, then we check for a group relocation. 4491 If we don't, punt the whole lot to parse_shifter_operand. */ 4492 4493 if (((*str)[0] == '#' && (*str)[1] == ':') 4494 || (*str)[0] == ':') 4495 { 4496 struct group_reloc_table_entry *entry; 4497 4498 if ((*str)[0] == '#') 4499 (*str) += 2; 4500 else 4501 (*str)++; 4502 4503 /* Try to parse a group relocation. Anything else is an error. 
*/ 4504 if (find_group_reloc_table_entry (str, &entry) == FAIL) 4505 { 4506 inst.error = _("unknown group relocation"); 4507 return PARSE_OPERAND_FAIL_NO_BACKTRACK; 4508 } 4509 4510 /* We now have the group relocation table entry corresponding to 4511 the name in the assembler source. Next, we parse the expression. */ 4512 if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX)) 4513 return PARSE_OPERAND_FAIL_NO_BACKTRACK; 4514 4515 /* Record the relocation type (always the ALU variant here). */ 4516 inst.reloc.type = entry->alu_code; 4517 assert (inst.reloc.type != 0); 4518 4519 return PARSE_OPERAND_SUCCESS; 4520 } 4521 else 4522 return parse_shifter_operand (str, i) == SUCCESS 4523 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL; 4524 4525 /* Never reached. */ 4526} 4527 4528/* Parse all forms of an ARM address expression. Information is written 4529 to inst.operands[i] and/or inst.reloc. 4530 4531 Preindexed addressing (.preind=1): 4532 4533 [Rn, #offset] .reg=Rn .reloc.exp=offset 4534 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1 4535 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1 4536 .shift_kind=shift .reloc.exp=shift_imm 4537 4538 These three may have a trailing ! which causes .writeback to be set also. 4539 4540 Postindexed addressing (.postind=1, .writeback=1): 4541 4542 [Rn], #offset .reg=Rn .reloc.exp=offset 4543 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1 4544 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1 4545 .shift_kind=shift .reloc.exp=shift_imm 4546 4547 Unindexed addressing (.preind=0, .postind=0): 4548 4549 [Rn], {option} .reg=Rn .imm=option .immisreg=0 4550 4551 Other: 4552 4553 [Rn]{!} shorthand for [Rn,#0]{!} 4554 =immediate .isreg=0 .reloc.exp=immediate 4555 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label 4556 4557 It is the caller's responsibility to check for addressing modes not 4558 supported by the instruction, and to set inst.reloc.type. 
*/ 4559 4560static parse_operand_result 4561parse_address_main (char **str, int i, int group_relocations, 4562 group_reloc_type group_type) 4563{ 4564 char *p = *str; 4565 int reg; 4566 4567 if (skip_past_char (&p, '[') == FAIL) 4568 { 4569 if (skip_past_char (&p, '=') == FAIL) 4570 { 4571 /* bare address - translate to PC-relative offset */ 4572 inst.reloc.pc_rel = 1; 4573 inst.operands[i].reg = REG_PC; 4574 inst.operands[i].isreg = 1; 4575 inst.operands[i].preind = 1; 4576 } 4577 /* else a load-constant pseudo op, no special treatment needed here */ 4578 4579 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX)) 4580 return PARSE_OPERAND_FAIL; 4581 4582 *str = p; 4583 return PARSE_OPERAND_SUCCESS; 4584 } 4585 4586 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL) 4587 { 4588 inst.error = _(reg_expected_msgs[REG_TYPE_RN]); 4589 return PARSE_OPERAND_FAIL; 4590 } 4591 inst.operands[i].reg = reg; 4592 inst.operands[i].isreg = 1; 4593 4594 if (skip_past_comma (&p) == SUCCESS) 4595 { 4596 inst.operands[i].preind = 1; 4597 4598 if (*p == '+') p++; 4599 else if (*p == '-') p++, inst.operands[i].negative = 1; 4600 4601 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL) 4602 { 4603 inst.operands[i].imm = reg; 4604 inst.operands[i].immisreg = 1; 4605 4606 if (skip_past_comma (&p) == SUCCESS) 4607 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL) 4608 return PARSE_OPERAND_FAIL; 4609 } 4610 else if (skip_past_char (&p, ':') == SUCCESS) 4611 { 4612 /* FIXME: '@' should be used here, but it's filtered out by generic 4613 code before we get to see it here. This may be subject to 4614 change. */ 4615 expressionS exp; 4616 my_get_expression (&exp, &p, GE_NO_PREFIX); 4617 if (exp.X_op != O_constant) 4618 { 4619 inst.error = _("alignment must be constant"); 4620 return PARSE_OPERAND_FAIL; 4621 } 4622 inst.operands[i].imm = exp.X_add_number << 8; 4623 inst.operands[i].immisalign = 1; 4624 /* Alignments are not pre-indexes. 
*/ 4625 inst.operands[i].preind = 0; 4626 } 4627 else 4628 { 4629 if (inst.operands[i].negative) 4630 { 4631 inst.operands[i].negative = 0; 4632 p--; 4633 } 4634 4635 if (group_relocations && 4636 ((*p == '#' && *(p + 1) == ':') || *p == ':')) 4637 4638 { 4639 struct group_reloc_table_entry *entry; 4640 4641 /* Skip over the #: or : sequence. */ 4642 if (*p == '#') 4643 p += 2; 4644 else 4645 p++; 4646 4647 /* Try to parse a group relocation. Anything else is an 4648 error. */ 4649 if (find_group_reloc_table_entry (&p, &entry) == FAIL) 4650 { 4651 inst.error = _("unknown group relocation"); 4652 return PARSE_OPERAND_FAIL_NO_BACKTRACK; 4653 } 4654 4655 /* We now have the group relocation table entry corresponding to 4656 the name in the assembler source. Next, we parse the 4657 expression. */ 4658 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX)) 4659 return PARSE_OPERAND_FAIL_NO_BACKTRACK; 4660 4661 /* Record the relocation type. */ 4662 switch (group_type) 4663 { 4664 case GROUP_LDR: 4665 inst.reloc.type = entry->ldr_code; 4666 break; 4667 4668 case GROUP_LDRS: 4669 inst.reloc.type = entry->ldrs_code; 4670 break; 4671 4672 case GROUP_LDC: 4673 inst.reloc.type = entry->ldc_code; 4674 break; 4675 4676 default: 4677 assert (0); 4678 } 4679 4680 if (inst.reloc.type == 0) 4681 { 4682 inst.error = _("this group relocation is not allowed on this instruction"); 4683 return PARSE_OPERAND_FAIL_NO_BACKTRACK; 4684 } 4685 } 4686 else 4687 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX)) 4688 return PARSE_OPERAND_FAIL; 4689 } 4690 } 4691 else if (skip_past_char (&p, ':') == SUCCESS) 4692 { 4693 /* FIXME: '@' should be used here, but it's filtered out by generic 4694 code before we get to see it here. This may be subject to 4695 change. 
*/ 4696 expressionS exp; 4697 my_get_expression (&exp, &p, GE_NO_PREFIX); 4698 if (exp.X_op != O_constant) 4699 { 4700 inst.error = _("alignment must be constant"); 4701 return PARSE_OPERAND_FAIL; 4702 } 4703 inst.operands[i].imm = exp.X_add_number << 8; 4704 inst.operands[i].immisalign = 1; 4705 /* Alignments are not pre-indexes. */ 4706 inst.operands[i].preind = 0; 4707 } 4708 4709 if (skip_past_char (&p, ']') == FAIL) 4710 { 4711 inst.error = _("']' expected"); 4712 return PARSE_OPERAND_FAIL; 4713 } 4714 4715 if (skip_past_char (&p, '!') == SUCCESS) 4716 inst.operands[i].writeback = 1; 4717 4718 else if (skip_past_comma (&p) == SUCCESS) 4719 { 4720 if (skip_past_char (&p, '{') == SUCCESS) 4721 { 4722 /* [Rn], {expr} - unindexed, with option */ 4723 if (parse_immediate (&p, &inst.operands[i].imm, 4724 0, 255, TRUE) == FAIL) 4725 return PARSE_OPERAND_FAIL; 4726 4727 if (skip_past_char (&p, '}') == FAIL) 4728 { 4729 inst.error = _("'}' expected at end of 'option' field"); 4730 return PARSE_OPERAND_FAIL; 4731 } 4732 if (inst.operands[i].preind) 4733 { 4734 inst.error = _("cannot combine index with option"); 4735 return PARSE_OPERAND_FAIL; 4736 } 4737 *str = p; 4738 return PARSE_OPERAND_SUCCESS; 4739 } 4740 else 4741 { 4742 inst.operands[i].postind = 1; 4743 inst.operands[i].writeback = 1; 4744 4745 if (inst.operands[i].preind) 4746 { 4747 inst.error = _("cannot combine pre- and post-indexing"); 4748 return PARSE_OPERAND_FAIL; 4749 } 4750 4751 if (*p == '+') p++; 4752 else if (*p == '-') p++, inst.operands[i].negative = 1; 4753 4754 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL) 4755 { 4756 /* We might be using the immediate for alignment already. If we 4757 are, OR the register number into the low-order bits. 
*/ 4758 if (inst.operands[i].immisalign) 4759 inst.operands[i].imm |= reg; 4760 else 4761 inst.operands[i].imm = reg; 4762 inst.operands[i].immisreg = 1; 4763 4764 if (skip_past_comma (&p) == SUCCESS) 4765 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL) 4766 return PARSE_OPERAND_FAIL; 4767 } 4768 else 4769 { 4770 if (inst.operands[i].negative) 4771 { 4772 inst.operands[i].negative = 0; 4773 p--; 4774 } 4775 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX)) 4776 return PARSE_OPERAND_FAIL; 4777 } 4778 } 4779 } 4780 4781 /* If at this point neither .preind nor .postind is set, we have a 4782 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */ 4783 if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0) 4784 { 4785 inst.operands[i].preind = 1; 4786 inst.reloc.exp.X_op = O_constant; 4787 inst.reloc.exp.X_add_number = 0; 4788 } 4789 *str = p; 4790 return PARSE_OPERAND_SUCCESS; 4791} 4792 4793static int 4794parse_address (char **str, int i) 4795{ 4796 return parse_address_main (str, i, 0, 0) == PARSE_OPERAND_SUCCESS 4797 ? SUCCESS : FAIL; 4798} 4799 4800static parse_operand_result 4801parse_address_group_reloc (char **str, int i, group_reloc_type type) 4802{ 4803 return parse_address_main (str, i, 1, type); 4804} 4805 4806/* Parse an operand for a MOVW or MOVT instruction. 
*/ 4807static int 4808parse_half (char **str) 4809{ 4810 char * p; 4811 4812 p = *str; 4813 skip_past_char (&p, '#'); 4814 if (strncasecmp (p, ":lower16:", 9) == 0) 4815 inst.reloc.type = BFD_RELOC_ARM_MOVW; 4816 else if (strncasecmp (p, ":upper16:", 9) == 0) 4817 inst.reloc.type = BFD_RELOC_ARM_MOVT; 4818 4819 if (inst.reloc.type != BFD_RELOC_UNUSED) 4820 { 4821 p += 9; 4822 skip_whitespace(p); 4823 } 4824 4825 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX)) 4826 return FAIL; 4827 4828 if (inst.reloc.type == BFD_RELOC_UNUSED) 4829 { 4830 if (inst.reloc.exp.X_op != O_constant) 4831 { 4832 inst.error = _("constant expression expected"); 4833 return FAIL; 4834 } 4835 if (inst.reloc.exp.X_add_number < 0 4836 || inst.reloc.exp.X_add_number > 0xffff) 4837 { 4838 inst.error = _("immediate value out of range"); 4839 return FAIL; 4840 } 4841 } 4842 *str = p; 4843 return SUCCESS; 4844} 4845 4846/* Miscellaneous. */ 4847 4848/* Parse a PSR flag operand. The value returned is FAIL on syntax error, 4849 or a bitmask suitable to be or-ed into the ARM msr instruction. */ 4850static int 4851parse_psr (char **str) 4852{ 4853 char *p; 4854 unsigned long psr_field; 4855 const struct asm_psr *psr; 4856 char *start; 4857 4858 /* CPSR's and SPSR's can now be lowercase. This is just a convenience 4859 feature for ease of use and backwards compatibility. */ 4860 p = *str; 4861 if (strncasecmp (p, "SPSR", 4) == 0) 4862 psr_field = SPSR_BIT; 4863 else if (strncasecmp (p, "CPSR", 4) == 0) 4864 psr_field = 0; 4865 else 4866 { 4867 start = p; 4868 do 4869 p++; 4870 while (ISALNUM (*p) || *p == '_'); 4871 4872 psr = hash_find_n (arm_v7m_psr_hsh, start, p - start); 4873 if (!psr) 4874 return FAIL; 4875 4876 *str = p; 4877 return psr->field; 4878 } 4879 4880 p += 4; 4881 if (*p == '_') 4882 { 4883 /* A suffix follows. 
*/ 4884 p++; 4885 start = p; 4886 4887 do 4888 p++; 4889 while (ISALNUM (*p) || *p == '_'); 4890 4891 psr = hash_find_n (arm_psr_hsh, start, p - start); 4892 if (!psr) 4893 goto error; 4894 4895 psr_field |= psr->field; 4896 } 4897 else 4898 { 4899 if (ISALNUM (*p)) 4900 goto error; /* Garbage after "[CS]PSR". */ 4901 4902 psr_field |= (PSR_c | PSR_f); 4903 } 4904 *str = p; 4905 return psr_field; 4906 4907 error: 4908 inst.error = _("flag for {c}psr instruction expected"); 4909 return FAIL; 4910} 4911 4912/* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a 4913 value suitable for splatting into the AIF field of the instruction. */ 4914 4915static int 4916parse_cps_flags (char **str) 4917{ 4918 int val = 0; 4919 int saw_a_flag = 0; 4920 char *s = *str; 4921 4922 for (;;) 4923 switch (*s++) 4924 { 4925 case '\0': case ',': 4926 goto done; 4927 4928 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break; 4929 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break; 4930 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break; 4931 4932 default: 4933 inst.error = _("unrecognized CPS flag"); 4934 return FAIL; 4935 } 4936 4937 done: 4938 if (saw_a_flag == 0) 4939 { 4940 inst.error = _("missing CPS flags"); 4941 return FAIL; 4942 } 4943 4944 *str = s - 1; 4945 return val; 4946} 4947 4948/* Parse an endian specifier ("BE" or "LE", case insensitive); 4949 returns 0 for big-endian, 1 for little-endian, FAIL for an error. 
*/ 4950 4951static int 4952parse_endian_specifier (char **str) 4953{ 4954 int little_endian; 4955 char *s = *str; 4956 4957 if (strncasecmp (s, "BE", 2)) 4958 little_endian = 0; 4959 else if (strncasecmp (s, "LE", 2)) 4960 little_endian = 1; 4961 else 4962 { 4963 inst.error = _("valid endian specifiers are be or le"); 4964 return FAIL; 4965 } 4966 4967 if (ISALNUM (s[2]) || s[2] == '_') 4968 { 4969 inst.error = _("valid endian specifiers are be or le"); 4970 return FAIL; 4971 } 4972 4973 *str = s + 2; 4974 return little_endian; 4975} 4976 4977/* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a 4978 value suitable for poking into the rotate field of an sxt or sxta 4979 instruction, or FAIL on error. */ 4980 4981static int 4982parse_ror (char **str) 4983{ 4984 int rot; 4985 char *s = *str; 4986 4987 if (strncasecmp (s, "ROR", 3) == 0) 4988 s += 3; 4989 else 4990 { 4991 inst.error = _("missing rotation field after comma"); 4992 return FAIL; 4993 } 4994 4995 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL) 4996 return FAIL; 4997 4998 switch (rot) 4999 { 5000 case 0: *str = s; return 0x0; 5001 case 8: *str = s; return 0x1; 5002 case 16: *str = s; return 0x2; 5003 case 24: *str = s; return 0x3; 5004 5005 default: 5006 inst.error = _("rotation can only be 0, 8, 16, or 24"); 5007 return FAIL; 5008 } 5009} 5010 5011/* Parse a conditional code (from conds[] below). The value returned is in the 5012 range 0 .. 14, or FAIL. */ 5013static int 5014parse_cond (char **str) 5015{ 5016 char *p, *q; 5017 const struct asm_cond *c; 5018 5019 p = q = *str; 5020 while (ISALPHA (*q)) 5021 q++; 5022 5023 c = hash_find_n (arm_cond_hsh, p, q - p); 5024 if (!c) 5025 { 5026 inst.error = _("condition required"); 5027 return FAIL; 5028 } 5029 5030 *str = q; 5031 return c->value; 5032} 5033 5034/* Parse an option for a barrier instruction. Returns the encoding for the 5035 option, or FAIL. 
*/ 5036static int 5037parse_barrier (char **str) 5038{ 5039 char *p, *q; 5040 const struct asm_barrier_opt *o; 5041 5042 p = q = *str; 5043 while (ISALPHA (*q)) 5044 q++; 5045 5046 o = hash_find_n (arm_barrier_opt_hsh, p, q - p); 5047 if (!o) 5048 return FAIL; 5049 5050 *str = q; 5051 return o->value; 5052} 5053 5054/* Parse the operands of a table branch instruction. Similar to a memory 5055 operand. */ 5056static int 5057parse_tb (char **str) 5058{ 5059 char * p = *str; 5060 int reg; 5061 5062 if (skip_past_char (&p, '[') == FAIL) 5063 { 5064 inst.error = _("'[' expected"); 5065 return FAIL; 5066 } 5067 5068 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL) 5069 { 5070 inst.error = _(reg_expected_msgs[REG_TYPE_RN]); 5071 return FAIL; 5072 } 5073 inst.operands[0].reg = reg; 5074 5075 if (skip_past_comma (&p) == FAIL) 5076 { 5077 inst.error = _("',' expected"); 5078 return FAIL; 5079 } 5080 5081 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL) 5082 { 5083 inst.error = _(reg_expected_msgs[REG_TYPE_RN]); 5084 return FAIL; 5085 } 5086 inst.operands[0].imm = reg; 5087 5088 if (skip_past_comma (&p) == SUCCESS) 5089 { 5090 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL) 5091 return FAIL; 5092 if (inst.reloc.exp.X_add_number != 1) 5093 { 5094 inst.error = _("invalid shift"); 5095 return FAIL; 5096 } 5097 inst.operands[0].shifted = 1; 5098 } 5099 5100 if (skip_past_char (&p, ']') == FAIL) 5101 { 5102 inst.error = _("']' expected"); 5103 return FAIL; 5104 } 5105 *str = p; 5106 return SUCCESS; 5107} 5108 5109/* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more 5110 information on the types the operands can take and how they are encoded. 5111 Up to four operands may be read; this function handles setting the 5112 ".present" field for each read operand itself. 5113 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS, 5114 else returns FAIL. 
*/ 5115 5116static int 5117parse_neon_mov (char **str, int *which_operand) 5118{ 5119 int i = *which_operand, val; 5120 enum arm_reg_type rtype; 5121 char *ptr = *str; 5122 struct neon_type_el optype; 5123 5124 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL) 5125 { 5126 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */ 5127 inst.operands[i].reg = val; 5128 inst.operands[i].isscalar = 1; 5129 inst.operands[i].vectype = optype; 5130 inst.operands[i++].present = 1; 5131 5132 if (skip_past_comma (&ptr) == FAIL) 5133 goto wanted_comma; 5134 5135 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL) 5136 goto wanted_arm; 5137 5138 inst.operands[i].reg = val; 5139 inst.operands[i].isreg = 1; 5140 inst.operands[i].present = 1; 5141 } 5142 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype)) 5143 != FAIL) 5144 { 5145 /* Cases 0, 1, 2, 3, 5 (D only). */ 5146 if (skip_past_comma (&ptr) == FAIL) 5147 goto wanted_comma; 5148 5149 inst.operands[i].reg = val; 5150 inst.operands[i].isreg = 1; 5151 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); 5152 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); 5153 inst.operands[i].isvec = 1; 5154 inst.operands[i].vectype = optype; 5155 inst.operands[i++].present = 1; 5156 5157 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL) 5158 { 5159 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>. 
5160 Case 13: VMOV <Sd>, <Rm> */ 5161 inst.operands[i].reg = val; 5162 inst.operands[i].isreg = 1; 5163 inst.operands[i].present = 1; 5164 5165 if (rtype == REG_TYPE_NQ) 5166 { 5167 first_error (_("can't use Neon quad register here")); 5168 return FAIL; 5169 } 5170 else if (rtype != REG_TYPE_VFS) 5171 { 5172 i++; 5173 if (skip_past_comma (&ptr) == FAIL) 5174 goto wanted_comma; 5175 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL) 5176 goto wanted_arm; 5177 inst.operands[i].reg = val; 5178 inst.operands[i].isreg = 1; 5179 inst.operands[i].present = 1; 5180 } 5181 } 5182 else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS) 5183 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm> 5184 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm> 5185 Case 10: VMOV.F32 <Sd>, #<imm> 5186 Case 11: VMOV.F64 <Dd>, #<imm> */ 5187 inst.operands[i].immisfloat = 1; 5188 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, 5189 &optype)) != FAIL) 5190 { 5191 /* Case 0: VMOV<c><q> <Qd>, <Qm> 5192 Case 1: VMOV<c><q> <Dd>, <Dm> 5193 Case 8: VMOV.F32 <Sd>, <Sm> 5194 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */ 5195 5196 inst.operands[i].reg = val; 5197 inst.operands[i].isreg = 1; 5198 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); 5199 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); 5200 inst.operands[i].isvec = 1; 5201 inst.operands[i].vectype = optype; 5202 inst.operands[i].present = 1; 5203 5204 if (skip_past_comma (&ptr) == SUCCESS) 5205 { 5206 /* Case 15. 
*/ 5207 i++; 5208 5209 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL) 5210 goto wanted_arm; 5211 5212 inst.operands[i].reg = val; 5213 inst.operands[i].isreg = 1; 5214 inst.operands[i++].present = 1; 5215 5216 if (skip_past_comma (&ptr) == FAIL) 5217 goto wanted_comma; 5218 5219 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL) 5220 goto wanted_arm; 5221 5222 inst.operands[i].reg = val; 5223 inst.operands[i].isreg = 1; 5224 inst.operands[i++].present = 1; 5225 } 5226 } 5227 else if (parse_big_immediate (&ptr, i) == SUCCESS) 5228 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm> 5229 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */ 5230 ; 5231 else 5232 { 5233 first_error (_("expected <Rm> or <Dm> or <Qm> operand")); 5234 return FAIL; 5235 } 5236 } 5237 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL) 5238 { 5239 /* Cases 6, 7. */ 5240 inst.operands[i].reg = val; 5241 inst.operands[i].isreg = 1; 5242 inst.operands[i++].present = 1; 5243 5244 if (skip_past_comma (&ptr) == FAIL) 5245 goto wanted_comma; 5246 5247 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL) 5248 { 5249 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */ 5250 inst.operands[i].reg = val; 5251 inst.operands[i].isscalar = 1; 5252 inst.operands[i].present = 1; 5253 inst.operands[i].vectype = optype; 5254 } 5255 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL) 5256 { 5257 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */ 5258 inst.operands[i].reg = val; 5259 inst.operands[i].isreg = 1; 5260 inst.operands[i++].present = 1; 5261 5262 if (skip_past_comma (&ptr) == FAIL) 5263 goto wanted_comma; 5264 5265 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype)) 5266 == FAIL) 5267 { 5268 first_error (_(reg_expected_msgs[REG_TYPE_VFSD])); 5269 return FAIL; 5270 } 5271 5272 inst.operands[i].reg = val; 5273 inst.operands[i].isreg = 1; 5274 inst.operands[i].isvec = 1; 5275 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); 5276 inst.operands[i].vectype = optype; 5277 inst.operands[i].present = 
1; 5278 5279 if (rtype == REG_TYPE_VFS) 5280 { 5281 /* Case 14. */ 5282 i++; 5283 if (skip_past_comma (&ptr) == FAIL) 5284 goto wanted_comma; 5285 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, 5286 &optype)) == FAIL) 5287 { 5288 first_error (_(reg_expected_msgs[REG_TYPE_VFS])); 5289 return FAIL; 5290 } 5291 inst.operands[i].reg = val; 5292 inst.operands[i].isreg = 1; 5293 inst.operands[i].isvec = 1; 5294 inst.operands[i].issingle = 1; 5295 inst.operands[i].vectype = optype; 5296 inst.operands[i].present = 1; 5297 } 5298 } 5299 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype)) 5300 != FAIL) 5301 { 5302 /* Case 13. */ 5303 inst.operands[i].reg = val; 5304 inst.operands[i].isreg = 1; 5305 inst.operands[i].isvec = 1; 5306 inst.operands[i].issingle = 1; 5307 inst.operands[i].vectype = optype; 5308 inst.operands[i++].present = 1; 5309 } 5310 } 5311 else 5312 { 5313 first_error (_("parse error")); 5314 return FAIL; 5315 } 5316 5317 /* Successfully parsed the operands. Update args. */ 5318 *which_operand = i; 5319 *str = ptr; 5320 return SUCCESS; 5321 5322 wanted_comma: 5323 first_error (_("expected comma")); 5324 return FAIL; 5325 5326 wanted_arm: 5327 first_error (_(reg_expected_msgs[REG_TYPE_RN])); 5328 return FAIL; 5329} 5330 5331/* Matcher codes for parse_operands. */ 5332enum operand_parse_code 5333{ 5334 OP_stop, /* end of line */ 5335 5336 OP_RR, /* ARM register */ 5337 OP_RRnpc, /* ARM register, not r15 */ 5338 OP_RRnpcb, /* ARM register, not r15, in square brackets */ 5339 OP_RRw, /* ARM register, not r15, optional trailing ! 
*/ 5340 OP_RCP, /* Coprocessor number */ 5341 OP_RCN, /* Coprocessor register */ 5342 OP_RF, /* FPA register */ 5343 OP_RVS, /* VFP single precision register */ 5344 OP_RVD, /* VFP double precision register (0..15) */ 5345 OP_RND, /* Neon double precision register (0..31) */ 5346 OP_RNQ, /* Neon quad precision register */ 5347 OP_RVSD, /* VFP single or double precision register */ 5348 OP_RNDQ, /* Neon double or quad precision register */ 5349 OP_RNSDQ, /* Neon single, double or quad precision register */ 5350 OP_RNSC, /* Neon scalar D[X] */ 5351 OP_RVC, /* VFP control register */ 5352 OP_RMF, /* Maverick F register */ 5353 OP_RMD, /* Maverick D register */ 5354 OP_RMFX, /* Maverick FX register */ 5355 OP_RMDX, /* Maverick DX register */ 5356 OP_RMAX, /* Maverick AX register */ 5357 OP_RMDS, /* Maverick DSPSC register */ 5358 OP_RIWR, /* iWMMXt wR register */ 5359 OP_RIWC, /* iWMMXt wC register */ 5360 OP_RIWG, /* iWMMXt wCG register */ 5361 OP_RXA, /* XScale accumulator register */ 5362 5363 OP_REGLST, /* ARM register list */ 5364 OP_VRSLST, /* VFP single-precision register list */ 5365 OP_VRDLST, /* VFP double-precision register list */ 5366 OP_VRSDLST, /* VFP single or double-precision register list (& quad) */ 5367 OP_NRDLST, /* Neon double-precision register list (d0-d31, qN aliases) */ 5368 OP_NSTRLST, /* Neon element/structure list */ 5369 5370 OP_NILO, /* Neon immediate/logic operands 2 or 2+3. (VBIC, VORR...) */ 5371 OP_RNDQ_I0, /* Neon D or Q reg, or immediate zero. */ 5372 OP_RVSD_I0, /* VFP S or D reg, or immediate zero. */ 5373 OP_RR_RNSC, /* ARM reg or Neon scalar. */ 5374 OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar. */ 5375 OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar. */ 5376 OP_RND_RNSC, /* Neon D reg, or Neon scalar. */ 5377 OP_VMOV, /* Neon VMOV operands. */ 5378 OP_RNDQ_IMVNb,/* Neon D or Q reg, or immediate good for VMVN. */ 5379 OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift. 
*/ 5380 OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2. */ 5381 5382 OP_I0, /* immediate zero */ 5383 OP_I7, /* immediate value 0 .. 7 */ 5384 OP_I15, /* 0 .. 15 */ 5385 OP_I16, /* 1 .. 16 */ 5386 OP_I16z, /* 0 .. 16 */ 5387 OP_I31, /* 0 .. 31 */ 5388 OP_I31w, /* 0 .. 31, optional trailing ! */ 5389 OP_I32, /* 1 .. 32 */ 5390 OP_I32z, /* 0 .. 32 */ 5391 OP_I63, /* 0 .. 63 */ 5392 OP_I63s, /* -64 .. 63 */ 5393 OP_I64, /* 1 .. 64 */ 5394 OP_I64z, /* 0 .. 64 */ 5395 OP_I255, /* 0 .. 255 */ 5396 5397 OP_I4b, /* immediate, prefix optional, 1 .. 4 */ 5398 OP_I7b, /* 0 .. 7 */ 5399 OP_I15b, /* 0 .. 15 */ 5400 OP_I31b, /* 0 .. 31 */ 5401 5402 OP_SH, /* shifter operand */ 5403 OP_SHG, /* shifter operand with possible group relocation */ 5404 OP_ADDR, /* Memory address expression (any mode) */ 5405 OP_ADDRGLDR, /* Mem addr expr (any mode) with possible LDR group reloc */ 5406 OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */ 5407 OP_ADDRGLDC, /* Mem addr expr (any mode) with possible LDC group reloc */ 5408 OP_EXP, /* arbitrary expression */ 5409 OP_EXPi, /* same, with optional immediate prefix */ 5410 OP_EXPr, /* same, with optional relocation suffix */ 5411 OP_HALF, /* 0 .. 65535 or low/high reloc. */ 5412 5413 OP_CPSF, /* CPS flags */ 5414 OP_ENDI, /* Endianness specifier */ 5415 OP_PSR, /* CPSR/SPSR mask for msr */ 5416 OP_COND, /* conditional code */ 5417 OP_TB, /* Table branch. */ 5418 5419 OP_RVC_PSR, /* CPSR/SPSR mask for msr, or VFP control register. */ 5420 OP_APSR_RR, /* ARM register or "APSR_nzcv". */ 5421 5422 OP_RRnpc_I0, /* ARM register or literal 0 */ 5423 OP_RR_EXr, /* ARM register or expression with opt. reloc suff. */ 5424 OP_RR_EXi, /* ARM register or expression with imm prefix */ 5425 OP_RF_IF, /* FPA register or immediate */ 5426 OP_RIWR_RIWC, /* iWMMXt R or C reg */ 5427 OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */ 5428 5429 /* Optional operands. */ 5430 OP_oI7b, /* immediate, prefix optional, 0 .. 
7 */ 5431 OP_oI31b, /* 0 .. 31 */ 5432 OP_oI32b, /* 1 .. 32 */ 5433 OP_oIffffb, /* 0 .. 65535 */ 5434 OP_oI255c, /* curly-brace enclosed, 0 .. 255 */ 5435 5436 OP_oRR, /* ARM register */ 5437 OP_oRRnpc, /* ARM register, not the PC */ 5438 OP_oRRw, /* ARM register, not r15, optional trailing ! */ 5439 OP_oRND, /* Optional Neon double precision register */ 5440 OP_oRNQ, /* Optional Neon quad precision register */ 5441 OP_oRNDQ, /* Optional Neon double or quad precision register */ 5442 OP_oRNSDQ, /* Optional single, double or quad precision vector register */ 5443 OP_oSHll, /* LSL immediate */ 5444 OP_oSHar, /* ASR immediate */ 5445 OP_oSHllar, /* LSL or ASR immediate */ 5446 OP_oROR, /* ROR 0/8/16/24 */ 5447 OP_oBARRIER, /* Option argument for a barrier instruction. */ 5448 5449 OP_FIRST_OPTIONAL = OP_oI7b 5450}; 5451 5452/* Generic instruction operand parser. This does no encoding and no 5453 semantic validation; it merely squirrels values away in the inst 5454 structure. Returns SUCCESS or FAIL depending on whether the 5455 specified grammar matched. 
*/
static int
parse_operands (char *str, const unsigned char *pattern)
{
  unsigned const char *upat = pattern;
  char *backtrack_pos = 0;	/* Saved position of the last optional operand.  */
  const char *backtrack_error = 0;
  int i, val, backtrack_index = 0;
  enum arm_reg_type rtype;
  parse_operand_result result;

/* Expect the literal character CHR next; on failure report BAD_ARGS.  */
#define po_char_or_fail(chr) do {		\
  if (skip_past_char (&str, chr) == FAIL)	\
    goto bad_args;				\
} while (0)

/* Parse a register of type REGTYPE into operand I, recording its
   quad/single/vector nature; on failure report the type-specific
   message and fail.  */
#define po_reg_or_fail(regtype) do {				\
  val = arm_typed_reg_parse (&str, regtype, &rtype,		\
  			     &inst.operands[i].vectype);	\
  if (val == FAIL)						\
    {								\
      first_error (_(reg_expected_msgs[regtype]));		\
      goto failure;						\
    }								\
  inst.operands[i].reg = val;					\
  inst.operands[i].isreg = 1;					\
  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);		\
  inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
			    || rtype == REG_TYPE_VFD		\
			    || rtype == REG_TYPE_NQ);		\
} while (0)

/* As po_reg_or_fail, but on failure jump to LABEL to try an
   alternative parse instead of failing outright.  */
#define po_reg_or_goto(regtype, label) do {			\
  val = arm_typed_reg_parse (&str, regtype, &rtype,		\
                             &inst.operands[i].vectype);	\
  if (val == FAIL)						\
    goto label;							\
								\
  inst.operands[i].reg = val;					\
  inst.operands[i].isreg = 1;					\
  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);		\
  inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
			    || rtype == REG_TYPE_VFD		\
			    || rtype == REG_TYPE_NQ);		\
} while (0)

/* Parse an immediate constrained to [MIN, MAX] into operand I.  */
#define po_imm_or_fail(min, max, popt) do {			\
  if (parse_immediate (&str, &val, min, max, popt) == FAIL)	\
    goto failure;						\
  inst.operands[i].imm = val;					\
} while (0)

/* Parse a Neon scalar (Dn[x]) into operand I, or jump to LABEL.  */
#define po_scalar_or_goto(elsz, label) do {			\
  val = parse_scalar (&str, elsz, &inst.operands[i].vectype);	\
  if (val == FAIL)						\
    goto label;							\
  inst.operands[i].reg = val;					\
  inst.operands[i].isscalar = 1;				\
} while (0)

/* Fail if EXPR is nonzero (used to wrap sub-parsers that return
   nonzero/FAIL on error).  */
#define po_misc_or_fail(expr) do {		\
  if (expr)					\
    goto failure;				\
} while (0)

/* As po_misc_or_fail, but a NO_BACKTRACK result also disables the
   optional-operand backtracking machinery for this instruction.  */
#define po_misc_or_fail_no_backtrack(expr) do {	\
  result = expr;				\
  if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)\
    backtrack_pos = 0;				\
  if (result != PARSE_OPERAND_SUCCESS)		\
    goto failure;				\
} while (0)

  skip_whitespace (str);

  for (i = 0; upat[i] != OP_stop; i++)
    {
      if (upat[i] >= OP_FIRST_OPTIONAL)
	{
	  /* Remember where we are in case we need to backtrack.  */
	  assert (!backtrack_pos);
	  backtrack_pos = str;
	  backtrack_error = inst.error;
	  backtrack_index = i;
	}

      /* A comma separates operands; the first operand needs none.  */
      if (i > 0 && (i > 1 || inst.operands[0].present))
	po_char_or_fail (',');

      switch (upat[i])
	{
	  /* Registers */
	case OP_oRRnpc:
	case OP_RRnpc:
	case OP_oRR:
	case OP_RR:    po_reg_or_fail (REG_TYPE_RN);	  break;
	case OP_RCP:   po_reg_or_fail (REG_TYPE_CP);	  break;
	case OP_RCN:   po_reg_or_fail (REG_TYPE_CN);	  break;
	case OP_RF:    po_reg_or_fail (REG_TYPE_FN);	  break;
	case OP_RVS:   po_reg_or_fail (REG_TYPE_VFS);	  break;
	case OP_RVD:   po_reg_or_fail (REG_TYPE_VFD);	  break;
	case OP_oRND:
	case OP_RND:   po_reg_or_fail (REG_TYPE_VFD);	  break;
	case OP_RVC:
	  po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
	  break;
	  /* Also accept generic coprocessor regs for unknown registers.  */
	coproc_reg:
	  po_reg_or_fail (REG_TYPE_CN);
	  break;
	case OP_RMF:   po_reg_or_fail (REG_TYPE_MVF);	  break;
	case OP_RMD:   po_reg_or_fail (REG_TYPE_MVD);	  break;
	case OP_RMFX:  po_reg_or_fail (REG_TYPE_MVFX);	  break;
	case OP_RMDX:  po_reg_or_fail (REG_TYPE_MVDX);	  break;
	case OP_RMAX:  po_reg_or_fail (REG_TYPE_MVAX);	  break;
	case OP_RMDS:  po_reg_or_fail (REG_TYPE_DSPSC);	  break;
	case OP_RIWR:  po_reg_or_fail (REG_TYPE_MMXWR);	  break;
	case OP_RIWC:  po_reg_or_fail (REG_TYPE_MMXWC);	  break;
	case OP_RIWG:  po_reg_or_fail (REG_TYPE_MMXWCG);  break;
	case OP_RXA:   po_reg_or_fail (REG_TYPE_XSCALE);  break;
	case OP_oRNQ:
	case OP_RNQ:   po_reg_or_fail (REG_TYPE_NQ);      break;
	case OP_oRNDQ:
	case OP_RNDQ:  po_reg_or_fail (REG_TYPE_NDQ);     break;
	case OP_RVSD:  po_reg_or_fail (REG_TYPE_VFSD);    break;
	case OP_oRNSDQ:
	case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ);    break;

	  /* Neon scalar. Using an element size of 8 means that some invalid
	     scalars are accepted here, so deal with those in later code.  */
	case OP_RNSC:  po_scalar_or_goto (8, failure);    break;

	  /* WARNING: We can expand to two operands here. This has the potential
	     to totally confuse the backtracking mechanism! It will be OK at
	     least as long as we don't try to use optional args as well,
	     though.  */
	case OP_NILO:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_imm);
	    inst.operands[i].present = 1;
	    i++;
	    skip_past_comma (&str);
	    po_reg_or_goto (REG_TYPE_NDQ, one_reg_only);
	    break;
	  one_reg_only:
	    /* Optional register operand was omitted. Unfortunately, it's in
	       operands[i-1] and we need it to be in inst.operands[i]. Fix that
	       here (this is a bit grotty).  */
	    inst.operands[i] = inst.operands[i-1];
	    inst.operands[i-1].present = 0;
	    break;
	  try_imm:
	    /* There's a possibility of getting a 64-bit immediate here, so
	       we need special handling.  */
	    if (parse_big_immediate (&str, i) == FAIL)
	      {
		inst.error = _("immediate value is out of range");
		goto failure;
	      }
	  }
	  break;

	case OP_RNDQ_I0:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
	    break;
	  try_imm0:
	    po_imm_or_fail (0, 0, TRUE);
	  }
	  break;

	case OP_RVSD_I0:
	  /* Shares the try_imm0 label in the OP_RNDQ_I0 arm above.  */
	  po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
	  break;

	case OP_RR_RNSC:
	  {
	    po_scalar_or_goto (8, try_rr);
	    break;
	  try_rr:
	    po_reg_or_fail (REG_TYPE_RN);
	  }
	  break;

	case OP_RNSDQ_RNSC:
	  {
	    po_scalar_or_goto (8, try_nsdq);
	    break;
	  try_nsdq:
	    po_reg_or_fail (REG_TYPE_NSDQ);
	  }
	  break;

	case OP_RNDQ_RNSC:
	  {
	    po_scalar_or_goto (8, try_ndq);
	    break;
	  try_ndq:
	    po_reg_or_fail (REG_TYPE_NDQ);
	  }
	  break;

	case OP_RND_RNSC:
	  {
	    po_scalar_or_goto (8, try_vfd);
	    break;
	  try_vfd:
	    po_reg_or_fail (REG_TYPE_VFD);
	  }
	  break;

	case OP_VMOV:
	  /* WARNING: parse_neon_mov can move the operand counter, i. If we're
	     not careful then bad things might happen.  */
	  po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
	  break;

	case OP_RNDQ_IMVNb:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_mvnimm);
	    break;
	  try_mvnimm:
	    /* There's a possibility of getting a 64-bit immediate here, so
	       we need special handling.  */
	    if (parse_big_immediate (&str, i) == FAIL)
	      {
		inst.error = _("immediate value is out of range");
		goto failure;
	      }
	  }
	  break;

	case OP_RNDQ_I63b:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
	    break;
	  try_shimm:
	    po_imm_or_fail (0, 63, TRUE);
	  }
	  break;

	case OP_RRnpcb:
	  po_char_or_fail ('[');
	  po_reg_or_fail  (REG_TYPE_RN);
	  po_char_or_fail (']');
	  break;

	case OP_RRw:
	case OP_oRRw:
	  po_reg_or_fail (REG_TYPE_RN);
	  if (skip_past_char (&str, '!') == SUCCESS)
	    inst.operands[i].writeback = 1;
	  break;

	  /* Immediates */
	case OP_I7:	 po_imm_or_fail (  0,	   7, FALSE);	break;
	case OP_I15:	 po_imm_or_fail (  0,	  15, FALSE);	break;
	case OP_I16:	 po_imm_or_fail (  1,	  16, FALSE);	break;
	case OP_I16z:	 po_imm_or_fail (  0,	  16, FALSE);	break;
	case OP_I31:	 po_imm_or_fail (  0,	  31, FALSE);	break;
	case OP_I32:	 po_imm_or_fail (  1,	  32, FALSE);	break;
	case OP_I32z:	 po_imm_or_fail (  0,	  32, FALSE);	break;
	case OP_I63s:	 po_imm_or_fail (-64,	  63, FALSE);	break;
	case OP_I63:	 po_imm_or_fail (  0,	  63, FALSE);	break;
	case OP_I64:	 po_imm_or_fail (  1,	  64, FALSE);	break;
	case OP_I64z:	 po_imm_or_fail (  0,	  64, FALSE);	break;
	case OP_I255:	 po_imm_or_fail (  0,	 255, FALSE);	break;

	case OP_I4b:	 po_imm_or_fail (  1,	   4, TRUE);	break;
	case OP_oI7b:
	case OP_I7b:	 po_imm_or_fail (  0,	   7, TRUE);	break;
	case OP_I15b:	 po_imm_or_fail (  0,	  15, TRUE);	break;
	case OP_oI31b:
	case OP_I31b:	 po_imm_or_fail (  0,	  31, TRUE);	break;
	case OP_oI32b:	 po_imm_or_fail (  1,	  32, TRUE);	break;
	case OP_oIffffb: po_imm_or_fail (  0, 0xffff, TRUE);	break;

	  /* Immediate variants */
	case OP_oI255c:
	  po_char_or_fail ('{');
	  po_imm_or_fail (0, 255, TRUE);
	  po_char_or_fail ('}');
	  break;

	case OP_I31w:
	  /* The expression parser chokes on a trailing !, so we have
	     to find it first and zap it.  */
	  {
	    char *s = str;
	    while (*s && *s != ',')
	      s++;
	    if (s[-1] == '!')
	      {
		s[-1] = '\0';
		inst.operands[i].writeback = 1;
	      }
	    po_imm_or_fail (0, 31, TRUE);
	    /* If the parse stopped exactly at the zapped '!', step over it.  */
	    if (str == s - 1)
	      str = s;
	  }
	  break;

	  /* Expressions */
	case OP_EXPi:	EXPi:
	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
					      GE_OPT_PREFIX));
	  break;

	case OP_EXP:
	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
					      GE_NO_PREFIX));
	  break;

	case OP_EXPr:	EXPr:
	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
					      GE_NO_PREFIX));
	  if (inst.reloc.exp.X_op == O_symbol)
	    {
	      val = parse_reloc (&str);
	      if (val == -1)
		{
		  inst.error = _("unrecognized relocation suffix");
		  goto failure;
		}
	      else if (val != BFD_RELOC_UNUSED)
		{
		  inst.operands[i].imm = val;
		  inst.operands[i].hasreloc = 1;
		}
	    }
	  break;

	  /* Operand for MOVW or MOVT.  */
	case OP_HALF:
	  po_misc_or_fail (parse_half (&str));
	  break;

	  /* Register or expression */
	case OP_RR_EXr:	  po_reg_or_goto (REG_TYPE_RN, EXPr); break;
	case OP_RR_EXi:	  po_reg_or_goto (REG_TYPE_RN, EXPi); break;

	  /* Register or immediate */
	case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0);   break;
	I0:		  po_imm_or_fail (0, 0, FALSE);	      break;

	case OP_RF_IF:    po_reg_or_goto (REG_TYPE_FN, IF);   break;
	IF:
	  if (!is_immediate_prefix (*str))
	    goto bad_args;
	  str++;
	  val = parse_fpa_immediate (&str);
	  if (val == FAIL)
	    goto failure;
	  /* FPA immediates are encoded as registers 8-15.
	     parse_fpa_immediate has already applied the offset.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  break;

	case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
	I32z:		  po_imm_or_fail (0, 32, FALSE);	  break;

	  /* Two kinds of register */
	case OP_RIWR_RIWC:
	  {
	    struct reg_entry *rege = arm_reg_parse_multi (&str);
	    if (!rege
		|| (rege->type != REG_TYPE_MMXWR
		    && rege->type != REG_TYPE_MMXWC
		    && rege->type != REG_TYPE_MMXWCG))
	      {
		inst.error = _("iWMMXt data or control register expected");
		goto failure;
	      }
	    inst.operands[i].reg = rege->number;
	    inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
	  }
	  break;

	case OP_RIWC_RIWG:
	  {
	    struct reg_entry *rege = arm_reg_parse_multi (&str);
	    if (!rege
		|| (rege->type != REG_TYPE_MMXWC
		    && rege->type != REG_TYPE_MMXWCG))
	      {
		inst.error = _("iWMMXt control register expected");
		goto failure;
	      }
	    inst.operands[i].reg = rege->number;
	    inst.operands[i].isreg = 1;
	  }
	  break;

	  /* Misc */
	case OP_CPSF:	 val = parse_cps_flags (&str);		break;
	case OP_ENDI:	 val = parse_endian_specifier (&str);	break;
	case OP_oROR:	 val = parse_ror (&str);		break;
	case OP_PSR:	 val = parse_psr (&str);		break;
	case OP_COND:	 val = parse_cond (&str);		break;
	case OP_oBARRIER:val = parse_barrier (&str);		break;

	case OP_RVC_PSR:
	  po_reg_or_goto (REG_TYPE_VFC, try_psr);
	  inst.operands[i].isvec = 1;  /* Mark VFP control reg as vector.  */
	  break;
	try_psr:
	  val = parse_psr (&str);
	  break;

	case OP_APSR_RR:
	  po_reg_or_goto (REG_TYPE_RN, try_apsr);
	  break;
	try_apsr:
	  /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
	     instruction).  */
	  if (strncasecmp (str, "APSR_", 5) == 0)
	    {
	      /* Each of c/n/z/v must appear exactly once, in any order;
		 a repeat or unknown flag forces found to 16 (invalid).  */
	      unsigned found = 0;
	      str += 5;
	      while (found < 15)
		switch (*str++)
		  {
		  case 'c': found = (found & 1) ? 16 : found | 1; break;
		  case 'n': found = (found & 2) ? 16 : found | 2; break;
		  case 'z': found = (found & 4) ? 16 : found | 4; break;
		  case 'v': found = (found & 8) ? 16 : found | 8; break;
		  default: found = 16;
		  }
	      if (found != 15)
		goto failure;
	      inst.operands[i].isvec = 1;
	    }
	  else
	    goto failure;
	  break;

	case OP_TB:
	  po_misc_or_fail (parse_tb (&str));
	  break;

	  /* Register lists */
	case OP_REGLST:
	  val = parse_reg_list (&str);
	  if (*str == '^')
	    {
	      /* NOTE(review): writeback is set on operands[1] regardless of
		 i here — presumably the '^' form only occurs where the list
		 is operand 1; confirm against the opcode table.  */
	      inst.operands[1].writeback = 1;
	      str++;
	    }
	  break;

	case OP_VRSLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
	  break;

	case OP_VRDLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
	  break;

	case OP_VRSDLST:
	  /* Allow Q registers too.  */
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_NEON_D);
	  if (val == FAIL)
	    {
	      inst.error = NULL;
	      val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
					REGLIST_VFP_S);
	      inst.operands[i].issingle = 1;
	    }
	  break;

	case OP_NRDLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_NEON_D);
	  break;

	case OP_NSTRLST:
	  val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
					   &inst.operands[i].vectype);
	  break;

	  /* Addressing modes */
	case OP_ADDR:
	  po_misc_or_fail (parse_address (&str, i));
	  break;

	case OP_ADDRGLDR:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDR));
	  break;

	case OP_ADDRGLDRS:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDRS));
	  break;

	case OP_ADDRGLDC:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDC));
	  break;

	case OP_SH:
	  po_misc_or_fail (parse_shifter_operand (&str, i));
	  break;

	case OP_SHG:
	  po_misc_or_fail_no_backtrack (
	    parse_shifter_operand_group_reloc (&str, i));
	  break;

	case OP_oSHll:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
	  break;

	case OP_oSHar:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
	  break;

	case OP_oSHllar:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
	  break;

	default:
	  as_fatal ("unhandled operand code %d", upat[i]);
	}

      /* Various value-based sanity checks and shared operations.  We
	 do not signal immediate failures for the register constraints;
	 this allows a syntax error to take precedence.  */
      switch (upat[i])
	{
	case OP_oRRnpc:
	case OP_RRnpc:
	case OP_RRnpcb:
	case OP_RRw:
	case OP_oRRw:
	case OP_RRnpc_I0:
	  if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
	    inst.error = BAD_PC;
	  break;

	case OP_CPSF:
	case OP_ENDI:
	case OP_oROR:
	case OP_PSR:
	case OP_RVC_PSR:
	case OP_COND:
	case OP_oBARRIER:
	case OP_REGLST:
	case OP_VRSLST:
	case OP_VRDLST:
	case OP_VRSDLST:
	case OP_NRDLST:
	case OP_NSTRLST:
	  if (val == FAIL)
	    goto failure;
	  inst.operands[i].imm = val;
	  break;

	default:
	  break;
	}

      /* If we get here, this operand was successfully parsed.  */
      inst.operands[i].present = 1;
      continue;

    bad_args:
      inst.error = BAD_ARGS;

    failure:
      if (!backtrack_pos)
	{
	  /* The parse routine should already have set inst.error, but set a
	     default here just in case.  */
	  if (!inst.error)
	    inst.error = _("syntax error");
	  return FAIL;
	}

      /* Do not backtrack over a trailing optional argument that
	 absorbed some text.  We will only fail again, with the
	 'garbage following instruction' error message, which is
	 probably less helpful than the current one.  */
      if (backtrack_index == i && backtrack_pos != str
	  && upat[i+1] == OP_stop)
	{
	  if (!inst.error)
	    inst.error = _("syntax error");
	  return FAIL;
	}

      /* Try again, skipping the optional argument at backtrack_pos.  */
      str = backtrack_pos;
      inst.error = backtrack_error;
      inst.operands[backtrack_index].present = 0;
      i = backtrack_index;
      backtrack_pos = 0;
    }

  /* Check that we have parsed all the arguments.  */
  if (*str != '\0' && !inst.error)
    inst.error = _("garbage following instruction");

  return inst.error ? FAIL : SUCCESS;
}

#undef po_char_or_fail
#undef po_reg_or_fail
#undef po_reg_or_goto
#undef po_imm_or_fail
/* NOTE(review): the macro defined above is po_scalar_or_goto, not
   po_scalar_or_fail; this #undef is a no-op and po_scalar_or_goto (and the
   po_misc_or_fail* macros) remain defined past this point — confirm intent.  */
#undef po_scalar_or_fail

/* Shorthand macro for instruction encoding functions issuing errors.  */
#define constraint(expr, err) do {		\
  if (expr)					\
    {						\
      inst.error = err;				\
      return;					\
    }						\
} while (0)

/* Functions for operand encoding.  ARM, then Thumb.  */

/* NOTE: callers must ensure 0 < n < 32; a shift by 32 here would be
   undefined behaviour.  */
#define rotate_left(v, n) (v << n | v >> (32 - n))

/* If VAL can be encoded in the immediate field of an ARM instruction,
   return the encoded form.  Otherwise, return FAIL.  */

static unsigned int
encode_arm_immediate (unsigned int val)
{
  unsigned int a, i;

  /* Try every even rotation until the value fits in 8 bits.  */
  for (i = 0; i < 32; i += 2)
    if ((a = rotate_left (val, i)) <= 0xff)
      return a | (i << 7); /* 12-bit pack: [shift-cnt,const].  */

  return FAIL;
}

/* If VAL can be encoded in the immediate field of a Thumb32 instruction,
   return the encoded form.  Otherwise, return FAIL.
*/
static unsigned int
encode_thumb32_immediate (unsigned int val)
{
  unsigned int a, i;

  /* An 8-bit value needs no further encoding.  */
  if (val <= 0xff)
    return val;

  /* Try the "rotated constant" form: an 8-bit value with its top bit
     set, rotated right by 8..31 positions.  The encoding returned is
     the 7 low bits of the constant plus the rotation count in bits
     7-11.  */
  for (i = 1; i <= 24; i++)
    {
      a = val >> i; /* NOTE(review): dead store -- 'a' is not read
		       before being reassigned below.  */
      if ((val & ~(0xff << i)) == 0)
	return ((val >> i) & 0x7f) | ((32 - i) << 7);
    }

  /* Byte replicated in both halfwords: 0x00XY00XY.  */
  a = val & 0xff;
  if (val == ((a << 16) | a))
    return 0x100 | a;
  /* Byte replicated in all four bytes: 0xXYXYXYXY.  */
  if (val == ((a << 24) | (a << 16) | (a << 8) | a))
    return 0x300 | a;

  /* High byte of each halfword replicated: 0xXY00XY00.  */
  a = val & 0xff00;
  if (val == ((a << 16) | a))
    return 0x200 | (a >> 8);

  return FAIL;
}

/* Encode a VFP SP or DP register number into inst.instruction.
   REG is the register number; POS selects which operand field of the
   instruction (Sd/Sn/Sm or Dd/Dn/Dm) receives it.  D registers above
   15 require the VFPv3 extension; using one records that dependency
   in the "arch used" feature set, or errors out if the selected FPU
   lacks VFPv3.  */

static void
encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
{
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
      && reg > 15)
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	{
	  /* Record that this object now depends on VFPv3.  */
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_v3);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_v3);
	}
      else
	{
	  first_error (_("D register out of range for selected VFP version"));
	  return;
	}
    }

  /* Each field splits the register number into a 4-bit (or, for S
     registers, 5-bit) main part and a single extension bit, stored at
     instruction positions fixed by the architecture.  */
  switch (pos)
    {
    case VFP_REG_Sd:
      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
      break;

    case VFP_REG_Sn:
      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
      break;

    case VFP_REG_Sm:
      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
      break;

    case VFP_REG_Dd:
      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
      break;

    case VFP_REG_Dn:
      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
      break;

    case VFP_REG_Dm:
      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
      break;

    default:
      abort ();
    }
}

/* Encode a <shift> in an ARM-format instruction.
   The immediate,
   if any, is handled by md_apply_fix.  */

/* Encode the shift portion of operand I into inst.instruction:
   shift kind in bits 5-6, plus either a shift register (bits 8-11)
   or an immediate shift count resolved later via the SHIFT_IMM
   relocation.  RRX is encoded as ROR with a zero count.  */
static void
encode_arm_shift (int i)
{
  if (inst.operands[i].shift_kind == SHIFT_RRX)
    inst.instruction |= SHIFT_ROR << 5;
  else
    {
      inst.instruction |= inst.operands[i].shift_kind << 5;
      if (inst.operands[i].immisreg)
	{
	  /* Register-specified shift: Rs goes in bits 8-11.  */
	  inst.instruction |= SHIFT_BY_REG;
	  inst.instruction |= inst.operands[i].imm << 8;
	}
      else
	/* Immediate shift count; filled in by md_apply_fix.  */
	inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
    }
}

/* Encode operand I as an ARM data-processing shifter operand: either
   a (possibly shifted) register, or an immediate (marked with
   INST_IMMEDIATE and resolved via relocation).  */
static void
encode_arm_shifter_operand (int i)
{
  if (inst.operands[i].isreg)
    {
      inst.instruction |= inst.operands[i].reg;
      encode_arm_shift (i);
    }
  else
    inst.instruction |= INST_IMMEDIATE;
}

/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.
   Encode the base register and indexing mode of address operand I.
   IS_T means a user-mode (T-suffix) load/store, which only permits
   post-indexed addressing.  Also warns when the base register is
   written back and coincides with the transfer register.  */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  assert (inst.operands[i].isreg);
  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      assert (inst.operands[i].writeback);
      /* For T instructions post-indexing is implied; the W bit instead
	 selects the user-mode (translated) access.  */
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* Warn if the base (bits 16-19) will be written back and equals the
     source/destination register (bits 12-15) -- UNPREDICTABLE.  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}

/* inst.operands[i] was set up by parse_address.
   Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset, optionally scaled by an immediate shift.  */
      inst.instruction |= INST_IMMEDIATE; /* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      /* Shift amount is filled in by md_apply_fix.  */
	      inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.reloc */
    {
      /* Leave any group relocation already chosen by the parser.  */
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
    }
}

/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.  Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  /* Mode 3 has no scaled-register form.  */
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.reloc */
    {
      /* Mark the offset as a split 8-bit immediate.  */
      inst.instruction |= HWOFFSET_IMM;
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
    }
}

/* inst.operands[i] was set up by parse_address.
   Encode it into an
   ARM-format instruction.  Reject all forms which cannot be encoded
   into a coprocessor load/store instruction.  If wb_ok is false,
   reject use of writeback; if unind_ok is false, reject use of
   unindexed addressing.  If reloc_override is not 0, use it instead
   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
   (in which case it is preserved).

   Returns SUCCESS, or FAIL with inst.error set.  */

static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  inst.instruction |= inst.operands[i].reg << 16;

  assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* Unindexed form carries the 8-bit option value directly.  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  if (reloc_override)
    inst.reloc.type = reloc_override;
  /* Preserve group relocations chosen by the parser.  */
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      if (thumb_mode)
	inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  return SUCCESS;
}

/* inst.reloc.exp describes an "=expr" load pseudo-operation.
   Determine whether it can be performed with a move instruction; if
   it can, convert inst.instruction to that move instruction and
   return 1; if it can't, convert inst.instruction to a literal-pool
   load and return 0.  If this is not a valid thing to do in the
   current context, set inst.error and return 1.

   inst.operands[i] describes the destination register.  */

static int
move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
{
  unsigned long tbit;

  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  /* "=expr" only makes sense on loads, not stores.  */
  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return 1;
    }
  if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
    {
      inst.error = _("constant expression expected");
      return 1;
    }
  if (inst.reloc.exp.X_op == O_constant)
    {
      if (thumb_p)
	{
	  if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
	    {
	      /* This can be done with a mov(1) instruction.  */
	      inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
	      inst.instruction |= inst.reloc.exp.X_add_number;
	      return 1;
	    }
	}
      else
	{
	  int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
	  if (value != FAIL)
	    {
	      /* This can be done with a mov instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;
	      return 1;
	    }

	  /* Try the bitwise complement with mvn instead.  */
	  value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
	  if (value != FAIL)
	    {
	      /* This can be done with a mvn instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;
	      return 1;
	    }
	}
    }

  /* Fall back to a pc-relative load from the literal pool.  */
  if (add_to_lit_pool () == FAIL)
    {
      inst.error = _("literal pool insertion failed");
      return 1;
    }
  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = (thumb_p
		     ? BFD_RELOC_ARM_THUMB_OFFSET
		     : (mode_3
			? BFD_RELOC_ARM_HWLITERAL
			: BFD_RELOC_ARM_LITERAL));
  return 0;
}

/* Functions for instruction encoding, sorted by subarchitecture.
   First some generics; their names are taken from the conventional
   bit positions for register arguments in ARM format instructions.  */

/* Instruction requires no operand encoding at all.  */
static void
do_noargs (void)
{
}

/* Single register operand in the Rd field (bits 12-15).  */
static void
do_rd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
}

/* Rd (bits 12-15) and Rm (bits 0-3).  */
static void
do_rd_rm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
}

/* Rd (bits 12-15) and Rn (bits 16-19).  */
static void
do_rd_rn (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
}

/* First operand in the Rn field (bits 16-19), second in Rd (12-15).  */
static void
do_rn_rd (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
}

static void
do_rd_rm_rn (void)
{
  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.
*/ 6501 if ((inst.instruction & 0x0fbfffff) == 0x01000090) 6502 constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg, 6503 _("Rn must not overlap other operands")); 6504 inst.instruction |= inst.operands[0].reg << 12; 6505 inst.instruction |= inst.operands[1].reg; 6506 inst.instruction |= Rn << 16; 6507} 6508 6509static void 6510do_rd_rn_rm (void) 6511{ 6512 inst.instruction |= inst.operands[0].reg << 12; 6513 inst.instruction |= inst.operands[1].reg << 16; 6514 inst.instruction |= inst.operands[2].reg; 6515} 6516 6517static void 6518do_rm_rd_rn (void) 6519{ 6520 inst.instruction |= inst.operands[0].reg; 6521 inst.instruction |= inst.operands[1].reg << 12; 6522 inst.instruction |= inst.operands[2].reg << 16; 6523} 6524 6525static void 6526do_imm0 (void) 6527{ 6528 inst.instruction |= inst.operands[0].imm; 6529} 6530 6531static void 6532do_rd_cpaddr (void) 6533{ 6534 inst.instruction |= inst.operands[0].reg << 12; 6535 encode_arm_cp_address (1, TRUE, TRUE, 0); 6536} 6537 6538/* ARM instructions, in alphabetical order by function name (except 6539 that wrapper functions appear immediately after the function they 6540 wrap). */ 6541 6542/* This is a pseudo-op of the form "adr rd, label" to be converted 6543 into a relative address of the form "add rd, pc, #label-.-8". */ 6544 6545static void 6546do_adr (void) 6547{ 6548 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */ 6549 6550 /* Frag hacking will turn this into a sub instruction if the offset turns 6551 out to be negative. 
*/ 6552 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE; 6553 inst.reloc.pc_rel = 1; 6554 inst.reloc.exp.X_add_number -= 8; 6555} 6556 6557/* This is a pseudo-op of the form "adrl rd, label" to be converted 6558 into a relative address of the form: 6559 add rd, pc, #low(label-.-8)" 6560 add rd, rd, #high(label-.-8)" */ 6561 6562static void 6563do_adrl (void) 6564{ 6565 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */ 6566 6567 /* Frag hacking will turn this into a sub instruction if the offset turns 6568 out to be negative. */ 6569 inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE; 6570 inst.reloc.pc_rel = 1; 6571 inst.size = INSN_SIZE * 2; 6572 inst.reloc.exp.X_add_number -= 8; 6573} 6574 6575static void 6576do_arit (void) 6577{ 6578 if (!inst.operands[1].present) 6579 inst.operands[1].reg = inst.operands[0].reg; 6580 inst.instruction |= inst.operands[0].reg << 12; 6581 inst.instruction |= inst.operands[1].reg << 16; 6582 encode_arm_shifter_operand (2); 6583} 6584 6585static void 6586do_barrier (void) 6587{ 6588 if (inst.operands[0].present) 6589 { 6590 constraint ((inst.instruction & 0xf0) != 0x40 6591 && (inst.instruction & 0xf0) != 0x50 6592 && inst.operands[0].imm != 0xf, 6593 "bad barrier type"); 6594 inst.instruction |= inst.operands[0].imm; 6595 } 6596 else 6597 inst.instruction |= 0xf; 6598} 6599 6600static void 6601do_bfc (void) 6602{ 6603 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm; 6604 constraint (msb > 32, _("bit-field extends past end of register")); 6605 /* The instruction encoding stores the LSB and MSB, 6606 not the LSB and width. */ 6607 inst.instruction |= inst.operands[0].reg << 12; 6608 inst.instruction |= inst.operands[1].imm << 7; 6609 inst.instruction |= (msb - 1) << 16; 6610} 6611 6612static void 6613do_bfi (void) 6614{ 6615 unsigned int msb; 6616 6617 /* #0 in second position is alternative syntax for bfc, which is 6618 the same instruction but with REG_PC in the Rm field. 
   */
  if (!inst.operands[1].isreg)
    inst.operands[1].reg = REG_PC;

  msb = inst.operands[2].imm + inst.operands[3].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (msb - 1) << 16;
}

/* SBFX/UBFX Rd, Rn, #lsb, #width.  The encoding stores LSB and
   width-1.  */
static void
do_bfx (void)
{
  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
	      _("bit-field extends past end of register"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (inst.operands[3].imm - 1) << 16;
}

/* ARM V5 breakpoint instruction (argument parse)
   BKPT <16 bit unsigned immediate>
   Instruction is not conditional.
   The bit pattern given in insns[] has the COND_ALWAYS condition,
   and it is an error if the caller tried to override that.  */

static void
do_bkpt (void)
{
  /* Top 12 of 16 bits to bits 19:8.  */
  inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;

  /* Bottom 4 of 16 bits to bits 3:0.
   */
  inst.instruction |= inst.operands[0].imm & 0xf;
}

/* Record the relocation for a branch target.  DEFAULT_RELOC is used
   unless the operand carried an explicit (plt) suffix, in which case
   a PLT32 relocation is emitted instead.  */
static void
encode_branch (int default_reloc)
{
  if (inst.operands[0].hasreloc)
    {
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32,
		  _("the only suffix valid here is '(plt)'"));
      inst.reloc.type = BFD_RELOC_ARM_PLT32;
    }
  else
    {
      inst.reloc.type = default_reloc;
    }
  inst.reloc.pc_rel = 1;
}

/* B{cond} <target>.  EABI v4 and later uses a distinct relocation so
   the linker can veneer ARM/Thumb interworking jumps.  */
static void
do_branch (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}

/* BL{cond} <target>.  Under EABI v4+ a conditional BL cannot be
   turned into BLX by the linker, so it gets the JUMP relocation
   rather than CALL.  */
static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}

/* ARM V5 branch-link-exchange instruction (argument parse)
   BLX <target_addr>		ie BLX(1)
   BLX{<condition>} <Rm>	ie BLX(2)
   Unfortunately, there are two different opcodes for this mnemonic.
   So, the insns[].value is not used, and the code here zaps values
	into inst.instruction.
   Also, the <target_addr> can be 25 bits, hence has its own reloc.  */

static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.
       */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      /* BLX(1) form: unconditional opcode.  */
      inst.instruction = 0xfa000000;
#ifdef OBJ_ELF
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
#endif
	encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}

/* BX{cond} <Rm>.  */
static void
do_bx (void)
{
  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
}


/* ARM v5TEJ.  Jump to Jazelle code.  */

static void
do_bxj (void)
{
  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bxj is not really useful"));

  inst.instruction |= inst.operands[0].reg;
}

/* Co-processor data operation:
   CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
   CDP2	     <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}	*/
static void
do_cdp (void)
{
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 20;
  inst.instruction |= inst.operands[2].reg << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}

/* Comparison instructions (CMP/CMN/TST/TEQ): Rn, <shifter operand>.
   No destination register.  */
static void
do_cmp (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_shifter_operand (1);
}

/* Transfer between coprocessor and ARM registers.
   MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
   MRC2
   MCR{cond}
   MCR2

   No special properties.
   */

static void
do_co_reg (void)
{
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 21;
  inst.instruction |= inst.operands[2].reg << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}

/* Transfer between coprocessor register and pair of ARM registers.
   MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
   MCRR2
   MRRC{cond}
   MRRC2

   Two XScale instructions are special cases of these:

     MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
     MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0

   Result unpredictable if Rd or Rn is R15.  */

static void
do_co_reg2c (void)
{
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 4;
  inst.instruction |= inst.operands[2].reg << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
}

/* CPS{IE,ID} <iflags>{, #<mode>}: change processor state.  The
   optional mode operand sets the M bit (CPSI_MMOD).  */
static void
do_cpsi (void)
{
  inst.instruction |= inst.operands[0].imm << 6;
  if (inst.operands[1].present)
    {
      inst.instruction |= CPSI_MMOD;
      inst.instruction |= inst.operands[1].imm;
    }
}

/* DBG #<option>: debug hint.  */
static void
do_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}

static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it but do not generate code for it.
   */
  inst.size = 0;
}

/* LDM/STM and variants.  Encodes the base register, register list,
   and the S/W bits, and warns about the UNPREDICTABLE writeback
   combinations defined by the architecture.  */
static void
do_ldmstm (void)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  /* '^' suffix on the register list selects user-bank transfer /
     exception return (LDM type 2 or 3).  */
  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }
}

/* ARMv5TE load-consecutive (argument parse)
   Mode is like LDRH.

   LDRccD R, mode
   STRccD R, mode.
   */

static void
do_ldrd (void)
{
  /* LDRD/STRD transfer an even/odd register pair; the second register
     is optional in the source and defaults to Rt + 1.  */
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first destination register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* r14/r15 pair is not permitted.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  if (inst.instruction & LOAD_BIT)
    {
      /* encode_arm_addr_mode_3 will diagnose overlap between the base
	 register and the first register written; we have to diagnose
	 overlap between the base and the second register written here.  */

      if (inst.operands[2].reg == inst.operands[1].reg
	  && (inst.operands[2].writeback || inst.operands[2].postind))
	as_warn (_("base register written back, and overlaps "
		   "second destination register"));

      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).
       */
      else if (inst.operands[2].immisreg
	       && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
		   || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps destination register"));
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}

/* LDREX Rt, [Rn].  Only the plain [Rn] addressing form (offset 0, no
   writeback or indexing) is valid.  */
static void
do_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  /* Offset already validated as zero; drop any pending relocation.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
}

static void
do_ldrexd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.
     */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  inst.instruction |= inst.operands[0].reg << 12;
  /* Operand 2 is the address base register [Rn].  */
  inst.instruction |= inst.operands[2].reg << 16;
}

/* Word/byte load/store (mode 2).  "ldr rd, =expr" may be converted to
   a mov or a literal-pool load by move_or_literal_pool.  */
static void
do_ldst (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
      return;
  encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
}

static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.reloc.exp.X_op != O_constant ||
		  inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
}

/* Halfword and signed-byte load/store operations.  */

static void
do_ldstv4 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
      return;
  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
}

static void
do_ldsttv4 (void)
{
  /* The T variants (e.g. ldrht/strht) always use post-indexed
     addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.reloc.exp.X_op != O_constant ||
		  inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
}

/* Co-processor register load/store.
7035 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */ 7036static void 7037do_lstc (void) 7038{ 7039 inst.instruction |= inst.operands[0].reg << 8; 7040 inst.instruction |= inst.operands[1].reg << 12; 7041 encode_arm_cp_address (2, TRUE, TRUE, 0); 7042} 7043 7044static void 7045do_mlas (void) 7046{ 7047 /* This restriction does not apply to mls (nor to mla in v6 or later). */ 7048 if (inst.operands[0].reg == inst.operands[1].reg 7049 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6) 7050 && !(inst.instruction & 0x00400000)) 7051 as_tsktsk (_("Rd and Rm should be different in mla")); 7052 7053 inst.instruction |= inst.operands[0].reg << 16; 7054 inst.instruction |= inst.operands[1].reg; 7055 inst.instruction |= inst.operands[2].reg << 8; 7056 inst.instruction |= inst.operands[3].reg << 12; 7057} 7058 7059static void 7060do_mov (void) 7061{ 7062 inst.instruction |= inst.operands[0].reg << 12; 7063 encode_arm_shifter_operand (1); 7064} 7065 7066/* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */ 7067static void 7068do_mov16 (void) 7069{ 7070 bfd_vma imm; 7071 bfd_boolean top; 7072 7073 top = (inst.instruction & 0x00400000) != 0; 7074 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW, 7075 _(":lower16: not allowed this instruction")); 7076 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT, 7077 _(":upper16: not allowed instruction")); 7078 inst.instruction |= inst.operands[0].reg << 12; 7079 if (inst.reloc.type == BFD_RELOC_UNUSED) 7080 { 7081 imm = inst.reloc.exp.X_add_number; 7082 /* The value is in two pieces: 0:11, 16:19. 
*/ 7083 inst.instruction |= (imm & 0x00000fff); 7084 inst.instruction |= (imm & 0x0000f000) << 4; 7085 } 7086} 7087 7088static void do_vfp_nsyn_opcode (const char *); 7089 7090static int 7091do_vfp_nsyn_mrs (void) 7092{ 7093 if (inst.operands[0].isvec) 7094 { 7095 if (inst.operands[1].reg != 1) 7096 first_error (_("operand 1 must be FPSCR")); 7097 memset (&inst.operands[0], '\0', sizeof (inst.operands[0])); 7098 memset (&inst.operands[1], '\0', sizeof (inst.operands[1])); 7099 do_vfp_nsyn_opcode ("fmstat"); 7100 } 7101 else if (inst.operands[1].isvec) 7102 do_vfp_nsyn_opcode ("fmrx"); 7103 else 7104 return FAIL; 7105 7106 return SUCCESS; 7107} 7108 7109static int 7110do_vfp_nsyn_msr (void) 7111{ 7112 if (inst.operands[0].isvec) 7113 do_vfp_nsyn_opcode ("fmxr"); 7114 else 7115 return FAIL; 7116 7117 return SUCCESS; 7118} 7119 7120static void 7121do_vfp_vmrs (void) 7122{ 7123 int rt; 7124 7125 /* The destination register can be r0-r14 or APSR_nzcv */ 7126 if (inst.operands[0].reg > 14) 7127 { 7128 inst.error = BAD_PC; 7129 return; 7130 } 7131 7132 /* If the destination is r13 and not in ARM mode then unprefictable */ 7133 if (thumb_mode && inst.operands[0].reg == REG_SP) 7134 { 7135 inst.error = BAD_SP; 7136 return; 7137 } 7138 7139 /* If the destination is APSR_nzcv */ 7140 if (inst.operands[0].isvec && inst.operands[1].reg != 1) 7141 { 7142 inst.error = BAD_VMRS; 7143 return; 7144 } 7145 7146 if (inst.operands[0].isvec) 7147 rt = 15; 7148 else 7149 rt = inst.operands[0].reg; 7150 7151 /* Or in the registers to use */ 7152 inst.instruction |= rt << 12; 7153 inst.instruction |= inst.operands[1].reg << 16; 7154} 7155 7156static void 7157do_vfp_vmsr (void) 7158{ 7159 /* The destination register can be r0-r14 or APSR_nzcv */ 7160 if (inst.operands[1].reg > 14) 7161 { 7162 inst.error = BAD_PC; 7163 return; 7164 } 7165 7166 /* If the destination is r13 and not in ARM mode then unprefictable */ 7167 if (thumb_mode && inst.operands[0].reg == REG_SP) 7168 { 7169 inst.error = 
BAD_SP; 7170 return; 7171 } 7172 7173 /* Or in the registers to use */ 7174 inst.instruction |= inst.operands[1].reg << 12; 7175 inst.instruction |= inst.operands[0].reg << 16; 7176} 7177 7178static void 7179do_mrs (void) 7180{ 7181 if (do_vfp_nsyn_mrs () == SUCCESS) 7182 return; 7183 7184 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */ 7185 constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f)) 7186 != (PSR_c|PSR_f), 7187 _("'CPSR' or 'SPSR' expected")); 7188 inst.instruction |= inst.operands[0].reg << 12; 7189 inst.instruction |= (inst.operands[1].imm & SPSR_BIT); 7190} 7191 7192/* Two possible forms: 7193 "{C|S}PSR_<field>, Rm", 7194 "{C|S}PSR_f, #expression". */ 7195 7196static void 7197do_msr (void) 7198{ 7199 if (do_vfp_nsyn_msr () == SUCCESS) 7200 return; 7201 7202 inst.instruction |= inst.operands[0].imm; 7203 if (inst.operands[1].isreg) 7204 inst.instruction |= inst.operands[1].reg; 7205 else 7206 { 7207 inst.instruction |= INST_IMMEDIATE; 7208 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE; 7209 inst.reloc.pc_rel = 0; 7210 } 7211} 7212 7213static void 7214do_mul (void) 7215{ 7216 if (!inst.operands[2].present) 7217 inst.operands[2].reg = inst.operands[0].reg; 7218 inst.instruction |= inst.operands[0].reg << 16; 7219 inst.instruction |= inst.operands[1].reg; 7220 inst.instruction |= inst.operands[2].reg << 8; 7221 7222 if (inst.operands[0].reg == inst.operands[1].reg 7223 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)) 7224 as_tsktsk (_("Rd and Rm should be different in mul")); 7225} 7226 7227/* Long Multiply Parser 7228 UMULL RdLo, RdHi, Rm, Rs 7229 SMULL RdLo, RdHi, Rm, Rs 7230 UMLAL RdLo, RdHi, Rm, Rs 7231 SMLAL RdLo, RdHi, Rm, Rs. 
 */

static void
do_mull (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  /* rdhi, rdlo and rm must all be different prior to ARMv6.  */
  if (inst.operands[0].reg == inst.operands[1].reg
      || ((inst.operands[0].reg == inst.operands[2].reg
	   || inst.operands[1].reg == inst.operands[2].reg)
	  && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)))
    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
}

/* NOP, optionally with an architectural hint operand (v6K and later).  */

static void
do_nop (void)
{
  if (inst.operands[0].present)
    {
      /* Architectural NOP hints are CPSR sets with no bits selected.  */
      inst.instruction &= 0xf0000000;
      inst.instruction |= 0x0320f000 + inst.operands[0].imm;
    }
}

/* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
   PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
   Condition defaults to COND_ALWAYS.
   Error if Rd, Rn or Rm are R15.  */

static void
do_pkhbt (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  if (inst.operands[3].present)
    encode_arm_shift (3);
}

/* ARM V6 PKHTB (Argument Parse).  */

static void
do_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      /* If the shift specifier is omitted, turn the instruction
	 into pkhbt rd, rm, rn.  */
      inst.instruction &= 0xfff00010;
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].reg << 16;
    }
  else
    {
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      encode_arm_shift (3);
    }
}

/* ARMv5TE: Preload-Cache

    PLD <addr_mode>

  Syntactically, like LDR with B=1, W=0, L=1.  */

static void
do_pld (void)
{
  /* PLD only accepts a plain pre-indexed address, no writeback.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}

/* ARMv7: PLI <addr_mode> */
static void
do_pli (void)
{
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  /* PLI is distinguished from PLD by a clear P bit.  */
  inst.instruction &= ~PRE_INDEX;
}

/* PUSH/POP: rewrite as STM/LDM on sp! and reuse the LDM/STM encoder.  */

static void
do_push_pop (void)
{
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  do_ldmstm ();
}

/* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
   word at the specified address and the following
   word
   respectively.
   Unconditionally executed.
   Error if Rn is R15.  */

static void
do_rfe (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
}

/* ARM V6 ssat (argument parse).  */

static void
do_ssat (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* Signed saturation encodes the bit position as width - 1.  */
  inst.instruction |= (inst.operands[1].imm - 1) << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}

/* ARM V6 usat (argument parse).  */

static void
do_usat (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* Unsigned saturation encodes the width directly.  */
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}

/* ARM V6 ssat16 (argument parse).  */

static void
do_ssat16 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= ((inst.operands[1].imm - 1) << 16);
  inst.instruction |= inst.operands[2].reg;
}

/* ARM V6 usat16 (argument parse).  */

static void
do_usat16 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;
}

/* ARM V6 SETEND (argument parse).  Sets the E bit in the CPSR while
   preserving the other bits.

   setend <endian_specifier>, where <endian_specifier> is either
   BE or LE.  */

static void
do_setend (void)
{
  /* imm is nonzero for BE; bit 9 selects big-endian.  */
  if (inst.operands[0].imm)
    inst.instruction |= 0x200;
}

/* ARM shift mnemonics (lsl/lsr/asr/ror): register or immediate count.  */

static void
do_shift (void)
{
  /* "shift Rd, <count>" is shorthand for "shift Rd, Rd, <count>".  */
  unsigned int Rm = (inst.operands[1].present
		     ? inst.operands[1].reg
		     : inst.operands[0].reg);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= Rm;
  if (inst.operands[2].isreg)  /* Rd, {Rm,} Rs */
    {
      inst.instruction |= inst.operands[2].reg << 8;
      inst.instruction |= SHIFT_BY_REG;
    }
  else
    /* Immediate count: resolved by the fixup machinery.  */
    inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
}

static void
do_smc (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SMC;
  inst.reloc.pc_rel = 0;
}

static void
do_swi (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SWI;
  inst.reloc.pc_rel = 0;
}

/* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
   SMLAxy{cond} Rd,Rm,Rs,Rn
   SMLAWy{cond} Rd,Rm,Rs,Rn
   Error if any register is R15.  */

static void
do_smla (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;
}

/* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
   SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
   Error if any register is R15.
   Warning if Rdlo == Rdhi.  */

static void
do_smlal (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));
}

/* ARM V5E (El Segundo) signed-multiply (argument parse)
   SMULxy{cond} Rd,Rm,Rs
   Error if any register is R15.  */

static void
do_smul (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
}

/* ARM V6 srs (argument parse).
   The variable fields in the encoding are
   the same for both ARM and Thumb-2.  */

static void
do_srs (void)
{
  int reg;

  /* The base register is optional and defaults to (and must be) r13.  */
  if (inst.operands[0].present)
    {
      reg = inst.operands[0].reg;
      constraint (reg != 13, _("SRS base register must be r13"));
    }
  else
    reg = 13;

  inst.instruction |= reg << 16;
  inst.instruction |= inst.operands[1].imm;
  if (inst.operands[0].writeback || inst.operands[1].writeback)
    inst.instruction |= WRITE_BACK;
}

/* ARM V6 strex (argument parse).  */

static void
do_strex (void)
{
  /* The address must be a plain [Rn]: no offset, index, shift,
     writeback or post-indexing, and Rn must not be PC.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  /* The status register Rd must not overlap Rt or Rn.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.reloc.type = BFD_RELOC_UNUSED;
}

/* ARM V6K strexd: stores the even/odd pair Rt, Rt+1 exclusively.  */

static void
do_strexd (void)
{
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  /* The status register must not overlap the pair or the base.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[3].reg << 16;
}

/* ARM V6 SXTAH extracts a 16-bit value from a register, sign
   extends it to 32-bits, and adds the result to a value in another
   register.  You can specify a rotation by 0, 8, 16, or 24 bits
   before extracting the 16-bit value.
   SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
   Condition defaults to COND_ALWAYS.
   Error if any register uses R15.  */

static void
do_sxtah (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  /* Rotation amount is pre-divided by 8; field lives at bits 11:10.  */
  inst.instruction |= inst.operands[3].imm << 10;
}

/* ARM V6 SXTH.

   SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
   Condition defaults to COND_ALWAYS.
   Error if any register uses R15.  */

static void
do_sxth (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 10;
}

/* VFP instructions.  In a logical order: SP variant first, monad
   before dyad, arithmetic then move then load/store.
 */

/* Single-precision monadic operation: Sd, Sm.  */

static void
do_vfp_sp_monadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

/* Single-precision dyadic operation: Sd, Sn, Sm.  */

static void
do_vfp_sp_dyadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}

/* Single-precision compare against zero: only Sd is encoded.  */

static void
do_vfp_sp_compare_z (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}

/* Conversion with double-precision destination: Dd, Sm.  */

static void
do_vfp_dp_sp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

/* Conversion with single-precision destination: Sd, Dm.  */

static void
do_vfp_sp_dp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}

/* FMRS-style move: core register Rd from VFP Sn.  */

static void
do_vfp_reg_from_sp (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}

/* FMRRS-style move: two core registers from a consecutive SP pair.  */

static void
do_vfp_reg2_from_sp2 (void)
{
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}

/* FMSR-style move: VFP Sn from core register.  */

static void
do_vfp_sp_from_reg (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}

/* FMSRR-style move: a consecutive SP pair from two core registers.  */

static void
do_vfp_sp2_from_reg2 (void)
{
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}

/* Single-precision load/store: Sd, <address>.  */

static void
do_vfp_sp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}

/* Double-precision load/store: Dd, <address>.  */

static void
do_vfp_dp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}


/* Common encoder for single-precision load/store multiple.  */

static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  inst.instruction |= inst.operands[1].imm;
}

/* Common encoder for double-precision load/store multiple; the X
   variants transfer an extra word per register (FLDMX/FSTMX).  */

static void
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  int count;

  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
		_("this addressing mode requires base-register writeback"));

  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

  /* Each D register is two words; the X forms add one more.  */
  count = inst.operands[1].imm << 1;
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
    count += 1;

  inst.instruction |= count;
}

static void
do_vfp_sp_ldstmia (void)
{
  vfp_sp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_sp_ldstmdb (void)
{
  vfp_sp_ldstm (VFP_LDSTMDB);
}

static void
do_vfp_dp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_dp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDB);
}

static void
do_vfp_xp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIAX);
}

static void
do_vfp_xp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDBX);
}

/* Double-precision two-operand form: Dd, Dm.  */

static void
do_vfp_dp_rd_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}

/* Double-precision form with operands reversed in the encoding: Dn, Dd.  */

static void
do_vfp_dp_rn_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}

/* Double-precision form: Dd, Dn.  */

static void
do_vfp_dp_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}

/* Double-precision dyadic form: Dd, Dn, Dm.  */

static void
do_vfp_dp_rd_rn_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
}

/* Double-precision single-operand form: only Dd.  */

static void
do_vfp_dp_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}

/* Double-precision form: Dm, Dd, Dn.  */

static void
do_vfp_dp_rm_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}

/* VFPv3 instructions.
 */
/* VMOV.F32 Sd, #imm: split the 8-bit immediate into the abcd/efgh
   fields (bits 19:16 and 3:0).  */
static void
do_vfp_sp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}

/* VMOV.F64 Dd, #imm: same immediate split as the SP form.  */

static void
do_vfp_dp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}

/* Fixed-point conversion: encode the fraction-bits field, which holds
   srcsize minus the requested number of fraction bits, split across
   bit 5 (lsb) and bits 3:0 (upper bits).  */

static void
vfp_conv (int srcsize)
{
  unsigned immbits = srcsize - inst.operands[1].imm;
  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
}

static void
do_vfp_sp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}

static void
do_vfp_dp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}

static void
do_vfp_sp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}

static void
do_vfp_dp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}


/* FPA instructions.  Also in a logical order.  */

static void
do_fpa_cmp (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}

static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* The register count selects the transfer-length bits.  */
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X;		 break;
    case 2: inst.instruction |= CP_T_Y;		 break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4:					 break;
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* Each FPA register transfer is 12 bytes.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;

      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}


/* iWMMXt instructions: strictly in alphabetical order.  */

static void
do_iwmmxt_tandorc (void)
{
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}

static void
do_iwmmxt_textrc (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm;
}

static void
do_iwmmxt_textrm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].imm;
}

static void
do_iwmmxt_tinsr (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].imm;
}

static void
do_iwmmxt_tmia (void)
{
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}

static void
do_iwmmxt_waligni (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 20;
}

static void
do_iwmmxt_wmerge (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 21;
}

static void
do_iwmmxt_wmov (void)
{
  /* WMOV rD, rN is an alias for WOR rD, rN, rN.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}

/* WLDRB/WLDRH/WSTRB/WSTRH: byte/halfword forms need a scaled-offset
   relocation, which differs between ARM and Thumb-2 encodings.  */

static void
do_iwmmxt_wldstbh (void)
{
  int reloc;
  inst.instruction |= inst.operands[0].reg << 12;
  if (thumb_mode)
    reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
  else
    reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
  encode_arm_cp_address (1, TRUE, FALSE, reloc);
}

static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      /* Control-register transfers use the unconditional encoding.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}

static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* iWMMXt2 adds a register-offset form with its own encoding.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xf << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.reloc.exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
/* WSHUFH: the 8-bit shuffle immediate is split into bits 23:20 (high
   nibble) and 3:0 (low nibble).  */

static void
do_iwmmxt_wshufh (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
  inst.instruction |= (inst.operands[2].imm & 0x0f);
}

static void
do_iwmmxt_wzero (void)
{
  /* WZERO reg is an alias for WANDN reg, reg, reg.  */
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[0].reg << 16;
}

/* iWMMXt shift/rotate: third operand is either a register (classic
   form) or, on iWMMXt2, a 5-bit immediate.  Zero immediates are
   rewritten as equivalent full-width operations.  */

static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else {
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		_("immediate operand requires iWMMXt2"));
    do_rd_rn ();
    if (inst.operands[2].imm == 0)
      {
	/* A shift count of 0 is not directly encodable; rewrite it as
	   a rotation by the full element width (or a plain move for
	   doubleword ops).  Opcode bits 23:20 select the element size.  */
	switch ((inst.instruction >> 20) & 0xf)
	  {
	  case 4:
	  case 5:
	  case 6:
	  case 7:
	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	    inst.operands[2].imm = 16;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	    break;
	  case 8:
	  case 9:
	  case 10:
	  case 11:
	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	    inst.operands[2].imm = 32;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	    break;
	  case 12:
	  case 13:
	  case 14:
	  case 15:
	    {
	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
	      unsigned long wrn;
	      wrn = (inst.instruction >> 16) & 0xf;
	      inst.instruction &= 0xff0fff0f;
	      inst.instruction |= wrn;
	      /* Bail out here; the instruction is now assembled.  */
	      return;
	    }
	  }
      }
    /* Map 32 -> 0, etc.  */
    inst.operands[2].imm &= 0x1f;
    inst.instruction |= (0xf << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
  }
}

/* Cirrus Maverick instructions.
   Simple 2-, 3-, and 4-register
   operations first, then control, shift, and load/store.  */

/* Insns like "foo X,Y,Z".  */

static void
do_mav_triple (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}

/* Insns like "foo W,X,Y,Z".
    where W=MVAX[0:3] and X,Y,Z=MVFX[0:15].  */

static void
do_mav_quad (void)
{
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.instruction |= inst.operands[3].reg;
}

/* cfmvsc32<cond> DSPSC,MVDX[15:0].  */
static void
do_mav_dspsc (void)
{
  /* Only the source register is variable; DSPSC is implicit.  */
  inst.instruction |= inst.operands[1].reg << 12;
}

/* Maverick shift immediate instructions.
   cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
   cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0].  */

static void
do_mav_shift (void)
{
  int imm = inst.operands[2].imm;

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;

  /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
     Bits 5-7 of the insn should have bits 4-6 of the immediate.
     Bit 4 should be 0.	 */
  imm = (imm & 0xf) | ((imm & 0x70) << 1);

  inst.instruction |= imm;
}

/* XScale instructions.	 Also sorted arithmetic before move.  */

/* Xscale multiply-accumulate (argument parse)
     MIAcc   acc0,Rm,Rs
     MIAPHcc acc0,Rm,Rs
     MIAxycc acc0,Rm,Rs.  */

static void
do_xsc_mia (void)
{
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}

/* Xscale move-accumulator-register (argument parse)

     MARcc   acc0,RdLo,RdHi.  */

static void
do_xsc_mar (void)
{
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}

/* Xscale move-register-accumulator (argument parse)

     MRAcc   RdLo,RdHi,acc0.  */

static void
do_xsc_mra (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
}

/* Encoding functions relevant only to Thumb.  */

/* inst.operands[i] is a shifted-register operand; encode
   it into inst.instruction in the format used by Thumb32.  */

static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    /* RRX is encoded as ROR with a zero shift amount.  */
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      /* A zero count is LSL #0; a count of 32 (ASR/LSR) is encoded
	 as 0.  */
      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      /* The 5-bit amount is split: imm3 at bits 14:12, imm2 at 7:6.  */
      inst.instruction |= shift << 4;
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}


/* inst.operands[i] was set up by parse_address.  Encode it into a
   Thumb32 format load or store instruction.  Reject forms that cannot
   be used with such instructions.  If is_t is true, reject forms that
   cannot be used with a T instruction; if is_d is true, reject forms
   that cannot be used with a D instruction.
*/ 8210 8211static void 8212encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d) 8213{ 8214 bfd_boolean is_pc = (inst.operands[i].reg == REG_PC); 8215 8216 constraint (!inst.operands[i].isreg, 8217 _("Instruction does not support =N addresses")); 8218 8219 inst.instruction |= inst.operands[i].reg << 16; 8220 if (inst.operands[i].immisreg) 8221 { 8222 constraint (is_pc, _("cannot use register index with PC-relative addressing")); 8223 constraint (is_t || is_d, _("cannot use register index with this instruction")); 8224 constraint (inst.operands[i].negative, 8225 _("Thumb does not support negative register indexing")); 8226 constraint (inst.operands[i].postind, 8227 _("Thumb does not support register post-indexing")); 8228 constraint (inst.operands[i].writeback, 8229 _("Thumb does not support register indexing with writeback")); 8230 constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL, 8231 _("Thumb supports only LSL in shifted register indexing")); 8232 8233 inst.instruction |= inst.operands[i].imm; 8234 if (inst.operands[i].shifted) 8235 { 8236 constraint (inst.reloc.exp.X_op != O_constant, 8237 _("expression too complex")); 8238 constraint (inst.reloc.exp.X_add_number < 0 8239 || inst.reloc.exp.X_add_number > 3, 8240 _("shift out of range")); 8241 inst.instruction |= inst.reloc.exp.X_add_number << 4; 8242 } 8243 inst.reloc.type = BFD_RELOC_UNUSED; 8244 } 8245 else if (inst.operands[i].preind) 8246 { 8247 constraint (is_pc && inst.operands[i].writeback, 8248 _("cannot use writeback with PC-relative addressing")); 8249 constraint (is_t && inst.operands[i].writeback, 8250 _("cannot use writeback with this instruction")); 8251 8252 if (is_d) 8253 { 8254 inst.instruction |= 0x01000000; 8255 if (inst.operands[i].writeback) 8256 inst.instruction |= 0x00200000; 8257 } 8258 else 8259 { 8260 inst.instruction |= 0x00000c00; 8261 if (inst.operands[i].writeback) 8262 inst.instruction |= 0x00000100; 8263 } 8264 inst.reloc.type = 
BFD_RELOC_ARM_T32_OFFSET_IMM; 8265 } 8266 else if (inst.operands[i].postind) 8267 { 8268 assert (inst.operands[i].writeback); 8269 constraint (is_pc, _("cannot use post-indexing with PC-relative addressing")); 8270 constraint (is_t, _("cannot use post-indexing with this instruction")); 8271 8272 if (is_d) 8273 inst.instruction |= 0x00200000; 8274 else 8275 inst.instruction |= 0x00000900; 8276 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM; 8277 } 8278 else /* unindexed - only for coprocessor */ 8279 inst.error = _("instruction does not accept unindexed addressing"); 8280} 8281 8282/* Table of Thumb instructions which exist in both 16- and 32-bit 8283 encodings (the latter only in post-V6T2 cores). The index is the 8284 value used in the insns table below. When there is more than one 8285 possible 16-bit encoding for the instruction, this table always 8286 holds variant (1). 8287 Also contains several pseudo-instructions used during relaxation. */ 8288#define T16_32_TAB \ 8289 X(adc, 4140, eb400000), \ 8290 X(adcs, 4140, eb500000), \ 8291 X(add, 1c00, eb000000), \ 8292 X(adds, 1c00, eb100000), \ 8293 X(addi, 0000, f1000000), \ 8294 X(addis, 0000, f1100000), \ 8295 X(add_pc,000f, f20f0000), \ 8296 X(add_sp,000d, f10d0000), \ 8297 X(adr, 000f, f20f0000), \ 8298 X(and, 4000, ea000000), \ 8299 X(ands, 4000, ea100000), \ 8300 X(asr, 1000, fa40f000), \ 8301 X(asrs, 1000, fa50f000), \ 8302 X(b, e000, f000b000), \ 8303 X(bcond, d000, f0008000), \ 8304 X(bic, 4380, ea200000), \ 8305 X(bics, 4380, ea300000), \ 8306 X(cmn, 42c0, eb100f00), \ 8307 X(cmp, 2800, ebb00f00), \ 8308 X(cpsie, b660, f3af8400), \ 8309 X(cpsid, b670, f3af8600), \ 8310 X(cpy, 4600, ea4f0000), \ 8311 X(dec_sp,80dd, f1ad0d00), \ 8312 X(eor, 4040, ea800000), \ 8313 X(eors, 4040, ea900000), \ 8314 X(inc_sp,00dd, f10d0d00), \ 8315 X(ldmia, c800, e8900000), \ 8316 X(ldr, 6800, f8500000), \ 8317 X(ldrb, 7800, f8100000), \ 8318 X(ldrh, 8800, f8300000), \ 8319 X(ldrsb, 5600, f9100000), \ 8320 X(ldrsh, 5e00, 
f9300000), \ 8321 X(ldr_pc,4800, f85f0000), \ 8322 X(ldr_pc2,4800, f85f0000), \ 8323 X(ldr_sp,9800, f85d0000), \ 8324 X(lsl, 0000, fa00f000), \ 8325 X(lsls, 0000, fa10f000), \ 8326 X(lsr, 0800, fa20f000), \ 8327 X(lsrs, 0800, fa30f000), \ 8328 X(mov, 2000, ea4f0000), \ 8329 X(movs, 2000, ea5f0000), \ 8330 X(mul, 4340, fb00f000), \ 8331 X(muls, 4340, ffffffff), /* no 32b muls */ \ 8332 X(mvn, 43c0, ea6f0000), \ 8333 X(mvns, 43c0, ea7f0000), \ 8334 X(neg, 4240, f1c00000), /* rsb #0 */ \ 8335 X(negs, 4240, f1d00000), /* rsbs #0 */ \ 8336 X(orr, 4300, ea400000), \ 8337 X(orrs, 4300, ea500000), \ 8338 X(pop, bc00, e8bd0000), /* ldmia sp!,... */ \ 8339 X(push, b400, e92d0000), /* stmdb sp!,... */ \ 8340 X(rev, ba00, fa90f080), \ 8341 X(rev16, ba40, fa90f090), \ 8342 X(revsh, bac0, fa90f0b0), \ 8343 X(ror, 41c0, fa60f000), \ 8344 X(rors, 41c0, fa70f000), \ 8345 X(sbc, 4180, eb600000), \ 8346 X(sbcs, 4180, eb700000), \ 8347 X(stmia, c000, e8800000), \ 8348 X(str, 6000, f8400000), \ 8349 X(strb, 7000, f8000000), \ 8350 X(strh, 8000, f8200000), \ 8351 X(str_sp,9000, f84d0000), \ 8352 X(sub, 1e00, eba00000), \ 8353 X(subs, 1e00, ebb00000), \ 8354 X(subi, 8000, f1a00000), \ 8355 X(subis, 8000, f1b00000), \ 8356 X(sxtb, b240, fa4ff080), \ 8357 X(sxth, b200, fa0ff080), \ 8358 X(tst, 4200, ea100f00), \ 8359 X(uxtb, b2c0, fa5ff080), \ 8360 X(uxth, b280, fa1ff080), \ 8361 X(nop, bf00, f3af8000), \ 8362 X(yield, bf10, f3af8001), \ 8363 X(wfe, bf20, f3af8002), \ 8364 X(wfi, bf30, f3af8003), \ 8365 X(sev, bf40, f3af9004), /* typo, 8004? */ 8366 8367/* To catch errors in encoding functions, the codes are all offset by 8368 0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined 8369 as 16-bit instructions. 
*/
/* Expand T16_32_TAB three times: once to build the T_MNEM_* enum
   (offset so the values are invalid as 16-bit opcodes), once for the
   table of 16-bit encodings, once for the table of 32-bit encodings.  */
#define X(a,b,c) T_MNEM_##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
/* Map a T_MNEM_* code to its 16-bit encoding.  */
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
/* Map a T_MNEM_* code to its 32-bit encoding.  */
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
/* Nonzero if the 32-bit encoding of N has the S (set-flags) bit.  */
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB

/* Thumb instruction encoders, in alphabetical order.  */

/* ADDW or SUBW.  Encodes Rd and Rn; the 12-bit immediate is left to
   the fixup via BFD_RELOC_ARM_T32_IMM12.  */
static void
do_t_add_sub_w (void)
{
  int Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;

  constraint (Rd == 15, _("PC not allowed as destination"));
  inst.instruction |= (Rn << 16) | (Rd << 8);
  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
}

/* Parse an add or subtract instruction.  We get here with inst.instruction
   equalling any of THUMB_OPCODE_add, adds, sub, or subs.  */

static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      /* In an IT block the flag-setting forms need the wide encoding;
	 outside one, the narrow forms always set flags.  */
      if (flags)
	narrow = (current_it_mask == 0);
      else
	narrow = (current_it_mask != 0);
      if (!inst.operands[2].isreg)
	{
	  /* Immediate third operand.  */
	  int add;

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
		  if (inst.size_req != 2)
		    /* Size not forced to 2: allow relaxation to 32-bit.  */
		    inst.relax = opcode;
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      /* 32-bit encoding required.  */
	      if (Rd == REG_PC)
		{
		  /* Only the exception-return form SUBS PC, LR, #const
		     is valid with PC as destination.  */
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			     _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.reloc.exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.reloc.exp.X_add_number < 0
			      || inst.reloc.exp.X_add_number > 0xff,
			      _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.reloc.exp.X_add_number;
		  inst.reloc.type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  /* Register third operand.  */
	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add)
		{
		  /* Two-register ADD (high registers allowed) when the
		     destination overlaps a source.  */
		  if (Rd == Rs)
		    {
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		  /* ... because addition is commutative! */
		  else if (Rd == Rn)
		    {
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rs << 3;
		      return;
		    }
		}
	    }
	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Non-unified (old Thumb) syntax: only the 16-bit encodings.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}

/* ADR: load a PC-relative address into a register.  Chooses between
   the narrow form (with relaxation), the 32-bit ADD PC form, and the
   forced 16-bit form.  */
static void
do_t_adr (void)
{
  if (unified_syntax && inst.size_req == 0 && inst.operands[0].reg <= 7)
    {
      /* Defer to section relaxation.  */
      inst.relax = inst.instruction;
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 4;
    }
  else if (unified_syntax && inst.size_req != 2)
    {
      /* Generate a 32-bit opcode.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
      inst.reloc.pc_rel = 1;
    }
  else
    {
      /* Generate a 16-bit opcode.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
      inst.reloc.exp.X_add_number -= 4; /* PC relative adjust.  */
      inst.reloc.pc_rel = 1;

      inst.instruction |= inst.operands[0].reg << 4;
    }
}

/* Arithmetic instructions for which there is just one 16-bit
   instruction encoding, and it allows only two low registers.
   For maximal compatibility with ARM syntax, we allow three register
   operands even when Thumb-32 instructions are not available, as long
   as the first two are identical.  For instance, both "sbc r0,r1" and
   "sbc r0,r0,r1" are allowed.
*/ 8630static void 8631do_t_arit3 (void) 8632{ 8633 int Rd, Rs, Rn; 8634 8635 Rd = inst.operands[0].reg; 8636 Rs = (inst.operands[1].present 8637 ? inst.operands[1].reg /* Rd, Rs, foo */ 8638 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */ 8639 Rn = inst.operands[2].reg; 8640 8641 if (unified_syntax) 8642 { 8643 if (!inst.operands[2].isreg) 8644 { 8645 /* For an immediate, we always generate a 32-bit opcode; 8646 section relaxation will shrink it later if possible. */ 8647 inst.instruction = THUMB_OP32 (inst.instruction); 8648 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000; 8649 inst.instruction |= Rd << 8; 8650 inst.instruction |= Rs << 16; 8651 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE; 8652 } 8653 else 8654 { 8655 bfd_boolean narrow; 8656 8657 /* See if we can do this with a 16-bit instruction. */ 8658 if (THUMB_SETS_FLAGS (inst.instruction)) 8659 narrow = current_it_mask == 0; 8660 else 8661 narrow = current_it_mask != 0; 8662 8663 if (Rd > 7 || Rn > 7 || Rs > 7) 8664 narrow = FALSE; 8665 if (inst.operands[2].shifted) 8666 narrow = FALSE; 8667 if (inst.size_req == 4) 8668 narrow = FALSE; 8669 8670 if (narrow 8671 && Rd == Rs) 8672 { 8673 inst.instruction = THUMB_OP16 (inst.instruction); 8674 inst.instruction |= Rd; 8675 inst.instruction |= Rn << 3; 8676 return; 8677 } 8678 8679 /* If we get here, it can't be done in 16 bits. */ 8680 constraint (inst.operands[2].shifted 8681 && inst.operands[2].immisreg, 8682 _("shift must be constant")); 8683 inst.instruction = THUMB_OP32 (inst.instruction); 8684 inst.instruction |= Rd << 8; 8685 inst.instruction |= Rs << 16; 8686 encode_thumb32_shifted_operand (2); 8687 } 8688 } 8689 else 8690 { 8691 /* On its face this is a lie - the instruction does set the 8692 flags. However, the only supported mnemonic in this mode 8693 says it doesn't. 
*/ 8694 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32); 8695 8696 constraint (!inst.operands[2].isreg || inst.operands[2].shifted, 8697 _("unshifted register required")); 8698 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG); 8699 constraint (Rd != Rs, 8700 _("dest and source1 must be the same register")); 8701 8702 inst.instruction = THUMB_OP16 (inst.instruction); 8703 inst.instruction |= Rd; 8704 inst.instruction |= Rn << 3; 8705 } 8706} 8707 8708/* Similarly, but for instructions where the arithmetic operation is 8709 commutative, so we can allow either of them to be different from 8710 the destination operand in a 16-bit instruction. For instance, all 8711 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are 8712 accepted. */ 8713static void 8714do_t_arit3c (void) 8715{ 8716 int Rd, Rs, Rn; 8717 8718 Rd = inst.operands[0].reg; 8719 Rs = (inst.operands[1].present 8720 ? inst.operands[1].reg /* Rd, Rs, foo */ 8721 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */ 8722 Rn = inst.operands[2].reg; 8723 8724 if (unified_syntax) 8725 { 8726 if (!inst.operands[2].isreg) 8727 { 8728 /* For an immediate, we always generate a 32-bit opcode; 8729 section relaxation will shrink it later if possible. */ 8730 inst.instruction = THUMB_OP32 (inst.instruction); 8731 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000; 8732 inst.instruction |= Rd << 8; 8733 inst.instruction |= Rs << 16; 8734 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE; 8735 } 8736 else 8737 { 8738 bfd_boolean narrow; 8739 8740 /* See if we can do this with a 16-bit instruction. 
*/ 8741 if (THUMB_SETS_FLAGS (inst.instruction)) 8742 narrow = current_it_mask == 0; 8743 else 8744 narrow = current_it_mask != 0; 8745 8746 if (Rd > 7 || Rn > 7 || Rs > 7) 8747 narrow = FALSE; 8748 if (inst.operands[2].shifted) 8749 narrow = FALSE; 8750 if (inst.size_req == 4) 8751 narrow = FALSE; 8752 8753 if (narrow) 8754 { 8755 if (Rd == Rs) 8756 { 8757 inst.instruction = THUMB_OP16 (inst.instruction); 8758 inst.instruction |= Rd; 8759 inst.instruction |= Rn << 3; 8760 return; 8761 } 8762 if (Rd == Rn) 8763 { 8764 inst.instruction = THUMB_OP16 (inst.instruction); 8765 inst.instruction |= Rd; 8766 inst.instruction |= Rs << 3; 8767 return; 8768 } 8769 } 8770 8771 /* If we get here, it can't be done in 16 bits. */ 8772 constraint (inst.operands[2].shifted 8773 && inst.operands[2].immisreg, 8774 _("shift must be constant")); 8775 inst.instruction = THUMB_OP32 (inst.instruction); 8776 inst.instruction |= Rd << 8; 8777 inst.instruction |= Rs << 16; 8778 encode_thumb32_shifted_operand (2); 8779 } 8780 } 8781 else 8782 { 8783 /* On its face this is a lie - the instruction does set the 8784 flags. However, the only supported mnemonic in this mode 8785 says it doesn't. 
*/ 8786 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32); 8787 8788 constraint (!inst.operands[2].isreg || inst.operands[2].shifted, 8789 _("unshifted register required")); 8790 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG); 8791 8792 inst.instruction = THUMB_OP16 (inst.instruction); 8793 inst.instruction |= Rd; 8794 8795 if (Rd == Rs) 8796 inst.instruction |= Rn << 3; 8797 else if (Rd == Rn) 8798 inst.instruction |= Rs << 3; 8799 else 8800 constraint (1, _("dest must overlap one source register")); 8801 } 8802} 8803 8804static void 8805do_t_barrier (void) 8806{ 8807 if (inst.operands[0].present) 8808 { 8809 constraint ((inst.instruction & 0xf0) != 0x40 8810 && inst.operands[0].imm != 0xf, 8811 "bad barrier type"); 8812 inst.instruction |= inst.operands[0].imm; 8813 } 8814 else 8815 inst.instruction |= 0xf; 8816} 8817 8818static void 8819do_t_bfc (void) 8820{ 8821 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm; 8822 constraint (msb > 32, _("bit-field extends past end of register")); 8823 /* The instruction encoding stores the LSB and MSB, 8824 not the LSB and width. */ 8825 inst.instruction |= inst.operands[0].reg << 8; 8826 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10; 8827 inst.instruction |= (inst.operands[1].imm & 0x03) << 6; 8828 inst.instruction |= msb - 1; 8829} 8830 8831static void 8832do_t_bfi (void) 8833{ 8834 unsigned int msb; 8835 8836 /* #0 in second position is alternative syntax for bfc, which is 8837 the same instruction but with REG_PC in the Rm field. */ 8838 if (!inst.operands[1].isreg) 8839 inst.operands[1].reg = REG_PC; 8840 8841 msb = inst.operands[2].imm + inst.operands[3].imm; 8842 constraint (msb > 32, _("bit-field extends past end of register")); 8843 /* The instruction encoding stores the LSB and MSB, 8844 not the LSB and width. 
*/
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
  inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
  inst.instruction |= msb - 1;
}

/* Bit-field extract (SBFX/UBFX style operands): Rd, Rn, lsb, width.
   The width is encoded as width-1.  */
static void
do_t_bfx (void)
{
  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
	      _("bit-field extends past end of register"));
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
  inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
  inst.instruction |= inst.operands[3].imm - 1;
}

/* ARM V5 Thumb BLX (argument parse)
	BLX <target_addr>	which is BLX(1)
	BLX <Rm>		which is BLX(2)
   Unfortunately, there are two different opcodes for this mnemonic.
   So, the insns[].value is not used, and the code here zaps values
	into inst.instruction.

   ??? How to take advantage of the additional two bits of displacement
   available in Thumb32 mode?  Need new relocation?  */

static void
do_t_blx (void)
{
  /* BLX, being a branch, may only end an IT block.  */
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  if (inst.operands[0].isreg)
    /* We have a register, so this is BLX(2).  */
    inst.instruction |= inst.operands[0].reg << 3;
  else
    {
      /* No register.  This must be BLX(1).  */
      inst.instruction = 0xf000e800;
#ifdef OBJ_ELF
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
#endif
	inst.reloc.type = BFD_RELOC_THUMB_PCREL_BLX;
      inst.reloc.pc_rel = 1;
    }
}

/* B / Bcc: pick the 16- or 32-bit branch encoding and the matching
   PC-relative relocation; allows relaxation for narrow forms.  */
static void
do_t_branch (void)
{
  int opcode;
  int cond;

  if (current_it_mask)
    {
      /* Conditional branches inside IT blocks are encoded as unconditional
         branches.  */
      cond = COND_ALWAYS;
      /* A branch must be the last instruction in an IT block.  */
      constraint (current_it_mask != 0x10, BAD_BRANCH);
    }
  else
    cond = inst.cond;

  if (cond != COND_ALWAYS)
    opcode = T_MNEM_bcond;
  else
    opcode = inst.instruction;

  if (unified_syntax && inst.size_req == 4)
    {
      inst.instruction = THUMB_OP32(opcode);
      if (cond == COND_ALWAYS)
	inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH25;
      else
	{
	  assert (cond != 0xF);
	  inst.instruction |= cond << 22;
	  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
    }
  else
    {
      inst.instruction = THUMB_OP16(opcode);
      if (cond == COND_ALWAYS)
	inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      else
	{
	  inst.instruction |= cond << 8;
	  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH9;
	}
      /* Allow section relaxation.  */
      if (unified_syntax && inst.size_req != 2)
	inst.relax = opcode;
    }

  inst.reloc.pc_rel = 1;
}

/* BKPT: encode the optional 8-bit immediate.  */
static void
do_t_bkpt (void)
{
  constraint (inst.cond != COND_ALWAYS,
	      _("instruction is always unconditional"));
  if (inst.operands[0].present)
    {
      constraint (inst.operands[0].imm > 255,
		  _("immediate value out of range"));
      inst.instruction |= inst.operands[0].imm;
    }
}

/* BL / BLX with a 23-bit branch-and-link relocation.  */
static void
do_t_branch23 (void)
{
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  inst.reloc.type   = BFD_RELOC_THUMB_PCREL_BRANCH23;
  inst.reloc.pc_rel = 1;

  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (	inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_symbol =
      find_real_start (inst.reloc.exp.X_add_symbol);
}

/* BX: branch to register.  */
static void
do_t_bx (void)
{
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  inst.instruction |= inst.operands[0].reg << 3;
  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.  The reloc
     should cause the alignment to be checked once it is known.  This is
     because BX PC only works if the instruction is word aligned.  */
}

/* BXJ: branch and change to Jazelle state.  */
static void
do_t_bxj (void)
{
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bxj is not really useful"));

  inst.instruction |= inst.operands[0].reg << 16;
}

/* CLZ.  Note the source register is encoded twice (bits 16-19 and
   bits 0-3), as the encoding requires.  */
static void
do_t_clz (void)
{
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}

/* CPS: change processor state (one-operand form).  */
static void
do_t_cps (void)
{
  constraint (current_it_mask, BAD_NOT_IT);
  inst.instruction |= inst.operands[0].imm;
}

/* CPSIE/CPSID: pick the 32-bit form when a mode operand is present
   (or the size requires it), otherwise the 16-bit form.  */
static void
do_t_cpsi (void)
{
  constraint (current_it_mask, BAD_NOT_IT);
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* Re-encode the IE/ID bits from the 16-bit layout into the
	 32-bit imod field.  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}

/* THUMB CPY instruction (argument parse).  */

static void
do_t_cpy (void)
{
  if (inst.size_req == 4)
    {
      /* Wide form is encoded as a MOV.  */
      inst.instruction = THUMB_OP32 (T_MNEM_mov);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].reg;
    }
  else
    {
      inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
      inst.instruction |= (inst.operands[0].reg & 0x7);
      inst.instruction |= inst.operands[1].reg << 3;
    }
}

/* CBZ/CBNZ: compare and branch on (non-)zero; low registers only,
   offset handled by a 7-bit PC-relative relocation.  */
static void
do_t_cbz (void)
{
  constraint (current_it_mask, BAD_NOT_IT);
  constraint (inst.operands[0].reg > 7, BAD_HIREG);
  inst.instruction |= inst.operands[0].reg;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
}

/* DBG: encode the 4-bit hint option.  */
static void
do_t_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}

/* SDIV/UDIV style three-register encoding; second operand defaults
   to the destination.  */
static void
do_t_div (void)
{
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
}

/* Hint instructions (NOP etc.): select 16- or 32-bit encoding.  */
static void
do_t_hint (void)
{
  if (unified_syntax && inst.size_req == 4)
    inst.instruction = THUMB_OP32 (inst.instruction);
  else
    inst.instruction = THUMB_OP16 (inst.instruction);
}

/* IT: start an IT block.  Records the condition and mask for the
   following instructions.  */
static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  constraint (current_it_mask, BAD_NOT_IT);
  current_it_mask = (inst.instruction & 0xf) | 0x10;
  current_cc = cond;

  /* If the condition is a negative condition, invert the mask.
*/
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      /* Flip the then/else bits above the lowest set bit of the
	 mask (which terminates the mask encoding).  */
      if ((mask & 0x7) == 0)
	/* no conversion needed */;
      else if ((mask & 0x3) == 0)
	mask ^= 0x8;
      else if ((mask & 0x1) == 0)
	mask ^= 0xC;
      else
	mask ^= 0xE;

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  inst.instruction |= cond << 4;
}

/* Helper function used for both push/pop and ldm/stm.  */
/* Encode a Thumb-2 load/store-multiple with base register BASE,
   register list MASK and WRITEBACK flag.  Degenerates a single-register
   list into the equivalent LDR/STR encoding.  Diagnoses invalid or
   unpredictable register lists.  */
static void
encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
{
  bfd_boolean load;

  /* Bit 20 of the pre-set opcode distinguishes load from store.  */
  load = (inst.instruction & (1 << 20)) != 0;

  if (mask & (1 << 13))
    inst.error =  _("SP not allowed in register list");
  if (load)
    {
      if (mask & (1 << 14)
	  && mask & (1 << 15))
	inst.error = _("LR and PC should not both be in register list");

      if ((mask & (1 << base)) != 0
	  && writeback)
	as_warn (_("base register should not be in register list "
		   "when written back"));
    }
  else
    {
      if (mask & (1 << 15))
	inst.error = _("PC not allowed in register list");

      if (mask & (1 << base))
	as_warn (_("value stored for r%d is UNPREDICTABLE"), base);
    }

  if ((mask & (mask - 1)) == 0)
    {
      /* Single register transfers implemented as str/ldr.  */
      if (writeback)
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
	  else
	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
	}
      else
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00800000; /* ia -> [base] */
	  else
	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
	}

      inst.instruction |= 0xf8400000;
      if (load)
	inst.instruction |= 0x00100000;

      /* Convert the one-bit mask into the register number field.  */
      mask = ffs(mask) - 1;
      mask <<= 12;
    }
  else if (writeback)
    inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  inst.instruction |= base << 16;
}

/* LDM/STM (and LDMDB/STMDB): choose between the 16-bit encodings
   (including push/pop forms) and the Thumb-2 32-bit encoding.  */
static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      bfd_boolean narrow;
      unsigned mask;

      narrow = FALSE;
      /* See if we can use a 16-bit instruction.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && !(inst.operands[1].imm & ~0xff))
	{
	  mask = 1 << inst.operands[0].reg;

	  if (inst.operands[0].reg <= 7
	      && (inst.instruction == T_MNEM_stmia
		  ? inst.operands[0].writeback
		  : (inst.operands[0].writeback
		     == !(inst.operands[1].imm & mask))))
	    {
	      if (inst.instruction == T_MNEM_stmia
		  && (inst.operands[1].imm & mask)
		  && (inst.operands[1].imm & (mask - 1)))
		as_warn (_("value stored for r%d is UNPREDICTABLE"),
			 inst.operands[0].reg);

	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].imm;
	      narrow = TRUE;
	    }
	  else if (inst.operands[0] .reg == REG_SP
		   && inst.operands[0].writeback)
	    {
	      /* SP-based with writeback maps onto PUSH/POP.  */
	      inst.instruction = THUMB_OP16 (inst.instruction == T_MNEM_stmia
					     ? T_MNEM_push : T_MNEM_pop);
	      inst.instruction |= inst.operands[1].imm;
	      narrow = TRUE;
	    }
	}

      if (!narrow)
	{
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);

	  encode_thumb2_ldmstm(inst.operands[0].reg, inst.operands[1].imm,
			       inst.operands[0].writeback);
	}
    }
  else
    {
      /* Non-unified syntax: only 16-bit LDMIA/STMIA.  */
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      constraint (inst.instruction != T_MNEM_ldmia
		  && inst.instruction != T_MNEM_stmia,
		  _("Thumb-2 instruction only valid in unified syntax"));
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNPREDICTABLE"),
		     inst.operands[0].reg);
	}
      else
	{
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}

/* LDREX: only a plain [Rn, #imm] address is valid.  */
static void
do_t_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative,
	      BAD_ADDR_MODE);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
}

/* LDREXD: second destination defaults to Rt+1 and must not equal
   the first.  */
static void
do_t_ldrexd (void)
{
  if (!inst.operands[1].present)
    {
      constraint (inst.operands[0].reg == REG_LR,
		  _("r14 not allowed as first register "
		    "when second register is omitted"));
      inst.operands[1].reg = inst.operands[0].reg + 1;
    }
  constraint (inst.operands[0].reg == inst.operands[1].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  inst.instruction |= inst.operands[2].reg << 16;
}

/* Single-register load/store (LDR/STR and byte/halfword variants):
   select among the 16-bit encodings, relaxation, literal pool loads
   and the 32-bit encoding.  */
static void
do_t_ldst (void)
{
  unsigned long opcode;
  int Rn;

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
	{
	  /* =N pseudo-address: try a move or literal pool entry.  */
	  if (opcode <= 0xffff)
	    inst.instruction = THUMB_OP32 (opcode);
	  if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
	    return;
	}
      if (inst.operands[1].isreg
	  && !inst.operands[1].writeback
	  && !inst.operands[1].shifted && !inst.operands[1].postind
	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
	  && opcode <= 0xffff
	  && inst.size_req != 4)
	{
	  /* Insn may have a 16-bit form.
*/
	  Rn = inst.operands[1].reg;
	  if (inst.operands[1].immisreg)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      /* [Rn, Ri] */
	      if (Rn <= 7 && inst.operands[1].imm <= 7)
		goto op16;
	    }
	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
		    && opcode != T_MNEM_ldrsb)
		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
		   || (Rn == REG_SP && opcode == T_MNEM_str))
	    {
	      /* [Rn, #const] */
	      if (Rn > 7)
		{
		  /* PC- or SP-relative: switch to the dedicated
		     pseudo-opcodes.  */
		  if (Rn == REG_PC)
		    {
		      if (inst.reloc.pc_rel)
			opcode = T_MNEM_ldr_pc2;
		      else
			opcode = T_MNEM_ldr_pc;
		    }
		  else
		    {
		      if (opcode == T_MNEM_ldr)
			opcode = T_MNEM_ldr_sp;
		      else
			opcode = T_MNEM_str_sp;
		    }
		  inst.instruction = inst.operands[0].reg << 8;
		}
	      else
		{
		  inst.instruction = inst.operands[0].reg;
		  inst.instruction |= inst.operands[1].reg << 3;
		}
	      inst.instruction |= THUMB_OP16 (opcode);
	      if (inst.size_req == 2)
		inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
	      else
		/* Not forced narrow: allow relaxation to 32-bit.  */
		inst.relax = opcode;
	      return;
	    }
	}
      /* Definitely a 32-bit variant.  */
      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
      return;
    }

  /* Non-unified syntax: 16-bit encodings only.  */
  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
		  || inst.operands[1].postind || inst.operands[1].shifted
		  || inst.operands[1].negative,
		  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
      return;

  constraint (!inst.operands[1].preind
	      || inst.operands[1].shifted
	      || inst.operands[1].writeback,
	      _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      /* PC- or SP-relative loads/stores have their own opcodes.  */
      constraint (inst.instruction & 0x0600,
		  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
		  && !(inst.instruction & THUMB_LOAD_BIT),
		  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
		  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
	inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
	inst.instruction = T_OPCODE_LDR_SP;
      else
	inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
	      _("Thumb does not support this addressing mode"));

 op16:
  /* Convert an immediate-offset opcode to the register-offset form.  */
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */:
    case 0x5e00 /* ldrsh */: break;
    default: abort ();
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
}

/* LDRD/STRD: second register defaults to Rt+1.  */
static void
do_t_ldstd (void)
{
  if (!inst.operands[1].present)
    {
      inst.operands[1].reg = inst.operands[0].reg + 1;
      constraint (inst.operands[0].reg == REG_LR,
		  _("r14 not allowed here"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);

}

/* Unprivileged ("translate") load/store, e.g. LDRT/STRT.  */
static void
do_t_ldstt (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
}

/* MLA/MLS style four-register multiply-accumulate encoding.  */
static void
do_t_mla (void)
{
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 12;
}

/* Long multiply-accumulate (RdLo, RdHi, Rn, Rm) encoding.  */
static void
do_t_mlal (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.instruction |= inst.operands[3].reg;
}
9504 9505static void 9506do_t_mov_cmp (void) 9507{ 9508 if (unified_syntax) 9509 { 9510 int r0off = (inst.instruction == T_MNEM_mov 9511 || inst.instruction == T_MNEM_movs) ? 8 : 16; 9512 unsigned long opcode; 9513 bfd_boolean narrow; 9514 bfd_boolean low_regs; 9515 9516 low_regs = (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7); 9517 opcode = inst.instruction; 9518 if (current_it_mask) 9519 narrow = opcode != T_MNEM_movs; 9520 else 9521 narrow = opcode != T_MNEM_movs || low_regs; 9522 if (inst.size_req == 4 9523 || inst.operands[1].shifted) 9524 narrow = FALSE; 9525 9526 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */ 9527 if (opcode == T_MNEM_movs && inst.operands[1].isreg 9528 && !inst.operands[1].shifted 9529 && inst.operands[0].reg == REG_PC 9530 && inst.operands[1].reg == REG_LR) 9531 { 9532 inst.instruction = T2_SUBS_PC_LR; 9533 return; 9534 } 9535 9536 if (!inst.operands[1].isreg) 9537 { 9538 /* Immediate operand. */ 9539 if (current_it_mask == 0 && opcode == T_MNEM_mov) 9540 narrow = 0; 9541 if (low_regs && narrow) 9542 { 9543 inst.instruction = THUMB_OP16 (opcode); 9544 inst.instruction |= inst.operands[0].reg << 8; 9545 if (inst.size_req == 2) 9546 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM; 9547 else 9548 inst.relax = opcode; 9549 } 9550 else 9551 { 9552 inst.instruction = THUMB_OP32 (inst.instruction); 9553 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000; 9554 inst.instruction |= inst.operands[0].reg << r0off; 9555 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE; 9556 } 9557 } 9558 else if (inst.operands[1].shifted && inst.operands[1].immisreg 9559 && (inst.instruction == T_MNEM_mov 9560 || inst.instruction == T_MNEM_movs)) 9561 { 9562 /* Register shifts are encoded as separate shift instructions. 
*/ 9563 bfd_boolean flags = (inst.instruction == T_MNEM_movs); 9564 9565 if (current_it_mask) 9566 narrow = !flags; 9567 else 9568 narrow = flags; 9569 9570 if (inst.size_req == 4) 9571 narrow = FALSE; 9572 9573 if (!low_regs || inst.operands[1].imm > 7) 9574 narrow = FALSE; 9575 9576 if (inst.operands[0].reg != inst.operands[1].reg) 9577 narrow = FALSE; 9578 9579 switch (inst.operands[1].shift_kind) 9580 { 9581 case SHIFT_LSL: 9582 opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl); 9583 break; 9584 case SHIFT_ASR: 9585 opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr); 9586 break; 9587 case SHIFT_LSR: 9588 opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr); 9589 break; 9590 case SHIFT_ROR: 9591 opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror); 9592 break; 9593 default: 9594 abort(); 9595 } 9596 9597 inst.instruction = opcode; 9598 if (narrow) 9599 { 9600 inst.instruction |= inst.operands[0].reg; 9601 inst.instruction |= inst.operands[1].imm << 3; 9602 } 9603 else 9604 { 9605 if (flags) 9606 inst.instruction |= CONDS_BIT; 9607 9608 inst.instruction |= inst.operands[0].reg << 8; 9609 inst.instruction |= inst.operands[1].reg << 16; 9610 inst.instruction |= inst.operands[1].imm; 9611 } 9612 } 9613 else if (!narrow) 9614 { 9615 /* Some mov with immediate shift have narrow variants. 9616 Register shifts are handled above. 
*/ 9617 if (low_regs && inst.operands[1].shifted 9618 && (inst.instruction == T_MNEM_mov 9619 || inst.instruction == T_MNEM_movs)) 9620 { 9621 if (current_it_mask) 9622 narrow = (inst.instruction == T_MNEM_mov); 9623 else 9624 narrow = (inst.instruction == T_MNEM_movs); 9625 } 9626 9627 if (narrow) 9628 { 9629 switch (inst.operands[1].shift_kind) 9630 { 9631 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break; 9632 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break; 9633 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break; 9634 default: narrow = FALSE; break; 9635 } 9636 } 9637 9638 if (narrow) 9639 { 9640 inst.instruction |= inst.operands[0].reg; 9641 inst.instruction |= inst.operands[1].reg << 3; 9642 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT; 9643 } 9644 else 9645 { 9646 inst.instruction = THUMB_OP32 (inst.instruction); 9647 inst.instruction |= inst.operands[0].reg << r0off; 9648 encode_thumb32_shifted_operand (1); 9649 } 9650 } 9651 else 9652 switch (inst.instruction) 9653 { 9654 case T_MNEM_mov: 9655 inst.instruction = T_OPCODE_MOV_HR; 9656 inst.instruction |= (inst.operands[0].reg & 0x8) << 4; 9657 inst.instruction |= (inst.operands[0].reg & 0x7); 9658 inst.instruction |= inst.operands[1].reg << 3; 9659 break; 9660 9661 case T_MNEM_movs: 9662 /* We know we have low registers at this point. 9663 Generate ADD Rd, Rs, #0. 
*/ 9664 inst.instruction = T_OPCODE_ADD_I3; 9665 inst.instruction |= inst.operands[0].reg; 9666 inst.instruction |= inst.operands[1].reg << 3; 9667 break; 9668 9669 case T_MNEM_cmp: 9670 if (low_regs) 9671 { 9672 inst.instruction = T_OPCODE_CMP_LR; 9673 inst.instruction |= inst.operands[0].reg; 9674 inst.instruction |= inst.operands[1].reg << 3; 9675 } 9676 else 9677 { 9678 inst.instruction = T_OPCODE_CMP_HR; 9679 inst.instruction |= (inst.operands[0].reg & 0x8) << 4; 9680 inst.instruction |= (inst.operands[0].reg & 0x7); 9681 inst.instruction |= inst.operands[1].reg << 3; 9682 } 9683 break; 9684 } 9685 return; 9686 } 9687 9688 inst.instruction = THUMB_OP16 (inst.instruction); 9689 if (inst.operands[1].isreg) 9690 { 9691 if (inst.operands[0].reg < 8 && inst.operands[1].reg < 8) 9692 { 9693 /* A move of two lowregs is encoded as ADD Rd, Rs, #0 9694 since a MOV instruction produces unpredictable results. */ 9695 if (inst.instruction == T_OPCODE_MOV_I8) 9696 inst.instruction = T_OPCODE_ADD_I3; 9697 else 9698 inst.instruction = T_OPCODE_CMP_LR; 9699 9700 inst.instruction |= inst.operands[0].reg; 9701 inst.instruction |= inst.operands[1].reg << 3; 9702 } 9703 else 9704 { 9705 if (inst.instruction == T_OPCODE_MOV_I8) 9706 inst.instruction = T_OPCODE_MOV_HR; 9707 else 9708 inst.instruction = T_OPCODE_CMP_HR; 9709 do_t_cpy (); 9710 } 9711 } 9712 else 9713 { 9714 constraint (inst.operands[0].reg > 7, 9715 _("only lo regs allowed with immediate")); 9716 inst.instruction |= inst.operands[0].reg << 8; 9717 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM; 9718 } 9719} 9720 9721static void 9722do_t_mov16 (void) 9723{ 9724 bfd_vma imm; 9725 bfd_boolean top; 9726 9727 top = (inst.instruction & 0x00800000) != 0; 9728 if (inst.reloc.type == BFD_RELOC_ARM_MOVW) 9729 { 9730 constraint (top, _(":lower16: not allowed this instruction")); 9731 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW; 9732 } 9733 else if (inst.reloc.type == BFD_RELOC_ARM_MOVT) 9734 { 9735 constraint (!top, _(":upper16: not 
allowed this instruction")); 9736 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT; 9737 } 9738 9739 inst.instruction |= inst.operands[0].reg << 8; 9740 if (inst.reloc.type == BFD_RELOC_UNUSED) 9741 { 9742 imm = inst.reloc.exp.X_add_number; 9743 inst.instruction |= (imm & 0xf000) << 4; 9744 inst.instruction |= (imm & 0x0800) << 15; 9745 inst.instruction |= (imm & 0x0700) << 4; 9746 inst.instruction |= (imm & 0x00ff); 9747 } 9748} 9749 9750static void 9751do_t_mvn_tst (void) 9752{ 9753 if (unified_syntax) 9754 { 9755 int r0off = (inst.instruction == T_MNEM_mvn 9756 || inst.instruction == T_MNEM_mvns) ? 8 : 16; 9757 bfd_boolean narrow; 9758 9759 if (inst.size_req == 4 9760 || inst.instruction > 0xffff 9761 || inst.operands[1].shifted 9762 || inst.operands[0].reg > 7 || inst.operands[1].reg > 7) 9763 narrow = FALSE; 9764 else if (inst.instruction == T_MNEM_cmn) 9765 narrow = TRUE; 9766 else if (THUMB_SETS_FLAGS (inst.instruction)) 9767 narrow = (current_it_mask == 0); 9768 else 9769 narrow = (current_it_mask != 0); 9770 9771 if (!inst.operands[1].isreg) 9772 { 9773 /* For an immediate, we always generate a 32-bit opcode; 9774 section relaxation will shrink it later if possible. */ 9775 if (inst.instruction < 0xffff) 9776 inst.instruction = THUMB_OP32 (inst.instruction); 9777 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000; 9778 inst.instruction |= inst.operands[0].reg << r0off; 9779 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE; 9780 } 9781 else 9782 { 9783 /* See if we can do this with a 16-bit instruction. 
*/ 9784 if (narrow) 9785 { 9786 inst.instruction = THUMB_OP16 (inst.instruction); 9787 inst.instruction |= inst.operands[0].reg; 9788 inst.instruction |= inst.operands[1].reg << 3; 9789 } 9790 else 9791 { 9792 constraint (inst.operands[1].shifted 9793 && inst.operands[1].immisreg, 9794 _("shift must be constant")); 9795 if (inst.instruction < 0xffff) 9796 inst.instruction = THUMB_OP32 (inst.instruction); 9797 inst.instruction |= inst.operands[0].reg << r0off; 9798 encode_thumb32_shifted_operand (1); 9799 } 9800 } 9801 } 9802 else 9803 { 9804 constraint (inst.instruction > 0xffff 9805 || inst.instruction == T_MNEM_mvns, BAD_THUMB32); 9806 constraint (!inst.operands[1].isreg || inst.operands[1].shifted, 9807 _("unshifted register required")); 9808 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7, 9809 BAD_HIREG); 9810 9811 inst.instruction = THUMB_OP16 (inst.instruction); 9812 inst.instruction |= inst.operands[0].reg; 9813 inst.instruction |= inst.operands[1].reg << 3; 9814 } 9815} 9816 9817static void 9818do_t_mrs (void) 9819{ 9820 int flags; 9821 9822 if (do_vfp_nsyn_mrs () == SUCCESS) 9823 return; 9824 9825 flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT); 9826 if (flags == 0) 9827 { 9828 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m), 9829 _("selected processor does not support " 9830 "requested special purpose register")); 9831 } 9832 else 9833 { 9834 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1), 9835 _("selected processor does not support " 9836 "requested special purpose register %x")); 9837 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. 
*/ 9838 constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f), 9839 _("'CPSR' or 'SPSR' expected")); 9840 } 9841 9842 inst.instruction |= inst.operands[0].reg << 8; 9843 inst.instruction |= (flags & SPSR_BIT) >> 2; 9844 inst.instruction |= inst.operands[1].imm & 0xff; 9845} 9846 9847static void 9848do_t_msr (void) 9849{ 9850 int flags; 9851 9852 if (do_vfp_nsyn_msr () == SUCCESS) 9853 return; 9854 9855 constraint (!inst.operands[1].isreg, 9856 _("Thumb encoding does not support an immediate here")); 9857 flags = inst.operands[0].imm; 9858 if (flags & ~0xff) 9859 { 9860 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1), 9861 _("selected processor does not support " 9862 "requested special purpose register")); 9863 } 9864 else 9865 { 9866 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m), 9867 _("selected processor does not support " 9868 "requested special purpose register")); 9869 flags |= PSR_f; 9870 } 9871 inst.instruction |= (flags & SPSR_BIT) >> 2; 9872 inst.instruction |= (flags & ~SPSR_BIT) >> 8; 9873 inst.instruction |= (flags & 0xff); 9874 inst.instruction |= inst.operands[1].reg << 16; 9875} 9876 9877static void 9878do_t_mul (void) 9879{ 9880 if (!inst.operands[2].present) 9881 inst.operands[2].reg = inst.operands[0].reg; 9882 9883 /* There is no 32-bit MULS and no 16-bit MUL. 
*/ 9884 if (unified_syntax && inst.instruction == T_MNEM_mul) 9885 { 9886 inst.instruction = THUMB_OP32 (inst.instruction); 9887 inst.instruction |= inst.operands[0].reg << 8; 9888 inst.instruction |= inst.operands[1].reg << 16; 9889 inst.instruction |= inst.operands[2].reg << 0; 9890 } 9891 else 9892 { 9893 constraint (!unified_syntax 9894 && inst.instruction == T_MNEM_muls, BAD_THUMB32); 9895 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7, 9896 BAD_HIREG); 9897 9898 inst.instruction = THUMB_OP16 (inst.instruction); 9899 inst.instruction |= inst.operands[0].reg; 9900 9901 if (inst.operands[0].reg == inst.operands[1].reg) 9902 inst.instruction |= inst.operands[2].reg << 3; 9903 else if (inst.operands[0].reg == inst.operands[2].reg) 9904 inst.instruction |= inst.operands[1].reg << 3; 9905 else 9906 constraint (1, _("dest must overlap one source register")); 9907 } 9908} 9909 9910static void 9911do_t_mull (void) 9912{ 9913 inst.instruction |= inst.operands[0].reg << 12; 9914 inst.instruction |= inst.operands[1].reg << 8; 9915 inst.instruction |= inst.operands[2].reg << 16; 9916 inst.instruction |= inst.operands[3].reg; 9917 9918 if (inst.operands[0].reg == inst.operands[1].reg) 9919 as_tsktsk (_("rdhi and rdlo must be different")); 9920} 9921 9922static void 9923do_t_nop (void) 9924{ 9925 if (unified_syntax) 9926 { 9927 if (inst.size_req == 4 || inst.operands[0].imm > 15) 9928 { 9929 inst.instruction = THUMB_OP32 (inst.instruction); 9930 inst.instruction |= inst.operands[0].imm; 9931 } 9932 else 9933 { 9934 inst.instruction = THUMB_OP16 (inst.instruction); 9935 inst.instruction |= inst.operands[0].imm << 4; 9936 } 9937 } 9938 else 9939 { 9940 constraint (inst.operands[0].present, 9941 _("Thumb does not support NOP with hints")); 9942 inst.instruction = 0x46c0; 9943 } 9944} 9945 9946static void 9947do_t_neg (void) 9948{ 9949 if (unified_syntax) 9950 { 9951 bfd_boolean narrow; 9952 9953 if (THUMB_SETS_FLAGS (inst.instruction)) 9954 narrow = 
(current_it_mask == 0); 9955 else 9956 narrow = (current_it_mask != 0); 9957 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7) 9958 narrow = FALSE; 9959 if (inst.size_req == 4) 9960 narrow = FALSE; 9961 9962 if (!narrow) 9963 { 9964 inst.instruction = THUMB_OP32 (inst.instruction); 9965 inst.instruction |= inst.operands[0].reg << 8; 9966 inst.instruction |= inst.operands[1].reg << 16; 9967 } 9968 else 9969 { 9970 inst.instruction = THUMB_OP16 (inst.instruction); 9971 inst.instruction |= inst.operands[0].reg; 9972 inst.instruction |= inst.operands[1].reg << 3; 9973 } 9974 } 9975 else 9976 { 9977 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7, 9978 BAD_HIREG); 9979 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32); 9980 9981 inst.instruction = THUMB_OP16 (inst.instruction); 9982 inst.instruction |= inst.operands[0].reg; 9983 inst.instruction |= inst.operands[1].reg << 3; 9984 } 9985} 9986 9987static void 9988do_t_pkhbt (void) 9989{ 9990 inst.instruction |= inst.operands[0].reg << 8; 9991 inst.instruction |= inst.operands[1].reg << 16; 9992 inst.instruction |= inst.operands[2].reg; 9993 if (inst.operands[3].present) 9994 { 9995 unsigned int val = inst.reloc.exp.X_add_number; 9996 constraint (inst.reloc.exp.X_op != O_constant, 9997 _("expression too complex")); 9998 inst.instruction |= (val & 0x1c) << 10; 9999 inst.instruction |= (val & 0x03) << 6; 10000 } 10001} 10002 10003static void 10004do_t_pkhtb (void) 10005{ 10006 if (!inst.operands[3].present) 10007 inst.instruction &= ~0x00000020; 10008 do_t_pkhbt (); 10009} 10010 10011static void 10012do_t_pld (void) 10013{ 10014 encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE); 10015} 10016 10017static void 10018do_t_push_pop (void) 10019{ 10020 unsigned mask; 10021 10022 constraint (inst.operands[0].writeback, 10023 _("push/pop do not support {reglist}^")); 10024 constraint (inst.reloc.type != BFD_RELOC_UNUSED, 10025 _("expression too complex")); 10026 10027 mask = 
inst.operands[0].imm; 10028 if ((mask & ~0xff) == 0) 10029 inst.instruction = THUMB_OP16 (inst.instruction) | mask; 10030 else if ((inst.instruction == T_MNEM_push 10031 && (mask & ~0xff) == 1 << REG_LR) 10032 || (inst.instruction == T_MNEM_pop 10033 && (mask & ~0xff) == 1 << REG_PC)) 10034 { 10035 inst.instruction = THUMB_OP16 (inst.instruction); 10036 inst.instruction |= THUMB_PP_PC_LR; 10037 inst.instruction |= mask & 0xff; 10038 } 10039 else if (unified_syntax) 10040 { 10041 inst.instruction = THUMB_OP32 (inst.instruction); 10042 encode_thumb2_ldmstm(13, mask, TRUE); 10043 } 10044 else 10045 { 10046 inst.error = _("invalid register list to push/pop instruction"); 10047 return; 10048 } 10049} 10050 10051static void 10052do_t_rbit (void) 10053{ 10054 inst.instruction |= inst.operands[0].reg << 8; 10055 inst.instruction |= inst.operands[1].reg << 16; 10056} 10057 10058static void 10059do_t_rd_rm (void) 10060{ 10061 inst.instruction |= inst.operands[0].reg << 8; 10062 inst.instruction |= inst.operands[1].reg; 10063} 10064 10065static void 10066do_t_rev (void) 10067{ 10068 if (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7 10069 && inst.size_req != 4) 10070 { 10071 inst.instruction = THUMB_OP16 (inst.instruction); 10072 inst.instruction |= inst.operands[0].reg; 10073 inst.instruction |= inst.operands[1].reg << 3; 10074 } 10075 else if (unified_syntax) 10076 { 10077 inst.instruction = THUMB_OP32 (inst.instruction); 10078 inst.instruction |= inst.operands[0].reg << 8; 10079 inst.instruction |= inst.operands[1].reg << 16; 10080 inst.instruction |= inst.operands[1].reg; 10081 } 10082 else 10083 inst.error = BAD_HIREG; 10084} 10085 10086static void 10087do_t_rsb (void) 10088{ 10089 int Rd, Rs; 10090 10091 Rd = inst.operands[0].reg; 10092 Rs = (inst.operands[1].present 10093 ? 
inst.operands[1].reg /* Rd, Rs, foo */ 10094 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */ 10095 10096 inst.instruction |= Rd << 8; 10097 inst.instruction |= Rs << 16; 10098 if (!inst.operands[2].isreg) 10099 { 10100 bfd_boolean narrow; 10101 10102 if ((inst.instruction & 0x00100000) != 0) 10103 narrow = (current_it_mask == 0); 10104 else 10105 narrow = (current_it_mask != 0); 10106 10107 if (Rd > 7 || Rs > 7) 10108 narrow = FALSE; 10109 10110 if (inst.size_req == 4 || !unified_syntax) 10111 narrow = FALSE; 10112 10113 if (inst.reloc.exp.X_op != O_constant 10114 || inst.reloc.exp.X_add_number != 0) 10115 narrow = FALSE; 10116 10117 /* Turn rsb #0 into 16-bit neg. We should probably do this via 10118 relaxation, but it doesn't seem worth the hassle. */ 10119 if (narrow) 10120 { 10121 inst.reloc.type = BFD_RELOC_UNUSED; 10122 inst.instruction = THUMB_OP16 (T_MNEM_negs); 10123 inst.instruction |= Rs << 3; 10124 inst.instruction |= Rd; 10125 } 10126 else 10127 { 10128 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000; 10129 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE; 10130 } 10131 } 10132 else 10133 encode_thumb32_shifted_operand (2); 10134} 10135 10136static void 10137do_t_setend (void) 10138{ 10139 constraint (current_it_mask, BAD_NOT_IT); 10140 if (inst.operands[0].imm) 10141 inst.instruction |= 0x8; 10142} 10143 10144static void 10145do_t_shift (void) 10146{ 10147 if (!inst.operands[1].present) 10148 inst.operands[1].reg = inst.operands[0].reg; 10149 10150 if (unified_syntax) 10151 { 10152 bfd_boolean narrow; 10153 int shift_kind; 10154 10155 switch (inst.instruction) 10156 { 10157 case T_MNEM_asr: 10158 case T_MNEM_asrs: shift_kind = SHIFT_ASR; break; 10159 case T_MNEM_lsl: 10160 case T_MNEM_lsls: shift_kind = SHIFT_LSL; break; 10161 case T_MNEM_lsr: 10162 case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break; 10163 case T_MNEM_ror: 10164 case T_MNEM_rors: shift_kind = SHIFT_ROR; break; 10165 default: abort (); 10166 } 10167 10168 if 
(THUMB_SETS_FLAGS (inst.instruction)) 10169 narrow = (current_it_mask == 0); 10170 else 10171 narrow = (current_it_mask != 0); 10172 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7) 10173 narrow = FALSE; 10174 if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR) 10175 narrow = FALSE; 10176 if (inst.operands[2].isreg 10177 && (inst.operands[1].reg != inst.operands[0].reg 10178 || inst.operands[2].reg > 7)) 10179 narrow = FALSE; 10180 if (inst.size_req == 4) 10181 narrow = FALSE; 10182 10183 if (!narrow) 10184 { 10185 if (inst.operands[2].isreg) 10186 { 10187 inst.instruction = THUMB_OP32 (inst.instruction); 10188 inst.instruction |= inst.operands[0].reg << 8; 10189 inst.instruction |= inst.operands[1].reg << 16; 10190 inst.instruction |= inst.operands[2].reg; 10191 } 10192 else 10193 { 10194 inst.operands[1].shifted = 1; 10195 inst.operands[1].shift_kind = shift_kind; 10196 inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction) 10197 ? T_MNEM_movs : T_MNEM_mov); 10198 inst.instruction |= inst.operands[0].reg << 8; 10199 encode_thumb32_shifted_operand (1); 10200 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. 
*/ 10201 inst.reloc.type = BFD_RELOC_UNUSED; 10202 } 10203 } 10204 else 10205 { 10206 if (inst.operands[2].isreg) 10207 { 10208 switch (shift_kind) 10209 { 10210 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break; 10211 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break; 10212 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break; 10213 case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break; 10214 default: abort (); 10215 } 10216 10217 inst.instruction |= inst.operands[0].reg; 10218 inst.instruction |= inst.operands[2].reg << 3; 10219 } 10220 else 10221 { 10222 switch (shift_kind) 10223 { 10224 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break; 10225 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break; 10226 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break; 10227 default: abort (); 10228 } 10229 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT; 10230 inst.instruction |= inst.operands[0].reg; 10231 inst.instruction |= inst.operands[1].reg << 3; 10232 } 10233 } 10234 } 10235 else 10236 { 10237 constraint (inst.operands[0].reg > 7 10238 || inst.operands[1].reg > 7, BAD_HIREG); 10239 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32); 10240 10241 if (inst.operands[2].isreg) /* Rd, {Rs,} Rn */ 10242 { 10243 constraint (inst.operands[2].reg > 7, BAD_HIREG); 10244 constraint (inst.operands[0].reg != inst.operands[1].reg, 10245 _("source1 and dest must be same register")); 10246 10247 switch (inst.instruction) 10248 { 10249 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break; 10250 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break; 10251 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break; 10252 case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break; 10253 default: abort (); 10254 } 10255 10256 inst.instruction |= inst.operands[0].reg; 10257 inst.instruction |= inst.operands[2].reg << 3; 10258 } 10259 else 10260 { 10261 switch (inst.instruction) 10262 { 10263 case T_MNEM_asr: inst.instruction = 
T_OPCODE_ASR_I; break; 10264 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break; 10265 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break; 10266 case T_MNEM_ror: inst.error = _("ror #imm not supported"); return; 10267 default: abort (); 10268 } 10269 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT; 10270 inst.instruction |= inst.operands[0].reg; 10271 inst.instruction |= inst.operands[1].reg << 3; 10272 } 10273 } 10274} 10275 10276static void 10277do_t_simd (void) 10278{ 10279 inst.instruction |= inst.operands[0].reg << 8; 10280 inst.instruction |= inst.operands[1].reg << 16; 10281 inst.instruction |= inst.operands[2].reg; 10282} 10283 10284static void 10285do_t_smc (void) 10286{ 10287 unsigned int value = inst.reloc.exp.X_add_number; 10288 constraint (inst.reloc.exp.X_op != O_constant, 10289 _("expression too complex")); 10290 inst.reloc.type = BFD_RELOC_UNUSED; 10291 inst.instruction |= (value & 0xf000) >> 12; 10292 inst.instruction |= (value & 0x0ff0); 10293 inst.instruction |= (value & 0x000f) << 16; 10294} 10295 10296static void 10297do_t_ssat (void) 10298{ 10299 inst.instruction |= inst.operands[0].reg << 8; 10300 inst.instruction |= inst.operands[1].imm - 1; 10301 inst.instruction |= inst.operands[2].reg << 16; 10302 10303 if (inst.operands[3].present) 10304 { 10305 constraint (inst.reloc.exp.X_op != O_constant, 10306 _("expression too complex")); 10307 10308 if (inst.reloc.exp.X_add_number != 0) 10309 { 10310 if (inst.operands[3].shift_kind == SHIFT_ASR) 10311 inst.instruction |= 0x00200000; /* sh bit */ 10312 inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10; 10313 inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6; 10314 } 10315 inst.reloc.type = BFD_RELOC_UNUSED; 10316 } 10317} 10318 10319static void 10320do_t_ssat16 (void) 10321{ 10322 inst.instruction |= inst.operands[0].reg << 8; 10323 inst.instruction |= inst.operands[1].imm - 1; 10324 inst.instruction |= inst.operands[2].reg << 16; 10325} 10326 10327static void 
10328do_t_strex (void) 10329{ 10330 constraint (!inst.operands[2].isreg || !inst.operands[2].preind 10331 || inst.operands[2].postind || inst.operands[2].writeback 10332 || inst.operands[2].immisreg || inst.operands[2].shifted 10333 || inst.operands[2].negative, 10334 BAD_ADDR_MODE); 10335 10336 inst.instruction |= inst.operands[0].reg << 8; 10337 inst.instruction |= inst.operands[1].reg << 12; 10338 inst.instruction |= inst.operands[2].reg << 16; 10339 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8; 10340} 10341 10342static void 10343do_t_strexd (void) 10344{ 10345 if (!inst.operands[2].present) 10346 inst.operands[2].reg = inst.operands[1].reg + 1; 10347 10348 constraint (inst.operands[0].reg == inst.operands[1].reg 10349 || inst.operands[0].reg == inst.operands[2].reg 10350 || inst.operands[0].reg == inst.operands[3].reg 10351 || inst.operands[1].reg == inst.operands[2].reg, 10352 BAD_OVERLAP); 10353 10354 inst.instruction |= inst.operands[0].reg; 10355 inst.instruction |= inst.operands[1].reg << 12; 10356 inst.instruction |= inst.operands[2].reg << 8; 10357 inst.instruction |= inst.operands[3].reg << 16; 10358} 10359 10360static void 10361do_t_sxtah (void) 10362{ 10363 inst.instruction |= inst.operands[0].reg << 8; 10364 inst.instruction |= inst.operands[1].reg << 16; 10365 inst.instruction |= inst.operands[2].reg; 10366 inst.instruction |= inst.operands[3].imm << 4; 10367} 10368 10369static void 10370do_t_sxth (void) 10371{ 10372 if (inst.instruction <= 0xffff && inst.size_req != 4 10373 && inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7 10374 && (!inst.operands[2].present || inst.operands[2].imm == 0)) 10375 { 10376 inst.instruction = THUMB_OP16 (inst.instruction); 10377 inst.instruction |= inst.operands[0].reg; 10378 inst.instruction |= inst.operands[1].reg << 3; 10379 } 10380 else if (unified_syntax) 10381 { 10382 if (inst.instruction <= 0xffff) 10383 inst.instruction = THUMB_OP32 (inst.instruction); 10384 inst.instruction |= inst.operands[0].reg << 
8; 10385 inst.instruction |= inst.operands[1].reg; 10386 inst.instruction |= inst.operands[2].imm << 4; 10387 } 10388 else 10389 { 10390 constraint (inst.operands[2].present && inst.operands[2].imm != 0, 10391 _("Thumb encoding does not support rotation")); 10392 constraint (1, BAD_HIREG); 10393 } 10394} 10395 10396static void 10397do_t_swi (void) 10398{ 10399 inst.reloc.type = BFD_RELOC_ARM_SWI; 10400} 10401 10402static void 10403do_t_tb (void) 10404{ 10405 int half; 10406 10407 half = (inst.instruction & 0x10) != 0; 10408 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH); 10409 constraint (inst.operands[0].immisreg, 10410 _("instruction requires register index")); 10411 constraint (inst.operands[0].imm == 15, 10412 _("PC is not a valid index register")); 10413 constraint (!half && inst.operands[0].shifted, 10414 _("instruction does not allow shifted index")); 10415 inst.instruction |= (inst.operands[0].reg << 16) | inst.operands[0].imm; 10416} 10417 10418static void 10419do_t_usat (void) 10420{ 10421 inst.instruction |= inst.operands[0].reg << 8; 10422 inst.instruction |= inst.operands[1].imm; 10423 inst.instruction |= inst.operands[2].reg << 16; 10424 10425 if (inst.operands[3].present) 10426 { 10427 constraint (inst.reloc.exp.X_op != O_constant, 10428 _("expression too complex")); 10429 if (inst.reloc.exp.X_add_number != 0) 10430 { 10431 if (inst.operands[3].shift_kind == SHIFT_ASR) 10432 inst.instruction |= 0x00200000; /* sh bit */ 10433 10434 inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10; 10435 inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6; 10436 } 10437 inst.reloc.type = BFD_RELOC_UNUSED; 10438 } 10439} 10440 10441static void 10442do_t_usat16 (void) 10443{ 10444 inst.instruction |= inst.operands[0].reg << 8; 10445 inst.instruction |= inst.operands[1].imm; 10446 inst.instruction |= inst.operands[2].reg << 16; 10447} 10448 10449/* Neon instruction encoder helpers. 
*/

/* Encodings for the different types for various Neon opcodes.  */

/* An "invalid" code for the following tables.  */
#define N_INV -1u

/* One row of NEON_ENC_TAB: the up-to-three alternative base encodings
   for a single overloaded Neon mnemonic.  A field set to N_INV means
   that variant does not exist for the mnemonic.  */
struct neon_tab_entry
{
  unsigned integer;
  unsigned float_or_poly;
  unsigned scalar_or_imm;
};

/* Map overloaded Neon opcodes to their respective encodings.  The three
   values per mnemonic are (integer, float-or-poly, scalar-or-immediate);
   the NEON_ENC_* accessors below select among them.  */
#define NEON_ENC_TAB					\
  X(vabd,	0x0000700, 0x1200d00, N_INV),		\
  X(vmax,	0x0000600, 0x0000f00, N_INV),		\
  X(vmin,	0x0000610, 0x0200f00, N_INV),		\
  X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
  X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
  X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
  X(vadd,	0x0000800, 0x0000d00, N_INV),		\
  X(vsub,	0x1000800, 0x0200d00, N_INV),		\
  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed.  */  	\
  X(vclt,	0x0000300, 0x1200e00, 0x1b10200),	\
  X(vcle,	0x0000310, 0x1000e00, 0x1b10180),	\
  X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
  X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
  X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal,	0x0800800, N_INV,     0x0800240),	\
  X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
  X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
  X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),	\
  X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),	\
  X(vshl,	0x0000400, N_INV,     0x0800510),	\
  X(vqshl,	0x0000410, N_INV,     0x0800710),	\
  X(vand,	0x0000110, N_INV,     0x0800030),	\
  X(vbic,	0x0100110, N_INV,     0x0800030),	\
  X(veor,	0x1000110, N_INV,     N_INV),		\
  X(vorn,	0x0300110, N_INV,     0x0800010),	\
  X(vorr,	0x0200110, N_INV,     0x0800010),	\
  X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
  X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
  X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
  X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1,	0x0000000, 0x0800000, N_INV),		\
  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
  X(vst2,	0x0000100, 0x0800100, N_INV),		\
  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
  X(vst3,	0x0000200, 0x0800200, N_INV),		\
  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
  X(vst4,	0x0000300, 0x0800300, N_INV),		\
  X(vmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vtrn,	0x1b20080, N_INV,     N_INV),		\
  X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vqmovun,	0x1b20240, N_INV,     N_INV),		\
  X(vnmul,      0xe200a40, 0xe200b40, N_INV),		\
  X(vnmla,      0xe000a40, 0xe000b40, N_INV),		\
  X(vnmls,      0xe100a40, 0xe100b40, N_INV),		\
  X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),		\
  X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),		\
  X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),		\
  X(vcmpez,	0xeb50ac0, 0xeb50bc0, N_INV)

/* Mnemonic codes N_MNEM_<opc>, in table order.  These are stored in
   inst.instruction before encoding, hence the 0x0fffffff masking in the
   accessor macros below.  */
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};

/* The encoding table itself, indexed by N_MNEM_* values.  */
static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};

/* Accessors for the three columns of neon_enc_tab.  Several names alias
   the same column; the name documents which variant the caller wants.  */
#define NEON_ENC_INTEGER(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
/* The SINGLE/DOUBLE variants also keep the top nibble (condition bits)
   of the incoming value.  */
#define NEON_ENC_SINGLE(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))

/* Define shapes for instruction operands. The following mnemonic characters
   are used in this table:

     F - VFP S<n> register
     D - Neon D<n> register
     Q - Neon Q<n> register
     I - Immediate
     S - Scalar
     R - ARM register
     L - D<n> register list

   This table is used to generate various data:
     - enumerations of the form NS_DDR to be used as arguments to
       neon_select_shape.
     - a table classifying shapes into single, double, quad, mixed.
     - a table used to drive neon_select_shape.
*/

/* The shape table: X(operand-count, (operand kinds...), class).  Expanded
   several times below with different definitions of X and S2/S3/S4.  */
#define NEON_SHAPE_DEF			\
  X(3, (D, D, D), DOUBLE),		\
  X(3, (Q, Q, Q), QUAD),		\
  X(3, (D, D, I), DOUBLE),		\
  X(3, (Q, Q, I), QUAD),		\
  X(3, (D, D, S), DOUBLE),		\
  X(3, (Q, Q, S), QUAD),		\
  X(2, (D, D), DOUBLE),			\
  X(2, (Q, Q), QUAD),			\
  X(2, (D, S), DOUBLE),			\
  X(2, (Q, S), QUAD),			\
  X(2, (D, R), DOUBLE),			\
  X(2, (Q, R), QUAD),			\
  X(2, (D, I), DOUBLE),			\
  X(2, (Q, I), QUAD),			\
  X(3, (D, L, D), DOUBLE),		\
  X(2, (D, Q), MIXED),			\
  X(2, (Q, D), MIXED),			\
  X(3, (D, Q, I), MIXED),		\
  X(3, (Q, D, I), MIXED),		\
  X(3, (Q, D, D), MIXED),		\
  X(3, (D, Q, Q), MIXED),		\
  X(3, (Q, Q, D), MIXED),		\
  X(3, (Q, D, S), MIXED),		\
  X(3, (D, Q, S), MIXED),		\
  X(4, (D, D, D, I), DOUBLE),		\
  X(4, (Q, Q, Q, I), QUAD),		\
  X(2, (F, F), SINGLE),			\
  X(3, (F, F, F), SINGLE),		\
  X(2, (F, I), SINGLE),			\
  X(2, (F, D), MIXED),			\
  X(2, (D, F), MIXED),			\
  X(3, (F, F, I), MIXED),		\
  X(4, (R, R, F, F), SINGLE),		\
  X(4, (F, F, R, R), SINGLE),		\
  X(3, (D, R, R), DOUBLE),		\
  X(3, (R, R, D), DOUBLE),		\
  X(2, (S, R), SINGLE),			\
  X(2, (R, S), SINGLE),			\
  X(2, (F, R), SINGLE),			\
  X(2, (R, F), SINGLE)

/* Glue helpers: paste operand-kind letters into an NS_xxx identifier.  */
#define S2(A,B)		NS_##A##B
#define S3(A,B,C)	NS_##A##B##C
#define S4(A,B,C,D)	NS_##A##B##C##D

#define X(N, L, C) S##N L

/* Enumeration of all shapes (NS_DDD, NS_QQI, ...), plus NS_NULL as the
   argument-list terminator / failure value for neon_select_shape.  */
enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL
};

#undef X
#undef S2
#undef S3
#undef S4

enum neon_shape_class
{
  SC_SINGLE,
  SC_DOUBLE,
  SC_QUAD,
  SC_MIXED
};

#define X(N, L, C) SC_##C

/* Class of each shape, indexed by enum neon_shape.  */
static enum neon_shape_class neon_shape_class[] =
{
  NEON_SHAPE_DEF
};

#undef X

/* The individual operand kinds a shape can contain.  */
enum neon_shape_el
{
  SE_F,
  SE_D,
  SE_Q,
  SE_I,
  SE_S,
  SE_R,
  SE_L
};

/* Register widths of above.  */
static unsigned neon_shape_el_size[] =
{
  32,
  64,
  128,
  0,
  32,
  32,
  0
};

/* A decoded shape: number of operands and the kind of each.  */
struct neon_shape_info
{
  unsigned els;
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];
};

#define S2(A,B)		{ SE_##A, SE_##B }
#define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }

#define X(N, L, C) { N, S##N L }

/* Decoded form of each shape, indexed by enum neon_shape; drives
   neon_select_shape below.  */
static struct neon_shape_info neon_shape_tab[] =
{
  NEON_SHAPE_DEF
};

#undef X
#undef S2
#undef S3
#undef S4

/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.  */

enum neon_type_mask
{
  N_S8   = 0x000001,
  N_S16  = 0x000002,
  N_S32  = 0x000004,
  N_S64  = 0x000008,
  N_U8   = 0x000010,
  N_U16  = 0x000020,
  N_U32  = 0x000040,
  N_U64  = 0x000080,
  N_I8   = 0x000100,
  N_I16  = 0x000200,
  N_I32  = 0x000400,
  N_I64  = 0x000800,
  N_8    = 0x001000,
  N_16   = 0x002000,
  N_32   = 0x004000,
  N_64   = 0x008000,
  N_P8   = 0x010000,
  N_P16  = 0x020000,
  N_F32  = 0x040000,
  N_F64  = 0x080000,
  N_KEY  = 0x100000, /* key element (main type specifier).  */
  N_EQK  = 0x200000, /* given operand has the same type & size as the key.  */
  N_VFP  = 0x400000, /* VFP mode: operand size must match register width.  */
  /* The following overlap the low type bits; they are only meaningful
     when N_EQK is also set.  */
  N_DBL  = 0x000001, /* if N_EQK, this operand is twice the size.  */
  N_HLF  = 0x000002, /* if N_EQK, this operand is half the size.  */
  N_SGN  = 0x000004, /* if N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x000008, /* if N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x000010, /* if N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x000020, /* if N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x000040, /* if N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_F64
};

#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_SUF_32   (N_SU_32 | N_F32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F32)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)

/* Select a "shape" for the current instruction (describing register types or
   sizes) from a list of alternatives. Return NS_NULL if the current instruction
   doesn't fit. For non-polymorphic shapes, checking is usually done as a
   function of operand parsing, so this function doesn't need to be called.
   Shapes should be listed in order of decreasing length.  */

static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  /* Try each candidate shape in turn; the variadic list is terminated
     by NS_NULL.  */
  for (; shape != NS_NULL; shape = va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  /* Check operand j against the kind the shape requires.  */
	  switch (neon_shape_tab[shape].el[j])
	    {
	    case SE_F:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad))
		matches = 0;
	      break;

	    case SE_D:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_R:
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    case SE_Q:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_I:
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_S:
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_L:
	      /* Register lists are not validated here.  */
	      break;
	    }
	}
      if (matches)
	break;
    }

  va_end (ap);

  /* Only diagnose when the caller actually supplied candidates.  */
  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}

/* True if SHAPE is predominantly a quadword operation (most of the time, this
   means the Q bit should be set.
*/ 10840 10841static int 10842neon_quad (enum neon_shape shape) 10843{ 10844 return neon_shape_class[shape] == SC_QUAD; 10845} 10846 10847static void 10848neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type, 10849 unsigned *g_size) 10850{ 10851 /* Allow modification to be made to types which are constrained to be 10852 based on the key element, based on bits set alongside N_EQK. */ 10853 if ((typebits & N_EQK) != 0) 10854 { 10855 if ((typebits & N_HLF) != 0) 10856 *g_size /= 2; 10857 else if ((typebits & N_DBL) != 0) 10858 *g_size *= 2; 10859 if ((typebits & N_SGN) != 0) 10860 *g_type = NT_signed; 10861 else if ((typebits & N_UNS) != 0) 10862 *g_type = NT_unsigned; 10863 else if ((typebits & N_INT) != 0) 10864 *g_type = NT_integer; 10865 else if ((typebits & N_FLT) != 0) 10866 *g_type = NT_float; 10867 else if ((typebits & N_SIZ) != 0) 10868 *g_type = NT_untyped; 10869 } 10870} 10871 10872/* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key" 10873 operand type, i.e. the single type specified in a Neon instruction when it 10874 is the only one given. */ 10875 10876static struct neon_type_el 10877neon_type_promote (struct neon_type_el *key, unsigned thisarg) 10878{ 10879 struct neon_type_el dest = *key; 10880 10881 assert ((thisarg & N_EQK) != 0); 10882 10883 neon_modify_type_size (thisarg, &dest.type, &dest.size); 10884 10885 return dest; 10886} 10887 10888/* Convert Neon type and size into compact bitmask representation. 
*/

static enum neon_type_mask
type_chk_of_el_type (enum neon_el_type type, unsigned size)
{
  /* Map (element type, size in bits) to the single corresponding N_*
     mask bit; unknown combinations fall through to N_UTYP.  */
  switch (type)
    {
    case NT_untyped:
      if (size == 8)  return N_8;
      if (size == 16) return N_16;
      if (size == 32) return N_32;
      if (size == 64) return N_64;
      break;

    case NT_integer:
      if (size == 8)  return N_I8;
      if (size == 16) return N_I16;
      if (size == 32) return N_I32;
      if (size == 64) return N_I64;
      break;

    case NT_float:
      if (size == 32) return N_F32;
      if (size == 64) return N_F64;
      break;

    case NT_poly:
      if (size == 8)  return N_P8;
      if (size == 16) return N_P16;
      break;

    case NT_signed:
      if (size == 8)  return N_S8;
      if (size == 16) return N_S16;
      if (size == 32) return N_S32;
      if (size == 64) return N_S64;
      break;

    case NT_unsigned:
      if (size == 8)  return N_U8;
      if (size == 16) return N_U16;
      if (size == 32) return N_U32;
      if (size == 64) return N_U64;
      break;

    default:
      break;
    }

  return N_UTYP;
}

/* Convert compact Neon bitmask type representation to a type and size. Only
   handles the case where a single bit is set in the mask.
*/ 10965 10966static int 10967el_type_of_type_chk (enum neon_el_type *type, unsigned *size, 10968 enum neon_type_mask mask) 10969{ 10970 if ((mask & N_EQK) != 0) 10971 return FAIL; 10972 10973 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0) 10974 *size = 8; 10975 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_P16)) != 0) 10976 *size = 16; 10977 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0) 10978 *size = 32; 10979 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64)) != 0) 10980 *size = 64; 10981 else 10982 return FAIL; 10983 10984 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0) 10985 *type = NT_signed; 10986 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0) 10987 *type = NT_unsigned; 10988 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0) 10989 *type = NT_integer; 10990 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0) 10991 *type = NT_untyped; 10992 else if ((mask & (N_P8 | N_P16)) != 0) 10993 *type = NT_poly; 10994 else if ((mask & (N_F32 | N_F64)) != 0) 10995 *type = NT_float; 10996 else 10997 return FAIL; 10998 10999 return SUCCESS; 11000} 11001 11002/* Modify a bitmask of allowed types. This is only needed for type 11003 relaxation. */ 11004 11005static unsigned 11006modify_types_allowed (unsigned allowed, unsigned mods) 11007{ 11008 unsigned size; 11009 enum neon_el_type type; 11010 unsigned destmask; 11011 int i; 11012 11013 destmask = 0; 11014 11015 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1) 11016 { 11017 if (el_type_of_type_chk (&type, &size, allowed & i) == SUCCESS) 11018 { 11019 neon_modify_type_size (mods, &type, &size); 11020 destmask |= type_chk_of_el_type (type, size); 11021 } 11022 } 11023 11024 return destmask; 11025} 11026 11027/* Check type and return type classification. 
   The manual states (paraphrase): If one datatype is given, it indicates the
   type given in:
    - the second operand, if there is one
    - the operand, if there is no second operand
    - the result, if there are no operands.
   This isn't quite good enough though, so we use a concept of a "key" datatype
   which is set on a per-instruction basis, which is the one which matters when
   only one data type is written.
   Note: this function has side-effects (e.g. filling in missing operands). All
   Neon instructions should call it before performing bit encoding.  */

static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  /* Returned on any failure; callers test .type against NT_invtype.  */
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs: one type-constraint mask per operand.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      /* N_IGNORE_TYPE as the first constraint means "skip all checking".  */
      if (thisarg == N_IGNORE_TYPE)
        {
          va_end (ap);
          return badtype;
        }
      types[i] = thisarg;
      if ((thisarg & N_KEY) != 0)
        key_el = i;
    }
  va_end (ap);

  /* Types may be given after the mnemonic or after each operand, not both.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
        {
          first_error (_("types specified in both the mnemonic and operands"));
          return badtype;
        }

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      /* A single type after the mnemonic names the key element; derive the
	 rest by promotion.  */
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
        if (j != key_el)
          inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
                                                  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
         after each operand. We allow some flexibility here; as long as the
         "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
        if (inst.operands[j].vectype.type != NT_invtype)
          inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
        {
          for (j = 0; j < els; j++)
            if (inst.operands[j].vectype.type == NT_invtype)
              inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
                                                      types[j]);
        }
      else
        {
          first_error (_("operand types can't be inferred"));
          return badtype;
        }
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Two passes: pass 0 records the key element's type/size and allowed
     mask; pass 1 checks every element against the (possibly key-relative)
     constraints.  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
        {
          unsigned thisarg = types[i];
          unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
            ? modify_types_allowed (key_allowed, thisarg) : thisarg;
          enum neon_el_type g_type = inst.vectype.el[i].type;
          unsigned g_size = inst.vectype.el[i].size;

          /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable. */
          if ((g_type == NT_signed || g_type == NT_unsigned)
              && (types_allowed & N_SU_ALL) == 0)
            g_type = NT_integer;

          /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly. */
          if ((g_size == 8 && (types_allowed & N_8) != 0)
              || (g_size == 16 && (types_allowed & N_16) != 0)
              || (g_size == 32 && (types_allowed & N_32) != 0)
              || (g_size == 64 && (types_allowed & N_64) != 0))
            g_type = NT_untyped;

          if (pass == 0)
            {
              if ((thisarg & N_KEY) != 0)
                {
                  k_type = g_type;
                  k_size = g_size;
                  key_allowed = thisarg & ~N_KEY;
                }
            }
          else
            {
              if ((thisarg & N_VFP) != 0)
                {
                  enum neon_shape_el regshape = neon_shape_tab[ns].el[i];
                  unsigned regwidth = neon_shape_el_size[regshape], match;

                  /* In VFP mode, operands must match register widths. If we
                     have a key operand, use its width, else use the width of
                     the current operand. */
                  if (k_size != -1u)
                    match = k_size;
                  else
                    match = g_size;

                  if (regwidth != match)
                    {
                      first_error (_("operand size must match register width"));
                      return badtype;
                    }
                }

              if ((thisarg & N_EQK) == 0)
                {
                  unsigned given_type = type_chk_of_el_type (g_type, g_size);

                  if ((given_type & types_allowed) == 0)
                    {
                      first_error (_("bad type in Neon instruction"));
                      return badtype;
                    }
                }
              else
                {
                  /* Key-relative operand: it must equal the key after the
		     modifier bits have been applied.  */
                  enum neon_el_type mod_k_type = k_type;
                  unsigned mod_k_size = k_size;
                  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
                  if (g_type != mod_k_type || g_size != mod_k_size)
                    {
                      first_error (_("inconsistent types in Neon instruction"));
                      return badtype;
                    }
                }
            }
        }
    }

  return inst.vectype.el[key_el];
}

/* Neon-style VFP instruction forwarding. */

/* Thumb VFP instructions have 0xE in the condition field. */

static void
do_vfp_cond_or_thumb (void)
{
  if (thumb_mode)
    inst.instruction |= 0xe0000000;
  else
    inst.instruction |= inst.cond << 28;
}

/* Look up and encode a simple mnemonic, for use as a helper function for the
   Neon-style VFP syntax. This avoids duplication of bits of the insns table,
   etc. It is assumed that operand parsing has already been done, and that the
   operands are in the form expected by the given opcode (this isn't necessarily
   the same as the form in which they were parsed, hence some massaging must
   take place before this function is called).
   Checks current arch version against that in the looked-up opcode.
*/ 11226 11227static void 11228do_vfp_nsyn_opcode (const char *opname) 11229{ 11230 const struct asm_opcode *opcode; 11231 11232 opcode = hash_find (arm_ops_hsh, opname); 11233 11234 if (!opcode) 11235 abort (); 11236 11237 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, 11238 thumb_mode ? *opcode->tvariant : *opcode->avariant), 11239 _(BAD_FPU)); 11240 11241 if (thumb_mode) 11242 { 11243 inst.instruction = opcode->tvalue; 11244 opcode->tencode (); 11245 } 11246 else 11247 { 11248 inst.instruction = (inst.cond << 28) | opcode->avalue; 11249 opcode->aencode (); 11250 } 11251} 11252 11253static void 11254do_vfp_nsyn_add_sub (enum neon_shape rs) 11255{ 11256 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd; 11257 11258 if (rs == NS_FFF) 11259 { 11260 if (is_add) 11261 do_vfp_nsyn_opcode ("fadds"); 11262 else 11263 do_vfp_nsyn_opcode ("fsubs"); 11264 } 11265 else 11266 { 11267 if (is_add) 11268 do_vfp_nsyn_opcode ("faddd"); 11269 else 11270 do_vfp_nsyn_opcode ("fsubd"); 11271 } 11272} 11273 11274/* Check operand types to see if this is a VFP instruction, and if so call 11275 PFN (). 
*/ 11276 11277static int 11278try_vfp_nsyn (int args, void (*pfn) (enum neon_shape)) 11279{ 11280 enum neon_shape rs; 11281 struct neon_type_el et; 11282 11283 switch (args) 11284 { 11285 case 2: 11286 rs = neon_select_shape (NS_FF, NS_DD, NS_NULL); 11287 et = neon_check_type (2, rs, 11288 N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP); 11289 break; 11290 11291 case 3: 11292 rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL); 11293 et = neon_check_type (3, rs, 11294 N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP); 11295 break; 11296 11297 default: 11298 abort (); 11299 } 11300 11301 if (et.type != NT_invtype) 11302 { 11303 pfn (rs); 11304 return SUCCESS; 11305 } 11306 else 11307 inst.error = NULL; 11308 11309 return FAIL; 11310} 11311 11312static void 11313do_vfp_nsyn_mla_mls (enum neon_shape rs) 11314{ 11315 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla; 11316 11317 if (rs == NS_FFF) 11318 { 11319 if (is_mla) 11320 do_vfp_nsyn_opcode ("fmacs"); 11321 else 11322 do_vfp_nsyn_opcode ("fmscs"); 11323 } 11324 else 11325 { 11326 if (is_mla) 11327 do_vfp_nsyn_opcode ("fmacd"); 11328 else 11329 do_vfp_nsyn_opcode ("fmscd"); 11330 } 11331} 11332 11333static void 11334do_vfp_nsyn_mul (enum neon_shape rs) 11335{ 11336 if (rs == NS_FFF) 11337 do_vfp_nsyn_opcode ("fmuls"); 11338 else 11339 do_vfp_nsyn_opcode ("fmuld"); 11340} 11341 11342static void 11343do_vfp_nsyn_abs_neg (enum neon_shape rs) 11344{ 11345 int is_neg = (inst.instruction & 0x80) != 0; 11346 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY); 11347 11348 if (rs == NS_FF) 11349 { 11350 if (is_neg) 11351 do_vfp_nsyn_opcode ("fnegs"); 11352 else 11353 do_vfp_nsyn_opcode ("fabss"); 11354 } 11355 else 11356 { 11357 if (is_neg) 11358 do_vfp_nsyn_opcode ("fnegd"); 11359 else 11360 do_vfp_nsyn_opcode ("fabsd"); 11361 } 11362} 11363 11364/* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision 11365 insns belong to Neon, and are handled elsewhere. 
*/

static void
do_vfp_nsyn_ldm_stm (int is_dbmode)
{
  /* Bit 20 is the load/store bit in the template encoding.  */
  int is_ldm = (inst.instruction & (1 << 20)) != 0;
  if (is_ldm)
    {
      if (is_dbmode)
        do_vfp_nsyn_opcode ("fldmdbs");
      else
        do_vfp_nsyn_opcode ("fldmias");
    }
  else
    {
      if (is_dbmode)
        do_vfp_nsyn_opcode ("fstmdbs");
      else
        do_vfp_nsyn_opcode ("fstmias");
    }
}

/* Forward vsqrt to fsqrts/fsqrtd according to operand shape.  */

static void
do_vfp_nsyn_sqrt (void)
{
  enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
  neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);

  if (rs == NS_FF)
    do_vfp_nsyn_opcode ("fsqrts");
  else
    do_vfp_nsyn_opcode ("fsqrtd");
}

/* Forward vdiv to fdivs/fdivd according to operand shape.  */

static void
do_vfp_nsyn_div (void)
{
  enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
  neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
    N_F32 | N_F64 | N_KEY | N_VFP);

  if (rs == NS_FFF)
    do_vfp_nsyn_opcode ("fdivs");
  else
    do_vfp_nsyn_opcode ("fdivd");
}

/* Encode vnmul/vnmla/vnmls directly from the encoding table (single or
   double variant chosen by shape).  */

static void
do_vfp_nsyn_nmul (void)
{
  enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
  neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
    N_F32 | N_F64 | N_KEY | N_VFP);

  if (rs == NS_FFF)
    {
      inst.instruction = NEON_ENC_SINGLE (inst.instruction);
      do_vfp_sp_dyadic ();
    }
  else
    {
      inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
      do_vfp_dp_rd_rn_rm ();
    }
  do_vfp_cond_or_thumb ();
}

/* Encode vcmp/vcmpe.  A register second operand compares two registers;
   an immediate second operand (which must be #0) becomes the
   compare-with-zero form.  */

static void
do_vfp_nsyn_cmp (void)
{
  if (inst.operands[1].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);

      if (rs == NS_FF)
        {
          inst.instruction = NEON_ENC_SINGLE (inst.instruction);
          do_vfp_sp_monadic ();
        }
      else
        {
          inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
          do_vfp_dp_rd_rm ();
        }
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);

      /* Switch the mnemonic code to the compare-with-zero variant; the
	 N_MNEM_* values are consecutive so simple addition works.  */
      switch (inst.instruction & 0x0fffffff)
        {
        case N_MNEM_vcmp:
          inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
          break;
        case N_MNEM_vcmpe:
          inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
          break;
        default:
          abort ();
        }

      if (rs == NS_FI)
        {
          inst.instruction = NEON_ENC_SINGLE (inst.instruction);
          do_vfp_sp_compare_z ();
        }
      else
        {
          inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
          do_vfp_dp_rd ();
        }
    }
  do_vfp_cond_or_thumb ();
}

/* Shift the operands up and insert a writeback SP (r13) as operand 0,
   turning "vpush {regs}" into the equivalent "fstmdb sp!, {regs}" form.  */

static void
nsyn_insert_sp (void)
{
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
  inst.operands[0].reg = 13;
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].present = 1;
}

static void
do_vfp_nsyn_push (void)
{
  nsyn_insert_sp ();
  if (inst.operands[1].issingle)
    do_vfp_nsyn_opcode ("fstmdbs");
  else
    do_vfp_nsyn_opcode ("fstmdbd");
}

static void
do_vfp_nsyn_pop (void)
{
  nsyn_insert_sp ();
  if (inst.operands[1].issingle)
    do_vfp_nsyn_opcode ("fldmias");
  else
    do_vfp_nsyn_opcode ("fldmiad");
}

/* Fix up Neon data-processing instructions, ORing in the correct bits for
   ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */

static unsigned
neon_dp_fixup (unsigned i)
{
  if (thumb_mode)
    {
      /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
      if (i & (1 << 24))
        i |= 1 << 28;

      i &= ~(1 << 24);

      i |= 0xef000000;
    }
  else
    i |= 0xf2000000;

  return i;
}

/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3). */

static unsigned
neon_logbits (unsigned x)
{
  return ffs (x) - 4;
}

/* Low 4 bits and high bit of a (5-bit) Neon register number, which are
   encoded in separate instruction fields.  */
#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)

/* Encode insns with bit pattern:

  |28/24|23|22 |21 20|19 16|15 12|11    8|7|6|5|4|3  0|
  |  U  |x |D  |size | Rn  | Rd  |x x x x|N|Q|M|x| Rm |

  SIZE is passed in bits. -1 means size field isn't changed, in case it has a
  different meaning for some instruction. */

static void
neon_three_same (int isquad, int ubit, int size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;
  if (size != -1)
    inst.instruction |= neon_logbits (size) << 20;

  inst.instruction = neon_dp_fixup (inst.instruction);
}

/* Encode instructions of the form:

  |28/24|23|22|21 20|19 18|17 16|15 12|11... 7|6|5|4|3  0|
  |  U  |x |D |x  x |size |x  x | Rd  |x x x x|Q|M|x| Rm |

  Don't write size if SIZE == -1.
*/

static void
neon_two_same (int qbit, int ubit, int size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (qbit != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;

  if (size != -1)
    inst.instruction |= neon_logbits (size) << 18;

  inst.instruction = neon_dp_fixup (inst.instruction);
}

/* Neon instruction encoders, in approximate order of appearance. */

/* Three-register ops on 8/16/32-bit signed or unsigned elements.  */

static void
do_neon_dyadic_i_su (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_SU_32 | N_KEY);
  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
}

/* Three-register ops allowing 64-bit elements as well.  */

static void
do_neon_dyadic_i64_su (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_SU_ALL | N_KEY);
  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
}

/* Encode an immediate-shift instruction.  IMMBITS is the already-computed
   value of the imm6 field; the element size (in bytes here) supplies the
   L bit and the top bits of imm6.  The U bit is written only when
   WRITE_UBIT is set.  */

static void
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
                unsigned immbits)
{
  unsigned size = et.size >> 3;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= immbits << 16;
  inst.instruction |= (size >> 3) << 7;
  inst.instruction |= (size & 0x7) << 19;
  if (write_ubit)
    inst.instruction |= (uval != 0) << 24;

  inst.instruction = neon_dp_fixup (inst.instruction);
}

static void
do_neon_shl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      /* Immediate form: vshl.<type> Dd, Dm, #imm.  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
      inst.instruction = NEON_ENC_IMMED (inst.instruction);
      neon_imm_shift (FALSE, 0, neon_quad (rs), et, inst.operands[2].imm);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
        N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* VSHL/VQSHL 3-register variants have syntax such as:
           vshl.xx Dd, Dm, Dn
         whereas other 3-register operations encoded by neon_three_same have
         syntax like:
           vadd.xx Dd, Dn, Dm
         (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
         here. */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}

static void
do_neon_qshl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      /* Immediate form; unlike vshl, the saturating shift is signedness-
	 sensitive, so the U bit is written.  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);

      inst.instruction = NEON_ENC_IMMED (inst.instruction);
      neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
                      inst.operands[2].imm);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
        N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* See note in do_neon_shl_imm. */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}

/* Rounding shift: register form only; operands swapped as for vshl.  */

static void
do_neon_rshl (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_SU_ALL | N_KEY);
  unsigned int tmp;

  tmp = inst.operands[2].reg;
  inst.operands[2].reg = inst.operands[1].reg;
  inst.operands[1].reg = tmp;
  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
}

static int
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
{
  /* Handle .I8 pseudo-instructions. */
  if (size == 8)
    {
      /* Unfortunately, this will make everything apart from zero out-of-range.
	 FIXME is this the intended semantics? There doesn't seem much point in
	 accepting .I8 if so.
*/ 11717 immediate |= immediate << 8; 11718 size = 16; 11719 } 11720 11721 if (size >= 32) 11722 { 11723 if (immediate == (immediate & 0x000000ff)) 11724 { 11725 *immbits = immediate; 11726 return 0x1; 11727 } 11728 else if (immediate == (immediate & 0x0000ff00)) 11729 { 11730 *immbits = immediate >> 8; 11731 return 0x3; 11732 } 11733 else if (immediate == (immediate & 0x00ff0000)) 11734 { 11735 *immbits = immediate >> 16; 11736 return 0x5; 11737 } 11738 else if (immediate == (immediate & 0xff000000)) 11739 { 11740 *immbits = immediate >> 24; 11741 return 0x7; 11742 } 11743 if ((immediate & 0xffff) != (immediate >> 16)) 11744 goto bad_immediate; 11745 immediate &= 0xffff; 11746 } 11747 11748 if (immediate == (immediate & 0x000000ff)) 11749 { 11750 *immbits = immediate; 11751 return 0x9; 11752 } 11753 else if (immediate == (immediate & 0x0000ff00)) 11754 { 11755 *immbits = immediate >> 8; 11756 return 0xb; 11757 } 11758 11759 bad_immediate: 11760 first_error (_("immediate value out of range")); 11761 return FAIL; 11762} 11763 11764/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits 11765 A, B, C, D. */ 11766 11767static int 11768neon_bits_same_in_bytes (unsigned imm) 11769{ 11770 return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff) 11771 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00) 11772 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000) 11773 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000); 11774} 11775 11776/* For immediate of above form, return 0bABCD. */ 11777 11778static unsigned 11779neon_squash_bits (unsigned imm) 11780{ 11781 return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14) 11782 | ((imm & 0x01000000) >> 21); 11783} 11784 11785/* Compress quarter-float representation to 0b...000 abcdefgh. */ 11786 11787static unsigned 11788neon_qfloat_bits (unsigned imm) 11789{ 11790 return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80); 11791} 11792 11793/* Returns CMODE. 
IMMBITS [7:0] is set to bits suitable for inserting into 11794 the instruction. *OP is passed as the initial value of the op field, and 11795 may be set to a different value depending on the constant (i.e. 11796 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not 11797 MVN). If the immediate looks like a repeated parttern then also 11798 try smaller element sizes. */ 11799 11800static int 11801neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p, 11802 unsigned *immbits, int *op, int size, 11803 enum neon_el_type type) 11804{ 11805 /* Only permit float immediates (including 0.0/-0.0) if the operand type is 11806 float. */ 11807 if (type == NT_float && !float_p) 11808 return FAIL; 11809 11810 if (type == NT_float && is_quarter_float (immlo) && immhi == 0) 11811 { 11812 if (size != 32 || *op == 1) 11813 return FAIL; 11814 *immbits = neon_qfloat_bits (immlo); 11815 return 0xf; 11816 } 11817 11818 if (size == 64) 11819 { 11820 if (neon_bits_same_in_bytes (immhi) 11821 && neon_bits_same_in_bytes (immlo)) 11822 { 11823 if (*op == 1) 11824 return FAIL; 11825 *immbits = (neon_squash_bits (immhi) << 4) 11826 | neon_squash_bits (immlo); 11827 *op = 1; 11828 return 0xe; 11829 } 11830 11831 if (immhi != immlo) 11832 return FAIL; 11833 } 11834 11835 if (size >= 32) 11836 { 11837 if (immlo == (immlo & 0x000000ff)) 11838 { 11839 *immbits = immlo; 11840 return 0x0; 11841 } 11842 else if (immlo == (immlo & 0x0000ff00)) 11843 { 11844 *immbits = immlo >> 8; 11845 return 0x2; 11846 } 11847 else if (immlo == (immlo & 0x00ff0000)) 11848 { 11849 *immbits = immlo >> 16; 11850 return 0x4; 11851 } 11852 else if (immlo == (immlo & 0xff000000)) 11853 { 11854 *immbits = immlo >> 24; 11855 return 0x6; 11856 } 11857 else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff)) 11858 { 11859 *immbits = (immlo >> 8) & 0xff; 11860 return 0xc; 11861 } 11862 else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff)) 11863 { 11864 *immbits = (immlo >> 16) & 0xff; 11865 return 
0xd; 11866 } 11867 11868 if ((immlo & 0xffff) != (immlo >> 16)) 11869 return FAIL; 11870 immlo &= 0xffff; 11871 } 11872 11873 if (size >= 16) 11874 { 11875 if (immlo == (immlo & 0x000000ff)) 11876 { 11877 *immbits = immlo; 11878 return 0x8; 11879 } 11880 else if (immlo == (immlo & 0x0000ff00)) 11881 { 11882 *immbits = immlo >> 8; 11883 return 0xa; 11884 } 11885 11886 if ((immlo & 0xff) != (immlo >> 8)) 11887 return FAIL; 11888 immlo &= 0xff; 11889 } 11890 11891 if (immlo == (immlo & 0x000000ff)) 11892 { 11893 /* Don't allow MVN with 8-bit immediate. */ 11894 if (*op == 1) 11895 return FAIL; 11896 *immbits = immlo; 11897 return 0xe; 11898 } 11899 11900 return FAIL; 11901} 11902 11903/* Write immediate bits [7:0] to the following locations: 11904 11905 |28/24|23 19|18 16|15 4|3 0| 11906 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h| 11907 11908 This function is used by VMOV/VMVN/VORR/VBIC. */ 11909 11910static void 11911neon_write_immbits (unsigned immbits) 11912{ 11913 inst.instruction |= immbits & 0xf; 11914 inst.instruction |= ((immbits >> 4) & 0x7) << 16; 11915 inst.instruction |= ((immbits >> 7) & 0x1) << 24; 11916} 11917 11918/* Invert low-order SIZE bits of XHI:XLO. */ 11919 11920static void 11921neon_invert_size (unsigned *xlo, unsigned *xhi, int size) 11922{ 11923 unsigned immlo = xlo ? *xlo : 0; 11924 unsigned immhi = xhi ? *xhi : 0; 11925 11926 switch (size) 11927 { 11928 case 8: 11929 immlo = (~immlo) & 0xff; 11930 break; 11931 11932 case 16: 11933 immlo = (~immlo) & 0xffff; 11934 break; 11935 11936 case 64: 11937 immhi = (~immhi) & 0xffffffff; 11938 /* fall through. 
*/ 11939 11940 case 32: 11941 immlo = (~immlo) & 0xffffffff; 11942 break; 11943 11944 default: 11945 abort (); 11946 } 11947 11948 if (xlo) 11949 *xlo = immlo; 11950 11951 if (xhi) 11952 *xhi = immhi; 11953} 11954 11955static void 11956do_neon_logic (void) 11957{ 11958 if (inst.operands[2].present && inst.operands[2].isreg) 11959 { 11960 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL); 11961 neon_check_type (3, rs, N_IGNORE_TYPE); 11962 /* U bit and size field were set as part of the bitmask. */ 11963 inst.instruction = NEON_ENC_INTEGER (inst.instruction); 11964 neon_three_same (neon_quad (rs), 0, -1); 11965 } 11966 else 11967 { 11968 enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL); 11969 struct neon_type_el et = neon_check_type (2, rs, 11970 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK); 11971 enum neon_opc opcode = inst.instruction & 0x0fffffff; 11972 unsigned immbits; 11973 int cmode; 11974 11975 if (et.type == NT_invtype) 11976 return; 11977 11978 inst.instruction = NEON_ENC_IMMED (inst.instruction); 11979 11980 immbits = inst.operands[1].imm; 11981 if (et.size == 64) 11982 { 11983 /* .i64 is a pseudo-op, so the immediate must be a repeating 11984 pattern. */ 11985 if (immbits != (inst.operands[1].regisimm ? 11986 inst.operands[1].reg : 0)) 11987 { 11988 /* Set immbits to an invalid constant. */ 11989 immbits = 0xdeadbeef; 11990 } 11991 } 11992 11993 switch (opcode) 11994 { 11995 case N_MNEM_vbic: 11996 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size); 11997 break; 11998 11999 case N_MNEM_vorr: 12000 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size); 12001 break; 12002 12003 case N_MNEM_vand: 12004 /* Pseudo-instruction for VBIC. */ 12005 neon_invert_size (&immbits, 0, et.size); 12006 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size); 12007 break; 12008 12009 case N_MNEM_vorn: 12010 /* Pseudo-instruction for VORR. 
*/ 12011 neon_invert_size (&immbits, 0, et.size); 12012 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size); 12013 break; 12014 12015 default: 12016 abort (); 12017 } 12018 12019 if (cmode == FAIL) 12020 return; 12021 12022 inst.instruction |= neon_quad (rs) << 6; 12023 inst.instruction |= LOW4 (inst.operands[0].reg) << 12; 12024 inst.instruction |= HI1 (inst.operands[0].reg) << 22; 12025 inst.instruction |= cmode << 8; 12026 neon_write_immbits (immbits); 12027 12028 inst.instruction = neon_dp_fixup (inst.instruction); 12029 } 12030} 12031 12032static void 12033do_neon_bitfield (void) 12034{ 12035 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL); 12036 neon_check_type (3, rs, N_IGNORE_TYPE); 12037 neon_three_same (neon_quad (rs), 0, -1); 12038} 12039 12040static void 12041neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types, 12042 unsigned destbits) 12043{ 12044 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL); 12045 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK, 12046 types | N_KEY); 12047 if (et.type == NT_float) 12048 { 12049 inst.instruction = NEON_ENC_FLOAT (inst.instruction); 12050 neon_three_same (neon_quad (rs), 0, -1); 12051 } 12052 else 12053 { 12054 inst.instruction = NEON_ENC_INTEGER (inst.instruction); 12055 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size); 12056 } 12057} 12058 12059static void 12060do_neon_dyadic_if_su (void) 12061{ 12062 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0); 12063} 12064 12065static void 12066do_neon_dyadic_if_su_d (void) 12067{ 12068 /* This version only allow D registers, but that constraint is enforced during 12069 operand parsing so we don't need to do anything extra here. */ 12070 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0); 12071} 12072 12073static void 12074do_neon_dyadic_if_i_d (void) 12075{ 12076 /* The "untyped" case can't happen. Do this to stop the "U" bit being 12077 affected if we specify unsigned args. 
*/ 12078 neon_dyadic_misc (NT_untyped, N_IF_32, 0); 12079} 12080 12081enum vfp_or_neon_is_neon_bits 12082{ 12083 NEON_CHECK_CC = 1, 12084 NEON_CHECK_ARCH = 2 12085}; 12086 12087/* Call this function if an instruction which may have belonged to the VFP or 12088 Neon instruction sets, but turned out to be a Neon instruction (due to the 12089 operand types involved, etc.). We have to check and/or fix-up a couple of 12090 things: 12091 12092 - Make sure the user hasn't attempted to make a Neon instruction 12093 conditional. 12094 - Alter the value in the condition code field if necessary. 12095 - Make sure that the arch supports Neon instructions. 12096 12097 Which of these operations take place depends on bits from enum 12098 vfp_or_neon_is_neon_bits. 12099 12100 WARNING: This function has side effects! If NEON_CHECK_CC is used and the 12101 current instruction's condition is COND_ALWAYS, the condition field is 12102 changed to inst.uncond_value. This is necessary because instructions shared 12103 between VFP and Neon may be conditional for the VFP variants only, and the 12104 unconditional Neon version must have, e.g., 0xF in the condition field. */ 12105 12106static int 12107vfp_or_neon_is_neon (unsigned check) 12108{ 12109 /* Conditions are always legal in Thumb mode (IT blocks). 
*/ 12110 if (!thumb_mode && (check & NEON_CHECK_CC)) 12111 { 12112 if (inst.cond != COND_ALWAYS) 12113 { 12114 first_error (_(BAD_COND)); 12115 return FAIL; 12116 } 12117 if (inst.uncond_value != -1) 12118 inst.instruction |= inst.uncond_value << 28; 12119 } 12120 12121 if ((check & NEON_CHECK_ARCH) 12122 && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)) 12123 { 12124 first_error (_(BAD_FPU)); 12125 return FAIL; 12126 } 12127 12128 return SUCCESS; 12129} 12130 12131static void 12132do_neon_addsub_if_i (void) 12133{ 12134 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS) 12135 return; 12136 12137 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL) 12138 return; 12139 12140 /* The "untyped" case can't happen. Do this to stop the "U" bit being 12141 affected if we specify unsigned args. */ 12142 neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0); 12143} 12144 12145/* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the 12146 result to be: 12147 V<op> A,B (A is operand 0, B is operand 2) 12148 to mean: 12149 V<op> A,B,A 12150 not: 12151 V<op> A,B,B 12152 so handle that case specially. */ 12153 12154static void 12155neon_exchange_operands (void) 12156{ 12157 void *scratch = alloca (sizeof (inst.operands[0])); 12158 if (inst.operands[1].present) 12159 { 12160 /* Swap operands[1] and operands[2]. 
*/ 12161 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0])); 12162 inst.operands[1] = inst.operands[2]; 12163 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0])); 12164 } 12165 else 12166 { 12167 inst.operands[1] = inst.operands[2]; 12168 inst.operands[2] = inst.operands[0]; 12169 } 12170} 12171 12172static void 12173neon_compare (unsigned regtypes, unsigned immtypes, int invert) 12174{ 12175 if (inst.operands[2].isreg) 12176 { 12177 if (invert) 12178 neon_exchange_operands (); 12179 neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ); 12180 } 12181 else 12182 { 12183 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL); 12184 struct neon_type_el et = neon_check_type (2, rs, 12185 N_EQK | N_SIZ, immtypes | N_KEY); 12186 12187 inst.instruction = NEON_ENC_IMMED (inst.instruction); 12188 inst.instruction |= LOW4 (inst.operands[0].reg) << 12; 12189 inst.instruction |= HI1 (inst.operands[0].reg) << 22; 12190 inst.instruction |= LOW4 (inst.operands[1].reg); 12191 inst.instruction |= HI1 (inst.operands[1].reg) << 5; 12192 inst.instruction |= neon_quad (rs) << 6; 12193 inst.instruction |= (et.type == NT_float) << 10; 12194 inst.instruction |= neon_logbits (et.size) << 18; 12195 12196 inst.instruction = neon_dp_fixup (inst.instruction); 12197 } 12198} 12199 12200static void 12201do_neon_cmp (void) 12202{ 12203 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE); 12204} 12205 12206static void 12207do_neon_cmp_inv (void) 12208{ 12209 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE); 12210} 12211 12212static void 12213do_neon_ceq (void) 12214{ 12215 neon_compare (N_IF_32, N_IF_32, FALSE); 12216} 12217 12218/* For multiply instructions, we have the possibility of 16-bit or 32-bit 12219 scalars, which are encoded in 5 bits, M : Rm. 12220 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in 12221 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the 12222 index in M. 
*/ 12223 12224static unsigned 12225neon_scalar_for_mul (unsigned scalar, unsigned elsize) 12226{ 12227 unsigned regno = NEON_SCALAR_REG (scalar); 12228 unsigned elno = NEON_SCALAR_INDEX (scalar); 12229 12230 switch (elsize) 12231 { 12232 case 16: 12233 if (regno > 7 || elno > 3) 12234 goto bad_scalar; 12235 return regno | (elno << 3); 12236 12237 case 32: 12238 if (regno > 15 || elno > 1) 12239 goto bad_scalar; 12240 return regno | (elno << 4); 12241 12242 default: 12243 bad_scalar: 12244 first_error (_("scalar out of range for multiply instruction")); 12245 } 12246 12247 return 0; 12248} 12249 12250/* Encode multiply / multiply-accumulate scalar instructions. */ 12251 12252static void 12253neon_mul_mac (struct neon_type_el et, int ubit) 12254{ 12255 unsigned scalar; 12256 12257 /* Give a more helpful error message if we have an invalid type. */ 12258 if (et.type == NT_invtype) 12259 return; 12260 12261 scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size); 12262 inst.instruction |= LOW4 (inst.operands[0].reg) << 12; 12263 inst.instruction |= HI1 (inst.operands[0].reg) << 22; 12264 inst.instruction |= LOW4 (inst.operands[1].reg) << 16; 12265 inst.instruction |= HI1 (inst.operands[1].reg) << 7; 12266 inst.instruction |= LOW4 (scalar); 12267 inst.instruction |= HI1 (scalar) << 5; 12268 inst.instruction |= (et.type == NT_float) << 8; 12269 inst.instruction |= neon_logbits (et.size) << 20; 12270 inst.instruction |= (ubit != 0) << 24; 12271 12272 inst.instruction = neon_dp_fixup (inst.instruction); 12273} 12274 12275static void 12276do_neon_mac_maybe_scalar (void) 12277{ 12278 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS) 12279 return; 12280 12281 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL) 12282 return; 12283 12284 if (inst.operands[2].isscalar) 12285 { 12286 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL); 12287 struct neon_type_el et = neon_check_type (3, rs, 12288 N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY); 
12289 inst.instruction = NEON_ENC_SCALAR (inst.instruction); 12290 neon_mul_mac (et, neon_quad (rs)); 12291 } 12292 else 12293 { 12294 /* The "untyped" case can't happen. Do this to stop the "U" bit being 12295 affected if we specify unsigned args. */ 12296 neon_dyadic_misc (NT_untyped, N_IF_32, 0); 12297 } 12298} 12299 12300static void 12301do_neon_tst (void) 12302{ 12303 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL); 12304 struct neon_type_el et = neon_check_type (3, rs, 12305 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY); 12306 neon_three_same (neon_quad (rs), 0, et.size); 12307} 12308 12309/* VMUL with 3 registers allows the P8 type. The scalar version supports the 12310 same types as the MAC equivalents. The polynomial type for this instruction 12311 is encoded the same as the integer type. */ 12312 12313static void 12314do_neon_mul (void) 12315{ 12316 if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS) 12317 return; 12318 12319 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL) 12320 return; 12321 12322 if (inst.operands[2].isscalar) 12323 do_neon_mac_maybe_scalar (); 12324 else 12325 neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0); 12326} 12327 12328static void 12329do_neon_qdmulh (void) 12330{ 12331 if (inst.operands[2].isscalar) 12332 { 12333 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL); 12334 struct neon_type_el et = neon_check_type (3, rs, 12335 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY); 12336 inst.instruction = NEON_ENC_SCALAR (inst.instruction); 12337 neon_mul_mac (et, neon_quad (rs)); 12338 } 12339 else 12340 { 12341 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL); 12342 struct neon_type_el et = neon_check_type (3, rs, 12343 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY); 12344 inst.instruction = NEON_ENC_INTEGER (inst.instruction); 12345 /* The U bit (rounding) comes from bit mask. 
*/ 12346 neon_three_same (neon_quad (rs), 0, et.size); 12347 } 12348} 12349 12350static void 12351do_neon_fcmp_absolute (void) 12352{ 12353 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL); 12354 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY); 12355 /* Size field comes from bit mask. */ 12356 neon_three_same (neon_quad (rs), 1, -1); 12357} 12358 12359static void 12360do_neon_fcmp_absolute_inv (void) 12361{ 12362 neon_exchange_operands (); 12363 do_neon_fcmp_absolute (); 12364} 12365 12366static void 12367do_neon_step (void) 12368{ 12369 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL); 12370 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY); 12371 neon_three_same (neon_quad (rs), 0, -1); 12372} 12373 12374static void 12375do_neon_abs_neg (void) 12376{ 12377 enum neon_shape rs; 12378 struct neon_type_el et; 12379 12380 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS) 12381 return; 12382 12383 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL) 12384 return; 12385 12386 rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL); 12387 et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY); 12388 12389 inst.instruction |= LOW4 (inst.operands[0].reg) << 12; 12390 inst.instruction |= HI1 (inst.operands[0].reg) << 22; 12391 inst.instruction |= LOW4 (inst.operands[1].reg); 12392 inst.instruction |= HI1 (inst.operands[1].reg) << 5; 12393 inst.instruction |= neon_quad (rs) << 6; 12394 inst.instruction |= (et.type == NT_float) << 10; 12395 inst.instruction |= neon_logbits (et.size) << 18; 12396 12397 inst.instruction = neon_dp_fixup (inst.instruction); 12398} 12399 12400static void 12401do_neon_sli (void) 12402{ 12403 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL); 12404 struct neon_type_el et = neon_check_type (2, rs, 12405 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY); 12406 int imm = inst.operands[2].imm; 12407 constraint (imm < 0 || (unsigned)imm >= et.size, 12408 _("immediate out of range 
for insert")); 12409 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm); 12410} 12411 12412static void 12413do_neon_sri (void) 12414{ 12415 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL); 12416 struct neon_type_el et = neon_check_type (2, rs, 12417 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY); 12418 int imm = inst.operands[2].imm; 12419 constraint (imm < 1 || (unsigned)imm > et.size, 12420 _("immediate out of range for insert")); 12421 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm); 12422} 12423 12424static void 12425do_neon_qshlu_imm (void) 12426{ 12427 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL); 12428 struct neon_type_el et = neon_check_type (2, rs, 12429 N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY); 12430 int imm = inst.operands[2].imm; 12431 constraint (imm < 0 || (unsigned)imm >= et.size, 12432 _("immediate out of range for shift")); 12433 /* Only encodes the 'U present' variant of the instruction. 12434 In this case, signed types have OP (bit 8) set to 0. 12435 Unsigned types have OP set to 1. */ 12436 inst.instruction |= (et.type == NT_unsigned) << 8; 12437 /* The rest of the bits are the same as other immediate shifts. */ 12438 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm); 12439} 12440 12441static void 12442do_neon_qmovn (void) 12443{ 12444 struct neon_type_el et = neon_check_type (2, NS_DQ, 12445 N_EQK | N_HLF, N_SU_16_64 | N_KEY); 12446 /* Saturating move where operands can be signed or unsigned, and the 12447 destination has the same signedness. */ 12448 inst.instruction = NEON_ENC_INTEGER (inst.instruction); 12449 if (et.type == NT_unsigned) 12450 inst.instruction |= 0xc0; 12451 else 12452 inst.instruction |= 0x80; 12453 neon_two_same (0, 1, et.size / 2); 12454} 12455 12456static void 12457do_neon_qmovun (void) 12458{ 12459 struct neon_type_el et = neon_check_type (2, NS_DQ, 12460 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY); 12461 /* Saturating move with unsigned results. 
Operands must be signed. */ 12462 inst.instruction = NEON_ENC_INTEGER (inst.instruction); 12463 neon_two_same (0, 1, et.size / 2); 12464} 12465 12466static void 12467do_neon_rshift_sat_narrow (void) 12468{ 12469 /* FIXME: Types for narrowing. If operands are signed, results can be signed 12470 or unsigned. If operands are unsigned, results must also be unsigned. */ 12471 struct neon_type_el et = neon_check_type (2, NS_DQI, 12472 N_EQK | N_HLF, N_SU_16_64 | N_KEY); 12473 int imm = inst.operands[2].imm; 12474 /* This gets the bounds check, size encoding and immediate bits calculation 12475 right. */ 12476 et.size /= 2; 12477 12478 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for 12479 VQMOVN.I<size> <Dd>, <Qm>. */ 12480 if (imm == 0) 12481 { 12482 inst.operands[2].present = 0; 12483 inst.instruction = N_MNEM_vqmovn; 12484 do_neon_qmovn (); 12485 return; 12486 } 12487 12488 constraint (imm < 1 || (unsigned)imm > et.size, 12489 _("immediate out of range")); 12490 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm); 12491} 12492 12493static void 12494do_neon_rshift_sat_narrow_u (void) 12495{ 12496 /* FIXME: Types for narrowing. If operands are signed, results can be signed 12497 or unsigned. If operands are unsigned, results must also be unsigned. */ 12498 struct neon_type_el et = neon_check_type (2, NS_DQI, 12499 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY); 12500 int imm = inst.operands[2].imm; 12501 /* This gets the bounds check, size encoding and immediate bits calculation 12502 right. */ 12503 et.size /= 2; 12504 12505 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for 12506 VQMOVUN.I<size> <Dd>, <Qm>. 
*/ 12507 if (imm == 0) 12508 { 12509 inst.operands[2].present = 0; 12510 inst.instruction = N_MNEM_vqmovun; 12511 do_neon_qmovun (); 12512 return; 12513 } 12514 12515 constraint (imm < 1 || (unsigned)imm > et.size, 12516 _("immediate out of range")); 12517 /* FIXME: The manual is kind of unclear about what value U should have in 12518 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it 12519 must be 1. */ 12520 neon_imm_shift (TRUE, 1, 0, et, et.size - imm); 12521} 12522 12523static void 12524do_neon_movn (void) 12525{ 12526 struct neon_type_el et = neon_check_type (2, NS_DQ, 12527 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY); 12528 inst.instruction = NEON_ENC_INTEGER (inst.instruction); 12529 neon_two_same (0, 1, et.size / 2); 12530} 12531 12532static void 12533do_neon_rshift_narrow (void) 12534{ 12535 struct neon_type_el et = neon_check_type (2, NS_DQI, 12536 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY); 12537 int imm = inst.operands[2].imm; 12538 /* This gets the bounds check, size encoding and immediate bits calculation 12539 right. */ 12540 et.size /= 2; 12541 12542 /* If immediate is zero then we are a pseudo-instruction for 12543 VMOVN.I<size> <Dd>, <Qm> */ 12544 if (imm == 0) 12545 { 12546 inst.operands[2].present = 0; 12547 inst.instruction = N_MNEM_vmovn; 12548 do_neon_movn (); 12549 return; 12550 } 12551 12552 constraint (imm < 1 || (unsigned)imm > et.size, 12553 _("immediate out of range for narrowing operation")); 12554 neon_imm_shift (FALSE, 0, 0, et, et.size - imm); 12555} 12556 12557static void 12558do_neon_shll (void) 12559{ 12560 /* FIXME: Type checking when lengthening. */ 12561 struct neon_type_el et = neon_check_type (2, NS_QDI, 12562 N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY); 12563 unsigned imm = inst.operands[2].imm; 12564 12565 if (imm == et.size) 12566 { 12567 /* Maximum shift variant. 
*/ 12568 inst.instruction = NEON_ENC_INTEGER (inst.instruction); 12569 inst.instruction |= LOW4 (inst.operands[0].reg) << 12; 12570 inst.instruction |= HI1 (inst.operands[0].reg) << 22; 12571 inst.instruction |= LOW4 (inst.operands[1].reg); 12572 inst.instruction |= HI1 (inst.operands[1].reg) << 5; 12573 inst.instruction |= neon_logbits (et.size) << 18; 12574 12575 inst.instruction = neon_dp_fixup (inst.instruction); 12576 } 12577 else 12578 { 12579 /* A more-specific type check for non-max versions. */ 12580 et = neon_check_type (2, NS_QDI, 12581 N_EQK | N_DBL, N_SU_32 | N_KEY); 12582 inst.instruction = NEON_ENC_IMMED (inst.instruction); 12583 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm); 12584 } 12585} 12586 12587/* Check the various types for the VCVT instruction, and return which version 12588 the current instruction is. */ 12589 12590static int 12591neon_cvt_flavour (enum neon_shape rs) 12592{ 12593#define CVT_VAR(C,X,Y) \ 12594 et = neon_check_type (2, rs, whole_reg | (X), whole_reg | (Y)); \ 12595 if (et.type != NT_invtype) \ 12596 { \ 12597 inst.error = NULL; \ 12598 return (C); \ 12599 } 12600 struct neon_type_el et; 12601 unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF 12602 || rs == NS_FF) ? N_VFP : 0; 12603 /* The instruction versions which take an immediate take one register 12604 argument, which is extended to the width of the full register. Thus the 12605 "source" and "destination" registers must have the same width. Hack that 12606 here by making the size equal to the key (wider, in this case) operand. */ 12607 unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0; 12608 12609 CVT_VAR (0, N_S32, N_F32); 12610 CVT_VAR (1, N_U32, N_F32); 12611 CVT_VAR (2, N_F32, N_S32); 12612 CVT_VAR (3, N_F32, N_U32); 12613 12614 whole_reg = N_VFP; 12615 12616 /* VFP instructions. 
*/ 12617 CVT_VAR (4, N_F32, N_F64); 12618 CVT_VAR (5, N_F64, N_F32); 12619 CVT_VAR (6, N_S32, N_F64 | key); 12620 CVT_VAR (7, N_U32, N_F64 | key); 12621 CVT_VAR (8, N_F64 | key, N_S32); 12622 CVT_VAR (9, N_F64 | key, N_U32); 12623 /* VFP instructions with bitshift. */ 12624 CVT_VAR (10, N_F32 | key, N_S16); 12625 CVT_VAR (11, N_F32 | key, N_U16); 12626 CVT_VAR (12, N_F64 | key, N_S16); 12627 CVT_VAR (13, N_F64 | key, N_U16); 12628 CVT_VAR (14, N_S16, N_F32 | key); 12629 CVT_VAR (15, N_U16, N_F32 | key); 12630 CVT_VAR (16, N_S16, N_F64 | key); 12631 CVT_VAR (17, N_U16, N_F64 | key); 12632 12633 return -1; 12634#undef CVT_VAR 12635} 12636 12637/* Neon-syntax VFP conversions. */ 12638 12639static void 12640do_vfp_nsyn_cvt (enum neon_shape rs, int flavour) 12641{ 12642 const char *opname = 0; 12643 12644 if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI) 12645 { 12646 /* Conversions with immediate bitshift. */ 12647 const char *enc[] = 12648 { 12649 "ftosls", 12650 "ftouls", 12651 "fsltos", 12652 "fultos", 12653 NULL, 12654 NULL, 12655 "ftosld", 12656 "ftould", 12657 "fsltod", 12658 "fultod", 12659 "fshtos", 12660 "fuhtos", 12661 "fshtod", 12662 "fuhtod", 12663 "ftoshs", 12664 "ftouhs", 12665 "ftoshd", 12666 "ftouhd" 12667 }; 12668 12669 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc)) 12670 { 12671 opname = enc[flavour]; 12672 constraint (inst.operands[0].reg != inst.operands[1].reg, 12673 _("operands 0 and 1 must be the same register")); 12674 inst.operands[1] = inst.operands[2]; 12675 memset (&inst.operands[2], '\0', sizeof (inst.operands[2])); 12676 } 12677 } 12678 else 12679 { 12680 /* Conversions without bitshift. 
*/ 12681 const char *enc[] = 12682 { 12683 "ftosizs", 12684 "ftouizs", 12685 "fsitos", 12686 "fuitos", 12687 "fcvtsd", 12688 "fcvtds", 12689 "ftosizd", 12690 "ftouizd", 12691 "fsitod", 12692 "fuitod" 12693 }; 12694 12695 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc)) 12696 opname = enc[flavour]; 12697 } 12698 12699 if (opname) 12700 do_vfp_nsyn_opcode (opname); 12701} 12702 12703static void 12704do_vfp_nsyn_cvtz (void) 12705{ 12706 enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL); 12707 int flavour = neon_cvt_flavour (rs); 12708 const char *enc[] = 12709 { 12710 "ftosizs", 12711 "ftouizs", 12712 NULL, 12713 NULL, 12714 NULL, 12715 NULL, 12716 "ftosizd", 12717 "ftouizd" 12718 }; 12719 12720 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc) && enc[flavour]) 12721 do_vfp_nsyn_opcode (enc[flavour]); 12722} 12723 12724static void 12725do_neon_cvt (void) 12726{ 12727 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ, 12728 NS_FD, NS_DF, NS_FF, NS_NULL); 12729 int flavour = neon_cvt_flavour (rs); 12730 12731 /* VFP rather than Neon conversions. */ 12732 if (flavour >= 4) 12733 { 12734 do_vfp_nsyn_cvt (rs, flavour); 12735 return; 12736 } 12737 12738 switch (rs) 12739 { 12740 case NS_DDI: 12741 case NS_QQI: 12742 { 12743 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL) 12744 return; 12745 12746 /* Fixed-point conversion with #0 immediate is encoded as an 12747 integer conversion. 
*/ 12748 if (inst.operands[2].present && inst.operands[2].imm == 0) 12749 goto int_encode; 12750 unsigned immbits = 32 - inst.operands[2].imm; 12751 unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 }; 12752 inst.instruction = NEON_ENC_IMMED (inst.instruction); 12753 if (flavour != -1) 12754 inst.instruction |= enctab[flavour]; 12755 inst.instruction |= LOW4 (inst.operands[0].reg) << 12; 12756 inst.instruction |= HI1 (inst.operands[0].reg) << 22; 12757 inst.instruction |= LOW4 (inst.operands[1].reg); 12758 inst.instruction |= HI1 (inst.operands[1].reg) << 5; 12759 inst.instruction |= neon_quad (rs) << 6; 12760 inst.instruction |= 1 << 21; 12761 inst.instruction |= immbits << 16; 12762 12763 inst.instruction = neon_dp_fixup (inst.instruction); 12764 } 12765 break; 12766 12767 case NS_DD: 12768 case NS_QQ: 12769 int_encode: 12770 { 12771 unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 }; 12772 12773 inst.instruction = NEON_ENC_INTEGER (inst.instruction); 12774 12775 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL) 12776 return; 12777 12778 if (flavour != -1) 12779 inst.instruction |= enctab[flavour]; 12780 12781 inst.instruction |= LOW4 (inst.operands[0].reg) << 12; 12782 inst.instruction |= HI1 (inst.operands[0].reg) << 22; 12783 inst.instruction |= LOW4 (inst.operands[1].reg); 12784 inst.instruction |= HI1 (inst.operands[1].reg) << 5; 12785 inst.instruction |= neon_quad (rs) << 6; 12786 inst.instruction |= 2 << 18; 12787 12788 inst.instruction = neon_dp_fixup (inst.instruction); 12789 } 12790 break; 12791 12792 default: 12793 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). 
*/ 12794 do_vfp_nsyn_cvt (rs, flavour); 12795 } 12796} 12797 12798static void 12799neon_move_immediate (void) 12800{ 12801 enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL); 12802 struct neon_type_el et = neon_check_type (2, rs, 12803 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK); 12804 unsigned immlo, immhi = 0, immbits; 12805 int op, cmode, float_p; 12806 12807 constraint (et.type == NT_invtype, 12808 _("operand size must be specified for immediate VMOV")); 12809 12810 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */ 12811 op = (inst.instruction & (1 << 5)) != 0; 12812 12813 immlo = inst.operands[1].imm; 12814 if (inst.operands[1].regisimm) 12815 immhi = inst.operands[1].reg; 12816 12817 constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0, 12818 _("immediate has bits set outside the operand size")); 12819 12820 float_p = inst.operands[1].immisfloat; 12821 12822 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op, 12823 et.size, et.type)) == FAIL) 12824 { 12825 /* Invert relevant bits only. */ 12826 neon_invert_size (&immlo, &immhi, et.size); 12827 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable 12828 with one or the other; those cases are caught by 12829 neon_cmode_for_move_imm. 
*/ 12830 op = !op; 12831 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, 12832 &op, et.size, et.type)) == FAIL) 12833 { 12834 first_error (_("immediate out of range")); 12835 return; 12836 } 12837 } 12838 12839 inst.instruction &= ~(1 << 5); 12840 inst.instruction |= op << 5; 12841 12842 inst.instruction |= LOW4 (inst.operands[0].reg) << 12; 12843 inst.instruction |= HI1 (inst.operands[0].reg) << 22; 12844 inst.instruction |= neon_quad (rs) << 6; 12845 inst.instruction |= cmode << 8; 12846 12847 neon_write_immbits (immbits); 12848} 12849 12850static void 12851do_neon_mvn (void) 12852{ 12853 if (inst.operands[1].isreg) 12854 { 12855 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL); 12856 12857 inst.instruction = NEON_ENC_INTEGER (inst.instruction); 12858 inst.instruction |= LOW4 (inst.operands[0].reg) << 12; 12859 inst.instruction |= HI1 (inst.operands[0].reg) << 22; 12860 inst.instruction |= LOW4 (inst.operands[1].reg); 12861 inst.instruction |= HI1 (inst.operands[1].reg) << 5; 12862 inst.instruction |= neon_quad (rs) << 6; 12863 } 12864 else 12865 { 12866 inst.instruction = NEON_ENC_IMMED (inst.instruction); 12867 neon_move_immediate (); 12868 } 12869 12870 inst.instruction = neon_dp_fixup (inst.instruction); 12871} 12872 12873/* Encode instructions of form: 12874 12875 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0| 12876 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | 12877 12878*/ 12879 12880static void 12881neon_mixed_length (struct neon_type_el et, unsigned size) 12882{ 12883 inst.instruction |= LOW4 (inst.operands[0].reg) << 12; 12884 inst.instruction |= HI1 (inst.operands[0].reg) << 22; 12885 inst.instruction |= LOW4 (inst.operands[1].reg) << 16; 12886 inst.instruction |= HI1 (inst.operands[1].reg) << 7; 12887 inst.instruction |= LOW4 (inst.operands[2].reg); 12888 inst.instruction |= HI1 (inst.operands[2].reg) << 5; 12889 inst.instruction |= (et.type == NT_unsigned) << 24; 12890 inst.instruction |= neon_logbits (size) 
<< 20; 12891 12892 inst.instruction = neon_dp_fixup (inst.instruction); 12893} 12894 12895static void 12896do_neon_dyadic_long (void) 12897{ 12898 /* FIXME: Type checking for lengthening op. */ 12899 struct neon_type_el et = neon_check_type (3, NS_QDD, 12900 N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY); 12901 neon_mixed_length (et, et.size); 12902} 12903 12904static void 12905do_neon_abal (void) 12906{ 12907 struct neon_type_el et = neon_check_type (3, NS_QDD, 12908 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY); 12909 neon_mixed_length (et, et.size); 12910} 12911 12912static void 12913neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes) 12914{ 12915 if (inst.operands[2].isscalar) 12916 { 12917 struct neon_type_el et = neon_check_type (3, NS_QDS, 12918 N_EQK | N_DBL, N_EQK, regtypes | N_KEY); 12919 inst.instruction = NEON_ENC_SCALAR (inst.instruction); 12920 neon_mul_mac (et, et.type == NT_unsigned); 12921 } 12922 else 12923 { 12924 struct neon_type_el et = neon_check_type (3, NS_QDD, 12925 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY); 12926 inst.instruction = NEON_ENC_INTEGER (inst.instruction); 12927 neon_mixed_length (et, et.size); 12928 } 12929} 12930 12931static void 12932do_neon_mac_maybe_scalar_long (void) 12933{ 12934 neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32); 12935} 12936 12937static void 12938do_neon_dyadic_wide (void) 12939{ 12940 struct neon_type_el et = neon_check_type (3, NS_QQD, 12941 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY); 12942 neon_mixed_length (et, et.size); 12943} 12944 12945static void 12946do_neon_dyadic_narrow (void) 12947{ 12948 struct neon_type_el et = neon_check_type (3, NS_QDD, 12949 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY); 12950 /* Operand sign is unimportant, and the U bit is part of the opcode, 12951 so force the operand type to integer. 
*/ 12952 et.type = NT_integer; 12953 neon_mixed_length (et, et.size / 2); 12954} 12955 12956static void 12957do_neon_mul_sat_scalar_long (void) 12958{ 12959 neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32); 12960} 12961 12962static void 12963do_neon_vmull (void) 12964{ 12965 if (inst.operands[2].isscalar) 12966 do_neon_mac_maybe_scalar_long (); 12967 else 12968 { 12969 struct neon_type_el et = neon_check_type (3, NS_QDD, 12970 N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY); 12971 if (et.type == NT_poly) 12972 inst.instruction = NEON_ENC_POLY (inst.instruction); 12973 else 12974 inst.instruction = NEON_ENC_INTEGER (inst.instruction); 12975 /* For polynomial encoding, size field must be 0b00 and the U bit must be 12976 zero. Should be OK as-is. */ 12977 neon_mixed_length (et, et.size); 12978 } 12979} 12980 12981static void 12982do_neon_ext (void) 12983{ 12984 enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL); 12985 struct neon_type_el et = neon_check_type (3, rs, 12986 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY); 12987 unsigned imm = (inst.operands[3].imm * et.size) / 8; 12988 constraint (imm >= (neon_quad (rs) ? 
16 : 8), _("shift out of range")); 12989 inst.instruction |= LOW4 (inst.operands[0].reg) << 12; 12990 inst.instruction |= HI1 (inst.operands[0].reg) << 22; 12991 inst.instruction |= LOW4 (inst.operands[1].reg) << 16; 12992 inst.instruction |= HI1 (inst.operands[1].reg) << 7; 12993 inst.instruction |= LOW4 (inst.operands[2].reg); 12994 inst.instruction |= HI1 (inst.operands[2].reg) << 5; 12995 inst.instruction |= neon_quad (rs) << 6; 12996 inst.instruction |= imm << 8; 12997 12998 inst.instruction = neon_dp_fixup (inst.instruction); 12999} 13000 13001static void 13002do_neon_rev (void) 13003{ 13004 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL); 13005 struct neon_type_el et = neon_check_type (2, rs, 13006 N_EQK, N_8 | N_16 | N_32 | N_KEY); 13007 unsigned op = (inst.instruction >> 7) & 3; 13008 /* N (width of reversed regions) is encoded as part of the bitmask. We 13009 extract it here to check the elements to be reversed are smaller. 13010 Otherwise we'd get a reserved instruction. */ 13011 unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 
64 : 0; 13012 assert (elsize != 0); 13013 constraint (et.size >= elsize, 13014 _("elements must be smaller than reversal region")); 13015 neon_two_same (neon_quad (rs), 1, et.size); 13016} 13017 13018static void 13019do_neon_dup (void) 13020{ 13021 if (inst.operands[1].isscalar) 13022 { 13023 enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL); 13024 struct neon_type_el et = neon_check_type (2, rs, 13025 N_EQK, N_8 | N_16 | N_32 | N_KEY); 13026 unsigned sizebits = et.size >> 3; 13027 unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg); 13028 int logsize = neon_logbits (et.size); 13029 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize; 13030 13031 if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL) 13032 return; 13033 13034 inst.instruction = NEON_ENC_SCALAR (inst.instruction); 13035 inst.instruction |= LOW4 (inst.operands[0].reg) << 12; 13036 inst.instruction |= HI1 (inst.operands[0].reg) << 22; 13037 inst.instruction |= LOW4 (dm); 13038 inst.instruction |= HI1 (dm) << 5; 13039 inst.instruction |= neon_quad (rs) << 6; 13040 inst.instruction |= x << 17; 13041 inst.instruction |= sizebits << 16; 13042 13043 inst.instruction = neon_dp_fixup (inst.instruction); 13044 } 13045 else 13046 { 13047 enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL); 13048 struct neon_type_el et = neon_check_type (2, rs, 13049 N_8 | N_16 | N_32 | N_KEY, N_EQK); 13050 /* Duplicate ARM register to lanes of vector. 
*/ 13051 inst.instruction = NEON_ENC_ARMREG (inst.instruction); 13052 switch (et.size) 13053 { 13054 case 8: inst.instruction |= 0x400000; break; 13055 case 16: inst.instruction |= 0x000020; break; 13056 case 32: inst.instruction |= 0x000000; break; 13057 default: break; 13058 } 13059 inst.instruction |= LOW4 (inst.operands[1].reg) << 12; 13060 inst.instruction |= LOW4 (inst.operands[0].reg) << 16; 13061 inst.instruction |= HI1 (inst.operands[0].reg) << 7; 13062 inst.instruction |= neon_quad (rs) << 21; 13063 /* The encoding for this instruction is identical for the ARM and Thumb 13064 variants, except for the condition field. */ 13065 do_vfp_cond_or_thumb (); 13066 } 13067} 13068 13069/* VMOV has particularly many variations. It can be one of: 13070 0. VMOV<c><q> <Qd>, <Qm> 13071 1. VMOV<c><q> <Dd>, <Dm> 13072 (Register operations, which are VORR with Rm = Rn.) 13073 2. VMOV<c><q>.<dt> <Qd>, #<imm> 13074 3. VMOV<c><q>.<dt> <Dd>, #<imm> 13075 (Immediate loads.) 13076 4. VMOV<c><q>.<size> <Dn[x]>, <Rd> 13077 (ARM register to scalar.) 13078 5. VMOV<c><q> <Dm>, <Rd>, <Rn> 13079 (Two ARM registers to vector.) 13080 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]> 13081 (Scalar to ARM register.) 13082 7. VMOV<c><q> <Rd>, <Rn>, <Dm> 13083 (Vector to two ARM registers.) 13084 8. VMOV.F32 <Sd>, <Sm> 13085 9. VMOV.F64 <Dd>, <Dm> 13086 (VFP register moves.) 13087 10. VMOV.F32 <Sd>, #imm 13088 11. VMOV.F64 <Dd>, #imm 13089 (VFP float immediate load.) 13090 12. VMOV <Rd>, <Sm> 13091 (VFP single to ARM reg.) 13092 13. VMOV <Sd>, <Rm> 13093 (ARM reg to VFP single.) 13094 14. VMOV <Rd>, <Re>, <Sn>, <Sm> 13095 (Two ARM regs to two VFP singles.) 13096 15. VMOV <Sd>, <Se>, <Rn>, <Rm> 13097 (Two VFP singles to two ARM regs.) 13098 13099 These cases can be disambiguated using neon_select_shape, except cases 1/9 13100 and 3/11 which depend on the operand type too. 13101 13102 All the encoded bits are hardcoded by this function. 
13103 13104 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!). 13105 Cases 5, 7 may be used with VFPv2 and above. 13106 13107 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you 13108 can specify a type where it doesn't make sense to, and is ignored). 13109*/ 13110 13111static void 13112do_neon_mov (void) 13113{ 13114 enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD, 13115 NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR, 13116 NS_NULL); 13117 struct neon_type_el et; 13118 const char *ldconst = 0; 13119 13120 switch (rs) 13121 { 13122 case NS_DD: /* case 1/9. */ 13123 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY); 13124 /* It is not an error here if no type is given. */ 13125 inst.error = NULL; 13126 if (et.type == NT_float && et.size == 64) 13127 { 13128 do_vfp_nsyn_opcode ("fcpyd"); 13129 break; 13130 } 13131 /* fall through. */ 13132 13133 case NS_QQ: /* case 0/1. */ 13134 { 13135 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL) 13136 return; 13137 /* The architecture manual I have doesn't explicitly state which 13138 value the U bit should have for register->register moves, but 13139 the equivalent VORR instruction has U = 0, so do that. */ 13140 inst.instruction = 0x0200110; 13141 inst.instruction |= LOW4 (inst.operands[0].reg) << 12; 13142 inst.instruction |= HI1 (inst.operands[0].reg) << 22; 13143 inst.instruction |= LOW4 (inst.operands[1].reg); 13144 inst.instruction |= HI1 (inst.operands[1].reg) << 5; 13145 inst.instruction |= LOW4 (inst.operands[1].reg) << 16; 13146 inst.instruction |= HI1 (inst.operands[1].reg) << 7; 13147 inst.instruction |= neon_quad (rs) << 6; 13148 13149 inst.instruction = neon_dp_fixup (inst.instruction); 13150 } 13151 break; 13152 13153 case NS_DI: /* case 3/11. 
*/ 13154 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY); 13155 inst.error = NULL; 13156 if (et.type == NT_float && et.size == 64) 13157 { 13158 /* case 11 (fconstd). */ 13159 ldconst = "fconstd"; 13160 goto encode_fconstd; 13161 } 13162 /* fall through. */ 13163 13164 case NS_QI: /* case 2/3. */ 13165 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL) 13166 return; 13167 inst.instruction = 0x0800010; 13168 neon_move_immediate (); 13169 inst.instruction = neon_dp_fixup (inst.instruction); 13170 break; 13171 13172 case NS_SR: /* case 4. */ 13173 { 13174 unsigned bcdebits = 0; 13175 struct neon_type_el et = neon_check_type (2, NS_NULL, 13176 N_8 | N_16 | N_32 | N_KEY, N_EQK); 13177 int logsize = neon_logbits (et.size); 13178 unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg); 13179 unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg); 13180 13181 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1), 13182 _(BAD_FPU)); 13183 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1) 13184 && et.size != 32, _(BAD_FPU)); 13185 constraint (et.type == NT_invtype, _("bad type for scalar")); 13186 constraint (x >= 64 / et.size, _("scalar index out of range")); 13187 13188 switch (et.size) 13189 { 13190 case 8: bcdebits = 0x8; break; 13191 case 16: bcdebits = 0x1; break; 13192 case 32: bcdebits = 0x0; break; 13193 default: ; 13194 } 13195 13196 bcdebits |= x << logsize; 13197 13198 inst.instruction = 0xe000b10; 13199 do_vfp_cond_or_thumb (); 13200 inst.instruction |= LOW4 (dn) << 16; 13201 inst.instruction |= HI1 (dn) << 7; 13202 inst.instruction |= inst.operands[1].reg << 12; 13203 inst.instruction |= (bcdebits & 3) << 5; 13204 inst.instruction |= (bcdebits >> 2) << 21; 13205 } 13206 break; 13207 13208 case NS_DRR: /* case 5 (fmdrr). 
*/ 13209 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2), 13210 _(BAD_FPU)); 13211 13212 inst.instruction = 0xc400b10; 13213 do_vfp_cond_or_thumb (); 13214 inst.instruction |= LOW4 (inst.operands[0].reg); 13215 inst.instruction |= HI1 (inst.operands[0].reg) << 5; 13216 inst.instruction |= inst.operands[1].reg << 12; 13217 inst.instruction |= inst.operands[2].reg << 16; 13218 break; 13219 13220 case NS_RS: /* case 6. */ 13221 { 13222 struct neon_type_el et = neon_check_type (2, NS_NULL, 13223 N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY); 13224 unsigned logsize = neon_logbits (et.size); 13225 unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg); 13226 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg); 13227 unsigned abcdebits = 0; 13228 13229 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1), 13230 _(BAD_FPU)); 13231 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1) 13232 && et.size != 32, _(BAD_FPU)); 13233 constraint (et.type == NT_invtype, _("bad type for scalar")); 13234 constraint (x >= 64 / et.size, _("scalar index out of range")); 13235 13236 switch (et.size) 13237 { 13238 case 8: abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break; 13239 case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break; 13240 case 32: abcdebits = 0x00; break; 13241 default: ; 13242 } 13243 13244 abcdebits |= x << logsize; 13245 inst.instruction = 0xe100b10; 13246 do_vfp_cond_or_thumb (); 13247 inst.instruction |= LOW4 (dn) << 16; 13248 inst.instruction |= HI1 (dn) << 7; 13249 inst.instruction |= inst.operands[0].reg << 12; 13250 inst.instruction |= (abcdebits & 3) << 5; 13251 inst.instruction |= (abcdebits >> 2) << 21; 13252 } 13253 break; 13254 13255 case NS_RRD: /* case 7 (fmrrd). 
*/ 13256 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2), 13257 _(BAD_FPU)); 13258 13259 inst.instruction = 0xc500b10; 13260 do_vfp_cond_or_thumb (); 13261 inst.instruction |= inst.operands[0].reg << 12; 13262 inst.instruction |= inst.operands[1].reg << 16; 13263 inst.instruction |= LOW4 (inst.operands[2].reg); 13264 inst.instruction |= HI1 (inst.operands[2].reg) << 5; 13265 break; 13266 13267 case NS_FF: /* case 8 (fcpys). */ 13268 do_vfp_nsyn_opcode ("fcpys"); 13269 break; 13270 13271 case NS_FI: /* case 10 (fconsts). */ 13272 ldconst = "fconsts"; 13273 encode_fconstd: 13274 if (is_quarter_float (inst.operands[1].imm)) 13275 { 13276 inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm); 13277 do_vfp_nsyn_opcode (ldconst); 13278 } 13279 else 13280 first_error (_("immediate out of range")); 13281 break; 13282 13283 case NS_RF: /* case 12 (fmrs). */ 13284 do_vfp_nsyn_opcode ("fmrs"); 13285 break; 13286 13287 case NS_FR: /* case 13 (fmsr). */ 13288 do_vfp_nsyn_opcode ("fmsr"); 13289 break; 13290 13291 /* The encoders for the fmrrs and fmsrr instructions expect three operands 13292 (one of which is a list), but we have parsed four. Do some fiddling to 13293 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2 13294 expect. */ 13295 case NS_RRFF: /* case 14 (fmrrs). */ 13296 constraint (inst.operands[3].reg != inst.operands[2].reg + 1, 13297 _("VFP registers must be adjacent")); 13298 inst.operands[2].imm = 2; 13299 memset (&inst.operands[3], '\0', sizeof (inst.operands[3])); 13300 do_vfp_nsyn_opcode ("fmrrs"); 13301 break; 13302 13303 case NS_FFRR: /* case 15 (fmsrr). 
*/ 13304 constraint (inst.operands[1].reg != inst.operands[0].reg + 1, 13305 _("VFP registers must be adjacent")); 13306 inst.operands[1] = inst.operands[2]; 13307 inst.operands[2] = inst.operands[3]; 13308 inst.operands[0].imm = 2; 13309 memset (&inst.operands[3], '\0', sizeof (inst.operands[3])); 13310 do_vfp_nsyn_opcode ("fmsrr"); 13311 break; 13312 13313 default: 13314 abort (); 13315 } 13316} 13317 13318static void 13319do_neon_rshift_round_imm (void) 13320{ 13321 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL); 13322 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY); 13323 int imm = inst.operands[2].imm; 13324 13325 /* imm == 0 case is encoded as VMOV for V{R}SHR. */ 13326 if (imm == 0) 13327 { 13328 inst.operands[2].present = 0; 13329 do_neon_mov (); 13330 return; 13331 } 13332 13333 constraint (imm < 1 || (unsigned)imm > et.size, 13334 _("immediate out of range for shift")); 13335 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, 13336 et.size - imm); 13337} 13338 13339static void 13340do_neon_movl (void) 13341{ 13342 struct neon_type_el et = neon_check_type (2, NS_QD, 13343 N_EQK | N_DBL, N_SU_32 | N_KEY); 13344 unsigned sizebits = et.size >> 3; 13345 inst.instruction |= sizebits << 19; 13346 neon_two_same (0, et.type == NT_unsigned, -1); 13347} 13348 13349static void 13350do_neon_trn (void) 13351{ 13352 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL); 13353 struct neon_type_el et = neon_check_type (2, rs, 13354 N_EQK, N_8 | N_16 | N_32 | N_KEY); 13355 inst.instruction = NEON_ENC_INTEGER (inst.instruction); 13356 neon_two_same (neon_quad (rs), 1, et.size); 13357} 13358 13359static void 13360do_neon_zip_uzp (void) 13361{ 13362 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL); 13363 struct neon_type_el et = neon_check_type (2, rs, 13364 N_EQK, N_8 | N_16 | N_32 | N_KEY); 13365 if (rs == NS_DD && et.size == 32) 13366 { 13367 /* Special case: encode as VTRN.32 <Dd>, <Dm>. 
*/ 13368 inst.instruction = N_MNEM_vtrn; 13369 do_neon_trn (); 13370 return; 13371 } 13372 neon_two_same (neon_quad (rs), 1, et.size); 13373} 13374 13375static void 13376do_neon_sat_abs_neg (void) 13377{ 13378 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL); 13379 struct neon_type_el et = neon_check_type (2, rs, 13380 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY); 13381 neon_two_same (neon_quad (rs), 1, et.size); 13382} 13383 13384static void 13385do_neon_pair_long (void) 13386{ 13387 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL); 13388 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY); 13389 /* Unsigned is encoded in OP field (bit 7) for these instruction. */ 13390 inst.instruction |= (et.type == NT_unsigned) << 7; 13391 neon_two_same (neon_quad (rs), 1, et.size); 13392} 13393 13394static void 13395do_neon_recip_est (void) 13396{ 13397 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL); 13398 struct neon_type_el et = neon_check_type (2, rs, 13399 N_EQK | N_FLT, N_F32 | N_U32 | N_KEY); 13400 inst.instruction |= (et.type == NT_float) << 8; 13401 neon_two_same (neon_quad (rs), 1, et.size); 13402} 13403 13404static void 13405do_neon_cls (void) 13406{ 13407 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL); 13408 struct neon_type_el et = neon_check_type (2, rs, 13409 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY); 13410 neon_two_same (neon_quad (rs), 1, et.size); 13411} 13412 13413static void 13414do_neon_clz (void) 13415{ 13416 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL); 13417 struct neon_type_el et = neon_check_type (2, rs, 13418 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY); 13419 neon_two_same (neon_quad (rs), 1, et.size); 13420} 13421 13422static void 13423do_neon_cnt (void) 13424{ 13425 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL); 13426 struct neon_type_el et = neon_check_type (2, rs, 13427 N_EQK | N_INT, N_8 | N_KEY); 13428 neon_two_same (neon_quad (rs), 1, et.size); 
13429} 13430 13431static void 13432do_neon_swp (void) 13433{ 13434 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL); 13435 neon_two_same (neon_quad (rs), 1, -1); 13436} 13437 13438static void 13439do_neon_tbl_tbx (void) 13440{ 13441 unsigned listlenbits; 13442 neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY); 13443 13444 if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4) 13445 { 13446 first_error (_("bad list length for table lookup")); 13447 return; 13448 } 13449 13450 listlenbits = inst.operands[1].imm - 1; 13451 inst.instruction |= LOW4 (inst.operands[0].reg) << 12; 13452 inst.instruction |= HI1 (inst.operands[0].reg) << 22; 13453 inst.instruction |= LOW4 (inst.operands[1].reg) << 16; 13454 inst.instruction |= HI1 (inst.operands[1].reg) << 7; 13455 inst.instruction |= LOW4 (inst.operands[2].reg); 13456 inst.instruction |= HI1 (inst.operands[2].reg) << 5; 13457 inst.instruction |= listlenbits << 8; 13458 13459 inst.instruction = neon_dp_fixup (inst.instruction); 13460} 13461 13462static void 13463do_neon_ldm_stm (void) 13464{ 13465 /* P, U and L bits are part of bitmask. */ 13466 int is_dbmode = (inst.instruction & (1 << 24)) != 0; 13467 unsigned offsetbits = inst.operands[1].imm * 2; 13468 13469 if (inst.operands[1].issingle) 13470 { 13471 do_vfp_nsyn_ldm_stm (is_dbmode); 13472 return; 13473 } 13474 13475 constraint (is_dbmode && !inst.operands[0].writeback, 13476 _("writeback (!) 
must be used for VLDMDB and VSTMDB")); 13477 13478 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16, 13479 _("register list must contain at least 1 and at most 16 " 13480 "registers")); 13481 13482 inst.instruction |= inst.operands[0].reg << 16; 13483 inst.instruction |= inst.operands[0].writeback << 21; 13484 inst.instruction |= LOW4 (inst.operands[1].reg) << 12; 13485 inst.instruction |= HI1 (inst.operands[1].reg) << 22; 13486 13487 inst.instruction |= offsetbits; 13488 13489 do_vfp_cond_or_thumb (); 13490} 13491 13492static void 13493do_neon_ldr_str (void) 13494{ 13495 int is_ldr = (inst.instruction & (1 << 20)) != 0; 13496 13497 if (inst.operands[0].issingle) 13498 { 13499 if (is_ldr) 13500 do_vfp_nsyn_opcode ("flds"); 13501 else 13502 do_vfp_nsyn_opcode ("fsts"); 13503 } 13504 else 13505 { 13506 if (is_ldr) 13507 do_vfp_nsyn_opcode ("fldd"); 13508 else 13509 do_vfp_nsyn_opcode ("fstd"); 13510 } 13511} 13512 13513/* "interleave" version also handles non-interleaving register VLD1/VST1 13514 instructions. */ 13515 13516static void 13517do_neon_ld_st_interleave (void) 13518{ 13519 struct neon_type_el et = neon_check_type (1, NS_NULL, 13520 N_8 | N_16 | N_32 | N_64); 13521 unsigned alignbits = 0; 13522 unsigned idx; 13523 /* The bits in this table go: 13524 0: register stride of one (0) or two (1) 13525 1,2: register list length, minus one (1, 2, 3, 4). 13526 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>). 13527 We use -1 for invalid entries. */ 13528 const int typetable[] = 13529 { 13530 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */ 13531 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */ 13532 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */ 13533 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. 
*/ 13534 }; 13535 int typebits; 13536 13537 if (et.type == NT_invtype) 13538 return; 13539 13540 if (inst.operands[1].immisalign) 13541 switch (inst.operands[1].imm >> 8) 13542 { 13543 case 64: alignbits = 1; break; 13544 case 128: 13545 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3) 13546 goto bad_alignment; 13547 alignbits = 2; 13548 break; 13549 case 256: 13550 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3) 13551 goto bad_alignment; 13552 alignbits = 3; 13553 break; 13554 default: 13555 bad_alignment: 13556 first_error (_("bad alignment")); 13557 return; 13558 } 13559 13560 inst.instruction |= alignbits << 4; 13561 inst.instruction |= neon_logbits (et.size) << 6; 13562 13563 /* Bits [4:6] of the immediate in a list specifier encode register stride 13564 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of 13565 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look 13566 up the right value for "type" in a table based on this value and the given 13567 list style, then stick it back. */ 13568 idx = ((inst.operands[0].imm >> 4) & 7) 13569 | (((inst.instruction >> 8) & 3) << 3); 13570 13571 typebits = typetable[idx]; 13572 13573 constraint (typebits == -1, _("bad list type for instruction")); 13574 13575 inst.instruction &= ~0xf00; 13576 inst.instruction |= typebits << 8; 13577} 13578 13579/* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup. 13580 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0 13581 otherwise. The variable arguments are a list of pairs of legal (size, align) 13582 values, terminated with -1. */ 13583 13584static int 13585neon_alignment_bit (int size, int align, int *do_align, ...) 
13586{ 13587 va_list ap; 13588 int result = FAIL, thissize, thisalign; 13589 13590 if (!inst.operands[1].immisalign) 13591 { 13592 *do_align = 0; 13593 return SUCCESS; 13594 } 13595 13596 va_start (ap, do_align); 13597 13598 do 13599 { 13600 thissize = va_arg (ap, int); 13601 if (thissize == -1) 13602 break; 13603 thisalign = va_arg (ap, int); 13604 13605 if (size == thissize && align == thisalign) 13606 result = SUCCESS; 13607 } 13608 while (result != SUCCESS); 13609 13610 va_end (ap); 13611 13612 if (result == SUCCESS) 13613 *do_align = 1; 13614 else 13615 first_error (_("unsupported alignment for instruction")); 13616 13617 return result; 13618} 13619 13620static void 13621do_neon_ld_st_lane (void) 13622{ 13623 struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32); 13624 int align_good, do_align = 0; 13625 int logsize = neon_logbits (et.size); 13626 int align = inst.operands[1].imm >> 8; 13627 int n = (inst.instruction >> 8) & 3; 13628 int max_el = 64 / et.size; 13629 13630 if (et.type == NT_invtype) 13631 return; 13632 13633 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1, 13634 _("bad list length")); 13635 constraint (NEON_LANE (inst.operands[0].imm) >= max_el, 13636 _("scalar index out of range")); 13637 constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2 13638 && et.size == 8, 13639 _("stride of 2 unavailable when element size is 8")); 13640 13641 switch (n) 13642 { 13643 case 0: /* VLD1 / VST1. */ 13644 align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16, 13645 32, 32, -1); 13646 if (align_good == FAIL) 13647 return; 13648 if (do_align) 13649 { 13650 unsigned alignbits = 0; 13651 switch (et.size) 13652 { 13653 case 16: alignbits = 0x1; break; 13654 case 32: alignbits = 0x3; break; 13655 default: ; 13656 } 13657 inst.instruction |= alignbits << 4; 13658 } 13659 break; 13660 13661 case 1: /* VLD2 / VST2. 
*/ 13662 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32, 13663 32, 64, -1); 13664 if (align_good == FAIL) 13665 return; 13666 if (do_align) 13667 inst.instruction |= 1 << 4; 13668 break; 13669 13670 case 2: /* VLD3 / VST3. */ 13671 constraint (inst.operands[1].immisalign, 13672 _("can't use alignment with this instruction")); 13673 break; 13674 13675 case 3: /* VLD4 / VST4. */ 13676 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32, 13677 16, 64, 32, 64, 32, 128, -1); 13678 if (align_good == FAIL) 13679 return; 13680 if (do_align) 13681 { 13682 unsigned alignbits = 0; 13683 switch (et.size) 13684 { 13685 case 8: alignbits = 0x1; break; 13686 case 16: alignbits = 0x1; break; 13687 case 32: alignbits = (align == 64) ? 0x1 : 0x2; break; 13688 default: ; 13689 } 13690 inst.instruction |= alignbits << 4; 13691 } 13692 break; 13693 13694 default: ; 13695 } 13696 13697 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */ 13698 if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2) 13699 inst.instruction |= 1 << (4 + logsize); 13700 13701 inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5); 13702 inst.instruction |= logsize << 10; 13703} 13704 13705/* Encode single n-element structure to all lanes VLD<n> instructions. */ 13706 13707static void 13708do_neon_ld_dup (void) 13709{ 13710 struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32); 13711 int align_good, do_align = 0; 13712 13713 if (et.type == NT_invtype) 13714 return; 13715 13716 switch ((inst.instruction >> 8) & 3) 13717 { 13718 case 0: /* VLD1. 
*/ 13719 assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2); 13720 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8, 13721 &do_align, 16, 16, 32, 32, -1); 13722 if (align_good == FAIL) 13723 return; 13724 switch (NEON_REGLIST_LENGTH (inst.operands[0].imm)) 13725 { 13726 case 1: break; 13727 case 2: inst.instruction |= 1 << 5; break; 13728 default: first_error (_("bad list length")); return; 13729 } 13730 inst.instruction |= neon_logbits (et.size) << 6; 13731 break; 13732 13733 case 1: /* VLD2. */ 13734 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8, 13735 &do_align, 8, 16, 16, 32, 32, 64, -1); 13736 if (align_good == FAIL) 13737 return; 13738 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2, 13739 _("bad list length")); 13740 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2) 13741 inst.instruction |= 1 << 5; 13742 inst.instruction |= neon_logbits (et.size) << 6; 13743 break; 13744 13745 case 2: /* VLD3. */ 13746 constraint (inst.operands[1].immisalign, 13747 _("can't use alignment with this instruction")); 13748 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3, 13749 _("bad list length")); 13750 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2) 13751 inst.instruction |= 1 << 5; 13752 inst.instruction |= neon_logbits (et.size) << 6; 13753 break; 13754 13755 case 3: /* VLD4. 
*/ 13756 { 13757 int align = inst.operands[1].imm >> 8; 13758 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32, 13759 16, 64, 32, 64, 32, 128, -1); 13760 if (align_good == FAIL) 13761 return; 13762 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4, 13763 _("bad list length")); 13764 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2) 13765 inst.instruction |= 1 << 5; 13766 if (et.size == 32 && align == 128) 13767 inst.instruction |= 0x3 << 6; 13768 else 13769 inst.instruction |= neon_logbits (et.size) << 6; 13770 } 13771 break; 13772 13773 default: ; 13774 } 13775 13776 inst.instruction |= do_align << 4; 13777} 13778 13779/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those 13780 apart from bits [11:4]. */ 13781 13782static void 13783do_neon_ldx_stx (void) 13784{ 13785 switch (NEON_LANE (inst.operands[0].imm)) 13786 { 13787 case NEON_INTERLEAVE_LANES: 13788 inst.instruction = NEON_ENC_INTERLV (inst.instruction); 13789 do_neon_ld_st_interleave (); 13790 break; 13791 13792 case NEON_ALL_LANES: 13793 inst.instruction = NEON_ENC_DUP (inst.instruction); 13794 do_neon_ld_dup (); 13795 break; 13796 13797 default: 13798 inst.instruction = NEON_ENC_LANE (inst.instruction); 13799 do_neon_ld_st_lane (); 13800 } 13801 13802 /* L bit comes from bit mask. 
*/ 13803 inst.instruction |= LOW4 (inst.operands[0].reg) << 12; 13804 inst.instruction |= HI1 (inst.operands[0].reg) << 22; 13805 inst.instruction |= inst.operands[1].reg << 16; 13806 13807 if (inst.operands[1].postind) 13808 { 13809 int postreg = inst.operands[1].imm & 0xf; 13810 constraint (!inst.operands[1].immisreg, 13811 _("post-index must be a register")); 13812 constraint (postreg == 0xd || postreg == 0xf, 13813 _("bad register for post-index")); 13814 inst.instruction |= postreg; 13815 } 13816 else if (inst.operands[1].writeback) 13817 { 13818 inst.instruction |= 0xd; 13819 } 13820 else 13821 inst.instruction |= 0xf; 13822 13823 if (thumb_mode) 13824 inst.instruction |= 0xf9000000; 13825 else 13826 inst.instruction |= 0xf4000000; 13827} 13828 13829 13830/* Overall per-instruction processing. */ 13831 13832/* We need to be able to fix up arbitrary expressions in some statements. 13833 This is so that we can handle symbols that are an arbitrary distance from 13834 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask), 13835 which returns part of an address in a form which will be valid for 13836 a data instruction. We do this by pushing the expression into a symbol 13837 in the expr_section, and creating a fix for that. */ 13838 13839static void 13840fix_new_arm (fragS * frag, 13841 int where, 13842 short int size, 13843 expressionS * exp, 13844 int pc_rel, 13845 int reloc) 13846{ 13847 fixS * new_fix; 13848 13849 switch (exp->X_op) 13850 { 13851 case O_constant: 13852 case O_symbol: 13853 case O_add: 13854 case O_subtract: 13855 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc); 13856 break; 13857 13858 default: 13859 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0, 13860 pc_rel, reloc); 13861 break; 13862 } 13863 13864 /* Mark whether the fix is to a THUMB instruction, or an ARM 13865 instruction. 
*/ 13866 new_fix->tc_fix_data = thumb_mode; 13867} 13868 13869/* Create a frg for an instruction requiring relaxation. */ 13870static void 13871output_relax_insn (void) 13872{ 13873 char * to; 13874 symbolS *sym; 13875 int offset; 13876 13877 /* The size of the instruction is unknown, so tie the debug info to the 13878 start of the instruction. */ 13879 dwarf2_emit_insn (0); 13880 13881 switch (inst.reloc.exp.X_op) 13882 { 13883 case O_symbol: 13884 sym = inst.reloc.exp.X_add_symbol; 13885 offset = inst.reloc.exp.X_add_number; 13886 break; 13887 case O_constant: 13888 sym = NULL; 13889 offset = inst.reloc.exp.X_add_number; 13890 break; 13891 default: 13892 sym = make_expr_symbol (&inst.reloc.exp); 13893 offset = 0; 13894 break; 13895 } 13896 to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE, 13897 inst.relax, sym, offset, NULL/*offset, opcode*/); 13898 md_number_to_chars (to, inst.instruction, THUMB_SIZE); 13899} 13900 13901/* Write a 32-bit thumb instruction to buf. */ 13902static void 13903put_thumb32_insn (char * buf, unsigned long insn) 13904{ 13905 md_number_to_chars (buf, insn >> 16, THUMB_SIZE); 13906 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE); 13907} 13908 13909static void 13910output_inst (const char * str) 13911{ 13912 char * to = NULL; 13913 13914 if (inst.error) 13915 { 13916 as_bad ("%s -- `%s'", inst.error, str); 13917 return; 13918 } 13919 if (inst.relax) { 13920 output_relax_insn(); 13921 return; 13922 } 13923 if (inst.size == 0) 13924 return; 13925 13926 to = frag_more (inst.size); 13927 13928 if (thumb_mode && (inst.size > THUMB_SIZE)) 13929 { 13930 assert (inst.size == (2 * THUMB_SIZE)); 13931 put_thumb32_insn (to, inst.instruction); 13932 } 13933 else if (inst.size > INSN_SIZE) 13934 { 13935 assert (inst.size == (2 * INSN_SIZE)); 13936 md_number_to_chars (to, inst.instruction, INSN_SIZE); 13937 md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE); 13938 } 13939 else 13940 md_number_to_chars (to, 
inst.instruction, inst.size); 13941 13942 if (inst.reloc.type != BFD_RELOC_UNUSED) 13943 fix_new_arm (frag_now, to - frag_now->fr_literal, 13944 inst.size, & inst.reloc.exp, inst.reloc.pc_rel, 13945 inst.reloc.type); 13946 13947 dwarf2_emit_insn (inst.size); 13948} 13949 13950/* Tag values used in struct asm_opcode's tag field. */ 13951enum opcode_tag 13952{ 13953 OT_unconditional, /* Instruction cannot be conditionalized. 13954 The ARM condition field is still 0xE. */ 13955 OT_unconditionalF, /* Instruction cannot be conditionalized 13956 and carries 0xF in its ARM condition field. */ 13957 OT_csuffix, /* Instruction takes a conditional suffix. */ 13958 OT_csuffixF, /* Some forms of the instruction take a conditional 13959 suffix, others place 0xF where the condition field 13960 would be. */ 13961 OT_cinfix3, /* Instruction takes a conditional infix, 13962 beginning at character index 3. (In 13963 unified mode, it becomes a suffix.) */ 13964 OT_cinfix3_deprecated, /* The same as OT_cinfix3. This is used for 13965 tsts, cmps, cmns, and teqs. */ 13966 OT_cinfix3_legacy, /* Legacy instruction takes a conditional infix at 13967 character index 3, even in unified mode. Used for 13968 legacy instructions where suffix and infix forms 13969 may be ambiguous. */ 13970 OT_csuf_or_in3, /* Instruction takes either a conditional 13971 suffix or an infix at character index 3. */ 13972 OT_odd_infix_unc, /* This is the unconditional variant of an 13973 instruction that takes a conditional infix 13974 at an unusual position. In unified mode, 13975 this variant will accept a suffix. */ 13976 OT_odd_infix_0 /* Values greater than or equal to OT_odd_infix_0 13977 are the conditional variants of instructions that 13978 take conditional infixes in unusual positions. 13979 The infix appears at character index 13980 (tag - OT_odd_infix_0). These are not accepted 13981 in unified mode. 
*/ 13982}; 13983 13984/* Subroutine of md_assemble, responsible for looking up the primary 13985 opcode from the mnemonic the user wrote. STR points to the 13986 beginning of the mnemonic. 13987 13988 This is not simply a hash table lookup, because of conditional 13989 variants. Most instructions have conditional variants, which are 13990 expressed with a _conditional affix_ to the mnemonic. If we were 13991 to encode each conditional variant as a literal string in the opcode 13992 table, it would have approximately 20,000 entries. 13993 13994 Most mnemonics take this affix as a suffix, and in unified syntax, 13995 'most' is upgraded to 'all'. However, in the divided syntax, some 13996 instructions take the affix as an infix, notably the s-variants of 13997 the arithmetic instructions. Of those instructions, all but six 13998 have the infix appear after the third character of the mnemonic. 13999 14000 Accordingly, the algorithm for looking up primary opcodes given 14001 an identifier is: 14002 14003 1. Look up the identifier in the opcode table. 14004 If we find a match, go to step U. 14005 14006 2. Look up the last two characters of the identifier in the 14007 conditions table. If we find a match, look up the first N-2 14008 characters of the identifier in the opcode table. If we 14009 find a match, go to step CE. 14010 14011 3. Look up the fourth and fifth characters of the identifier in 14012 the conditions table. If we find a match, extract those 14013 characters from the identifier, and look up the remaining 14014 characters in the opcode table. If we find a match, go 14015 to step CM. 14016 14017 4. Fail. 14018 14019 U. Examine the tag field of the opcode structure, in case this is 14020 one of the six instructions with its conditional infix in an 14021 unusual place. If it is, the tag tells us where to find the 14022 infix; look it up in the conditions table and set inst.cond 14023 accordingly. Otherwise, this is an unconditional instruction. 
14024 Again set inst.cond accordingly. Return the opcode structure. 14025 14026 CE. Examine the tag field to make sure this is an instruction that 14027 should receive a conditional suffix. If it is not, fail. 14028 Otherwise, set inst.cond from the suffix we already looked up, 14029 and return the opcode structure. 14030 14031 CM. Examine the tag field to make sure this is an instruction that 14032 should receive a conditional infix after the third character. 14033 If it is not, fail. Otherwise, undo the edits to the current 14034 line of input and proceed as for case CE. */ 14035 14036static const struct asm_opcode * 14037opcode_lookup (char **str) 14038{ 14039 char *end, *base; 14040 char *affix; 14041 const struct asm_opcode *opcode; 14042 const struct asm_cond *cond; 14043 char save[2]; 14044 bfd_boolean neon_supported; 14045 14046 neon_supported = ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1); 14047 14048 /* Scan up to the end of the mnemonic, which must end in white space, 14049 '.' (in unified mode, or for Neon instructions), or end of string. */ 14050 for (base = end = *str; *end != '\0'; end++) 14051 if (*end == ' ' || ((unified_syntax || neon_supported) && *end == '.')) 14052 break; 14053 14054 if (end == base) 14055 return 0; 14056 14057 /* Handle a possible width suffix and/or Neon type suffix. */ 14058 if (end[0] == '.') 14059 { 14060 int offset = 2; 14061 14062 /* The .w and .n suffixes are only valid if the unified syntax is in 14063 use. */ 14064 if (unified_syntax && end[1] == 'w') 14065 inst.size_req = 4; 14066 else if (unified_syntax && end[1] == 'n') 14067 inst.size_req = 2; 14068 else 14069 offset = 0; 14070 14071 inst.vectype.elems = 0; 14072 14073 *str = end + offset; 14074 14075 if (end[offset] == '.') 14076 { 14077 /* See if we have a Neon type suffix (possible in either unified or 14078 non-unified ARM syntax mode). 
*/ 14079 if (parse_neon_type (&inst.vectype, str) == FAIL) 14080 return 0; 14081 } 14082 else if (end[offset] != '\0' && end[offset] != ' ') 14083 return 0; 14084 } 14085 else 14086 *str = end; 14087 14088 /* Look for unaffixed or special-case affixed mnemonic. */ 14089 opcode = hash_find_n (arm_ops_hsh, base, end - base); 14090 if (opcode) 14091 { 14092 /* step U */ 14093 if (opcode->tag < OT_odd_infix_0) 14094 { 14095 inst.cond = COND_ALWAYS; 14096 return opcode; 14097 } 14098 14099 if (unified_syntax) 14100 as_warn (_("conditional infixes are deprecated in unified syntax")); 14101 affix = base + (opcode->tag - OT_odd_infix_0); 14102 cond = hash_find_n (arm_cond_hsh, affix, 2); 14103 assert (cond); 14104 14105 inst.cond = cond->value; 14106 return opcode; 14107 } 14108 14109 /* Cannot have a conditional suffix on a mnemonic of less than two 14110 characters. */ 14111 if (end - base < 3) 14112 return 0; 14113 14114 /* Look for suffixed mnemonic. */ 14115 affix = end - 2; 14116 cond = hash_find_n (arm_cond_hsh, affix, 2); 14117 opcode = hash_find_n (arm_ops_hsh, base, affix - base); 14118 if (opcode && cond) 14119 { 14120 /* step CE */ 14121 switch (opcode->tag) 14122 { 14123 case OT_cinfix3_legacy: 14124 /* Ignore conditional suffixes matched on infix only mnemonics. 
*/ 14125 break; 14126 14127 case OT_cinfix3: 14128 case OT_cinfix3_deprecated: 14129 case OT_odd_infix_unc: 14130 if (!unified_syntax) 14131 return 0; 14132 /* else fall through */ 14133 14134 case OT_csuffix: 14135 case OT_csuffixF: 14136 case OT_csuf_or_in3: 14137 inst.cond = cond->value; 14138 return opcode; 14139 14140 case OT_unconditional: 14141 case OT_unconditionalF: 14142 if (thumb_mode) 14143 { 14144 inst.cond = cond->value; 14145 } 14146 else 14147 { 14148 /* delayed diagnostic */ 14149 inst.error = BAD_COND; 14150 inst.cond = COND_ALWAYS; 14151 } 14152 return opcode; 14153 14154 default: 14155 return 0; 14156 } 14157 } 14158 14159 /* Cannot have a usual-position infix on a mnemonic of less than 14160 six characters (five would be a suffix). */ 14161 if (end - base < 6) 14162 return 0; 14163 14164 /* Look for infixed mnemonic in the usual position. */ 14165 affix = base + 3; 14166 cond = hash_find_n (arm_cond_hsh, affix, 2); 14167 if (!cond) 14168 return 0; 14169 14170 memcpy (save, affix, 2); 14171 memmove (affix, affix + 2, (end - affix) - 2); 14172 opcode = hash_find_n (arm_ops_hsh, base, (end - base) - 2); 14173 memmove (affix + 2, affix, (end - affix) - 2); 14174 memcpy (affix, save, 2); 14175 14176 if (opcode 14177 && (opcode->tag == OT_cinfix3 14178 || opcode->tag == OT_cinfix3_deprecated 14179 || opcode->tag == OT_csuf_or_in3 14180 || opcode->tag == OT_cinfix3_legacy)) 14181 { 14182 /* step CM */ 14183 if (unified_syntax 14184 && (opcode->tag == OT_cinfix3 14185 || opcode->tag == OT_cinfix3_deprecated)) 14186 as_warn (_("conditional infixes are deprecated in unified syntax")); 14187 14188 inst.cond = cond->value; 14189 return opcode; 14190 } 14191 14192 return 0; 14193} 14194 14195void 14196md_assemble (char *str) 14197{ 14198 char *p = str; 14199 const struct asm_opcode * opcode; 14200 14201 /* Align the previous label if needed. 
*/ 14202 if (last_label_seen != NULL) 14203 { 14204 symbol_set_frag (last_label_seen, frag_now); 14205 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ()); 14206 S_SET_SEGMENT (last_label_seen, now_seg); 14207 } 14208 14209 memset (&inst, '\0', sizeof (inst)); 14210 inst.reloc.type = BFD_RELOC_UNUSED; 14211 14212 opcode = opcode_lookup (&p); 14213 if (!opcode) 14214 { 14215 /* It wasn't an instruction, but it might be a register alias of 14216 the form alias .req reg, or a Neon .dn/.qn directive. */ 14217 if (!create_register_alias (str, p) 14218 && !create_neon_reg_alias (str, p)) 14219 as_bad (_("bad instruction `%s'"), str); 14220 14221 return; 14222 } 14223 14224 if (opcode->tag == OT_cinfix3_deprecated) 14225 as_warn (_("s suffix on comparison instruction is deprecated")); 14226 14227 /* The value which unconditional instructions should have in place of the 14228 condition field. */ 14229 inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1; 14230 14231 if (thumb_mode) 14232 { 14233 arm_feature_set variant; 14234 14235 variant = cpu_variant; 14236 /* Only allow coprocessor instructions on Thumb-2 capable devices. */ 14237 if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2)) 14238 ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard); 14239 /* Check that this instruction is supported for this CPU. */ 14240 if (!opcode->tvariant 14241 || (thumb_mode == 1 14242 && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant))) 14243 { 14244 as_bad (_("selected processor does not support `%s'"), str); 14245 return; 14246 } 14247 if (inst.cond != COND_ALWAYS && !unified_syntax 14248 && opcode->tencode != do_t_branch) 14249 { 14250 as_bad (_("Thumb does not support conditional execution")); 14251 return; 14252 } 14253 14254 if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2) && !inst.size_req) 14255 { 14256 /* Implicit require narrow instructions on Thumb-1. This avoids 14257 relaxation accidentally introducing Thumb-2 instructions. 
*/ 14258 if (opcode->tencode != do_t_blx && opcode->tencode != do_t_branch23) 14259 inst.size_req = 2; 14260 } 14261 14262 /* Check conditional suffixes. */ 14263 if (current_it_mask) 14264 { 14265 int cond; 14266 cond = current_cc ^ ((current_it_mask >> 4) & 1) ^ 1; 14267 current_it_mask <<= 1; 14268 current_it_mask &= 0x1f; 14269 /* The BKPT instruction is unconditional even in an IT block. */ 14270 if (!inst.error 14271 && cond != inst.cond && opcode->tencode != do_t_bkpt) 14272 { 14273 as_bad (_("incorrect condition in IT block")); 14274 return; 14275 } 14276 } 14277 else if (inst.cond != COND_ALWAYS && opcode->tencode != do_t_branch) 14278 { 14279 as_bad (_("thumb conditional instrunction not in IT block")); 14280 return; 14281 } 14282 14283 mapping_state (MAP_THUMB); 14284 inst.instruction = opcode->tvalue; 14285 14286 if (!parse_operands (p, opcode->operands)) 14287 opcode->tencode (); 14288 14289 /* Clear current_it_mask at the end of an IT block. */ 14290 if (current_it_mask == 0x10) 14291 current_it_mask = 0; 14292 14293 if (!(inst.error || inst.relax)) 14294 { 14295 assert (inst.instruction < 0xe800 || inst.instruction > 0xffff); 14296 inst.size = (inst.instruction > 0xffff ? 4 : 2); 14297 if (inst.size_req && inst.size_req != inst.size) 14298 { 14299 as_bad (_("cannot honor width suffix -- `%s'"), str); 14300 return; 14301 } 14302 } 14303 14304 /* Something has gone badly wrong if we try to relax a fixed size 14305 instruction. */ 14306 assert (inst.size_req == 0 || !inst.relax); 14307 14308 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, 14309 *opcode->tvariant); 14310 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly 14311 set those bits when Thumb-2 32-bit instructions are seen. ie. 14312 anything other than bl/blx. 14313 This is overly pessimistic for relaxable instructions. 
*/ 14314 if ((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800) 14315 || inst.relax) 14316 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, 14317 arm_ext_v6t2); 14318 } 14319 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)) 14320 { 14321 /* Check that this instruction is supported for this CPU. */ 14322 if (!opcode->avariant || 14323 !ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)) 14324 { 14325 as_bad (_("selected processor does not support `%s'"), str); 14326 return; 14327 } 14328 if (inst.size_req) 14329 { 14330 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str); 14331 return; 14332 } 14333 14334 mapping_state (MAP_ARM); 14335 inst.instruction = opcode->avalue; 14336 if (opcode->tag == OT_unconditionalF) 14337 inst.instruction |= 0xF << 28; 14338 else 14339 inst.instruction |= inst.cond << 28; 14340 inst.size = INSN_SIZE; 14341 if (!parse_operands (p, opcode->operands)) 14342 opcode->aencode (); 14343 /* Arm mode bx is marked as both v4T and v5 because it's still required 14344 on a hypothetical non-thumb v5 core. */ 14345 if (ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v4t) 14346 || ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v5)) 14347 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t); 14348 else 14349 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, 14350 *opcode->avariant); 14351 } 14352 else 14353 { 14354 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor " 14355 "-- `%s'"), str); 14356 return; 14357 } 14358 output_inst (str); 14359} 14360 14361/* Various frobbings of labels and their addresses. 
*/ 14362 14363void 14364arm_start_line_hook (void) 14365{ 14366 last_label_seen = NULL; 14367} 14368 14369void 14370arm_frob_label (symbolS * sym) 14371{ 14372 last_label_seen = sym; 14373 14374 ARM_SET_THUMB (sym, thumb_mode); 14375 14376#if defined OBJ_COFF || defined OBJ_ELF 14377 ARM_SET_INTERWORK (sym, support_interwork); 14378#endif 14379 14380 /* Note - do not allow local symbols (.Lxxx) to be labeled 14381 as Thumb functions. This is because these labels, whilst 14382 they exist inside Thumb code, are not the entry points for 14383 possible ARM->Thumb calls. Also, these labels can be used 14384 as part of a computed goto or switch statement. eg gcc 14385 can generate code that looks like this: 14386 14387 ldr r2, [pc, .Laaa] 14388 lsl r3, r3, #2 14389 ldr r2, [r3, r2] 14390 mov pc, r2 14391 14392 .Lbbb: .word .Lxxx 14393 .Lccc: .word .Lyyy 14394 ..etc... 14395 .Laaa: .word Lbbb 14396 14397 The first instruction loads the address of the jump table. 14398 The second instruction converts a table index into a byte offset. 14399 The third instruction gets the jump address out of the table. 14400 The fourth instruction performs the jump. 14401 14402 If the address stored at .Laaa is that of a symbol which has the 14403 Thumb_Func bit set, then the linker will arrange for this address 14404 to have the bottom bit set, which in turn would mean that the 14405 address computation performed by the third instruction would end 14406 up with the bottom bit set. Since the ARM is capable of unaligned 14407 word loads, the instruction would then load the incorrect address 14408 out of the jump table, and chaos would ensue. */ 14409 if (label_is_thumb_function_name 14410 && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L') 14411 && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0) 14412 { 14413 /* When the address of a Thumb function is taken the bottom 14414 bit of that address should be set. 
This will allow 14415 interworking between Arm and Thumb functions to work 14416 correctly. */ 14417 14418 THUMB_SET_FUNC (sym, 1); 14419 14420 label_is_thumb_function_name = FALSE; 14421 } 14422 14423 dwarf2_emit_label (sym); 14424} 14425 14426int 14427arm_data_in_code (void) 14428{ 14429 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5)) 14430 { 14431 *input_line_pointer = '/'; 14432 input_line_pointer += 5; 14433 *input_line_pointer = 0; 14434 return 1; 14435 } 14436 14437 return 0; 14438} 14439 14440char * 14441arm_canonicalize_symbol_name (char * name) 14442{ 14443 int len; 14444 14445 if (thumb_mode && (len = strlen (name)) > 5 14446 && streq (name + len - 5, "/data")) 14447 *(name + len - 5) = 0; 14448 14449 return name; 14450} 14451 14452/* Table of all register names defined by default. The user can 14453 define additional names with .req. Note that all register names 14454 should appear in both upper and lowercase variants. Some registers 14455 also have mixed-case names. 
*/ 14456 14457#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 } 14458#define REGNUM(p,n,t) REGDEF(p##n, n, t) 14459#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t) 14460#define REGSET(p,t) \ 14461 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \ 14462 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \ 14463 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \ 14464 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t) 14465#define REGSETH(p,t) \ 14466 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \ 14467 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \ 14468 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \ 14469 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t) 14470#define REGSET2(p,t) \ 14471 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \ 14472 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \ 14473 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \ 14474 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t) 14475 14476static const struct reg_entry reg_names[] = 14477{ 14478 /* ARM integer registers. */ 14479 REGSET(r, RN), REGSET(R, RN), 14480 14481 /* ATPCS synonyms. */ 14482 REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN), 14483 REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN), 14484 REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN), 14485 14486 REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN), 14487 REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN), 14488 REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN), 14489 14490 /* Well-known aliases. 
*/ 14491 REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN), 14492 REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN), 14493 14494 REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN), 14495 REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN), 14496 14497 /* Coprocessor numbers. */ 14498 REGSET(p, CP), REGSET(P, CP), 14499 14500 /* Coprocessor register numbers. The "cr" variants are for backward 14501 compatibility. */ 14502 REGSET(c, CN), REGSET(C, CN), 14503 REGSET(cr, CN), REGSET(CR, CN), 14504 14505 /* FPA registers. */ 14506 REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN), 14507 REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN), 14508 14509 REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN), 14510 REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN), 14511 14512 /* VFP SP registers. */ 14513 REGSET(s,VFS), REGSET(S,VFS), 14514 REGSETH(s,VFS), REGSETH(S,VFS), 14515 14516 /* VFP DP Registers. */ 14517 REGSET(d,VFD), REGSET(D,VFD), 14518 /* Extra Neon DP registers. */ 14519 REGSETH(d,VFD), REGSETH(D,VFD), 14520 14521 /* Neon QP registers. */ 14522 REGSET2(q,NQ), REGSET2(Q,NQ), 14523 14524 /* VFP control registers. */ 14525 REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC), 14526 REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC), 14527 REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC), 14528 REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC), 14529 REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC), 14530 REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC), 14531 14532 /* Maverick DSP coprocessor registers. 
*/ 14533 REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX), 14534 REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX), 14535 14536 REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX), 14537 REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX), 14538 REGDEF(dspsc,0,DSPSC), 14539 14540 REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX), 14541 REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX), 14542 REGDEF(DSPSC,0,DSPSC), 14543 14544 /* iWMMXt data registers - p0, c0-15. */ 14545 REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR), 14546 14547 /* iWMMXt control registers - p1, c0-3. */ 14548 REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC), 14549 REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC), 14550 REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC), 14551 REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC), 14552 14553 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */ 14554 REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG), 14555 REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG), 14556 REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG), 14557 REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG), 14558 14559 /* XScale accumulator registers. */ 14560 REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE), 14561}; 14562#undef REGDEF 14563#undef REGNUM 14564#undef REGSET 14565 14566/* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled 14567 within psr_required_here. */ 14568static const struct asm_psr psrs[] = 14569{ 14570 /* Backward compatibility notation. Note that "all" is no longer 14571 truly all possible PSR bits. */ 14572 {"all", PSR_c | PSR_f}, 14573 {"flg", PSR_f}, 14574 {"ctl", PSR_c}, 14575 14576 /* Individual flags. */ 14577 {"f", PSR_f}, 14578 {"c", PSR_c}, 14579 {"x", PSR_x}, 14580 {"s", PSR_s}, 14581 /* Combinations of flags. 
*/
  {"fs", PSR_f | PSR_s},
  {"fx", PSR_f | PSR_x},
  {"fc", PSR_f | PSR_c},
  {"sf", PSR_s | PSR_f},
  {"sx", PSR_s | PSR_x},
  {"sc", PSR_s | PSR_c},
  {"xf", PSR_x | PSR_f},
  {"xs", PSR_x | PSR_s},
  {"xc", PSR_x | PSR_c},
  {"cf", PSR_c | PSR_f},
  {"cs", PSR_c | PSR_s},
  {"cx", PSR_c | PSR_x},
  {"fsx", PSR_f | PSR_s | PSR_x},
  {"fsc", PSR_f | PSR_s | PSR_c},
  {"fxs", PSR_f | PSR_x | PSR_s},
  {"fxc", PSR_f | PSR_x | PSR_c},
  {"fcs", PSR_f | PSR_c | PSR_s},
  {"fcx", PSR_f | PSR_c | PSR_x},
  {"sfx", PSR_s | PSR_f | PSR_x},
  {"sfc", PSR_s | PSR_f | PSR_c},
  {"sxf", PSR_s | PSR_x | PSR_f},
  {"sxc", PSR_s | PSR_x | PSR_c},
  {"scf", PSR_s | PSR_c | PSR_f},
  {"scx", PSR_s | PSR_c | PSR_x},
  {"xfs", PSR_x | PSR_f | PSR_s},
  {"xfc", PSR_x | PSR_f | PSR_c},
  {"xsf", PSR_x | PSR_s | PSR_f},
  {"xsc", PSR_x | PSR_s | PSR_c},
  {"xcf", PSR_x | PSR_c | PSR_f},
  {"xcs", PSR_x | PSR_c | PSR_s},
  {"cfs", PSR_c | PSR_f | PSR_s},
  {"cfx", PSR_c | PSR_f | PSR_x},
  {"csf", PSR_c | PSR_s | PSR_f},
  {"csx", PSR_c | PSR_s | PSR_x},
  {"cxf", PSR_c | PSR_x | PSR_f},
  {"cxs", PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};

/* Table of V7M psr names.  */
static const struct asm_psr v7m_psrs[] =
{
  {"apsr",	  0 }, {"APSR",		0 },
  {"iapsr",	  1 }, {"IAPSR",	1 },
  {"eapsr",	  2 }, {"EAPSR",	2 },
  {"psr",	  3 }, {"PSR",		3 },
  {"xpsr",	  3 }, {"XPSR",		3 }, {"xPSR",	  3 },
  {"ipsr",	  5 }, {"IPSR",		5 },
  {"epsr",	  6 }, {"EPSR",		6 },
  {"iepsr",	  7 }, {"IEPSR",	7 },
  {"msp",	  8 }, {"MSP",		8 },
  {"psp",	  9 }, {"PSP",		9 },
  {"primask",	 16 }, {"PRIMASK",     16 },
  {"basepri",	 17 }, {"BASEPRI",     17 },
  {"basepri_max", 18 }, {"BASEPRI_MAX", 18 },
  {"faultmask",	 19 }, {"FAULTMASK",   19 },
  {"control",	 20 }, {"CONTROL",     20 }
};

/* Table of all shift-in-operand names.  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX }
};

/* Table of all explicit relocation names.
*/ 14676#ifdef OBJ_ELF 14677static struct reloc_entry reloc_names[] = 14678{ 14679 { "got", BFD_RELOC_ARM_GOT32 }, { "GOT", BFD_RELOC_ARM_GOT32 }, 14680 { "gotoff", BFD_RELOC_ARM_GOTOFF }, { "GOTOFF", BFD_RELOC_ARM_GOTOFF }, 14681 { "plt", BFD_RELOC_ARM_PLT32 }, { "PLT", BFD_RELOC_ARM_PLT32 }, 14682 { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 }, 14683 { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 }, 14684 { "sbrel", BFD_RELOC_ARM_SBREL32 }, { "SBREL", BFD_RELOC_ARM_SBREL32 }, 14685 { "tlsgd", BFD_RELOC_ARM_TLS_GD32}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32}, 14686 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32}, 14687 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32}, 14688 { "gottpoff",BFD_RELOC_ARM_TLS_IE32}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32}, 14689 { "tpoff", BFD_RELOC_ARM_TLS_LE32}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32} 14690}; 14691#endif 14692 14693/* Table of all conditional affixes. 0xF is not defined as a condition code. */ 14694static const struct asm_cond conds[] = 14695{ 14696 {"eq", 0x0}, 14697 {"ne", 0x1}, 14698 {"cs", 0x2}, {"hs", 0x2}, 14699 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3}, 14700 {"mi", 0x4}, 14701 {"pl", 0x5}, 14702 {"vs", 0x6}, 14703 {"vc", 0x7}, 14704 {"hi", 0x8}, 14705 {"ls", 0x9}, 14706 {"ge", 0xa}, 14707 {"lt", 0xb}, 14708 {"gt", 0xc}, 14709 {"le", 0xd}, 14710 {"al", 0xe} 14711}; 14712 14713static struct asm_barrier_opt barrier_opt_names[] = 14714{ 14715 { "sy", 0xf }, 14716 { "un", 0x7 }, 14717 { "st", 0xe }, 14718 { "unst", 0x6 }, 14719 { "ish", 0xb }, 14720 { "sh", 0xb }, 14721 { "ishst", 0xa }, 14722 { "shst", 0xa }, 14723 { "nsh", 0x7 }, 14724 { "nshst", 0x6 }, 14725 { "osh", 0x3 }, 14726 { "oshst", 0x2 } 14727}; 14728 14729/* Table of ARM-format instructions. */ 14730 14731/* Macros for gluing together operand strings. N.B. 
In all cases 14732 other than OPS0, the trailing OP_stop comes from default 14733 zero-initialization of the unspecified elements of the array. */ 14734#define OPS0() { OP_stop, } 14735#define OPS1(a) { OP_##a, } 14736#define OPS2(a,b) { OP_##a,OP_##b, } 14737#define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, } 14738#define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, } 14739#define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, } 14740#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, } 14741 14742/* These macros abstract out the exact format of the mnemonic table and 14743 save some repeated characters. */ 14744 14745/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */ 14746#define TxCE(mnem, op, top, nops, ops, ae, te) \ 14747 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \ 14748 THUMB_VARIANT, do_##ae, do_##te } 14749 14750/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for 14751 a T_MNEM_xyz enumerator. */ 14752#define TCE(mnem, aop, top, nops, ops, ae, te) \ 14753 TxCE(mnem, aop, 0x##top, nops, ops, ae, te) 14754#define tCE(mnem, aop, top, nops, ops, ae, te) \ 14755 TxCE(mnem, aop, T_MNEM_##top, nops, ops, ae, te) 14756 14757/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional 14758 infix after the third character. 
*/
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
/* As TxC3, but the infix position is deprecated (OT_cinfix3_deprecated).  */
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
/* Numeric-Thumb-opcode (TC3*) and T_MNEM-enumerator (tC3*) wrappers,
   mirroring the TCE/tCE pair above.  */
#define TC3(mnem, aop, top, nops, ops, ae, te) \
      TxC3(mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
      TxC3w(mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
      TxC3(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
      TxC3w(mnem, aop, T_MNEM_##top, nops, ops, ae, te)

/* Mnemonic with a conditional infix in an unusual place.  Each and every variant has to
   appear in the condition table.  m1 is the prefix before the infix, m2
   the (possibly empty) infix, m3 the suffix after it.  An empty m2
   stringizes to "", whose sizeof is 1, selecting OT_odd_infix_unc;
   otherwise the infix offset sizeof(#m1)-1 is encoded in the tag.  */
#define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
  { #m1 #m2 #m3, OPS##nops ops, \
    sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
    0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }

/* Expands one table entry per condition (plus the unconditional form),
   covering every affix in conds[] including the cs/hs and cc/ul/lo
   aliases.  */
#define TxCM(m1, m2, op, top, nops, ops, ae, te) \
  TxCM_(m1,   , m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, eq, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, ne, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, cs, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, hs, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, cc, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, ul, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, lo, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, mi, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, pl, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, vs, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, vc, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, hi, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, ls, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, ge, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, lt, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, gt, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, le, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, al, m2, op, top, nops, ops, ae, te)

#define TCM(m1,m2, aop, top, nops, ops, ae, te) \
      TxCM(m1,m2, aop, 0x##top, nops, ops, ae, te)
#define tCM(m1,m2, aop, top, nops, ops, ae, te) \
      TxCM(m1,m2, aop, T_MNEM_##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te)				\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.
*/
#define TUF(mnem, op, top, nops, ops, ae, te)				\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* ARM-only variants of all the above.  These leave the Thumb opcode,
   Thumb variant and Thumb encoder fields zero/NULL.  */
#define CE(mnem,  op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  The
   Thumb opcode is the ARM opcode with the always-condition 0xe pasted
   in front, and both sides share one encoder.  */
#define cCE(mnem,  op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* ARM-only analogue of TxCM_: odd-position infix m2 between prefix m1
   and suffix m3.  An empty m2 stringizes to "" (sizeof 1), selecting
   the unconditional tag; otherwise the infix offset is encoded.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
  { #m1 #m2 #m3, OPS##nops ops, \
    sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* One entry per condition affix (plus unconditional), ARM-only.  */
#define CM(m1, m2, op, nops, ops, ae)	\
  xCM_(m1,   , m2, op, nops, ops, ae),	\
  xCM_(m1, eq, m2, op, nops, ops, ae),	\
  xCM_(m1, ne, m2, op, nops, ops, ae),	\
  xCM_(m1, cs, m2, op, nops, ops, ae),	\
  xCM_(m1, hs, m2, op, nops, ops, ae),	\
  xCM_(m1, cc, m2, op, nops, ops, ae),	\
  xCM_(m1, ul, m2, op, nops, ops, ae),	\
  xCM_(m1, lo, m2, op, nops, ops, ae),	\
  xCM_(m1, mi, m2, op, nops, ops, ae),	\
  xCM_(m1, pl, m2, op, nops, ops, ae),	\
  xCM_(m1, vs, m2, op, nops, ops, ae),	\
  xCM_(m1, vc, m2, op, nops, ops, ae),	\
  xCM_(m1, hi, m2, op, nops, ops, ae),	\
  xCM_(m1, ls, m2, op, nops, ops, ae),	\
  xCM_(m1, ge, m2, op, nops, ops, ae),	\
  xCM_(m1, lt, m2, op, nops, ops, ae),	\
  xCM_(m1, gt, m2, op, nops, ops, ae),	\
  xCM_(m1, le, m2, op, nops, ops, ae),	\
  xCM_(m1, al, m2, op, nops, ops, ae)

#define UE(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.
*/ 14890#define nUF(mnem, op, nops, ops, enc) \ 14891 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM_##op, N_MNEM_##op, \ 14892 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc } 14893 14894/* Neon insn with conditional suffix for the ARM version, non-overloaded 14895 version. */ 14896#define NCE_tag(mnem, op, nops, ops, enc, tag) \ 14897 { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \ 14898 THUMB_VARIANT, do_##enc, do_##enc } 14899 14900#define NCE(mnem, op, nops, ops, enc) \ 14901 NCE_tag(mnem, op, nops, ops, enc, OT_csuffix) 14902 14903#define NCEF(mnem, op, nops, ops, enc) \ 14904 NCE_tag(mnem, op, nops, ops, enc, OT_csuffixF) 14905 14906/* Neon insn with conditional suffix for the ARM version, overloaded types. */ 14907#define nCE_tag(mnem, op, nops, ops, enc, tag) \ 14908 { #mnem, OPS##nops ops, tag, N_MNEM_##op, N_MNEM_##op, \ 14909 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc } 14910 14911#define nCE(mnem, op, nops, ops, enc) \ 14912 nCE_tag(mnem, op, nops, ops, enc, OT_csuffix) 14913 14914#define nCEF(mnem, op, nops, ops, enc) \ 14915 nCE_tag(mnem, op, nops, ops, enc, OT_csuffixF) 14916 14917#define do_0 0 14918 14919/* Thumb-only, unconditional. */ 14920#define UT(mnem, op, nops, ops, te) TUE(mnem, 0, op, nops, ops, 0, te) 14921 14922static const struct asm_opcode insns[] = 14923{ 14924#define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. 
*/ 14925#define THUMB_VARIANT &arm_ext_v4t 14926 tCE(and, 0000000, and, 3, (RR, oRR, SH), arit, t_arit3c), 14927 tC3(ands, 0100000, ands, 3, (RR, oRR, SH), arit, t_arit3c), 14928 tCE(eor, 0200000, eor, 3, (RR, oRR, SH), arit, t_arit3c), 14929 tC3(eors, 0300000, eors, 3, (RR, oRR, SH), arit, t_arit3c), 14930 tCE(sub, 0400000, sub, 3, (RR, oRR, SH), arit, t_add_sub), 14931 tC3(subs, 0500000, subs, 3, (RR, oRR, SH), arit, t_add_sub), 14932 tCE(add, 0800000, add, 3, (RR, oRR, SHG), arit, t_add_sub), 14933 tC3(adds, 0900000, adds, 3, (RR, oRR, SHG), arit, t_add_sub), 14934 tCE(adc, 0a00000, adc, 3, (RR, oRR, SH), arit, t_arit3c), 14935 tC3(adcs, 0b00000, adcs, 3, (RR, oRR, SH), arit, t_arit3c), 14936 tCE(sbc, 0c00000, sbc, 3, (RR, oRR, SH), arit, t_arit3), 14937 tC3(sbcs, 0d00000, sbcs, 3, (RR, oRR, SH), arit, t_arit3), 14938 tCE(orr, 1800000, orr, 3, (RR, oRR, SH), arit, t_arit3c), 14939 tC3(orrs, 1900000, orrs, 3, (RR, oRR, SH), arit, t_arit3c), 14940 tCE(bic, 1c00000, bic, 3, (RR, oRR, SH), arit, t_arit3), 14941 tC3(bics, 1d00000, bics, 3, (RR, oRR, SH), arit, t_arit3), 14942 14943 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism 14944 for setting PSR flag bits. They are obsolete in V6 and do not 14945 have Thumb equivalents. 
*/ 14946 tCE(tst, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst), 14947 tC3w(tsts, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst), 14948 CL(tstp, 110f000, 2, (RR, SH), cmp), 14949 tCE(cmp, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp), 14950 tC3w(cmps, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp), 14951 CL(cmpp, 150f000, 2, (RR, SH), cmp), 14952 tCE(cmn, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst), 14953 tC3w(cmns, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst), 14954 CL(cmnp, 170f000, 2, (RR, SH), cmp), 14955 14956 tCE(mov, 1a00000, mov, 2, (RR, SH), mov, t_mov_cmp), 14957 tC3(movs, 1b00000, movs, 2, (RR, SH), mov, t_mov_cmp), 14958 tCE(mvn, 1e00000, mvn, 2, (RR, SH), mov, t_mvn_tst), 14959 tC3(mvns, 1f00000, mvns, 2, (RR, SH), mov, t_mvn_tst), 14960 14961 tCE(ldr, 4100000, ldr, 2, (RR, ADDRGLDR),ldst, t_ldst), 14962 tC3(ldrb, 4500000, ldrb, 2, (RR, ADDRGLDR),ldst, t_ldst), 14963 tCE(str, 4000000, str, 2, (RR, ADDRGLDR),ldst, t_ldst), 14964 tC3(strb, 4400000, strb, 2, (RR, ADDRGLDR),ldst, t_ldst), 14965 14966 tCE(stm, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm), 14967 tC3(stmia, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm), 14968 tC3(stmea, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm), 14969 tCE(ldm, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm), 14970 tC3(ldmia, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm), 14971 tC3(ldmfd, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm), 14972 14973 TCE(swi, f000000, df00, 1, (EXPi), swi, t_swi), 14974 TCE(svc, f000000, df00, 1, (EXPi), swi, t_swi), 14975 tCE(b, a000000, b, 1, (EXPr), branch, t_branch), 14976 TCE(bl, b000000, f000f800, 1, (EXPr), bl, t_branch23), 14977 14978 /* Pseudo ops. */ 14979 tCE(adr, 28f0000, adr, 2, (RR, EXP), adr, t_adr), 14980 C3(adrl, 28f0000, 2, (RR, EXP), adrl), 14981 tCE(nop, 1a00000, nop, 1, (oI255c), nop, t_nop), 14982 14983 /* Thumb-compatibility pseudo ops. 
*/ 14984 tCE(lsl, 1a00000, lsl, 3, (RR, oRR, SH), shift, t_shift), 14985 tC3(lsls, 1b00000, lsls, 3, (RR, oRR, SH), shift, t_shift), 14986 tCE(lsr, 1a00020, lsr, 3, (RR, oRR, SH), shift, t_shift), 14987 tC3(lsrs, 1b00020, lsrs, 3, (RR, oRR, SH), shift, t_shift), 14988 tCE(asr, 1a00040, asr, 3, (RR, oRR, SH), shift, t_shift), 14989 tC3(asrs, 1b00040, asrs, 3, (RR, oRR, SH), shift, t_shift), 14990 tCE(ror, 1a00060, ror, 3, (RR, oRR, SH), shift, t_shift), 14991 tC3(rors, 1b00060, rors, 3, (RR, oRR, SH), shift, t_shift), 14992 tCE(neg, 2600000, neg, 2, (RR, RR), rd_rn, t_neg), 14993 tC3(negs, 2700000, negs, 2, (RR, RR), rd_rn, t_neg), 14994 tCE(push, 92d0000, push, 1, (REGLST), push_pop, t_push_pop), 14995 tCE(pop, 8bd0000, pop, 1, (REGLST), push_pop, t_push_pop), 14996 14997 /* These may simplify to neg. */ 14998 TCE(rsb, 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb), 14999 TC3(rsbs, 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb), 15000 15001 TCE(rrx, 1a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rd_rm), 15002 TCE(rrxs, 1b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rd_rm), 15003 15004#undef THUMB_VARIANT 15005#define THUMB_VARIANT &arm_ext_v6 15006 TCE(cpy, 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy), 15007 15008 /* V1 instructions with no Thumb analogue prior to V6T2. 
*/ 15009#undef THUMB_VARIANT 15010#define THUMB_VARIANT &arm_ext_v6t2 15011 TCE(teq, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst), 15012 TC3w(teqs, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst), 15013 CL(teqp, 130f000, 2, (RR, SH), cmp), 15014 15015 TC3(ldrt, 4300000, f8500e00, 2, (RR, ADDR), ldstt, t_ldstt), 15016 TC3(ldrbt, 4700000, f8100e00, 2, (RR, ADDR), ldstt, t_ldstt), 15017 TC3(strt, 4200000, f8400e00, 2, (RR, ADDR), ldstt, t_ldstt), 15018 TC3(strbt, 4600000, f8000e00, 2, (RR, ADDR), ldstt, t_ldstt), 15019 15020 TC3(stmdb, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm), 15021 TC3(stmfd, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm), 15022 15023 TC3(ldmdb, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm), 15024 TC3(ldmea, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm), 15025 15026 /* V1 instructions with no Thumb analogue at all. */ 15027 CE(rsc, 0e00000, 3, (RR, oRR, SH), arit), 15028 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit), 15029 15030 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm), 15031 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm), 15032 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm), 15033 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm), 15034 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm), 15035 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm), 15036 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm), 15037 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm), 15038 15039#undef ARM_VARIANT 15040#define ARM_VARIANT &arm_ext_v2 /* ARM 2 - multiplies. */ 15041#undef THUMB_VARIANT 15042#define THUMB_VARIANT &arm_ext_v4t 15043 tCE(mul, 0000090, mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul), 15044 tC3(muls, 0100090, muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul), 15045 15046#undef THUMB_VARIANT 15047#define THUMB_VARIANT &arm_ext_v6t2 15048 TCE(mla, 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla), 15049 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas), 15050 15051 /* Generic coprocessor instructions. 
*/ 15052 TCE(cdp, e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp), 15053 TCE(ldc, c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc), 15054 TC3(ldcl, c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc), 15055 TCE(stc, c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc), 15056 TC3(stcl, c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc), 15057 TCE(mcr, e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg), 15058 TCE(mrc, e100010, ee100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg), 15059 15060#undef ARM_VARIANT 15061#define ARM_VARIANT &arm_ext_v2s /* ARM 3 - swp instructions. */ 15062 CE(swp, 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn), 15063 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn), 15064 15065#undef ARM_VARIANT 15066#define ARM_VARIANT &arm_ext_v3 /* ARM 6 Status register instructions. */ 15067 TCE(mrs, 10f0000, f3ef8000, 2, (APSR_RR, RVC_PSR), mrs, t_mrs), 15068 TCE(msr, 120f000, f3808000, 2, (RVC_PSR, RR_EXi), msr, t_msr), 15069 15070#undef ARM_VARIANT 15071#define ARM_VARIANT &arm_ext_v3m /* ARM 7M long multiplies. */ 15072 TCE(smull, 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull), 15073 CM(smull,s, 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull), 15074 TCE(umull, 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull), 15075 CM(umull,s, 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull), 15076 TCE(smlal, 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull), 15077 CM(smlal,s, 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull), 15078 TCE(umlal, 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull), 15079 CM(umlal,s, 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull), 15080 15081#undef ARM_VARIANT 15082#define ARM_VARIANT &arm_ext_v4 /* ARM Architecture 4. 
*/ 15083#undef THUMB_VARIANT 15084#define THUMB_VARIANT &arm_ext_v4t 15085 tC3(ldrh, 01000b0, ldrh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst), 15086 tC3(strh, 00000b0, strh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst), 15087 tC3(ldrsh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst), 15088 tC3(ldrsb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst), 15089 tCM(ld,sh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst), 15090 tCM(ld,sb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst), 15091 15092#undef ARM_VARIANT 15093#define ARM_VARIANT &arm_ext_v4t_5 15094 /* ARM Architecture 4T. */ 15095 /* Note: bx (and blx) are required on V5, even if the processor does 15096 not support Thumb. */ 15097 TCE(bx, 12fff10, 4700, 1, (RR), bx, t_bx), 15098 15099#undef ARM_VARIANT 15100#define ARM_VARIANT &arm_ext_v5 /* ARM Architecture 5T. */ 15101#undef THUMB_VARIANT 15102#define THUMB_VARIANT &arm_ext_v5t 15103 /* Note: blx has 2 variants; the .value coded here is for 15104 BLX(2). Only this variant has conditional execution. */ 15105 TCE(blx, 12fff30, 4780, 1, (RR_EXr), blx, t_blx), 15106 TUE(bkpt, 1200070, be00, 1, (oIffffb), bkpt, t_bkpt), 15107 15108#undef THUMB_VARIANT 15109#define THUMB_VARIANT &arm_ext_v6t2 15110 TCE(clz, 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz), 15111 TUF(ldc2, c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc), 15112 TUF(ldc2l, c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc), 15113 TUF(stc2, c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc), 15114 TUF(stc2l, c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc), 15115 TUF(cdp2, e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp), 15116 TUF(mcr2, e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg), 15117 TUF(mrc2, e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg), 15118 15119#undef ARM_VARIANT 15120#define ARM_VARIANT &arm_ext_v5exp /* ARM Architecture 5TExP. 
*/ 15121 TCE(smlabb, 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla), 15122 TCE(smlatb, 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla), 15123 TCE(smlabt, 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla), 15124 TCE(smlatt, 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla), 15125 15126 TCE(smlawb, 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla), 15127 TCE(smlawt, 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla), 15128 15129 TCE(smlalbb, 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal), 15130 TCE(smlaltb, 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal), 15131 TCE(smlalbt, 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal), 15132 TCE(smlaltt, 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal), 15133 15134 TCE(smulbb, 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), 15135 TCE(smultb, 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), 15136 TCE(smulbt, 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), 15137 TCE(smultt, 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), 15138 15139 TCE(smulwb, 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), 15140 TCE(smulwt, 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), 15141 15142 TCE(qadd, 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn), 15143 TCE(qdadd, 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn), 15144 TCE(qsub, 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn), 15145 TCE(qdsub, 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn), 15146 15147#undef ARM_VARIANT 15148#define ARM_VARIANT &arm_ext_v5e /* ARM Architecture 5TE. 
*/ 15149 TUF(pld, 450f000, f810f000, 1, (ADDR), pld, t_pld), 15150 TC3(ldrd, 00000d0, e8500000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd), 15151 TC3(strd, 00000f0, e8400000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd), 15152 15153 TCE(mcrr, c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c), 15154 TCE(mrrc, c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c), 15155 15156#undef ARM_VARIANT 15157#define ARM_VARIANT &arm_ext_v5j /* ARM Architecture 5TEJ. */ 15158 TCE(bxj, 12fff20, f3c08f00, 1, (RR), bxj, t_bxj), 15159 15160#undef ARM_VARIANT 15161#define ARM_VARIANT &arm_ext_v6 /* ARM V6. */ 15162#undef THUMB_VARIANT 15163#define THUMB_VARIANT &arm_ext_v6 15164 TUF(cpsie, 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi), 15165 TUF(cpsid, 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi), 15166 tCE(rev, 6bf0f30, rev, 2, (RRnpc, RRnpc), rd_rm, t_rev), 15167 tCE(rev16, 6bf0fb0, rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev), 15168 tCE(revsh, 6ff0fb0, revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev), 15169 tCE(sxth, 6bf0070, sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth), 15170 tCE(uxth, 6ff0070, uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth), 15171 tCE(sxtb, 6af0070, sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth), 15172 tCE(uxtb, 6ef0070, uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth), 15173 TUF(setend, 1010000, b650, 1, (ENDI), setend, t_setend), 15174 15175#undef THUMB_VARIANT 15176#define THUMB_VARIANT &arm_ext_v6t2 15177 TCE(ldrex, 1900f9f, e8500f00, 2, (RRnpc, ADDR), ldrex, t_ldrex), 15178 TCE(strex, 1800f90, e8400000, 3, (RRnpc, RRnpc, ADDR), strex, t_strex), 15179 TUF(mcrr2, c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c), 15180 TUF(mrrc2, c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c), 15181 15182 TCE(ssat, 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat), 15183 TCE(usat, 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat), 15184 15185/* ARM V6 not included in 
V7M (eg. integer SIMD). */ 15186#undef THUMB_VARIANT 15187#define THUMB_VARIANT &arm_ext_v6_notm 15188 TUF(cps, 1020000, f3af8100, 1, (I31b), imm0, t_cps), 15189 TCE(pkhbt, 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt), 15190 TCE(pkhtb, 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb), 15191 TCE(qadd16, 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15192 TCE(qadd8, 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15193 TCE(qaddsubx, 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15194 TCE(qsub16, 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15195 TCE(qsub8, 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15196 TCE(qsubaddx, 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15197 TCE(sadd16, 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15198 TCE(sadd8, 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15199 TCE(saddsubx, 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15200 TCE(shadd16, 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15201 TCE(shadd8, 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15202 TCE(shaddsubx, 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15203 TCE(shsub16, 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15204 TCE(shsub8, 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15205 TCE(shsubaddx, 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15206 TCE(ssub16, 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15207 TCE(ssub8, 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15208 TCE(ssubaddx, 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15209 TCE(uadd16, 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15210 TCE(uadd8, 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15211 TCE(uaddsubx, 
6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15212 TCE(uhadd16, 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15213 TCE(uhadd8, 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15214 TCE(uhaddsubx, 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15215 TCE(uhsub16, 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15216 TCE(uhsub8, 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15217 TCE(uhsubaddx, 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15218 TCE(uqadd16, 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15219 TCE(uqadd8, 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15220 TCE(uqaddsubx, 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15221 TCE(uqsub16, 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15222 TCE(uqsub8, 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15223 TCE(uqsubaddx, 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15224 TCE(usub16, 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15225 TCE(usub8, 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15226 TCE(usubaddx, 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15227 TUF(rfeia, 8900a00, e990c000, 1, (RRw), rfe, rfe), 15228 UF(rfeib, 9900a00, 1, (RRw), rfe), 15229 UF(rfeda, 8100a00, 1, (RRw), rfe), 15230 TUF(rfedb, 9100a00, e810c000, 1, (RRw), rfe, rfe), 15231 TUF(rfefd, 8900a00, e990c000, 1, (RRw), rfe, rfe), 15232 UF(rfefa, 9900a00, 1, (RRw), rfe), 15233 UF(rfeea, 8100a00, 1, (RRw), rfe), 15234 TUF(rfeed, 9100a00, e810c000, 1, (RRw), rfe, rfe), 15235 TCE(sxtah, 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah), 15236 TCE(sxtab16, 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah), 15237 TCE(sxtab, 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah), 15238 TCE(sxtb16, 68f0070, fa2ff080, 3, 
(RRnpc, RRnpc, oROR), sxth, t_sxth), 15239 TCE(uxtah, 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah), 15240 TCE(uxtab16, 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah), 15241 TCE(uxtab, 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah), 15242 TCE(uxtb16, 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth), 15243 TCE(sel, 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15244 TCE(smlad, 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla), 15245 TCE(smladx, 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla), 15246 TCE(smlald, 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal), 15247 TCE(smlaldx, 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal), 15248 TCE(smlsd, 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla), 15249 TCE(smlsdx, 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla), 15250 TCE(smlsld, 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal), 15251 TCE(smlsldx, 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal), 15252 TCE(smmla, 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla), 15253 TCE(smmlar, 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla), 15254 TCE(smmls, 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla), 15255 TCE(smmlsr, 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla), 15256 TCE(smmul, 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), 15257 TCE(smmulr, 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), 15258 TCE(smuad, 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), 15259 TCE(smuadx, 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), 15260 TCE(smusd, 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), 15261 TCE(smusdx, 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), 15262 TUF(srsia, 8c00500, e980c000, 2, (oRRw, I31w), srs, srs), 15263 UF(srsib, 
9c00500, 2, (oRRw, I31w), srs), 15264 UF(srsda, 8400500, 2, (oRRw, I31w), srs), 15265 TUF(srsdb, 9400500, e800c000, 2, (oRRw, I31w), srs, srs), 15266 TCE(ssat16, 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16), 15267 TCE(umaal, 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal), 15268 TCE(usad8, 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), 15269 TCE(usada8, 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla), 15270 TCE(usat16, 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16), 15271 15272#undef ARM_VARIANT 15273#define ARM_VARIANT &arm_ext_v6k 15274#undef THUMB_VARIANT 15275#define THUMB_VARIANT &arm_ext_v6k 15276 tCE(yield, 320f001, yield, 0, (), noargs, t_hint), 15277 tCE(wfe, 320f002, wfe, 0, (), noargs, t_hint), 15278 tCE(wfi, 320f003, wfi, 0, (), noargs, t_hint), 15279 tCE(sev, 320f004, sev, 0, (), noargs, t_hint), 15280 15281#undef THUMB_VARIANT 15282#define THUMB_VARIANT &arm_ext_v6_notm 15283 TCE(ldrexd, 1b00f9f, e8d0007f, 3, (RRnpc, oRRnpc, RRnpcb), ldrexd, t_ldrexd), 15284 TCE(strexd, 1a00f90, e8c00070, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb), strexd, t_strexd), 15285 15286#undef THUMB_VARIANT 15287#define THUMB_VARIANT &arm_ext_v6t2 15288 TCE(ldrexb, 1d00f9f, e8d00f4f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn), 15289 TCE(ldrexh, 1f00f9f, e8d00f5f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn), 15290 TCE(strexb, 1c00f90, e8c00f40, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn), 15291 TCE(strexh, 1e00f90, e8c00f50, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn), 15292 TUF(clrex, 57ff01f, f3bf8f2f, 0, (), noargs, noargs), 15293 15294#undef ARM_VARIANT 15295#define ARM_VARIANT &arm_ext_v6z 15296 TCE(smc, 1600070, f7f08000, 1, (EXPi), smc, t_smc), 15297 15298#undef ARM_VARIANT 15299#define ARM_VARIANT &arm_ext_v6t2 15300 TCE(bfc, 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc), 15301 TCE(bfi, 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi), 15302 TCE(sbfx, 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx), 
 /* Continuation of the ARMv6T2 group opened above (ARM_VARIANT is
    &arm_ext_v6t2 here): bit-field extract, multiply-subtract and the
    16-bit immediate moves.  */
 TCE(ubfx,    7e00050, f3c00000, 4, (RR, RR, I31, I32),          bfx,   t_bfx),

 TCE(mls,     0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
 TCE(movw,    3000000, f2400000, 2, (RRnpc, HALF),               mov16, t_mov16),
 TCE(movt,    3400000, f2c00000, 2, (RRnpc, HALF),               mov16, t_mov16),
 TCE(rbit,    6ff0f30, fa90f0a0, 2, (RR, RR),                    rd_rm, t_rbit),

 /* 't'-suffixed load/store variants, all routed through the ldsttv4
    encoder.  */
 TC3(ldrht,   03000b0, f8300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
 TC3(ldrsht,  03000f0, f9300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
 TC3(ldrsbt,  03000d0, f9100e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
 TC3(strht,   02000b0, f8200e00, 2, (RR, ADDR), ldsttv4, t_ldstt),

 /* Compare-and-branch: a single (Thumb) opcode only -- no ARM column.  */
 UT(cbnz,        b900, 2, (RR, EXP), t_cbz),
 UT(cbz,         b100, 2, (RR, EXP), t_cbz),
 /* ARM does not really have an IT instruction, so always allow it.  */
#undef ARM_VARIANT
#define ARM_VARIANT &arm_ext_v1
 /* All fifteen then/else patterns of the IT block; only the low byte
    of the Thumb opcode differs between them.  */
 TUE(it,       0, bf08, 1, (COND), it, t_it),
 TUE(itt,      0, bf0c, 1, (COND), it, t_it),
 TUE(ite,      0, bf04, 1, (COND), it, t_it),
 TUE(ittt,     0, bf0e, 1, (COND), it, t_it),
 TUE(itet,     0, bf06, 1, (COND), it, t_it),
 TUE(itte,     0, bf0a, 1, (COND), it, t_it),
 TUE(itee,     0, bf02, 1, (COND), it, t_it),
 TUE(itttt,    0, bf0f, 1, (COND), it, t_it),
 TUE(itett,    0, bf07, 1, (COND), it, t_it),
 TUE(ittet,    0, bf0b, 1, (COND), it, t_it),
 TUE(iteet,    0, bf03, 1, (COND), it, t_it),
 TUE(ittte,    0, bf0d, 1, (COND), it, t_it),
 TUE(itete,    0, bf05, 1, (COND), it, t_it),
 TUE(ittee,    0, bf09, 1, (COND), it, t_it),
 TUE(iteee,    0, bf01, 1, (COND), it, t_it),

 /* Thumb2 only instructions.  */
#undef ARM_VARIANT
#define ARM_VARIANT NULL

 TCE(addw,    0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
 TCE(subw,    0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
 TCE(tbb,     0, e8d0f000, 1, (TB), 0, t_tb),
 TCE(tbh,     0, e8d0f010, 1, (TB), 0, t_tb),

 /* Thumb-2 hardware division instructions (R and M profiles only).  */
#undef THUMB_VARIANT
#define THUMB_VARIANT &arm_ext_div
 TCE(sdiv,    0, fb90f0f0, 3, (RR, oRR, RR), 0, t_div),
 TCE(udiv,    0, fbb0f0f0, 3, (RR, oRR, RR), 0, t_div),

 /* ARM V7 instructions.  */
#undef ARM_VARIANT
#define ARM_VARIANT &arm_ext_v7
#undef THUMB_VARIANT
#define THUMB_VARIANT &arm_ext_v7
 TUF(pli,     450f000, f910f000, 1, (ADDR),     pli,     t_pld),
 TCE(dbg,     320f0f0, f3af80f0, 1, (I15),      dbg,     t_dbg),
 TUF(dmb,     57ff050, f3bf8f50, 1, (oBARRIER), barrier, t_barrier),
 TUF(dsb,     57ff040, f3bf8f40, 1, (oBARRIER), barrier, t_barrier),
 TUF(isb,     57ff060, f3bf8f60, 1, (oBARRIER), barrier, t_barrier),

#undef ARM_VARIANT
#define ARM_VARIANT &fpu_fpa_ext_v1  /* Core FPA instruction set (V1).  */
 /* FPA status/control register transfers.  */
 cCE(wfs,     e200110, 1, (RR),           rd),
 cCE(rfs,     e300110, 1, (RR),           rd),
 cCE(wfc,     e400110, 1, (RR),           rd),
 cCE(rfc,     e500110, 1, (RR),           rd),

 /* FPA loads/stores; the s/d/e/p mnemonic suffix corresponds to the
    0x8000/0x400000 opcode-bit differences visible below.  */
 cCL(ldfs,    c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
 cCL(ldfd,    c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
 cCL(ldfe,    c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
 cCL(ldfp,    c508100, 2, (RF, ADDRGLDC), rd_cpaddr),

 cCL(stfs,    c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
 cCL(stfd,    c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
 cCL(stfe,    c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
 cCL(stfp,    c408100, 2, (RF, ADDRGLDC), rd_cpaddr),

 /* FPA monadic move.  Throughout the monadic groups the mnemonic
    suffixes follow one pattern: precision s/d/e at opcode +0/+0x80/
    +0x80000, rounding p/m/z at +0x20/+0x40/+0x60 -- evident from the
    encodings themselves.  */
 cCL(mvfs,    e008100, 2, (RF, RF_IF),    rd_rm),
 cCL(mvfsp,   e008120, 2, (RF, RF_IF),    rd_rm),
 cCL(mvfsm,   e008140, 2, (RF, RF_IF),    rd_rm),
 cCL(mvfsz,   e008160, 2, (RF, RF_IF),    rd_rm),
 cCL(mvfd,    e008180, 2, (RF, RF_IF),    rd_rm),
 cCL(mvfdp,   e0081a0, 2, (RF, RF_IF),    rd_rm),
 cCL(mvfdm,   e0081c0, 2, (RF, RF_IF),    rd_rm),
 cCL(mvfdz,   e0081e0, 2, (RF, RF_IF),    rd_rm),
 cCL(mvfe,    e088100, 2, (RF, RF_IF),    rd_rm),
 cCL(mvfep,   e088120, 2, (RF, RF_IF),    rd_rm),
 cCL(mvfem,   e088140, 2, (RF, RF_IF),    rd_rm),
 cCL(mvfez,   e088160, 2, (RF, RF_IF),    rd_rm),
15391 15392 cCL(mnfs, e108100, 2, (RF, RF_IF), rd_rm), 15393 cCL(mnfsp, e108120, 2, (RF, RF_IF), rd_rm), 15394 cCL(mnfsm, e108140, 2, (RF, RF_IF), rd_rm), 15395 cCL(mnfsz, e108160, 2, (RF, RF_IF), rd_rm), 15396 cCL(mnfd, e108180, 2, (RF, RF_IF), rd_rm), 15397 cCL(mnfdp, e1081a0, 2, (RF, RF_IF), rd_rm), 15398 cCL(mnfdm, e1081c0, 2, (RF, RF_IF), rd_rm), 15399 cCL(mnfdz, e1081e0, 2, (RF, RF_IF), rd_rm), 15400 cCL(mnfe, e188100, 2, (RF, RF_IF), rd_rm), 15401 cCL(mnfep, e188120, 2, (RF, RF_IF), rd_rm), 15402 cCL(mnfem, e188140, 2, (RF, RF_IF), rd_rm), 15403 cCL(mnfez, e188160, 2, (RF, RF_IF), rd_rm), 15404 15405 cCL(abss, e208100, 2, (RF, RF_IF), rd_rm), 15406 cCL(abssp, e208120, 2, (RF, RF_IF), rd_rm), 15407 cCL(abssm, e208140, 2, (RF, RF_IF), rd_rm), 15408 cCL(abssz, e208160, 2, (RF, RF_IF), rd_rm), 15409 cCL(absd, e208180, 2, (RF, RF_IF), rd_rm), 15410 cCL(absdp, e2081a0, 2, (RF, RF_IF), rd_rm), 15411 cCL(absdm, e2081c0, 2, (RF, RF_IF), rd_rm), 15412 cCL(absdz, e2081e0, 2, (RF, RF_IF), rd_rm), 15413 cCL(abse, e288100, 2, (RF, RF_IF), rd_rm), 15414 cCL(absep, e288120, 2, (RF, RF_IF), rd_rm), 15415 cCL(absem, e288140, 2, (RF, RF_IF), rd_rm), 15416 cCL(absez, e288160, 2, (RF, RF_IF), rd_rm), 15417 15418 cCL(rnds, e308100, 2, (RF, RF_IF), rd_rm), 15419 cCL(rndsp, e308120, 2, (RF, RF_IF), rd_rm), 15420 cCL(rndsm, e308140, 2, (RF, RF_IF), rd_rm), 15421 cCL(rndsz, e308160, 2, (RF, RF_IF), rd_rm), 15422 cCL(rndd, e308180, 2, (RF, RF_IF), rd_rm), 15423 cCL(rnddp, e3081a0, 2, (RF, RF_IF), rd_rm), 15424 cCL(rnddm, e3081c0, 2, (RF, RF_IF), rd_rm), 15425 cCL(rnddz, e3081e0, 2, (RF, RF_IF), rd_rm), 15426 cCL(rnde, e388100, 2, (RF, RF_IF), rd_rm), 15427 cCL(rndep, e388120, 2, (RF, RF_IF), rd_rm), 15428 cCL(rndem, e388140, 2, (RF, RF_IF), rd_rm), 15429 cCL(rndez, e388160, 2, (RF, RF_IF), rd_rm), 15430 15431 cCL(sqts, e408100, 2, (RF, RF_IF), rd_rm), 15432 cCL(sqtsp, e408120, 2, (RF, RF_IF), rd_rm), 15433 cCL(sqtsm, e408140, 2, (RF, RF_IF), rd_rm), 15434 cCL(sqtsz, e408160, 2, (RF, 
RF_IF), rd_rm), 15435 cCL(sqtd, e408180, 2, (RF, RF_IF), rd_rm), 15436 cCL(sqtdp, e4081a0, 2, (RF, RF_IF), rd_rm), 15437 cCL(sqtdm, e4081c0, 2, (RF, RF_IF), rd_rm), 15438 cCL(sqtdz, e4081e0, 2, (RF, RF_IF), rd_rm), 15439 cCL(sqte, e488100, 2, (RF, RF_IF), rd_rm), 15440 cCL(sqtep, e488120, 2, (RF, RF_IF), rd_rm), 15441 cCL(sqtem, e488140, 2, (RF, RF_IF), rd_rm), 15442 cCL(sqtez, e488160, 2, (RF, RF_IF), rd_rm), 15443 15444 cCL(logs, e508100, 2, (RF, RF_IF), rd_rm), 15445 cCL(logsp, e508120, 2, (RF, RF_IF), rd_rm), 15446 cCL(logsm, e508140, 2, (RF, RF_IF), rd_rm), 15447 cCL(logsz, e508160, 2, (RF, RF_IF), rd_rm), 15448 cCL(logd, e508180, 2, (RF, RF_IF), rd_rm), 15449 cCL(logdp, e5081a0, 2, (RF, RF_IF), rd_rm), 15450 cCL(logdm, e5081c0, 2, (RF, RF_IF), rd_rm), 15451 cCL(logdz, e5081e0, 2, (RF, RF_IF), rd_rm), 15452 cCL(loge, e588100, 2, (RF, RF_IF), rd_rm), 15453 cCL(logep, e588120, 2, (RF, RF_IF), rd_rm), 15454 cCL(logem, e588140, 2, (RF, RF_IF), rd_rm), 15455 cCL(logez, e588160, 2, (RF, RF_IF), rd_rm), 15456 15457 cCL(lgns, e608100, 2, (RF, RF_IF), rd_rm), 15458 cCL(lgnsp, e608120, 2, (RF, RF_IF), rd_rm), 15459 cCL(lgnsm, e608140, 2, (RF, RF_IF), rd_rm), 15460 cCL(lgnsz, e608160, 2, (RF, RF_IF), rd_rm), 15461 cCL(lgnd, e608180, 2, (RF, RF_IF), rd_rm), 15462 cCL(lgndp, e6081a0, 2, (RF, RF_IF), rd_rm), 15463 cCL(lgndm, e6081c0, 2, (RF, RF_IF), rd_rm), 15464 cCL(lgndz, e6081e0, 2, (RF, RF_IF), rd_rm), 15465 cCL(lgne, e688100, 2, (RF, RF_IF), rd_rm), 15466 cCL(lgnep, e688120, 2, (RF, RF_IF), rd_rm), 15467 cCL(lgnem, e688140, 2, (RF, RF_IF), rd_rm), 15468 cCL(lgnez, e688160, 2, (RF, RF_IF), rd_rm), 15469 15470 cCL(exps, e708100, 2, (RF, RF_IF), rd_rm), 15471 cCL(expsp, e708120, 2, (RF, RF_IF), rd_rm), 15472 cCL(expsm, e708140, 2, (RF, RF_IF), rd_rm), 15473 cCL(expsz, e708160, 2, (RF, RF_IF), rd_rm), 15474 cCL(expd, e708180, 2, (RF, RF_IF), rd_rm), 15475 cCL(expdp, e7081a0, 2, (RF, RF_IF), rd_rm), 15476 cCL(expdm, e7081c0, 2, (RF, RF_IF), rd_rm), 15477 cCL(expdz, e7081e0, 
2, (RF, RF_IF), rd_rm), 15478 cCL(expe, e788100, 2, (RF, RF_IF), rd_rm), 15479 cCL(expep, e788120, 2, (RF, RF_IF), rd_rm), 15480 cCL(expem, e788140, 2, (RF, RF_IF), rd_rm), 15481 cCL(expdz, e788160, 2, (RF, RF_IF), rd_rm), 15482 15483 cCL(sins, e808100, 2, (RF, RF_IF), rd_rm), 15484 cCL(sinsp, e808120, 2, (RF, RF_IF), rd_rm), 15485 cCL(sinsm, e808140, 2, (RF, RF_IF), rd_rm), 15486 cCL(sinsz, e808160, 2, (RF, RF_IF), rd_rm), 15487 cCL(sind, e808180, 2, (RF, RF_IF), rd_rm), 15488 cCL(sindp, e8081a0, 2, (RF, RF_IF), rd_rm), 15489 cCL(sindm, e8081c0, 2, (RF, RF_IF), rd_rm), 15490 cCL(sindz, e8081e0, 2, (RF, RF_IF), rd_rm), 15491 cCL(sine, e888100, 2, (RF, RF_IF), rd_rm), 15492 cCL(sinep, e888120, 2, (RF, RF_IF), rd_rm), 15493 cCL(sinem, e888140, 2, (RF, RF_IF), rd_rm), 15494 cCL(sinez, e888160, 2, (RF, RF_IF), rd_rm), 15495 15496 cCL(coss, e908100, 2, (RF, RF_IF), rd_rm), 15497 cCL(cossp, e908120, 2, (RF, RF_IF), rd_rm), 15498 cCL(cossm, e908140, 2, (RF, RF_IF), rd_rm), 15499 cCL(cossz, e908160, 2, (RF, RF_IF), rd_rm), 15500 cCL(cosd, e908180, 2, (RF, RF_IF), rd_rm), 15501 cCL(cosdp, e9081a0, 2, (RF, RF_IF), rd_rm), 15502 cCL(cosdm, e9081c0, 2, (RF, RF_IF), rd_rm), 15503 cCL(cosdz, e9081e0, 2, (RF, RF_IF), rd_rm), 15504 cCL(cose, e988100, 2, (RF, RF_IF), rd_rm), 15505 cCL(cosep, e988120, 2, (RF, RF_IF), rd_rm), 15506 cCL(cosem, e988140, 2, (RF, RF_IF), rd_rm), 15507 cCL(cosez, e988160, 2, (RF, RF_IF), rd_rm), 15508 15509 cCL(tans, ea08100, 2, (RF, RF_IF), rd_rm), 15510 cCL(tansp, ea08120, 2, (RF, RF_IF), rd_rm), 15511 cCL(tansm, ea08140, 2, (RF, RF_IF), rd_rm), 15512 cCL(tansz, ea08160, 2, (RF, RF_IF), rd_rm), 15513 cCL(tand, ea08180, 2, (RF, RF_IF), rd_rm), 15514 cCL(tandp, ea081a0, 2, (RF, RF_IF), rd_rm), 15515 cCL(tandm, ea081c0, 2, (RF, RF_IF), rd_rm), 15516 cCL(tandz, ea081e0, 2, (RF, RF_IF), rd_rm), 15517 cCL(tane, ea88100, 2, (RF, RF_IF), rd_rm), 15518 cCL(tanep, ea88120, 2, (RF, RF_IF), rd_rm), 15519 cCL(tanem, ea88140, 2, (RF, RF_IF), rd_rm), 15520 cCL(tanez, 
ea88160, 2, (RF, RF_IF), rd_rm), 15521 15522 cCL(asns, eb08100, 2, (RF, RF_IF), rd_rm), 15523 cCL(asnsp, eb08120, 2, (RF, RF_IF), rd_rm), 15524 cCL(asnsm, eb08140, 2, (RF, RF_IF), rd_rm), 15525 cCL(asnsz, eb08160, 2, (RF, RF_IF), rd_rm), 15526 cCL(asnd, eb08180, 2, (RF, RF_IF), rd_rm), 15527 cCL(asndp, eb081a0, 2, (RF, RF_IF), rd_rm), 15528 cCL(asndm, eb081c0, 2, (RF, RF_IF), rd_rm), 15529 cCL(asndz, eb081e0, 2, (RF, RF_IF), rd_rm), 15530 cCL(asne, eb88100, 2, (RF, RF_IF), rd_rm), 15531 cCL(asnep, eb88120, 2, (RF, RF_IF), rd_rm), 15532 cCL(asnem, eb88140, 2, (RF, RF_IF), rd_rm), 15533 cCL(asnez, eb88160, 2, (RF, RF_IF), rd_rm), 15534 15535 cCL(acss, ec08100, 2, (RF, RF_IF), rd_rm), 15536 cCL(acssp, ec08120, 2, (RF, RF_IF), rd_rm), 15537 cCL(acssm, ec08140, 2, (RF, RF_IF), rd_rm), 15538 cCL(acssz, ec08160, 2, (RF, RF_IF), rd_rm), 15539 cCL(acsd, ec08180, 2, (RF, RF_IF), rd_rm), 15540 cCL(acsdp, ec081a0, 2, (RF, RF_IF), rd_rm), 15541 cCL(acsdm, ec081c0, 2, (RF, RF_IF), rd_rm), 15542 cCL(acsdz, ec081e0, 2, (RF, RF_IF), rd_rm), 15543 cCL(acse, ec88100, 2, (RF, RF_IF), rd_rm), 15544 cCL(acsep, ec88120, 2, (RF, RF_IF), rd_rm), 15545 cCL(acsem, ec88140, 2, (RF, RF_IF), rd_rm), 15546 cCL(acsez, ec88160, 2, (RF, RF_IF), rd_rm), 15547 15548 cCL(atns, ed08100, 2, (RF, RF_IF), rd_rm), 15549 cCL(atnsp, ed08120, 2, (RF, RF_IF), rd_rm), 15550 cCL(atnsm, ed08140, 2, (RF, RF_IF), rd_rm), 15551 cCL(atnsz, ed08160, 2, (RF, RF_IF), rd_rm), 15552 cCL(atnd, ed08180, 2, (RF, RF_IF), rd_rm), 15553 cCL(atndp, ed081a0, 2, (RF, RF_IF), rd_rm), 15554 cCL(atndm, ed081c0, 2, (RF, RF_IF), rd_rm), 15555 cCL(atndz, ed081e0, 2, (RF, RF_IF), rd_rm), 15556 cCL(atne, ed88100, 2, (RF, RF_IF), rd_rm), 15557 cCL(atnep, ed88120, 2, (RF, RF_IF), rd_rm), 15558 cCL(atnem, ed88140, 2, (RF, RF_IF), rd_rm), 15559 cCL(atnez, ed88160, 2, (RF, RF_IF), rd_rm), 15560 15561 cCL(urds, ee08100, 2, (RF, RF_IF), rd_rm), 15562 cCL(urdsp, ee08120, 2, (RF, RF_IF), rd_rm), 15563 cCL(urdsm, ee08140, 2, (RF, RF_IF), rd_rm), 
 cCL(urdsz,   ee08160, 2, (RF, RF_IF),    rd_rm),
 cCL(urdd,    ee08180, 2, (RF, RF_IF),    rd_rm),
 cCL(urddp,   ee081a0, 2, (RF, RF_IF),    rd_rm),
 cCL(urddm,   ee081c0, 2, (RF, RF_IF),    rd_rm),
 cCL(urddz,   ee081e0, 2, (RF, RF_IF),    rd_rm),
 cCL(urde,    ee88100, 2, (RF, RF_IF),    rd_rm),
 cCL(urdep,   ee88120, 2, (RF, RF_IF),    rd_rm),
 cCL(urdem,   ee88140, 2, (RF, RF_IF),    rd_rm),
 cCL(urdez,   ee88160, 2, (RF, RF_IF),    rd_rm),

 cCL(nrms,    ef08100, 2, (RF, RF_IF),    rd_rm),
 cCL(nrmsp,   ef08120, 2, (RF, RF_IF),    rd_rm),
 cCL(nrmsm,   ef08140, 2, (RF, RF_IF),    rd_rm),
 cCL(nrmsz,   ef08160, 2, (RF, RF_IF),    rd_rm),
 cCL(nrmd,    ef08180, 2, (RF, RF_IF),    rd_rm),
 cCL(nrmdp,   ef081a0, 2, (RF, RF_IF),    rd_rm),
 cCL(nrmdm,   ef081c0, 2, (RF, RF_IF),    rd_rm),
 cCL(nrmdz,   ef081e0, 2, (RF, RF_IF),    rd_rm),
 cCL(nrme,    ef88100, 2, (RF, RF_IF),    rd_rm),
 cCL(nrmep,   ef88120, 2, (RF, RF_IF),    rd_rm),
 cCL(nrmem,   ef88140, 2, (RF, RF_IF),    rd_rm),
 cCL(nrmez,   ef88160, 2, (RF, RF_IF),    rd_rm),

 /* FPA dyadic operations (adf, suf, rsf, muf, dvf, rdf, pow, rpw,
    rmf, fml, fdv, frd, pol -- TODO(review): confirm the mnemonic
    expansions against the FPA reference).  Same s/d/e precision and
    p/m/z rounding suffix pattern as the monadic groups, but three
    operands via the rd_rn_rm encoder.  */
 cCL(adfs,    e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfsp,   e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfsm,   e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfsz,   e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfd,    e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfdp,   e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfdm,   e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfdz,   e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfe,    e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfep,   e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfem,   e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfez,   e080160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(sufs,    e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufsp,   e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufsm,   e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufsz,   e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufd,    e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufdp,   e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufdm,   e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufdz,   e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufe,    e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufep,   e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufem,   e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufez,   e280160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(rsfs,    e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfsp,   e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfsm,   e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfsz,   e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfd,    e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfdp,   e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfdm,   e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfdz,   e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfe,    e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfep,   e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfem,   e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfez,   e380160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(mufs,    e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufsp,   e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufsm,   e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufsz,   e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufd,    e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufdp,   e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufdm,   e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufdz,   e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufe,    e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufep,   e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufem,   e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufez,   e180160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(dvfs,    e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfsp,   e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfsm,   e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfsz,   e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfd,    e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfdp,   e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfdm,   e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfdz,   e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfe,    e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfep,   e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfem,   e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfez,   e480160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(rdfs,    e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfsp,   e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfsm,   e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfsz,   e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfd,    e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfdp,   e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfdm,   e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfdz,   e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfe,    e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfep,   e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfem,   e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfez,   e580160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(pows,    e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powsp,   e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powsm,   e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powsz,   e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powd,    e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powdp,   e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powdm,   e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powdz,   e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powe,    e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powep,   e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powem,   e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powez,   e680160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(rpws,    e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwsp,   e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwsm,   e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwsz,   e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwd,    e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwdp,   e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwdm,   e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwdz,   e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwe,    e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwep,   e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwem,   e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwez,   e780160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(rmfs,    e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfsp,   e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfsm,   e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfsz,   e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfd,    e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfdp,   e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfdm,   e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfdz,   e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfe,    e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfep,   e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfem,   e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfez,   e880160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(fmls,    e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmlsp,   e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmlsm,   e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmlsz,   e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmld,    e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmldp,   e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmldm,   e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmldz,   e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmle,    e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmlep,   e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmlem,   e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmlez,   e980160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(fdvs,    ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvsp,   ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvsm,   ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvsz,   ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvd,    ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvdp,   ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvdm,   ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvdz,   ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdve,    ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvep,   ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvem,   ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvez,   ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(frds,    eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdsp,   eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdsm,   eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdsz,   eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdd,    eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frddp,   eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frddm,   eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frddz,   eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frde,    eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdep,   eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdem,   eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdez,   eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(pols,    ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(polsp,   ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(polsm,   ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(polsz,   ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(pold,    ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(poldp,   ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(poldm,   ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(poldz,   ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(pole,    ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(polep,   ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(polem,   ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(polez,   ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
15755 15756 cCE(cmf, e90f110, 2, (RF, RF_IF), fpa_cmp), 15757 C3E(cmfe, ed0f110, 2, (RF, RF_IF), fpa_cmp), 15758 cCE(cnf, eb0f110, 2, (RF, RF_IF), fpa_cmp), 15759 C3E(cnfe, ef0f110, 2, (RF, RF_IF), fpa_cmp), 15760 15761 cCL(flts, e000110, 2, (RF, RR), rn_rd), 15762 cCL(fltsp, e000130, 2, (RF, RR), rn_rd), 15763 cCL(fltsm, e000150, 2, (RF, RR), rn_rd), 15764 cCL(fltsz, e000170, 2, (RF, RR), rn_rd), 15765 cCL(fltd, e000190, 2, (RF, RR), rn_rd), 15766 cCL(fltdp, e0001b0, 2, (RF, RR), rn_rd), 15767 cCL(fltdm, e0001d0, 2, (RF, RR), rn_rd), 15768 cCL(fltdz, e0001f0, 2, (RF, RR), rn_rd), 15769 cCL(flte, e080110, 2, (RF, RR), rn_rd), 15770 cCL(fltep, e080130, 2, (RF, RR), rn_rd), 15771 cCL(fltem, e080150, 2, (RF, RR), rn_rd), 15772 cCL(fltez, e080170, 2, (RF, RR), rn_rd), 15773 15774 /* The implementation of the FIX instruction is broken on some 15775 assemblers, in that it accepts a precision specifier as well as a 15776 rounding specifier, despite the fact that this is meaningless. 15777 To be more compatible, we accept it as well, though of course it 15778 does not set any bits. */ 15779 cCE(fix, e100110, 2, (RR, RF), rd_rm), 15780 cCL(fixp, e100130, 2, (RR, RF), rd_rm), 15781 cCL(fixm, e100150, 2, (RR, RF), rd_rm), 15782 cCL(fixz, e100170, 2, (RR, RF), rd_rm), 15783 cCL(fixsp, e100130, 2, (RR, RF), rd_rm), 15784 cCL(fixsm, e100150, 2, (RR, RF), rd_rm), 15785 cCL(fixsz, e100170, 2, (RR, RF), rd_rm), 15786 cCL(fixdp, e100130, 2, (RR, RF), rd_rm), 15787 cCL(fixdm, e100150, 2, (RR, RF), rd_rm), 15788 cCL(fixdz, e100170, 2, (RR, RF), rd_rm), 15789 cCL(fixep, e100130, 2, (RR, RF), rd_rm), 15790 cCL(fixem, e100150, 2, (RR, RF), rd_rm), 15791 cCL(fixez, e100170, 2, (RR, RF), rd_rm), 15792 15793 /* Instructions that were new with the real FPA, call them V2. 
*/ 15794#undef ARM_VARIANT 15795#define ARM_VARIANT &fpu_fpa_ext_v2 15796 cCE(lfm, c100200, 3, (RF, I4b, ADDR), fpa_ldmstm), 15797 cCL(lfmfd, c900200, 3, (RF, I4b, ADDR), fpa_ldmstm), 15798 cCL(lfmea, d100200, 3, (RF, I4b, ADDR), fpa_ldmstm), 15799 cCE(sfm, c000200, 3, (RF, I4b, ADDR), fpa_ldmstm), 15800 cCL(sfmfd, d000200, 3, (RF, I4b, ADDR), fpa_ldmstm), 15801 cCL(sfmea, c800200, 3, (RF, I4b, ADDR), fpa_ldmstm), 15802 15803#undef ARM_VARIANT 15804#define ARM_VARIANT &fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */ 15805 /* Moves and type conversions. */ 15806 cCE(fcpys, eb00a40, 2, (RVS, RVS), vfp_sp_monadic), 15807 cCE(fmrs, e100a10, 2, (RR, RVS), vfp_reg_from_sp), 15808 cCE(fmsr, e000a10, 2, (RVS, RR), vfp_sp_from_reg), 15809 cCE(fmstat, ef1fa10, 0, (), noargs), 15810 cCE(fsitos, eb80ac0, 2, (RVS, RVS), vfp_sp_monadic), 15811 cCE(fuitos, eb80a40, 2, (RVS, RVS), vfp_sp_monadic), 15812 cCE(ftosis, ebd0a40, 2, (RVS, RVS), vfp_sp_monadic), 15813 cCE(ftosizs, ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic), 15814 cCE(ftouis, ebc0a40, 2, (RVS, RVS), vfp_sp_monadic), 15815 cCE(ftouizs, ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic), 15816 cCE(fmrx, ef00a10, 2, (RR, RVC), rd_rn), 15817 cCE(fmxr, ee00a10, 2, (RVC, RR), rn_rd), 15818 cCE(vmrs, ef00a10, 2, (APSR_RR, RVC), vfp_vmrs), 15819 cCE(vmsr, ee00a10, 2, (RVC, RR), vfp_vmsr), 15820 15821 /* Memory operations. 
*/ 15822 cCE(flds, d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst), 15823 cCE(fsts, d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst), 15824 cCE(fldmias, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia), 15825 cCE(fldmfds, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia), 15826 cCE(fldmdbs, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb), 15827 cCE(fldmeas, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb), 15828 cCE(fldmiax, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia), 15829 cCE(fldmfdx, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia), 15830 cCE(fldmdbx, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb), 15831 cCE(fldmeax, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb), 15832 cCE(fstmias, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia), 15833 cCE(fstmeas, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia), 15834 cCE(fstmdbs, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb), 15835 cCE(fstmfds, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb), 15836 cCE(fstmiax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia), 15837 cCE(fstmeax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia), 15838 cCE(fstmdbx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb), 15839 cCE(fstmfdx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb), 15840 15841 /* Monadic operations. */ 15842 cCE(fabss, eb00ac0, 2, (RVS, RVS), vfp_sp_monadic), 15843 cCE(fnegs, eb10a40, 2, (RVS, RVS), vfp_sp_monadic), 15844 cCE(fsqrts, eb10ac0, 2, (RVS, RVS), vfp_sp_monadic), 15845 15846 /* Dyadic operations. */ 15847 cCE(fadds, e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic), 15848 cCE(fsubs, e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic), 15849 cCE(fmuls, e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic), 15850 cCE(fdivs, e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic), 15851 cCE(fmacs, e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic), 15852 cCE(fmscs, e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic), 15853 cCE(fnmuls, e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic), 15854 cCE(fnmacs, e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic), 15855 cCE(fnmscs, e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic), 15856 15857 /* Comparisons. 
*/ 15858 cCE(fcmps, eb40a40, 2, (RVS, RVS), vfp_sp_monadic), 15859 cCE(fcmpzs, eb50a40, 1, (RVS), vfp_sp_compare_z), 15860 cCE(fcmpes, eb40ac0, 2, (RVS, RVS), vfp_sp_monadic), 15861 cCE(fcmpezs, eb50ac0, 1, (RVS), vfp_sp_compare_z), 15862 15863#undef ARM_VARIANT 15864#define ARM_VARIANT &fpu_vfp_ext_v1 /* VFP V1 (Double precision). */ 15865 /* Moves and type conversions. */ 15866 cCE(fcpyd, eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm), 15867 cCE(fcvtds, eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt), 15868 cCE(fcvtsd, eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt), 15869 cCE(fmdhr, e200b10, 2, (RVD, RR), vfp_dp_rn_rd), 15870 cCE(fmdlr, e000b10, 2, (RVD, RR), vfp_dp_rn_rd), 15871 cCE(fmrdh, e300b10, 2, (RR, RVD), vfp_dp_rd_rn), 15872 cCE(fmrdl, e100b10, 2, (RR, RVD), vfp_dp_rd_rn), 15873 cCE(fsitod, eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt), 15874 cCE(fuitod, eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt), 15875 cCE(ftosid, ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt), 15876 cCE(ftosizd, ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt), 15877 cCE(ftouid, ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt), 15878 cCE(ftouizd, ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt), 15879 15880 /* Memory operations. */ 15881 cCE(fldd, d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst), 15882 cCE(fstd, d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst), 15883 cCE(fldmiad, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia), 15884 cCE(fldmfdd, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia), 15885 cCE(fldmdbd, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb), 15886 cCE(fldmead, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb), 15887 cCE(fstmiad, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia), 15888 cCE(fstmead, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia), 15889 cCE(fstmdbd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb), 15890 cCE(fstmfdd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb), 15891 15892 /* Monadic operations. 
*/ 15893 cCE(fabsd, eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm), 15894 cCE(fnegd, eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm), 15895 cCE(fsqrtd, eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm), 15896 15897 /* Dyadic operations. */ 15898 cCE(faddd, e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), 15899 cCE(fsubd, e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), 15900 cCE(fmuld, e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), 15901 cCE(fdivd, e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), 15902 cCE(fmacd, e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), 15903 cCE(fmscd, e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), 15904 cCE(fnmuld, e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), 15905 cCE(fnmacd, e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), 15906 cCE(fnmscd, e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), 15907 15908 /* Comparisons. */ 15909 cCE(fcmpd, eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm), 15910 cCE(fcmpzd, eb50b40, 1, (RVD), vfp_dp_rd), 15911 cCE(fcmped, eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm), 15912 cCE(fcmpezd, eb50bc0, 1, (RVD), vfp_dp_rd), 15913 15914#undef ARM_VARIANT 15915#define ARM_VARIANT &fpu_vfp_ext_v2 15916 cCE(fmsrr, c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2), 15917 cCE(fmrrs, c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2), 15918 cCE(fmdrr, c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn), 15919 cCE(fmrrd, c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm), 15920 15921/* Instructions which may belong to either the Neon or VFP instruction sets. 15922 Individual encoder functions perform additional architecture checks. */ 15923#undef ARM_VARIANT 15924#define ARM_VARIANT &fpu_vfp_ext_v1xd 15925#undef THUMB_VARIANT 15926#define THUMB_VARIANT &fpu_vfp_ext_v1xd 15927 /* These mnemonics are unique to VFP. 
*/ 15928 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt), 15929 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div), 15930 nCE(vnmul, vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul), 15931 nCE(vnmla, vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul), 15932 nCE(vnmls, vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul), 15933 nCE(vcmp, vcmp, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp), 15934 nCE(vcmpe, vcmpe, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp), 15935 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push), 15936 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop), 15937 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz), 15938 15939 /* Mnemonics shared by Neon and VFP. */ 15940 nCEF(vmul, vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul), 15941 nCEF(vmla, vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar), 15942 nCEF(vmls, vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar), 15943 15944 nCEF(vadd, vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i), 15945 nCEF(vsub, vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i), 15946 15947 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg), 15948 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg), 15949 15950 NCE(vldm, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm), 15951 NCE(vldmia, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm), 15952 NCE(vldmdb, d100b00, 2, (RRw, VRSDLST), neon_ldm_stm), 15953 NCE(vstm, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm), 15954 NCE(vstmia, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm), 15955 NCE(vstmdb, d000b00, 2, (RRw, VRSDLST), neon_ldm_stm), 15956 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str), 15957 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str), 15958 15959 nCEF(vcvt, vcvt, 3, (RNSDQ, RNSDQ, oI32b), neon_cvt), 15960 15961 /* NOTE: All VMOV encoding is special-cased! 
*/ 15962 NCE(vmov, 0, 1, (VMOV), neon_mov), 15963 NCE(vmovq, 0, 1, (VMOV), neon_mov), 15964 15965#undef THUMB_VARIANT 15966#define THUMB_VARIANT &fpu_neon_ext_v1 15967#undef ARM_VARIANT 15968#define ARM_VARIANT &fpu_neon_ext_v1 15969 /* Data processing with three registers of the same length. */ 15970 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */ 15971 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su), 15972 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su), 15973 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su), 15974 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su), 15975 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su), 15976 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su), 15977 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su), 15978 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su), 15979 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */ 15980 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su), 15981 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su), 15982 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su), 15983 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su), 15984 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl), 15985 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl), 15986 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl), 15987 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl), 15988 /* If not immediate, fall back to neon_dyadic_i64_su. 15989 shl_imm should accept I8 I16 I32 I64, 15990 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */ 15991 nUF(vshl, vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm), 15992 nUF(vshlq, vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm), 15993 nUF(vqshl, vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm), 15994 nUF(vqshlq, vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm), 15995 /* Logic ops, types optional & ignored. 
*/ 15996 nUF(vand, vand, 2, (RNDQ, NILO), neon_logic), 15997 nUF(vandq, vand, 2, (RNQ, NILO), neon_logic), 15998 nUF(vbic, vbic, 2, (RNDQ, NILO), neon_logic), 15999 nUF(vbicq, vbic, 2, (RNQ, NILO), neon_logic), 16000 nUF(vorr, vorr, 2, (RNDQ, NILO), neon_logic), 16001 nUF(vorrq, vorr, 2, (RNQ, NILO), neon_logic), 16002 nUF(vorn, vorn, 2, (RNDQ, NILO), neon_logic), 16003 nUF(vornq, vorn, 2, (RNQ, NILO), neon_logic), 16004 nUF(veor, veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic), 16005 nUF(veorq, veor, 3, (RNQ, oRNQ, RNQ), neon_logic), 16006 /* Bitfield ops, untyped. */ 16007 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield), 16008 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield), 16009 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield), 16010 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield), 16011 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield), 16012 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield), 16013 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */ 16014 nUF(vabd, vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su), 16015 nUF(vabdq, vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su), 16016 nUF(vmax, vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su), 16017 nUF(vmaxq, vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su), 16018 nUF(vmin, vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su), 16019 nUF(vminq, vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su), 16020 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall 16021 back to neon_dyadic_if_su. 
*/ 16022 nUF(vcge, vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp), 16023 nUF(vcgeq, vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp), 16024 nUF(vcgt, vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp), 16025 nUF(vcgtq, vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp), 16026 nUF(vclt, vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv), 16027 nUF(vcltq, vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv), 16028 nUF(vcle, vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv), 16029 nUF(vcleq, vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv), 16030 /* Comparison. Type I8 I16 I32 F32. */ 16031 nUF(vceq, vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq), 16032 nUF(vceqq, vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq), 16033 /* As above, D registers only. */ 16034 nUF(vpmax, vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d), 16035 nUF(vpmin, vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d), 16036 /* Int and float variants, signedness unimportant. */ 16037 nUF(vmlaq, vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar), 16038 nUF(vmlsq, vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar), 16039 nUF(vpadd, vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d), 16040 /* Add/sub take types I8 I16 I32 I64 F32. */ 16041 nUF(vaddq, vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i), 16042 nUF(vsubq, vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i), 16043 /* vtst takes sizes 8, 16, 32. */ 16044 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst), 16045 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst), 16046 /* VMUL takes I8 I16 I32 F32 P8. */ 16047 nUF(vmulq, vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul), 16048 /* VQD{R}MULH takes S16 S32. 
*/ 16049 nUF(vqdmulh, vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh), 16050 nUF(vqdmulhq, vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh), 16051 nUF(vqrdmulh, vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh), 16052 nUF(vqrdmulhq, vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh), 16053 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute), 16054 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute), 16055 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute), 16056 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute), 16057 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv), 16058 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv), 16059 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv), 16060 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv), 16061 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step), 16062 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step), 16063 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step), 16064 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step), 16065 16066 /* Two address, int/float. Types S8 S16 S32 F32. */ 16067 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg), 16068 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg), 16069 16070 /* Data processing with two registers and a shift amount. */ 16071 /* Right shifts, and variants with rounding. 16072 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. 
*/ 16073 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm), 16074 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm), 16075 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm), 16076 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm), 16077 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm), 16078 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm), 16079 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm), 16080 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm), 16081 /* Shift and insert. Sizes accepted 8 16 32 64. */ 16082 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli), 16083 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli), 16084 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri), 16085 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri), 16086 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */ 16087 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm), 16088 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm), 16089 /* Right shift immediate, saturating & narrowing, with rounding variants. 16090 Types accepted S16 S32 S64 U16 U32 U64. */ 16091 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow), 16092 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow), 16093 /* As above, unsigned. Types accepted S16 S32 S64. */ 16094 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u), 16095 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u), 16096 /* Right shift narrowing. Types accepted I16 I32 I64. */ 16097 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow), 16098 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow), 16099 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */ 16100 nUF(vshll, vshll, 3, (RNQ, RND, I32), neon_shll), 16101 /* CVT with optional immediate for fixed-point variant. 
*/ 16102 nUF(vcvtq, vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt), 16103 16104 nUF(vmvn, vmvn, 2, (RNDQ, RNDQ_IMVNb), neon_mvn), 16105 nUF(vmvnq, vmvn, 2, (RNQ, RNDQ_IMVNb), neon_mvn), 16106 16107 /* Data processing, three registers of different lengths. */ 16108 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */ 16109 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal), 16110 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long), 16111 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long), 16112 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long), 16113 /* If not scalar, fall back to neon_dyadic_long. 16114 Vector types as above, scalar types S16 S32 U16 U32. */ 16115 nUF(vmlal, vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long), 16116 nUF(vmlsl, vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long), 16117 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */ 16118 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide), 16119 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide), 16120 /* Dyadic, narrowing insns. Types I16 I32 I64. */ 16121 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow), 16122 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow), 16123 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow), 16124 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow), 16125 /* Saturating doubling multiplies. Types S16 S32. */ 16126 nUF(vqdmlal, vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long), 16127 nUF(vqdmlsl, vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long), 16128 nUF(vqdmull, vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long), 16129 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types 16130 S16 S32 U16 U32. */ 16131 nUF(vmull, vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull), 16132 16133 /* Extract. Size 8. 
*/ 16134 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext), 16135 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext), 16136 16137 /* Two registers, miscellaneous. */ 16138 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */ 16139 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev), 16140 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev), 16141 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev), 16142 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev), 16143 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev), 16144 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev), 16145 /* Vector replicate. Sizes 8 16 32. */ 16146 nCE(vdup, vdup, 2, (RNDQ, RR_RNSC), neon_dup), 16147 nCE(vdupq, vdup, 2, (RNQ, RR_RNSC), neon_dup), 16148 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */ 16149 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl), 16150 /* VMOVN. Types I16 I32 I64. */ 16151 nUF(vmovn, vmovn, 2, (RND, RNQ), neon_movn), 16152 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */ 16153 nUF(vqmovn, vqmovn, 2, (RND, RNQ), neon_qmovn), 16154 /* VQMOVUN. Types S16 S32 S64. */ 16155 nUF(vqmovun, vqmovun, 2, (RND, RNQ), neon_qmovun), 16156 /* VZIP / VUZP. Sizes 8 16 32. */ 16157 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp), 16158 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp), 16159 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp), 16160 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp), 16161 /* VQABS / VQNEG. Types S8 S16 S32. */ 16162 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg), 16163 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg), 16164 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg), 16165 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg), 16166 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. 
*/ 16167 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long), 16168 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long), 16169 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long), 16170 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long), 16171 /* Reciprocal estimates. Types U32 F32. */ 16172 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est), 16173 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est), 16174 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est), 16175 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est), 16176 /* VCLS. Types S8 S16 S32. */ 16177 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls), 16178 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls), 16179 /* VCLZ. Types I8 I16 I32. */ 16180 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz), 16181 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz), 16182 /* VCNT. Size 8. */ 16183 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt), 16184 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt), 16185 /* Two address, untyped. */ 16186 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp), 16187 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp), 16188 /* VTRN. Sizes 8 16 32. */ 16189 nUF(vtrn, vtrn, 2, (RNDQ, RNDQ), neon_trn), 16190 nUF(vtrnq, vtrn, 2, (RNQ, RNQ), neon_trn), 16191 16192 /* Table lookup. Size 8. */ 16193 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx), 16194 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx), 16195 16196#undef THUMB_VARIANT 16197#define THUMB_VARIANT &fpu_vfp_v3_or_neon_ext 16198#undef ARM_VARIANT 16199#define ARM_VARIANT &fpu_vfp_v3_or_neon_ext 16200 /* Neon element/structure load/store. 
*/ 16201 nUF(vld1, vld1, 2, (NSTRLST, ADDR), neon_ldx_stx), 16202 nUF(vst1, vst1, 2, (NSTRLST, ADDR), neon_ldx_stx), 16203 nUF(vld2, vld2, 2, (NSTRLST, ADDR), neon_ldx_stx), 16204 nUF(vst2, vst2, 2, (NSTRLST, ADDR), neon_ldx_stx), 16205 nUF(vld3, vld3, 2, (NSTRLST, ADDR), neon_ldx_stx), 16206 nUF(vst3, vst3, 2, (NSTRLST, ADDR), neon_ldx_stx), 16207 nUF(vld4, vld4, 2, (NSTRLST, ADDR), neon_ldx_stx), 16208 nUF(vst4, vst4, 2, (NSTRLST, ADDR), neon_ldx_stx), 16209 16210#undef THUMB_VARIANT 16211#define THUMB_VARIANT &fpu_vfp_ext_v3 16212#undef ARM_VARIANT 16213#define ARM_VARIANT &fpu_vfp_ext_v3 16214 cCE(fconsts, eb00a00, 2, (RVS, I255), vfp_sp_const), 16215 cCE(fconstd, eb00b00, 2, (RVD, I255), vfp_dp_const), 16216 cCE(fshtos, eba0a40, 2, (RVS, I16z), vfp_sp_conv_16), 16217 cCE(fshtod, eba0b40, 2, (RVD, I16z), vfp_dp_conv_16), 16218 cCE(fsltos, eba0ac0, 2, (RVS, I32), vfp_sp_conv_32), 16219 cCE(fsltod, eba0bc0, 2, (RVD, I32), vfp_dp_conv_32), 16220 cCE(fuhtos, ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16), 16221 cCE(fuhtod, ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16), 16222 cCE(fultos, ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32), 16223 cCE(fultod, ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32), 16224 cCE(ftoshs, ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16), 16225 cCE(ftoshd, ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16), 16226 cCE(ftosls, ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32), 16227 cCE(ftosld, ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32), 16228 cCE(ftouhs, ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16), 16229 cCE(ftouhd, ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16), 16230 cCE(ftouls, ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32), 16231 cCE(ftould, ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32), 16232 16233#undef THUMB_VARIANT 16234#undef ARM_VARIANT 16235#define ARM_VARIANT &arm_cext_xscale /* Intel XScale extensions. 
*/ 16236 cCE(mia, e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia), 16237 cCE(miaph, e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia), 16238 cCE(miabb, e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia), 16239 cCE(miabt, e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia), 16240 cCE(miatb, e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia), 16241 cCE(miatt, e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia), 16242 cCE(mar, c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar), 16243 cCE(mra, c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra), 16244 16245#undef ARM_VARIANT 16246#define ARM_VARIANT &arm_cext_iwmmxt /* Intel Wireless MMX technology. */ 16247 cCE(tandcb, e13f130, 1, (RR), iwmmxt_tandorc), 16248 cCE(tandch, e53f130, 1, (RR), iwmmxt_tandorc), 16249 cCE(tandcw, e93f130, 1, (RR), iwmmxt_tandorc), 16250 cCE(tbcstb, e400010, 2, (RIWR, RR), rn_rd), 16251 cCE(tbcsth, e400050, 2, (RIWR, RR), rn_rd), 16252 cCE(tbcstw, e400090, 2, (RIWR, RR), rn_rd), 16253 cCE(textrcb, e130170, 2, (RR, I7), iwmmxt_textrc), 16254 cCE(textrch, e530170, 2, (RR, I7), iwmmxt_textrc), 16255 cCE(textrcw, e930170, 2, (RR, I7), iwmmxt_textrc), 16256 cCE(textrmub, e100070, 3, (RR, RIWR, I7), iwmmxt_textrm), 16257 cCE(textrmuh, e500070, 3, (RR, RIWR, I7), iwmmxt_textrm), 16258 cCE(textrmuw, e900070, 3, (RR, RIWR, I7), iwmmxt_textrm), 16259 cCE(textrmsb, e100078, 3, (RR, RIWR, I7), iwmmxt_textrm), 16260 cCE(textrmsh, e500078, 3, (RR, RIWR, I7), iwmmxt_textrm), 16261 cCE(textrmsw, e900078, 3, (RR, RIWR, I7), iwmmxt_textrm), 16262 cCE(tinsrb, e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr), 16263 cCE(tinsrh, e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr), 16264 cCE(tinsrw, e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr), 16265 cCE(tmcr, e000110, 2, (RIWC_RIWG, RR), rn_rd), 16266 cCE(tmcrr, c400000, 3, (RIWR, RR, RR), rm_rd_rn), 16267 cCE(tmia, e200010, 3, (RIWR, RR, RR), iwmmxt_tmia), 16268 cCE(tmiaph, e280010, 3, (RIWR, RR, RR), iwmmxt_tmia), 16269 cCE(tmiabb, e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia), 16270 cCE(tmiabt, e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia), 16271 cCE(tmiatb, 
e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia), 16272 cCE(tmiatt, e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia), 16273 cCE(tmovmskb, e100030, 2, (RR, RIWR), rd_rn), 16274 cCE(tmovmskh, e500030, 2, (RR, RIWR), rd_rn), 16275 cCE(tmovmskw, e900030, 2, (RR, RIWR), rd_rn), 16276 cCE(tmrc, e100110, 2, (RR, RIWC_RIWG), rd_rn), 16277 cCE(tmrrc, c500000, 3, (RR, RR, RIWR), rd_rn_rm), 16278 cCE(torcb, e13f150, 1, (RR), iwmmxt_tandorc), 16279 cCE(torch, e53f150, 1, (RR), iwmmxt_tandorc), 16280 cCE(torcw, e93f150, 1, (RR), iwmmxt_tandorc), 16281 cCE(waccb, e0001c0, 2, (RIWR, RIWR), rd_rn), 16282 cCE(wacch, e4001c0, 2, (RIWR, RIWR), rd_rn), 16283 cCE(waccw, e8001c0, 2, (RIWR, RIWR), rd_rn), 16284 cCE(waddbss, e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16285 cCE(waddb, e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16286 cCE(waddbus, e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16287 cCE(waddhss, e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16288 cCE(waddh, e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16289 cCE(waddhus, e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16290 cCE(waddwss, eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16291 cCE(waddw, e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16292 cCE(waddwus, e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16293 cCE(waligni, e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni), 16294 cCE(walignr0, e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16295 cCE(walignr1, e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16296 cCE(walignr2, ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16297 cCE(walignr3, eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16298 cCE(wand, e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16299 cCE(wandn, e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16300 cCE(wavg2b, e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16301 cCE(wavg2br, e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16302 cCE(wavg2h, ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16303 cCE(wavg2hr, ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16304 cCE(wcmpeqb, e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16305 cCE(wcmpeqh, 
e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16306 cCE(wcmpeqw, e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16307 cCE(wcmpgtub, e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16308 cCE(wcmpgtuh, e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16309 cCE(wcmpgtuw, e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16310 cCE(wcmpgtsb, e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16311 cCE(wcmpgtsh, e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16312 cCE(wcmpgtsw, eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16313 cCE(wldrb, c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh), 16314 cCE(wldrh, c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh), 16315 cCE(wldrw, c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw), 16316 cCE(wldrd, c500100, 2, (RIWR, ADDR), iwmmxt_wldstd), 16317 cCE(wmacs, e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16318 cCE(wmacsz, e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16319 cCE(wmacu, e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16320 cCE(wmacuz, e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16321 cCE(wmadds, ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16322 cCE(wmaddu, e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16323 cCE(wmaxsb, e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16324 cCE(wmaxsh, e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16325 cCE(wmaxsw, ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16326 cCE(wmaxub, e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16327 cCE(wmaxuh, e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16328 cCE(wmaxuw, e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16329 cCE(wminsb, e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16330 cCE(wminsh, e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16331 cCE(wminsw, eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16332 cCE(wminub, e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16333 cCE(wminuh, e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16334 cCE(wminuw, e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16335 cCE(wmov, e000000, 2, (RIWR, RIWR), iwmmxt_wmov), 16336 cCE(wmulsm, e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16337 cCE(wmulsl, e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 
16338 cCE(wmulum, e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16339 cCE(wmulul, e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16340 cCE(wor, e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16341 cCE(wpackhss, e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16342 cCE(wpackhus, e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16343 cCE(wpackwss, eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16344 cCE(wpackwus, e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16345 cCE(wpackdss, ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16346 cCE(wpackdus, ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16347 cCE(wrorh, e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), 16348 cCE(wrorhg, e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), 16349 cCE(wrorw, eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), 16350 cCE(wrorwg, eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), 16351 cCE(wrord, ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), 16352 cCE(wrordg, ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), 16353 cCE(wsadb, e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16354 cCE(wsadbz, e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16355 cCE(wsadh, e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16356 cCE(wsadhz, e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16357 cCE(wshufh, e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh), 16358 cCE(wsllh, e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), 16359 cCE(wsllhg, e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), 16360 cCE(wsllw, e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), 16361 cCE(wsllwg, e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), 16362 cCE(wslld, ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), 16363 cCE(wslldg, ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), 16364 cCE(wsrah, e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), 16365 cCE(wsrahg, e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), 16366 cCE(wsraw, e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), 16367 cCE(wsrawg, e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), 16368 cCE(wsrad, ec00040, 3, 
(RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), 16369 cCE(wsradg, ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), 16370 cCE(wsrlh, e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), 16371 cCE(wsrlhg, e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), 16372 cCE(wsrlw, ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), 16373 cCE(wsrlwg, ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), 16374 cCE(wsrld, ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), 16375 cCE(wsrldg, ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), 16376 cCE(wstrb, c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh), 16377 cCE(wstrh, c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh), 16378 cCE(wstrw, c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw), 16379 cCE(wstrd, c400100, 2, (RIWR, ADDR), iwmmxt_wldstd), 16380 cCE(wsubbss, e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16381 cCE(wsubb, e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16382 cCE(wsubbus, e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16383 cCE(wsubhss, e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16384 cCE(wsubh, e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16385 cCE(wsubhus, e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16386 cCE(wsubwss, eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16387 cCE(wsubw, e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16388 cCE(wsubwus, e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16389 cCE(wunpckehub,e0000c0, 2, (RIWR, RIWR), rd_rn), 16390 cCE(wunpckehuh,e4000c0, 2, (RIWR, RIWR), rd_rn), 16391 cCE(wunpckehuw,e8000c0, 2, (RIWR, RIWR), rd_rn), 16392 cCE(wunpckehsb,e2000c0, 2, (RIWR, RIWR), rd_rn), 16393 cCE(wunpckehsh,e6000c0, 2, (RIWR, RIWR), rd_rn), 16394 cCE(wunpckehsw,ea000c0, 2, (RIWR, RIWR), rd_rn), 16395 cCE(wunpckihb, e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16396 cCE(wunpckihh, e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16397 cCE(wunpckihw, e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16398 cCE(wunpckelub,e0000e0, 2, (RIWR, RIWR), rd_rn), 16399 cCE(wunpckeluh,e4000e0, 2, (RIWR, RIWR), rd_rn), 16400 cCE(wunpckeluw,e8000e0, 2, (RIWR, RIWR), 
rd_rn), 16401 cCE(wunpckelsb,e2000e0, 2, (RIWR, RIWR), rd_rn), 16402 cCE(wunpckelsh,e6000e0, 2, (RIWR, RIWR), rd_rn), 16403 cCE(wunpckelsw,ea000e0, 2, (RIWR, RIWR), rd_rn), 16404 cCE(wunpckilb, e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16405 cCE(wunpckilh, e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16406 cCE(wunpckilw, e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16407 cCE(wxor, e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16408 cCE(wzero, e300000, 1, (RIWR), iwmmxt_wzero), 16409 16410#undef ARM_VARIANT 16411#define ARM_VARIANT &arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */ 16412 cCE(torvscb, e13f190, 1, (RR), iwmmxt_tandorc), 16413 cCE(torvsch, e53f190, 1, (RR), iwmmxt_tandorc), 16414 cCE(torvscw, e93f190, 1, (RR), iwmmxt_tandorc), 16415 cCE(wabsb, e2001c0, 2, (RIWR, RIWR), rd_rn), 16416 cCE(wabsh, e6001c0, 2, (RIWR, RIWR), rd_rn), 16417 cCE(wabsw, ea001c0, 2, (RIWR, RIWR), rd_rn), 16418 cCE(wabsdiffb, e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16419 cCE(wabsdiffh, e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16420 cCE(wabsdiffw, e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16421 cCE(waddbhusl, e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16422 cCE(waddbhusm, e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16423 cCE(waddhc, e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16424 cCE(waddwc, ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16425 cCE(waddsubhx, ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16426 cCE(wavg4, e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16427 cCE(wavg4r, e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16428 cCE(wmaddsn, ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16429 cCE(wmaddsx, eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16430 cCE(wmaddun, ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16431 cCE(wmaddux, e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16432 cCE(wmerge, e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge), 16433 cCE(wmiabb, e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16434 cCE(wmiabt, e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16435 cCE(wmiatb, 
e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16436 cCE(wmiatt, e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16437 cCE(wmiabbn, e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16438 cCE(wmiabtn, e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16439 cCE(wmiatbn, e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16440 cCE(wmiattn, e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16441 cCE(wmiawbb, e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16442 cCE(wmiawbt, e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16443 cCE(wmiawtb, ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16444 cCE(wmiawtt, eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16445 cCE(wmiawbbn, ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16446 cCE(wmiawbtn, ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16447 cCE(wmiawtbn, ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16448 cCE(wmiawttn, ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16449 cCE(wmulsmr, ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16450 cCE(wmulumr, ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16451 cCE(wmulwumr, ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16452 cCE(wmulwsmr, ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16453 cCE(wmulwum, ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16454 cCE(wmulwsm, ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16455 cCE(wmulwl, eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16456 cCE(wqmiabb, e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16457 cCE(wqmiabt, e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16458 cCE(wqmiatb, ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16459 cCE(wqmiatt, eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16460 cCE(wqmiabbn, ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16461 cCE(wqmiabtn, ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16462 cCE(wqmiatbn, ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16463 cCE(wqmiattn, ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16464 cCE(wqmulm, e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16465 cCE(wqmulmr, e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16466 cCE(wqmulwm, ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16467 cCE(wqmulwmr, 
ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16468 cCE(wsubaddhx, ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16469 16470#undef ARM_VARIANT 16471#define ARM_VARIANT &arm_cext_maverick /* Cirrus Maverick instructions. */ 16472 cCE(cfldrs, c100400, 2, (RMF, ADDRGLDC), rd_cpaddr), 16473 cCE(cfldrd, c500400, 2, (RMD, ADDRGLDC), rd_cpaddr), 16474 cCE(cfldr32, c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr), 16475 cCE(cfldr64, c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr), 16476 cCE(cfstrs, c000400, 2, (RMF, ADDRGLDC), rd_cpaddr), 16477 cCE(cfstrd, c400400, 2, (RMD, ADDRGLDC), rd_cpaddr), 16478 cCE(cfstr32, c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr), 16479 cCE(cfstr64, c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr), 16480 cCE(cfmvsr, e000450, 2, (RMF, RR), rn_rd), 16481 cCE(cfmvrs, e100450, 2, (RR, RMF), rd_rn), 16482 cCE(cfmvdlr, e000410, 2, (RMD, RR), rn_rd), 16483 cCE(cfmvrdl, e100410, 2, (RR, RMD), rd_rn), 16484 cCE(cfmvdhr, e000430, 2, (RMD, RR), rn_rd), 16485 cCE(cfmvrdh, e100430, 2, (RR, RMD), rd_rn), 16486 cCE(cfmv64lr, e000510, 2, (RMDX, RR), rn_rd), 16487 cCE(cfmvr64l, e100510, 2, (RR, RMDX), rd_rn), 16488 cCE(cfmv64hr, e000530, 2, (RMDX, RR), rn_rd), 16489 cCE(cfmvr64h, e100530, 2, (RR, RMDX), rd_rn), 16490 cCE(cfmval32, e200440, 2, (RMAX, RMFX), rd_rn), 16491 cCE(cfmv32al, e100440, 2, (RMFX, RMAX), rd_rn), 16492 cCE(cfmvam32, e200460, 2, (RMAX, RMFX), rd_rn), 16493 cCE(cfmv32am, e100460, 2, (RMFX, RMAX), rd_rn), 16494 cCE(cfmvah32, e200480, 2, (RMAX, RMFX), rd_rn), 16495 cCE(cfmv32ah, e100480, 2, (RMFX, RMAX), rd_rn), 16496 cCE(cfmva32, e2004a0, 2, (RMAX, RMFX), rd_rn), 16497 cCE(cfmv32a, e1004a0, 2, (RMFX, RMAX), rd_rn), 16498 cCE(cfmva64, e2004c0, 2, (RMAX, RMDX), rd_rn), 16499 cCE(cfmv64a, e1004c0, 2, (RMDX, RMAX), rd_rn), 16500 cCE(cfmvsc32, e2004e0, 2, (RMDS, RMDX), mav_dspsc), 16501 cCE(cfmv32sc, e1004e0, 2, (RMDX, RMDS), rd), 16502 cCE(cfcpys, e000400, 2, (RMF, RMF), rd_rn), 16503 cCE(cfcpyd, e000420, 2, (RMD, RMD), rd_rn), 16504 cCE(cfcvtsd, e000460, 2, (RMD, RMF), rd_rn), 
16505 cCE(cfcvtds, e000440, 2, (RMF, RMD), rd_rn), 16506 cCE(cfcvt32s, e000480, 2, (RMF, RMFX), rd_rn), 16507 cCE(cfcvt32d, e0004a0, 2, (RMD, RMFX), rd_rn), 16508 cCE(cfcvt64s, e0004c0, 2, (RMF, RMDX), rd_rn), 16509 cCE(cfcvt64d, e0004e0, 2, (RMD, RMDX), rd_rn), 16510 cCE(cfcvts32, e100580, 2, (RMFX, RMF), rd_rn), 16511 cCE(cfcvtd32, e1005a0, 2, (RMFX, RMD), rd_rn), 16512 cCE(cftruncs32,e1005c0, 2, (RMFX, RMF), rd_rn), 16513 cCE(cftruncd32,e1005e0, 2, (RMFX, RMD), rd_rn), 16514 cCE(cfrshl32, e000550, 3, (RMFX, RMFX, RR), mav_triple), 16515 cCE(cfrshl64, e000570, 3, (RMDX, RMDX, RR), mav_triple), 16516 cCE(cfsh32, e000500, 3, (RMFX, RMFX, I63s), mav_shift), 16517 cCE(cfsh64, e200500, 3, (RMDX, RMDX, I63s), mav_shift), 16518 cCE(cfcmps, e100490, 3, (RR, RMF, RMF), rd_rn_rm), 16519 cCE(cfcmpd, e1004b0, 3, (RR, RMD, RMD), rd_rn_rm), 16520 cCE(cfcmp32, e100590, 3, (RR, RMFX, RMFX), rd_rn_rm), 16521 cCE(cfcmp64, e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm), 16522 cCE(cfabss, e300400, 2, (RMF, RMF), rd_rn), 16523 cCE(cfabsd, e300420, 2, (RMD, RMD), rd_rn), 16524 cCE(cfnegs, e300440, 2, (RMF, RMF), rd_rn), 16525 cCE(cfnegd, e300460, 2, (RMD, RMD), rd_rn), 16526 cCE(cfadds, e300480, 3, (RMF, RMF, RMF), rd_rn_rm), 16527 cCE(cfaddd, e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm), 16528 cCE(cfsubs, e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm), 16529 cCE(cfsubd, e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm), 16530 cCE(cfmuls, e100400, 3, (RMF, RMF, RMF), rd_rn_rm), 16531 cCE(cfmuld, e100420, 3, (RMD, RMD, RMD), rd_rn_rm), 16532 cCE(cfabs32, e300500, 2, (RMFX, RMFX), rd_rn), 16533 cCE(cfabs64, e300520, 2, (RMDX, RMDX), rd_rn), 16534 cCE(cfneg32, e300540, 2, (RMFX, RMFX), rd_rn), 16535 cCE(cfneg64, e300560, 2, (RMDX, RMDX), rd_rn), 16536 cCE(cfadd32, e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm), 16537 cCE(cfadd64, e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm), 16538 cCE(cfsub32, e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm), 16539 cCE(cfsub64, e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm), 16540 cCE(cfmul32, e100500, 3, 
(RMFX, RMFX, RMFX), rd_rn_rm), 16541 cCE(cfmul64, e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm), 16542 cCE(cfmac32, e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm), 16543 cCE(cfmsc32, e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm), 16544 cCE(cfmadd32, e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad), 16545 cCE(cfmsub32, e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad), 16546 cCE(cfmadda32, e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad), 16547 cCE(cfmsuba32, e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad), 16548}; 16549#undef ARM_VARIANT 16550#undef THUMB_VARIANT 16551#undef TCE 16552#undef TCM 16553#undef TUE 16554#undef TUF 16555#undef TCC 16556#undef cCE 16557#undef cCL 16558#undef C3E 16559#undef CE 16560#undef CM 16561#undef UE 16562#undef UF 16563#undef UT 16564#undef NUF 16565#undef nUF 16566#undef NCE 16567#undef nCE 16568#undef OPS0 16569#undef OPS1 16570#undef OPS2 16571#undef OPS3 16572#undef OPS4 16573#undef OPS5 16574#undef OPS6 16575#undef do_0 16576 16577/* MD interface: bits in the object file. */ 16578 16579/* Turn an integer of n bytes (in val) into a stream of bytes appropriate 16580 for use in the a.out file, and stores them in the array pointed to by buf. 16581 This knows about the endian-ness of the target machine and does 16582 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte) 16583 2 (short) and 4 (long) Floating numbers are put out as a series of 16584 LITTLENUMS (shorts, here at least). 
*/ 16585 16586void 16587md_number_to_chars (char * buf, valueT val, int n) 16588{ 16589 if (target_big_endian) 16590 number_to_chars_bigendian (buf, val, n); 16591 else 16592 number_to_chars_littleendian (buf, val, n); 16593} 16594 16595static valueT 16596md_chars_to_number (char * buf, int n) 16597{ 16598 valueT result = 0; 16599 unsigned char * where = (unsigned char *) buf; 16600 16601 if (target_big_endian) 16602 { 16603 while (n--) 16604 { 16605 result <<= 8; 16606 result |= (*where++ & 255); 16607 } 16608 } 16609 else 16610 { 16611 while (n--) 16612 { 16613 result <<= 8; 16614 result |= (where[n] & 255); 16615 } 16616 } 16617 16618 return result; 16619} 16620 16621/* MD interface: Sections. */ 16622 16623/* Estimate the size of a frag before relaxing. Assume everything fits in 16624 2 bytes. */ 16625 16626int 16627md_estimate_size_before_relax (fragS * fragp, 16628 segT segtype ATTRIBUTE_UNUSED) 16629{ 16630 fragp->fr_var = 2; 16631 return 2; 16632} 16633 16634/* Convert a machine dependent frag. 
*/

/* Convert a machine dependent frag: replace the relaxed 16-bit Thumb
   instruction at the frag with its final (possibly 32-bit) encoding and
   attach the fixup that will fill in the immediate/offset field.  */

void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  /* The instruction being converted sits at the end of the fixed part
     of the frag.  */
  buf = fragp->fr_literal + fragp->fr_fix;

  /* Read the original 16-bit encoding so register fields can be copied
     into the wide form.  */
  old_op = bfd_get_16(abfd, buf);
  if (fragp->fr_symbol) {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
  } else {
      exp.X_op = O_constant;
  }
  exp.X_add_number = fragp->fr_offset;
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      /* fr_var == 4 means relaxation decided the wide (32-bit) encoding
	 is needed; otherwise keep the narrow form and just emit a fixup.  */
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  /* Copy the register fields from the old 16-bit encoding; the
	     field layout differs between the SP/PC-relative forms
	     (top nibble 4 or 9) and the register-offset forms.  */
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Only the PC-relative literal load is a pc-relative fixup here.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* Narrow adr is relative to Align(PC,4)+4; compensate here.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* mov/movs place Rd at bit 8 of the wide encoding; cmp/cmn
	     encode the register in Rn instead.  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    case T_MNEM_b:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    case T_MNEM_bcond:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  /* Transplant the condition field into the wide encoding.  */
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Copy both register fields (Rd and Rn) into the wide form.  */
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* Bit 20 (S) distinguishes the two T32 immediate reloc forms.  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      abort();
    }
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      reloc_type);
  /* Report errors against the original source position, not the frag.  */
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  fragp->fr_fix += fragp->fr_var;
}

/* Return the size of a relaxable immediate operand instruction.
   SHIFT and SIZE specify the form of the allowable immediate.  */
static int
relax_immediate (fragS *fragp, int size, int shift)
{
  offsetT offset;
  offsetT mask;
  offsetT low;

  /* ??? Should be able to do better than this.  */
  if (fragp->fr_symbol)
    return 4;

  /* LOW masks the alignment bits; MASK covers the SIZE-bit immediate
     field scaled by SHIFT.  */
  low = (1 << shift) - 1;
  mask = (1 << (shift + size)) - (1 << shift);
  offset = fragp->fr_offset;
  /* Force misaligned offsets to 32-bit variant.  */
  if (offset & low)
    return 4;
  if (offset & ~mask)
    return 4;
  return 2;
}

/* Get the address of a symbol during relaxation.
*/ 16825static addressT 16826relaxed_symbol_addr(fragS *fragp, long stretch) 16827{ 16828 fragS *sym_frag; 16829 addressT addr; 16830 symbolS *sym; 16831 16832 sym = fragp->fr_symbol; 16833 sym_frag = symbol_get_frag (sym); 16834 know (S_GET_SEGMENT (sym) != absolute_section 16835 || sym_frag == &zero_address_frag); 16836 addr = S_GET_VALUE (sym) + fragp->fr_offset; 16837 16838 /* If frag has yet to be reached on this pass, assume it will 16839 move by STRETCH just as we did. If this is not so, it will 16840 be because some frag between grows, and that will force 16841 another pass. */ 16842 16843 if (stretch != 0 16844 && sym_frag->relax_marker != fragp->relax_marker) 16845 addr += stretch; 16846 16847 return addr; 16848} 16849 16850/* Return the size of a relaxable adr pseudo-instruction or PC-relative 16851 load. */ 16852static int 16853relax_adr (fragS *fragp, asection *sec, long stretch) 16854{ 16855 addressT addr; 16856 offsetT val; 16857 16858 /* Assume worst case for symbols not known to be in the same section. */ 16859 if (!S_IS_DEFINED(fragp->fr_symbol) 16860 || sec != S_GET_SEGMENT (fragp->fr_symbol)) 16861 return 4; 16862 16863 val = relaxed_symbol_addr(fragp, stretch); 16864 addr = fragp->fr_address + fragp->fr_fix; 16865 addr = (addr + 4) & ~3; 16866 /* Force misaligned targets to 32-bit variant. */ 16867 if (val & 3) 16868 return 4; 16869 val -= addr; 16870 if (val < 0 || val > 1020) 16871 return 4; 16872 return 2; 16873} 16874 16875/* Return the size of a relaxable add/sub immediate instruction. */ 16876static int 16877relax_addsub (fragS *fragp, asection *sec) 16878{ 16879 char *buf; 16880 int op; 16881 16882 buf = fragp->fr_literal + fragp->fr_fix; 16883 op = bfd_get_16(sec->owner, buf); 16884 if ((op & 0xf) == ((op >> 4) & 0xf)) 16885 return relax_immediate (fragp, 8, 0); 16886 else 16887 return relax_immediate (fragp, 3, 0); 16888} 16889 16890 16891/* Return the size of a relaxable branch instruction. 
   BITS is the size of the offset field in the narrow instruction.  */

static int
relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
{
  addressT addr;
  offsetT val;
  offsetT limit;

  /* Assume worst case for symbols not known to be in the same section.  */
  if (!S_IS_DEFINED(fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol))
    return 4;

  val = relaxed_symbol_addr(fragp, stretch);
  /* Branches are relative to the instruction address plus 4.  */
  addr = fragp->fr_address + fragp->fr_fix + 4;
  val -= addr;

  /* Offset is a signed value *2 */
  limit = 1 << bits;
  if (val >= limit || val < -limit)
    return 4;
  return 2;
}


/* Relax a machine dependent frag.  This returns the amount by which
   the current size of the frag should change.  */

int
arm_relax_frag (asection *sec, fragS *fragp, long stretch)
{
  int oldsize;
  int newsize;

  oldsize = fragp->fr_var;
  /* Dispatch on the mnemonic recorded at assembly time; each helper
     returns 2 (narrow encoding fits) or 4 (wide encoding needed).  */
  switch (fragp->fr_subtype)
    {
    case T_MNEM_ldr_pc2:
      newsize = relax_adr(fragp, sec, stretch);
      break;
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      newsize = relax_immediate(fragp, 8, 2);
      break;
    case T_MNEM_ldr:
    case T_MNEM_str:
      newsize = relax_immediate(fragp, 5, 2);
      break;
    case T_MNEM_ldrh:
    case T_MNEM_strh:
      newsize = relax_immediate(fragp, 5, 1);
      break;
    case T_MNEM_ldrb:
    case T_MNEM_strb:
      newsize = relax_immediate(fragp, 5, 0);
      break;
    case T_MNEM_adr:
      newsize = relax_adr(fragp, sec, stretch);
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      newsize = relax_immediate(fragp, 8, 0);
      break;
    case T_MNEM_b:
      newsize = relax_branch(fragp, sec, 11, stretch);
      break;
    case T_MNEM_bcond:
      newsize = relax_branch(fragp, sec, 8, stretch);
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      newsize = relax_immediate (fragp, 7, 2);
      break;
    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      newsize = relax_addsub (fragp, sec);
      break;
    default:
      abort();
    }

  fragp->fr_var = newsize;
  /* Freeze wide instructions that are at or before the same location as
     in the previous pass.  This avoids infinite loops.
     Don't freeze them unconditionally because targets may be artificially
     misaligned by the expansion of preceding frags.  */
  if (stretch <= 0 && newsize > 2)
    {
      md_convert_frag (sec->owner, sec, fragp);
      frag_wane(fragp);
    }

  return newsize - oldsize;
}

/* Round up a section size to the appropriate boundary.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED,
		  valueT size)
{
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
  if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
    {
      /* For a.out, force the section size to be aligned.  If we don't do
	 this, BFD will align it for us, but it will not write out the
	 final bytes of the section.  This may be a bug in BFD, but it is
	 easier to fix it here since that is how the other a.out targets
	 work.  */
      int align;

      align = bfd_get_section_alignment (stdoutput, segment);
      size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
    }
#endif

  return size;
}

/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment.
*/

void
arm_handle_align (fragS * fragP)
{
  /* Canonical no-op encodings: ARM "mov r1, r1" and Thumb "mov r8, r8",
     in both endiannesses.  */
  static char const arm_noop[4] = { 0x00, 0x00, 0xa0, 0xe1 };
  static char const thumb_noop[2] = { 0xc0, 0x46 };
  static char const arm_bigend_noop[4] = { 0xe1, 0xa0, 0x00, 0x00 };
  static char const thumb_bigend_noop[2] = { 0x46, 0xc0 };

  int bytes, fix, noop_size;
  char * p;
  const char * noop;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Number of padding bytes between this frag's fixed part and the
     next frag.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  /* tc_frag_data records whether the frag was assembled in Thumb mode
     (see arm_init_frag), which selects the no-op encoding.  */
  if (fragP->tc_frag_data)
    {
      if (target_big_endian)
	noop = thumb_bigend_noop;
      else
	noop = thumb_noop;
      noop_size = sizeof (thumb_noop);
    }
  else
    {
      if (target_big_endian)
	noop = arm_bigend_noop;
      else
	noop = arm_noop;
      noop_size = sizeof (arm_noop);
    }

  /* Zero-fill any leading bytes that cannot hold a whole no-op, so the
     no-ops themselves stay instruction-aligned.  */
  if (bytes & (noop_size - 1))
    {
      fix = bytes & (noop_size - 1);
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
  fragP->fr_var = noop_size;
}

/* Called from md_do_align.  Used to create an alignment
   frag in a code section.  */

void
arm_frag_align_code (int n, int max)
{
  char * p;

  /* We assume that there will never be a requirement
     to support alignments greater than 32 bytes.  */
  if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
    as_fatal (_("alignments greater than 32 bytes not supported in .text sections."));

  p = frag_var (rs_align_code,
		MAX_MEM_FOR_RS_ALIGN_CODE,
		1,
		(relax_substateT) max,
		(symbolS *) NULL,
		(offsetT) n,
		(char *) NULL);
  *p = 0;
}

/* Perform target specific initialisation of a frag.  */

void
arm_init_frag (fragS * fragP)
{
  /* Record whether this frag is in an ARM or a THUMB area.  */
  fragP->tc_frag_data = thumb_mode;
}

#ifdef OBJ_ELF
/* When we change sections we need to issue a new mapping symbol.  */

void
arm_elf_change_section (void)
{
  flagword flags;
  segment_info_type *seginfo;

  /* Link an unlinked unwind index table section to the .text section.  */
  if (elf_section_type (now_seg) == SHT_ARM_EXIDX
      && elf_linked_to_section (now_seg) == NULL)
    elf_linked_to_section (now_seg) = text_section;

  if (!SEG_NORMAL (now_seg))
    return;

  flags = bfd_get_section_flags (stdoutput, now_seg);

  /* We can ignore sections that only contain debug info.  */
  if ((flags & SEC_ALLOC) == 0)
    return;

  /* Restore the per-section mapping-symbol state and the "already
     emitted __aeabi_unwind_cpp_prN dependency" flags for this section.  */
  seginfo = seg_info (now_seg);
  mapstate = seginfo->tc_segment_info_data.mapstate;
  marked_pr_dependency = seginfo->tc_segment_info_data.marked_pr_dependency;
}

/* Map a ".section ...%exidx" type name to its ELF section type, or -1
   if the name is not recognised.  */

int
arm_elf_section_type (const char * str, size_t len)
{
  if (len == 5 && strncmp (str, "exidx", 5) == 0)
    return SHT_ARM_EXIDX;

  return -1;
}

/* Code to deal with unwinding tables.  */

static void add_unwind_adjustsp (offsetT);

/* Generate any deferred unwind frame offset.
*/ 17157 17158static void 17159flush_pending_unwind (void) 17160{ 17161 offsetT offset; 17162 17163 offset = unwind.pending_offset; 17164 unwind.pending_offset = 0; 17165 if (offset != 0) 17166 add_unwind_adjustsp (offset); 17167} 17168 17169/* Add an opcode to this list for this function. Two-byte opcodes should 17170 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse 17171 order. */ 17172 17173static void 17174add_unwind_opcode (valueT op, int length) 17175{ 17176 /* Add any deferred stack adjustment. */ 17177 if (unwind.pending_offset) 17178 flush_pending_unwind (); 17179 17180 unwind.sp_restored = 0; 17181 17182 if (unwind.opcode_count + length > unwind.opcode_alloc) 17183 { 17184 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE; 17185 if (unwind.opcodes) 17186 unwind.opcodes = xrealloc (unwind.opcodes, 17187 unwind.opcode_alloc); 17188 else 17189 unwind.opcodes = xmalloc (unwind.opcode_alloc); 17190 } 17191 while (length > 0) 17192 { 17193 length--; 17194 unwind.opcodes[unwind.opcode_count] = op & 0xff; 17195 op >>= 8; 17196 unwind.opcode_count++; 17197 } 17198} 17199 17200/* Add unwind opcodes to adjust the stack pointer. */ 17201 17202static void 17203add_unwind_adjustsp (offsetT offset) 17204{ 17205 valueT op; 17206 17207 if (offset > 0x200) 17208 { 17209 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */ 17210 char bytes[5]; 17211 int n; 17212 valueT o; 17213 17214 /* Long form: 0xb2, uleb128. */ 17215 /* This might not fit in a word so add the individual bytes, 17216 remembering the list is built in reverse order. */ 17217 o = (valueT) ((offset - 0x204) >> 2); 17218 if (o == 0) 17219 add_unwind_opcode (0, 1); 17220 17221 /* Calculate the uleb128 encoding of the offset. */ 17222 n = 0; 17223 while (o) 17224 { 17225 bytes[n] = o & 0x7f; 17226 o >>= 7; 17227 if (o) 17228 bytes[n] |= 0x80; 17229 n++; 17230 } 17231 /* Add the insn. 
*/ 17232 for (; n; n--) 17233 add_unwind_opcode (bytes[n - 1], 1); 17234 add_unwind_opcode (0xb2, 1); 17235 } 17236 else if (offset > 0x100) 17237 { 17238 /* Two short opcodes. */ 17239 add_unwind_opcode (0x3f, 1); 17240 op = (offset - 0x104) >> 2; 17241 add_unwind_opcode (op, 1); 17242 } 17243 else if (offset > 0) 17244 { 17245 /* Short opcode. */ 17246 op = (offset - 4) >> 2; 17247 add_unwind_opcode (op, 1); 17248 } 17249 else if (offset < 0) 17250 { 17251 offset = -offset; 17252 while (offset > 0x100) 17253 { 17254 add_unwind_opcode (0x7f, 1); 17255 offset -= 0x100; 17256 } 17257 op = ((offset - 4) >> 2) | 0x40; 17258 add_unwind_opcode (op, 1); 17259 } 17260} 17261 17262/* Finish the list of unwind opcodes for this function. */ 17263static void 17264finish_unwind_opcodes (void) 17265{ 17266 valueT op; 17267 17268 if (unwind.fp_used) 17269 { 17270 /* Adjust sp as necessary. */ 17271 unwind.pending_offset += unwind.fp_offset - unwind.frame_size; 17272 flush_pending_unwind (); 17273 17274 /* After restoring sp from the frame pointer. */ 17275 op = 0x90 | unwind.fp_reg; 17276 add_unwind_opcode (op, 1); 17277 } 17278 else 17279 flush_pending_unwind (); 17280} 17281 17282 17283/* Start an exception table entry. If idx is nonzero this is an index table 17284 entry. 
*/ 17285 17286static void 17287start_unwind_section (const segT text_seg, int idx) 17288{ 17289 const char * text_name; 17290 const char * prefix; 17291 const char * prefix_once; 17292 const char * group_name; 17293 size_t prefix_len; 17294 size_t text_len; 17295 char * sec_name; 17296 size_t sec_name_len; 17297 int type; 17298 int flags; 17299 int linkonce; 17300 17301 if (idx) 17302 { 17303 prefix = ELF_STRING_ARM_unwind; 17304 prefix_once = ELF_STRING_ARM_unwind_once; 17305 type = SHT_ARM_EXIDX; 17306 } 17307 else 17308 { 17309 prefix = ELF_STRING_ARM_unwind_info; 17310 prefix_once = ELF_STRING_ARM_unwind_info_once; 17311 type = SHT_PROGBITS; 17312 } 17313 17314 text_name = segment_name (text_seg); 17315 if (streq (text_name, ".text")) 17316 text_name = ""; 17317 17318 if (strncmp (text_name, ".gnu.linkonce.t.", 17319 strlen (".gnu.linkonce.t.")) == 0) 17320 { 17321 prefix = prefix_once; 17322 text_name += strlen (".gnu.linkonce.t."); 17323 } 17324 17325 prefix_len = strlen (prefix); 17326 text_len = strlen (text_name); 17327 sec_name_len = prefix_len + text_len; 17328 sec_name = xmalloc (sec_name_len + 1); 17329 memcpy (sec_name, prefix, prefix_len); 17330 memcpy (sec_name + prefix_len, text_name, text_len); 17331 sec_name[prefix_len + text_len] = '\0'; 17332 17333 flags = SHF_ALLOC; 17334 linkonce = 0; 17335 group_name = 0; 17336 17337 /* Handle COMDAT group. */ 17338 if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0) 17339 { 17340 group_name = elf_group_name (text_seg); 17341 if (group_name == NULL) 17342 { 17343 as_bad ("Group section `%s' has no group signature", 17344 segment_name (text_seg)); 17345 ignore_rest_of_line (); 17346 return; 17347 } 17348 flags |= SHF_GROUP; 17349 linkonce = 1; 17350 } 17351 17352 obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0); 17353 17354 /* Set the setion link for index tables. 
*/ 17355 if (idx) 17356 elf_linked_to_section (now_seg) = text_seg; 17357} 17358 17359 17360/* Start an unwind table entry. HAVE_DATA is nonzero if we have additional 17361 personality routine data. Returns zero, or the index table value for 17362 and inline entry. */ 17363 17364static valueT 17365create_unwind_entry (int have_data) 17366{ 17367 int size; 17368 addressT where; 17369 char *ptr; 17370 /* The current word of data. */ 17371 valueT data; 17372 /* The number of bytes left in this word. */ 17373 int n; 17374 17375 finish_unwind_opcodes (); 17376 17377 /* Remember the current text section. */ 17378 unwind.saved_seg = now_seg; 17379 unwind.saved_subseg = now_subseg; 17380 17381 start_unwind_section (now_seg, 0); 17382 17383 if (unwind.personality_routine == NULL) 17384 { 17385 if (unwind.personality_index == -2) 17386 { 17387 if (have_data) 17388 as_bad (_("handerdata in cantunwind frame")); 17389 return 1; /* EXIDX_CANTUNWIND. */ 17390 } 17391 17392 /* Use a default personality routine if none is specified. */ 17393 if (unwind.personality_index == -1) 17394 { 17395 if (unwind.opcode_count > 3) 17396 unwind.personality_index = 1; 17397 else 17398 unwind.personality_index = 0; 17399 } 17400 17401 /* Space for the personality routine entry. */ 17402 if (unwind.personality_index == 0) 17403 { 17404 if (unwind.opcode_count > 3) 17405 as_bad (_("too many unwind opcodes for personality routine 0")); 17406 17407 if (!have_data) 17408 { 17409 /* All the data is inline in the index table. */ 17410 data = 0x80; 17411 n = 3; 17412 while (unwind.opcode_count > 0) 17413 { 17414 unwind.opcode_count--; 17415 data = (data << 8) | unwind.opcodes[unwind.opcode_count]; 17416 n--; 17417 } 17418 17419 /* Pad with "finish" opcodes. */ 17420 while (n--) 17421 data = (data << 8) | 0xb0; 17422 17423 return data; 17424 } 17425 size = 0; 17426 } 17427 else 17428 /* We get two opcodes "free" in the first word. 
*/ 17429 size = unwind.opcode_count - 2; 17430 } 17431 else 17432 /* An extra byte is required for the opcode count. */ 17433 size = unwind.opcode_count + 1; 17434 17435 size = (size + 3) >> 2; 17436 if (size > 0xff) 17437 as_bad (_("too many unwind opcodes")); 17438 17439 frag_align (2, 0, 0); 17440 record_alignment (now_seg, 2); 17441 unwind.table_entry = expr_build_dot (); 17442 17443 /* Allocate the table entry. */ 17444 ptr = frag_more ((size << 2) + 4); 17445 memset(ptr, 0, (size << 2) + 4); 17446 where = frag_now_fix () - ((size << 2) + 4); 17447 17448 switch (unwind.personality_index) 17449 { 17450 case -1: 17451 /* ??? Should this be a PLT generating relocation? */ 17452 /* Custom personality routine. */ 17453 fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1, 17454 BFD_RELOC_ARM_PREL31); 17455 17456 where += 4; 17457 ptr += 4; 17458 17459 /* Set the first byte to the number of additional words. */ 17460 data = size - 1; 17461 n = 3; 17462 break; 17463 17464 /* ABI defined personality routines. */ 17465 case 0: 17466 /* Three opcodes bytes are packed into the first word. */ 17467 data = 0x80; 17468 n = 3; 17469 break; 17470 17471 case 1: 17472 case 2: 17473 /* The size and first two opcode bytes go in the first word. */ 17474 data = ((0x80 + unwind.personality_index) << 8) | size; 17475 n = 2; 17476 break; 17477 17478 default: 17479 /* Should never happen. */ 17480 abort (); 17481 } 17482 17483 /* Pack the opcodes into words (MSB first), reversing the list at the same 17484 time. */ 17485 while (unwind.opcode_count > 0) 17486 { 17487 if (n == 0) 17488 { 17489 md_number_to_chars (ptr, data, 4); 17490 ptr += 4; 17491 n = 4; 17492 data = 0; 17493 } 17494 unwind.opcode_count--; 17495 n--; 17496 data = (data << 8) | unwind.opcodes[unwind.opcode_count]; 17497 } 17498 17499 /* Finish off the last word. */ 17500 if (n < 4) 17501 { 17502 /* Pad with "finish" opcodes. 
*/ 17503 while (n--) 17504 data = (data << 8) | 0xb0; 17505 17506 md_number_to_chars (ptr, data, 4); 17507 } 17508 17509 if (!have_data) 17510 { 17511 /* Add an empty descriptor if there is no user-specified data. */ 17512 ptr = frag_more (4); 17513 md_number_to_chars (ptr, 0, 4); 17514 } 17515 17516 return 0; 17517} 17518 17519 17520/* Initialize the DWARF-2 unwind information for this procedure. */ 17521 17522void 17523tc_arm_frame_initial_instructions (void) 17524{ 17525 cfi_add_CFA_def_cfa (REG_SP, 0); 17526} 17527#endif /* OBJ_ELF */ 17528 17529/* Convert REGNAME to a DWARF-2 register number. */ 17530 17531int 17532tc_arm_regname_to_dw2regnum (char *regname) 17533{ 17534 int reg = arm_reg_parse (®name, REG_TYPE_RN); 17535 17536 if (reg == FAIL) 17537 return -1; 17538 17539 return reg; 17540} 17541 17542#ifdef TE_PE 17543void 17544tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size) 17545{ 17546 expressionS expr; 17547 17548 expr.X_op = O_secrel; 17549 expr.X_add_symbol = symbol; 17550 expr.X_add_number = 0; 17551 emit_expr (&expr, size); 17552} 17553#endif 17554 17555/* MD interface: Symbol and relocation handling. */ 17556 17557/* Return the address within the segment that a PC-relative fixup is 17558 relative to. For ARM, PC-relative fixups applied to instructions 17559 are generally relative to the location of the fixup plus 8 bytes. 17560 Thumb branches are offset by 4, and Thumb loads relative to PC 17561 require special handling. */ 17562 17563long 17564md_pcrel_from_section (fixS * fixP, segT seg) 17565{ 17566 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address; 17567 17568 /* If this is pc-relative and we are going to emit a relocation 17569 then we just want to put out any pipeline compensation that the linker 17570 will need. Otherwise we want to use the calculated base. 17571 For WinCE we skip the bias for externals as well, since this 17572 is how the MS ARM-CE assembler behaves and we want to be compatible. 
  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;

  switch (fixP->fx_r_type)
    {
    /* PC relative addressing on the Thumb is slightly odd as the
       bottom two bits of the PC are forced to zero for the
       calculation.  This happens *after* application of the
       pipeline offset.  However, Thumb adrl already adjusts for
       this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    /* Thumb PC-relative loads: Align(PC, 4) + 4.  */
    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      return (base + 4) & ~3;

    /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_THUMB_PCREL_BLX:
      return base + 4;

    /* ARM mode branches are offset by +8.  However, the Windows CE
       loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif

    /* ARM mode loads relative to PC are also offset by +8.  Unlike
       branches, the Windows CE loader *does* expect the relocation
       to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


    /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}

/* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
   Otherwise we have no need to default values of symbols.  */

symbolS *
md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap prefix test before the full string compare.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      if (!GOT_symbol)
	{
	  if (symbol_find (name))
	    as_bad ("GOT already in the symbol table");

	  GOT_symbol = symbol_new (name, undefined_section,
				   (valueT) 0, & zero_address_frag);
	}

      return GOT_symbol;
    }
#endif

  return 0;
}

/* Subroutine of md_apply_fix.  Check to see if an immediate can be
   computed as two separate immediate values, added together.  We
   already know that this value cannot be computed by just one ARM
   instruction.
*/

static unsigned int
validate_immediate_twopart (unsigned int val,
			    unsigned int * highpart)
{
  unsigned int a;
  unsigned int i;

  /* Try every even rotation; ARM immediates are an 8-bit value rotated
     right by an even amount.  The encoded result packs the rotation
     into bits 11:7 (hence the << 7).  */
  for (i = 0; i < 32; i += 2)
    if (((a = rotate_left (val, i)) & 0xff) != 0)
      {
	/* The low byte is the first immediate; the second must be a
	   single byte in one of the three higher byte positions.  */
	if (a & 0xff00)
	  {
	    if (a & ~ 0xffff)
	      continue;
	    * highpart = (a >> 8) | ((i + 24) << 7);
	  }
	else if (a & 0xff0000)
	  {
	    if (a & 0xff000000)
	      continue;
	    * highpart = (a >> 16) | ((i + 16) << 7);
	  }
	else
	  {
	    assert (a & 0xff000000);
	    * highpart = (a >> 24) | ((i + 8) << 7);
	  }

	return (a & 0xff) | (i << 7);
      }

  return FAIL;
}

/* Validate a load/store offset immediate: limit is 255 for the
   halfword/signed (HWSE) forms, 4095 otherwise.  Returns VAL or FAIL.  */
static int
validate_offset_imm (unsigned int val, int hwse)
{
  if ((hwse && val > 255) || val > 4095)
    return FAIL;
  return val;
}

/* Subroutine of md_apply_fix.  Do those data_ops which can take a
   negative immediate constant by altering the instruction.  A bit of
   a hack really.
	MOV <-> MVN
	AND <-> BIC
	ADC <-> SBC
   by inverting the second operand, and
	ADD <-> SUB
	CMP <-> CMN
   by negating the second operand.  */

static int
negate_data_op (unsigned long * instruction,
		unsigned long value)
{
  int op, new_inst;
  unsigned long negated, inverted;

  /* Pre-compute both candidate encodings; either may be FAIL.  */
  negated = encode_arm_immediate (-value);
  inverted = encode_arm_immediate (~value);

  op = (*instruction >> DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
    /* First negates.  */
    case OPCODE_SUB:             /* ADD <-> SUB */
      new_inst = OPCODE_ADD;
      value = negated;
      break;

    case OPCODE_ADD:
      new_inst = OPCODE_SUB;
      value = negated;
      break;

    case OPCODE_CMP:             /* CMP <-> CMN */
      new_inst = OPCODE_CMN;
      value = negated;
      break;

    case OPCODE_CMN:
      new_inst = OPCODE_CMP;
      value = negated;
      break;

    /* Now Inverted ops.  */
    case OPCODE_MOV:             /* MOV <-> MVN */
      new_inst = OPCODE_MVN;
      value = inverted;
      break;

    case OPCODE_MVN:
      new_inst = OPCODE_MOV;
      value = inverted;
      break;

    case OPCODE_AND:             /* AND <-> BIC */
      new_inst = OPCODE_BIC;
      value = inverted;
      break;

    case OPCODE_BIC:
      new_inst = OPCODE_AND;
      value = inverted;
      break;

    case OPCODE_ADC:             /* ADC <-> SBC */
      new_inst = OPCODE_SBC;
      value = inverted;
      break;

    case OPCODE_SBC:
      new_inst = OPCODE_ADC;
      value = inverted;
      break;

    /* We cannot do anything.  */
    default:
      return FAIL;
    }

  if (value == (unsigned) FAIL)
    return FAIL;

  /* Patch the opcode field and return the usable immediate.  */
  *instruction &= OPCODE_MASK;
  *instruction |= new_inst << DATA_OP_SHIFT;
  return value;
}

/* Like negate_data_op, but for Thumb-2.  */

static unsigned int
thumb32_negate_data_op (offsetT *instruction, unsigned int value)
{
  int op, new_inst;
  int rd;
  unsigned int negated, inverted;

  negated = encode_thumb32_immediate (-value);
  inverted = encode_thumb32_immediate (~value);

  rd = (*instruction >> 8) & 0xf;
  op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
    /* ADD <-> SUB.  Includes CMP <-> CMN.
*/ 17828 case T2_OPCODE_SUB: 17829 new_inst = T2_OPCODE_ADD; 17830 value = negated; 17831 break; 17832 17833 case T2_OPCODE_ADD: 17834 new_inst = T2_OPCODE_SUB; 17835 value = negated; 17836 break; 17837 17838 /* ORR <-> ORN. Includes MOV <-> MVN. */ 17839 case T2_OPCODE_ORR: 17840 new_inst = T2_OPCODE_ORN; 17841 value = inverted; 17842 break; 17843 17844 case T2_OPCODE_ORN: 17845 new_inst = T2_OPCODE_ORR; 17846 value = inverted; 17847 break; 17848 17849 /* AND <-> BIC. TST has no inverted equivalent. */ 17850 case T2_OPCODE_AND: 17851 new_inst = T2_OPCODE_BIC; 17852 if (rd == 15) 17853 value = FAIL; 17854 else 17855 value = inverted; 17856 break; 17857 17858 case T2_OPCODE_BIC: 17859 new_inst = T2_OPCODE_AND; 17860 value = inverted; 17861 break; 17862 17863 /* ADC <-> SBC */ 17864 case T2_OPCODE_ADC: 17865 new_inst = T2_OPCODE_SBC; 17866 value = inverted; 17867 break; 17868 17869 case T2_OPCODE_SBC: 17870 new_inst = T2_OPCODE_ADC; 17871 value = inverted; 17872 break; 17873 17874 /* We cannot do anything. */ 17875 default: 17876 return FAIL; 17877 } 17878 17879 if (value == (unsigned int)FAIL) 17880 return FAIL; 17881 17882 *instruction &= T2_OPCODE_MASK; 17883 *instruction |= new_inst << T2_DATA_OP_SHIFT; 17884 return value; 17885} 17886 17887/* Read a 32-bit thumb instruction from buf. */ 17888static unsigned long 17889get_thumb32_insn (char * buf) 17890{ 17891 unsigned long insn; 17892 insn = md_chars_to_number (buf, THUMB_SIZE) << 16; 17893 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE); 17894 17895 return insn; 17896} 17897 17898 17899/* We usually want to set the low bit on the address of thumb function 17900 symbols. In particular .word foo - . should have the low bit set. 17901 Generic code tries to fold the difference of two symbols to 17902 a constant. Prevent this and force a relocation when the first symbols 17903 is a thumb function. 
*/ 17904int 17905arm_optimize_expr (expressionS *l, operatorT op, expressionS *r) 17906{ 17907 if (op == O_subtract 17908 && l->X_op == O_symbol 17909 && r->X_op == O_symbol 17910 && THUMB_IS_FUNC (l->X_add_symbol)) 17911 { 17912 l->X_op = O_subtract; 17913 l->X_op_symbol = r->X_add_symbol; 17914 l->X_add_number -= r->X_add_number; 17915 return 1; 17916 } 17917 /* Process as normal. */ 17918 return 0; 17919} 17920 17921void 17922md_apply_fix (fixS * fixP, 17923 valueT * valP, 17924 segT seg) 17925{ 17926 offsetT value = * valP; 17927 offsetT newval; 17928 unsigned int newimm; 17929 unsigned long temp; 17930 int sign; 17931 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal; 17932 17933 assert (fixP->fx_r_type <= BFD_RELOC_UNUSED); 17934 17935 /* Note whether this will delete the relocation. */ 17936 17937 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel) 17938 fixP->fx_done = 1; 17939 17940 /* On a 64-bit host, silently truncate 'value' to 32 bits for 17941 consistency with the behavior on 32-bit hosts. Remember value 17942 for emit_reloc. */ 17943 value &= 0xffffffff; 17944 value ^= 0x80000000; 17945 value -= 0x80000000; 17946 17947 *valP = value; 17948 fixP->fx_addnumber = value; 17949 17950 /* Same treatment for fixP->fx_offset. */ 17951 fixP->fx_offset &= 0xffffffff; 17952 fixP->fx_offset ^= 0x80000000; 17953 fixP->fx_offset -= 0x80000000; 17954 17955 switch (fixP->fx_r_type) 17956 { 17957 case BFD_RELOC_NONE: 17958 /* This will need to go in the object file. */ 17959 fixP->fx_done = 0; 17960 break; 17961 17962 case BFD_RELOC_ARM_IMMEDIATE: 17963 /* We claim that this fixup has been processed here, 17964 even if in fact we generate an error because we do 17965 not have a reloc for it, so tc_gen_reloc will reject it. */ 17966 fixP->fx_done = 1; 17967 17968 if (fixP->fx_addsy 17969 && ! 
S_IS_DEFINED (fixP->fx_addsy)) 17970 { 17971 as_bad_where (fixP->fx_file, fixP->fx_line, 17972 _("undefined symbol %s used as an immediate value"), 17973 S_GET_NAME (fixP->fx_addsy)); 17974 break; 17975 } 17976 17977 newimm = encode_arm_immediate (value); 17978 temp = md_chars_to_number (buf, INSN_SIZE); 17979 17980 /* If the instruction will fail, see if we can fix things up by 17981 changing the opcode. */ 17982 if (newimm == (unsigned int) FAIL 17983 && (newimm = negate_data_op (&temp, value)) == (unsigned int) FAIL) 17984 { 17985 as_bad_where (fixP->fx_file, fixP->fx_line, 17986 _("invalid constant (%lx) after fixup"), 17987 (unsigned long) value); 17988 break; 17989 } 17990 17991 newimm |= (temp & 0xfffff000); 17992 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE); 17993 break; 17994 17995 case BFD_RELOC_ARM_ADRL_IMMEDIATE: 17996 { 17997 unsigned int highpart = 0; 17998 unsigned int newinsn = 0xe1a00000; /* nop. */ 17999 18000 newimm = encode_arm_immediate (value); 18001 temp = md_chars_to_number (buf, INSN_SIZE); 18002 18003 /* If the instruction will fail, see if we can fix things up by 18004 changing the opcode. */ 18005 if (newimm == (unsigned int) FAIL 18006 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL) 18007 { 18008 /* No ? OK - try using two ADD instructions to generate 18009 the value. */ 18010 newimm = validate_immediate_twopart (value, & highpart); 18011 18012 /* Yes - then make sure that the second instruction is 18013 also an add. */ 18014 if (newimm != (unsigned int) FAIL) 18015 newinsn = temp; 18016 /* Still No ? Try using a negated value. */ 18017 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL) 18018 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT; 18019 /* Otherwise - give up. 
*/ 18020 else 18021 { 18022 as_bad_where (fixP->fx_file, fixP->fx_line, 18023 _("unable to compute ADRL instructions for PC offset of 0x%lx"), 18024 (long) value); 18025 break; 18026 } 18027 18028 /* Replace the first operand in the 2nd instruction (which 18029 is the PC) with the destination register. We have 18030 already added in the PC in the first instruction and we 18031 do not want to do it again. */ 18032 newinsn &= ~ 0xf0000; 18033 newinsn |= ((newinsn & 0x0f000) << 4); 18034 } 18035 18036 newimm |= (temp & 0xfffff000); 18037 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE); 18038 18039 highpart |= (newinsn & 0xfffff000); 18040 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE); 18041 } 18042 break; 18043 18044 case BFD_RELOC_ARM_OFFSET_IMM: 18045 if (!fixP->fx_done && seg->use_rela_p) 18046 value = 0; 18047 18048 case BFD_RELOC_ARM_LITERAL: 18049 sign = value >= 0; 18050 18051 if (value < 0) 18052 value = - value; 18053 18054 if (validate_offset_imm (value, 0) == FAIL) 18055 { 18056 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL) 18057 as_bad_where (fixP->fx_file, fixP->fx_line, 18058 _("invalid literal constant: pool needs to be closer")); 18059 else 18060 as_bad_where (fixP->fx_file, fixP->fx_line, 18061 _("bad immediate value for offset (%ld)"), 18062 (long) value); 18063 break; 18064 } 18065 18066 newval = md_chars_to_number (buf, INSN_SIZE); 18067 newval &= 0xff7ff000; 18068 newval |= value | (sign ? 
INDEX_UP : 0); 18069 md_number_to_chars (buf, newval, INSN_SIZE); 18070 break; 18071 18072 case BFD_RELOC_ARM_OFFSET_IMM8: 18073 case BFD_RELOC_ARM_HWLITERAL: 18074 sign = value >= 0; 18075 18076 if (value < 0) 18077 value = - value; 18078 18079 if (validate_offset_imm (value, 1) == FAIL) 18080 { 18081 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL) 18082 as_bad_where (fixP->fx_file, fixP->fx_line, 18083 _("invalid literal constant: pool needs to be closer")); 18084 else 18085 as_bad (_("bad immediate value for 8-bit offset (%ld)"), 18086 (long) value); 18087 break; 18088 } 18089 18090 newval = md_chars_to_number (buf, INSN_SIZE); 18091 newval &= 0xff7ff0f0; 18092 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0); 18093 md_number_to_chars (buf, newval, INSN_SIZE); 18094 break; 18095 18096 case BFD_RELOC_ARM_T32_OFFSET_U8: 18097 if (value < 0 || value > 1020 || value % 4 != 0) 18098 as_bad_where (fixP->fx_file, fixP->fx_line, 18099 _("bad immediate value for offset (%ld)"), (long) value); 18100 value /= 4; 18101 18102 newval = md_chars_to_number (buf+2, THUMB_SIZE); 18103 newval |= value; 18104 md_number_to_chars (buf+2, newval, THUMB_SIZE); 18105 break; 18106 18107 case BFD_RELOC_ARM_T32_OFFSET_IMM: 18108 /* This is a complicated relocation used for all varieties of Thumb32 18109 load/store instruction with immediate offset: 18110 18111 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit, 18112 *4, optional writeback(W) 18113 (doubleword load/store) 18114 18115 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel 18116 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit 18117 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction) 18118 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit 18119 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit 18120 18121 Uppercase letters indicate bits that are already encoded at 18122 this point. Lowercase letters are our problem. 
For the 18123 second block of instructions, the secondary opcode nybble 18124 (bits 8..11) is present, and bit 23 is zero, even if this is 18125 a PC-relative operation. */ 18126 newval = md_chars_to_number (buf, THUMB_SIZE); 18127 newval <<= 16; 18128 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE); 18129 18130 if ((newval & 0xf0000000) == 0xe0000000) 18131 { 18132 /* Doubleword load/store: 8-bit offset, scaled by 4. */ 18133 if (value >= 0) 18134 newval |= (1 << 23); 18135 else 18136 value = -value; 18137 if (value % 4 != 0) 18138 { 18139 as_bad_where (fixP->fx_file, fixP->fx_line, 18140 _("offset not a multiple of 4")); 18141 break; 18142 } 18143 value /= 4; 18144 if (value > 0xff) 18145 { 18146 as_bad_where (fixP->fx_file, fixP->fx_line, 18147 _("offset out of range")); 18148 break; 18149 } 18150 newval &= ~0xff; 18151 } 18152 else if ((newval & 0x000f0000) == 0x000f0000) 18153 { 18154 /* PC-relative, 12-bit offset. */ 18155 if (value >= 0) 18156 newval |= (1 << 23); 18157 else 18158 value = -value; 18159 if (value > 0xfff) 18160 { 18161 as_bad_where (fixP->fx_file, fixP->fx_line, 18162 _("offset out of range")); 18163 break; 18164 } 18165 newval &= ~0xfff; 18166 } 18167 else if ((newval & 0x00000100) == 0x00000100) 18168 { 18169 /* Writeback: 8-bit, +/- offset. */ 18170 if (value >= 0) 18171 newval |= (1 << 9); 18172 else 18173 value = -value; 18174 if (value > 0xff) 18175 { 18176 as_bad_where (fixP->fx_file, fixP->fx_line, 18177 _("offset out of range")); 18178 break; 18179 } 18180 newval &= ~0xff; 18181 } 18182 else if ((newval & 0x00000f00) == 0x00000e00) 18183 { 18184 /* T-instruction: positive 8-bit offset. */ 18185 if (value < 0 || value > 0xff) 18186 { 18187 as_bad_where (fixP->fx_file, fixP->fx_line, 18188 _("offset out of range")); 18189 break; 18190 } 18191 newval &= ~0xff; 18192 newval |= value; 18193 } 18194 else 18195 { 18196 /* Positive 12-bit or negative 8-bit offset. 
*/ 18197 int limit; 18198 if (value >= 0) 18199 { 18200 newval |= (1 << 23); 18201 limit = 0xfff; 18202 } 18203 else 18204 { 18205 value = -value; 18206 limit = 0xff; 18207 } 18208 if (value > limit) 18209 { 18210 as_bad_where (fixP->fx_file, fixP->fx_line, 18211 _("offset out of range")); 18212 break; 18213 } 18214 newval &= ~limit; 18215 } 18216 18217 newval |= value; 18218 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE); 18219 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE); 18220 break; 18221 18222 case BFD_RELOC_ARM_SHIFT_IMM: 18223 newval = md_chars_to_number (buf, INSN_SIZE); 18224 if (((unsigned long) value) > 32 18225 || (value == 32 18226 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60))) 18227 { 18228 as_bad_where (fixP->fx_file, fixP->fx_line, 18229 _("shift expression is too large")); 18230 break; 18231 } 18232 18233 if (value == 0) 18234 /* Shifts of zero must be done as lsl. */ 18235 newval &= ~0x60; 18236 else if (value == 32) 18237 value = 0; 18238 newval &= 0xfffff07f; 18239 newval |= (value & 0x1f) << 7; 18240 md_number_to_chars (buf, newval, INSN_SIZE); 18241 break; 18242 18243 case BFD_RELOC_ARM_T32_IMMEDIATE: 18244 case BFD_RELOC_ARM_T32_ADD_IMM: 18245 case BFD_RELOC_ARM_T32_IMM12: 18246 case BFD_RELOC_ARM_T32_ADD_PC12: 18247 /* We claim that this fixup has been processed here, 18248 even if in fact we generate an error because we do 18249 not have a reloc for it, so tc_gen_reloc will reject it. */ 18250 fixP->fx_done = 1; 18251 18252 if (fixP->fx_addsy 18253 && ! 
S_IS_DEFINED (fixP->fx_addsy)) 18254 { 18255 as_bad_where (fixP->fx_file, fixP->fx_line, 18256 _("undefined symbol %s used as an immediate value"), 18257 S_GET_NAME (fixP->fx_addsy)); 18258 break; 18259 } 18260 18261 newval = md_chars_to_number (buf, THUMB_SIZE); 18262 newval <<= 16; 18263 newval |= md_chars_to_number (buf+2, THUMB_SIZE); 18264 18265 newimm = FAIL; 18266 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE 18267 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM) 18268 { 18269 newimm = encode_thumb32_immediate (value); 18270 if (newimm == (unsigned int) FAIL) 18271 newimm = thumb32_negate_data_op (&newval, value); 18272 } 18273 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE 18274 && newimm == (unsigned int) FAIL) 18275 { 18276 /* Turn add/sum into addw/subw. */ 18277 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM) 18278 newval = (newval & 0xfeffffff) | 0x02000000; 18279 18280 /* 12 bit immediate for addw/subw. */ 18281 if (value < 0) 18282 { 18283 value = -value; 18284 newval ^= 0x00a00000; 18285 } 18286 if (value > 0xfff) 18287 newimm = (unsigned int) FAIL; 18288 else 18289 newimm = value; 18290 } 18291 18292 if (newimm == (unsigned int)FAIL) 18293 { 18294 as_bad_where (fixP->fx_file, fixP->fx_line, 18295 _("invalid constant (%lx) after fixup"), 18296 (unsigned long) value); 18297 break; 18298 } 18299 18300 newval |= (newimm & 0x800) << 15; 18301 newval |= (newimm & 0x700) << 4; 18302 newval |= (newimm & 0x0ff); 18303 18304 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE); 18305 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE); 18306 break; 18307 18308 case BFD_RELOC_ARM_SMC: 18309 if (((unsigned long) value) > 0xffff) 18310 as_bad_where (fixP->fx_file, fixP->fx_line, 18311 _("invalid smc expression")); 18312 newval = md_chars_to_number (buf, INSN_SIZE); 18313 newval |= (value & 0xf) | ((value & 0xfff0) << 4); 18314 md_number_to_chars (buf, newval, INSN_SIZE); 18315 break; 18316 18317 case 
BFD_RELOC_ARM_SWI: 18318 if (fixP->tc_fix_data != 0) 18319 { 18320 if (((unsigned long) value) > 0xff) 18321 as_bad_where (fixP->fx_file, fixP->fx_line, 18322 _("invalid swi expression")); 18323 newval = md_chars_to_number (buf, THUMB_SIZE); 18324 newval |= value; 18325 md_number_to_chars (buf, newval, THUMB_SIZE); 18326 } 18327 else 18328 { 18329 if (((unsigned long) value) > 0x00ffffff) 18330 as_bad_where (fixP->fx_file, fixP->fx_line, 18331 _("invalid swi expression")); 18332 newval = md_chars_to_number (buf, INSN_SIZE); 18333 newval |= value; 18334 md_number_to_chars (buf, newval, INSN_SIZE); 18335 } 18336 break; 18337 18338 case BFD_RELOC_ARM_MULTI: 18339 if (((unsigned long) value) > 0xffff) 18340 as_bad_where (fixP->fx_file, fixP->fx_line, 18341 _("invalid expression in load/store multiple")); 18342 newval = value | md_chars_to_number (buf, INSN_SIZE); 18343 md_number_to_chars (buf, newval, INSN_SIZE); 18344 break; 18345 18346#ifdef OBJ_ELF 18347 case BFD_RELOC_ARM_PCREL_CALL: 18348 newval = md_chars_to_number (buf, INSN_SIZE); 18349 if ((newval & 0xf0000000) == 0xf0000000) 18350 temp = 1; 18351 else 18352 temp = 3; 18353 goto arm_branch_common; 18354 18355 case BFD_RELOC_ARM_PCREL_JUMP: 18356 case BFD_RELOC_ARM_PLT32: 18357#endif 18358 case BFD_RELOC_ARM_PCREL_BRANCH: 18359 temp = 3; 18360 goto arm_branch_common; 18361 18362 case BFD_RELOC_ARM_PCREL_BLX: 18363 temp = 1; 18364 arm_branch_common: 18365 /* We are going to store value (shifted right by two) in the 18366 instruction, in a 24 bit, signed field. Bits 26 through 32 either 18367 all clear or all set and bit 0 must be clear. For B/BL bit 1 must 18368 also be be clear. 
*/ 18369 if (value & temp) 18370 as_bad_where (fixP->fx_file, fixP->fx_line, 18371 _("misaligned branch destination")); 18372 if ((value & (offsetT)0xfe000000) != (offsetT)0 18373 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000) 18374 as_bad_where (fixP->fx_file, fixP->fx_line, 18375 _("branch out of range")); 18376 18377 if (fixP->fx_done || !seg->use_rela_p) 18378 { 18379 newval = md_chars_to_number (buf, INSN_SIZE); 18380 newval |= (value >> 2) & 0x00ffffff; 18381 /* Set the H bit on BLX instructions. */ 18382 if (temp == 1) 18383 { 18384 if (value & 2) 18385 newval |= 0x01000000; 18386 else 18387 newval &= ~0x01000000; 18388 } 18389 md_number_to_chars (buf, newval, INSN_SIZE); 18390 } 18391 break; 18392 18393 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */ 18394 /* CBZ can only branch forward. */ 18395 18396 /* Attempts to use CBZ to branch to the next instruction 18397 (which, strictly speaking, are prohibited) will be turned into 18398 no-ops. 18399 18400 FIXME: It may be better to remove the instruction completely and 18401 perform relaxation. */ 18402 if (value == -2) 18403 { 18404 newval = md_chars_to_number (buf, THUMB_SIZE); 18405 newval = 0xbf00; /* NOP encoding T1 */ 18406 md_number_to_chars (buf, newval, THUMB_SIZE); 18407 } 18408 else 18409 { 18410 if (value & ~0x7e) 18411 as_bad_where (fixP->fx_file, fixP->fx_line, 18412 _("branch out of range")); 18413 18414 if (fixP->fx_done || !seg->use_rela_p) 18415 { 18416 newval = md_chars_to_number (buf, THUMB_SIZE); 18417 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3); 18418 md_number_to_chars (buf, newval, THUMB_SIZE); 18419 } 18420 } 18421 break; 18422 18423 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. 
*/ 18424 if ((value & ~0xff) && ((value & ~0xff) != ~0xff)) 18425 as_bad_where (fixP->fx_file, fixP->fx_line, 18426 _("branch out of range")); 18427 18428 if (fixP->fx_done || !seg->use_rela_p) 18429 { 18430 newval = md_chars_to_number (buf, THUMB_SIZE); 18431 newval |= (value & 0x1ff) >> 1; 18432 md_number_to_chars (buf, newval, THUMB_SIZE); 18433 } 18434 break; 18435 18436 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */ 18437 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff)) 18438 as_bad_where (fixP->fx_file, fixP->fx_line, 18439 _("branch out of range")); 18440 18441 if (fixP->fx_done || !seg->use_rela_p) 18442 { 18443 newval = md_chars_to_number (buf, THUMB_SIZE); 18444 newval |= (value & 0xfff) >> 1; 18445 md_number_to_chars (buf, newval, THUMB_SIZE); 18446 } 18447 break; 18448 18449 case BFD_RELOC_THUMB_PCREL_BRANCH20: 18450 if ((value & ~0x1fffff) && ((value & ~0x1fffff) != ~0x1fffff)) 18451 as_bad_where (fixP->fx_file, fixP->fx_line, 18452 _("conditional branch out of range")); 18453 18454 if (fixP->fx_done || !seg->use_rela_p) 18455 { 18456 offsetT newval2; 18457 addressT S, J1, J2, lo, hi; 18458 18459 S = (value & 0x00100000) >> 20; 18460 J2 = (value & 0x00080000) >> 19; 18461 J1 = (value & 0x00040000) >> 18; 18462 hi = (value & 0x0003f000) >> 12; 18463 lo = (value & 0x00000ffe) >> 1; 18464 18465 newval = md_chars_to_number (buf, THUMB_SIZE); 18466 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE); 18467 newval |= (S << 10) | hi; 18468 newval2 |= (J1 << 13) | (J2 << 11) | lo; 18469 md_number_to_chars (buf, newval, THUMB_SIZE); 18470 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE); 18471 } 18472 break; 18473 18474 case BFD_RELOC_THUMB_PCREL_BLX: 18475 case BFD_RELOC_THUMB_PCREL_BRANCH23: 18476 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff)) 18477 as_bad_where (fixP->fx_file, fixP->fx_line, 18478 _("branch out of range")); 18479 18480 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX) 18481 /* For a 
BLX instruction, make sure that the relocation is rounded up 18482 to a word boundary. This follows the semantics of the instruction 18483 which specifies that bit 1 of the target address will come from bit 18484 1 of the base address. */ 18485 value = (value + 1) & ~ 1; 18486 18487 if (fixP->fx_done || !seg->use_rela_p) 18488 { 18489 offsetT newval2; 18490 18491 newval = md_chars_to_number (buf, THUMB_SIZE); 18492 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE); 18493 newval |= (value & 0x7fffff) >> 12; 18494 newval2 |= (value & 0xfff) >> 1; 18495 md_number_to_chars (buf, newval, THUMB_SIZE); 18496 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE); 18497 } 18498 break; 18499 18500 case BFD_RELOC_THUMB_PCREL_BRANCH25: 18501 if ((value & ~0x1ffffff) && ((value & ~0x1ffffff) != ~0x1ffffff)) 18502 as_bad_where (fixP->fx_file, fixP->fx_line, 18503 _("branch out of range")); 18504 18505 if (fixP->fx_done || !seg->use_rela_p) 18506 { 18507 offsetT newval2; 18508 addressT S, I1, I2, lo, hi; 18509 18510 S = (value & 0x01000000) >> 24; 18511 I1 = (value & 0x00800000) >> 23; 18512 I2 = (value & 0x00400000) >> 22; 18513 hi = (value & 0x003ff000) >> 12; 18514 lo = (value & 0x00000ffe) >> 1; 18515 18516 I1 = !(I1 ^ S); 18517 I2 = !(I2 ^ S); 18518 18519 newval = md_chars_to_number (buf, THUMB_SIZE); 18520 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE); 18521 newval |= (S << 10) | hi; 18522 newval2 |= (I1 << 13) | (I2 << 11) | lo; 18523 md_number_to_chars (buf, newval, THUMB_SIZE); 18524 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE); 18525 } 18526 break; 18527 18528 case BFD_RELOC_8: 18529 if (fixP->fx_done || !seg->use_rela_p) 18530 md_number_to_chars (buf, value, 1); 18531 break; 18532 18533 case BFD_RELOC_16: 18534 if (fixP->fx_done || !seg->use_rela_p) 18535 md_number_to_chars (buf, value, 2); 18536 break; 18537 18538#ifdef OBJ_ELF 18539 case BFD_RELOC_ARM_TLS_GD32: 18540 case BFD_RELOC_ARM_TLS_LE32: 18541 case 
BFD_RELOC_ARM_TLS_IE32: 18542 case BFD_RELOC_ARM_TLS_LDM32: 18543 case BFD_RELOC_ARM_TLS_LDO32: 18544 S_SET_THREAD_LOCAL (fixP->fx_addsy); 18545 /* fall through */ 18546 18547 case BFD_RELOC_ARM_GOT32: 18548 case BFD_RELOC_ARM_GOTOFF: 18549 case BFD_RELOC_ARM_TARGET2: 18550 if (fixP->fx_done || !seg->use_rela_p) 18551 md_number_to_chars (buf, 0, 4); 18552 break; 18553#endif 18554 18555 case BFD_RELOC_RVA: 18556 case BFD_RELOC_32: 18557 case BFD_RELOC_ARM_TARGET1: 18558 case BFD_RELOC_ARM_ROSEGREL32: 18559 case BFD_RELOC_ARM_SBREL32: 18560 case BFD_RELOC_32_PCREL: 18561#ifdef TE_PE 18562 case BFD_RELOC_32_SECREL: 18563#endif 18564 if (fixP->fx_done || !seg->use_rela_p) 18565#ifdef TE_WINCE 18566 /* For WinCE we only do this for pcrel fixups. */ 18567 if (fixP->fx_done || fixP->fx_pcrel) 18568#endif 18569 md_number_to_chars (buf, value, 4); 18570 break; 18571 18572#ifdef OBJ_ELF 18573 case BFD_RELOC_ARM_PREL31: 18574 if (fixP->fx_done || !seg->use_rela_p) 18575 { 18576 newval = md_chars_to_number (buf, 4) & 0x80000000; 18577 if ((value ^ (value >> 1)) & 0x40000000) 18578 { 18579 as_bad_where (fixP->fx_file, fixP->fx_line, 18580 _("rel31 relocation overflow")); 18581 } 18582 newval |= value & 0x7fffffff; 18583 md_number_to_chars (buf, newval, 4); 18584 } 18585 break; 18586#endif 18587 18588 case BFD_RELOC_ARM_CP_OFF_IMM: 18589 case BFD_RELOC_ARM_T32_CP_OFF_IMM: 18590 if (value < -1023 || value > 1023 || (value & 3)) 18591 as_bad_where (fixP->fx_file, fixP->fx_line, 18592 _("co-processor offset out of range")); 18593 cp_off_common: 18594 sign = value >= 0; 18595 if (value < 0) 18596 value = -value; 18597 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM 18598 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2) 18599 newval = md_chars_to_number (buf, INSN_SIZE); 18600 else 18601 newval = get_thumb32_insn (buf); 18602 newval &= 0xff7fff00; 18603 newval |= (value >> 2) | (sign ? 
INDEX_UP : 0); 18604 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM 18605 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2) 18606 md_number_to_chars (buf, newval, INSN_SIZE); 18607 else 18608 put_thumb32_insn (buf, newval); 18609 break; 18610 18611 case BFD_RELOC_ARM_CP_OFF_IMM_S2: 18612 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2: 18613 if (value < -255 || value > 255) 18614 as_bad_where (fixP->fx_file, fixP->fx_line, 18615 _("co-processor offset out of range")); 18616 value *= 4; 18617 goto cp_off_common; 18618 18619 case BFD_RELOC_ARM_THUMB_OFFSET: 18620 newval = md_chars_to_number (buf, THUMB_SIZE); 18621 /* Exactly what ranges, and where the offset is inserted depends 18622 on the type of instruction, we can establish this from the 18623 top 4 bits. */ 18624 switch (newval >> 12) 18625 { 18626 case 4: /* PC load. */ 18627 /* Thumb PC loads are somewhat odd, bit 1 of the PC is 18628 forced to zero for these loads; md_pcrel_from has already 18629 compensated for this. */ 18630 if (value & 3) 18631 as_bad_where (fixP->fx_file, fixP->fx_line, 18632 _("invalid offset, target not word aligned (0x%08lX)"), 18633 (((unsigned long) fixP->fx_frag->fr_address 18634 + (unsigned long) fixP->fx_where) & ~3) 18635 + (unsigned long) value); 18636 18637 if (value & ~0x3fc) 18638 as_bad_where (fixP->fx_file, fixP->fx_line, 18639 _("invalid offset, value too big (0x%08lX)"), 18640 (long) value); 18641 18642 newval |= value >> 2; 18643 break; 18644 18645 case 9: /* SP load/store. */ 18646 if (value & ~0x3fc) 18647 as_bad_where (fixP->fx_file, fixP->fx_line, 18648 _("invalid offset, value too big (0x%08lX)"), 18649 (long) value); 18650 newval |= value >> 2; 18651 break; 18652 18653 case 6: /* Word load/store. */ 18654 if (value & ~0x7c) 18655 as_bad_where (fixP->fx_file, fixP->fx_line, 18656 _("invalid offset, value too big (0x%08lX)"), 18657 (long) value); 18658 newval |= value << 4; /* 6 - 2. */ 18659 break; 18660 18661 case 7: /* Byte load/store. 
*/ 18662 if (value & ~0x1f) 18663 as_bad_where (fixP->fx_file, fixP->fx_line, 18664 _("invalid offset, value too big (0x%08lX)"), 18665 (long) value); 18666 newval |= value << 6; 18667 break; 18668 18669 case 8: /* Halfword load/store. */ 18670 if (value & ~0x3e) 18671 as_bad_where (fixP->fx_file, fixP->fx_line, 18672 _("invalid offset, value too big (0x%08lX)"), 18673 (long) value); 18674 newval |= value << 5; /* 6 - 1. */ 18675 break; 18676 18677 default: 18678 as_bad_where (fixP->fx_file, fixP->fx_line, 18679 "Unable to process relocation for thumb opcode: %lx", 18680 (unsigned long) newval); 18681 break; 18682 } 18683 md_number_to_chars (buf, newval, THUMB_SIZE); 18684 break; 18685 18686 case BFD_RELOC_ARM_THUMB_ADD: 18687 /* This is a complicated relocation, since we use it for all of 18688 the following immediate relocations: 18689 18690 3bit ADD/SUB 18691 8bit ADD/SUB 18692 9bit ADD/SUB SP word-aligned 18693 10bit ADD PC/SP word-aligned 18694 18695 The type of instruction being processed is encoded in the 18696 instruction field: 18697 18698 0x8000 SUB 18699 0x00F0 Rd 18700 0x000F Rs 18701 */ 18702 newval = md_chars_to_number (buf, THUMB_SIZE); 18703 { 18704 int rd = (newval >> 4) & 0xf; 18705 int rs = newval & 0xf; 18706 int subtract = !!(newval & 0x8000); 18707 18708 /* Check for HI regs, only very restricted cases allowed: 18709 Adjusting SP, and using PC or SP to get an address. */ 18710 if ((rd > 7 && (rd != REG_SP || rs != REG_SP)) 18711 || (rs > 7 && rs != REG_SP && rs != REG_PC)) 18712 as_bad_where (fixP->fx_file, fixP->fx_line, 18713 _("invalid Hi register with immediate")); 18714 18715 /* If value is negative, choose the opposite instruction. 
*/ 18716 if (value < 0) 18717 { 18718 value = -value; 18719 subtract = !subtract; 18720 if (value < 0) 18721 as_bad_where (fixP->fx_file, fixP->fx_line, 18722 _("immediate value out of range")); 18723 } 18724 18725 if (rd == REG_SP) 18726 { 18727 if (value & ~0x1fc) 18728 as_bad_where (fixP->fx_file, fixP->fx_line, 18729 _("invalid immediate for stack address calculation")); 18730 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST; 18731 newval |= value >> 2; 18732 } 18733 else if (rs == REG_PC || rs == REG_SP) 18734 { 18735 if (subtract || value & ~0x3fc) 18736 as_bad_where (fixP->fx_file, fixP->fx_line, 18737 _("invalid immediate for address calculation (value = 0x%08lX)"), 18738 (unsigned long) value); 18739 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP); 18740 newval |= rd << 8; 18741 newval |= value >> 2; 18742 } 18743 else if (rs == rd) 18744 { 18745 if (value & ~0xff) 18746 as_bad_where (fixP->fx_file, fixP->fx_line, 18747 _("immediate value out of range")); 18748 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8; 18749 newval |= (rd << 8) | value; 18750 } 18751 else 18752 { 18753 if (value & ~0x7) 18754 as_bad_where (fixP->fx_file, fixP->fx_line, 18755 _("immediate value out of range")); 18756 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3; 18757 newval |= rd | (rs << 3) | (value << 6); 18758 } 18759 } 18760 md_number_to_chars (buf, newval, THUMB_SIZE); 18761 break; 18762 18763 case BFD_RELOC_ARM_THUMB_IMM: 18764 newval = md_chars_to_number (buf, THUMB_SIZE); 18765 if (value < 0 || value > 255) 18766 as_bad_where (fixP->fx_file, fixP->fx_line, 18767 _("invalid immediate: %ld is too large"), 18768 (long) value); 18769 newval |= value; 18770 md_number_to_chars (buf, newval, THUMB_SIZE); 18771 break; 18772 18773 case BFD_RELOC_ARM_THUMB_SHIFT: 18774 /* 5bit shift value (0..32). LSL cannot take 32. 
*/ 18775 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f; 18776 temp = newval & 0xf800; 18777 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I)) 18778 as_bad_where (fixP->fx_file, fixP->fx_line, 18779 _("invalid shift value: %ld"), (long) value); 18780 /* Shifts of zero must be encoded as LSL. */ 18781 if (value == 0) 18782 newval = (newval & 0x003f) | T_OPCODE_LSL_I; 18783 /* Shifts of 32 are encoded as zero. */ 18784 else if (value == 32) 18785 value = 0; 18786 newval |= value << 6; 18787 md_number_to_chars (buf, newval, THUMB_SIZE); 18788 break; 18789 18790 case BFD_RELOC_VTABLE_INHERIT: 18791 case BFD_RELOC_VTABLE_ENTRY: 18792 fixP->fx_done = 0; 18793 return; 18794 18795 case BFD_RELOC_ARM_MOVW: 18796 case BFD_RELOC_ARM_MOVT: 18797 case BFD_RELOC_ARM_THUMB_MOVW: 18798 case BFD_RELOC_ARM_THUMB_MOVT: 18799 if (fixP->fx_done || !seg->use_rela_p) 18800 { 18801 /* REL format relocations are limited to a 16-bit addend. */ 18802 if (!fixP->fx_done) 18803 { 18804 if (value < -0x1000 || value > 0xffff) 18805 as_bad_where (fixP->fx_file, fixP->fx_line, 18806 _("offset too big")); 18807 } 18808 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT 18809 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT) 18810 { 18811 value >>= 16; 18812 } 18813 18814 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW 18815 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT) 18816 { 18817 newval = get_thumb32_insn (buf); 18818 newval &= 0xfbf08f00; 18819 newval |= (value & 0xf000) << 4; 18820 newval |= (value & 0x0800) << 15; 18821 newval |= (value & 0x0700) << 4; 18822 newval |= (value & 0x00ff); 18823 put_thumb32_insn (buf, newval); 18824 } 18825 else 18826 { 18827 newval = md_chars_to_number (buf, 4); 18828 newval &= 0xfff0f000; 18829 newval |= value & 0x0fff; 18830 newval |= (value & 0xf000) << 4; 18831 md_number_to_chars (buf, newval, 4); 18832 } 18833 } 18834 return; 18835 18836 case BFD_RELOC_ARM_ALU_PC_G0_NC: 18837 case BFD_RELOC_ARM_ALU_PC_G0: 18838 case 
BFD_RELOC_ARM_ALU_PC_G1_NC: 18839 case BFD_RELOC_ARM_ALU_PC_G1: 18840 case BFD_RELOC_ARM_ALU_PC_G2: 18841 case BFD_RELOC_ARM_ALU_SB_G0_NC: 18842 case BFD_RELOC_ARM_ALU_SB_G0: 18843 case BFD_RELOC_ARM_ALU_SB_G1_NC: 18844 case BFD_RELOC_ARM_ALU_SB_G1: 18845 case BFD_RELOC_ARM_ALU_SB_G2: 18846 assert (!fixP->fx_done); 18847 if (!seg->use_rela_p) 18848 { 18849 bfd_vma insn; 18850 bfd_vma encoded_addend; 18851 bfd_vma addend_abs = abs (value); 18852 18853 /* Check that the absolute value of the addend can be 18854 expressed as an 8-bit constant plus a rotation. */ 18855 encoded_addend = encode_arm_immediate (addend_abs); 18856 if (encoded_addend == (unsigned int) FAIL) 18857 as_bad_where (fixP->fx_file, fixP->fx_line, 18858 _("the offset 0x%08lX is not representable"), 18859 (unsigned long) addend_abs); 18860 18861 /* Extract the instruction. */ 18862 insn = md_chars_to_number (buf, INSN_SIZE); 18863 18864 /* If the addend is positive, use an ADD instruction. 18865 Otherwise use a SUB. Take care not to destroy the S bit. */ 18866 insn &= 0xff1fffff; 18867 if (value < 0) 18868 insn |= 1 << 22; 18869 else 18870 insn |= 1 << 23; 18871 18872 /* Place the encoded addend into the first 12 bits of the 18873 instruction. */ 18874 insn &= 0xfffff000; 18875 insn |= encoded_addend; 18876 18877 /* Update the instruction. */ 18878 md_number_to_chars (buf, insn, INSN_SIZE); 18879 } 18880 break; 18881 18882 case BFD_RELOC_ARM_LDR_PC_G0: 18883 case BFD_RELOC_ARM_LDR_PC_G1: 18884 case BFD_RELOC_ARM_LDR_PC_G2: 18885 case BFD_RELOC_ARM_LDR_SB_G0: 18886 case BFD_RELOC_ARM_LDR_SB_G1: 18887 case BFD_RELOC_ARM_LDR_SB_G2: 18888 assert (!fixP->fx_done); 18889 if (!seg->use_rela_p) 18890 { 18891 bfd_vma insn; 18892 bfd_vma addend_abs = abs (value); 18893 18894 /* Check that the absolute value of the addend can be 18895 encoded in 12 bits. 
*/ 18896 if (addend_abs >= 0x1000) 18897 as_bad_where (fixP->fx_file, fixP->fx_line, 18898 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"), 18899 (unsigned long) addend_abs); 18900 18901 /* Extract the instruction. */ 18902 insn = md_chars_to_number (buf, INSN_SIZE); 18903 18904 /* If the addend is negative, clear bit 23 of the instruction. 18905 Otherwise set it. */ 18906 if (value < 0) 18907 insn &= ~(1 << 23); 18908 else 18909 insn |= 1 << 23; 18910 18911 /* Place the absolute value of the addend into the first 12 bits 18912 of the instruction. */ 18913 insn &= 0xfffff000; 18914 insn |= addend_abs; 18915 18916 /* Update the instruction. */ 18917 md_number_to_chars (buf, insn, INSN_SIZE); 18918 } 18919 break; 18920 18921 case BFD_RELOC_ARM_LDRS_PC_G0: 18922 case BFD_RELOC_ARM_LDRS_PC_G1: 18923 case BFD_RELOC_ARM_LDRS_PC_G2: 18924 case BFD_RELOC_ARM_LDRS_SB_G0: 18925 case BFD_RELOC_ARM_LDRS_SB_G1: 18926 case BFD_RELOC_ARM_LDRS_SB_G2: 18927 assert (!fixP->fx_done); 18928 if (!seg->use_rela_p) 18929 { 18930 bfd_vma insn; 18931 bfd_vma addend_abs = abs (value); 18932 18933 /* Check that the absolute value of the addend can be 18934 encoded in 8 bits. */ 18935 if (addend_abs >= 0x100) 18936 as_bad_where (fixP->fx_file, fixP->fx_line, 18937 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"), 18938 (unsigned long) addend_abs); 18939 18940 /* Extract the instruction. */ 18941 insn = md_chars_to_number (buf, INSN_SIZE); 18942 18943 /* If the addend is negative, clear bit 23 of the instruction. 18944 Otherwise set it. */ 18945 if (value < 0) 18946 insn &= ~(1 << 23); 18947 else 18948 insn |= 1 << 23; 18949 18950 /* Place the first four bits of the absolute value of the addend 18951 into the first 4 bits of the instruction, and the remaining 18952 four into bits 8 .. 11. */ 18953 insn &= 0xfffff0f0; 18954 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4); 18955 18956 /* Update the instruction. 
*/ 18957 md_number_to_chars (buf, insn, INSN_SIZE); 18958 } 18959 break; 18960 18961 case BFD_RELOC_ARM_LDC_PC_G0: 18962 case BFD_RELOC_ARM_LDC_PC_G1: 18963 case BFD_RELOC_ARM_LDC_PC_G2: 18964 case BFD_RELOC_ARM_LDC_SB_G0: 18965 case BFD_RELOC_ARM_LDC_SB_G1: 18966 case BFD_RELOC_ARM_LDC_SB_G2: 18967 assert (!fixP->fx_done); 18968 if (!seg->use_rela_p) 18969 { 18970 bfd_vma insn; 18971 bfd_vma addend_abs = abs (value); 18972 18973 /* Check that the absolute value of the addend is a multiple of 18974 four and, when divided by four, fits in 8 bits. */ 18975 if (addend_abs & 0x3) 18976 as_bad_where (fixP->fx_file, fixP->fx_line, 18977 _("bad offset 0x%08lX (must be word-aligned)"), 18978 (unsigned long) addend_abs); 18979 18980 if ((addend_abs >> 2) > 0xff) 18981 as_bad_where (fixP->fx_file, fixP->fx_line, 18982 _("bad offset 0x%08lX (must be an 8-bit number of words)"), 18983 (unsigned long) addend_abs); 18984 18985 /* Extract the instruction. */ 18986 insn = md_chars_to_number (buf, INSN_SIZE); 18987 18988 /* If the addend is negative, clear bit 23 of the instruction. 18989 Otherwise set it. */ 18990 if (value < 0) 18991 insn &= ~(1 << 23); 18992 else 18993 insn |= 1 << 23; 18994 18995 /* Place the addend (divided by four) into the first eight 18996 bits of the instruction. */ 18997 insn &= 0xfffffff0; 18998 insn |= addend_abs >> 2; 18999 19000 /* Update the instruction. */ 19001 md_number_to_chars (buf, insn, INSN_SIZE); 19002 } 19003 break; 19004 19005 case BFD_RELOC_UNUSED: 19006 default: 19007 as_bad_where (fixP->fx_file, fixP->fx_line, 19008 _("bad relocation fixup type (%d)"), fixP->fx_r_type); 19009 } 19010} 19011 19012/* Translate internal representation of relocation info to BFD target 19013 format. 
*/ 19014 19015arelent * 19016tc_gen_reloc (asection *section, fixS *fixp) 19017{ 19018 arelent * reloc; 19019 bfd_reloc_code_real_type code; 19020 19021 reloc = xmalloc (sizeof (arelent)); 19022 19023 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *)); 19024 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy); 19025 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where; 19026 19027 if (fixp->fx_pcrel) 19028 { 19029 if (section->use_rela_p) 19030 fixp->fx_offset -= md_pcrel_from_section (fixp, section); 19031 else 19032 fixp->fx_offset = reloc->address; 19033 } 19034 reloc->addend = fixp->fx_offset; 19035 19036 switch (fixp->fx_r_type) 19037 { 19038 case BFD_RELOC_8: 19039 if (fixp->fx_pcrel) 19040 { 19041 code = BFD_RELOC_8_PCREL; 19042 break; 19043 } 19044 19045 case BFD_RELOC_16: 19046 if (fixp->fx_pcrel) 19047 { 19048 code = BFD_RELOC_16_PCREL; 19049 break; 19050 } 19051 19052 case BFD_RELOC_32: 19053 if (fixp->fx_pcrel) 19054 { 19055 code = BFD_RELOC_32_PCREL; 19056 break; 19057 } 19058 19059 case BFD_RELOC_ARM_MOVW: 19060 if (fixp->fx_pcrel) 19061 { 19062 code = BFD_RELOC_ARM_MOVW_PCREL; 19063 break; 19064 } 19065 19066 case BFD_RELOC_ARM_MOVT: 19067 if (fixp->fx_pcrel) 19068 { 19069 code = BFD_RELOC_ARM_MOVT_PCREL; 19070 break; 19071 } 19072 19073 case BFD_RELOC_ARM_THUMB_MOVW: 19074 if (fixp->fx_pcrel) 19075 { 19076 code = BFD_RELOC_ARM_THUMB_MOVW_PCREL; 19077 break; 19078 } 19079 19080 case BFD_RELOC_ARM_THUMB_MOVT: 19081 if (fixp->fx_pcrel) 19082 { 19083 code = BFD_RELOC_ARM_THUMB_MOVT_PCREL; 19084 break; 19085 } 19086 19087 case BFD_RELOC_NONE: 19088 case BFD_RELOC_ARM_PCREL_BRANCH: 19089 case BFD_RELOC_ARM_PCREL_BLX: 19090 case BFD_RELOC_RVA: 19091 case BFD_RELOC_THUMB_PCREL_BRANCH7: 19092 case BFD_RELOC_THUMB_PCREL_BRANCH9: 19093 case BFD_RELOC_THUMB_PCREL_BRANCH12: 19094 case BFD_RELOC_THUMB_PCREL_BRANCH20: 19095 case BFD_RELOC_THUMB_PCREL_BRANCH23: 19096 case BFD_RELOC_THUMB_PCREL_BRANCH25: 19097 case BFD_RELOC_THUMB_PCREL_BLX: 19098 case 
BFD_RELOC_VTABLE_ENTRY: 19099 case BFD_RELOC_VTABLE_INHERIT: 19100#ifdef TE_PE 19101 case BFD_RELOC_32_SECREL: 19102#endif 19103 code = fixp->fx_r_type; 19104 break; 19105 19106 case BFD_RELOC_ARM_LITERAL: 19107 case BFD_RELOC_ARM_HWLITERAL: 19108 /* If this is called then the a literal has 19109 been referenced across a section boundary. */ 19110 as_bad_where (fixp->fx_file, fixp->fx_line, 19111 _("literal referenced across section boundary")); 19112 return NULL; 19113 19114#ifdef OBJ_ELF 19115 case BFD_RELOC_ARM_GOT32: 19116 case BFD_RELOC_ARM_GOTOFF: 19117 case BFD_RELOC_ARM_PLT32: 19118 case BFD_RELOC_ARM_TARGET1: 19119 case BFD_RELOC_ARM_ROSEGREL32: 19120 case BFD_RELOC_ARM_SBREL32: 19121 case BFD_RELOC_ARM_PREL31: 19122 case BFD_RELOC_ARM_TARGET2: 19123 case BFD_RELOC_ARM_TLS_LE32: 19124 case BFD_RELOC_ARM_TLS_LDO32: 19125 case BFD_RELOC_ARM_PCREL_CALL: 19126 case BFD_RELOC_ARM_PCREL_JUMP: 19127 case BFD_RELOC_ARM_ALU_PC_G0_NC: 19128 case BFD_RELOC_ARM_ALU_PC_G0: 19129 case BFD_RELOC_ARM_ALU_PC_G1_NC: 19130 case BFD_RELOC_ARM_ALU_PC_G1: 19131 case BFD_RELOC_ARM_ALU_PC_G2: 19132 case BFD_RELOC_ARM_LDR_PC_G0: 19133 case BFD_RELOC_ARM_LDR_PC_G1: 19134 case BFD_RELOC_ARM_LDR_PC_G2: 19135 case BFD_RELOC_ARM_LDRS_PC_G0: 19136 case BFD_RELOC_ARM_LDRS_PC_G1: 19137 case BFD_RELOC_ARM_LDRS_PC_G2: 19138 case BFD_RELOC_ARM_LDC_PC_G0: 19139 case BFD_RELOC_ARM_LDC_PC_G1: 19140 case BFD_RELOC_ARM_LDC_PC_G2: 19141 case BFD_RELOC_ARM_ALU_SB_G0_NC: 19142 case BFD_RELOC_ARM_ALU_SB_G0: 19143 case BFD_RELOC_ARM_ALU_SB_G1_NC: 19144 case BFD_RELOC_ARM_ALU_SB_G1: 19145 case BFD_RELOC_ARM_ALU_SB_G2: 19146 case BFD_RELOC_ARM_LDR_SB_G0: 19147 case BFD_RELOC_ARM_LDR_SB_G1: 19148 case BFD_RELOC_ARM_LDR_SB_G2: 19149 case BFD_RELOC_ARM_LDRS_SB_G0: 19150 case BFD_RELOC_ARM_LDRS_SB_G1: 19151 case BFD_RELOC_ARM_LDRS_SB_G2: 19152 case BFD_RELOC_ARM_LDC_SB_G0: 19153 case BFD_RELOC_ARM_LDC_SB_G1: 19154 case BFD_RELOC_ARM_LDC_SB_G2: 19155 code = fixp->fx_r_type; 19156 break; 19157 19158 case 
BFD_RELOC_ARM_TLS_GD32: 19159 case BFD_RELOC_ARM_TLS_IE32: 19160 case BFD_RELOC_ARM_TLS_LDM32: 19161 /* BFD will include the symbol's address in the addend. 19162 But we don't want that, so subtract it out again here. */ 19163 if (!S_IS_COMMON (fixp->fx_addsy)) 19164 reloc->addend -= (*reloc->sym_ptr_ptr)->value; 19165 code = fixp->fx_r_type; 19166 break; 19167#endif 19168 19169 case BFD_RELOC_ARM_IMMEDIATE: 19170 as_bad_where (fixp->fx_file, fixp->fx_line, 19171 _("internal relocation (type: IMMEDIATE) not fixed up")); 19172 return NULL; 19173 19174 case BFD_RELOC_ARM_ADRL_IMMEDIATE: 19175 as_bad_where (fixp->fx_file, fixp->fx_line, 19176 _("ADRL used for a symbol not defined in the same file")); 19177 return NULL; 19178 19179 case BFD_RELOC_ARM_OFFSET_IMM: 19180 if (section->use_rela_p) 19181 { 19182 code = fixp->fx_r_type; 19183 break; 19184 } 19185 19186 if (fixp->fx_addsy != NULL 19187 && !S_IS_DEFINED (fixp->fx_addsy) 19188 && S_IS_LOCAL (fixp->fx_addsy)) 19189 { 19190 as_bad_where (fixp->fx_file, fixp->fx_line, 19191 _("undefined local label `%s'"), 19192 S_GET_NAME (fixp->fx_addsy)); 19193 return NULL; 19194 } 19195 19196 as_bad_where (fixp->fx_file, fixp->fx_line, 19197 _("internal_relocation (type: OFFSET_IMM) not fixed up")); 19198 return NULL; 19199 19200 default: 19201 { 19202 char * type; 19203 19204 switch (fixp->fx_r_type) 19205 { 19206 case BFD_RELOC_NONE: type = "NONE"; break; 19207 case BFD_RELOC_ARM_OFFSET_IMM8: type = "OFFSET_IMM8"; break; 19208 case BFD_RELOC_ARM_SHIFT_IMM: type = "SHIFT_IMM"; break; 19209 case BFD_RELOC_ARM_SMC: type = "SMC"; break; 19210 case BFD_RELOC_ARM_SWI: type = "SWI"; break; 19211 case BFD_RELOC_ARM_MULTI: type = "MULTI"; break; 19212 case BFD_RELOC_ARM_CP_OFF_IMM: type = "CP_OFF_IMM"; break; 19213 case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break; 19214 case BFD_RELOC_ARM_THUMB_ADD: type = "THUMB_ADD"; break; 19215 case BFD_RELOC_ARM_THUMB_SHIFT: type = "THUMB_SHIFT"; break; 19216 case 
BFD_RELOC_ARM_THUMB_IMM: type = "THUMB_IMM"; break; 19217 case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break; 19218 default: type = _("<unknown>"); break; 19219 } 19220 as_bad_where (fixp->fx_file, fixp->fx_line, 19221 _("cannot represent %s relocation in this object file format"), 19222 type); 19223 return NULL; 19224 } 19225 } 19226 19227#ifdef OBJ_ELF 19228 if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32) 19229 && GOT_symbol 19230 && fixp->fx_addsy == GOT_symbol) 19231 { 19232 code = BFD_RELOC_ARM_GOTPC; 19233 reloc->addend = fixp->fx_offset = reloc->address; 19234 } 19235#endif 19236 19237 reloc->howto = bfd_reloc_type_lookup (stdoutput, code); 19238 19239 if (reloc->howto == NULL) 19240 { 19241 as_bad_where (fixp->fx_file, fixp->fx_line, 19242 _("cannot represent %s relocation in this object file format"), 19243 bfd_get_reloc_code_name (code)); 19244 return NULL; 19245 } 19246 19247 /* HACK: Since arm ELF uses Rel instead of Rela, encode the 19248 vtable entry to be used in the relocation's section offset. */ 19249 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY) 19250 reloc->address = fixp->fx_offset; 19251 19252 return reloc; 19253} 19254 19255/* This fix_new is called by cons via TC_CONS_FIX_NEW. */ 19256 19257void 19258cons_fix_new_arm (fragS * frag, 19259 int where, 19260 int size, 19261 expressionS * exp) 19262{ 19263 bfd_reloc_code_real_type type; 19264 int pcrel = 0; 19265 19266 /* Pick a reloc. 19267 FIXME: @@ Should look at CPU word size. 
*/ 19268 switch (size) 19269 { 19270 case 1: 19271 type = BFD_RELOC_8; 19272 break; 19273 case 2: 19274 type = BFD_RELOC_16; 19275 break; 19276 case 4: 19277 default: 19278 type = BFD_RELOC_32; 19279 break; 19280 case 8: 19281 type = BFD_RELOC_64; 19282 break; 19283 } 19284 19285#ifdef TE_PE 19286 if (exp->X_op == O_secrel) 19287 { 19288 exp->X_op = O_symbol; 19289 type = BFD_RELOC_32_SECREL; 19290 } 19291#endif 19292 19293 fix_new_exp (frag, where, (int) size, exp, pcrel, type); 19294} 19295 19296#if defined OBJ_COFF || defined OBJ_ELF 19297void 19298arm_validate_fix (fixS * fixP) 19299{ 19300 /* If the destination of the branch is a defined symbol which does not have 19301 the THUMB_FUNC attribute, then we must be calling a function which has 19302 the (interfacearm) attribute. We look for the Thumb entry point to that 19303 function and change the branch to refer to that function instead. */ 19304 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23 19305 && fixP->fx_addsy != NULL 19306 && S_IS_DEFINED (fixP->fx_addsy) 19307 && ! THUMB_IS_FUNC (fixP->fx_addsy)) 19308 { 19309 fixP->fx_addsy = find_real_start (fixP->fx_addsy); 19310 } 19311} 19312#endif 19313 19314int 19315arm_force_relocation (struct fix * fixp) 19316{ 19317#if defined (OBJ_COFF) && defined (TE_PE) 19318 if (fixp->fx_r_type == BFD_RELOC_RVA) 19319 return 1; 19320#endif 19321 19322 /* Resolve these relocations even if the symbol is extern or weak. */ 19323 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE 19324 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM 19325 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE 19326 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM 19327 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE 19328 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12 19329 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12) 19330 return 0; 19331 19332 /* Always leave these relocations for the linker. 
*/ 19333 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC 19334 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2) 19335 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0) 19336 return 1; 19337 19338 /* Always generate relocations against function symbols. */ 19339 if (fixp->fx_r_type == BFD_RELOC_32 19340 && fixp->fx_addsy 19341 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION)) 19342 return 1; 19343 19344 return generic_force_reloc (fixp); 19345} 19346 19347#if defined (OBJ_ELF) || defined (OBJ_COFF) 19348/* Relocations against function names must be left unadjusted, 19349 so that the linker can use this information to generate interworking 19350 stubs. The MIPS version of this function 19351 also prevents relocations that are mips-16 specific, but I do not 19352 know why it does this. 19353 19354 FIXME: 19355 There is one other problem that ought to be addressed here, but 19356 which currently is not: Taking the address of a label (rather 19357 than a function) and then later jumping to that address. Such 19358 addresses also ought to have their bottom bit set (assuming that 19359 they reside in Thumb code), but at the moment they will not. */ 19360 19361bfd_boolean 19362arm_fix_adjustable (fixS * fixP) 19363{ 19364 if (fixP->fx_addsy == NULL) 19365 return 1; 19366 19367 /* Preserve relocations against symbols with function type. */ 19368 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION) 19369 return 0; 19370 19371 if (THUMB_IS_FUNC (fixP->fx_addsy) 19372 && fixP->fx_subsy == NULL) 19373 return 0; 19374 19375 /* We need the symbol name for the VTABLE entries. */ 19376 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT 19377 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY) 19378 return 0; 19379 19380 /* Don't allow symbols to be discarded on GOT related relocs. 
*/ 19381 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32 19382 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32 19383 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF 19384 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32 19385 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32 19386 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32 19387 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32 19388 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32 19389 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2) 19390 return 0; 19391 19392 /* Similarly for group relocations. */ 19393 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC 19394 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2) 19395 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0) 19396 return 0; 19397 19398 if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW 19399 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT 19400 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW 19401 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT) 19402 return 0; 19403 19404 return 1; 19405} 19406#endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */ 19407 19408#ifdef OBJ_ELF 19409 19410const char * 19411elf32_arm_target_format (void) 19412{ 19413#ifdef TE_SYMBIAN 19414 return (target_big_endian 19415 ? "elf32-bigarm-symbian" 19416 : "elf32-littlearm-symbian"); 19417#elif defined (TE_VXWORKS) 19418 return (target_big_endian 19419 ? "elf32-bigarm-vxworks" 19420 : "elf32-littlearm-vxworks"); 19421#else 19422 if (target_big_endian) 19423 return "elf32-bigarm"; 19424 else 19425 return "elf32-littlearm"; 19426#endif 19427} 19428 19429void 19430armelf_frob_symbol (symbolS * symp, 19431 int * puntp) 19432{ 19433 elf_frob_symbol (symp, puntp); 19434} 19435#endif 19436 19437/* MD interface: Finalization. */ 19438 19439/* A good place to do this, although this was probably not intended 19440 for this kind of use. We need to dump the literal pool before 19441 references are made to a null symbol pointer. 
*/ 19442 19443void 19444arm_cleanup (void) 19445{ 19446 literal_pool * pool; 19447 19448 for (pool = list_of_pools; pool; pool = pool->next) 19449 { 19450 /* Put it at the end of the relevent section. */ 19451 subseg_set (pool->section, pool->sub_section); 19452#ifdef OBJ_ELF 19453 arm_elf_change_section (); 19454#endif 19455 s_ltorg (0); 19456 } 19457} 19458 19459/* Adjust the symbol table. This marks Thumb symbols as distinct from 19460 ARM ones. */ 19461 19462void 19463arm_adjust_symtab (void) 19464{ 19465#ifdef OBJ_COFF 19466 symbolS * sym; 19467 19468 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym)) 19469 { 19470 if (ARM_IS_THUMB (sym)) 19471 { 19472 if (THUMB_IS_FUNC (sym)) 19473 { 19474 /* Mark the symbol as a Thumb function. */ 19475 if ( S_GET_STORAGE_CLASS (sym) == C_STAT 19476 || S_GET_STORAGE_CLASS (sym) == C_LABEL) /* This can happen! */ 19477 S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC); 19478 19479 else if (S_GET_STORAGE_CLASS (sym) == C_EXT) 19480 S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC); 19481 else 19482 as_bad (_("%s: unexpected function type: %d"), 19483 S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym)); 19484 } 19485 else switch (S_GET_STORAGE_CLASS (sym)) 19486 { 19487 case C_EXT: 19488 S_SET_STORAGE_CLASS (sym, C_THUMBEXT); 19489 break; 19490 case C_STAT: 19491 S_SET_STORAGE_CLASS (sym, C_THUMBSTAT); 19492 break; 19493 case C_LABEL: 19494 S_SET_STORAGE_CLASS (sym, C_THUMBLABEL); 19495 break; 19496 default: 19497 /* Do nothing. 
*/ 19498 break; 19499 } 19500 } 19501 19502 if (ARM_IS_INTERWORK (sym)) 19503 coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF; 19504 } 19505#endif 19506#ifdef OBJ_ELF 19507 symbolS * sym; 19508 char bind; 19509 19510 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym)) 19511 { 19512 if (ARM_IS_THUMB (sym)) 19513 { 19514 elf_symbol_type * elf_sym; 19515 19516 elf_sym = elf_symbol (symbol_get_bfdsym (sym)); 19517 bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info); 19518 19519 if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name, 19520 BFD_ARM_SPECIAL_SYM_TYPE_ANY)) 19521 { 19522 /* If it's a .thumb_func, declare it as so, 19523 otherwise tag label as .code 16. */ 19524 if (THUMB_IS_FUNC (sym)) 19525 elf_sym->internal_elf_sym.st_info = 19526 ELF_ST_INFO (bind, STT_ARM_TFUNC); 19527 else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4) 19528 elf_sym->internal_elf_sym.st_info = 19529 ELF_ST_INFO (bind, STT_ARM_16BIT); 19530 } 19531 } 19532 } 19533#endif 19534} 19535 19536/* MD interface: Initialization. */ 19537 19538static void 19539set_constant_flonums (void) 19540{ 19541 int i; 19542 19543 for (i = 0; i < NUM_FLOAT_VALS; i++) 19544 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL) 19545 abort (); 19546} 19547 19548/* Auto-select Thumb mode if it's the only available instruction set for the 19549 given architecture. 
*/ 19550 19551static void 19552autoselect_thumb_from_cpu_variant (void) 19553{ 19554 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)) 19555 opcode_select (16); 19556} 19557 19558void 19559md_begin (void) 19560{ 19561 unsigned mach; 19562 unsigned int i; 19563 19564 if ( (arm_ops_hsh = hash_new ()) == NULL 19565 || (arm_cond_hsh = hash_new ()) == NULL 19566 || (arm_shift_hsh = hash_new ()) == NULL 19567 || (arm_psr_hsh = hash_new ()) == NULL 19568 || (arm_v7m_psr_hsh = hash_new ()) == NULL 19569 || (arm_reg_hsh = hash_new ()) == NULL 19570 || (arm_reloc_hsh = hash_new ()) == NULL 19571 || (arm_barrier_opt_hsh = hash_new ()) == NULL) 19572 as_fatal (_("virtual memory exhausted")); 19573 19574 for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++) 19575 hash_insert (arm_ops_hsh, insns[i].template, (PTR) (insns + i)); 19576 for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++) 19577 hash_insert (arm_cond_hsh, conds[i].template, (PTR) (conds + i)); 19578 for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++) 19579 hash_insert (arm_shift_hsh, shift_names[i].name, (PTR) (shift_names + i)); 19580 for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++) 19581 hash_insert (arm_psr_hsh, psrs[i].template, (PTR) (psrs + i)); 19582 for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++) 19583 hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template, (PTR) (v7m_psrs + i)); 19584 for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++) 19585 hash_insert (arm_reg_hsh, reg_names[i].name, (PTR) (reg_names + i)); 19586 for (i = 0; 19587 i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt); 19588 i++) 19589 hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template, 19590 (PTR) (barrier_opt_names + i)); 19591#ifdef OBJ_ELF 19592 for (i = 0; i < sizeof (reloc_names) / sizeof (struct reloc_entry); i++) 19593 hash_insert (arm_reloc_hsh, reloc_names[i].name, (PTR) (reloc_names + i)); 19594#endif 19595 19596 
set_constant_flonums (); 19597 19598 /* Set the cpu variant based on the command-line options. We prefer 19599 -mcpu= over -march= if both are set (as for GCC); and we prefer 19600 -mfpu= over any other way of setting the floating point unit. 19601 Use of legacy options with new options are faulted. */ 19602 if (legacy_cpu) 19603 { 19604 if (mcpu_cpu_opt || march_cpu_opt) 19605 as_bad (_("use of old and new-style options to set CPU type")); 19606 19607 mcpu_cpu_opt = legacy_cpu; 19608 } 19609 else if (!mcpu_cpu_opt) 19610 mcpu_cpu_opt = march_cpu_opt; 19611 19612 if (legacy_fpu) 19613 { 19614 if (mfpu_opt) 19615 as_bad (_("use of old and new-style options to set FPU type")); 19616 19617 mfpu_opt = legacy_fpu; 19618 } 19619 else if (!mfpu_opt) 19620 { 19621#if !(defined (TE_LINUX) || defined (TE_NetBSD) || defined (TE_VXWORKS)) 19622 /* Some environments specify a default FPU. If they don't, infer it 19623 from the processor. */ 19624 if (mcpu_fpu_opt) 19625 mfpu_opt = mcpu_fpu_opt; 19626 else 19627 mfpu_opt = march_fpu_opt; 19628#else 19629 mfpu_opt = &fpu_default; 19630#endif 19631 } 19632 19633 if (!mfpu_opt) 19634 { 19635 if (mcpu_cpu_opt != NULL) 19636 mfpu_opt = &fpu_default; 19637 else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5)) 19638 mfpu_opt = &fpu_arch_vfp_v2; 19639 else 19640 mfpu_opt = &fpu_arch_fpa; 19641 } 19642 19643#ifdef CPU_DEFAULT 19644 if (!mcpu_cpu_opt) 19645 { 19646 mcpu_cpu_opt = &cpu_default; 19647 selected_cpu = cpu_default; 19648 } 19649#else 19650 if (mcpu_cpu_opt) 19651 selected_cpu = *mcpu_cpu_opt; 19652 else 19653 mcpu_cpu_opt = &arm_arch_any; 19654#endif 19655 19656 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt); 19657 19658 autoselect_thumb_from_cpu_variant (); 19659 19660 arm_arch_used = thumb_arch_used = arm_arch_none; 19661 19662#if defined OBJ_COFF || defined OBJ_ELF 19663 { 19664 unsigned int flags = 0; 19665 19666#if defined OBJ_ELF 19667 flags = meabi_flags; 19668 19669 switch 
(meabi_flags) 19670 { 19671 case EF_ARM_EABI_UNKNOWN: 19672#endif 19673 /* Set the flags in the private structure. */ 19674 if (uses_apcs_26) flags |= F_APCS26; 19675 if (support_interwork) flags |= F_INTERWORK; 19676 if (uses_apcs_float) flags |= F_APCS_FLOAT; 19677 if (pic_code) flags |= F_PIC; 19678 if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard)) 19679 flags |= F_SOFT_FLOAT; 19680 19681 switch (mfloat_abi_opt) 19682 { 19683 case ARM_FLOAT_ABI_SOFT: 19684 case ARM_FLOAT_ABI_SOFTFP: 19685 flags |= F_SOFT_FLOAT; 19686 break; 19687 19688 case ARM_FLOAT_ABI_HARD: 19689 if (flags & F_SOFT_FLOAT) 19690 as_bad (_("hard-float conflicts with specified fpu")); 19691 break; 19692 } 19693 19694 /* Using pure-endian doubles (even if soft-float). */ 19695 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure)) 19696 flags |= F_VFP_FLOAT; 19697 19698#if defined OBJ_ELF 19699 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick)) 19700 flags |= EF_ARM_MAVERICK_FLOAT; 19701 break; 19702 19703 case EF_ARM_EABI_VER4: 19704 case EF_ARM_EABI_VER5: 19705 /* No additional flags to set. */ 19706 break; 19707 19708 default: 19709 abort (); 19710 } 19711#endif 19712 bfd_set_private_flags (stdoutput, flags); 19713 19714 /* We have run out flags in the COFF header to encode the 19715 status of ATPCS support, so instead we create a dummy, 19716 empty, debug section called .arm.atpcs. */ 19717 if (atpcs) 19718 { 19719 asection * sec; 19720 19721 sec = bfd_make_section (stdoutput, ".arm.atpcs"); 19722 19723 if (sec != NULL) 19724 { 19725 bfd_set_section_flags 19726 (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */); 19727 bfd_set_section_size (stdoutput, sec, 0); 19728 bfd_set_section_contents (stdoutput, sec, NULL, 0, 0); 19729 } 19730 } 19731 } 19732#endif 19733 19734 /* Record the CPU type as well. 
*/ 19735 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)) 19736 mach = bfd_mach_arm_iWMMXt2; 19737 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt)) 19738 mach = bfd_mach_arm_iWMMXt; 19739 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale)) 19740 mach = bfd_mach_arm_XScale; 19741 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick)) 19742 mach = bfd_mach_arm_ep9312; 19743 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e)) 19744 mach = bfd_mach_arm_5TE; 19745 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5)) 19746 { 19747 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t)) 19748 mach = bfd_mach_arm_5T; 19749 else 19750 mach = bfd_mach_arm_5; 19751 } 19752 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4)) 19753 { 19754 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t)) 19755 mach = bfd_mach_arm_4T; 19756 else 19757 mach = bfd_mach_arm_4; 19758 } 19759 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m)) 19760 mach = bfd_mach_arm_3M; 19761 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3)) 19762 mach = bfd_mach_arm_3; 19763 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s)) 19764 mach = bfd_mach_arm_2a; 19765 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2)) 19766 mach = bfd_mach_arm_2; 19767 else 19768 mach = bfd_mach_arm_unknown; 19769 19770 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach); 19771} 19772 19773/* Command line processing. */ 19774 19775/* md_parse_option 19776 Invocation line includes a switch not recognized by the base assembler. 19777 See if it's a processor-specific option. 19778 19779 This routine is somewhat complicated by the need for backwards 19780 compatibility (since older releases of gcc can't be changed). 19781 The new options try to make the interface as compatible as 19782 possible with GCC. 
      New options (supported) are:

	      -mcpu=<cpu name>	         Assemble for selected processor
	      -march=<architecture name> Assemble for selected architecture
	      -mfpu=<fpu architecture>	 Assemble for selected FPU.
	      -EB/-mbig-endian		 Big-endian
	      -EL/-mlittle-endian	 Little-endian
	      -k			 Generate PIC code
	      -mthumb			 Start in Thumb mode
	      -mthumb-interwork		 Code supports ARM/Thumb interworking

      For now we will also provide support for:

	      -mapcs-32			 32-bit Program counter
	      -mapcs-26			 26-bit Program counter
	      -mapcs-float		 Floats passed in FP registers
	      -mapcs-reentrant		 Reentrant code
	      -matpcs
      (sometime these will probably be replaced with -mapcs=<list of options>
      and -matpcs=<list of options>)

      The remaining options are only supported for back-wards compatibility.
      Cpu variants, the arm part is optional:
	      -m[arm]1		      Currently not supported.
	      -m[arm]2, -m[arm]250    Arm 2 and Arm 250 processor
	      -m[arm]3		      Arm 3 processor
	      -m[arm]6[xx],	      Arm 6 processors
	      -m[arm]7[xx][t][[d]m]   Arm 7 processors
	      -m[arm]8[10]	      Arm 8 processors
	      -m[arm]9[20][tdmi]      Arm 9 processors
	      -mstrongarm[110[0]]     StrongARM processors
	      -mxscale		      XScale processors
	      -m[arm]v[2345[t[e]]]    Arm architectures
	      -mall		      All (except the ARM1)
      FP variants:
	      -mfpa10, -mfpa11	      FPA10 and 11 co-processor instructions
	      -mfpe-old		      (No float load/store multiples)
	      -mvfpxd		      VFP Single precision
	      -mvfp		      All VFP
	      -mno-fpu		      Disable all floating point instructions

      The following CPU names are recognized:
	      arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
	      arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
	      arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
	      arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
	      arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
	      arm10t arm10e, arm1020t,
arm1020e, arm10200e, 19832 strongarm, strongarm110, strongarm1100, strongarm1110, xscale. 19833 19834 */ 19835 19836const char * md_shortopts = "m:k"; 19837 19838#ifdef ARM_BI_ENDIAN 19839#define OPTION_EB (OPTION_MD_BASE + 0) 19840#define OPTION_EL (OPTION_MD_BASE + 1) 19841#else 19842#if TARGET_BYTES_BIG_ENDIAN 19843#define OPTION_EB (OPTION_MD_BASE + 0) 19844#else 19845#define OPTION_EL (OPTION_MD_BASE + 1) 19846#endif 19847#endif 19848 19849struct option md_longopts[] = 19850{ 19851#ifdef OPTION_EB 19852 {"EB", no_argument, NULL, OPTION_EB}, 19853#endif 19854#ifdef OPTION_EL 19855 {"EL", no_argument, NULL, OPTION_EL}, 19856#endif 19857 {NULL, no_argument, NULL, 0} 19858}; 19859 19860size_t md_longopts_size = sizeof (md_longopts); 19861 19862struct arm_option_table 19863{ 19864 char *option; /* Option name to match. */ 19865 char *help; /* Help information. */ 19866 int *var; /* Variable to change. */ 19867 int value; /* What to change it to. */ 19868 char *deprecated; /* If non-null, print this message. */ 19869}; 19870 19871struct arm_option_table arm_opts[] = 19872{ 19873 {"k", N_("generate PIC code"), &pic_code, 1, NULL}, 19874 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL}, 19875 {"mthumb-interwork", N_("support ARM/Thumb interworking"), 19876 &support_interwork, 1, NULL}, 19877 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL}, 19878 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL}, 19879 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float, 19880 1, NULL}, 19881 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL}, 19882 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL}, 19883 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL}, 19884 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0, 19885 NULL}, 19886 19887 /* These are recognized by the assembler, but have no affect on code. 
*/ 19888 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL}, 19889 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL}, 19890 {NULL, NULL, NULL, 0, NULL} 19891}; 19892 19893struct arm_legacy_option_table 19894{ 19895 char *option; /* Option name to match. */ 19896 const arm_feature_set **var; /* Variable to change. */ 19897 const arm_feature_set value; /* What to change it to. */ 19898 char *deprecated; /* If non-null, print this message. */ 19899}; 19900 19901const struct arm_legacy_option_table arm_legacy_opts[] = 19902{ 19903 /* DON'T add any new processors to this list -- we want the whole list 19904 to go away... Add them to the processors table instead. */ 19905 {"marm1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")}, 19906 {"m1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")}, 19907 {"marm2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")}, 19908 {"m2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")}, 19909 {"marm250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")}, 19910 {"m250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")}, 19911 {"marm3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")}, 19912 {"m3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")}, 19913 {"marm6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")}, 19914 {"m6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")}, 19915 {"marm600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")}, 19916 {"m600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")}, 19917 {"marm610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")}, 19918 {"m610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")}, 19919 {"marm620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")}, 19920 {"m620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")}, 19921 {"marm7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")}, 19922 {"m7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")}, 19923 {"marm70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")}, 19924 {"m70", &legacy_cpu, ARM_ARCH_V3, N_("use 
-mcpu=arm70")}, 19925 {"marm700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")}, 19926 {"m700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")}, 19927 {"marm700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")}, 19928 {"m700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")}, 19929 {"marm710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")}, 19930 {"m710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")}, 19931 {"marm710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")}, 19932 {"m710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")}, 19933 {"marm720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")}, 19934 {"m720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")}, 19935 {"marm7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")}, 19936 {"m7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")}, 19937 {"marm7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")}, 19938 {"m7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")}, 19939 {"marm7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")}, 19940 {"m7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")}, 19941 {"marm7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")}, 19942 {"m7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")}, 19943 {"marm7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")}, 19944 {"m7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")}, 19945 {"marm7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")}, 19946 {"m7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")}, 19947 {"marm7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")}, 19948 {"m7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")}, 19949 {"marm7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")}, 19950 {"m7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")}, 19951 {"marm7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")}, 19952 {"m7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")}, 19953 {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use 
-mcpu=arm7tdmi")}, 19954 {"m7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")}, 19955 {"marm710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")}, 19956 {"m710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")}, 19957 {"marm720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")}, 19958 {"m720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")}, 19959 {"marm740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")}, 19960 {"m740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")}, 19961 {"marm8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")}, 19962 {"m8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")}, 19963 {"marm810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")}, 19964 {"m810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")}, 19965 {"marm9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")}, 19966 {"m9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")}, 19967 {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")}, 19968 {"m9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")}, 19969 {"marm920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")}, 19970 {"m920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")}, 19971 {"marm940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")}, 19972 {"m940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")}, 19973 {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")}, 19974 {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4, 19975 N_("use -mcpu=strongarm110")}, 19976 {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4, 19977 N_("use -mcpu=strongarm1100")}, 19978 {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4, 19979 N_("use -mcpu=strongarm1110")}, 19980 {"mxscale", &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")}, 19981 {"miwmmxt", &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")}, 19982 {"mall", &legacy_cpu, ARM_ANY, N_("use -mcpu=all")}, 19983 19984 /* Architecture variants -- don't add any more to this list either. 
*/ 19985 {"mv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")}, 19986 {"marmv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")}, 19987 {"mv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")}, 19988 {"marmv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")}, 19989 {"mv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")}, 19990 {"marmv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")}, 19991 {"mv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")}, 19992 {"marmv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")}, 19993 {"mv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")}, 19994 {"marmv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")}, 19995 {"mv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")}, 19996 {"marmv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")}, 19997 {"mv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")}, 19998 {"marmv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")}, 19999 {"mv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")}, 20000 {"marmv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")}, 20001 {"mv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")}, 20002 {"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")}, 20003 20004 /* Floating point variants -- don't add any more to this list either. */ 20005 {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")}, 20006 {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")}, 20007 {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")}, 20008 {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE, 20009 N_("use either -mfpu=softfpa or -mfpu=softvfp")}, 20010 20011 {NULL, NULL, ARM_ARCH_NONE, NULL} 20012}; 20013 20014struct arm_cpu_option_table 20015{ 20016 char *name; 20017 const arm_feature_set value; 20018 /* For some CPUs we assume an FPU unless the user explicitly sets 20019 -mfpu=... 
*/ 20020 const arm_feature_set default_fpu; 20021 /* The canonical name of the CPU, or NULL to use NAME converted to upper 20022 case. */ 20023 const char *canonical_name; 20024}; 20025 20026/* This list should, at a minimum, contain all the cpu names 20027 recognized by GCC. */ 20028static const struct arm_cpu_option_table arm_cpus[] = 20029{ 20030 {"all", ARM_ANY, FPU_ARCH_FPA, NULL}, 20031 {"arm1", ARM_ARCH_V1, FPU_ARCH_FPA, NULL}, 20032 {"arm2", ARM_ARCH_V2, FPU_ARCH_FPA, NULL}, 20033 {"arm250", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL}, 20034 {"arm3", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL}, 20035 {"arm6", ARM_ARCH_V3, FPU_ARCH_FPA, NULL}, 20036 {"arm60", ARM_ARCH_V3, FPU_ARCH_FPA, NULL}, 20037 {"arm600", ARM_ARCH_V3, FPU_ARCH_FPA, NULL}, 20038 {"arm610", ARM_ARCH_V3, FPU_ARCH_FPA, NULL}, 20039 {"arm620", ARM_ARCH_V3, FPU_ARCH_FPA, NULL}, 20040 {"arm7", ARM_ARCH_V3, FPU_ARCH_FPA, NULL}, 20041 {"arm7m", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL}, 20042 {"arm7d", ARM_ARCH_V3, FPU_ARCH_FPA, NULL}, 20043 {"arm7dm", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL}, 20044 {"arm7di", ARM_ARCH_V3, FPU_ARCH_FPA, NULL}, 20045 {"arm7dmi", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL}, 20046 {"arm70", ARM_ARCH_V3, FPU_ARCH_FPA, NULL}, 20047 {"arm700", ARM_ARCH_V3, FPU_ARCH_FPA, NULL}, 20048 {"arm700i", ARM_ARCH_V3, FPU_ARCH_FPA, NULL}, 20049 {"arm710", ARM_ARCH_V3, FPU_ARCH_FPA, NULL}, 20050 {"arm710t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL}, 20051 {"arm720", ARM_ARCH_V3, FPU_ARCH_FPA, NULL}, 20052 {"arm720t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL}, 20053 {"arm740t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL}, 20054 {"arm710c", ARM_ARCH_V3, FPU_ARCH_FPA, NULL}, 20055 {"arm7100", ARM_ARCH_V3, FPU_ARCH_FPA, NULL}, 20056 {"arm7500", ARM_ARCH_V3, FPU_ARCH_FPA, NULL}, 20057 {"arm7500fe", ARM_ARCH_V3, FPU_ARCH_FPA, NULL}, 20058 {"arm7t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL}, 20059 {"arm7tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL}, 20060 {"arm7tdmi-s", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL}, 20061 {"arm8", ARM_ARCH_V4, FPU_ARCH_FPA, NULL}, 20062 {"arm810", 
ARM_ARCH_V4, FPU_ARCH_FPA, NULL}, 20063 {"strongarm", ARM_ARCH_V4, FPU_ARCH_FPA, NULL}, 20064 {"strongarm1", ARM_ARCH_V4, FPU_ARCH_FPA, NULL}, 20065 {"strongarm110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL}, 20066 {"strongarm1100", ARM_ARCH_V4, FPU_ARCH_FPA, NULL}, 20067 {"strongarm1110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL}, 20068 {"arm9", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL}, 20069 {"arm920", ARM_ARCH_V4T, FPU_ARCH_FPA, "ARM920T"}, 20070 {"arm920t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL}, 20071 {"arm922t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL}, 20072 {"arm940t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL}, 20073 {"arm9tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL}, 20074 /* For V5 or later processors we default to using VFP; but the user 20075 should really set the FPU type explicitly. */ 20076 {"arm9e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL}, 20077 {"arm9e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL}, 20078 {"arm926ej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"}, 20079 {"arm926ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"}, 20080 {"arm926ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL}, 20081 {"arm946e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL}, 20082 {"arm946e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM946E-S"}, 20083 {"arm946e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL}, 20084 {"arm966e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL}, 20085 {"arm966e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM966E-S"}, 20086 {"arm966e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL}, 20087 {"arm968e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL}, 20088 {"arm10t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL}, 20089 {"arm10tdmi", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL}, 20090 {"arm10e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL}, 20091 {"arm1020", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM1020E"}, 20092 {"arm1020t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL}, 20093 {"arm1020e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL}, 20094 {"arm1022e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL}, 20095 {"arm1026ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM1026EJ-S"}, 20096 {"arm1026ej-s", 
ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL}, 20097 {"arm1136js", ARM_ARCH_V6, FPU_NONE, "ARM1136J-S"}, 20098 {"arm1136j-s", ARM_ARCH_V6, FPU_NONE, NULL}, 20099 {"arm1136jfs", ARM_ARCH_V6, FPU_ARCH_VFP_V2, "ARM1136JF-S"}, 20100 {"arm1136jf-s", ARM_ARCH_V6, FPU_ARCH_VFP_V2, NULL}, 20101 {"mpcore", ARM_ARCH_V6K, FPU_ARCH_VFP_V2, NULL}, 20102 {"mpcorenovfp", ARM_ARCH_V6K, FPU_NONE, NULL}, 20103 {"arm1156t2-s", ARM_ARCH_V6T2, FPU_NONE, NULL}, 20104 {"arm1156t2f-s", ARM_ARCH_V6T2, FPU_ARCH_VFP_V2, NULL}, 20105 {"arm1176jz-s", ARM_ARCH_V6ZK, FPU_NONE, NULL}, 20106 {"arm1176jzf-s", ARM_ARCH_V6ZK, FPU_ARCH_VFP_V2, NULL}, 20107 {"cortex-a8", ARM_ARCH_V7A, ARM_FEATURE(0, FPU_VFP_V3 20108 | FPU_NEON_EXT_V1), 20109 NULL}, 20110 {"cortex-a9", ARM_ARCH_V7A, ARM_FEATURE(0, FPU_VFP_V3 20111 | FPU_NEON_EXT_V1), 20112 NULL}, 20113 {"cortex-r4", ARM_ARCH_V7R, FPU_NONE, NULL}, 20114 {"cortex-m3", ARM_ARCH_V7M, FPU_NONE, NULL}, 20115 /* ??? XSCALE is really an architecture. */ 20116 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL}, 20117 /* ??? iwmmxt is not a processor. */ 20118 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL}, 20119 {"iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL}, 20120 {"i80200", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL}, 20121 /* Maverick */ 20122 {"ep9312", ARM_FEATURE(ARM_AEXT_V4T, ARM_CEXT_MAVERICK), FPU_ARCH_MAVERICK, "ARM920T"}, 20123 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL} 20124}; 20125 20126struct arm_arch_option_table 20127{ 20128 char *name; 20129 const arm_feature_set value; 20130 const arm_feature_set default_fpu; 20131}; 20132 20133/* This list should, at a minimum, contain all the architecture names 20134 recognized by GCC. 
*/ 20135static const struct arm_arch_option_table arm_archs[] = 20136{ 20137 {"all", ARM_ANY, FPU_ARCH_FPA}, 20138 {"armv1", ARM_ARCH_V1, FPU_ARCH_FPA}, 20139 {"armv2", ARM_ARCH_V2, FPU_ARCH_FPA}, 20140 {"armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA}, 20141 {"armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA}, 20142 {"armv3", ARM_ARCH_V3, FPU_ARCH_FPA}, 20143 {"armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA}, 20144 {"armv4", ARM_ARCH_V4, FPU_ARCH_FPA}, 20145 {"armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA}, 20146 {"armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA}, 20147 {"armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA}, 20148 {"armv5", ARM_ARCH_V5, FPU_ARCH_VFP}, 20149 {"armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP}, 20150 {"armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP}, 20151 {"armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP}, 20152 {"armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP}, 20153 {"armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP}, 20154 {"armv6", ARM_ARCH_V6, FPU_ARCH_VFP}, 20155 {"armv6j", ARM_ARCH_V6, FPU_ARCH_VFP}, 20156 {"armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP}, 20157 {"armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP}, 20158 {"armv6zk", ARM_ARCH_V6ZK, FPU_ARCH_VFP}, 20159 {"armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP}, 20160 {"armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP}, 20161 {"armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP}, 20162 {"armv6zkt2", ARM_ARCH_V6ZKT2, FPU_ARCH_VFP}, 20163 {"armv7", ARM_ARCH_V7, FPU_ARCH_VFP}, 20164 /* The official spelling of the ARMv7 profile variants is the dashed form. 20165 Accept the non-dashed form for compatibility with old toolchains. 
*/ 20166 {"armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP}, 20167 {"armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP}, 20168 {"armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP}, 20169 {"armv7-a", ARM_ARCH_V7A, FPU_ARCH_VFP}, 20170 {"armv7-r", ARM_ARCH_V7R, FPU_ARCH_VFP}, 20171 {"armv7-m", ARM_ARCH_V7M, FPU_ARCH_VFP}, 20172 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP}, 20173 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP}, 20174 {"iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP}, 20175 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE} 20176}; 20177 20178/* ISA extensions in the co-processor space. */ 20179struct arm_option_cpu_value_table 20180{ 20181 char *name; 20182 const arm_feature_set value; 20183}; 20184 20185static const struct arm_option_cpu_value_table arm_extensions[] = 20186{ 20187 {"maverick", ARM_FEATURE (0, ARM_CEXT_MAVERICK)}, 20188 {"xscale", ARM_FEATURE (0, ARM_CEXT_XSCALE)}, 20189 {"iwmmxt", ARM_FEATURE (0, ARM_CEXT_IWMMXT)}, 20190 {"iwmmxt2", ARM_FEATURE (0, ARM_CEXT_IWMMXT2)}, 20191 {"sec", ARM_FEATURE (ARM_EXT_V6Z, 0)}, 20192 {NULL, ARM_ARCH_NONE} 20193}; 20194 20195/* This list should, at a minimum, contain all the fpu names 20196 recognized by GCC. */ 20197static const struct arm_option_cpu_value_table arm_fpus[] = 20198{ 20199 {"softfpa", FPU_NONE}, 20200 {"fpe", FPU_ARCH_FPE}, 20201 {"fpe2", FPU_ARCH_FPE}, 20202 {"fpe3", FPU_ARCH_FPA}, /* Third release supports LFM/SFM. 
*/ 20203 {"fpa", FPU_ARCH_FPA}, 20204 {"fpa10", FPU_ARCH_FPA}, 20205 {"fpa11", FPU_ARCH_FPA}, 20206 {"arm7500fe", FPU_ARCH_FPA}, 20207 {"softvfp", FPU_ARCH_VFP}, 20208 {"softvfp+vfp", FPU_ARCH_VFP_V2}, 20209 {"vfp", FPU_ARCH_VFP_V2}, 20210 {"vfpv2", FPU_ARCH_VFP_V2}, 20211 {"vfp9", FPU_ARCH_VFP_V2}, 20212 {"vfp3", FPU_ARCH_VFP_V3}, 20213 {"vfpv3", FPU_ARCH_VFP_V3}, 20214 {"vfp10", FPU_ARCH_VFP_V2}, 20215 {"vfp10-r0", FPU_ARCH_VFP_V1}, 20216 {"vfpxd", FPU_ARCH_VFP_V1xD}, 20217 {"arm1020t", FPU_ARCH_VFP_V1}, 20218 {"arm1020e", FPU_ARCH_VFP_V2}, 20219 {"arm1136jfs", FPU_ARCH_VFP_V2}, 20220 {"arm1136jf-s", FPU_ARCH_VFP_V2}, 20221 {"maverick", FPU_ARCH_MAVERICK}, 20222 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1}, 20223 {NULL, ARM_ARCH_NONE} 20224}; 20225 20226struct arm_option_value_table 20227{ 20228 char *name; 20229 long value; 20230}; 20231 20232static const struct arm_option_value_table arm_float_abis[] = 20233{ 20234 {"hard", ARM_FLOAT_ABI_HARD}, 20235 {"softfp", ARM_FLOAT_ABI_SOFTFP}, 20236 {"soft", ARM_FLOAT_ABI_SOFT}, 20237 {NULL, 0} 20238}; 20239 20240#ifdef OBJ_ELF 20241/* We only know how to output GNU and ver 4/5 (AAELF) formats. */ 20242static const struct arm_option_value_table arm_eabis[] = 20243{ 20244 {"gnu", EF_ARM_EABI_UNKNOWN}, 20245 {"4", EF_ARM_EABI_VER4}, 20246 {"5", EF_ARM_EABI_VER5}, 20247 {NULL, 0} 20248}; 20249#endif 20250 20251struct arm_long_option_table 20252{ 20253 char * option; /* Substring to match. */ 20254 char * help; /* Help information. */ 20255 int (* func) (char * subopt); /* Function to decode sub-option. */ 20256 char * deprecated; /* If non-null, print this message. */ 20257}; 20258 20259static int 20260arm_parse_extension (char * str, const arm_feature_set **opt_p) 20261{ 20262 arm_feature_set *ext_set = xmalloc (sizeof (arm_feature_set)); 20263 20264 /* Copy the feature set, so that we can modify it. 
*/ 20265 *ext_set = **opt_p; 20266 *opt_p = ext_set; 20267 20268 while (str != NULL && *str != 0) 20269 { 20270 const struct arm_option_cpu_value_table * opt; 20271 char * ext; 20272 int optlen; 20273 20274 if (*str != '+') 20275 { 20276 as_bad (_("invalid architectural extension")); 20277 return 0; 20278 } 20279 20280 str++; 20281 ext = strchr (str, '+'); 20282 20283 if (ext != NULL) 20284 optlen = ext - str; 20285 else 20286 optlen = strlen (str); 20287 20288 if (optlen == 0) 20289 { 20290 as_bad (_("missing architectural extension")); 20291 return 0; 20292 } 20293 20294 for (opt = arm_extensions; opt->name != NULL; opt++) 20295 if (strncmp (opt->name, str, optlen) == 0) 20296 { 20297 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value); 20298 break; 20299 } 20300 20301 if (opt->name == NULL) 20302 { 20303 as_bad (_("unknown architectural extnsion `%s'"), str); 20304 return 0; 20305 } 20306 20307 str = ext; 20308 }; 20309 20310 return 1; 20311} 20312 20313static int 20314arm_parse_cpu (char * str) 20315{ 20316 const struct arm_cpu_option_table * opt; 20317 char * ext = strchr (str, '+'); 20318 int optlen; 20319 20320 if (ext != NULL) 20321 optlen = ext - str; 20322 else 20323 optlen = strlen (str); 20324 20325 if (optlen == 0) 20326 { 20327 as_bad (_("missing cpu name `%s'"), str); 20328 return 0; 20329 } 20330 20331 for (opt = arm_cpus; opt->name != NULL; opt++) 20332 if (strncmp (opt->name, str, optlen) == 0) 20333 { 20334 mcpu_cpu_opt = &opt->value; 20335 mcpu_fpu_opt = &opt->default_fpu; 20336 if (opt->canonical_name) 20337 strcpy(selected_cpu_name, opt->canonical_name); 20338 else 20339 { 20340 int i; 20341 for (i = 0; i < optlen; i++) 20342 selected_cpu_name[i] = TOUPPER (opt->name[i]); 20343 selected_cpu_name[i] = 0; 20344 } 20345 20346 if (ext != NULL) 20347 return arm_parse_extension (ext, &mcpu_cpu_opt); 20348 20349 return 1; 20350 } 20351 20352 as_bad (_("unknown cpu `%s'"), str); 20353 return 0; 20354} 20355 20356static int 20357arm_parse_arch 
(char * str) 20358{ 20359 const struct arm_arch_option_table *opt; 20360 char *ext = strchr (str, '+'); 20361 int optlen; 20362 20363 if (ext != NULL) 20364 optlen = ext - str; 20365 else 20366 optlen = strlen (str); 20367 20368 if (optlen == 0) 20369 { 20370 as_bad (_("missing architecture name `%s'"), str); 20371 return 0; 20372 } 20373 20374 for (opt = arm_archs; opt->name != NULL; opt++) 20375 if (strncmp (opt->name, str, optlen) == 0) 20376 { 20377 march_cpu_opt = &opt->value; 20378 march_fpu_opt = &opt->default_fpu; 20379 strcpy(selected_cpu_name, opt->name); 20380 20381 if (ext != NULL) 20382 return arm_parse_extension (ext, &march_cpu_opt); 20383 20384 return 1; 20385 } 20386 20387 as_bad (_("unknown architecture `%s'\n"), str); 20388 return 0; 20389} 20390 20391static int 20392arm_parse_fpu (char * str) 20393{ 20394 const struct arm_option_cpu_value_table * opt; 20395 20396 for (opt = arm_fpus; opt->name != NULL; opt++) 20397 if (streq (opt->name, str)) 20398 { 20399 mfpu_opt = &opt->value; 20400 return 1; 20401 } 20402 20403 as_bad (_("unknown floating point format `%s'\n"), str); 20404 return 0; 20405} 20406 20407static int 20408arm_parse_float_abi (char * str) 20409{ 20410 const struct arm_option_value_table * opt; 20411 20412 for (opt = arm_float_abis; opt->name != NULL; opt++) 20413 if (streq (opt->name, str)) 20414 { 20415 mfloat_abi_opt = opt->value; 20416 return 1; 20417 } 20418 20419 as_bad (_("unknown floating point abi `%s'\n"), str); 20420 return 0; 20421} 20422 20423#ifdef OBJ_ELF 20424static int 20425arm_parse_eabi (char * str) 20426{ 20427 const struct arm_option_value_table *opt; 20428 20429 for (opt = arm_eabis; opt->name != NULL; opt++) 20430 if (streq (opt->name, str)) 20431 { 20432 meabi_flags = opt->value; 20433 return 1; 20434 } 20435 as_bad (_("unknown EABI `%s'\n"), str); 20436 return 0; 20437} 20438#endif 20439 20440struct arm_long_option_table arm_long_opts[] = 20441{ 20442 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"), 
20443 arm_parse_cpu, NULL}, 20444 {"march=", N_("<arch name>\t assemble for architecture <arch name>"), 20445 arm_parse_arch, NULL}, 20446 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"), 20447 arm_parse_fpu, NULL}, 20448 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"), 20449 arm_parse_float_abi, NULL}, 20450#ifdef OBJ_ELF 20451 {"meabi=", N_("<ver>\t assemble for eabi version <ver>"), 20452 arm_parse_eabi, NULL}, 20453#endif 20454 {NULL, NULL, 0, NULL} 20455}; 20456 20457int 20458md_parse_option (int c, char * arg) 20459{ 20460 struct arm_option_table *opt; 20461 const struct arm_legacy_option_table *fopt; 20462 struct arm_long_option_table *lopt; 20463 20464 switch (c) 20465 { 20466#ifdef OPTION_EB 20467 case OPTION_EB: 20468 target_big_endian = 1; 20469 break; 20470#endif 20471 20472#ifdef OPTION_EL 20473 case OPTION_EL: 20474 target_big_endian = 0; 20475 break; 20476#endif 20477 20478 case 'a': 20479 /* Listing option. Just ignore these, we don't support additional 20480 ones. */ 20481 return 0; 20482 20483 default: 20484 for (opt = arm_opts; opt->option != NULL; opt++) 20485 { 20486 if (c == opt->option[0] 20487 && ((arg == NULL && opt->option[1] == 0) 20488 || streq (arg, opt->option + 1))) 20489 { 20490#if WARN_DEPRECATED 20491 /* If the option is deprecated, tell the user. */ 20492 if (opt->deprecated != NULL) 20493 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, 20494 arg ? arg : "", _(opt->deprecated)); 20495#endif 20496 20497 if (opt->var != NULL) 20498 *opt->var = opt->value; 20499 20500 return 1; 20501 } 20502 } 20503 20504 for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++) 20505 { 20506 if (c == fopt->option[0] 20507 && ((arg == NULL && fopt->option[1] == 0) 20508 || streq (arg, fopt->option + 1))) 20509 { 20510#if WARN_DEPRECATED 20511 /* If the option is deprecated, tell the user. */ 20512 if (fopt->deprecated != NULL) 20513 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, 20514 arg ? 
arg : "", _(fopt->deprecated)); 20515#endif 20516 20517 if (fopt->var != NULL) 20518 *fopt->var = &fopt->value; 20519 20520 return 1; 20521 } 20522 } 20523 20524 for (lopt = arm_long_opts; lopt->option != NULL; lopt++) 20525 { 20526 /* These options are expected to have an argument. */ 20527 if (c == lopt->option[0] 20528 && arg != NULL 20529 && strncmp (arg, lopt->option + 1, 20530 strlen (lopt->option + 1)) == 0) 20531 { 20532#if WARN_DEPRECATED 20533 /* If the option is deprecated, tell the user. */ 20534 if (lopt->deprecated != NULL) 20535 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg, 20536 _(lopt->deprecated)); 20537#endif 20538 20539 /* Call the sup-option parser. */ 20540 return lopt->func (arg + strlen (lopt->option) - 1); 20541 } 20542 } 20543 20544 return 0; 20545 } 20546 20547 return 1; 20548} 20549 20550void 20551md_show_usage (FILE * fp) 20552{ 20553 struct arm_option_table *opt; 20554 struct arm_long_option_table *lopt; 20555 20556 fprintf (fp, _(" ARM-specific assembler options:\n")); 20557 20558 for (opt = arm_opts; opt->option != NULL; opt++) 20559 if (opt->help != NULL) 20560 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help)); 20561 20562 for (lopt = arm_long_opts; lopt->option != NULL; lopt++) 20563 if (lopt->help != NULL) 20564 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help)); 20565 20566#ifdef OPTION_EB 20567 fprintf (fp, _("\ 20568 -EB assemble code for a big-endian cpu\n")); 20569#endif 20570 20571#ifdef OPTION_EL 20572 fprintf (fp, _("\ 20573 -EL assemble code for a little-endian cpu\n")); 20574#endif 20575} 20576 20577 20578#ifdef OBJ_ELF 20579typedef struct 20580{ 20581 int val; 20582 arm_feature_set flags; 20583} cpu_arch_ver_table; 20584 20585/* Mapping from CPU features to EABI CPU arch values. Table must be sorted 20586 least features first. 
*/ 20587static const cpu_arch_ver_table cpu_arch_ver[] = 20588{ 20589 {1, ARM_ARCH_V4}, 20590 {2, ARM_ARCH_V4T}, 20591 {3, ARM_ARCH_V5}, 20592 {4, ARM_ARCH_V5TE}, 20593 {5, ARM_ARCH_V5TEJ}, 20594 {6, ARM_ARCH_V6}, 20595 {7, ARM_ARCH_V6Z}, 20596 {8, ARM_ARCH_V6K}, 20597 {9, ARM_ARCH_V6T2}, 20598 {10, ARM_ARCH_V7A}, 20599 {10, ARM_ARCH_V7R}, 20600 {10, ARM_ARCH_V7M}, 20601 {0, ARM_ARCH_NONE} 20602}; 20603 20604/* Set the public EABI object attributes. */ 20605static void 20606aeabi_set_public_attributes (void) 20607{ 20608 int arch; 20609 arm_feature_set flags; 20610 arm_feature_set tmp; 20611 const cpu_arch_ver_table *p; 20612 20613 /* Choose the architecture based on the capabilities of the requested cpu 20614 (if any) and/or the instructions actually used. */ 20615 ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used); 20616 ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt); 20617 ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu); 20618 /*Allow the user to override the reported architecture. */ 20619 if (object_arch) 20620 { 20621 ARM_CLEAR_FEATURE (flags, flags, arm_arch_any); 20622 ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch); 20623 } 20624 20625 tmp = flags; 20626 arch = 0; 20627 for (p = cpu_arch_ver; p->val; p++) 20628 { 20629 if (ARM_CPU_HAS_FEATURE (tmp, p->flags)) 20630 { 20631 arch = p->val; 20632 ARM_CLEAR_FEATURE (tmp, tmp, p->flags); 20633 } 20634 } 20635 20636 /* Tag_CPU_name. */ 20637 if (selected_cpu_name[0]) 20638 { 20639 char *p; 20640 20641 p = selected_cpu_name; 20642 if (strncmp(p, "armv", 4) == 0) 20643 { 20644 int i; 20645 20646 p += 4; 20647 for (i = 0; p[i]; i++) 20648 p[i] = TOUPPER (p[i]); 20649 } 20650 bfd_elf_add_proc_attr_string (stdoutput, 5, p); 20651 } 20652 /* Tag_CPU_arch. */ 20653 bfd_elf_add_proc_attr_int (stdoutput, 6, arch); 20654 /* Tag_CPU_arch_profile. 
*/ 20655 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)) 20656 bfd_elf_add_proc_attr_int (stdoutput, 7, 'A'); 20657 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r)) 20658 bfd_elf_add_proc_attr_int (stdoutput, 7, 'R'); 20659 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m)) 20660 bfd_elf_add_proc_attr_int (stdoutput, 7, 'M'); 20661 /* Tag_ARM_ISA_use. */ 20662 if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_full)) 20663 bfd_elf_add_proc_attr_int (stdoutput, 8, 1); 20664 /* Tag_THUMB_ISA_use. */ 20665 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_full)) 20666 bfd_elf_add_proc_attr_int (stdoutput, 9, 20667 ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2) ? 2 : 1); 20668 /* Tag_VFP_arch. */ 20669 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v3) 20670 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v3)) 20671 bfd_elf_add_proc_attr_int (stdoutput, 10, 3); 20672 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v2) 20673 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v2)) 20674 bfd_elf_add_proc_attr_int (stdoutput, 10, 2); 20675 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1) 20676 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1) 20677 || ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1xd) 20678 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1xd)) 20679 bfd_elf_add_proc_attr_int (stdoutput, 10, 1); 20680 /* Tag_WMMX_arch. */ 20681 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_cext_iwmmxt) 20682 || ARM_CPU_HAS_FEATURE (arm_arch_used, arm_cext_iwmmxt)) 20683 bfd_elf_add_proc_attr_int (stdoutput, 11, 1); 20684 /* Tag_NEON_arch. */ 20685 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_neon_ext_v1) 20686 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_neon_ext_v1)) 20687 bfd_elf_add_proc_attr_int (stdoutput, 12, 1); 20688} 20689 20690/* Add the default contents for the .ARM.attributes section. 
*/ 20691void 20692arm_md_end (void) 20693{ 20694 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4) 20695 return; 20696 20697 aeabi_set_public_attributes (); 20698} 20699#endif /* OBJ_ELF */ 20700 20701 20702/* Parse a .cpu directive. */ 20703 20704static void 20705s_arm_cpu (int ignored ATTRIBUTE_UNUSED) 20706{ 20707 const struct arm_cpu_option_table *opt; 20708 char *name; 20709 char saved_char; 20710 20711 name = input_line_pointer; 20712 while (*input_line_pointer && !ISSPACE(*input_line_pointer)) 20713 input_line_pointer++; 20714 saved_char = *input_line_pointer; 20715 *input_line_pointer = 0; 20716 20717 /* Skip the first "all" entry. */ 20718 for (opt = arm_cpus + 1; opt->name != NULL; opt++) 20719 if (streq (opt->name, name)) 20720 { 20721 mcpu_cpu_opt = &opt->value; 20722 selected_cpu = opt->value; 20723 if (opt->canonical_name) 20724 strcpy(selected_cpu_name, opt->canonical_name); 20725 else 20726 { 20727 int i; 20728 for (i = 0; opt->name[i]; i++) 20729 selected_cpu_name[i] = TOUPPER (opt->name[i]); 20730 selected_cpu_name[i] = 0; 20731 } 20732 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt); 20733 *input_line_pointer = saved_char; 20734 demand_empty_rest_of_line (); 20735 return; 20736 } 20737 as_bad (_("unknown cpu `%s'"), name); 20738 *input_line_pointer = saved_char; 20739 ignore_rest_of_line (); 20740} 20741 20742 20743/* Parse a .arch directive. */ 20744 20745static void 20746s_arm_arch (int ignored ATTRIBUTE_UNUSED) 20747{ 20748 const struct arm_arch_option_table *opt; 20749 char saved_char; 20750 char *name; 20751 20752 name = input_line_pointer; 20753 while (*input_line_pointer && !ISSPACE(*input_line_pointer)) 20754 input_line_pointer++; 20755 saved_char = *input_line_pointer; 20756 *input_line_pointer = 0; 20757 20758 /* Skip the first "all" entry. 
*/ 20759 for (opt = arm_archs + 1; opt->name != NULL; opt++) 20760 if (streq (opt->name, name)) 20761 { 20762 mcpu_cpu_opt = &opt->value; 20763 selected_cpu = opt->value; 20764 strcpy(selected_cpu_name, opt->name); 20765 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt); 20766 *input_line_pointer = saved_char; 20767 demand_empty_rest_of_line (); 20768 return; 20769 } 20770 20771 as_bad (_("unknown architecture `%s'\n"), name); 20772 *input_line_pointer = saved_char; 20773 ignore_rest_of_line (); 20774} 20775 20776/* Parse a .arch_extension directive. */ 20777 20778static void 20779s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED) 20780{ 20781 const struct arm_option_cpu_value_table *opt; 20782 char saved_char; 20783 char *name; 20784 20785 name = input_line_pointer; 20786 while (*input_line_pointer && !ISSPACE(*input_line_pointer)) 20787 input_line_pointer++; 20788 saved_char = *input_line_pointer; 20789 *input_line_pointer = 0; 20790 20791 for (opt = arm_extensions; opt->name != NULL; opt++) 20792 if (streq (opt->name, name)) 20793 { 20794 ARM_MERGE_FEATURE_SETS (cpu_variant, cpu_variant, opt->value); 20795 *input_line_pointer = saved_char; 20796 demand_empty_rest_of_line (); 20797 return; 20798 } 20799 20800 as_bad (_("unknown architecture `%s'\n"), name); 20801 *input_line_pointer = saved_char; 20802 ignore_rest_of_line (); 20803} 20804 20805/* Parse a .object_arch directive. */ 20806 20807static void 20808s_arm_object_arch (int ignored ATTRIBUTE_UNUSED) 20809{ 20810 const struct arm_arch_option_table *opt; 20811 char saved_char; 20812 char *name; 20813 20814 name = input_line_pointer; 20815 while (*input_line_pointer && !ISSPACE(*input_line_pointer)) 20816 input_line_pointer++; 20817 saved_char = *input_line_pointer; 20818 *input_line_pointer = 0; 20819 20820 /* Skip the first "all" entry. 
*/ 20821 for (opt = arm_archs + 1; opt->name != NULL; opt++) 20822 if (streq (opt->name, name)) 20823 { 20824 object_arch = &opt->value; 20825 *input_line_pointer = saved_char; 20826 demand_empty_rest_of_line (); 20827 return; 20828 } 20829 20830 as_bad (_("unknown architecture `%s'\n"), name); 20831 *input_line_pointer = saved_char; 20832 ignore_rest_of_line (); 20833} 20834 20835 20836/* Parse a .fpu directive. */ 20837 20838static void 20839s_arm_fpu (int ignored ATTRIBUTE_UNUSED) 20840{ 20841 const struct arm_option_cpu_value_table *opt; 20842 char saved_char; 20843 char *name; 20844 20845 name = input_line_pointer; 20846 while (*input_line_pointer && !ISSPACE(*input_line_pointer)) 20847 input_line_pointer++; 20848 saved_char = *input_line_pointer; 20849 *input_line_pointer = 0; 20850 20851 for (opt = arm_fpus; opt->name != NULL; opt++) 20852 if (streq (opt->name, name)) 20853 { 20854 mfpu_opt = &opt->value; 20855 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt); 20856 *input_line_pointer = saved_char; 20857 demand_empty_rest_of_line (); 20858 return; 20859 } 20860 20861 as_bad (_("unknown floating point format `%s'\n"), name); 20862 *input_line_pointer = saved_char; 20863 ignore_rest_of_line (); 20864} 20865 20866/* Copy symbol information. */ 20867void 20868arm_copy_symbol_attributes (symbolS *dest, symbolS *src) 20869{ 20870 ARM_GET_FLAG (dest) = ARM_GET_FLAG (src); 20871} 20872