tc-arm.c revision 248459
1/* tc-arm.c -- Assemble for the ARM 2 Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 3 2004, 2005, 2006 4 Free Software Foundation, Inc. 5 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org) 6 Modified by David Taylor (dtaylor@armltd.co.uk) 7 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com) 8 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com) 9 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com) 10 11 This file is part of GAS, the GNU Assembler. 12 13 GAS is free software; you can redistribute it and/or modify 14 it under the terms of the GNU General Public License as published by 15 the Free Software Foundation; either version 2, or (at your option) 16 any later version. 17 18 GAS is distributed in the hope that it will be useful, 19 but WITHOUT ANY WARRANTY; without even the implied warranty of 20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 21 GNU General Public License for more details. 22 23 You should have received a copy of the GNU General Public License 24 along with GAS; see the file COPYING. If not, write to the Free 25 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA 26 02110-1301, USA. */ 27 28#include <limits.h> 29#include <stdarg.h> 30#define NO_RELOC 0 31#include "as.h" 32#include "safe-ctype.h" 33#include "subsegs.h" 34#include "obstack.h" 35 36#include "opcode/arm.h" 37 38#ifdef OBJ_ELF 39#include "elf/arm.h" 40#include "dw2gencfi.h" 41#endif 42 43#include "dwarf2dbg.h" 44 45#define WARN_DEPRECATED 1 46 47#ifdef OBJ_ELF 48/* Must be at least the size of the largest unwind opcode (currently two). */ 49#define ARM_OPCODE_CHUNK_SIZE 8 50 51/* This structure holds the unwinding state. */ 52 53static struct 54{ 55 symbolS * proc_start; 56 symbolS * table_entry; 57 symbolS * personality_routine; 58 int personality_index; 59 /* The segment containing the function. 
*/ 60 segT saved_seg; 61 subsegT saved_subseg; 62 /* Opcodes generated from this function. */ 63 unsigned char * opcodes; 64 int opcode_count; 65 int opcode_alloc; 66 /* The number of bytes pushed to the stack. */ 67 offsetT frame_size; 68 /* We don't add stack adjustment opcodes immediately so that we can merge 69 multiple adjustments. We can also omit the final adjustment 70 when using a frame pointer. */ 71 offsetT pending_offset; 72 /* These two fields are set by both unwind_movsp and unwind_setfp. They 73 hold the reg+offset to use when restoring sp from a frame pointer. */ 74 offsetT fp_offset; 75 int fp_reg; 76 /* Nonzero if an unwind_setfp directive has been seen. */ 77 unsigned fp_used:1; 78 /* Nonzero if the last opcode restores sp from fp_reg. */ 79 unsigned sp_restored:1; 80} unwind; 81 82/* Bit N indicates that an R_ARM_NONE relocation has been output for 83 __aeabi_unwind_cpp_prN already if set. This enables dependencies to be 84 emitted only once per section, to save unnecessary bloat. */ 85static unsigned int marked_pr_dependency = 0; 86 87#endif /* OBJ_ELF */ 88 89/* Results from operand parsing worker functions. */ 90 91typedef enum 92{ 93 PARSE_OPERAND_SUCCESS, 94 PARSE_OPERAND_FAIL, 95 PARSE_OPERAND_FAIL_NO_BACKTRACK 96} parse_operand_result; 97 98enum arm_float_abi 99{ 100 ARM_FLOAT_ABI_HARD, 101 ARM_FLOAT_ABI_SOFTFP, 102 ARM_FLOAT_ABI_SOFT 103}; 104 105/* Types of processor to assemble for. */ 106#ifndef CPU_DEFAULT 107#if defined __XSCALE__ 108#define CPU_DEFAULT ARM_ARCH_XSCALE 109#else 110#if defined __thumb__ 111#define CPU_DEFAULT ARM_ARCH_V5T 112#endif 113#endif 114#endif 115 116#ifndef FPU_DEFAULT 117# ifdef TE_LINUX 118# define FPU_DEFAULT FPU_ARCH_FPA 119# elif defined (TE_NetBSD) 120# ifdef OBJ_ELF 121# define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */ 122# else 123 /* Legacy a.out format. */ 124# define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. 
*/ 125# endif 126# elif defined (TE_VXWORKS) 127# define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */ 128# else 129 /* For backwards compatibility, default to FPA. */ 130# define FPU_DEFAULT FPU_ARCH_FPA 131# endif 132#endif /* ifndef FPU_DEFAULT */ 133 134#define streq(a, b) (strcmp (a, b) == 0) 135 136static arm_feature_set cpu_variant; 137static arm_feature_set arm_arch_used; 138static arm_feature_set thumb_arch_used; 139 140/* Flags stored in private area of BFD structure. */ 141static int uses_apcs_26 = FALSE; 142static int atpcs = FALSE; 143static int support_interwork = FALSE; 144static int uses_apcs_float = FALSE; 145static int pic_code = FALSE; 146 147/* Variables that we set while parsing command-line options. Once all 148 options have been read we re-process these values to set the real 149 assembly flags. */ 150static const arm_feature_set *legacy_cpu = NULL; 151static const arm_feature_set *legacy_fpu = NULL; 152 153static const arm_feature_set *mcpu_cpu_opt = NULL; 154static const arm_feature_set *mcpu_fpu_opt = NULL; 155static const arm_feature_set *march_cpu_opt = NULL; 156static const arm_feature_set *march_fpu_opt = NULL; 157static const arm_feature_set *mfpu_opt = NULL; 158static const arm_feature_set *object_arch = NULL; 159 160/* Constants for known architecture features. 
*/ 161static const arm_feature_set fpu_default = FPU_DEFAULT; 162static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1; 163static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2; 164static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3; 165static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1; 166static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA; 167static const arm_feature_set fpu_any_hard = FPU_ANY_HARD; 168static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK; 169static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE; 170 171#ifdef CPU_DEFAULT 172static const arm_feature_set cpu_default = CPU_DEFAULT; 173#endif 174 175static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0); 176static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V1, 0); 177static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0); 178static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0); 179static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0); 180static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0); 181static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0); 182static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0); 183static const arm_feature_set arm_ext_v4t_5 = 184 ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0); 185static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0); 186static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0); 187static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0); 188static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0); 189static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0); 190static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0); 191static const arm_feature_set arm_ext_v6z = ARM_FEATURE (ARM_EXT_V6Z, 0); 192static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 
0); 193static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0); 194static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0); 195static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0); 196static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0); 197static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0); 198static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0); 199 200static const arm_feature_set arm_arch_any = ARM_ANY; 201static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1); 202static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2; 203static const arm_feature_set arm_arch_none = ARM_ARCH_NONE; 204 205static const arm_feature_set arm_cext_iwmmxt2 = 206 ARM_FEATURE (0, ARM_CEXT_IWMMXT2); 207static const arm_feature_set arm_cext_iwmmxt = 208 ARM_FEATURE (0, ARM_CEXT_IWMMXT); 209static const arm_feature_set arm_cext_xscale = 210 ARM_FEATURE (0, ARM_CEXT_XSCALE); 211static const arm_feature_set arm_cext_maverick = 212 ARM_FEATURE (0, ARM_CEXT_MAVERICK); 213static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1); 214static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2); 215static const arm_feature_set fpu_vfp_ext_v1xd = 216 ARM_FEATURE (0, FPU_VFP_EXT_V1xD); 217static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1); 218static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2); 219static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3); 220static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1); 221static const arm_feature_set fpu_vfp_v3_or_neon_ext = 222 ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3); 223 224static int mfloat_abi_opt = -1; 225/* Record user cpu selection for object attributes. */ 226static arm_feature_set selected_cpu = ARM_ARCH_NONE; 227/* Must be long enough to hold any of the names in arm_cpus. 
*/ 228static char selected_cpu_name[16]; 229#ifdef OBJ_ELF 230# ifdef EABI_DEFAULT 231static int meabi_flags = EABI_DEFAULT; 232# else 233static int meabi_flags = EF_ARM_EABI_UNKNOWN; 234# endif 235 236bfd_boolean 237arm_is_eabi(void) 238{ 239 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4); 240} 241#endif 242 243#ifdef OBJ_ELF 244/* Pre-defined "_GLOBAL_OFFSET_TABLE_" */ 245symbolS * GOT_symbol; 246#endif 247 248/* 0: assemble for ARM, 249 1: assemble for Thumb, 250 2: assemble for Thumb even though target CPU does not support thumb 251 instructions. */ 252static int thumb_mode = 0; 253 254/* If unified_syntax is true, we are processing the new unified 255 ARM/Thumb syntax. Important differences from the old ARM mode: 256 257 - Immediate operands do not require a # prefix. 258 - Conditional affixes always appear at the end of the 259 instruction. (For backward compatibility, those instructions 260 that formerly had them in the middle, continue to accept them 261 there.) 262 - The IT instruction may appear, and if it does is validated 263 against subsequent conditional affixes. It does not generate 264 machine code. 265 266 Important differences from the old Thumb mode: 267 268 - Immediate operands do not require a # prefix. 269 - Most of the V6T2 instructions are only available in unified mode. 270 - The .N and .W suffixes are recognized and honored (it is an error 271 if they cannot be honored). 272 - All instructions set the flags if and only if they have an 's' affix. 273 - Conditional affixes may be used. They are validated against 274 preceding IT instructions. Unlike ARM mode, you cannot use a 275 conditional affix except in the scope of an IT instruction. 
*/ 276 277static bfd_boolean unified_syntax = FALSE; 278 279enum neon_el_type 280{ 281 NT_invtype, 282 NT_untyped, 283 NT_integer, 284 NT_float, 285 NT_poly, 286 NT_signed, 287 NT_unsigned 288}; 289 290struct neon_type_el 291{ 292 enum neon_el_type type; 293 unsigned size; 294}; 295 296#define NEON_MAX_TYPE_ELS 4 297 298struct neon_type 299{ 300 struct neon_type_el el[NEON_MAX_TYPE_ELS]; 301 unsigned elems; 302}; 303 304struct arm_it 305{ 306 const char * error; 307 unsigned long instruction; 308 int size; 309 int size_req; 310 int cond; 311 /* "uncond_value" is set to the value in place of the conditional field in 312 unconditional versions of the instruction, or -1 if nothing is 313 appropriate. */ 314 int uncond_value; 315 struct neon_type vectype; 316 /* Set to the opcode if the instruction needs relaxation. 317 Zero if the instruction is not relaxed. */ 318 unsigned long relax; 319 struct 320 { 321 bfd_reloc_code_real_type type; 322 expressionS exp; 323 int pc_rel; 324 } reloc; 325 326 struct 327 { 328 unsigned reg; 329 signed int imm; 330 struct neon_type_el vectype; 331 unsigned present : 1; /* Operand present. */ 332 unsigned isreg : 1; /* Operand was a register. */ 333 unsigned immisreg : 1; /* .imm field is a second register. */ 334 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */ 335 unsigned immisalign : 1; /* Immediate is an alignment specifier. */ 336 unsigned immisfloat : 1; /* Immediate was parsed as a float. */ 337 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV 338 instructions. This allows us to disambiguate ARM <-> vector insns. */ 339 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */ 340 unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg. */ 341 unsigned isquad : 1; /* Operand is Neon quad-precision register. */ 342 unsigned issingle : 1; /* Operand is VFP single-precision register. */ 343 unsigned hasreloc : 1; /* Operand has relocation suffix. 
*/ 344 unsigned writeback : 1; /* Operand has trailing ! */ 345 unsigned preind : 1; /* Preindexed address. */ 346 unsigned postind : 1; /* Postindexed address. */ 347 unsigned negative : 1; /* Index register was negated. */ 348 unsigned shifted : 1; /* Shift applied to operation. */ 349 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */ 350 } operands[6]; 351}; 352 353static struct arm_it inst; 354 355#define NUM_FLOAT_VALS 8 356 357const char * fp_const[] = 358{ 359 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0 360}; 361 362/* Number of littlenums required to hold an extended precision number. */ 363#define MAX_LITTLENUMS 6 364 365LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS]; 366 367#define FAIL (-1) 368#define SUCCESS (0) 369 370#define SUFF_S 1 371#define SUFF_D 2 372#define SUFF_E 3 373#define SUFF_P 4 374 375#define CP_T_X 0x00008000 376#define CP_T_Y 0x00400000 377 378#define CONDS_BIT 0x00100000 379#define LOAD_BIT 0x00100000 380 381#define DOUBLE_LOAD_FLAG 0x00000001 382 383struct asm_cond 384{ 385 const char * template; 386 unsigned long value; 387}; 388 389#define COND_ALWAYS 0xE 390 391struct asm_psr 392{ 393 const char *template; 394 unsigned long field; 395}; 396 397struct asm_barrier_opt 398{ 399 const char *template; 400 unsigned long value; 401}; 402 403/* The bit that distinguishes CPSR and SPSR. */ 404#define SPSR_BIT (1 << 22) 405 406/* The individual PSR flag bits. */ 407#define PSR_c (1 << 16) 408#define PSR_x (1 << 17) 409#define PSR_s (1 << 18) 410#define PSR_f (1 << 19) 411 412struct reloc_entry 413{ 414 char *name; 415 bfd_reloc_code_real_type reloc; 416}; 417 418enum vfp_reg_pos 419{ 420 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn, 421 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn 422}; 423 424enum vfp_ldstm_type 425{ 426 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX 427}; 428 429/* Bits for DEFINED field in neon_typed_alias. 
*/ 430#define NTA_HASTYPE 1 431#define NTA_HASINDEX 2 432 433struct neon_typed_alias 434{ 435 unsigned char defined; 436 unsigned char index; 437 struct neon_type_el eltype; 438}; 439 440/* ARM register categories. This includes coprocessor numbers and various 441 architecture extensions' registers. */ 442enum arm_reg_type 443{ 444 REG_TYPE_RN, 445 REG_TYPE_CP, 446 REG_TYPE_CN, 447 REG_TYPE_FN, 448 REG_TYPE_VFS, 449 REG_TYPE_VFD, 450 REG_TYPE_NQ, 451 REG_TYPE_VFSD, 452 REG_TYPE_NDQ, 453 REG_TYPE_NSDQ, 454 REG_TYPE_VFC, 455 REG_TYPE_MVF, 456 REG_TYPE_MVD, 457 REG_TYPE_MVFX, 458 REG_TYPE_MVDX, 459 REG_TYPE_MVAX, 460 REG_TYPE_DSPSC, 461 REG_TYPE_MMXWR, 462 REG_TYPE_MMXWC, 463 REG_TYPE_MMXWCG, 464 REG_TYPE_XSCALE, 465}; 466 467/* Structure for a hash table entry for a register. 468 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra 469 information which states whether a vector type or index is specified (for a 470 register alias created with .dn or .qn). Otherwise NEON should be NULL. */ 471struct reg_entry 472{ 473 const char *name; 474 unsigned char number; 475 unsigned char type; 476 unsigned char builtin; 477 struct neon_typed_alias *neon; 478}; 479 480/* Diagnostics used when we don't get a register of the expected type. 
*/ 481const char *const reg_expected_msgs[] = 482{ 483 N_("ARM register expected"), 484 N_("bad or missing co-processor number"), 485 N_("co-processor register expected"), 486 N_("FPA register expected"), 487 N_("VFP single precision register expected"), 488 N_("VFP/Neon double precision register expected"), 489 N_("Neon quad precision register expected"), 490 N_("VFP single or double precision register expected"), 491 N_("Neon double or quad precision register expected"), 492 N_("VFP single, double or Neon quad precision register expected"), 493 N_("VFP system register expected"), 494 N_("Maverick MVF register expected"), 495 N_("Maverick MVD register expected"), 496 N_("Maverick MVFX register expected"), 497 N_("Maverick MVDX register expected"), 498 N_("Maverick MVAX register expected"), 499 N_("Maverick DSPSC register expected"), 500 N_("iWMMXt data register expected"), 501 N_("iWMMXt control register expected"), 502 N_("iWMMXt scalar register expected"), 503 N_("XScale accumulator register expected"), 504}; 505 506/* Some well known registers that we refer to directly elsewhere. */ 507#define REG_SP 13 508#define REG_LR 14 509#define REG_PC 15 510 511/* ARM instructions take 4bytes in the object file, Thumb instructions 512 take 2: */ 513#define INSN_SIZE 4 514 515struct asm_opcode 516{ 517 /* Basic string to match. */ 518 const char *template; 519 520 /* Parameters to instruction. */ 521 unsigned char operands[8]; 522 523 /* Conditional tag - see opcode_lookup. */ 524 unsigned int tag : 4; 525 526 /* Basic instruction code. */ 527 unsigned int avalue : 28; 528 529 /* Thumb-format instruction code. */ 530 unsigned int tvalue; 531 532 /* Which architecture variant provides this instruction. */ 533 const arm_feature_set *avariant; 534 const arm_feature_set *tvariant; 535 536 /* Function to call to encode instruction in ARM format. */ 537 void (* aencode) (void); 538 539 /* Function to call to encode instruction in Thumb format. 
*/ 540 void (* tencode) (void); 541}; 542 543/* Defines for various bits that we will want to toggle. */ 544#define INST_IMMEDIATE 0x02000000 545#define OFFSET_REG 0x02000000 546#define HWOFFSET_IMM 0x00400000 547#define SHIFT_BY_REG 0x00000010 548#define PRE_INDEX 0x01000000 549#define INDEX_UP 0x00800000 550#define WRITE_BACK 0x00200000 551#define LDM_TYPE_2_OR_3 0x00400000 552#define CPSI_MMOD 0x00020000 553 554#define LITERAL_MASK 0xf000f000 555#define OPCODE_MASK 0xfe1fffff 556#define V4_STR_BIT 0x00000020 557 558#define T2_SUBS_PC_LR 0xf3de8f00 559 560#define DATA_OP_SHIFT 21 561 562#define T2_OPCODE_MASK 0xfe1fffff 563#define T2_DATA_OP_SHIFT 21 564 565/* Codes to distinguish the arithmetic instructions. */ 566#define OPCODE_AND 0 567#define OPCODE_EOR 1 568#define OPCODE_SUB 2 569#define OPCODE_RSB 3 570#define OPCODE_ADD 4 571#define OPCODE_ADC 5 572#define OPCODE_SBC 6 573#define OPCODE_RSC 7 574#define OPCODE_TST 8 575#define OPCODE_TEQ 9 576#define OPCODE_CMP 10 577#define OPCODE_CMN 11 578#define OPCODE_ORR 12 579#define OPCODE_MOV 13 580#define OPCODE_BIC 14 581#define OPCODE_MVN 15 582 583#define T2_OPCODE_AND 0 584#define T2_OPCODE_BIC 1 585#define T2_OPCODE_ORR 2 586#define T2_OPCODE_ORN 3 587#define T2_OPCODE_EOR 4 588#define T2_OPCODE_ADD 8 589#define T2_OPCODE_ADC 10 590#define T2_OPCODE_SBC 11 591#define T2_OPCODE_SUB 13 592#define T2_OPCODE_RSB 14 593 594#define T_OPCODE_MUL 0x4340 595#define T_OPCODE_TST 0x4200 596#define T_OPCODE_CMN 0x42c0 597#define T_OPCODE_NEG 0x4240 598#define T_OPCODE_MVN 0x43c0 599 600#define T_OPCODE_ADD_R3 0x1800 601#define T_OPCODE_SUB_R3 0x1a00 602#define T_OPCODE_ADD_HI 0x4400 603#define T_OPCODE_ADD_ST 0xb000 604#define T_OPCODE_SUB_ST 0xb080 605#define T_OPCODE_ADD_SP 0xa800 606#define T_OPCODE_ADD_PC 0xa000 607#define T_OPCODE_ADD_I8 0x3000 608#define T_OPCODE_SUB_I8 0x3800 609#define T_OPCODE_ADD_I3 0x1c00 610#define T_OPCODE_SUB_I3 0x1e00 611 612#define T_OPCODE_ASR_R 0x4100 613#define T_OPCODE_LSL_R 0x4080 
614#define T_OPCODE_LSR_R 0x40c0 615#define T_OPCODE_ROR_R 0x41c0 616#define T_OPCODE_ASR_I 0x1000 617#define T_OPCODE_LSL_I 0x0000 618#define T_OPCODE_LSR_I 0x0800 619 620#define T_OPCODE_MOV_I8 0x2000 621#define T_OPCODE_CMP_I8 0x2800 622#define T_OPCODE_CMP_LR 0x4280 623#define T_OPCODE_MOV_HR 0x4600 624#define T_OPCODE_CMP_HR 0x4500 625 626#define T_OPCODE_LDR_PC 0x4800 627#define T_OPCODE_LDR_SP 0x9800 628#define T_OPCODE_STR_SP 0x9000 629#define T_OPCODE_LDR_IW 0x6800 630#define T_OPCODE_STR_IW 0x6000 631#define T_OPCODE_LDR_IH 0x8800 632#define T_OPCODE_STR_IH 0x8000 633#define T_OPCODE_LDR_IB 0x7800 634#define T_OPCODE_STR_IB 0x7000 635#define T_OPCODE_LDR_RW 0x5800 636#define T_OPCODE_STR_RW 0x5000 637#define T_OPCODE_LDR_RH 0x5a00 638#define T_OPCODE_STR_RH 0x5200 639#define T_OPCODE_LDR_RB 0x5c00 640#define T_OPCODE_STR_RB 0x5400 641 642#define T_OPCODE_PUSH 0xb400 643#define T_OPCODE_POP 0xbc00 644 645#define T_OPCODE_BRANCH 0xe000 646 647#define THUMB_SIZE 2 /* Size of thumb instruction. 
*/ 648#define THUMB_PP_PC_LR 0x0100 649#define THUMB_LOAD_BIT 0x0800 650#define THUMB2_LOAD_BIT 0x00100000 651 652#define BAD_ARGS _("bad arguments to instruction") 653#define BAD_PC _("r15 not allowed here") 654#define BAD_COND _("instruction cannot be conditional") 655#define BAD_OVERLAP _("registers may not be the same") 656#define BAD_HIREG _("lo register required") 657#define BAD_THUMB32 _("instruction not supported in Thumb16 mode") 658#define BAD_ADDR_MODE _("instruction does not accept this addressing mode"); 659#define BAD_BRANCH _("branch must be last instruction in IT block") 660#define BAD_NOT_IT _("instruction not allowed in IT block") 661#define BAD_FPU _("selected FPU does not support instruction") 662 663static struct hash_control *arm_ops_hsh; 664static struct hash_control *arm_cond_hsh; 665static struct hash_control *arm_shift_hsh; 666static struct hash_control *arm_psr_hsh; 667static struct hash_control *arm_v7m_psr_hsh; 668static struct hash_control *arm_reg_hsh; 669static struct hash_control *arm_reloc_hsh; 670static struct hash_control *arm_barrier_opt_hsh; 671 672/* Stuff needed to resolve the label ambiguity 673 As: 674 ... 675 label: <insn> 676 may differ from: 677 ... 678 label: 679 <insn> 680*/ 681 682symbolS * last_label_seen; 683static int label_is_thumb_function_name = FALSE; 684 685/* Literal pool structure. Held on a per-section 686 and per-sub-section basis. */ 687 688#define MAX_LITERAL_POOL_SIZE 1024 689typedef struct literal_pool 690{ 691 expressionS literals [MAX_LITERAL_POOL_SIZE]; 692 unsigned int next_free_entry; 693 unsigned int id; 694 symbolS * symbol; 695 segT section; 696 subsegT sub_section; 697 struct literal_pool * next; 698} literal_pool; 699 700/* Pointer to a linked list of literal pools. */ 701literal_pool * list_of_pools = NULL; 702 703/* State variables for IT block handling. */ 704static bfd_boolean current_it_mask = 0; 705static int current_cc; 706 707 708/* Pure syntax. 
*/ 709 710/* This array holds the chars that always start a comment. If the 711 pre-processor is disabled, these aren't very useful. */ 712const char comment_chars[] = "@"; 713 714/* This array holds the chars that only start a comment at the beginning of 715 a line. If the line seems to have the form '# 123 filename' 716 .line and .file directives will appear in the pre-processed output. */ 717/* Note that input_file.c hand checks for '#' at the beginning of the 718 first line of the input file. This is because the compiler outputs 719 #NO_APP at the beginning of its output. */ 720/* Also note that comments like this one will always work. */ 721const char line_comment_chars[] = "#"; 722 723const char line_separator_chars[] = ";"; 724 725/* Chars that can be used to separate mant 726 from exp in floating point numbers. */ 727const char EXP_CHARS[] = "eE"; 728 729/* Chars that mean this number is a floating point constant. */ 730/* As in 0f12.456 */ 731/* or 0d1.2345e12 */ 732 733const char FLT_CHARS[] = "rRsSfFdDxXeEpP"; 734 735/* Prefix characters that indicate the start of an immediate 736 value. */ 737#define is_immediate_prefix(C) ((C) == '#' || (C) == '$') 738 739/* Separator character handling. */ 740 741#define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0) 742 743static inline int 744skip_past_char (char ** str, char c) 745{ 746 if (**str == c) 747 { 748 (*str)++; 749 return SUCCESS; 750 } 751 else 752 return FAIL; 753} 754#define skip_past_comma(str) skip_past_char (str, ',') 755 756/* Arithmetic expressions (possibly involving symbols). */ 757 758/* Return TRUE if anything in the expression is a bignum. 
*/ 759 760static int 761walk_no_bignums (symbolS * sp) 762{ 763 if (symbol_get_value_expression (sp)->X_op == O_big) 764 return 1; 765 766 if (symbol_get_value_expression (sp)->X_add_symbol) 767 { 768 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol) 769 || (symbol_get_value_expression (sp)->X_op_symbol 770 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol))); 771 } 772 773 return 0; 774} 775 776static int in_my_get_expression = 0; 777 778/* Third argument to my_get_expression. */ 779#define GE_NO_PREFIX 0 780#define GE_IMM_PREFIX 1 781#define GE_OPT_PREFIX 2 782/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit) 783 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */ 784#define GE_OPT_PREFIX_BIG 3 785 786static int 787my_get_expression (expressionS * ep, char ** str, int prefix_mode) 788{ 789 char * save_in; 790 segT seg; 791 792 /* In unified syntax, all prefixes are optional. */ 793 if (unified_syntax) 794 prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode 795 : GE_OPT_PREFIX; 796 797 switch (prefix_mode) 798 { 799 case GE_NO_PREFIX: break; 800 case GE_IMM_PREFIX: 801 if (!is_immediate_prefix (**str)) 802 { 803 inst.error = _("immediate expression requires a # prefix"); 804 return FAIL; 805 } 806 (*str)++; 807 break; 808 case GE_OPT_PREFIX: 809 case GE_OPT_PREFIX_BIG: 810 if (is_immediate_prefix (**str)) 811 (*str)++; 812 break; 813 default: abort (); 814 } 815 816 memset (ep, 0, sizeof (expressionS)); 817 818 save_in = input_line_pointer; 819 input_line_pointer = *str; 820 in_my_get_expression = 1; 821 seg = expression (ep); 822 in_my_get_expression = 0; 823 824 if (ep->X_op == O_illegal) 825 { 826 /* We found a bad expression in md_operand(). 
*/ 827 *str = input_line_pointer; 828 input_line_pointer = save_in; 829 if (inst.error == NULL) 830 inst.error = _("bad expression"); 831 return 1; 832 } 833 834#ifdef OBJ_AOUT 835 if (seg != absolute_section 836 && seg != text_section 837 && seg != data_section 838 && seg != bss_section 839 && seg != undefined_section) 840 { 841 inst.error = _("bad segment"); 842 *str = input_line_pointer; 843 input_line_pointer = save_in; 844 return 1; 845 } 846#endif 847 848 /* Get rid of any bignums now, so that we don't generate an error for which 849 we can't establish a line number later on. Big numbers are never valid 850 in instructions, which is where this routine is always called. */ 851 if (prefix_mode != GE_OPT_PREFIX_BIG 852 && (ep->X_op == O_big 853 || (ep->X_add_symbol 854 && (walk_no_bignums (ep->X_add_symbol) 855 || (ep->X_op_symbol 856 && walk_no_bignums (ep->X_op_symbol)))))) 857 { 858 inst.error = _("invalid constant"); 859 *str = input_line_pointer; 860 input_line_pointer = save_in; 861 return 1; 862 } 863 864 *str = input_line_pointer; 865 input_line_pointer = save_in; 866 return 0; 867} 868 869/* Turn a string in input_line_pointer into a floating point constant 870 of type TYPE, and store the appropriate bytes in *LITP. The number 871 of LITTLENUMS emitted is stored in *SIZEP. An error message is 872 returned, or NULL on OK. 873 874 Note that fp constants aren't represent in the normal way on the ARM. 875 In big endian mode, things are as expected. However, in little endian 876 mode fp constants are big-endian word-wise, and little-endian byte-wise 877 within the words. For example, (double) 1.1 in big endian mode is 878 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is 879 the byte sequence 99 99 f1 3f 9a 99 99 99. 880 881 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. 
*/ 882 883char * 884md_atof (int type, char * litP, int * sizeP) 885{ 886 int prec; 887 LITTLENUM_TYPE words[MAX_LITTLENUMS]; 888 char *t; 889 int i; 890 891 switch (type) 892 { 893 case 'f': 894 case 'F': 895 case 's': 896 case 'S': 897 prec = 2; 898 break; 899 900 case 'd': 901 case 'D': 902 case 'r': 903 case 'R': 904 prec = 4; 905 break; 906 907 case 'x': 908 case 'X': 909 prec = 6; 910 break; 911 912 case 'p': 913 case 'P': 914 prec = 6; 915 break; 916 917 default: 918 *sizeP = 0; 919 return _("bad call to MD_ATOF()"); 920 } 921 922 t = atof_ieee (input_line_pointer, type, words); 923 if (t) 924 input_line_pointer = t; 925 *sizeP = prec * 2; 926 927 if (target_big_endian) 928 { 929 for (i = 0; i < prec; i++) 930 { 931 md_number_to_chars (litP, (valueT) words[i], 2); 932 litP += 2; 933 } 934 } 935 else 936 { 937 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure)) 938 for (i = prec - 1; i >= 0; i--) 939 { 940 md_number_to_chars (litP, (valueT) words[i], 2); 941 litP += 2; 942 } 943 else 944 /* For a 4 byte float the order of elements in `words' is 1 0. 945 For an 8 byte float the order is 1 0 3 2. */ 946 for (i = 0; i < prec; i += 2) 947 { 948 md_number_to_chars (litP, (valueT) words[i + 1], 2); 949 md_number_to_chars (litP + 2, (valueT) words[i], 2); 950 litP += 4; 951 } 952 } 953 954 return 0; 955} 956 957/* We handle all bad expressions here, so that we can report the faulty 958 instruction in the error message. */ 959void 960md_operand (expressionS * expr) 961{ 962 if (in_my_get_expression) 963 expr->X_op = O_illegal; 964} 965 966/* Immediate values. */ 967 968/* Generic immediate-value read function for use in directives. 969 Accepts anything that 'expression' can fold to a constant. 970 *val receives the number. 
*/ 971#ifdef OBJ_ELF 972static int 973immediate_for_directive (int *val) 974{ 975 expressionS exp; 976 exp.X_op = O_illegal; 977 978 if (is_immediate_prefix (*input_line_pointer)) 979 { 980 input_line_pointer++; 981 expression (&exp); 982 } 983 984 if (exp.X_op != O_constant) 985 { 986 as_bad (_("expected #constant")); 987 ignore_rest_of_line (); 988 return FAIL; 989 } 990 *val = exp.X_add_number; 991 return SUCCESS; 992} 993#endif 994 995/* Register parsing. */ 996 997/* Generic register parser. CCP points to what should be the 998 beginning of a register name. If it is indeed a valid register 999 name, advance CCP over it and return the reg_entry structure; 1000 otherwise return NULL. Does not issue diagnostics. */ 1001 1002static struct reg_entry * 1003arm_reg_parse_multi (char **ccp) 1004{ 1005 char *start = *ccp; 1006 char *p; 1007 struct reg_entry *reg; 1008 1009#ifdef REGISTER_PREFIX 1010 if (*start != REGISTER_PREFIX) 1011 return NULL; 1012 start++; 1013#endif 1014#ifdef OPTIONAL_REGISTER_PREFIX 1015 if (*start == OPTIONAL_REGISTER_PREFIX) 1016 start++; 1017#endif 1018 1019 p = start; 1020 if (!ISALPHA (*p) || !is_name_beginner (*p)) 1021 return NULL; 1022 1023 do 1024 p++; 1025 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_'); 1026 1027 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start); 1028 1029 if (!reg) 1030 return NULL; 1031 1032 *ccp = p; 1033 return reg; 1034} 1035 1036static int 1037arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg, 1038 enum arm_reg_type type) 1039{ 1040 /* Alternative syntaxes are accepted for a few register classes. */ 1041 switch (type) 1042 { 1043 case REG_TYPE_MVF: 1044 case REG_TYPE_MVD: 1045 case REG_TYPE_MVFX: 1046 case REG_TYPE_MVDX: 1047 /* Generic coprocessor register names are allowed for these. */ 1048 if (reg && reg->type == REG_TYPE_CN) 1049 return reg->number; 1050 break; 1051 1052 case REG_TYPE_CP: 1053 /* For backward compatibility, a bare number is valid here. 
*/ 1054 { 1055 unsigned long processor = strtoul (start, ccp, 10); 1056 if (*ccp != start && processor <= 15) 1057 return processor; 1058 } 1059 1060 case REG_TYPE_MMXWC: 1061 /* WC includes WCG. ??? I'm not sure this is true for all 1062 instructions that take WC registers. */ 1063 if (reg && reg->type == REG_TYPE_MMXWCG) 1064 return reg->number; 1065 break; 1066 1067 default: 1068 break; 1069 } 1070 1071 return FAIL; 1072} 1073 1074/* As arm_reg_parse_multi, but the register must be of type TYPE, and the 1075 return value is the register number or FAIL. */ 1076 1077static int 1078arm_reg_parse (char **ccp, enum arm_reg_type type) 1079{ 1080 char *start = *ccp; 1081 struct reg_entry *reg = arm_reg_parse_multi (ccp); 1082 int ret; 1083 1084 /* Do not allow a scalar (reg+index) to parse as a register. */ 1085 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX)) 1086 return FAIL; 1087 1088 if (reg && reg->type == type) 1089 return reg->number; 1090 1091 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL) 1092 return ret; 1093 1094 *ccp = start; 1095 return FAIL; 1096} 1097 1098/* Parse a Neon type specifier. *STR should point at the leading '.' 1099 character. Does no verification at this stage that the type fits the opcode 1100 properly. E.g., 1101 1102 .i32.i32.s16 1103 .s32.f32 1104 .u16 1105 1106 Can all be legally parsed by this function. 1107 1108 Fills in neon_type struct pointer with parsed information, and updates STR 1109 to point after the parsed type specifier. Returns SUCCESS if this was a legal 1110 type, FAIL if not. */ 1111 1112static int 1113parse_neon_type (struct neon_type *type, char **str) 1114{ 1115 char *ptr = *str; 1116 1117 if (type) 1118 type->elems = 0; 1119 1120 while (type->elems < NEON_MAX_TYPE_ELS) 1121 { 1122 enum neon_el_type thistype = NT_untyped; 1123 unsigned thissize = -1u; 1124 1125 if (*ptr != '.') 1126 break; 1127 1128 ptr++; 1129 1130 /* Just a size without an explicit type. 
*/ 1131 if (ISDIGIT (*ptr)) 1132 goto parsesize; 1133 1134 switch (TOLOWER (*ptr)) 1135 { 1136 case 'i': thistype = NT_integer; break; 1137 case 'f': thistype = NT_float; break; 1138 case 'p': thistype = NT_poly; break; 1139 case 's': thistype = NT_signed; break; 1140 case 'u': thistype = NT_unsigned; break; 1141 case 'd': 1142 thistype = NT_float; 1143 thissize = 64; 1144 ptr++; 1145 goto done; 1146 default: 1147 as_bad (_("unexpected character `%c' in type specifier"), *ptr); 1148 return FAIL; 1149 } 1150 1151 ptr++; 1152 1153 /* .f is an abbreviation for .f32. */ 1154 if (thistype == NT_float && !ISDIGIT (*ptr)) 1155 thissize = 32; 1156 else 1157 { 1158 parsesize: 1159 thissize = strtoul (ptr, &ptr, 10); 1160 1161 if (thissize != 8 && thissize != 16 && thissize != 32 1162 && thissize != 64) 1163 { 1164 as_bad (_("bad size %d in type specifier"), thissize); 1165 return FAIL; 1166 } 1167 } 1168 1169 done: 1170 if (type) 1171 { 1172 type->el[type->elems].type = thistype; 1173 type->el[type->elems].size = thissize; 1174 type->elems++; 1175 } 1176 } 1177 1178 /* Empty/missing type is not a successful parse. */ 1179 if (type->elems == 0) 1180 return FAIL; 1181 1182 *str = ptr; 1183 1184 return SUCCESS; 1185} 1186 1187/* Errors may be set multiple times during parsing or bit encoding 1188 (particularly in the Neon bits), but usually the earliest error which is set 1189 will be the most meaningful. Avoid overwriting it with later (cascading) 1190 errors by calling this function. */ 1191 1192static void 1193first_error (const char *err) 1194{ 1195 if (!inst.error) 1196 inst.error = err; 1197} 1198 1199/* Parse a single type, e.g. ".s32", leading period included. 
*/ 1200static int 1201parse_neon_operand_type (struct neon_type_el *vectype, char **ccp) 1202{ 1203 char *str = *ccp; 1204 struct neon_type optype; 1205 1206 if (*str == '.') 1207 { 1208 if (parse_neon_type (&optype, &str) == SUCCESS) 1209 { 1210 if (optype.elems == 1) 1211 *vectype = optype.el[0]; 1212 else 1213 { 1214 first_error (_("only one type should be specified for operand")); 1215 return FAIL; 1216 } 1217 } 1218 else 1219 { 1220 first_error (_("vector type expected")); 1221 return FAIL; 1222 } 1223 } 1224 else 1225 return FAIL; 1226 1227 *ccp = str; 1228 1229 return SUCCESS; 1230} 1231 1232/* Special meanings for indices (which have a range of 0-7), which will fit into 1233 a 4-bit integer. */ 1234 1235#define NEON_ALL_LANES 15 1236#define NEON_INTERLEAVE_LANES 14 1237 1238/* Parse either a register or a scalar, with an optional type. Return the 1239 register number, and optionally fill in the actual type of the register 1240 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and 1241 type/index information in *TYPEINFO. */ 1242 1243static int 1244parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type, 1245 enum arm_reg_type *rtype, 1246 struct neon_typed_alias *typeinfo) 1247{ 1248 char *str = *ccp; 1249 struct reg_entry *reg = arm_reg_parse_multi (&str); 1250 struct neon_typed_alias atype; 1251 struct neon_type_el parsetype; 1252 1253 atype.defined = 0; 1254 atype.index = -1; 1255 atype.eltype.type = NT_invtype; 1256 atype.eltype.size = -1; 1257 1258 /* Try alternate syntax for some types of register. Note these are mutually 1259 exclusive with the Neon syntax extensions. */ 1260 if (reg == NULL) 1261 { 1262 int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type); 1263 if (altreg != FAIL) 1264 *ccp = str; 1265 if (typeinfo) 1266 *typeinfo = atype; 1267 return altreg; 1268 } 1269 1270 /* Undo polymorphism when a set of register types may be accepted. 
*/ 1271 if ((type == REG_TYPE_NDQ 1272 && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD)) 1273 || (type == REG_TYPE_VFSD 1274 && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD)) 1275 || (type == REG_TYPE_NSDQ 1276 && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD 1277 || reg->type == REG_TYPE_NQ)) 1278 || (type == REG_TYPE_MMXWC 1279 && (reg->type == REG_TYPE_MMXWCG))) 1280 type = reg->type; 1281 1282 if (type != reg->type) 1283 return FAIL; 1284 1285 if (reg->neon) 1286 atype = *reg->neon; 1287 1288 if (parse_neon_operand_type (&parsetype, &str) == SUCCESS) 1289 { 1290 if ((atype.defined & NTA_HASTYPE) != 0) 1291 { 1292 first_error (_("can't redefine type for operand")); 1293 return FAIL; 1294 } 1295 atype.defined |= NTA_HASTYPE; 1296 atype.eltype = parsetype; 1297 } 1298 1299 if (skip_past_char (&str, '[') == SUCCESS) 1300 { 1301 if (type != REG_TYPE_VFD) 1302 { 1303 first_error (_("only D registers may be indexed")); 1304 return FAIL; 1305 } 1306 1307 if ((atype.defined & NTA_HASINDEX) != 0) 1308 { 1309 first_error (_("can't change index for operand")); 1310 return FAIL; 1311 } 1312 1313 atype.defined |= NTA_HASINDEX; 1314 1315 if (skip_past_char (&str, ']') == SUCCESS) 1316 atype.index = NEON_ALL_LANES; 1317 else 1318 { 1319 expressionS exp; 1320 1321 my_get_expression (&exp, &str, GE_NO_PREFIX); 1322 1323 if (exp.X_op != O_constant) 1324 { 1325 first_error (_("constant expression required")); 1326 return FAIL; 1327 } 1328 1329 if (skip_past_char (&str, ']') == FAIL) 1330 return FAIL; 1331 1332 atype.index = exp.X_add_number; 1333 } 1334 } 1335 1336 if (typeinfo) 1337 *typeinfo = atype; 1338 1339 if (rtype) 1340 *rtype = type; 1341 1342 *ccp = str; 1343 1344 return reg->number; 1345} 1346 1347/* Like arm_reg_parse, but allow allow the following extra features: 1348 - If RTYPE is non-zero, return the (possibly restricted) type of the 1349 register (e.g. Neon double or quad reg when either has been requested). 
1350 - If this is a Neon vector type with additional type information, fill 1351 in the struct pointed to by VECTYPE (if non-NULL). 1352 This function will fault on encountering a scalar. 1353*/ 1354 1355static int 1356arm_typed_reg_parse (char **ccp, enum arm_reg_type type, 1357 enum arm_reg_type *rtype, struct neon_type_el *vectype) 1358{ 1359 struct neon_typed_alias atype; 1360 char *str = *ccp; 1361 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype); 1362 1363 if (reg == FAIL) 1364 return FAIL; 1365 1366 /* Do not allow a scalar (reg+index) to parse as a register. */ 1367 if ((atype.defined & NTA_HASINDEX) != 0) 1368 { 1369 first_error (_("register operand expected, but got scalar")); 1370 return FAIL; 1371 } 1372 1373 if (vectype) 1374 *vectype = atype.eltype; 1375 1376 *ccp = str; 1377 1378 return reg; 1379} 1380 1381#define NEON_SCALAR_REG(X) ((X) >> 4) 1382#define NEON_SCALAR_INDEX(X) ((X) & 15) 1383 1384/* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't 1385 have enough information to be able to do a good job bounds-checking. So, we 1386 just do easy checks here, and do further checks later. */ 1387 1388static int 1389parse_scalar (char **ccp, int elsize, struct neon_type_el *type) 1390{ 1391 int reg; 1392 char *str = *ccp; 1393 struct neon_typed_alias atype; 1394 1395 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype); 1396 1397 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0) 1398 return FAIL; 1399 1400 if (atype.index == NEON_ALL_LANES) 1401 { 1402 first_error (_("scalar must have an index")); 1403 return FAIL; 1404 } 1405 else if (atype.index >= 64 / elsize) 1406 { 1407 first_error (_("scalar index out of range")); 1408 return FAIL; 1409 } 1410 1411 if (type) 1412 *type = atype.eltype; 1413 1414 *ccp = str; 1415 1416 return reg * 16 + atype.index; 1417} 1418 1419/* Parse an ARM register list. Returns the bitmask, or FAIL. 
*/ 1420static long 1421parse_reg_list (char ** strp) 1422{ 1423 char * str = * strp; 1424 long range = 0; 1425 int another_range; 1426 1427 /* We come back here if we get ranges concatenated by '+' or '|'. */ 1428 do 1429 { 1430 another_range = 0; 1431 1432 if (*str == '{') 1433 { 1434 int in_range = 0; 1435 int cur_reg = -1; 1436 1437 str++; 1438 do 1439 { 1440 int reg; 1441 1442 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL) 1443 { 1444 first_error (_(reg_expected_msgs[REG_TYPE_RN])); 1445 return FAIL; 1446 } 1447 1448 if (in_range) 1449 { 1450 int i; 1451 1452 if (reg <= cur_reg) 1453 { 1454 first_error (_("bad range in register list")); 1455 return FAIL; 1456 } 1457 1458 for (i = cur_reg + 1; i < reg; i++) 1459 { 1460 if (range & (1 << i)) 1461 as_tsktsk 1462 (_("Warning: duplicated register (r%d) in register list"), 1463 i); 1464 else 1465 range |= 1 << i; 1466 } 1467 in_range = 0; 1468 } 1469 1470 if (range & (1 << reg)) 1471 as_tsktsk (_("Warning: duplicated register (r%d) in register list"), 1472 reg); 1473 else if (reg <= cur_reg) 1474 as_tsktsk (_("Warning: register range not in ascending order")); 1475 1476 range |= 1 << reg; 1477 cur_reg = reg; 1478 } 1479 while (skip_past_comma (&str) != FAIL 1480 || (in_range = 1, *str++ == '-')); 1481 str--; 1482 1483 if (*str++ != '}') 1484 { 1485 first_error (_("missing `}'")); 1486 return FAIL; 1487 } 1488 } 1489 else 1490 { 1491 expressionS expr; 1492 1493 if (my_get_expression (&expr, &str, GE_NO_PREFIX)) 1494 return FAIL; 1495 1496 if (expr.X_op == O_constant) 1497 { 1498 if (expr.X_add_number 1499 != (expr.X_add_number & 0x0000ffff)) 1500 { 1501 inst.error = _("invalid register mask"); 1502 return FAIL; 1503 } 1504 1505 if ((range & expr.X_add_number) != 0) 1506 { 1507 int regno = range & expr.X_add_number; 1508 1509 regno &= -regno; 1510 regno = (1 << regno) - 1; 1511 as_tsktsk 1512 (_("Warning: duplicated register (r%d) in register list"), 1513 regno); 1514 } 1515 1516 range |= expr.X_add_number; 
1517 } 1518 else 1519 { 1520 if (inst.reloc.type != 0) 1521 { 1522 inst.error = _("expression too complex"); 1523 return FAIL; 1524 } 1525 1526 memcpy (&inst.reloc.exp, &expr, sizeof (expressionS)); 1527 inst.reloc.type = BFD_RELOC_ARM_MULTI; 1528 inst.reloc.pc_rel = 0; 1529 } 1530 } 1531 1532 if (*str == '|' || *str == '+') 1533 { 1534 str++; 1535 another_range = 1; 1536 } 1537 } 1538 while (another_range); 1539 1540 *strp = str; 1541 return range; 1542} 1543 1544/* Types of registers in a list. */ 1545 1546enum reg_list_els 1547{ 1548 REGLIST_VFP_S, 1549 REGLIST_VFP_D, 1550 REGLIST_NEON_D 1551}; 1552 1553/* Parse a VFP register list. If the string is invalid return FAIL. 1554 Otherwise return the number of registers, and set PBASE to the first 1555 register. Parses registers of type ETYPE. 1556 If REGLIST_NEON_D is used, several syntax enhancements are enabled: 1557 - Q registers can be used to specify pairs of D registers 1558 - { } can be omitted from around a singleton register list 1559 FIXME: This is not implemented, as it would require backtracking in 1560 some cases, e.g.: 1561 vtbl.8 d3,d4,d5 1562 This could be done (the meaning isn't really ambiguous), but doesn't 1563 fit in well with the current parsing framework. 1564 - 32 D registers may be used (also true for VFPv3). 1565 FIXME: Types are ignored in these register lists, which is probably a 1566 bug. 
*/ 1567 1568static int 1569parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype) 1570{ 1571 char *str = *ccp; 1572 int base_reg; 1573 int new_base; 1574 enum arm_reg_type regtype = 0; 1575 int max_regs = 0; 1576 int count = 0; 1577 int warned = 0; 1578 unsigned long mask = 0; 1579 int i; 1580 1581 if (*str != '{') 1582 { 1583 inst.error = _("expecting {"); 1584 return FAIL; 1585 } 1586 1587 str++; 1588 1589 switch (etype) 1590 { 1591 case REGLIST_VFP_S: 1592 regtype = REG_TYPE_VFS; 1593 max_regs = 32; 1594 break; 1595 1596 case REGLIST_VFP_D: 1597 regtype = REG_TYPE_VFD; 1598 break; 1599 1600 case REGLIST_NEON_D: 1601 regtype = REG_TYPE_NDQ; 1602 break; 1603 } 1604 1605 if (etype != REGLIST_VFP_S) 1606 { 1607 /* VFPv3 allows 32 D registers. */ 1608 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3)) 1609 { 1610 max_regs = 32; 1611 if (thumb_mode) 1612 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, 1613 fpu_vfp_ext_v3); 1614 else 1615 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, 1616 fpu_vfp_ext_v3); 1617 } 1618 else 1619 max_regs = 16; 1620 } 1621 1622 base_reg = max_regs; 1623 1624 do 1625 { 1626 int setmask = 1, addregs = 1; 1627 1628 new_base = arm_typed_reg_parse (&str, regtype, ®type, NULL); 1629 1630 if (new_base == FAIL) 1631 { 1632 first_error (_(reg_expected_msgs[regtype])); 1633 return FAIL; 1634 } 1635 1636 if (new_base >= max_regs) 1637 { 1638 first_error (_("register out of range in list")); 1639 return FAIL; 1640 } 1641 1642 /* Note: a value of 2 * n is returned for the register Q<n>. */ 1643 if (regtype == REG_TYPE_NQ) 1644 { 1645 setmask = 3; 1646 addregs = 2; 1647 } 1648 1649 if (new_base < base_reg) 1650 base_reg = new_base; 1651 1652 if (mask & (setmask << new_base)) 1653 { 1654 first_error (_("invalid register list")); 1655 return FAIL; 1656 } 1657 1658 if ((mask >> new_base) != 0 && ! 
warned) 1659 { 1660 as_tsktsk (_("register list not in ascending order")); 1661 warned = 1; 1662 } 1663 1664 mask |= setmask << new_base; 1665 count += addregs; 1666 1667 if (*str == '-') /* We have the start of a range expression */ 1668 { 1669 int high_range; 1670 1671 str++; 1672 1673 if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL)) 1674 == FAIL) 1675 { 1676 inst.error = gettext (reg_expected_msgs[regtype]); 1677 return FAIL; 1678 } 1679 1680 if (high_range >= max_regs) 1681 { 1682 first_error (_("register out of range in list")); 1683 return FAIL; 1684 } 1685 1686 if (regtype == REG_TYPE_NQ) 1687 high_range = high_range + 1; 1688 1689 if (high_range <= new_base) 1690 { 1691 inst.error = _("register range not in ascending order"); 1692 return FAIL; 1693 } 1694 1695 for (new_base += addregs; new_base <= high_range; new_base += addregs) 1696 { 1697 if (mask & (setmask << new_base)) 1698 { 1699 inst.error = _("invalid register list"); 1700 return FAIL; 1701 } 1702 1703 mask |= setmask << new_base; 1704 count += addregs; 1705 } 1706 } 1707 } 1708 while (skip_past_comma (&str) != FAIL); 1709 1710 str++; 1711 1712 /* Sanity check -- should have raised a parse error above. */ 1713 if (count == 0 || count > max_regs) 1714 abort (); 1715 1716 *pbase = base_reg; 1717 1718 /* Final test -- the registers must be consecutive. */ 1719 mask >>= base_reg; 1720 for (i = 0; i < count; i++) 1721 { 1722 if ((mask & (1u << i)) == 0) 1723 { 1724 inst.error = _("non-contiguous register range"); 1725 return FAIL; 1726 } 1727 } 1728 1729 *ccp = str; 1730 1731 return count; 1732} 1733 1734/* True if two alias types are the same. 
*/ 1735 1736static int 1737neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b) 1738{ 1739 if (!a && !b) 1740 return 1; 1741 1742 if (!a || !b) 1743 return 0; 1744 1745 if (a->defined != b->defined) 1746 return 0; 1747 1748 if ((a->defined & NTA_HASTYPE) != 0 1749 && (a->eltype.type != b->eltype.type 1750 || a->eltype.size != b->eltype.size)) 1751 return 0; 1752 1753 if ((a->defined & NTA_HASINDEX) != 0 1754 && (a->index != b->index)) 1755 return 0; 1756 1757 return 1; 1758} 1759 1760/* Parse element/structure lists for Neon VLD<n> and VST<n> instructions. 1761 The base register is put in *PBASE. 1762 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of 1763 the return value. 1764 The register stride (minus one) is put in bit 4 of the return value. 1765 Bits [6:5] encode the list length (minus one). 1766 The type of the list elements is put in *ELTYPE, if non-NULL. */ 1767 1768#define NEON_LANE(X) ((X) & 0xf) 1769#define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1) 1770#define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1) 1771 1772static int 1773parse_neon_el_struct_list (char **str, unsigned *pbase, 1774 struct neon_type_el *eltype) 1775{ 1776 char *ptr = *str; 1777 int base_reg = -1; 1778 int reg_incr = -1; 1779 int count = 0; 1780 int lane = -1; 1781 int leading_brace = 0; 1782 enum arm_reg_type rtype = REG_TYPE_NDQ; 1783 int addregs = 1; 1784 const char *const incr_error = "register stride must be 1 or 2"; 1785 const char *const type_error = "mismatched element/structure types in list"; 1786 struct neon_typed_alias firsttype; 1787 1788 if (skip_past_char (&ptr, '{') == SUCCESS) 1789 leading_brace = 1; 1790 1791 do 1792 { 1793 struct neon_typed_alias atype; 1794 int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype); 1795 1796 if (getreg == FAIL) 1797 { 1798 first_error (_(reg_expected_msgs[rtype])); 1799 return FAIL; 1800 } 1801 1802 if (base_reg == -1) 1803 { 1804 base_reg = getreg; 1805 if (rtype == 
REG_TYPE_NQ) 1806 { 1807 reg_incr = 1; 1808 addregs = 2; 1809 } 1810 firsttype = atype; 1811 } 1812 else if (reg_incr == -1) 1813 { 1814 reg_incr = getreg - base_reg; 1815 if (reg_incr < 1 || reg_incr > 2) 1816 { 1817 first_error (_(incr_error)); 1818 return FAIL; 1819 } 1820 } 1821 else if (getreg != base_reg + reg_incr * count) 1822 { 1823 first_error (_(incr_error)); 1824 return FAIL; 1825 } 1826 1827 if (!neon_alias_types_same (&atype, &firsttype)) 1828 { 1829 first_error (_(type_error)); 1830 return FAIL; 1831 } 1832 1833 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list 1834 modes. */ 1835 if (ptr[0] == '-') 1836 { 1837 struct neon_typed_alias htype; 1838 int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1; 1839 if (lane == -1) 1840 lane = NEON_INTERLEAVE_LANES; 1841 else if (lane != NEON_INTERLEAVE_LANES) 1842 { 1843 first_error (_(type_error)); 1844 return FAIL; 1845 } 1846 if (reg_incr == -1) 1847 reg_incr = 1; 1848 else if (reg_incr != 1) 1849 { 1850 first_error (_("don't use Rn-Rm syntax with non-unit stride")); 1851 return FAIL; 1852 } 1853 ptr++; 1854 hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype); 1855 if (hireg == FAIL) 1856 { 1857 first_error (_(reg_expected_msgs[rtype])); 1858 return FAIL; 1859 } 1860 if (!neon_alias_types_same (&htype, &firsttype)) 1861 { 1862 first_error (_(type_error)); 1863 return FAIL; 1864 } 1865 count += hireg + dregs - getreg; 1866 continue; 1867 } 1868 1869 /* If we're using Q registers, we can't use [] or [n] syntax. 
*/ 1870 if (rtype == REG_TYPE_NQ) 1871 { 1872 count += 2; 1873 continue; 1874 } 1875 1876 if ((atype.defined & NTA_HASINDEX) != 0) 1877 { 1878 if (lane == -1) 1879 lane = atype.index; 1880 else if (lane != atype.index) 1881 { 1882 first_error (_(type_error)); 1883 return FAIL; 1884 } 1885 } 1886 else if (lane == -1) 1887 lane = NEON_INTERLEAVE_LANES; 1888 else if (lane != NEON_INTERLEAVE_LANES) 1889 { 1890 first_error (_(type_error)); 1891 return FAIL; 1892 } 1893 count++; 1894 } 1895 while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL); 1896 1897 /* No lane set by [x]. We must be interleaving structures. */ 1898 if (lane == -1) 1899 lane = NEON_INTERLEAVE_LANES; 1900 1901 /* Sanity check. */ 1902 if (lane == -1 || base_reg == -1 || count < 1 || count > 4 1903 || (count > 1 && reg_incr == -1)) 1904 { 1905 first_error (_("error parsing element/structure list")); 1906 return FAIL; 1907 } 1908 1909 if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL) 1910 { 1911 first_error (_("expected }")); 1912 return FAIL; 1913 } 1914 1915 if (reg_incr == -1) 1916 reg_incr = 1; 1917 1918 if (eltype) 1919 *eltype = firsttype.eltype; 1920 1921 *pbase = base_reg; 1922 *str = ptr; 1923 1924 return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5); 1925} 1926 1927/* Parse an explicit relocation suffix on an expression. This is 1928 either nothing, or a word in parentheses. Note that if !OBJ_ELF, 1929 arm_reloc_hsh contains no entries, so this function can only 1930 succeed if there is no () after the word. Returns -1 on error, 1931 BFD_RELOC_UNUSED if there wasn't any suffix. 
*/ 1932static int 1933parse_reloc (char **str) 1934{ 1935 struct reloc_entry *r; 1936 char *p, *q; 1937 1938 if (**str != '(') 1939 return BFD_RELOC_UNUSED; 1940 1941 p = *str + 1; 1942 q = p; 1943 1944 while (*q && *q != ')' && *q != ',') 1945 q++; 1946 if (*q != ')') 1947 return -1; 1948 1949 if ((r = hash_find_n (arm_reloc_hsh, p, q - p)) == NULL) 1950 return -1; 1951 1952 *str = q + 1; 1953 return r->reloc; 1954} 1955 1956/* Directives: register aliases. */ 1957 1958static struct reg_entry * 1959insert_reg_alias (char *str, int number, int type) 1960{ 1961 struct reg_entry *new; 1962 const char *name; 1963 1964 if ((new = hash_find (arm_reg_hsh, str)) != 0) 1965 { 1966 if (new->builtin) 1967 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str); 1968 1969 /* Only warn about a redefinition if it's not defined as the 1970 same register. */ 1971 else if (new->number != number || new->type != type) 1972 as_warn (_("ignoring redefinition of register alias '%s'"), str); 1973 1974 return 0; 1975 } 1976 1977 name = xstrdup (str); 1978 new = xmalloc (sizeof (struct reg_entry)); 1979 1980 new->name = name; 1981 new->number = number; 1982 new->type = type; 1983 new->builtin = FALSE; 1984 new->neon = NULL; 1985 1986 if (hash_insert (arm_reg_hsh, name, (PTR) new)) 1987 abort (); 1988 1989 return new; 1990} 1991 1992static void 1993insert_neon_reg_alias (char *str, int number, int type, 1994 struct neon_typed_alias *atype) 1995{ 1996 struct reg_entry *reg = insert_reg_alias (str, number, type); 1997 1998 if (!reg) 1999 { 2000 first_error (_("attempt to redefine typed alias")); 2001 return; 2002 } 2003 2004 if (atype) 2005 { 2006 reg->neon = xmalloc (sizeof (struct neon_typed_alias)); 2007 *reg->neon = *atype; 2008 } 2009} 2010 2011/* Look for the .req directive. 
This is of the form: 2012 2013 new_register_name .req existing_register_name 2014 2015 If we find one, or if it looks sufficiently like one that we want to 2016 handle any error here, return non-zero. Otherwise return zero. */ 2017 2018static int 2019create_register_alias (char * newname, char *p) 2020{ 2021 struct reg_entry *old; 2022 char *oldname, *nbuf; 2023 size_t nlen; 2024 2025 /* The input scrubber ensures that whitespace after the mnemonic is 2026 collapsed to single spaces. */ 2027 oldname = p; 2028 if (strncmp (oldname, " .req ", 6) != 0) 2029 return 0; 2030 2031 oldname += 6; 2032 if (*oldname == '\0') 2033 return 0; 2034 2035 old = hash_find (arm_reg_hsh, oldname); 2036 if (!old) 2037 { 2038 as_warn (_("unknown register '%s' -- .req ignored"), oldname); 2039 return 1; 2040 } 2041 2042 /* If TC_CASE_SENSITIVE is defined, then newname already points to 2043 the desired alias name, and p points to its end. If not, then 2044 the desired alias name is in the global original_case_string. */ 2045#ifdef TC_CASE_SENSITIVE 2046 nlen = p - newname; 2047#else 2048 newname = original_case_string; 2049 nlen = strlen (newname); 2050#endif 2051 2052 nbuf = alloca (nlen + 1); 2053 memcpy (nbuf, newname, nlen); 2054 nbuf[nlen] = '\0'; 2055 2056 /* Create aliases under the new name as stated; an all-lowercase 2057 version of the new name; and an all-uppercase version of the new 2058 name. 
*/ 2059 insert_reg_alias (nbuf, old->number, old->type); 2060 2061 for (p = nbuf; *p; p++) 2062 *p = TOUPPER (*p); 2063 2064 if (strncmp (nbuf, newname, nlen)) 2065 insert_reg_alias (nbuf, old->number, old->type); 2066 2067 for (p = nbuf; *p; p++) 2068 *p = TOLOWER (*p); 2069 2070 if (strncmp (nbuf, newname, nlen)) 2071 insert_reg_alias (nbuf, old->number, old->type); 2072 2073 return 1; 2074} 2075 2076/* Create a Neon typed/indexed register alias using directives, e.g.: 2077 X .dn d5.s32[1] 2078 Y .qn 6.s16 2079 Z .dn d7 2080 T .dn Z[0] 2081 These typed registers can be used instead of the types specified after the 2082 Neon mnemonic, so long as all operands given have types. Types can also be 2083 specified directly, e.g.: 2084 vadd d0.s32, d1.s32, d2.s32 2085*/ 2086 2087static int 2088create_neon_reg_alias (char *newname, char *p) 2089{ 2090 enum arm_reg_type basetype; 2091 struct reg_entry *basereg; 2092 struct reg_entry mybasereg; 2093 struct neon_type ntype; 2094 struct neon_typed_alias typeinfo; 2095 char *namebuf, *nameend; 2096 int namelen; 2097 2098 typeinfo.defined = 0; 2099 typeinfo.eltype.type = NT_invtype; 2100 typeinfo.eltype.size = -1; 2101 typeinfo.index = -1; 2102 2103 nameend = p; 2104 2105 if (strncmp (p, " .dn ", 5) == 0) 2106 basetype = REG_TYPE_VFD; 2107 else if (strncmp (p, " .qn ", 5) == 0) 2108 basetype = REG_TYPE_NQ; 2109 else 2110 return 0; 2111 2112 p += 5; 2113 2114 if (*p == '\0') 2115 return 0; 2116 2117 basereg = arm_reg_parse_multi (&p); 2118 2119 if (basereg && basereg->type != basetype) 2120 { 2121 as_bad (_("bad type for register")); 2122 return 0; 2123 } 2124 2125 if (basereg == NULL) 2126 { 2127 expressionS exp; 2128 /* Try parsing as an integer. */ 2129 my_get_expression (&exp, &p, GE_NO_PREFIX); 2130 if (exp.X_op != O_constant) 2131 { 2132 as_bad (_("expression must be constant")); 2133 return 0; 2134 } 2135 basereg = &mybasereg; 2136 basereg->number = (basetype == REG_TYPE_NQ) ? 
exp.X_add_number * 2 2137 : exp.X_add_number; 2138 basereg->neon = 0; 2139 } 2140 2141 if (basereg->neon) 2142 typeinfo = *basereg->neon; 2143 2144 if (parse_neon_type (&ntype, &p) == SUCCESS) 2145 { 2146 /* We got a type. */ 2147 if (typeinfo.defined & NTA_HASTYPE) 2148 { 2149 as_bad (_("can't redefine the type of a register alias")); 2150 return 0; 2151 } 2152 2153 typeinfo.defined |= NTA_HASTYPE; 2154 if (ntype.elems != 1) 2155 { 2156 as_bad (_("you must specify a single type only")); 2157 return 0; 2158 } 2159 typeinfo.eltype = ntype.el[0]; 2160 } 2161 2162 if (skip_past_char (&p, '[') == SUCCESS) 2163 { 2164 expressionS exp; 2165 /* We got a scalar index. */ 2166 2167 if (typeinfo.defined & NTA_HASINDEX) 2168 { 2169 as_bad (_("can't redefine the index of a scalar alias")); 2170 return 0; 2171 } 2172 2173 my_get_expression (&exp, &p, GE_NO_PREFIX); 2174 2175 if (exp.X_op != O_constant) 2176 { 2177 as_bad (_("scalar index must be constant")); 2178 return 0; 2179 } 2180 2181 typeinfo.defined |= NTA_HASINDEX; 2182 typeinfo.index = exp.X_add_number; 2183 2184 if (skip_past_char (&p, ']') == FAIL) 2185 { 2186 as_bad (_("expecting ]")); 2187 return 0; 2188 } 2189 } 2190 2191 namelen = nameend - newname; 2192 namebuf = alloca (namelen + 1); 2193 strncpy (namebuf, newname, namelen); 2194 namebuf[namelen] = '\0'; 2195 2196 insert_neon_reg_alias (namebuf, basereg->number, basetype, 2197 typeinfo.defined != 0 ? &typeinfo : NULL); 2198 2199 /* Insert name in all uppercase. */ 2200 for (p = namebuf; *p; p++) 2201 *p = TOUPPER (*p); 2202 2203 if (strncmp (namebuf, newname, namelen)) 2204 insert_neon_reg_alias (namebuf, basereg->number, basetype, 2205 typeinfo.defined != 0 ? &typeinfo : NULL); 2206 2207 /* Insert name in all lowercase. */ 2208 for (p = namebuf; *p; p++) 2209 *p = TOLOWER (*p); 2210 2211 if (strncmp (namebuf, newname, namelen)) 2212 insert_neon_reg_alias (namebuf, basereg->number, basetype, 2213 typeinfo.defined != 0 ? 
&typeinfo : NULL); 2214 2215 return 1; 2216} 2217 2218/* Should never be called, as .req goes between the alias and the 2219 register name, not at the beginning of the line. */ 2220static void 2221s_req (int a ATTRIBUTE_UNUSED) 2222{ 2223 as_bad (_("invalid syntax for .req directive")); 2224} 2225 2226static void 2227s_dn (int a ATTRIBUTE_UNUSED) 2228{ 2229 as_bad (_("invalid syntax for .dn directive")); 2230} 2231 2232static void 2233s_qn (int a ATTRIBUTE_UNUSED) 2234{ 2235 as_bad (_("invalid syntax for .qn directive")); 2236} 2237 2238/* The .unreq directive deletes an alias which was previously defined 2239 by .req. For example: 2240 2241 my_alias .req r11 2242 .unreq my_alias */ 2243 2244static void 2245s_unreq (int a ATTRIBUTE_UNUSED) 2246{ 2247 char * name; 2248 char saved_char; 2249 2250 name = input_line_pointer; 2251 2252 while (*input_line_pointer != 0 2253 && *input_line_pointer != ' ' 2254 && *input_line_pointer != '\n') 2255 ++input_line_pointer; 2256 2257 saved_char = *input_line_pointer; 2258 *input_line_pointer = 0; 2259 2260 if (!*name) 2261 as_bad (_("invalid syntax for .unreq directive")); 2262 else 2263 { 2264 struct reg_entry *reg = hash_find (arm_reg_hsh, name); 2265 2266 if (!reg) 2267 as_bad (_("unknown register alias '%s'"), name); 2268 else if (reg->builtin) 2269 as_warn (_("ignoring attempt to undefine built-in register '%s'"), 2270 name); 2271 else 2272 { 2273 hash_delete (arm_reg_hsh, name); 2274 free ((char *) reg->name); 2275 if (reg->neon) 2276 free (reg->neon); 2277 free (reg); 2278 } 2279 } 2280 2281 *input_line_pointer = saved_char; 2282 demand_empty_rest_of_line (); 2283} 2284 2285/* Directives: Instruction set selection. */ 2286 2287#ifdef OBJ_ELF 2288/* This code is to handle mapping symbols as defined in the ARM ELF spec. 2289 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0). 2290 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag), 2291 and $d has type STT_OBJECT (BSF_OBJECT flag). 
Now all three are untyped. */ 2292 2293static enum mstate mapstate = MAP_UNDEFINED; 2294 2295void 2296mapping_state (enum mstate state) 2297{ 2298 symbolS * symbolP; 2299 const char * symname; 2300 int type; 2301 2302 if (mapstate == state) 2303 /* The mapping symbol has already been emitted. 2304 There is nothing else to do. */ 2305 return; 2306 2307 mapstate = state; 2308 2309 switch (state) 2310 { 2311 case MAP_DATA: 2312 symname = "$d"; 2313 type = BSF_NO_FLAGS; 2314 break; 2315 case MAP_ARM: 2316 symname = "$a"; 2317 type = BSF_NO_FLAGS; 2318 break; 2319 case MAP_THUMB: 2320 symname = "$t"; 2321 type = BSF_NO_FLAGS; 2322 break; 2323 case MAP_UNDEFINED: 2324 return; 2325 default: 2326 abort (); 2327 } 2328 2329 seg_info (now_seg)->tc_segment_info_data.mapstate = state; 2330 2331 symbolP = symbol_new (symname, now_seg, (valueT) frag_now_fix (), frag_now); 2332 symbol_table_insert (symbolP); 2333 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL; 2334 2335 switch (state) 2336 { 2337 case MAP_ARM: 2338 THUMB_SET_FUNC (symbolP, 0); 2339 ARM_SET_THUMB (symbolP, 0); 2340 ARM_SET_INTERWORK (symbolP, support_interwork); 2341 break; 2342 2343 case MAP_THUMB: 2344 THUMB_SET_FUNC (symbolP, 1); 2345 ARM_SET_THUMB (symbolP, 1); 2346 ARM_SET_INTERWORK (symbolP, support_interwork); 2347 break; 2348 2349 case MAP_DATA: 2350 default: 2351 return; 2352 } 2353} 2354#else 2355#define mapping_state(x) /* nothing */ 2356#endif 2357 2358/* Find the real, Thumb encoded start of a Thumb function. */ 2359 2360static symbolS * 2361find_real_start (symbolS * symbolP) 2362{ 2363 char * real_start; 2364 const char * name = S_GET_NAME (symbolP); 2365 symbolS * new_target; 2366 2367 /* This definition must agree with the one in gcc/config/arm/thumb.c. */ 2368#define STUB_NAME ".real_start_of" 2369 2370 if (name == NULL) 2371 abort (); 2372 2373 /* The compiler may generate BL instructions to local labels because 2374 it needs to perform a branch to a far away location. 
These labels 2375 do not have a corresponding ".real_start_of" label. We check 2376 both for S_IS_LOCAL and for a leading dot, to give a way to bypass 2377 the ".real_start_of" convention for nonlocal branches. */ 2378 if (S_IS_LOCAL (symbolP) || name[0] == '.') 2379 return symbolP; 2380 2381 real_start = ACONCAT ((STUB_NAME, name, NULL)); 2382 new_target = symbol_find (real_start); 2383 2384 if (new_target == NULL) 2385 { 2386 as_warn ("Failed to find real start of function: %s\n", name); 2387 new_target = symbolP; 2388 } 2389 2390 return new_target; 2391} 2392 2393static void 2394opcode_select (int width) 2395{ 2396 switch (width) 2397 { 2398 case 16: 2399 if (! thumb_mode) 2400 { 2401 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t)) 2402 as_bad (_("selected processor does not support THUMB opcodes")); 2403 2404 thumb_mode = 1; 2405 /* No need to force the alignment, since we will have been 2406 coming from ARM mode, which is word-aligned. */ 2407 record_alignment (now_seg, 1); 2408 } 2409 mapping_state (MAP_THUMB); 2410 break; 2411 2412 case 32: 2413 if (thumb_mode) 2414 { 2415 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)) 2416 as_bad (_("selected processor does not support ARM opcodes")); 2417 2418 thumb_mode = 0; 2419 2420 if (!need_pass_2) 2421 frag_align (2, 0, 0); 2422 2423 record_alignment (now_seg, 1); 2424 } 2425 mapping_state (MAP_ARM); 2426 break; 2427 2428 default: 2429 as_bad (_("invalid instruction size selected (%d)"), width); 2430 } 2431} 2432 2433static void 2434s_arm (int ignore ATTRIBUTE_UNUSED) 2435{ 2436 opcode_select (32); 2437 demand_empty_rest_of_line (); 2438} 2439 2440static void 2441s_thumb (int ignore ATTRIBUTE_UNUSED) 2442{ 2443 opcode_select (16); 2444 demand_empty_rest_of_line (); 2445} 2446 2447static void 2448s_code (int unused ATTRIBUTE_UNUSED) 2449{ 2450 int temp; 2451 2452 temp = get_absolute_expression (); 2453 switch (temp) 2454 { 2455 case 16: 2456 case 32: 2457 opcode_select (temp); 2458 break; 2459 2460 default: 
2461 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp); 2462 } 2463} 2464 2465static void 2466s_force_thumb (int ignore ATTRIBUTE_UNUSED) 2467{ 2468 /* If we are not already in thumb mode go into it, EVEN if 2469 the target processor does not support thumb instructions. 2470 This is used by gcc/config/arm/lib1funcs.asm for example 2471 to compile interworking support functions even if the 2472 target processor should not support interworking. */ 2473 if (! thumb_mode) 2474 { 2475 thumb_mode = 2; 2476 record_alignment (now_seg, 1); 2477 } 2478 2479 demand_empty_rest_of_line (); 2480} 2481 2482static void 2483s_thumb_func (int ignore ATTRIBUTE_UNUSED) 2484{ 2485 s_thumb (0); 2486 2487 /* The following label is the name/address of the start of a Thumb function. 2488 We need to know this for the interworking support. */ 2489 label_is_thumb_function_name = TRUE; 2490} 2491 2492/* Perform a .set directive, but also mark the alias as 2493 being a thumb function. */ 2494 2495static void 2496s_thumb_set (int equiv) 2497{ 2498 /* XXX the following is a duplicate of the code for s_set() in read.c 2499 We cannot just call that code as we need to get at the symbol that 2500 is created. */ 2501 char * name; 2502 char delim; 2503 char * end_name; 2504 symbolS * symbolP; 2505 2506 /* Especial apologies for the random logic: 2507 This just grew, and could be parsed much more simply! 2508 Dean - in haste. */ 2509 name = input_line_pointer; 2510 delim = get_symbol_end (); 2511 end_name = input_line_pointer; 2512 *end_name = delim; 2513 2514 if (*input_line_pointer != ',') 2515 { 2516 *end_name = 0; 2517 as_bad (_("expected comma after name \"%s\""), name); 2518 *end_name = delim; 2519 ignore_rest_of_line (); 2520 return; 2521 } 2522 2523 input_line_pointer++; 2524 *end_name = 0; 2525 2526 if (name[0] == '.' && name[1] == '\0') 2527 { 2528 /* XXX - this should not happen to .thumb_set. 
*/ 2529 abort (); 2530 } 2531 2532 if ((symbolP = symbol_find (name)) == NULL 2533 && (symbolP = md_undefined_symbol (name)) == NULL) 2534 { 2535#ifndef NO_LISTING 2536 /* When doing symbol listings, play games with dummy fragments living 2537 outside the normal fragment chain to record the file and line info 2538 for this symbol. */ 2539 if (listing & LISTING_SYMBOLS) 2540 { 2541 extern struct list_info_struct * listing_tail; 2542 fragS * dummy_frag = xmalloc (sizeof (fragS)); 2543 2544 memset (dummy_frag, 0, sizeof (fragS)); 2545 dummy_frag->fr_type = rs_fill; 2546 dummy_frag->line = listing_tail; 2547 symbolP = symbol_new (name, undefined_section, 0, dummy_frag); 2548 dummy_frag->fr_symbol = symbolP; 2549 } 2550 else 2551#endif 2552 symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag); 2553 2554#ifdef OBJ_COFF 2555 /* "set" symbols are local unless otherwise specified. */ 2556 SF_SET_LOCAL (symbolP); 2557#endif /* OBJ_COFF */ 2558 } /* Make a new symbol. */ 2559 2560 symbol_table_insert (symbolP); 2561 2562 * end_name = delim; 2563 2564 if (equiv 2565 && S_IS_DEFINED (symbolP) 2566 && S_GET_SEGMENT (symbolP) != reg_section) 2567 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP)); 2568 2569 pseudo_set (symbolP); 2570 2571 demand_empty_rest_of_line (); 2572 2573 /* XXX Now we come to the Thumb specific bit of code. */ 2574 2575 THUMB_SET_FUNC (symbolP, 1); 2576 ARM_SET_THUMB (symbolP, 1); 2577#if defined OBJ_ELF || defined OBJ_COFF 2578 ARM_SET_INTERWORK (symbolP, support_interwork); 2579#endif 2580} 2581 2582/* Directives: Mode selection. */ 2583 2584/* .syntax [unified|divided] - choose the new unified syntax 2585 (same for Arm and Thumb encoding, modulo slight differences in what 2586 can be represented) or the old divergent syntax for each mode. 
*/ 2587static void 2588s_syntax (int unused ATTRIBUTE_UNUSED) 2589{ 2590 char *name, delim; 2591 2592 name = input_line_pointer; 2593 delim = get_symbol_end (); 2594 2595 if (!strcasecmp (name, "unified")) 2596 unified_syntax = TRUE; 2597 else if (!strcasecmp (name, "divided")) 2598 unified_syntax = FALSE; 2599 else 2600 { 2601 as_bad (_("unrecognized syntax mode \"%s\""), name); 2602 return; 2603 } 2604 *input_line_pointer = delim; 2605 demand_empty_rest_of_line (); 2606} 2607 2608/* Directives: sectioning and alignment. */ 2609 2610/* Same as s_align_ptwo but align 0 => align 2. */ 2611 2612static void 2613s_align (int unused ATTRIBUTE_UNUSED) 2614{ 2615 int temp; 2616 bfd_boolean fill_p; 2617 long temp_fill; 2618 long max_alignment = 15; 2619 2620 temp = get_absolute_expression (); 2621 if (temp > max_alignment) 2622 as_bad (_("alignment too large: %d assumed"), temp = max_alignment); 2623 else if (temp < 0) 2624 { 2625 as_bad (_("alignment negative. 0 assumed.")); 2626 temp = 0; 2627 } 2628 2629 if (*input_line_pointer == ',') 2630 { 2631 input_line_pointer++; 2632 temp_fill = get_absolute_expression (); 2633 fill_p = TRUE; 2634 } 2635 else 2636 { 2637 fill_p = FALSE; 2638 temp_fill = 0; 2639 } 2640 2641 if (!temp) 2642 temp = 2; 2643 2644 /* Only make a frag if we HAVE to. */ 2645 if (temp && !need_pass_2) 2646 { 2647 if (!fill_p && subseg_text_p (now_seg)) 2648 frag_align_code (temp, 0); 2649 else 2650 frag_align (temp, (int) temp_fill, 0); 2651 } 2652 demand_empty_rest_of_line (); 2653 2654 record_alignment (now_seg, temp); 2655} 2656 2657static void 2658s_bss (int ignore ATTRIBUTE_UNUSED) 2659{ 2660 /* We don't support putting frags in the BSS segment, we fake it by 2661 marking in_bss, then looking at s_skip for clues. */ 2662 subseg_set (bss_section, 0); 2663 demand_empty_rest_of_line (); 2664 mapping_state (MAP_DATA); 2665} 2666 2667static void 2668s_even (int ignore ATTRIBUTE_UNUSED) 2669{ 2670 /* Never make frag if expect extra pass. 
*/ 2671 if (!need_pass_2) 2672 frag_align (1, 0, 0); 2673 2674 record_alignment (now_seg, 1); 2675 2676 demand_empty_rest_of_line (); 2677} 2678 2679/* Directives: Literal pools. */ 2680 2681static literal_pool * 2682find_literal_pool (void) 2683{ 2684 literal_pool * pool; 2685 2686 for (pool = list_of_pools; pool != NULL; pool = pool->next) 2687 { 2688 if (pool->section == now_seg 2689 && pool->sub_section == now_subseg) 2690 break; 2691 } 2692 2693 return pool; 2694} 2695 2696static literal_pool * 2697find_or_make_literal_pool (void) 2698{ 2699 /* Next literal pool ID number. */ 2700 static unsigned int latest_pool_num = 1; 2701 literal_pool * pool; 2702 2703 pool = find_literal_pool (); 2704 2705 if (pool == NULL) 2706 { 2707 /* Create a new pool. */ 2708 pool = xmalloc (sizeof (* pool)); 2709 if (! pool) 2710 return NULL; 2711 2712 pool->next_free_entry = 0; 2713 pool->section = now_seg; 2714 pool->sub_section = now_subseg; 2715 pool->next = list_of_pools; 2716 pool->symbol = NULL; 2717 2718 /* Add it to the list. */ 2719 list_of_pools = pool; 2720 } 2721 2722 /* New pools, and emptied pools, will have a NULL symbol. */ 2723 if (pool->symbol == NULL) 2724 { 2725 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section, 2726 (valueT) 0, &zero_address_frag); 2727 pool->id = latest_pool_num ++; 2728 } 2729 2730 /* Done. */ 2731 return pool; 2732} 2733 2734/* Add the literal in the global 'inst' 2735 structure to the relevent literal pool. */ 2736 2737static int 2738add_to_lit_pool (void) 2739{ 2740 literal_pool * pool; 2741 unsigned int entry; 2742 2743 pool = find_or_make_literal_pool (); 2744 2745 /* Check if this literal value is already in the pool. 
*/ 2746 for (entry = 0; entry < pool->next_free_entry; entry ++) 2747 { 2748 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op) 2749 && (inst.reloc.exp.X_op == O_constant) 2750 && (pool->literals[entry].X_add_number 2751 == inst.reloc.exp.X_add_number) 2752 && (pool->literals[entry].X_unsigned 2753 == inst.reloc.exp.X_unsigned)) 2754 break; 2755 2756 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op) 2757 && (inst.reloc.exp.X_op == O_symbol) 2758 && (pool->literals[entry].X_add_number 2759 == inst.reloc.exp.X_add_number) 2760 && (pool->literals[entry].X_add_symbol 2761 == inst.reloc.exp.X_add_symbol) 2762 && (pool->literals[entry].X_op_symbol 2763 == inst.reloc.exp.X_op_symbol)) 2764 break; 2765 } 2766 2767 /* Do we need to create a new entry? */ 2768 if (entry == pool->next_free_entry) 2769 { 2770 if (entry >= MAX_LITERAL_POOL_SIZE) 2771 { 2772 inst.error = _("literal pool overflow"); 2773 return FAIL; 2774 } 2775 2776 pool->literals[entry] = inst.reloc.exp; 2777 pool->next_free_entry += 1; 2778 } 2779 2780 inst.reloc.exp.X_op = O_symbol; 2781 inst.reloc.exp.X_add_number = ((int) entry) * 4; 2782 inst.reloc.exp.X_add_symbol = pool->symbol; 2783 2784 return SUCCESS; 2785} 2786 2787/* Can't use symbol_new here, so have to create a symbol and then at 2788 a later date assign it a value. Thats what these functions do. */ 2789 2790static void 2791symbol_locate (symbolS * symbolP, 2792 const char * name, /* It is copied, the caller can modify. */ 2793 segT segment, /* Segment identifier (SEG_<something>). */ 2794 valueT valu, /* Symbol value. */ 2795 fragS * frag) /* Associated fragment. */ 2796{ 2797 unsigned int name_length; 2798 char * preserved_copy_of_name; 2799 2800 name_length = strlen (name) + 1; /* +1 for \0. 
*/ 2801 obstack_grow (¬es, name, name_length); 2802 preserved_copy_of_name = obstack_finish (¬es); 2803 2804#ifdef tc_canonicalize_symbol_name 2805 preserved_copy_of_name = 2806 tc_canonicalize_symbol_name (preserved_copy_of_name); 2807#endif 2808 2809 S_SET_NAME (symbolP, preserved_copy_of_name); 2810 2811 S_SET_SEGMENT (symbolP, segment); 2812 S_SET_VALUE (symbolP, valu); 2813 symbol_clear_list_pointers (symbolP); 2814 2815 symbol_set_frag (symbolP, frag); 2816 2817 /* Link to end of symbol chain. */ 2818 { 2819 extern int symbol_table_frozen; 2820 2821 if (symbol_table_frozen) 2822 abort (); 2823 } 2824 2825 symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP); 2826 2827 obj_symbol_new_hook (symbolP); 2828 2829#ifdef tc_symbol_new_hook 2830 tc_symbol_new_hook (symbolP); 2831#endif 2832 2833#ifdef DEBUG_SYMS 2834 verify_symbol_chain (symbol_rootP, symbol_lastP); 2835#endif /* DEBUG_SYMS */ 2836} 2837 2838 2839static void 2840s_ltorg (int ignored ATTRIBUTE_UNUSED) 2841{ 2842 unsigned int entry; 2843 literal_pool * pool; 2844 char sym_name[20]; 2845 2846 pool = find_literal_pool (); 2847 if (pool == NULL 2848 || pool->symbol == NULL 2849 || pool->next_free_entry == 0) 2850 return; 2851 2852 mapping_state (MAP_DATA); 2853 2854 /* Align pool as you have word accesses. 2855 Only make a frag if we have to. */ 2856 if (!need_pass_2) 2857 frag_align (2, 0, 0); 2858 2859 record_alignment (now_seg, 2); 2860 2861 sprintf (sym_name, "$$lit_\002%x", pool->id); 2862 2863 symbol_locate (pool->symbol, sym_name, now_seg, 2864 (valueT) frag_now_fix (), frag_now); 2865 symbol_table_insert (pool->symbol); 2866 2867 ARM_SET_THUMB (pool->symbol, thumb_mode); 2868 2869#if defined OBJ_COFF || defined OBJ_ELF 2870 ARM_SET_INTERWORK (pool->symbol, support_interwork); 2871#endif 2872 2873 for (entry = 0; entry < pool->next_free_entry; entry ++) 2874 /* First output the expression in the instruction to the pool. 
*/ 2875 emit_expr (&(pool->literals[entry]), 4); /* .word */ 2876 2877 /* Mark the pool as empty. */ 2878 pool->next_free_entry = 0; 2879 pool->symbol = NULL; 2880} 2881 2882#ifdef OBJ_ELF 2883/* Forward declarations for functions below, in the MD interface 2884 section. */ 2885static void fix_new_arm (fragS *, int, short, expressionS *, int, int); 2886static valueT create_unwind_entry (int); 2887static void start_unwind_section (const segT, int); 2888static void add_unwind_opcode (valueT, int); 2889static void flush_pending_unwind (void); 2890 2891/* Directives: Data. */ 2892 2893static void 2894s_arm_elf_cons (int nbytes) 2895{ 2896 expressionS exp; 2897 2898#ifdef md_flush_pending_output 2899 md_flush_pending_output (); 2900#endif 2901 2902 if (is_it_end_of_statement ()) 2903 { 2904 demand_empty_rest_of_line (); 2905 return; 2906 } 2907 2908#ifdef md_cons_align 2909 md_cons_align (nbytes); 2910#endif 2911 2912 mapping_state (MAP_DATA); 2913 do 2914 { 2915 int reloc; 2916 char *base = input_line_pointer; 2917 2918 expression (& exp); 2919 2920 if (exp.X_op != O_symbol) 2921 emit_expr (&exp, (unsigned int) nbytes); 2922 else 2923 { 2924 char *before_reloc = input_line_pointer; 2925 reloc = parse_reloc (&input_line_pointer); 2926 if (reloc == -1) 2927 { 2928 as_bad (_("unrecognized relocation suffix")); 2929 ignore_rest_of_line (); 2930 return; 2931 } 2932 else if (reloc == BFD_RELOC_UNUSED) 2933 emit_expr (&exp, (unsigned int) nbytes); 2934 else 2935 { 2936 reloc_howto_type *howto = bfd_reloc_type_lookup (stdoutput, reloc); 2937 int size = bfd_get_reloc_size (howto); 2938 2939 if (reloc == BFD_RELOC_ARM_PLT32) 2940 { 2941 as_bad (_("(plt) is only valid on branch targets")); 2942 reloc = BFD_RELOC_UNUSED; 2943 size = 0; 2944 } 2945 2946 if (size > nbytes) 2947 as_bad (_("%s relocations do not fit in %d bytes"), 2948 howto->name, nbytes); 2949 else 2950 { 2951 /* We've parsed an expression stopping at O_symbol. 
2952 But there may be more expression left now that we 2953 have parsed the relocation marker. Parse it again. 2954 XXX Surely there is a cleaner way to do this. */ 2955 char *p = input_line_pointer; 2956 int offset; 2957 char *save_buf = alloca (input_line_pointer - base); 2958 memcpy (save_buf, base, input_line_pointer - base); 2959 memmove (base + (input_line_pointer - before_reloc), 2960 base, before_reloc - base); 2961 2962 input_line_pointer = base + (input_line_pointer-before_reloc); 2963 expression (&exp); 2964 memcpy (base, save_buf, p - base); 2965 2966 offset = nbytes - size; 2967 p = frag_more ((int) nbytes); 2968 fix_new_exp (frag_now, p - frag_now->fr_literal + offset, 2969 size, &exp, 0, reloc); 2970 } 2971 } 2972 } 2973 } 2974 while (*input_line_pointer++ == ','); 2975 2976 /* Put terminator back into stream. */ 2977 input_line_pointer --; 2978 demand_empty_rest_of_line (); 2979} 2980 2981 2982/* Parse a .rel31 directive. */ 2983 2984static void 2985s_arm_rel31 (int ignored ATTRIBUTE_UNUSED) 2986{ 2987 expressionS exp; 2988 char *p; 2989 valueT highbit; 2990 2991 highbit = 0; 2992 if (*input_line_pointer == '1') 2993 highbit = 0x80000000; 2994 else if (*input_line_pointer != '0') 2995 as_bad (_("expected 0 or 1")); 2996 2997 input_line_pointer++; 2998 if (*input_line_pointer != ',') 2999 as_bad (_("missing comma")); 3000 input_line_pointer++; 3001 3002#ifdef md_flush_pending_output 3003 md_flush_pending_output (); 3004#endif 3005 3006#ifdef md_cons_align 3007 md_cons_align (4); 3008#endif 3009 3010 mapping_state (MAP_DATA); 3011 3012 expression (&exp); 3013 3014 p = frag_more (4); 3015 md_number_to_chars (p, highbit, 4); 3016 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1, 3017 BFD_RELOC_ARM_PREL31); 3018 3019 demand_empty_rest_of_line (); 3020} 3021 3022/* Directives: AEABI stack-unwind tables. */ 3023 3024/* Parse an unwind_fnstart directive. Simply records the current location. 
*/ 3025 3026static void 3027s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED) 3028{ 3029 demand_empty_rest_of_line (); 3030 /* Mark the start of the function. */ 3031 unwind.proc_start = expr_build_dot (); 3032 3033 /* Reset the rest of the unwind info. */ 3034 unwind.opcode_count = 0; 3035 unwind.table_entry = NULL; 3036 unwind.personality_routine = NULL; 3037 unwind.personality_index = -1; 3038 unwind.frame_size = 0; 3039 unwind.fp_offset = 0; 3040 unwind.fp_reg = 13; 3041 unwind.fp_used = 0; 3042 unwind.sp_restored = 0; 3043} 3044 3045 3046/* Parse a handlerdata directive. Creates the exception handling table entry 3047 for the function. */ 3048 3049static void 3050s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED) 3051{ 3052 demand_empty_rest_of_line (); 3053 if (unwind.table_entry) 3054 as_bad (_("dupicate .handlerdata directive")); 3055 3056 create_unwind_entry (1); 3057} 3058 3059/* Parse an unwind_fnend directive. Generates the index table entry. */ 3060 3061static void 3062s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED) 3063{ 3064 long where; 3065 char *ptr; 3066 valueT val; 3067 3068 demand_empty_rest_of_line (); 3069 3070 /* Add eh table entry. */ 3071 if (unwind.table_entry == NULL) 3072 val = create_unwind_entry (0); 3073 else 3074 val = 0; 3075 3076 /* Add index table entry. This is two words. */ 3077 start_unwind_section (unwind.saved_seg, 1); 3078 frag_align (2, 0, 0); 3079 record_alignment (now_seg, 2); 3080 3081 ptr = frag_more (8); 3082 memset(ptr, 0, 8); 3083 where = frag_now_fix () - 8; 3084 3085 /* Self relative offset of the function start. */ 3086 fix_new (frag_now, where, 4, unwind.proc_start, 0, 1, 3087 BFD_RELOC_ARM_PREL31); 3088 3089 /* Indicate dependency on EHABI-defined personality routines to the 3090 linker, if it hasn't been done already. 
*/ 3091 if (unwind.personality_index >= 0 && unwind.personality_index < 3 3092 && !(marked_pr_dependency & (1 << unwind.personality_index))) 3093 { 3094 static const char *const name[] = { 3095 "__aeabi_unwind_cpp_pr0", 3096 "__aeabi_unwind_cpp_pr1", 3097 "__aeabi_unwind_cpp_pr2" 3098 }; 3099 symbolS *pr = symbol_find_or_make (name[unwind.personality_index]); 3100 fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE); 3101 marked_pr_dependency |= 1 << unwind.personality_index; 3102 seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency 3103 = marked_pr_dependency; 3104 } 3105 3106 if (val) 3107 /* Inline exception table entry. */ 3108 md_number_to_chars (ptr + 4, val, 4); 3109 else 3110 /* Self relative offset of the table entry. */ 3111 fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1, 3112 BFD_RELOC_ARM_PREL31); 3113 3114 /* Restore the original section. */ 3115 subseg_set (unwind.saved_seg, unwind.saved_subseg); 3116} 3117 3118 3119/* Parse an unwind_cantunwind directive. */ 3120 3121static void 3122s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED) 3123{ 3124 demand_empty_rest_of_line (); 3125 if (unwind.personality_routine || unwind.personality_index != -1) 3126 as_bad (_("personality routine specified for cantunwind frame")); 3127 3128 unwind.personality_index = -2; 3129} 3130 3131 3132/* Parse a personalityindex directive. 
*/ 3133 3134static void 3135s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED) 3136{ 3137 expressionS exp; 3138 3139 if (unwind.personality_routine || unwind.personality_index != -1) 3140 as_bad (_("duplicate .personalityindex directive")); 3141 3142 expression (&exp); 3143 3144 if (exp.X_op != O_constant 3145 || exp.X_add_number < 0 || exp.X_add_number > 15) 3146 { 3147 as_bad (_("bad personality routine number")); 3148 ignore_rest_of_line (); 3149 return; 3150 } 3151 3152 unwind.personality_index = exp.X_add_number; 3153 3154 demand_empty_rest_of_line (); 3155} 3156 3157 3158/* Parse a personality directive. */ 3159 3160static void 3161s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED) 3162{ 3163 char *name, *p, c; 3164 3165 if (unwind.personality_routine || unwind.personality_index != -1) 3166 as_bad (_("duplicate .personality directive")); 3167 3168 name = input_line_pointer; 3169 c = get_symbol_end (); 3170 p = input_line_pointer; 3171 unwind.personality_routine = symbol_find_or_make (name); 3172 *p = c; 3173 demand_empty_rest_of_line (); 3174} 3175 3176 3177/* Parse a directive saving core registers. */ 3178 3179static void 3180s_arm_unwind_save_core (void) 3181{ 3182 valueT op; 3183 long range; 3184 int n; 3185 3186 range = parse_reg_list (&input_line_pointer); 3187 if (range == FAIL) 3188 { 3189 as_bad (_("expected register list")); 3190 ignore_rest_of_line (); 3191 return; 3192 } 3193 3194 demand_empty_rest_of_line (); 3195 3196 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...} 3197 into .unwind_save {..., sp...}. We aren't bothered about the value of 3198 ip because it is clobbered by calls. */ 3199 if (unwind.sp_restored && unwind.fp_reg == 12 3200 && (range & 0x3000) == 0x1000) 3201 { 3202 unwind.opcode_count--; 3203 unwind.sp_restored = 0; 3204 range = (range | 0x2000) & ~0x1000; 3205 unwind.pending_offset = 0; 3206 } 3207 3208 /* Pop r4-r15. */ 3209 if (range & 0xfff0) 3210 { 3211 /* See if we can use the short opcodes. 
These pop a block of up to 8 3212 registers starting with r4, plus maybe r14. */ 3213 for (n = 0; n < 8; n++) 3214 { 3215 /* Break at the first non-saved register. */ 3216 if ((range & (1 << (n + 4))) == 0) 3217 break; 3218 } 3219 /* See if there are any other bits set. */ 3220 if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0) 3221 { 3222 /* Use the long form. */ 3223 op = 0x8000 | ((range >> 4) & 0xfff); 3224 add_unwind_opcode (op, 2); 3225 } 3226 else 3227 { 3228 /* Use the short form. */ 3229 if (range & 0x4000) 3230 op = 0xa8; /* Pop r14. */ 3231 else 3232 op = 0xa0; /* Do not pop r14. */ 3233 op |= (n - 1); 3234 add_unwind_opcode (op, 1); 3235 } 3236 } 3237 3238 /* Pop r0-r3. */ 3239 if (range & 0xf) 3240 { 3241 op = 0xb100 | (range & 0xf); 3242 add_unwind_opcode (op, 2); 3243 } 3244 3245 /* Record the number of bytes pushed. */ 3246 for (n = 0; n < 16; n++) 3247 { 3248 if (range & (1 << n)) 3249 unwind.frame_size += 4; 3250 } 3251} 3252 3253 3254/* Parse a directive saving FPA registers. */ 3255 3256static void 3257s_arm_unwind_save_fpa (int reg) 3258{ 3259 expressionS exp; 3260 int num_regs; 3261 valueT op; 3262 3263 /* Get Number of registers to transfer. */ 3264 if (skip_past_comma (&input_line_pointer) != FAIL) 3265 expression (&exp); 3266 else 3267 exp.X_op = O_illegal; 3268 3269 if (exp.X_op != O_constant) 3270 { 3271 as_bad (_("expected , <constant>")); 3272 ignore_rest_of_line (); 3273 return; 3274 } 3275 3276 num_regs = exp.X_add_number; 3277 3278 if (num_regs < 1 || num_regs > 4) 3279 { 3280 as_bad (_("number of registers must be in the range [1:4]")); 3281 ignore_rest_of_line (); 3282 return; 3283 } 3284 3285 demand_empty_rest_of_line (); 3286 3287 if (reg == 4) 3288 { 3289 /* Short form. */ 3290 op = 0xb4 | (num_regs - 1); 3291 add_unwind_opcode (op, 1); 3292 } 3293 else 3294 { 3295 /* Long form. 
*/ 3296 op = 0xc800 | (reg << 4) | (num_regs - 1); 3297 add_unwind_opcode (op, 2); 3298 } 3299 unwind.frame_size += num_regs * 12; 3300} 3301 3302 3303/* Parse a directive saving VFP registers for ARMv6 and above. */ 3304 3305static void 3306s_arm_unwind_save_vfp_armv6 (void) 3307{ 3308 int count; 3309 unsigned int start; 3310 valueT op; 3311 int num_vfpv3_regs = 0; 3312 int num_regs_below_16; 3313 3314 count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D); 3315 if (count == FAIL) 3316 { 3317 as_bad (_("expected register list")); 3318 ignore_rest_of_line (); 3319 return; 3320 } 3321 3322 demand_empty_rest_of_line (); 3323 3324 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather 3325 than FSTMX/FLDMX-style ones). */ 3326 3327 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */ 3328 if (start >= 16) 3329 num_vfpv3_regs = count; 3330 else if (start + count > 16) 3331 num_vfpv3_regs = start + count - 16; 3332 3333 if (num_vfpv3_regs > 0) 3334 { 3335 int start_offset = start > 16 ? start - 16 : 0; 3336 op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1); 3337 add_unwind_opcode (op, 2); 3338 } 3339 3340 /* Generate opcode for registers numbered in the range 0 .. 15. */ 3341 num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count; 3342 assert (num_regs_below_16 + num_vfpv3_regs == count); 3343 if (num_regs_below_16 > 0) 3344 { 3345 op = 0xc900 | (start << 4) | (num_regs_below_16 - 1); 3346 add_unwind_opcode (op, 2); 3347 } 3348 3349 unwind.frame_size += count * 8; 3350} 3351 3352 3353/* Parse a directive saving VFP registers for pre-ARMv6. 
*/ 3354 3355static void 3356s_arm_unwind_save_vfp (void) 3357{ 3358 int count; 3359 unsigned int reg; 3360 valueT op; 3361 3362 count = parse_vfp_reg_list (&input_line_pointer, ®, REGLIST_VFP_D); 3363 if (count == FAIL) 3364 { 3365 as_bad (_("expected register list")); 3366 ignore_rest_of_line (); 3367 return; 3368 } 3369 3370 demand_empty_rest_of_line (); 3371 3372 if (reg == 8) 3373 { 3374 /* Short form. */ 3375 op = 0xb8 | (count - 1); 3376 add_unwind_opcode (op, 1); 3377 } 3378 else 3379 { 3380 /* Long form. */ 3381 op = 0xb300 | (reg << 4) | (count - 1); 3382 add_unwind_opcode (op, 2); 3383 } 3384 unwind.frame_size += count * 8 + 4; 3385} 3386 3387 3388/* Parse a directive saving iWMMXt data registers. */ 3389 3390static void 3391s_arm_unwind_save_mmxwr (void) 3392{ 3393 int reg; 3394 int hi_reg; 3395 int i; 3396 unsigned mask = 0; 3397 valueT op; 3398 3399 if (*input_line_pointer == '{') 3400 input_line_pointer++; 3401 3402 do 3403 { 3404 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR); 3405 3406 if (reg == FAIL) 3407 { 3408 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR])); 3409 goto error; 3410 } 3411 3412 if (mask >> reg) 3413 as_tsktsk (_("register list not in ascending order")); 3414 mask |= 1 << reg; 3415 3416 if (*input_line_pointer == '-') 3417 { 3418 input_line_pointer++; 3419 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR); 3420 if (hi_reg == FAIL) 3421 { 3422 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR])); 3423 goto error; 3424 } 3425 else if (reg >= hi_reg) 3426 { 3427 as_bad (_("bad register range")); 3428 goto error; 3429 } 3430 for (; reg < hi_reg; reg++) 3431 mask |= 1 << reg; 3432 } 3433 } 3434 while (skip_past_comma (&input_line_pointer) != FAIL); 3435 3436 if (*input_line_pointer == '}') 3437 input_line_pointer++; 3438 3439 demand_empty_rest_of_line (); 3440 3441 /* Generate any deferred opcodes because we're going to be looking at 3442 the list. 
*/ 3443 flush_pending_unwind (); 3444 3445 for (i = 0; i < 16; i++) 3446 { 3447 if (mask & (1 << i)) 3448 unwind.frame_size += 8; 3449 } 3450 3451 /* Attempt to combine with a previous opcode. We do this because gcc 3452 likes to output separate unwind directives for a single block of 3453 registers. */ 3454 if (unwind.opcode_count > 0) 3455 { 3456 i = unwind.opcodes[unwind.opcode_count - 1]; 3457 if ((i & 0xf8) == 0xc0) 3458 { 3459 i &= 7; 3460 /* Only merge if the blocks are contiguous. */ 3461 if (i < 6) 3462 { 3463 if ((mask & 0xfe00) == (1 << 9)) 3464 { 3465 mask |= ((1 << (i + 11)) - 1) & 0xfc00; 3466 unwind.opcode_count--; 3467 } 3468 } 3469 else if (i == 6 && unwind.opcode_count >= 2) 3470 { 3471 i = unwind.opcodes[unwind.opcode_count - 2]; 3472 reg = i >> 4; 3473 i &= 0xf; 3474 3475 op = 0xffff << (reg - 1); 3476 if (reg > 0 3477 && ((mask & op) == (1u << (reg - 1)))) 3478 { 3479 op = (1 << (reg + i + 1)) - 1; 3480 op &= ~((1 << reg) - 1); 3481 mask |= op; 3482 unwind.opcode_count -= 2; 3483 } 3484 } 3485 } 3486 } 3487 3488 hi_reg = 15; 3489 /* We want to generate opcodes in the order the registers have been 3490 saved, ie. descending order. */ 3491 for (reg = 15; reg >= -1; reg--) 3492 { 3493 /* Save registers in blocks. */ 3494 if (reg < 0 3495 || !(mask & (1 << reg))) 3496 { 3497 /* We found an unsaved reg. Generate opcodes to save the 3498 preceeding block. */ 3499 if (reg != hi_reg) 3500 { 3501 if (reg == 9) 3502 { 3503 /* Short form. */ 3504 op = 0xc0 | (hi_reg - 10); 3505 add_unwind_opcode (op, 1); 3506 } 3507 else 3508 { 3509 /* Long form. 
*/ 3510 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1); 3511 add_unwind_opcode (op, 2); 3512 } 3513 } 3514 hi_reg = reg - 1; 3515 } 3516 } 3517 3518 return; 3519error: 3520 ignore_rest_of_line (); 3521} 3522 3523static void 3524s_arm_unwind_save_mmxwcg (void) 3525{ 3526 int reg; 3527 int hi_reg; 3528 unsigned mask = 0; 3529 valueT op; 3530 3531 if (*input_line_pointer == '{') 3532 input_line_pointer++; 3533 3534 do 3535 { 3536 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG); 3537 3538 if (reg == FAIL) 3539 { 3540 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG])); 3541 goto error; 3542 } 3543 3544 reg -= 8; 3545 if (mask >> reg) 3546 as_tsktsk (_("register list not in ascending order")); 3547 mask |= 1 << reg; 3548 3549 if (*input_line_pointer == '-') 3550 { 3551 input_line_pointer++; 3552 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG); 3553 if (hi_reg == FAIL) 3554 { 3555 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG])); 3556 goto error; 3557 } 3558 else if (reg >= hi_reg) 3559 { 3560 as_bad (_("bad register range")); 3561 goto error; 3562 } 3563 for (; reg < hi_reg; reg++) 3564 mask |= 1 << reg; 3565 } 3566 } 3567 while (skip_past_comma (&input_line_pointer) != FAIL); 3568 3569 if (*input_line_pointer == '}') 3570 input_line_pointer++; 3571 3572 demand_empty_rest_of_line (); 3573 3574 /* Generate any deferred opcodes because we're going to be looking at 3575 the list. */ 3576 flush_pending_unwind (); 3577 3578 for (reg = 0; reg < 16; reg++) 3579 { 3580 if (mask & (1 << reg)) 3581 unwind.frame_size += 4; 3582 } 3583 op = 0xc700 | mask; 3584 add_unwind_opcode (op, 2); 3585 return; 3586error: 3587 ignore_rest_of_line (); 3588} 3589 3590 3591/* Parse an unwind_save directive. 3592 If the argument is non-zero, this is a .vsave directive. */ 3593 3594static void 3595s_arm_unwind_save (int arch_v6) 3596{ 3597 char *peek; 3598 struct reg_entry *reg; 3599 bfd_boolean had_brace = FALSE; 3600 3601 /* Figure out what sort of save we have. 
*/ 3602 peek = input_line_pointer; 3603 3604 if (*peek == '{') 3605 { 3606 had_brace = TRUE; 3607 peek++; 3608 } 3609 3610 reg = arm_reg_parse_multi (&peek); 3611 3612 if (!reg) 3613 { 3614 as_bad (_("register expected")); 3615 ignore_rest_of_line (); 3616 return; 3617 } 3618 3619 switch (reg->type) 3620 { 3621 case REG_TYPE_FN: 3622 if (had_brace) 3623 { 3624 as_bad (_("FPA .unwind_save does not take a register list")); 3625 ignore_rest_of_line (); 3626 return; 3627 } 3628 s_arm_unwind_save_fpa (reg->number); 3629 return; 3630 3631 case REG_TYPE_RN: s_arm_unwind_save_core (); return; 3632 case REG_TYPE_VFD: 3633 if (arch_v6) 3634 s_arm_unwind_save_vfp_armv6 (); 3635 else 3636 s_arm_unwind_save_vfp (); 3637 return; 3638 case REG_TYPE_MMXWR: s_arm_unwind_save_mmxwr (); return; 3639 case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return; 3640 3641 default: 3642 as_bad (_(".unwind_save does not support this kind of register")); 3643 ignore_rest_of_line (); 3644 } 3645} 3646 3647 3648/* Parse an unwind_movsp directive. */ 3649 3650static void 3651s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED) 3652{ 3653 int reg; 3654 valueT op; 3655 int offset; 3656 3657 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN); 3658 if (reg == FAIL) 3659 { 3660 as_bad (_(reg_expected_msgs[REG_TYPE_RN])); 3661 ignore_rest_of_line (); 3662 return; 3663 } 3664 3665 /* Optional constant. */ 3666 if (skip_past_comma (&input_line_pointer) != FAIL) 3667 { 3668 if (immediate_for_directive (&offset) == FAIL) 3669 return; 3670 } 3671 else 3672 offset = 0; 3673 3674 demand_empty_rest_of_line (); 3675 3676 if (reg == REG_SP || reg == REG_PC) 3677 { 3678 as_bad (_("SP and PC not permitted in .unwind_movsp directive")); 3679 return; 3680 } 3681 3682 if (unwind.fp_reg != REG_SP) 3683 as_bad (_("unexpected .unwind_movsp directive")); 3684 3685 /* Generate opcode to restore the value. */ 3686 op = 0x90 | reg; 3687 add_unwind_opcode (op, 1); 3688 3689 /* Record the information for later. 
*/ 3690 unwind.fp_reg = reg; 3691 unwind.fp_offset = unwind.frame_size - offset; 3692 unwind.sp_restored = 1; 3693} 3694 3695/* Parse an unwind_pad directive. */ 3696 3697static void 3698s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED) 3699{ 3700 int offset; 3701 3702 if (immediate_for_directive (&offset) == FAIL) 3703 return; 3704 3705 if (offset & 3) 3706 { 3707 as_bad (_("stack increment must be multiple of 4")); 3708 ignore_rest_of_line (); 3709 return; 3710 } 3711 3712 /* Don't generate any opcodes, just record the details for later. */ 3713 unwind.frame_size += offset; 3714 unwind.pending_offset += offset; 3715 3716 demand_empty_rest_of_line (); 3717} 3718 3719/* Parse an unwind_setfp directive. */ 3720 3721static void 3722s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED) 3723{ 3724 int sp_reg; 3725 int fp_reg; 3726 int offset; 3727 3728 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN); 3729 if (skip_past_comma (&input_line_pointer) == FAIL) 3730 sp_reg = FAIL; 3731 else 3732 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN); 3733 3734 if (fp_reg == FAIL || sp_reg == FAIL) 3735 { 3736 as_bad (_("expected <reg>, <reg>")); 3737 ignore_rest_of_line (); 3738 return; 3739 } 3740 3741 /* Optional constant. */ 3742 if (skip_past_comma (&input_line_pointer) != FAIL) 3743 { 3744 if (immediate_for_directive (&offset) == FAIL) 3745 return; 3746 } 3747 else 3748 offset = 0; 3749 3750 demand_empty_rest_of_line (); 3751 3752 if (sp_reg != 13 && sp_reg != unwind.fp_reg) 3753 { 3754 as_bad (_("register must be either sp or set by a previous" 3755 "unwind_movsp directive")); 3756 return; 3757 } 3758 3759 /* Don't generate any opcodes, just record the information for later. */ 3760 unwind.fp_reg = fp_reg; 3761 unwind.fp_used = 1; 3762 if (sp_reg == 13) 3763 unwind.fp_offset = unwind.frame_size - offset; 3764 else 3765 unwind.fp_offset -= offset; 3766} 3767 3768/* Parse an unwind_raw directive. 
*/ 3769 3770static void 3771s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED) 3772{ 3773 expressionS exp; 3774 /* This is an arbitrary limit. */ 3775 unsigned char op[16]; 3776 int count; 3777 3778 expression (&exp); 3779 if (exp.X_op == O_constant 3780 && skip_past_comma (&input_line_pointer) != FAIL) 3781 { 3782 unwind.frame_size += exp.X_add_number; 3783 expression (&exp); 3784 } 3785 else 3786 exp.X_op = O_illegal; 3787 3788 if (exp.X_op != O_constant) 3789 { 3790 as_bad (_("expected <offset>, <opcode>")); 3791 ignore_rest_of_line (); 3792 return; 3793 } 3794 3795 count = 0; 3796 3797 /* Parse the opcode. */ 3798 for (;;) 3799 { 3800 if (count >= 16) 3801 { 3802 as_bad (_("unwind opcode too long")); 3803 ignore_rest_of_line (); 3804 } 3805 if (exp.X_op != O_constant || exp.X_add_number & ~0xff) 3806 { 3807 as_bad (_("invalid unwind opcode")); 3808 ignore_rest_of_line (); 3809 return; 3810 } 3811 op[count++] = exp.X_add_number; 3812 3813 /* Parse the next byte. */ 3814 if (skip_past_comma (&input_line_pointer) == FAIL) 3815 break; 3816 3817 expression (&exp); 3818 } 3819 3820 /* Add the opcode bytes in reverse order. */ 3821 while (count--) 3822 add_unwind_opcode (op[count], 1); 3823 3824 demand_empty_rest_of_line (); 3825} 3826 3827 3828/* Parse a .eabi_attribute directive. 
 */

/* Parse a .eabi_attribute directive: forwards to the generic vendor
   attribute parser for the processor-specific attribute section.  */

static void
s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
{
  s_vendor_attribute (OBJ_ATTR_PROC);
}
#endif /* OBJ_ELF */

static void s_arm_arch (int);
static void s_arm_object_arch (int);
static void s_arm_cpu (int);
static void s_arm_fpu (int);

#ifdef TE_PE

/* Parse a .secrel32 directive (PE targets): emit each comma-separated
   expression as a 4-byte value, converting symbol references to
   section-relative (O_secrel) form.  */

static void
pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
{
  expressionS exp;

  do
    {
      expression (&exp);
      if (exp.X_op == O_symbol)
	exp.X_op = O_secrel;

      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Step back over the character that terminated the list so the
     end-of-line check below sees it.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
#endif /* TE_PE */

/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.	 */
  { "req",	   s_req,	  0 },
  /* Following two are likewise never called.
 */
  { "dn",	   s_dn,	  0 },
  { "qn",	   s_qn,	  0 },
  { "unreq",	   s_unreq,	  0 },
  { "bss",	   s_bss,	  0 },
  { "align",	   s_align,	  0 },
  { "arm",	   s_arm,	  0 },
  { "thumb",	   s_thumb,	  0 },
  { "code",	   s_code,	  0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func",  s_thumb_func,  0 },
  { "thumb_set",   s_thumb_set,	  0 },
  { "even",	   s_even,	  0 },
  { "ltorg",	   s_ltorg,	  0 },
  { "pool",	   s_ltorg,	  0 },
  { "syntax",	   s_syntax,	  0 },
  { "cpu",	   s_arm_cpu,	  0 },
  { "arch",	   s_arm_arch,	  0 },
  { "object_arch", s_arm_object_arch, 0 },
  { "fpu",	   s_arm_fpu,	  0 },
#ifdef OBJ_ELF
  { "word",	   s_arm_elf_cons, 4 },
  { "long",	   s_arm_elf_cons, 4 },
  { "rel31",	   s_arm_rel31,	  0 },
  /* ARM EHABI unwind-table directives.  */
  { "fnstart",		s_arm_unwind_fnstart,	0 },
  { "fnend",		s_arm_unwind_fnend,	0 },
  { "cantunwind",	s_arm_unwind_cantunwind, 0 },
  { "personality",	s_arm_unwind_personality, 0 },
  { "personalityindex", s_arm_unwind_personalityindex, 0 },
  { "handlerdata",	s_arm_unwind_handlerdata, 0 },
  { "save",		s_arm_unwind_save,	0 },
  { "vsave",		s_arm_unwind_save,	1 },
  { "movsp",		s_arm_unwind_movsp,	0 },
  { "pad",		s_arm_unwind_pad,	0 },
  { "setfp",		s_arm_unwind_setfp,	0 },
  { "unwind_raw",	s_arm_unwind_raw,	0 },
  { "eabi_attribute",	s_arm_eabi_attribute,	0 },
#else
  { "word",	   cons, 4},

  /* These are used for dwarf.	*/
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2.	 */
  { "file", (void (*) (int)) dwarf2_directive_file, 0 },
  { "loc",  dwarf2_directive_loc,  0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend",	   float_cons, 'x' },
  { "ldouble",	   float_cons, 'x' },
  { "packed",	   float_cons, 'p' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif
  { 0, 0, 0 }
};

/* Parser functions used exclusively in instruction operands.
 */

/* Generic immediate-value read function for use in insn parsing.
   STR points to the beginning of the immediate (the leading #);
   VAL receives the value; if the value is outside [MIN, MAX]
   issue an error.  PREFIX_OPT is true if the immediate prefix is
   optional.
   Returns SUCCESS or FAIL; on failure inst.error is set.  */

static int
parse_immediate (char **str, int *val, int min, int max,
		 bfd_boolean prefix_opt)
{
  expressionS exp;
  my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
  if (exp.X_op != O_constant)
    {
      inst.error = _("constant expression required");
      return FAIL;
    }

  if (exp.X_add_number < min || exp.X_add_number > max)
    {
      inst.error = _("immediate value out of range");
      return FAIL;
    }

  *val = exp.X_add_number;
  return SUCCESS;
}

/* Less-generic immediate-value read function with the possibility of loading a
   big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
   instructions.  Puts the result directly in inst.operands[i]: the low 32
   bits in .imm, the high 32 bits (if any) in .reg with .regisimm set.
   *STR is advanced past the immediate only on success.  */

static int
parse_big_immediate (char **str, int i)
{
  expressionS exp;
  char *ptr = *str;

  my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);

  if (exp.X_op == O_constant)
    {
      inst.operands[i].imm = exp.X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp.X_add_number & ~0xffffffffl) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4.  */
	  inst.operands[i].reg = ((exp.X_add_number >> 16) >> 16) & 0xffffffff;
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp.X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32
	   && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number <= 64)
    {
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
      /* Bignums have their least significant bits in
	 generic_bignum[0].  Make sure we put 32 bits in imm and
	 32 bits in reg,  in a (hopefully) portable way.  */
      assert (parts != 0);
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else
    return FAIL;

  *str = ptr;

  return SUCCESS;
}

/* Returns the pseudo-register number of an FPA immediate constant,
   or FAIL if there isn't a valid constant here.  The FPA encodes eight
   well-known constants as register numbers 8..15; matching is tried
   first textually against fp_const[], then against the binary
   fp_values[] patterns.  */

static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *	 save_in;
  expressionS	 exp;
  int		 i;
  int		 j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])
	    return i + 8;
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").
     Note: input_line_pointer is temporarily redirected at *str so the
     generic expression() machinery can be used, and restored below.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
      if (gen_to_words (words, 5, (long) 15) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}

/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000.  */

static int
is_quarter_float (unsigned imm)
{
  /* BS is the expected exponent pattern: bits 25-30 must be all-ones
     or all-zeroes of the form selected by bit 29.  */
  int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
  return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
}

/* Parse an 8-bit "quarter-precision" floating point number of the form:
   0baBbbbbbc defgh000 00000000 00000000.
   The zero and minus-zero cases need special handling, since they can't be
   encoded in the "quarter-precision" float format, but can nonetheless be
   loaded as integer constants.	 */

static unsigned
parse_qfloat_immediate (char **ccp, int *immed)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;

  skip_past_char (&str, '#');

  /* We must not accidentally parse an integer as a floating-point number. Make
     sure that the value we parse is not an integer by checking for special
     characters '.' or 'e'.
     FIXME: This is a horrible hack, but doing better is tricky because type
     information isn't in a very usable state at parse time.  */
  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    return FAIL;
  else
    {
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FAIL;
    }

  if ((str = atof_ieee (str, 's', words)) != NULL)
    {
      unsigned fpword = 0;
      int i;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}

      /* Accept quarter-precision patterns plus +0.0/-0.0 (which the
	 quarter format cannot encode -- see comment above).  */
      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
	*immed = fpword;
      else
	return FAIL;

      *ccp = str;

      return SUCCESS;
    }

  return FAIL;
}

/* Shift operands.  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
};

/* Maps a textual shift name onto its kind; entries live in
   arm_shift_hsh.  */
struct asm_shift_name
{
  const char	  *name;
  enum shift_kind  kind;
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.	*/
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.	*/
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.	 */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.	 */
};

/* Parse a <shift> specifier on an ARM data processing instruction.
   This has three forms:

     (LSL|LSR|ASL|ASR|ROR) Rs
     (LSL|LSR|ASL|ASR|ROR) #imm
     RRX

   Note that ASL is assimilated to LSL in the instruction encoding, and
   RRX to ROR #0 (which cannot be written as such).
   On success sets inst.operands[i].shift_kind/.shifted (and .imm/.immisreg
   for a register shift, or inst.reloc.exp for an immediate).  */

static int
parse_shift (char **str, int i, enum parse_shift_mode mode)
{
  const struct asm_shift_name *shift_name;
  enum shift_kind shift;
  char *s = *str;
  char *p = s;
  int reg;

  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      inst.error = _("shift expression expected");
      return FAIL;
    }

  shift_name = hash_find_n (arm_shift_hsh, *str, p - *str);

  if (shift_name == NULL)
    {
      inst.error = _("shift expression expected");
      return FAIL;
    }

  shift = shift_name->kind;

  /* Enforce the restrictions requested by the caller.  */
  switch (mode)
    {
    case NO_SHIFT_RESTRICT:
    case SHIFT_IMMEDIATE:   break;

    case SHIFT_LSL_OR_ASR_IMMEDIATE:
      if (shift != SHIFT_LSL && shift != SHIFT_ASR)
	{
	  inst.error = _("'LSL' or 'ASR' required");
	  return FAIL;
	}
      break;

    case SHIFT_LSL_IMMEDIATE:
      if (shift != SHIFT_LSL)
	{
	  inst.error = _("'LSL' required");
	  return FAIL;
	}
      break;

    case SHIFT_ASR_IMMEDIATE:
      if (shift != SHIFT_ASR)
	{
	  inst.error = _("'ASR' required");
	  return FAIL;
	}
      break;

    default: abort ();
    }

  if (shift != SHIFT_RRX)
    {
      /* Whitespace can appear here if the next thing is a bare digit.	*/
      skip_whitespace (p);

      if (mode == NO_SHIFT_RESTRICT
	  && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;
	}
      else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
	return FAIL;
    }
  inst.operands[i].shift_kind = shift;
  inst.operands[i].shifted = 1;
  *str = p;
  return SUCCESS;
}

/* Parse a <shifter_operand> for an ARM data processing instruction:

      #<immediate>
      #<immediate>, <rotate>
      <Rm>
      <Rm>, <shift>

   where <shift> is defined by parse_shift above, and <rotate> is a
   multiple of 2 between 0 and 30.  Validation of immediate operands
   is deferred to md_apply_fix.	 */

static int
parse_shifter_operand (char **str, int i)
{
  int value;
  expressionS expr;

  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
    {
      inst.operands[i].reg = value;
      inst.operands[i].isreg = 1;

      /* parse_shift will override this if appropriate */
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;

      if (skip_past_comma (str) == FAIL)
	return SUCCESS;

      /* Shift operation on register.  */
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
    }

  if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
    return FAIL;

  if (skip_past_comma (str) == SUCCESS)
    {
      /* #x, y -- ie explicit rotation by Y.  */
      if (my_get_expression (&expr, str, GE_NO_PREFIX))
	return FAIL;

      if (expr.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}

      value = expr.X_add_number;
      if (value < 0 || value > 30 || value % 2 != 0)
	{
	  inst.error = _("invalid rotation");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
	{
	  inst.error = _("invalid constant");
	  return FAIL;
	}

      /* Convert to decoded value.  md_apply_fix will put it back.
	 NOTE(review): when VALUE is 0 the left shift count below is 32,
	 which is only well-defined if offsetT is wider than 32 bits --
	 confirm for 32-bit-offsetT hosts.  */
      inst.reloc.exp.X_add_number
	= (((inst.reloc.exp.X_add_number << (32 - value))
	    | (inst.reloc.exp.X_add_number >> value)) & 0xffffffff);
    }

  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 0;
  return SUCCESS;
}

/* Group relocation information.  Each entry in the table contains the
   textual name of the relocation as may appear in assembler source
   and must end with a colon.
   Along with this textual name are the relocation codes to be used if
   the corresponding instruction is an ALU instruction (ADD or SUB only),
   an LDR, an LDRS, or an LDC.	*/

struct group_reloc_table_entry
{
  const char *name;
  int alu_code;
  int ldr_code;
  int ldrs_code;
  int ldc_code;
};

typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,
  GROUP_LDRS,
  GROUP_LDC
} group_reloc_type;

/* A zero in a non-ALU column means the relocation is not allowed for
   that instruction class.  */
static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 } };	/* LDC */

/* Given the address of a pointer pointing to the textual name of a group
   relocation as may appear in assembler source, attempt to find its details
   in group_reloc_table.
The pointer will be updated to the character after 4444 the trailing colon. On failure, FAIL will be returned; SUCCESS 4445 otherwise. On success, *entry will be updated to point at the relevant 4446 group_reloc_table entry. */ 4447 4448static int 4449find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out) 4450{ 4451 unsigned int i; 4452 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++) 4453 { 4454 int length = strlen (group_reloc_table[i].name); 4455 4456 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0 && 4457 (*str)[length] == ':') 4458 { 4459 *out = &group_reloc_table[i]; 4460 *str += (length + 1); 4461 return SUCCESS; 4462 } 4463 } 4464 4465 return FAIL; 4466} 4467 4468/* Parse a <shifter_operand> for an ARM data processing instruction 4469 (as for parse_shifter_operand) where group relocations are allowed: 4470 4471 #<immediate> 4472 #<immediate>, <rotate> 4473 #:<group_reloc>:<expression> 4474 <Rm> 4475 <Rm>, <shift> 4476 4477 where <group_reloc> is one of the strings defined in group_reloc_table. 4478 The hashes are optional. 4479 4480 Everything else is as for parse_shifter_operand. */ 4481 4482static parse_operand_result 4483parse_shifter_operand_group_reloc (char **str, int i) 4484{ 4485 /* Determine if we have the sequence of characters #: or just : 4486 coming next. If we do, then we check for a group relocation. 4487 If we don't, punt the whole lot to parse_shifter_operand. */ 4488 4489 if (((*str)[0] == '#' && (*str)[1] == ':') 4490 || (*str)[0] == ':') 4491 { 4492 struct group_reloc_table_entry *entry; 4493 4494 if ((*str)[0] == '#') 4495 (*str) += 2; 4496 else 4497 (*str)++; 4498 4499 /* Try to parse a group relocation. Anything else is an error. 
*/ 4500 if (find_group_reloc_table_entry (str, &entry) == FAIL) 4501 { 4502 inst.error = _("unknown group relocation"); 4503 return PARSE_OPERAND_FAIL_NO_BACKTRACK; 4504 } 4505 4506 /* We now have the group relocation table entry corresponding to 4507 the name in the assembler source. Next, we parse the expression. */ 4508 if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX)) 4509 return PARSE_OPERAND_FAIL_NO_BACKTRACK; 4510 4511 /* Record the relocation type (always the ALU variant here). */ 4512 inst.reloc.type = entry->alu_code; 4513 assert (inst.reloc.type != 0); 4514 4515 return PARSE_OPERAND_SUCCESS; 4516 } 4517 else 4518 return parse_shifter_operand (str, i) == SUCCESS 4519 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL; 4520 4521 /* Never reached. */ 4522} 4523 4524/* Parse all forms of an ARM address expression. Information is written 4525 to inst.operands[i] and/or inst.reloc. 4526 4527 Preindexed addressing (.preind=1): 4528 4529 [Rn, #offset] .reg=Rn .reloc.exp=offset 4530 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1 4531 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1 4532 .shift_kind=shift .reloc.exp=shift_imm 4533 4534 These three may have a trailing ! which causes .writeback to be set also. 4535 4536 Postindexed addressing (.postind=1, .writeback=1): 4537 4538 [Rn], #offset .reg=Rn .reloc.exp=offset 4539 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1 4540 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1 4541 .shift_kind=shift .reloc.exp=shift_imm 4542 4543 Unindexed addressing (.preind=0, .postind=0): 4544 4545 [Rn], {option} .reg=Rn .imm=option .immisreg=0 4546 4547 Other: 4548 4549 [Rn]{!} shorthand for [Rn,#0]{!} 4550 =immediate .isreg=0 .reloc.exp=immediate 4551 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label 4552 4553 It is the caller's responsibility to check for addressing modes not 4554 supported by the instruction, and to set inst.reloc.type. 
 */

static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
		    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  if (skip_past_char (&p, '[') == FAIL)
    {
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* bare address - translate to PC-relative offset */
	  inst.reloc.pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;
	}
      /* else a load-constant pseudo op, no special treatment needed here */

      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* We are inside '[' ... ']'; the base register comes first.  */
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  if (skip_past_comma (&p) == SUCCESS)
    {
      inst.operands[i].preind = 1;

      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  /* Register offset, optionally shifted.  */
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here. This may be subject to
	     change.  */
	  expressionS exp;
	  my_get_expression (&exp, &p, GE_NO_PREFIX);
	  if (exp.X_op != O_constant)
	    {
	      inst.error = _("alignment must be constant");
	      return PARSE_OPERAND_FAIL;
	    }
	  inst.operands[i].imm = exp.X_add_number << 8;
	  inst.operands[i].immisalign = 1;
	  /* Alignments are not pre-indexes.  */
	  inst.operands[i].preind = 0;
	}
      else
	{
	  /* Immediate offset.  Back up over a consumed '-' so the
	     expression parser sees the sign itself.  */
	  if (inst.operands[i].negative)
	    {
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations &&
	      ((*p == '#' && *(p + 1) == ':') || *p == ':'))

	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation.  Anything else is an
		 error.	 */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type.  */
	      switch (group_type)
		{
		  case GROUP_LDR:
		    inst.reloc.type = entry->ldr_code;
		    break;

		  case GROUP_LDRS:
		    inst.reloc.type = entry->ldrs_code;
		    break;

		  case GROUP_LDC:
		    inst.reloc.type = entry->ldc_code;
		    break;

		  default:
		    assert (0);
		}

	      /* A zero entry in the table means this relocation class is
		 not permitted for this kind of instruction.  */
	      if (inst.reloc.type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
	      return PARSE_OPERAND_FAIL;
	}
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  /* Post-indexed addressing: [Rn], offset.  */
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already.  If we
		 are, OR the register number into the low-order bits.	*/
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      /* Back up over a consumed '-' so the expression parser
		 sees the sign itself.	*/
	      if (inst.operands[i].negative)
		{
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}

/* Wrapper around parse_address_main with group relocations disabled.	*/
static int
parse_address (char **str, int i)
{
  return parse_address_main (str, i, 0, 0) == PARSE_OPERAND_SUCCESS
	 ? SUCCESS : FAIL;
}

/* Wrapper around parse_address_main with group relocations of class
   TYPE enabled.  */
static parse_operand_result
parse_address_group_reloc (char **str, int i, group_reloc_type type)
{
  return parse_address_main (str, i, 1, type);
}

/* Parse an operand for a MOVW or MOVT instruction.  Recognizes the
   optional ":lower16:"/":upper16:" relocation prefixes; without one,
   the operand must be a constant in [0, 0xffff].  */
static int
parse_half (char **str)
{
  char * p;

  p = *str;
  skip_past_char (&p, '#');
  if (strncasecmp (p, ":lower16:", 9) == 0)
    inst.reloc.type = BFD_RELOC_ARM_MOVW;
  else if (strncasecmp (p, ":upper16:", 9) == 0)
    inst.reloc.type = BFD_RELOC_ARM_MOVT;

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      /* Skip the 9-character prefix just recognized.  */
      p += 9;
      skip_whitespace(p);
    }

  if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
    return FAIL;

  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      if (inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0
	  || inst.reloc.exp.X_add_number > 0xffff)
	{
	  inst.error = _("immediate value out of range");
	  return FAIL;
	}
    }
  *str = p;
  return SUCCESS;
}

/* Miscellaneous. */

/* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
   or a bitmask suitable to be or-ed into the ARM msr instruction.  */
static int
parse_psr (char **str)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    psr_field = SPSR_BIT;
  else if (strncasecmp (p, "CPSR", 4) == 0)
    psr_field = 0;
  else
    {
      /* Not CPSR/SPSR: try the v7-M special-register names.  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      psr = hash_find_n (arm_v7m_psr_hsh, start, p - start);
      if (!psr)
	return FAIL;

      *str = p;
      return psr->field;
    }

  p += 4;
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      psr = hash_find_n (arm_psr_hsh, start, p - start);
      if (!psr)
	goto error;

      psr_field |= psr->field;
    }
  else
    {
      if (ISALNUM (*p))
	goto error;    /* Garbage after "[CS]PSR".  */

      /* Bare CPSR/SPSR means both the control and flags fields.  */
      psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}

/* Parse the flags argument to CPSI[ED].  Returns FAIL on error, or a
   value suitable for splatting into the AIF field of the instruction.	 */

static int
parse_cps_flags (char **str)
{
  int val = 0;
  int saw_a_flag = 0;
  char *s = *str;

  for (;;)
    switch (*s++)
      {
      case '\0': case ',':
	goto done;

      case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
      case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
      case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;

      default:
	inst.error = _("unrecognized CPS flag");
	return FAIL;
      }

 done:
  if (saw_a_flag == 0)
    {
      inst.error = _("missing CPS flags");
      return FAIL;
    }

  *str = s - 1;
  return val;
}

/* Parse an endian specifier ("BE" or "LE", case insensitive);
   returns 1 for "BE", 0 for "LE", or FAIL for an error.
   NOTE(review): this comment previously claimed 0 for big-endian and 1
   for little-endian, but the inverted strncasecmp tests in the function
   body return the opposite -- confirm against the SETEND encoder.
*/ 4929 4930static int 4931parse_endian_specifier (char **str) 4932{ 4933 int little_endian; 4934 char *s = *str; 4935 4936 if (strncasecmp (s, "BE", 2)) 4937 little_endian = 0; 4938 else if (strncasecmp (s, "LE", 2)) 4939 little_endian = 1; 4940 else 4941 { 4942 inst.error = _("valid endian specifiers are be or le"); 4943 return FAIL; 4944 } 4945 4946 if (ISALNUM (s[2]) || s[2] == '_') 4947 { 4948 inst.error = _("valid endian specifiers are be or le"); 4949 return FAIL; 4950 } 4951 4952 *str = s + 2; 4953 return little_endian; 4954} 4955 4956/* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a 4957 value suitable for poking into the rotate field of an sxt or sxta 4958 instruction, or FAIL on error. */ 4959 4960static int 4961parse_ror (char **str) 4962{ 4963 int rot; 4964 char *s = *str; 4965 4966 if (strncasecmp (s, "ROR", 3) == 0) 4967 s += 3; 4968 else 4969 { 4970 inst.error = _("missing rotation field after comma"); 4971 return FAIL; 4972 } 4973 4974 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL) 4975 return FAIL; 4976 4977 switch (rot) 4978 { 4979 case 0: *str = s; return 0x0; 4980 case 8: *str = s; return 0x1; 4981 case 16: *str = s; return 0x2; 4982 case 24: *str = s; return 0x3; 4983 4984 default: 4985 inst.error = _("rotation can only be 0, 8, 16, or 24"); 4986 return FAIL; 4987 } 4988} 4989 4990/* Parse a conditional code (from conds[] below). The value returned is in the 4991 range 0 .. 14, or FAIL. */ 4992static int 4993parse_cond (char **str) 4994{ 4995 char *p, *q; 4996 const struct asm_cond *c; 4997 4998 p = q = *str; 4999 while (ISALPHA (*q)) 5000 q++; 5001 5002 c = hash_find_n (arm_cond_hsh, p, q - p); 5003 if (!c) 5004 { 5005 inst.error = _("condition required"); 5006 return FAIL; 5007 } 5008 5009 *str = q; 5010 return c->value; 5011} 5012 5013/* Parse an option for a barrier instruction. Returns the encoding for the 5014 option, or FAIL. 
*/ 5015static int 5016parse_barrier (char **str) 5017{ 5018 char *p, *q; 5019 const struct asm_barrier_opt *o; 5020 5021 p = q = *str; 5022 while (ISALPHA (*q)) 5023 q++; 5024 5025 o = hash_find_n (arm_barrier_opt_hsh, p, q - p); 5026 if (!o) 5027 return FAIL; 5028 5029 *str = q; 5030 return o->value; 5031} 5032 5033/* Parse the operands of a table branch instruction. Similar to a memory 5034 operand. */ 5035static int 5036parse_tb (char **str) 5037{ 5038 char * p = *str; 5039 int reg; 5040 5041 if (skip_past_char (&p, '[') == FAIL) 5042 { 5043 inst.error = _("'[' expected"); 5044 return FAIL; 5045 } 5046 5047 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL) 5048 { 5049 inst.error = _(reg_expected_msgs[REG_TYPE_RN]); 5050 return FAIL; 5051 } 5052 inst.operands[0].reg = reg; 5053 5054 if (skip_past_comma (&p) == FAIL) 5055 { 5056 inst.error = _("',' expected"); 5057 return FAIL; 5058 } 5059 5060 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL) 5061 { 5062 inst.error = _(reg_expected_msgs[REG_TYPE_RN]); 5063 return FAIL; 5064 } 5065 inst.operands[0].imm = reg; 5066 5067 if (skip_past_comma (&p) == SUCCESS) 5068 { 5069 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL) 5070 return FAIL; 5071 if (inst.reloc.exp.X_add_number != 1) 5072 { 5073 inst.error = _("invalid shift"); 5074 return FAIL; 5075 } 5076 inst.operands[0].shifted = 1; 5077 } 5078 5079 if (skip_past_char (&p, ']') == FAIL) 5080 { 5081 inst.error = _("']' expected"); 5082 return FAIL; 5083 } 5084 *str = p; 5085 return SUCCESS; 5086} 5087 5088/* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more 5089 information on the types the operands can take and how they are encoded. 5090 Up to four operands may be read; this function handles setting the 5091 ".present" field for each read operand itself. 5092 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS, 5093 else returns FAIL. 
*/

static int
parse_neon_mov (char **str, int *which_operand)
{
  /* I indexes into inst.operands[]; it may advance past several slots
     because a single syntactic operand can expand to multiple encoded
     operands.  */
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
           != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* Case 5: a D register destination needs a second ARM
		 source register.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	/* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	   Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	   Case 10: VMOV.F32 <Sd>, #<imm>
	   Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					   &optype)) != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15: two additional ARM source registers.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;
	    }
	}
      else if (parse_big_immediate (&ptr, i) == SUCCESS)
	/* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	   Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	    {
	      /* Case 14: a second single-precision source register.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
					      &optype)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		  return FAIL;
		}
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i++].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands.  Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}

/* Matcher codes for parse_operands.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,	/* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNDQ,	/* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,	/* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,	/* VFP single or double-precision register list (& quad) */
  OP_NRDLST,	/* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,	/* Neon element/structure list */

  OP_NILO,	/* Neon immediate/logic operands 2 or 2+3. (VBIC, VORR...)  */
  OP_RNDQ_I0,	/* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RR_RNSC,	/* ARM reg or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC,	/* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,	/* Neon D reg, or Neon scalar.  */
  OP_VMOV,	/* Neon VMOV operands.  */
  OP_RNDQ_IMVNb,/* Neon D or Q reg, or immediate good for VMVN.  */
  OP_RNDQ_I63b,	/* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z,	/* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  OP_I0,	/* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,	/*		   0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS,	/* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,	/* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_PSR,	/* CPSR/SPSR mask for msr */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_RVC_PSR,	/* CPSR/SPSR mask for msr, or VFP control register.  */
  OP_APSR_RR,	/* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC,	/* iWMMXt R or C reg */
  OP_RIWC_RIWG,	/* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	/* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	/*			       0 .. 31 */
  OP_oI32b,	/*			       1 .. 32 */
  OP_oIffffb,	/*			       0 .. 65535 */
  OP_oI255c,	/*	 curly-brace enclosed, 0 .. 255 */

  OP_oRR,	/* ARM register */
  OP_oRRnpc,	/* ARM register, not the PC */
  OP_oRRw,	/* ARM register, not r15, optional trailing ! */
  OP_oRND,	/* Optional Neon double precision register */
  OP_oRNQ,	/* Optional Neon quad precision register */
  OP_oRNDQ,	/* Optional Neon double or quad precision register */
  OP_oRNSDQ,	/* Optional single, double or quad precision vector register */
  OP_oSHll,	/* LSL immediate */
  OP_oSHar,	/* ASR immediate */
  OP_oSHllar,	/* LSL or ASR immediate */
  OP_oROR,	/* ROR 0/8/16/24 */
  OP_oBARRIER,	/* Option argument for a barrier instruction.  */

  /* All optional codes must be >= this one; parse_operands relies on it.  */
  OP_FIRST_OPTIONAL = OP_oI7b
};

/* Generic instruction operand parser.  This does no encoding and no
   semantic validation; it merely squirrels values away in the inst
   structure.  Returns SUCCESS or FAIL depending on whether the
   specified grammar matched.
*/
static int
parse_operands (char *str, const unsigned char *pattern)
{
  unsigned const char *upat = pattern;
  char *backtrack_pos = 0;
  const char *backtrack_error = 0;
  int i, val, backtrack_index = 0;
  enum arm_reg_type rtype;
  parse_operand_result result;

/* Consume CHR from the input or bail out with BAD_ARGS.  */
#define po_char_or_fail(chr) do {		\
  if (skip_past_char (&str, chr) == FAIL)	\
    goto bad_args;				\
} while (0)

/* Parse a register of REGTYPE into operand I, or fail with a
   type-specific error message.  */
#define po_reg_or_fail(regtype) do {				\
  val = arm_typed_reg_parse (&str, regtype, &rtype,		\
			     &inst.operands[i].vectype);	\
  if (val == FAIL)						\
    {								\
      first_error (_(reg_expected_msgs[regtype]));		\
      goto failure;						\
    }								\
  inst.operands[i].reg = val;					\
  inst.operands[i].isreg = 1;					\
  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);		\
  inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
			    || rtype == REG_TYPE_VFD		\
			    || rtype == REG_TYPE_NQ);		\
} while (0)

/* As po_reg_or_fail, but on mismatch jump to LABEL to try an
   alternative interpretation of the operand.  */
#define po_reg_or_goto(regtype, label) do {			\
  val = arm_typed_reg_parse (&str, regtype, &rtype,		\
			     &inst.operands[i].vectype);	\
  if (val == FAIL)						\
    goto label;							\
								\
  inst.operands[i].reg = val;					\
  inst.operands[i].isreg = 1;					\
  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);		\
  inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
			    || rtype == REG_TYPE_VFD		\
			    || rtype == REG_TYPE_NQ);		\
} while (0)

/* Parse an immediate in [MIN, MAX] (POPT: '#' prefix optional).  */
#define po_imm_or_fail(min, max, popt) do {			\
  if (parse_immediate (&str, &val, min, max, popt) == FAIL)	\
    goto failure;						\
  inst.operands[i].imm = val;					\
} while (0)

/* Parse a Neon scalar with element size ELSZ, or jump to LABEL.  */
#define po_scalar_or_goto(elsz, label) do {			\
  val = parse_scalar (&str, elsz, &inst.operands[i].vectype);	\
  if (val == FAIL)						\
    goto label;							\
  inst.operands[i].reg = val;					\
  inst.operands[i].isscalar = 1;				\
} while (0)

#define po_misc_or_fail(expr) do {		\
  if (expr)					\
    goto failure;				\
} while (0)

/* As po_misc_or_fail, but a NO_BACKTRACK result also cancels any
   pending backtrack point.  */
#define po_misc_or_fail_no_backtrack(expr) do {	\
  result = expr;				\
  if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)\
    backtrack_pos = 0;				\
  if (result != PARSE_OPERAND_SUCCESS)		\
    goto failure;				\
} while (0)

  skip_whitespace (str);

  for (i = 0; upat[i] != OP_stop; i++)
    {
      if (upat[i] >= OP_FIRST_OPTIONAL)
	{
	  /* Remember where we are in case we need to backtrack.  */
	  assert (!backtrack_pos);
	  backtrack_pos = str;
	  backtrack_error = inst.error;
	  backtrack_index = i;
	}

      if (i > 0 && (i > 1 || inst.operands[0].present))
	po_char_or_fail (',');

      switch (upat[i])
	{
	  /* Registers */
	case OP_oRRnpc:
	case OP_RRnpc:
	case OP_oRR:
	case OP_RR:    po_reg_or_fail (REG_TYPE_RN);	  break;
	case OP_RCP:   po_reg_or_fail (REG_TYPE_CP);	  break;
	case OP_RCN:   po_reg_or_fail (REG_TYPE_CN);	  break;
	case OP_RF:    po_reg_or_fail (REG_TYPE_FN);	  break;
	case OP_RVS:   po_reg_or_fail (REG_TYPE_VFS);	  break;
	case OP_RVD:   po_reg_or_fail (REG_TYPE_VFD);	  break;
	case OP_oRND:
	case OP_RND:   po_reg_or_fail (REG_TYPE_VFD);	  break;
	case OP_RVC:
	  po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
	  break;
	  /* Also accept generic coprocessor regs for unknown registers.  */
	  coproc_reg:
	  po_reg_or_fail (REG_TYPE_CN);
	  break;
	case OP_RMF:   po_reg_or_fail (REG_TYPE_MVF);	  break;
	case OP_RMD:   po_reg_or_fail (REG_TYPE_MVD);	  break;
	case OP_RMFX:  po_reg_or_fail (REG_TYPE_MVFX);	  break;
	case OP_RMDX:  po_reg_or_fail (REG_TYPE_MVDX);	  break;
	case OP_RMAX:  po_reg_or_fail (REG_TYPE_MVAX);	  break;
	case OP_RMDS:  po_reg_or_fail (REG_TYPE_DSPSC);	  break;
	case OP_RIWR:  po_reg_or_fail (REG_TYPE_MMXWR);	  break;
	case OP_RIWC:  po_reg_or_fail (REG_TYPE_MMXWC);	  break;
	case OP_RIWG:  po_reg_or_fail (REG_TYPE_MMXWCG);  break;
	case OP_RXA:   po_reg_or_fail (REG_TYPE_XSCALE);  break;
	case OP_oRNQ:
	case OP_RNQ:   po_reg_or_fail (REG_TYPE_NQ);	  break;
	case OP_oRNDQ:
	case OP_RNDQ:  po_reg_or_fail (REG_TYPE_NDQ);	  break;
	case OP_RVSD:  po_reg_or_fail (REG_TYPE_VFSD);	  break;
	case OP_oRNSDQ:
	case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ);	  break;

	/* Neon scalar. Using an element size of 8 means that some invalid
	   scalars are accepted here, so deal with those in later code.  */
	case OP_RNSC:  po_scalar_or_goto (8, failure);	  break;

	/* WARNING: We can expand to two operands here. This has the potential
	   to totally confuse the backtracking mechanism! It will be OK at
	   least as long as we don't try to use optional args as well,
	   though.  */
	case OP_NILO:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_imm);
	    inst.operands[i].present = 1;
	    i++;
	    skip_past_comma (&str);
	    po_reg_or_goto (REG_TYPE_NDQ, one_reg_only);
	    break;
	    one_reg_only:
	    /* Optional register operand was omitted. Unfortunately, it's in
	       operands[i-1] and we need it to be in inst.operands[i]. Fix that
	       here (this is a bit grotty).  */
	    inst.operands[i] = inst.operands[i-1];
	    inst.operands[i-1].present = 0;
	    break;
	    try_imm:
	    /* There's a possibility of getting a 64-bit immediate here, so
	       we need special handling.  */
	    if (parse_big_immediate (&str, i) == FAIL)
	      {
		inst.error = _("immediate value is out of range");
		goto failure;
	      }
	  }
	  break;

	case OP_RNDQ_I0:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
	    break;
	    try_imm0:
	    po_imm_or_fail (0, 0, TRUE);
	  }
	  break;

	case OP_RVSD_I0:
	  po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
	  break;

	case OP_RR_RNSC:
	  {
	    po_scalar_or_goto (8, try_rr);
	    break;
	    try_rr:
	    po_reg_or_fail (REG_TYPE_RN);
	  }
	  break;

	case OP_RNSDQ_RNSC:
	  {
	    po_scalar_or_goto (8, try_nsdq);
	    break;
	    try_nsdq:
	    po_reg_or_fail (REG_TYPE_NSDQ);
	  }
	  break;

	case OP_RNDQ_RNSC:
	  {
	    po_scalar_or_goto (8, try_ndq);
	    break;
	    try_ndq:
	    po_reg_or_fail (REG_TYPE_NDQ);
	  }
	  break;

	case OP_RND_RNSC:
	  {
	    po_scalar_or_goto (8, try_vfd);
	    break;
	    try_vfd:
	    po_reg_or_fail (REG_TYPE_VFD);
	  }
	  break;

	case OP_VMOV:
	  /* WARNING: parse_neon_mov can move the operand counter, i. If we're
	     not careful then bad things might happen.  */
	  po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
	  break;

	case OP_RNDQ_IMVNb:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_mvnimm);
	    break;
	    try_mvnimm:
	    /* There's a possibility of getting a 64-bit immediate here, so
	       we need special handling.  */
	    if (parse_big_immediate (&str, i) == FAIL)
	      {
		inst.error = _("immediate value is out of range");
		goto failure;
	      }
	  }
	  break;

	case OP_RNDQ_I63b:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
	    break;
	    try_shimm:
	    po_imm_or_fail (0, 63, TRUE);
	  }
	  break;

	case OP_RRnpcb:
	  po_char_or_fail ('[');
	  po_reg_or_fail  (REG_TYPE_RN);
	  po_char_or_fail (']');
	  break;

	case OP_RRw:
	case OP_oRRw:
	  po_reg_or_fail (REG_TYPE_RN);
	  if (skip_past_char (&str, '!') == SUCCESS)
	    inst.operands[i].writeback = 1;
	  break;

	  /* Immediates */
	case OP_I7:	 po_imm_or_fail (  0,	   7, FALSE);	break;
	case OP_I15:	 po_imm_or_fail (  0,	  15, FALSE);	break;
	case OP_I16:	 po_imm_or_fail (  1,	  16, FALSE);	break;
	case OP_I16z:	 po_imm_or_fail (  0,	  16, FALSE);	break;
	case OP_I31:	 po_imm_or_fail (  0,	  31, FALSE);	break;
	case OP_I32:	 po_imm_or_fail (  1,	  32, FALSE);	break;
	case OP_I32z:	 po_imm_or_fail (  0,	  32, FALSE);	break;
	case OP_I63s:	 po_imm_or_fail (-64,	  63, FALSE);	break;
	case OP_I63:	 po_imm_or_fail (  0,	  63, FALSE);	break;
	case OP_I64:	 po_imm_or_fail (  1,	  64, FALSE);	break;
	case OP_I64z:	 po_imm_or_fail (  0,	  64, FALSE);	break;
	case OP_I255:	 po_imm_or_fail (  0,	 255, FALSE);	break;

	case OP_I4b:	 po_imm_or_fail (  1,	   4, TRUE);	break;
	case OP_oI7b:
	case OP_I7b:	 po_imm_or_fail (  0,	   7, TRUE);	break;
	case OP_I15b:	 po_imm_or_fail (  0,	  15, TRUE);	break;
	case OP_oI31b:
	case OP_I31b:	 po_imm_or_fail (  0,	  31, TRUE);	break;
	case OP_oI32b:	 po_imm_or_fail (  1,	  32, TRUE);	break;
	case OP_oIffffb: po_imm_or_fail (  0, 0xffff, TRUE);	break;

	  /* Immediate variants */
	case OP_oI255c:
	  po_char_or_fail ('{');
	  po_imm_or_fail (0, 255, TRUE);
	  po_char_or_fail ('}');
	  break;

	case OP_I31w:
	  /* The expression parser chokes on a trailing !, so we have
	     to find it first and zap it.  */
	  {
	    char *s = str;
	    while (*s && *s != ',')
	      s++;
	    if (s[-1] == '!')
	      {
		s[-1] = '\0';
		inst.operands[i].writeback = 1;
	      }
	    po_imm_or_fail (0, 31, TRUE);
	    if (str == s - 1)
	      str = s;
	  }
	  break;

	  /* Expressions */
	case OP_EXPi:	EXPi:
	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
					      GE_OPT_PREFIX));
	  break;

	case OP_EXP:
	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
					      GE_NO_PREFIX));
	  break;

	case OP_EXPr:	EXPr:
	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
					      GE_NO_PREFIX));
	  if (inst.reloc.exp.X_op == O_symbol)
	    {
	      val = parse_reloc (&str);
	      if (val == -1)
		{
		  inst.error = _("unrecognized relocation suffix");
		  goto failure;
		}
	      else if (val != BFD_RELOC_UNUSED)
		{
		  inst.operands[i].imm = val;
		  inst.operands[i].hasreloc = 1;
		}
	    }
	  break;

	  /* Operand for MOVW or MOVT.  */
	case OP_HALF:
	  po_misc_or_fail (parse_half (&str));
	  break;

	  /* Register or expression */
	case OP_RR_EXr:	  po_reg_or_goto (REG_TYPE_RN, EXPr); break;
	case OP_RR_EXi:	  po_reg_or_goto (REG_TYPE_RN, EXPi); break;

	  /* Register or immediate */
	case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0);   break;
	I0:		  po_imm_or_fail (0, 0, FALSE);	      break;

	case OP_RF_IF:	  po_reg_or_goto (REG_TYPE_FN, IF);   break;
	IF:
	  if (!is_immediate_prefix (*str))
	    goto bad_args;
	  str++;
	  val = parse_fpa_immediate (&str);
	  if (val == FAIL)
	    goto failure;
	  /* FPA immediates are encoded as registers 8-15.
	     parse_fpa_immediate has already applied the offset.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  break;

	case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
	I32z:		  po_imm_or_fail (0, 32, FALSE);	  break;

	  /* Two kinds of register */
	case OP_RIWR_RIWC:
	  {
	    struct reg_entry *rege = arm_reg_parse_multi (&str);
	    if (!rege
		|| (rege->type != REG_TYPE_MMXWR
		    && rege->type != REG_TYPE_MMXWC
		    && rege->type != REG_TYPE_MMXWCG))
	      {
		inst.error = _("iWMMXt data or control register expected");
		goto failure;
	      }
	    inst.operands[i].reg = rege->number;
	    inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
	  }
	  break;

	case OP_RIWC_RIWG:
	  {
	    struct reg_entry *rege = arm_reg_parse_multi (&str);
	    if (!rege
		|| (rege->type != REG_TYPE_MMXWC
		    && rege->type != REG_TYPE_MMXWCG))
	      {
		inst.error = _("iWMMXt control register expected");
		goto failure;
	      }
	    inst.operands[i].reg = rege->number;
	    inst.operands[i].isreg = 1;
	  }
	  break;

	  /* Misc */
	case OP_CPSF:	 val = parse_cps_flags (&str);		break;
	case OP_ENDI:	 val = parse_endian_specifier (&str);	break;
	case OP_oROR:	 val = parse_ror (&str);		break;
	case OP_PSR:	 val = parse_psr (&str);		break;
	case OP_COND:	 val = parse_cond (&str);		break;
	case OP_oBARRIER:val = parse_barrier (&str);		break;

	case OP_RVC_PSR:
	  po_reg_or_goto (REG_TYPE_VFC, try_psr);
	  inst.operands[i].isvec = 1;  /* Mark VFP control reg as vector.  */
	  break;
	  try_psr:
	  val = parse_psr (&str);
	  break;

	case OP_APSR_RR:
	  po_reg_or_goto (REG_TYPE_RN, try_apsr);
	  break;
	  try_apsr:
	  /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
	     instruction).  */
	  if (strncasecmp (str, "APSR_", 5) == 0)
	    {
	      /* FOUND accumulates one bit per flag; 16 marks a duplicate
		 or unknown flag.  All four flags must appear once.  */
	      unsigned found = 0;
	      str += 5;
	      while (found < 15)
		switch (*str++)
		  {
		  case 'c': found = (found & 1) ? 16 : found | 1; break;
		  case 'n': found = (found & 2) ? 16 : found | 2; break;
		  case 'z': found = (found & 4) ? 16 : found | 4; break;
		  case 'v': found = (found & 8) ? 16 : found | 8; break;
		  default: found = 16;
		  }
	      if (found != 15)
		goto failure;
	      inst.operands[i].isvec = 1;
	    }
	  else
	    goto failure;
	  break;

	case OP_TB:
	  po_misc_or_fail (parse_tb (&str));
	  break;

	  /* Register lists */
	case OP_REGLST:
	  val = parse_reg_list (&str);
	  if (*str == '^')
	    {
	      /* NOTE(review): operand index 1 is hard-coded here rather
		 than using I — presumably because register lists always
		 appear as operand 1 in the patterns that use '^'; confirm
		 against the OP_REGLST users before changing.  */
	      inst.operands[1].writeback = 1;
	      str++;
	    }
	  break;

	case OP_VRSLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
	  break;

	case OP_VRDLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
	  break;

	case OP_VRSDLST:
	  /* Allow Q registers too.  */
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_NEON_D);
	  if (val == FAIL)
	    {
	      inst.error = NULL;
	      val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
					REGLIST_VFP_S);
	      inst.operands[i].issingle = 1;
	    }
	  break;

	case OP_NRDLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_NEON_D);
	  break;

	case OP_NSTRLST:
	  val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
					   &inst.operands[i].vectype);
	  break;

	  /* Addressing modes */
	case OP_ADDR:
	  po_misc_or_fail (parse_address (&str, i));
	  break;

	case OP_ADDRGLDR:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDR));
	  break;

	case OP_ADDRGLDRS:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDRS));
	  break;

	case OP_ADDRGLDC:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDC));
	  break;

	case OP_SH:
	  po_misc_or_fail (parse_shifter_operand (&str, i));
	  break;

	case OP_SHG:
	  po_misc_or_fail_no_backtrack (
	    parse_shifter_operand_group_reloc (&str, i));
	  break;

	case OP_oSHll:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
	  break;

	case OP_oSHar:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
	  break;

	case OP_oSHllar:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
	  break;

	default:
	  as_fatal ("unhandled operand code %d", upat[i]);
	}

      /* Various value-based sanity checks and shared operations.  We
	 do not signal immediate failures for the register constraints;
	 this allows a syntax error to take precedence.	 */
      switch (upat[i])
	{
	case OP_oRRnpc:
	case OP_RRnpc:
	case OP_RRnpcb:
	case OP_RRw:
	case OP_oRRw:
	case OP_RRnpc_I0:
	  if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
	    inst.error = BAD_PC;
	  break;

	case OP_CPSF:
	case OP_ENDI:
	case OP_oROR:
	case OP_PSR:
	case OP_RVC_PSR:
	case OP_COND:
	case OP_oBARRIER:
	case OP_REGLST:
	case OP_VRSLST:
	case OP_VRDLST:
	case OP_VRSDLST:
	case OP_NRDLST:
	case OP_NSTRLST:
	  if (val == FAIL)
	    goto failure;
	  inst.operands[i].imm = val;
	  break;

	default:
	  break;
	}

      /* If we get here, this operand was successfully parsed.	*/
      inst.operands[i].present = 1;
      continue;

    bad_args:
      inst.error = BAD_ARGS;

    failure:
      if (!backtrack_pos)
	{
	  /* The parse routine should already have set inst.error, but set a
	     default here just in case.  */
	  if (!inst.error)
	    inst.error = _("syntax error");
	  return FAIL;
	}

      /* Do not backtrack over a trailing optional argument that
	 absorbed some text.  We will only fail again, with the
	 'garbage following instruction' error message, which is
	 probably less helpful than the current one.  */
      if (backtrack_index == i && backtrack_pos != str
	  && upat[i+1] == OP_stop)
	{
	  if (!inst.error)
	    inst.error = _("syntax error");
	  return FAIL;
	}

      /* Try again, skipping the optional argument at backtrack_pos.  */
      str = backtrack_pos;
      inst.error = backtrack_error;
      inst.operands[backtrack_index].present = 0;
      i = backtrack_index;
      backtrack_pos = 0;
    }

  /* Check that we have parsed all the arguments.  */
  if (*str != '\0' && !inst.error)
    inst.error = _("garbage following instruction");

  return inst.error ? FAIL : SUCCESS;
}

#undef po_char_or_fail
#undef po_reg_or_fail
#undef po_reg_or_goto
#undef po_imm_or_fail
/* NOTE(review): the macro defined above is po_scalar_or_goto, not
   po_scalar_or_fail — this #undef leaves po_scalar_or_goto defined;
   confirm whether that is intentional.  */
#undef po_scalar_or_fail

/* Shorthand macro for instruction encoding functions issuing errors.  */
#define constraint(expr, err) do {		\
  if (expr)					\
    {						\
      inst.error = err;				\
      return;					\
    }						\
} while (0)

/* Functions for operand encoding.  ARM, then Thumb.  */

#define rotate_left(v, n) (v << n | v >> (32 - n))

/* If VAL can be encoded in the immediate field of an ARM instruction,
   return the encoded form.  Otherwise, return FAIL.  */

static unsigned int
encode_arm_immediate (unsigned int val)
{
  unsigned int a, i;

  /* Try every even rotation until the value fits in 8 bits.  */
  for (i = 0; i < 32; i += 2)
    if ((a = rotate_left (val, i)) <= 0xff)
      return a | (i << 7); /* 12-bit pack: [shift-cnt,const].  */

  return FAIL;
}

/* If VAL can be encoded in the immediate field of a Thumb32 instruction,
   return the encoded form.  Otherwise, return FAIL.
*/ 6080static unsigned int 6081encode_thumb32_immediate (unsigned int val) 6082{ 6083 unsigned int a, i; 6084 6085 if (val <= 0xff) 6086 return val; 6087 6088 for (i = 1; i <= 24; i++) 6089 { 6090 a = val >> i; 6091 if ((val & ~(0xff << i)) == 0) 6092 return ((val >> i) & 0x7f) | ((32 - i) << 7); 6093 } 6094 6095 a = val & 0xff; 6096 if (val == ((a << 16) | a)) 6097 return 0x100 | a; 6098 if (val == ((a << 24) | (a << 16) | (a << 8) | a)) 6099 return 0x300 | a; 6100 6101 a = val & 0xff00; 6102 if (val == ((a << 16) | a)) 6103 return 0x200 | (a >> 8); 6104 6105 return FAIL; 6106} 6107/* Encode a VFP SP or DP register number into inst.instruction. */ 6108 6109static void 6110encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos) 6111{ 6112 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm) 6113 && reg > 15) 6114 { 6115 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3)) 6116 { 6117 if (thumb_mode) 6118 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, 6119 fpu_vfp_ext_v3); 6120 else 6121 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, 6122 fpu_vfp_ext_v3); 6123 } 6124 else 6125 { 6126 first_error (_("D register out of range for selected VFP version")); 6127 return; 6128 } 6129 } 6130 6131 switch (pos) 6132 { 6133 case VFP_REG_Sd: 6134 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22); 6135 break; 6136 6137 case VFP_REG_Sn: 6138 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7); 6139 break; 6140 6141 case VFP_REG_Sm: 6142 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5); 6143 break; 6144 6145 case VFP_REG_Dd: 6146 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22); 6147 break; 6148 6149 case VFP_REG_Dn: 6150 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7); 6151 break; 6152 6153 case VFP_REG_Dm: 6154 inst.instruction |= (reg & 15) | ((reg >> 4) << 5); 6155 break; 6156 6157 default: 6158 abort (); 6159 } 6160} 6161 6162/* Encode a <shift> in an ARM-format instruction. 
The immediate, 6163 if any, is handled by md_apply_fix. */ 6164static void 6165encode_arm_shift (int i) 6166{ 6167 if (inst.operands[i].shift_kind == SHIFT_RRX) 6168 inst.instruction |= SHIFT_ROR << 5; 6169 else 6170 { 6171 inst.instruction |= inst.operands[i].shift_kind << 5; 6172 if (inst.operands[i].immisreg) 6173 { 6174 inst.instruction |= SHIFT_BY_REG; 6175 inst.instruction |= inst.operands[i].imm << 8; 6176 } 6177 else 6178 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM; 6179 } 6180} 6181 6182static void 6183encode_arm_shifter_operand (int i) 6184{ 6185 if (inst.operands[i].isreg) 6186 { 6187 inst.instruction |= inst.operands[i].reg; 6188 encode_arm_shift (i); 6189 } 6190 else 6191 inst.instruction |= INST_IMMEDIATE; 6192} 6193 6194/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */ 6195static void 6196encode_arm_addr_mode_common (int i, bfd_boolean is_t) 6197{ 6198 assert (inst.operands[i].isreg); 6199 inst.instruction |= inst.operands[i].reg << 16; 6200 6201 if (inst.operands[i].preind) 6202 { 6203 if (is_t) 6204 { 6205 inst.error = _("instruction does not accept preindexed addressing"); 6206 return; 6207 } 6208 inst.instruction |= PRE_INDEX; 6209 if (inst.operands[i].writeback) 6210 inst.instruction |= WRITE_BACK; 6211 6212 } 6213 else if (inst.operands[i].postind) 6214 { 6215 assert (inst.operands[i].writeback); 6216 if (is_t) 6217 inst.instruction |= WRITE_BACK; 6218 } 6219 else /* unindexed - only for coprocessor */ 6220 { 6221 inst.error = _("instruction does not accept unindexed addressing"); 6222 return; 6223 } 6224 6225 if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX)) 6226 && (((inst.instruction & 0x000f0000) >> 16) 6227 == ((inst.instruction & 0x0000f000) >> 12))) 6228 as_warn ((inst.instruction & LOAD_BIT) 6229 ? _("destination register same as write-back base") 6230 : _("source register same as write-back base")); 6231} 6232 6233/* inst.operands[i] was set up by parse_address. 
Encode it into an 6234 ARM-format mode 2 load or store instruction. If is_t is true, 6235 reject forms that cannot be used with a T instruction (i.e. not 6236 post-indexed). */ 6237static void 6238encode_arm_addr_mode_2 (int i, bfd_boolean is_t) 6239{ 6240 encode_arm_addr_mode_common (i, is_t); 6241 6242 if (inst.operands[i].immisreg) 6243 { 6244 inst.instruction |= INST_IMMEDIATE; /* yes, this is backwards */ 6245 inst.instruction |= inst.operands[i].imm; 6246 if (!inst.operands[i].negative) 6247 inst.instruction |= INDEX_UP; 6248 if (inst.operands[i].shifted) 6249 { 6250 if (inst.operands[i].shift_kind == SHIFT_RRX) 6251 inst.instruction |= SHIFT_ROR << 5; 6252 else 6253 { 6254 inst.instruction |= inst.operands[i].shift_kind << 5; 6255 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM; 6256 } 6257 } 6258 } 6259 else /* immediate offset in inst.reloc */ 6260 { 6261 if (inst.reloc.type == BFD_RELOC_UNUSED) 6262 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM; 6263 } 6264} 6265 6266/* inst.operands[i] was set up by parse_address. Encode it into an 6267 ARM-format mode 3 load or store instruction. Reject forms that 6268 cannot be used with such instructions. If is_t is true, reject 6269 forms that cannot be used with a T instruction (i.e. not 6270 post-indexed). */ 6271static void 6272encode_arm_addr_mode_3 (int i, bfd_boolean is_t) 6273{ 6274 if (inst.operands[i].immisreg && inst.operands[i].shifted) 6275 { 6276 inst.error = _("instruction does not accept scaled register index"); 6277 return; 6278 } 6279 6280 encode_arm_addr_mode_common (i, is_t); 6281 6282 if (inst.operands[i].immisreg) 6283 { 6284 inst.instruction |= inst.operands[i].imm; 6285 if (!inst.operands[i].negative) 6286 inst.instruction |= INDEX_UP; 6287 } 6288 else /* immediate offset in inst.reloc */ 6289 { 6290 inst.instruction |= HWOFFSET_IMM; 6291 if (inst.reloc.type == BFD_RELOC_UNUSED) 6292 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8; 6293 } 6294} 6295 6296/* inst.operands[i] was set up by parse_address. 
Encode it into an 6297 ARM-format instruction. Reject all forms which cannot be encoded 6298 into a coprocessor load/store instruction. If wb_ok is false, 6299 reject use of writeback; if unind_ok is false, reject use of 6300 unindexed addressing. If reloc_override is not 0, use it instead 6301 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one 6302 (in which case it is preserved). */ 6303 6304static int 6305encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override) 6306{ 6307 inst.instruction |= inst.operands[i].reg << 16; 6308 6309 assert (!(inst.operands[i].preind && inst.operands[i].postind)); 6310 6311 if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */ 6312 { 6313 assert (!inst.operands[i].writeback); 6314 if (!unind_ok) 6315 { 6316 inst.error = _("instruction does not support unindexed addressing"); 6317 return FAIL; 6318 } 6319 inst.instruction |= inst.operands[i].imm; 6320 inst.instruction |= INDEX_UP; 6321 return SUCCESS; 6322 } 6323 6324 if (inst.operands[i].preind) 6325 inst.instruction |= PRE_INDEX; 6326 6327 if (inst.operands[i].writeback) 6328 { 6329 if (inst.operands[i].reg == REG_PC) 6330 { 6331 inst.error = _("pc may not be used with write-back"); 6332 return FAIL; 6333 } 6334 if (!wb_ok) 6335 { 6336 inst.error = _("instruction does not support writeback"); 6337 return FAIL; 6338 } 6339 inst.instruction |= WRITE_BACK; 6340 } 6341 6342 if (reloc_override) 6343 inst.reloc.type = reloc_override; 6344 else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC 6345 || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2) 6346 && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0) 6347 { 6348 if (thumb_mode) 6349 inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM; 6350 else 6351 inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM; 6352 } 6353 6354 return SUCCESS; 6355} 6356 6357/* inst.reloc.exp describes an "=expr" load pseudo-operation. 
6358 Determine whether it can be performed with a move instruction; if 6359 it can, convert inst.instruction to that move instruction and 6360 return 1; if it can't, convert inst.instruction to a literal-pool 6361 load and return 0. If this is not a valid thing to do in the 6362 current context, set inst.error and return 1. 6363 6364 inst.operands[i] describes the destination register. */ 6365 6366static int 6367move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3) 6368{ 6369 unsigned long tbit; 6370 6371 if (thumb_p) 6372 tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT; 6373 else 6374 tbit = LOAD_BIT; 6375 6376 if ((inst.instruction & tbit) == 0) 6377 { 6378 inst.error = _("invalid pseudo operation"); 6379 return 1; 6380 } 6381 if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol) 6382 { 6383 inst.error = _("constant expression expected"); 6384 return 1; 6385 } 6386 if (inst.reloc.exp.X_op == O_constant) 6387 { 6388 if (thumb_p) 6389 { 6390 if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0) 6391 { 6392 /* This can be done with a mov(1) instruction. */ 6393 inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8); 6394 inst.instruction |= inst.reloc.exp.X_add_number; 6395 return 1; 6396 } 6397 } 6398 else 6399 { 6400 int value = encode_arm_immediate (inst.reloc.exp.X_add_number); 6401 if (value != FAIL) 6402 { 6403 /* This can be done with a mov instruction. */ 6404 inst.instruction &= LITERAL_MASK; 6405 inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT); 6406 inst.instruction |= value & 0xfff; 6407 return 1; 6408 } 6409 6410 value = encode_arm_immediate (~inst.reloc.exp.X_add_number); 6411 if (value != FAIL) 6412 { 6413 /* This can be done with a mvn instruction. 
*/ 6414 inst.instruction &= LITERAL_MASK; 6415 inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT); 6416 inst.instruction |= value & 0xfff; 6417 return 1; 6418 } 6419 } 6420 } 6421 6422 if (add_to_lit_pool () == FAIL) 6423 { 6424 inst.error = _("literal pool insertion failed"); 6425 return 1; 6426 } 6427 inst.operands[1].reg = REG_PC; 6428 inst.operands[1].isreg = 1; 6429 inst.operands[1].preind = 1; 6430 inst.reloc.pc_rel = 1; 6431 inst.reloc.type = (thumb_p 6432 ? BFD_RELOC_ARM_THUMB_OFFSET 6433 : (mode_3 6434 ? BFD_RELOC_ARM_HWLITERAL 6435 : BFD_RELOC_ARM_LITERAL)); 6436 return 0; 6437} 6438 6439/* Functions for instruction encoding, sorted by subarchitecture. 6440 First some generics; their names are taken from the conventional 6441 bit positions for register arguments in ARM format instructions. */ 6442 6443static void 6444do_noargs (void) 6445{ 6446} 6447 6448static void 6449do_rd (void) 6450{ 6451 inst.instruction |= inst.operands[0].reg << 12; 6452} 6453 6454static void 6455do_rd_rm (void) 6456{ 6457 inst.instruction |= inst.operands[0].reg << 12; 6458 inst.instruction |= inst.operands[1].reg; 6459} 6460 6461static void 6462do_rd_rn (void) 6463{ 6464 inst.instruction |= inst.operands[0].reg << 12; 6465 inst.instruction |= inst.operands[1].reg << 16; 6466} 6467 6468static void 6469do_rn_rd (void) 6470{ 6471 inst.instruction |= inst.operands[0].reg << 16; 6472 inst.instruction |= inst.operands[1].reg << 12; 6473} 6474 6475static void 6476do_rd_rm_rn (void) 6477{ 6478 unsigned Rn = inst.operands[2].reg; 6479 /* Enforce restrictions on SWP instruction. 
*/ 6480 if ((inst.instruction & 0x0fbfffff) == 0x01000090) 6481 constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg, 6482 _("Rn must not overlap other operands")); 6483 inst.instruction |= inst.operands[0].reg << 12; 6484 inst.instruction |= inst.operands[1].reg; 6485 inst.instruction |= Rn << 16; 6486} 6487 6488static void 6489do_rd_rn_rm (void) 6490{ 6491 inst.instruction |= inst.operands[0].reg << 12; 6492 inst.instruction |= inst.operands[1].reg << 16; 6493 inst.instruction |= inst.operands[2].reg; 6494} 6495 6496static void 6497do_rm_rd_rn (void) 6498{ 6499 inst.instruction |= inst.operands[0].reg; 6500 inst.instruction |= inst.operands[1].reg << 12; 6501 inst.instruction |= inst.operands[2].reg << 16; 6502} 6503 6504static void 6505do_imm0 (void) 6506{ 6507 inst.instruction |= inst.operands[0].imm; 6508} 6509 6510static void 6511do_rd_cpaddr (void) 6512{ 6513 inst.instruction |= inst.operands[0].reg << 12; 6514 encode_arm_cp_address (1, TRUE, TRUE, 0); 6515} 6516 6517/* ARM instructions, in alphabetical order by function name (except 6518 that wrapper functions appear immediately after the function they 6519 wrap). */ 6520 6521/* This is a pseudo-op of the form "adr rd, label" to be converted 6522 into a relative address of the form "add rd, pc, #label-.-8". */ 6523 6524static void 6525do_adr (void) 6526{ 6527 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */ 6528 6529 /* Frag hacking will turn this into a sub instruction if the offset turns 6530 out to be negative. 
*/ 6531 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE; 6532 inst.reloc.pc_rel = 1; 6533 inst.reloc.exp.X_add_number -= 8; 6534} 6535 6536/* This is a pseudo-op of the form "adrl rd, label" to be converted 6537 into a relative address of the form: 6538 add rd, pc, #low(label-.-8)" 6539 add rd, rd, #high(label-.-8)" */ 6540 6541static void 6542do_adrl (void) 6543{ 6544 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */ 6545 6546 /* Frag hacking will turn this into a sub instruction if the offset turns 6547 out to be negative. */ 6548 inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE; 6549 inst.reloc.pc_rel = 1; 6550 inst.size = INSN_SIZE * 2; 6551 inst.reloc.exp.X_add_number -= 8; 6552} 6553 6554static void 6555do_arit (void) 6556{ 6557 if (!inst.operands[1].present) 6558 inst.operands[1].reg = inst.operands[0].reg; 6559 inst.instruction |= inst.operands[0].reg << 12; 6560 inst.instruction |= inst.operands[1].reg << 16; 6561 encode_arm_shifter_operand (2); 6562} 6563 6564static void 6565do_barrier (void) 6566{ 6567 if (inst.operands[0].present) 6568 { 6569 constraint ((inst.instruction & 0xf0) != 0x40 6570 && inst.operands[0].imm != 0xf, 6571 "bad barrier type"); 6572 inst.instruction |= inst.operands[0].imm; 6573 } 6574 else 6575 inst.instruction |= 0xf; 6576} 6577 6578static void 6579do_bfc (void) 6580{ 6581 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm; 6582 constraint (msb > 32, _("bit-field extends past end of register")); 6583 /* The instruction encoding stores the LSB and MSB, 6584 not the LSB and width. */ 6585 inst.instruction |= inst.operands[0].reg << 12; 6586 inst.instruction |= inst.operands[1].imm << 7; 6587 inst.instruction |= (msb - 1) << 16; 6588} 6589 6590static void 6591do_bfi (void) 6592{ 6593 unsigned int msb; 6594 6595 /* #0 in second position is alternative syntax for bfc, which is 6596 the same instruction but with REG_PC in the Rm field. 
*/ 6597 if (!inst.operands[1].isreg) 6598 inst.operands[1].reg = REG_PC; 6599 6600 msb = inst.operands[2].imm + inst.operands[3].imm; 6601 constraint (msb > 32, _("bit-field extends past end of register")); 6602 /* The instruction encoding stores the LSB and MSB, 6603 not the LSB and width. */ 6604 inst.instruction |= inst.operands[0].reg << 12; 6605 inst.instruction |= inst.operands[1].reg; 6606 inst.instruction |= inst.operands[2].imm << 7; 6607 inst.instruction |= (msb - 1) << 16; 6608} 6609 6610static void 6611do_bfx (void) 6612{ 6613 constraint (inst.operands[2].imm + inst.operands[3].imm > 32, 6614 _("bit-field extends past end of register")); 6615 inst.instruction |= inst.operands[0].reg << 12; 6616 inst.instruction |= inst.operands[1].reg; 6617 inst.instruction |= inst.operands[2].imm << 7; 6618 inst.instruction |= (inst.operands[3].imm - 1) << 16; 6619} 6620 6621/* ARM V5 breakpoint instruction (argument parse) 6622 BKPT <16 bit unsigned immediate> 6623 Instruction is not conditional. 6624 The bit pattern given in insns[] has the COND_ALWAYS condition, 6625 and it is an error if the caller tried to override that. */ 6626 6627static void 6628do_bkpt (void) 6629{ 6630 /* Top 12 of 16 bits to bits 19:8. */ 6631 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4; 6632 6633 /* Bottom 4 of 16 bits to bits 3:0. 
*/ 6634 inst.instruction |= inst.operands[0].imm & 0xf; 6635} 6636 6637static void 6638encode_branch (int default_reloc) 6639{ 6640 if (inst.operands[0].hasreloc) 6641 { 6642 constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32, 6643 _("the only suffix valid here is '(plt)'")); 6644 inst.reloc.type = BFD_RELOC_ARM_PLT32; 6645 } 6646 else 6647 { 6648 inst.reloc.type = default_reloc; 6649 } 6650 inst.reloc.pc_rel = 1; 6651} 6652 6653static void 6654do_branch (void) 6655{ 6656#ifdef OBJ_ELF 6657 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4) 6658 encode_branch (BFD_RELOC_ARM_PCREL_JUMP); 6659 else 6660#endif 6661 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH); 6662} 6663 6664static void 6665do_bl (void) 6666{ 6667#ifdef OBJ_ELF 6668 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4) 6669 { 6670 if (inst.cond == COND_ALWAYS) 6671 encode_branch (BFD_RELOC_ARM_PCREL_CALL); 6672 else 6673 encode_branch (BFD_RELOC_ARM_PCREL_JUMP); 6674 } 6675 else 6676#endif 6677 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH); 6678} 6679 6680/* ARM V5 branch-link-exchange instruction (argument parse) 6681 BLX <target_addr> ie BLX(1) 6682 BLX{<condition>} <Rm> ie BLX(2) 6683 Unfortunately, there are two different opcodes for this mnemonic. 6684 So, the insns[].value is not used, and the code here zaps values 6685 into inst.instruction. 6686 Also, the <target_addr> can be 25 bits, hence has its own reloc. */ 6687 6688static void 6689do_blx (void) 6690{ 6691 if (inst.operands[0].isreg) 6692 { 6693 /* Arg is a register; the opcode provided by insns[] is correct. 6694 It is not illegal to do "blx pc", just useless. */ 6695 if (inst.operands[0].reg == REG_PC) 6696 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful")); 6697 6698 inst.instruction |= inst.operands[0].reg; 6699 } 6700 else 6701 { 6702 /* Arg is an address; this instruction cannot be executed 6703 conditionally, and the opcode must be adjusted. 
*/ 6704 constraint (inst.cond != COND_ALWAYS, BAD_COND); 6705 inst.instruction = 0xfa000000; 6706#ifdef OBJ_ELF 6707 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4) 6708 encode_branch (BFD_RELOC_ARM_PCREL_CALL); 6709 else 6710#endif 6711 encode_branch (BFD_RELOC_ARM_PCREL_BLX); 6712 } 6713} 6714 6715static void 6716do_bx (void) 6717{ 6718 if (inst.operands[0].reg == REG_PC) 6719 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful")); 6720 6721 inst.instruction |= inst.operands[0].reg; 6722} 6723 6724 6725/* ARM v5TEJ. Jump to Jazelle code. */ 6726 6727static void 6728do_bxj (void) 6729{ 6730 if (inst.operands[0].reg == REG_PC) 6731 as_tsktsk (_("use of r15 in bxj is not really useful")); 6732 6733 inst.instruction |= inst.operands[0].reg; 6734} 6735 6736/* Co-processor data operation: 6737 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} 6738 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */ 6739static void 6740do_cdp (void) 6741{ 6742 inst.instruction |= inst.operands[0].reg << 8; 6743 inst.instruction |= inst.operands[1].imm << 20; 6744 inst.instruction |= inst.operands[2].reg << 12; 6745 inst.instruction |= inst.operands[3].reg << 16; 6746 inst.instruction |= inst.operands[4].reg; 6747 inst.instruction |= inst.operands[5].imm << 5; 6748} 6749 6750static void 6751do_cmp (void) 6752{ 6753 inst.instruction |= inst.operands[0].reg << 16; 6754 encode_arm_shifter_operand (1); 6755} 6756 6757/* Transfer between coprocessor and ARM registers. 6758 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>} 6759 MRC2 6760 MCR{cond} 6761 MCR2 6762 6763 No special properties. 
*/ 6764 6765static void 6766do_co_reg (void) 6767{ 6768 inst.instruction |= inst.operands[0].reg << 8; 6769 inst.instruction |= inst.operands[1].imm << 21; 6770 inst.instruction |= inst.operands[2].reg << 12; 6771 inst.instruction |= inst.operands[3].reg << 16; 6772 inst.instruction |= inst.operands[4].reg; 6773 inst.instruction |= inst.operands[5].imm << 5; 6774} 6775 6776/* Transfer between coprocessor register and pair of ARM registers. 6777 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>. 6778 MCRR2 6779 MRRC{cond} 6780 MRRC2 6781 6782 Two XScale instructions are special cases of these: 6783 6784 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0 6785 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0 6786 6787 Result unpredicatable if Rd or Rn is R15. */ 6788 6789static void 6790do_co_reg2c (void) 6791{ 6792 inst.instruction |= inst.operands[0].reg << 8; 6793 inst.instruction |= inst.operands[1].imm << 4; 6794 inst.instruction |= inst.operands[2].reg << 12; 6795 inst.instruction |= inst.operands[3].reg << 16; 6796 inst.instruction |= inst.operands[4].reg; 6797} 6798 6799static void 6800do_cpsi (void) 6801{ 6802 inst.instruction |= inst.operands[0].imm << 6; 6803 if (inst.operands[1].present) 6804 { 6805 inst.instruction |= CPSI_MMOD; 6806 inst.instruction |= inst.operands[1].imm; 6807 } 6808} 6809 6810static void 6811do_dbg (void) 6812{ 6813 inst.instruction |= inst.operands[0].imm; 6814} 6815 6816static void 6817do_it (void) 6818{ 6819 /* There is no IT instruction in ARM mode. We 6820 process it but do not generate code for it. 
*/ 6821 inst.size = 0; 6822} 6823 6824static void 6825do_ldmstm (void) 6826{ 6827 int base_reg = inst.operands[0].reg; 6828 int range = inst.operands[1].imm; 6829 6830 inst.instruction |= base_reg << 16; 6831 inst.instruction |= range; 6832 6833 if (inst.operands[1].writeback) 6834 inst.instruction |= LDM_TYPE_2_OR_3; 6835 6836 if (inst.operands[0].writeback) 6837 { 6838 inst.instruction |= WRITE_BACK; 6839 /* Check for unpredictable uses of writeback. */ 6840 if (inst.instruction & LOAD_BIT) 6841 { 6842 /* Not allowed in LDM type 2. */ 6843 if ((inst.instruction & LDM_TYPE_2_OR_3) 6844 && ((range & (1 << REG_PC)) == 0)) 6845 as_warn (_("writeback of base register is UNPREDICTABLE")); 6846 /* Only allowed if base reg not in list for other types. */ 6847 else if (range & (1 << base_reg)) 6848 as_warn (_("writeback of base register when in register list is UNPREDICTABLE")); 6849 } 6850 else /* STM. */ 6851 { 6852 /* Not allowed for type 2. */ 6853 if (inst.instruction & LDM_TYPE_2_OR_3) 6854 as_warn (_("writeback of base register is UNPREDICTABLE")); 6855 /* Only allowed if base reg not in list, or first in list. */ 6856 else if ((range & (1 << base_reg)) 6857 && (range & ((1 << base_reg) - 1))) 6858 as_warn (_("if writeback register is in list, it must be the lowest reg in the list")); 6859 } 6860 } 6861} 6862 6863/* ARMv5TE load-consecutive (argument parse) 6864 Mode is like LDRH. 6865 6866 LDRccD R, mode 6867 STRccD R, mode. 
*/ 6868 6869static void 6870do_ldrd (void) 6871{ 6872 constraint (inst.operands[0].reg % 2 != 0, 6873 _("first destination register must be even")); 6874 constraint (inst.operands[1].present 6875 && inst.operands[1].reg != inst.operands[0].reg + 1, 6876 _("can only load two consecutive registers")); 6877 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here")); 6878 constraint (!inst.operands[2].isreg, _("'[' expected")); 6879 6880 if (!inst.operands[1].present) 6881 inst.operands[1].reg = inst.operands[0].reg + 1; 6882 6883 if (inst.instruction & LOAD_BIT) 6884 { 6885 /* encode_arm_addr_mode_3 will diagnose overlap between the base 6886 register and the first register written; we have to diagnose 6887 overlap between the base and the second register written here. */ 6888 6889 if (inst.operands[2].reg == inst.operands[1].reg 6890 && (inst.operands[2].writeback || inst.operands[2].postind)) 6891 as_warn (_("base register written back, and overlaps " 6892 "second destination register")); 6893 6894 /* For an index-register load, the index register must not overlap the 6895 destination (even if not write-back). 
*/ 6896 else if (inst.operands[2].immisreg 6897 && ((unsigned) inst.operands[2].imm == inst.operands[0].reg 6898 || (unsigned) inst.operands[2].imm == inst.operands[1].reg)) 6899 as_warn (_("index register overlaps destination register")); 6900 } 6901 6902 inst.instruction |= inst.operands[0].reg << 12; 6903 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE); 6904} 6905 6906static void 6907do_ldrex (void) 6908{ 6909 constraint (!inst.operands[1].isreg || !inst.operands[1].preind 6910 || inst.operands[1].postind || inst.operands[1].writeback 6911 || inst.operands[1].immisreg || inst.operands[1].shifted 6912 || inst.operands[1].negative 6913 /* This can arise if the programmer has written 6914 strex rN, rM, foo 6915 or if they have mistakenly used a register name as the last 6916 operand, eg: 6917 strex rN, rM, rX 6918 It is very difficult to distinguish between these two cases 6919 because "rX" might actually be a label. ie the register 6920 name has been occluded by a symbol of the same name. So we 6921 just generate a general 'bad addressing mode' type error 6922 message and leave it up to the programmer to discover the 6923 true cause and fix their mistake. */ 6924 || (inst.operands[1].reg == REG_PC), 6925 BAD_ADDR_MODE); 6926 6927 constraint (inst.reloc.exp.X_op != O_constant 6928 || inst.reloc.exp.X_add_number != 0, 6929 _("offset must be zero in ARM encoding")); 6930 6931 inst.instruction |= inst.operands[0].reg << 12; 6932 inst.instruction |= inst.operands[1].reg << 16; 6933 inst.reloc.type = BFD_RELOC_UNUSED; 6934} 6935 6936static void 6937do_ldrexd (void) 6938{ 6939 constraint (inst.operands[0].reg % 2 != 0, 6940 _("even register required")); 6941 constraint (inst.operands[1].present 6942 && inst.operands[1].reg != inst.operands[0].reg + 1, 6943 _("can only load two consecutive registers")); 6944 /* If op 1 were present and equal to PC, this function wouldn't 6945 have been called in the first place. 
*/ 6946 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here")); 6947 6948 inst.instruction |= inst.operands[0].reg << 12; 6949 inst.instruction |= inst.operands[2].reg << 16; 6950} 6951 6952static void 6953do_ldst (void) 6954{ 6955 inst.instruction |= inst.operands[0].reg << 12; 6956 if (!inst.operands[1].isreg) 6957 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE)) 6958 return; 6959 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE); 6960} 6961 6962static void 6963do_ldstt (void) 6964{ 6965 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and 6966 reject [Rn,...]. */ 6967 if (inst.operands[1].preind) 6968 { 6969 constraint (inst.reloc.exp.X_op != O_constant || 6970 inst.reloc.exp.X_add_number != 0, 6971 _("this instruction requires a post-indexed address")); 6972 6973 inst.operands[1].preind = 0; 6974 inst.operands[1].postind = 1; 6975 inst.operands[1].writeback = 1; 6976 } 6977 inst.instruction |= inst.operands[0].reg << 12; 6978 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE); 6979} 6980 6981/* Halfword and signed-byte load/store operations. */ 6982 6983static void 6984do_ldstv4 (void) 6985{ 6986 inst.instruction |= inst.operands[0].reg << 12; 6987 if (!inst.operands[1].isreg) 6988 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE)) 6989 return; 6990 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE); 6991} 6992 6993static void 6994do_ldsttv4 (void) 6995{ 6996 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and 6997 reject [Rn,...]. */ 6998 if (inst.operands[1].preind) 6999 { 7000 constraint (inst.reloc.exp.X_op != O_constant || 7001 inst.reloc.exp.X_add_number != 0, 7002 _("this instruction requires a post-indexed address")); 7003 7004 inst.operands[1].preind = 0; 7005 inst.operands[1].postind = 1; 7006 inst.operands[1].writeback = 1; 7007 } 7008 inst.instruction |= inst.operands[0].reg << 12; 7009 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE); 7010} 7011 7012/* Co-processor register load/store. 
7013 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */ 7014static void 7015do_lstc (void) 7016{ 7017 inst.instruction |= inst.operands[0].reg << 8; 7018 inst.instruction |= inst.operands[1].reg << 12; 7019 encode_arm_cp_address (2, TRUE, TRUE, 0); 7020} 7021 7022static void 7023do_mlas (void) 7024{ 7025 /* This restriction does not apply to mls (nor to mla in v6 or later). */ 7026 if (inst.operands[0].reg == inst.operands[1].reg 7027 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6) 7028 && !(inst.instruction & 0x00400000)) 7029 as_tsktsk (_("Rd and Rm should be different in mla")); 7030 7031 inst.instruction |= inst.operands[0].reg << 16; 7032 inst.instruction |= inst.operands[1].reg; 7033 inst.instruction |= inst.operands[2].reg << 8; 7034 inst.instruction |= inst.operands[3].reg << 12; 7035} 7036 7037static void 7038do_mov (void) 7039{ 7040 inst.instruction |= inst.operands[0].reg << 12; 7041 encode_arm_shifter_operand (1); 7042} 7043 7044/* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */ 7045static void 7046do_mov16 (void) 7047{ 7048 bfd_vma imm; 7049 bfd_boolean top; 7050 7051 top = (inst.instruction & 0x00400000) != 0; 7052 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW, 7053 _(":lower16: not allowed this instruction")); 7054 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT, 7055 _(":upper16: not allowed instruction")); 7056 inst.instruction |= inst.operands[0].reg << 12; 7057 if (inst.reloc.type == BFD_RELOC_UNUSED) 7058 { 7059 imm = inst.reloc.exp.X_add_number; 7060 /* The value is in two pieces: 0:11, 16:19. 
*/ 7061 inst.instruction |= (imm & 0x00000fff); 7062 inst.instruction |= (imm & 0x0000f000) << 4; 7063 } 7064} 7065 7066static void do_vfp_nsyn_opcode (const char *); 7067 7068static int 7069do_vfp_nsyn_mrs (void) 7070{ 7071 if (inst.operands[0].isvec) 7072 { 7073 if (inst.operands[1].reg != 1) 7074 first_error (_("operand 1 must be FPSCR")); 7075 memset (&inst.operands[0], '\0', sizeof (inst.operands[0])); 7076 memset (&inst.operands[1], '\0', sizeof (inst.operands[1])); 7077 do_vfp_nsyn_opcode ("fmstat"); 7078 } 7079 else if (inst.operands[1].isvec) 7080 do_vfp_nsyn_opcode ("fmrx"); 7081 else 7082 return FAIL; 7083 7084 return SUCCESS; 7085} 7086 7087static int 7088do_vfp_nsyn_msr (void) 7089{ 7090 if (inst.operands[0].isvec) 7091 do_vfp_nsyn_opcode ("fmxr"); 7092 else 7093 return FAIL; 7094 7095 return SUCCESS; 7096} 7097 7098static void 7099do_mrs (void) 7100{ 7101 if (do_vfp_nsyn_mrs () == SUCCESS) 7102 return; 7103 7104 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */ 7105 constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f)) 7106 != (PSR_c|PSR_f), 7107 _("'CPSR' or 'SPSR' expected")); 7108 inst.instruction |= inst.operands[0].reg << 12; 7109 inst.instruction |= (inst.operands[1].imm & SPSR_BIT); 7110} 7111 7112/* Two possible forms: 7113 "{C|S}PSR_<field>, Rm", 7114 "{C|S}PSR_f, #expression". 
*/ 7115 7116static void 7117do_msr (void) 7118{ 7119 if (do_vfp_nsyn_msr () == SUCCESS) 7120 return; 7121 7122 inst.instruction |= inst.operands[0].imm; 7123 if (inst.operands[1].isreg) 7124 inst.instruction |= inst.operands[1].reg; 7125 else 7126 { 7127 inst.instruction |= INST_IMMEDIATE; 7128 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE; 7129 inst.reloc.pc_rel = 0; 7130 } 7131} 7132 7133static void 7134do_mul (void) 7135{ 7136 if (!inst.operands[2].present) 7137 inst.operands[2].reg = inst.operands[0].reg; 7138 inst.instruction |= inst.operands[0].reg << 16; 7139 inst.instruction |= inst.operands[1].reg; 7140 inst.instruction |= inst.operands[2].reg << 8; 7141 7142 if (inst.operands[0].reg == inst.operands[1].reg 7143 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)) 7144 as_tsktsk (_("Rd and Rm should be different in mul")); 7145} 7146 7147/* Long Multiply Parser 7148 UMULL RdLo, RdHi, Rm, Rs 7149 SMULL RdLo, RdHi, Rm, Rs 7150 UMLAL RdLo, RdHi, Rm, Rs 7151 SMLAL RdLo, RdHi, Rm, Rs. */ 7152 7153static void 7154do_mull (void) 7155{ 7156 inst.instruction |= inst.operands[0].reg << 12; 7157 inst.instruction |= inst.operands[1].reg << 16; 7158 inst.instruction |= inst.operands[2].reg; 7159 inst.instruction |= inst.operands[3].reg << 8; 7160 7161 /* rdhi, rdlo and rm must all be different. */ 7162 if (inst.operands[0].reg == inst.operands[1].reg 7163 || inst.operands[0].reg == inst.operands[2].reg 7164 || inst.operands[1].reg == inst.operands[2].reg) 7165 as_tsktsk (_("rdhi, rdlo and rm must all be different")); 7166} 7167 7168static void 7169do_nop (void) 7170{ 7171 if (inst.operands[0].present) 7172 { 7173 /* Architectural NOP hints are CPSR sets with no bits selected. */ 7174 inst.instruction &= 0xf0000000; 7175 inst.instruction |= 0x0320f000 + inst.operands[0].imm; 7176 } 7177} 7178 7179/* ARM V6 Pack Halfword Bottom Top instruction (argument parse). 7180 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>} 7181 Condition defaults to COND_ALWAYS. 
   Error if Rd, Rn or Rm are R15.  */

static void
do_pkhbt (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  if (inst.operands[3].present)
    encode_arm_shift (3);
}

/* ARM V6 PKHTB (Argument Parse).  */

static void
do_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      /* If the shift specifier is omitted, turn the instruction
	 into pkhbt rd, rm, rn.  */
      inst.instruction &= 0xfff00010;
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].reg << 16;
    }
  else
    {
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      encode_arm_shift (3);
    }
}

/* ARMv5TE: Preload-Cache

   PLD <addr_mode>

   Syntactically, like LDR with B=1, W=0, L=1.  */

static void
do_pld (void)
{
  /* PLD only accepts a pre-indexed address with no writeback.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}

/* ARMv7: PLI <addr_mode> */

static void
do_pli (void)
{
  /* Same addressing-mode restrictions as PLD above.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  /* PLI is encoded with the P bit clear.  */
  inst.instruction &= ~PRE_INDEX;
}

/* PUSH/POP: rewrite as LDM/STM with an implicit "sp!" base operand.  */

static void
do_push_pop (void)
{
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  do_ldmstm ();
}

/* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
   word at the specified address and the following word
   respectively.
   Unconditionally executed.
   Error if Rn is R15.  */

static void
do_rfe (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
}

/* ARM V6 ssat (argument parse).
   */

static void
do_ssat (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* SSAT encodes sat_imm as the saturation position minus one.  */
  inst.instruction |= (inst.operands[1].imm - 1) << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}

/* ARM V6 usat (argument parse).  */

static void
do_usat (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* USAT encodes the saturation position directly.  */
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}

/* ARM V6 ssat16 (argument parse).  */

static void
do_ssat16 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= ((inst.operands[1].imm - 1) << 16);
  inst.instruction |= inst.operands[2].reg;
}

/* ARM V6 usat16 (argument parse).  */

static void
do_usat16 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;
}

/* ARM V6 SETEND (argument parse).  Sets the E bit in the CPSR while
   preserving the other bits.

   setend <endian_specifier>, where <endian_specifier> is either
   BE or LE.  */

static void
do_setend (void)
{
  if (inst.operands[0].imm)
    inst.instruction |= 0x200;
}

/* Shift pseudo-ops (LSL etc.): Rd, {Rm,} Rs or Rd, {Rm,} #imm.
   With only two operands, Rm defaults to Rd.  */

static void
do_shift (void)
{
  unsigned int Rm = (inst.operands[1].present
		     ? inst.operands[1].reg
		     : inst.operands[0].reg);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= Rm;
  if (inst.operands[2].isreg)	/* Rd, {Rm,} Rs */
    {
      inst.instruction |= inst.operands[2].reg << 8;
      inst.instruction |= SHIFT_BY_REG;
    }
  else
    /* Immediate shift amount: defer range checking to the fixup.  */
    inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
}

static void
do_smc (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SMC;
  inst.reloc.pc_rel = 0;
}

static void
do_swi (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SWI;
  inst.reloc.pc_rel = 0;
}

/* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
   SMLAxy{cond} Rd,Rm,Rs,Rn
   SMLAWy{cond} Rd,Rm,Rs,Rn
   Error if any register is R15.  */

static void
do_smla (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;
}

/* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
   SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
   Error if any register is R15.
   Warning if Rdlo == Rdhi.  */

static void
do_smlal (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));
}

/* ARM V5E (El Segundo) signed-multiply (argument parse)
   SMULxy{cond} Rd,Rm,Rs
   Error if any register is R15.  */

static void
do_smul (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
}

/* ARM V6 srs (argument parse).
   The variable fields in the encoding are
   the same for both ARM and Thumb-2.  */

static void
do_srs (void)
{
  int reg;

  if (inst.operands[0].present)
    {
      reg = inst.operands[0].reg;
      constraint (reg != 13, _("SRS base register must be r13"));
    }
  else
    reg = 13;	/* The base register defaults to sp.  */

  inst.instruction |= reg << 16;
  inst.instruction |= inst.operands[1].imm;
  if (inst.operands[0].writeback || inst.operands[1].writeback)
    inst.instruction |= WRITE_BACK;
}

/* ARM V6 strex (argument parse).  */

static void
do_strex (void)
{
  /* The address must be a plain [Rn] — no offset, index, shift,
     writeback, or PC base.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  /* Rd must not overlap Rt or Rn.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.reloc.type = BFD_RELOC_UNUSED;
}

/* ARM V6K strexd: store an even/odd register pair exclusively.  */

static void
do_strexd (void)
{
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  /* Rd must not overlap either half of the pair or the base register.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[3].reg << 16;
}

/* ARM V6 SXTAH extracts a 16-bit value from a register, sign
   extends it to 32-bits, and adds the result to a value in another
   register.  You can specify a rotation by 0, 8, 16, or 24 bits
   before extracting the 16-bit value.
   SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
   Condition defaults to COND_ALWAYS.
   Error if any register uses R15.  */

static void
do_sxtah (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  /* The rotation operand occupies bits 10-11.  */
  inst.instruction |= inst.operands[3].imm << 10;
}

/* ARM V6 SXTH.

   SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
   Condition defaults to COND_ALWAYS.
   Error if any register uses R15.  */

static void
do_sxth (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 10;
}

/* VFP instructions.  In a logical order: SP variant first, monad
   before dyad, arithmetic then move then load/store.
   */

/* Single-precision monad: Sd <- op Sm.  */

static void
do_vfp_sp_monadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

/* Single-precision dyad: Sd <- Sn op Sm.  */

static void
do_vfp_sp_dyadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}

/* Compare-against-zero: only Sd is encoded.  */

static void
do_vfp_sp_compare_z (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}

/* Conversion with double-precision destination: Dd <- Sm.  */

static void
do_vfp_dp_sp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

/* Conversion with single-precision destination: Sd <- Dm.  */

static void
do_vfp_sp_dp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}

/* Core register from single-precision register: Rd <- Sn.  */

static void
do_vfp_reg_from_sp (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}

/* Two core registers from a consecutive single-precision pair.  */

static void
do_vfp_reg2_from_sp2 (void)
{
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}

/* Single-precision register from core register: Sn <- Rd.  */

static void
do_vfp_sp_from_reg (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}

/* Consecutive single-precision pair from two core registers.  */

static void
do_vfp_sp2_from_reg2 (void)
{
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}

/* Single-precision load/store: Sd, <address>.  */

static void
do_vfp_sp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}

/* Double-precision load/store: Dd, <address>.  */

static void
do_vfp_dp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}


/* Single-precision load/store multiple.  Only the IA-without-writeback
   form may omit the '!'.  */

static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  inst.instruction |= inst.operands[1].imm;
}

/* Double-precision load/store multiple.  The 'X' forms (FLDMX/FSTMX)
   use an odd word count.  */

static void
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  int count;

  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
		_("this addressing mode requires base-register writeback"));

  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

  /* Each D register is two words; the X variants add one more.  */
  count = inst.operands[1].imm << 1;
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
    count += 1;

  inst.instruction |= count;
}

static void
do_vfp_sp_ldstmia (void)
{
  vfp_sp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_sp_ldstmdb (void)
{
  vfp_sp_ldstm (VFP_LDSTMDB);
}

static void
do_vfp_dp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_dp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDB);
}

static void
do_vfp_xp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIAX);
}

static void
do_vfp_xp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDBX);
}

/* Double-precision operand encoders, named for the field layout.  */

static void
do_vfp_dp_rd_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}

static void
do_vfp_dp_rn_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}

static void
do_vfp_dp_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}

static void
do_vfp_dp_rd_rn_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
}

static void
do_vfp_dp_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}

static void
do_vfp_dp_rm_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}

/* VFPv3 instructions.
   */

/* VMOV.F32 Sd, #imm — the 8-bit immediate splits into two nibbles.  */

static void
do_vfp_sp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}

/* VMOV.F64 Dd, #imm.  */

static void
do_vfp_dp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}

/* Encode the fraction-bits operand of a fixed-point VCVT.  The field
   holds (srcsize - #fbits), split between bit 5 (lsb) and bits 0-3.  */

static void
vfp_conv (int srcsize)
{
  unsigned immbits = srcsize - inst.operands[1].imm;
  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
}

static void
do_vfp_sp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}

static void
do_vfp_dp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}

static void
do_vfp_sp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}

static void
do_vfp_dp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}


/* FPA instructions.  Also in a logical order.  */

static void
do_fpa_cmp (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}

static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* The register count is encoded in the two transfer-length bits.  */
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X;	 break;
    case 2: inst.instruction |= CP_T_Y;	 break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4: break;
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;

      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}


/* iWMMXt instructions: strictly in alphabetical order.  */

static void
do_iwmmxt_tandorc (void)
{
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}

static void
do_iwmmxt_textrc (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm;
}

static void
do_iwmmxt_textrm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].imm;
}

static void
do_iwmmxt_tinsr (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].imm;
}

static void
do_iwmmxt_tmia (void)
{
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}

static void
do_iwmmxt_waligni (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 20;
}

static void
do_iwmmxt_wmerge (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 21;
}

static void
do_iwmmxt_wmov (void)
{
  /* WMOV rD, rN is an alias for WOR rD, rN, rN.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}

static void
do_iwmmxt_wldstbh (void)
{
  int reloc;
  inst.instruction |= inst.operands[0].reg << 12;
  /* Byte/halfword transfers scale the offset by two.  */
  if (thumb_mode)
    reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
  else
    reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
  encode_arm_cp_address (1, TRUE, FALSE, reloc);
}

static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}

static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* iWMMXt2 adds a register-offset addressing form with its own
     encoding; rebuild the opcode fields for it here.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xf << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.reloc.exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}

7929static void 7930do_iwmmxt_wshufh (void) 7931{ 7932 inst.instruction |= inst.operands[0].reg << 12; 7933 inst.instruction |= inst.operands[1].reg << 16; 7934 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16); 7935 inst.instruction |= (inst.operands[2].imm & 0x0f); 7936} 7937 7938static void 7939do_iwmmxt_wzero (void) 7940{ 7941 /* WZERO reg is an alias for WANDN reg, reg, reg. */ 7942 inst.instruction |= inst.operands[0].reg; 7943 inst.instruction |= inst.operands[0].reg << 12; 7944 inst.instruction |= inst.operands[0].reg << 16; 7945} 7946 7947static void 7948do_iwmmxt_wrwrwr_or_imm5 (void) 7949{ 7950 if (inst.operands[2].isreg) 7951 do_rd_rn_rm (); 7952 else { 7953 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2), 7954 _("immediate operand requires iWMMXt2")); 7955 do_rd_rn (); 7956 if (inst.operands[2].imm == 0) 7957 { 7958 switch ((inst.instruction >> 20) & 0xf) 7959 { 7960 case 4: 7961 case 5: 7962 case 6: 7963 case 7: 7964 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */ 7965 inst.operands[2].imm = 16; 7966 inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20); 7967 break; 7968 case 8: 7969 case 9: 7970 case 10: 7971 case 11: 7972 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */ 7973 inst.operands[2].imm = 32; 7974 inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20); 7975 break; 7976 case 12: 7977 case 13: 7978 case 14: 7979 case 15: 7980 { 7981 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */ 7982 unsigned long wrn; 7983 wrn = (inst.instruction >> 16) & 0xf; 7984 inst.instruction &= 0xff0fff0f; 7985 inst.instruction |= wrn; 7986 /* Bail out here; the instruction is now assembled. */ 7987 return; 7988 } 7989 } 7990 } 7991 /* Map 32 -> 0, etc. */ 7992 inst.operands[2].imm &= 0x1f; 7993 inst.instruction |= (0xf << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf); 7994 } 7995} 7996 7997/* Cirrus Maverick instructions. 
   Simple 2-, 3-, and 4-register
   operations first, then control, shift, and load/store.  */

/* Insns like "foo X,Y,Z".  */

static void
do_mav_triple (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}

/* Insns like "foo W,X,Y,Z".
    where W=MVAX[0:3] and X,Y,Z=MVFX[0:15].  */

static void
do_mav_quad (void)
{
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.instruction |= inst.operands[3].reg;
}

/* cfmvsc32<cond> DSPSC,MVDX[15:0].  */
static void
do_mav_dspsc (void)
{
  /* Only the source register is variable; DSPSC is implicit.  */
  inst.instruction |= inst.operands[1].reg << 12;
}

/* Maverick shift immediate instructions.
   cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
   cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0].  */

static void
do_mav_shift (void)
{
  int imm = inst.operands[2].imm;

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;

  /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
     Bits 5-7 of the insn should have bits 4-6 of the immediate.
     Bit 4 should be 0.	 */
  imm = (imm & 0xf) | ((imm & 0x70) << 1);

  inst.instruction |= imm;
}

/* XScale instructions.	 Also sorted arithmetic before move.  */

/* Xscale multiply-accumulate (argument parse)
     MIAcc   acc0,Rm,Rs
     MIAPHcc acc0,Rm,Rs
     MIAxycc acc0,Rm,Rs.  */

static void
do_xsc_mia (void)
{
  /* The accumulator operand is fixed (acc0) and not encoded.  */
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}

/* Xscale move-accumulator-register (argument parse)

     MARcc   acc0,RdLo,RdHi.  */

static void
do_xsc_mar (void)
{
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}

/* Xscale move-register-accumulator (argument parse)

     MRAcc   RdLo,RdHi,acc0.  */

static void
do_xsc_mra (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
}

/* Encoding functions relevant only to Thumb.  */

/* inst.operands[i] is a shifted-register operand; encode
   it into inst.instruction in the format used by Thumb32.  */

static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      /* LSL #0 is the canonical "no shift"; a shift amount of 32 is
	 encoded as 0 for the shift kinds that allow it.  */
      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      inst.instruction |= shift << 4;
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}


/* inst.operands[i] was set up by parse_address.  Encode it into a
   Thumb32 format load or store instruction.  Reject forms that cannot
   be used with such instructions.  If is_t is true, reject forms that
   cannot be used with a T instruction; if is_d is true, reject forms
   that cannot be used with a D instruction.
*/ 8129 8130static void 8131encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d) 8132{ 8133 bfd_boolean is_pc = (inst.operands[i].reg == REG_PC); 8134 8135 constraint (!inst.operands[i].isreg, 8136 _("Instruction does not support =N addresses")); 8137 8138 inst.instruction |= inst.operands[i].reg << 16; 8139 if (inst.operands[i].immisreg) 8140 { 8141 constraint (is_pc, _("cannot use register index with PC-relative addressing")); 8142 constraint (is_t || is_d, _("cannot use register index with this instruction")); 8143 constraint (inst.operands[i].negative, 8144 _("Thumb does not support negative register indexing")); 8145 constraint (inst.operands[i].postind, 8146 _("Thumb does not support register post-indexing")); 8147 constraint (inst.operands[i].writeback, 8148 _("Thumb does not support register indexing with writeback")); 8149 constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL, 8150 _("Thumb supports only LSL in shifted register indexing")); 8151 8152 inst.instruction |= inst.operands[i].imm; 8153 if (inst.operands[i].shifted) 8154 { 8155 constraint (inst.reloc.exp.X_op != O_constant, 8156 _("expression too complex")); 8157 constraint (inst.reloc.exp.X_add_number < 0 8158 || inst.reloc.exp.X_add_number > 3, 8159 _("shift out of range")); 8160 inst.instruction |= inst.reloc.exp.X_add_number << 4; 8161 } 8162 inst.reloc.type = BFD_RELOC_UNUSED; 8163 } 8164 else if (inst.operands[i].preind) 8165 { 8166 constraint (is_pc && inst.operands[i].writeback, 8167 _("cannot use writeback with PC-relative addressing")); 8168 constraint (is_t && inst.operands[i].writeback, 8169 _("cannot use writeback with this instruction")); 8170 8171 if (is_d) 8172 { 8173 inst.instruction |= 0x01000000; 8174 if (inst.operands[i].writeback) 8175 inst.instruction |= 0x00200000; 8176 } 8177 else 8178 { 8179 inst.instruction |= 0x00000c00; 8180 if (inst.operands[i].writeback) 8181 inst.instruction |= 0x00000100; 8182 } 8183 inst.reloc.type = 
BFD_RELOC_ARM_T32_OFFSET_IMM; 8184 } 8185 else if (inst.operands[i].postind) 8186 { 8187 assert (inst.operands[i].writeback); 8188 constraint (is_pc, _("cannot use post-indexing with PC-relative addressing")); 8189 constraint (is_t, _("cannot use post-indexing with this instruction")); 8190 8191 if (is_d) 8192 inst.instruction |= 0x00200000; 8193 else 8194 inst.instruction |= 0x00000900; 8195 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM; 8196 } 8197 else /* unindexed - only for coprocessor */ 8198 inst.error = _("instruction does not accept unindexed addressing"); 8199} 8200 8201/* Table of Thumb instructions which exist in both 16- and 32-bit 8202 encodings (the latter only in post-V6T2 cores). The index is the 8203 value used in the insns table below. When there is more than one 8204 possible 16-bit encoding for the instruction, this table always 8205 holds variant (1). 8206 Also contains several pseudo-instructions used during relaxation. */ 8207#define T16_32_TAB \ 8208 X(adc, 4140, eb400000), \ 8209 X(adcs, 4140, eb500000), \ 8210 X(add, 1c00, eb000000), \ 8211 X(adds, 1c00, eb100000), \ 8212 X(addi, 0000, f1000000), \ 8213 X(addis, 0000, f1100000), \ 8214 X(add_pc,000f, f20f0000), \ 8215 X(add_sp,000d, f10d0000), \ 8216 X(adr, 000f, f20f0000), \ 8217 X(and, 4000, ea000000), \ 8218 X(ands, 4000, ea100000), \ 8219 X(asr, 1000, fa40f000), \ 8220 X(asrs, 1000, fa50f000), \ 8221 X(b, e000, f000b000), \ 8222 X(bcond, d000, f0008000), \ 8223 X(bic, 4380, ea200000), \ 8224 X(bics, 4380, ea300000), \ 8225 X(cmn, 42c0, eb100f00), \ 8226 X(cmp, 2800, ebb00f00), \ 8227 X(cpsie, b660, f3af8400), \ 8228 X(cpsid, b670, f3af8600), \ 8229 X(cpy, 4600, ea4f0000), \ 8230 X(dec_sp,80dd, f1ad0d00), \ 8231 X(eor, 4040, ea800000), \ 8232 X(eors, 4040, ea900000), \ 8233 X(inc_sp,00dd, f10d0d00), \ 8234 X(ldmia, c800, e8900000), \ 8235 X(ldr, 6800, f8500000), \ 8236 X(ldrb, 7800, f8100000), \ 8237 X(ldrh, 8800, f8300000), \ 8238 X(ldrsb, 5600, f9100000), \ 8239 X(ldrsh, 5e00, 
f9300000), \ 8240 X(ldr_pc,4800, f85f0000), \ 8241 X(ldr_pc2,4800, f85f0000), \ 8242 X(ldr_sp,9800, f85d0000), \ 8243 X(lsl, 0000, fa00f000), \ 8244 X(lsls, 0000, fa10f000), \ 8245 X(lsr, 0800, fa20f000), \ 8246 X(lsrs, 0800, fa30f000), \ 8247 X(mov, 2000, ea4f0000), \ 8248 X(movs, 2000, ea5f0000), \ 8249 X(mul, 4340, fb00f000), \ 8250 X(muls, 4340, ffffffff), /* no 32b muls */ \ 8251 X(mvn, 43c0, ea6f0000), \ 8252 X(mvns, 43c0, ea7f0000), \ 8253 X(neg, 4240, f1c00000), /* rsb #0 */ \ 8254 X(negs, 4240, f1d00000), /* rsbs #0 */ \ 8255 X(orr, 4300, ea400000), \ 8256 X(orrs, 4300, ea500000), \ 8257 X(pop, bc00, e8bd0000), /* ldmia sp!,... */ \ 8258 X(push, b400, e92d0000), /* stmdb sp!,... */ \ 8259 X(rev, ba00, fa90f080), \ 8260 X(rev16, ba40, fa90f090), \ 8261 X(revsh, bac0, fa90f0b0), \ 8262 X(ror, 41c0, fa60f000), \ 8263 X(rors, 41c0, fa70f000), \ 8264 X(sbc, 4180, eb600000), \ 8265 X(sbcs, 4180, eb700000), \ 8266 X(stmia, c000, e8800000), \ 8267 X(str, 6000, f8400000), \ 8268 X(strb, 7000, f8000000), \ 8269 X(strh, 8000, f8200000), \ 8270 X(str_sp,9000, f84d0000), \ 8271 X(sub, 1e00, eba00000), \ 8272 X(subs, 1e00, ebb00000), \ 8273 X(subi, 8000, f1a00000), \ 8274 X(subis, 8000, f1b00000), \ 8275 X(sxtb, b240, fa4ff080), \ 8276 X(sxth, b200, fa0ff080), \ 8277 X(tst, 4200, ea100f00), \ 8278 X(uxtb, b2c0, fa5ff080), \ 8279 X(uxth, b280, fa1ff080), \ 8280 X(nop, bf00, f3af8000), \ 8281 X(yield, bf10, f3af8001), \ 8282 X(wfe, bf20, f3af8002), \ 8283 X(wfi, bf30, f3af8003), \ 8284 X(sev, bf40, f3af9004), /* typo, 8004? */ 8285 8286/* To catch errors in encoding functions, the codes are all offset by 8287 0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined 8288 as 16-bit instructions. 
*/
/* First expansion of T16_32_TAB: enumerate a T_MNEM_xxx code for each
   entry, offset past 0xF7FF so the codes land in the 32-bit prefix
   range (see the comment above).  */
#define X(a,b,c) T_MNEM_##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* Second expansion: the 16-bit opcode for each mnemonic, indexed by
   T_MNEM_xxx via THUMB_OP16.  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* Third expansion: the 32-bit opcode for each mnemonic.  In the table,
   the flag-setting variant of each data-processing op differs from the
   plain one only in bit 20 (e.g. adc eb400000 vs adcs eb500000), which
   is what THUMB_SETS_FLAGS tests.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB

/* Thumb instruction encoders, in alphabetical order.  */

/* ADDW or SUBW.  Encodes Rn (bits 16-19) and Rd (bits 8-11) of the
   32-bit immediate form; the 12-bit immediate itself is applied later
   through the BFD_RELOC_ARM_T32_IMM12 fixup.  */
static void
do_t_add_sub_w (void)
{
  int Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;

  constraint (Rd == 15, _("PC not allowed as destination"));
  inst.instruction |= (Rn << 16) | (Rd << 8);
  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
}

/* Parse an add or subtract instruction.  We get here with inst.instruction
   equalling any of THUMB_OPCODE_add, adds, sub, or subs.  */

static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      /* TRUE for the flag-setting mnemonics (adds/subs).  */
      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      /* A 16-bit encoding is only eligible when its implicit
	 flag-setting behaviour (sets flags outside an IT block, does
	 not inside one) matches the requested mnemonic.  */
      if (flags)
	narrow = (current_it_mask == 0);
      else
	narrow = (current_it_mask != 0);
      if (!inst.operands[2].isreg)
	{
	  int add;

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  /* opcode == 0 means "no narrow candidate found".  */
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
	         appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ?
T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  /* Emit the 16-bit form now; unless the user forced a
		     2-byte encoding, record the mnemonic so relaxation
		     can widen it to 32 bits later.  */
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
		  if (inst.size_req != 2)
		    inst.relax = opcode;
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      if (Rd == REG_PC)
		{
		  /* Writing the PC: only the exception-return idiom
		     SUBS PC, LR, #const (0..255) is representable.  */
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			     _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.reloc.exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.reloc.exp.X_add_number < 0
			      || inst.reloc.exp.X_add_number > 0xff,
			      _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.reloc.exp.X_add_number;
		  inst.reloc.type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  /* Convert the table's register form (eb...) to the
		     corresponding immediate form (f1...); cf. the
		     add/addi pair in T16_32_TAB.  */
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.
*/
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add)
		{
		  /* High-register ADD (no flags): needs Rd to equal one
		     of the sources.  */
		  if (Rd == Rs)
		    {
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		  /* ... because addition is commutative! */
		  else if (Rd == Rn)
		    {
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rs << 3;
		      return;
		    }
		}
	    }
	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Non-unified (classic Thumb) syntax: only 16-bit encodings.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.
*/
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  /* The high-register form requires Rd to coincide with one
	     source; encode the other one in the Rm field.  */
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}

/* ADR - load a PC-relative address into a register.  Chooses between
   the relaxable 16-bit form, the 32-bit ADD_PC12 form, and the plain
   16-bit form depending on syntax mode and size requirement.  */
static void
do_t_adr (void)
{
  if (unified_syntax && inst.size_req == 0 && inst.operands[0].reg <= 7)
    {
      /* Defer to section relaxation.  */
      inst.relax = inst.instruction;
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 4;
    }
  else if (unified_syntax && inst.size_req != 2)
    {
      /* Generate a 32-bit opcode.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
      inst.reloc.pc_rel = 1;
    }
  else
    {
      /* Generate a 16-bit opcode.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
      inst.reloc.exp.X_add_number -= 4; /* PC relative adjust.  */
      inst.reloc.pc_rel = 1;

      inst.instruction |= inst.operands[0].reg << 4;
    }
}

/* Arithmetic instructions for which there is just one 16-bit
   instruction encoding, and it allows only two low registers.
   For maximal compatibility with ARM syntax, we allow three register
   operands even when Thumb-32 instructions are not available, as long
   as the first two are identical.  For instance, both "sbc r0,r1" and
   "sbc r0,r0,r1" are allowed.
*/
static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  /* Convert the register form (e.g. eb6...) to the immediate
	     form (f16...); the immediate is applied via the fixup.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  The
	     narrow form is usable only when its flag-setting behaviour
	     (set outside an IT block, not set inside) matches the
	     mnemonic.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = current_it_mask == 0;
	  else
	    narrow = current_it_mask != 0;

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  /* The 16-bit form is two-operand, so Rd must equal Rs.  */
	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.
*/
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}

/* Similarly, but for instructions where the arithmetic operation is
   commutative, so we can allow either of them to be different from
   the destination operand in a 16-bit instruction.  For instance, all
   three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
   accepted.  */
static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.
	     As in do_t_arit3, the register-form opcode is converted
	     to the immediate form here.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.
*/
	  /* Narrow only when the 16-bit form's flag behaviour matches
	     the mnemonic (see do_t_arit3).  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = current_it_mask == 0;
	  else
	    narrow = current_it_mask != 0;

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow)
	    {
	      /* Commutative op: Rd may coincide with either source.  */
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.
*/ 8705 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32); 8706 8707 constraint (!inst.operands[2].isreg || inst.operands[2].shifted, 8708 _("unshifted register required")); 8709 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG); 8710 8711 inst.instruction = THUMB_OP16 (inst.instruction); 8712 inst.instruction |= Rd; 8713 8714 if (Rd == Rs) 8715 inst.instruction |= Rn << 3; 8716 else if (Rd == Rn) 8717 inst.instruction |= Rs << 3; 8718 else 8719 constraint (1, _("dest must overlap one source register")); 8720 } 8721} 8722 8723static void 8724do_t_barrier (void) 8725{ 8726 if (inst.operands[0].present) 8727 { 8728 constraint ((inst.instruction & 0xf0) != 0x40 8729 && inst.operands[0].imm != 0xf, 8730 "bad barrier type"); 8731 inst.instruction |= inst.operands[0].imm; 8732 } 8733 else 8734 inst.instruction |= 0xf; 8735} 8736 8737static void 8738do_t_bfc (void) 8739{ 8740 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm; 8741 constraint (msb > 32, _("bit-field extends past end of register")); 8742 /* The instruction encoding stores the LSB and MSB, 8743 not the LSB and width. */ 8744 inst.instruction |= inst.operands[0].reg << 8; 8745 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10; 8746 inst.instruction |= (inst.operands[1].imm & 0x03) << 6; 8747 inst.instruction |= msb - 1; 8748} 8749 8750static void 8751do_t_bfi (void) 8752{ 8753 unsigned int msb; 8754 8755 /* #0 in second position is alternative syntax for bfc, which is 8756 the same instruction but with REG_PC in the Rm field. */ 8757 if (!inst.operands[1].isreg) 8758 inst.operands[1].reg = REG_PC; 8759 8760 msb = inst.operands[2].imm + inst.operands[3].imm; 8761 constraint (msb > 32, _("bit-field extends past end of register")); 8762 /* The instruction encoding stores the LSB and MSB, 8763 not the LSB and width. 
*/
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
  inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
  inst.instruction |= msb - 1;
}

/* Bit field extract (signed/unsigned).  Operands: Rd, Rn, #lsb, #width.
   Here the encoding stores the LSB and width-1 directly.  */
static void
do_t_bfx (void)
{
  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
	      _("bit-field extends past end of register"));
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
  inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
  inst.instruction |= inst.operands[3].imm - 1;
}

/* ARM V5 Thumb BLX (argument parse)
	BLX <target_addr>	which is BLX(1)
	BLX <Rm>		which is BLX(2)
   Unfortunately, there are two different opcodes for this mnemonic.
   So, the insns[].value is not used, and the code here zaps values
	into inst.instruction.

   ??? How to take advantage of the additional two bits of displacement
   available in Thumb32 mode?  Need new relocation?  */

static void
do_t_blx (void)
{
  /* Branches may only appear last in an IT block.  */
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  if (inst.operands[0].isreg)
    /* We have a register, so this is BLX(2).  */
    inst.instruction |= inst.operands[0].reg << 3;
  else
    {
      /* No register.  This must be BLX(1).  */
      inst.instruction = 0xf000e800;
#ifdef OBJ_ELF
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
#endif
	inst.reloc.type = BFD_RELOC_THUMB_PCREL_BLX;
      inst.reloc.pc_rel = 1;
    }
}

/* Encode a (possibly conditional) branch, choosing among the 16- and
   32-bit encodings and setting up the matching PC-relative reloc.  */
static void
do_t_branch (void)
{
  int opcode;
  int cond;

  if (current_it_mask)
    {
      /* Conditional branches inside IT blocks are encoded as unconditional
         branches.
*/
      cond = COND_ALWAYS;
      /* A branch must be the last instruction in an IT block.  */
      constraint (current_it_mask != 0x10, BAD_BRANCH);
    }
  else
    cond = inst.cond;

  if (cond != COND_ALWAYS)
    opcode = T_MNEM_bcond;
  else
    opcode = inst.instruction;

  if (unified_syntax && inst.size_req == 4)
    {
      inst.instruction = THUMB_OP32(opcode);
      if (cond == COND_ALWAYS)
	inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH25;
      else
	{
	  /* Condition 0xF has no T32 conditional-branch encoding.  */
	  assert (cond != 0xF);
	  inst.instruction |= cond << 22;
	  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
    }
  else
    {
      inst.instruction = THUMB_OP16(opcode);
      if (cond == COND_ALWAYS)
	inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      else
	{
	  inst.instruction |= cond << 8;
	  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH9;
	}
      /* Allow section relaxation.  */
      if (unified_syntax && inst.size_req != 2)
	inst.relax = opcode;
    }

  inst.reloc.pc_rel = 1;
}

/* BKPT - the immediate, if present, must fit in eight bits.  */
static void
do_t_bkpt (void)
{
  constraint (inst.cond != COND_ALWAYS,
	      _("instruction is always unconditional"));
  if (inst.operands[0].present)
    {
      constraint (inst.operands[0].imm > 255,
		  _("immediate value out of range"));
      inst.instruction |= inst.operands[0].imm;
    }
}

static void
do_t_branch23 (void)
{
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  inst.reloc.type   = BFD_RELOC_THUMB_PCREL_BRANCH23;
  inst.reloc.pc_rel = 1;

  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (	inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_symbol =
      find_real_start (inst.reloc.exp.X_add_symbol);
}

static void
do_t_bx (void)
{
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  inst.instruction |= inst.operands[0].reg << 3;
  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.  The reloc
     should cause the alignment to be checked once it is known.  This is
     because BX PC only works if the instruction is word aligned.  */
}

static void
do_t_bxj (void)
{
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bxj is not really useful"));

  inst.instruction |= inst.operands[0].reg << 16;
}

/* CLZ - the source register is encoded twice, in both the Rm fields
   (bits 16-19 and bits 0-3) of the 32-bit encoding.  */
static void
do_t_clz (void)
{
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}

static void
do_t_cps (void)
{
  constraint (current_it_mask, BAD_NOT_IT);
  inst.instruction |= inst.operands[0].imm;
}

/* CPSIE/CPSID - choose between the 16-bit form and, when a mode
   operand is present or a 32-bit encoding was requested, the 32-bit
   form (which requires v6 non-M support).  */
static void
do_t_cpsi (void)
{
  constraint (current_it_mask, BAD_NOT_IT);
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* Carry the IE/ID disable bits over into the imod field.  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}

/* THUMB CPY instruction (argument parse).  */

static void
do_t_cpy (void)
{
  if (inst.size_req == 4)
    {
      /* The 32-bit form is encoded as a MOV.  */
      inst.instruction = THUMB_OP32 (T_MNEM_mov);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].reg;
    }
  else
    {
      inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
      inst.instruction |= (inst.operands[0].reg & 0x7);
      inst.instruction |= inst.operands[1].reg << 3;
    }
}

/* CBZ/CBNZ - compare and branch; low registers only, not allowed in
   an IT block.  */
static void
do_t_cbz (void)
{
  constraint (current_it_mask, BAD_NOT_IT);
  constraint (inst.operands[0].reg > 7, BAD_HIREG);
  inst.instruction |= inst.operands[0].reg;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
}

static void
do_t_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}

/* Divide instruction: Rd, Rn, Rm, with Rn defaulting to Rd when the
   two-operand form is used.  */
static void
do_t_div (void)
{
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
}

/* Hint instructions (nop, yield, wfe, wfi, sev): pick the 16- or
   32-bit encoding from the T16_32 tables.  */
static void
do_t_hint (void)
{
  if (unified_syntax && inst.size_req == 4)
    inst.instruction = THUMB_OP32 (inst.instruction);
  else
    inst.instruction = THUMB_OP16 (inst.instruction);
}

static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  constraint (current_it_mask, BAD_NOT_IT);
  /* Record the IT state for the following instructions.  */
  current_it_mask = (inst.instruction & 0xf) | 0x10;
  current_cc = cond;

  /* If the condition is a negative condition, invert the mask.
*/ 9025 if ((cond & 0x1) == 0x0) 9026 { 9027 unsigned int mask = inst.instruction & 0x000f; 9028 9029 if ((mask & 0x7) == 0) 9030 /* no conversion needed */; 9031 else if ((mask & 0x3) == 0) 9032 mask ^= 0x8; 9033 else if ((mask & 0x1) == 0) 9034 mask ^= 0xC; 9035 else 9036 mask ^= 0xE; 9037 9038 inst.instruction &= 0xfff0; 9039 inst.instruction |= mask; 9040 } 9041 9042 inst.instruction |= cond << 4; 9043} 9044 9045/* Helper function used for both push/pop and ldm/stm. */ 9046static void 9047encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback) 9048{ 9049 bfd_boolean load; 9050 9051 load = (inst.instruction & (1 << 20)) != 0; 9052 9053 if (mask & (1 << 13)) 9054 inst.error = _("SP not allowed in register list"); 9055 if (load) 9056 { 9057 if (mask & (1 << 14) 9058 && mask & (1 << 15)) 9059 inst.error = _("LR and PC should not both be in register list"); 9060 9061 if ((mask & (1 << base)) != 0 9062 && writeback) 9063 as_warn (_("base register should not be in register list " 9064 "when written back")); 9065 } 9066 else 9067 { 9068 if (mask & (1 << 15)) 9069 inst.error = _("PC not allowed in register list"); 9070 9071 if (mask & (1 << base)) 9072 as_warn (_("value stored for r%d is UNPREDICTABLE"), base); 9073 } 9074 9075 if ((mask & (mask - 1)) == 0) 9076 { 9077 /* Single register transfers implemented as str/ldr. */ 9078 if (writeback) 9079 { 9080 if (inst.instruction & (1 << 23)) 9081 inst.instruction = 0x00000b04; /* ia! -> [base], #4 */ 9082 else 9083 inst.instruction = 0x00000d04; /* db! -> [base, #-4]! 
*/ 9084 } 9085 else 9086 { 9087 if (inst.instruction & (1 << 23)) 9088 inst.instruction = 0x00800000; /* ia -> [base] */ 9089 else 9090 inst.instruction = 0x00000c04; /* db -> [base, #-4] */ 9091 } 9092 9093 inst.instruction |= 0xf8400000; 9094 if (load) 9095 inst.instruction |= 0x00100000; 9096 9097 mask = ffs(mask) - 1; 9098 mask <<= 12; 9099 } 9100 else if (writeback) 9101 inst.instruction |= WRITE_BACK; 9102 9103 inst.instruction |= mask; 9104 inst.instruction |= base << 16; 9105} 9106 9107static void 9108do_t_ldmstm (void) 9109{ 9110 /* This really doesn't seem worth it. */ 9111 constraint (inst.reloc.type != BFD_RELOC_UNUSED, 9112 _("expression too complex")); 9113 constraint (inst.operands[1].writeback, 9114 _("Thumb load/store multiple does not support {reglist}^")); 9115 9116 if (unified_syntax) 9117 { 9118 bfd_boolean narrow; 9119 unsigned mask; 9120 9121 narrow = FALSE; 9122 /* See if we can use a 16-bit instruction. */ 9123 if (inst.instruction < 0xffff /* not ldmdb/stmdb */ 9124 && inst.size_req != 4 9125 && !(inst.operands[1].imm & ~0xff)) 9126 { 9127 mask = 1 << inst.operands[0].reg; 9128 9129 if (inst.operands[0].reg <= 7 9130 && (inst.instruction == T_MNEM_stmia 9131 ? inst.operands[0].writeback 9132 : (inst.operands[0].writeback 9133 == !(inst.operands[1].imm & mask)))) 9134 { 9135 if (inst.instruction == T_MNEM_stmia 9136 && (inst.operands[1].imm & mask) 9137 && (inst.operands[1].imm & (mask - 1))) 9138 as_warn (_("value stored for r%d is UNPREDICTABLE"), 9139 inst.operands[0].reg); 9140 9141 inst.instruction = THUMB_OP16 (inst.instruction); 9142 inst.instruction |= inst.operands[0].reg << 8; 9143 inst.instruction |= inst.operands[1].imm; 9144 narrow = TRUE; 9145 } 9146 else if (inst.operands[0] .reg == REG_SP 9147 && inst.operands[0].writeback) 9148 { 9149 inst.instruction = THUMB_OP16 (inst.instruction == T_MNEM_stmia 9150 ? 
T_MNEM_push : T_MNEM_pop); 9151 inst.instruction |= inst.operands[1].imm; 9152 narrow = TRUE; 9153 } 9154 } 9155 9156 if (!narrow) 9157 { 9158 if (inst.instruction < 0xffff) 9159 inst.instruction = THUMB_OP32 (inst.instruction); 9160 9161 encode_thumb2_ldmstm(inst.operands[0].reg, inst.operands[1].imm, 9162 inst.operands[0].writeback); 9163 } 9164 } 9165 else 9166 { 9167 constraint (inst.operands[0].reg > 7 9168 || (inst.operands[1].imm & ~0xff), BAD_HIREG); 9169 constraint (inst.instruction != T_MNEM_ldmia 9170 && inst.instruction != T_MNEM_stmia, 9171 _("Thumb-2 instruction only valid in unified syntax")); 9172 if (inst.instruction == T_MNEM_stmia) 9173 { 9174 if (!inst.operands[0].writeback) 9175 as_warn (_("this instruction will write back the base register")); 9176 if ((inst.operands[1].imm & (1 << inst.operands[0].reg)) 9177 && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1))) 9178 as_warn (_("value stored for r%d is UNPREDICTABLE"), 9179 inst.operands[0].reg); 9180 } 9181 else 9182 { 9183 if (!inst.operands[0].writeback 9184 && !(inst.operands[1].imm & (1 << inst.operands[0].reg))) 9185 as_warn (_("this instruction will write back the base register")); 9186 else if (inst.operands[0].writeback 9187 && (inst.operands[1].imm & (1 << inst.operands[0].reg))) 9188 as_warn (_("this instruction will not write back the base register")); 9189 } 9190 9191 inst.instruction = THUMB_OP16 (inst.instruction); 9192 inst.instruction |= inst.operands[0].reg << 8; 9193 inst.instruction |= inst.operands[1].imm; 9194 } 9195} 9196 9197static void 9198do_t_ldrex (void) 9199{ 9200 constraint (!inst.operands[1].isreg || !inst.operands[1].preind 9201 || inst.operands[1].postind || inst.operands[1].writeback 9202 || inst.operands[1].immisreg || inst.operands[1].shifted 9203 || inst.operands[1].negative, 9204 BAD_ADDR_MODE); 9205 9206 inst.instruction |= inst.operands[0].reg << 12; 9207 inst.instruction |= inst.operands[1].reg << 16; 9208 inst.reloc.type = 
BFD_RELOC_ARM_T32_OFFSET_U8; 9209} 9210 9211static void 9212do_t_ldrexd (void) 9213{ 9214 if (!inst.operands[1].present) 9215 { 9216 constraint (inst.operands[0].reg == REG_LR, 9217 _("r14 not allowed as first register " 9218 "when second register is omitted")); 9219 inst.operands[1].reg = inst.operands[0].reg + 1; 9220 } 9221 constraint (inst.operands[0].reg == inst.operands[1].reg, 9222 BAD_OVERLAP); 9223 9224 inst.instruction |= inst.operands[0].reg << 12; 9225 inst.instruction |= inst.operands[1].reg << 8; 9226 inst.instruction |= inst.operands[2].reg << 16; 9227} 9228 9229static void 9230do_t_ldst (void) 9231{ 9232 unsigned long opcode; 9233 int Rn; 9234 9235 opcode = inst.instruction; 9236 if (unified_syntax) 9237 { 9238 if (!inst.operands[1].isreg) 9239 { 9240 if (opcode <= 0xffff) 9241 inst.instruction = THUMB_OP32 (opcode); 9242 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE)) 9243 return; 9244 } 9245 if (inst.operands[1].isreg 9246 && !inst.operands[1].writeback 9247 && !inst.operands[1].shifted && !inst.operands[1].postind 9248 && !inst.operands[1].negative && inst.operands[0].reg <= 7 9249 && opcode <= 0xffff 9250 && inst.size_req != 4) 9251 { 9252 /* Insn may have a 16-bit form. 
*/ 9253 Rn = inst.operands[1].reg; 9254 if (inst.operands[1].immisreg) 9255 { 9256 inst.instruction = THUMB_OP16 (opcode); 9257 /* [Rn, Ri] */ 9258 if (Rn <= 7 && inst.operands[1].imm <= 7) 9259 goto op16; 9260 } 9261 else if ((Rn <= 7 && opcode != T_MNEM_ldrsh 9262 && opcode != T_MNEM_ldrsb) 9263 || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr) 9264 || (Rn == REG_SP && opcode == T_MNEM_str)) 9265 { 9266 /* [Rn, #const] */ 9267 if (Rn > 7) 9268 { 9269 if (Rn == REG_PC) 9270 { 9271 if (inst.reloc.pc_rel) 9272 opcode = T_MNEM_ldr_pc2; 9273 else 9274 opcode = T_MNEM_ldr_pc; 9275 } 9276 else 9277 { 9278 if (opcode == T_MNEM_ldr) 9279 opcode = T_MNEM_ldr_sp; 9280 else 9281 opcode = T_MNEM_str_sp; 9282 } 9283 inst.instruction = inst.operands[0].reg << 8; 9284 } 9285 else 9286 { 9287 inst.instruction = inst.operands[0].reg; 9288 inst.instruction |= inst.operands[1].reg << 3; 9289 } 9290 inst.instruction |= THUMB_OP16 (opcode); 9291 if (inst.size_req == 2) 9292 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET; 9293 else 9294 inst.relax = opcode; 9295 return; 9296 } 9297 } 9298 /* Definitely a 32-bit variant. */ 9299 inst.instruction = THUMB_OP32 (opcode); 9300 inst.instruction |= inst.operands[0].reg << 12; 9301 encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE); 9302 return; 9303 } 9304 9305 constraint (inst.operands[0].reg > 7, BAD_HIREG); 9306 9307 if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb) 9308 { 9309 /* Only [Rn,Rm] is acceptable. 
*/ 9310 constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG); 9311 constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg 9312 || inst.operands[1].postind || inst.operands[1].shifted 9313 || inst.operands[1].negative, 9314 _("Thumb does not support this addressing mode")); 9315 inst.instruction = THUMB_OP16 (inst.instruction); 9316 goto op16; 9317 } 9318 9319 inst.instruction = THUMB_OP16 (inst.instruction); 9320 if (!inst.operands[1].isreg) 9321 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE)) 9322 return; 9323 9324 constraint (!inst.operands[1].preind 9325 || inst.operands[1].shifted 9326 || inst.operands[1].writeback, 9327 _("Thumb does not support this addressing mode")); 9328 if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP) 9329 { 9330 constraint (inst.instruction & 0x0600, 9331 _("byte or halfword not valid for base register")); 9332 constraint (inst.operands[1].reg == REG_PC 9333 && !(inst.instruction & THUMB_LOAD_BIT), 9334 _("r15 based store not allowed")); 9335 constraint (inst.operands[1].immisreg, 9336 _("invalid base register for register offset")); 9337 9338 if (inst.operands[1].reg == REG_PC) 9339 inst.instruction = T_OPCODE_LDR_PC; 9340 else if (inst.instruction & THUMB_LOAD_BIT) 9341 inst.instruction = T_OPCODE_LDR_SP; 9342 else 9343 inst.instruction = T_OPCODE_STR_SP; 9344 9345 inst.instruction |= inst.operands[0].reg << 8; 9346 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET; 9347 return; 9348 } 9349 9350 constraint (inst.operands[1].reg > 7, BAD_HIREG); 9351 if (!inst.operands[1].immisreg) 9352 { 9353 /* Immediate offset. */ 9354 inst.instruction |= inst.operands[0].reg; 9355 inst.instruction |= inst.operands[1].reg << 3; 9356 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET; 9357 return; 9358 } 9359 9360 /* Register offset. 
*/ 9361 constraint (inst.operands[1].imm > 7, BAD_HIREG); 9362 constraint (inst.operands[1].negative, 9363 _("Thumb does not support this addressing mode")); 9364 9365 op16: 9366 switch (inst.instruction) 9367 { 9368 case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break; 9369 case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break; 9370 case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break; 9371 case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break; 9372 case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break; 9373 case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break; 9374 case 0x5600 /* ldrsb */: 9375 case 0x5e00 /* ldrsh */: break; 9376 default: abort (); 9377 } 9378 9379 inst.instruction |= inst.operands[0].reg; 9380 inst.instruction |= inst.operands[1].reg << 3; 9381 inst.instruction |= inst.operands[1].imm << 6; 9382} 9383 9384static void 9385do_t_ldstd (void) 9386{ 9387 if (!inst.operands[1].present) 9388 { 9389 inst.operands[1].reg = inst.operands[0].reg + 1; 9390 constraint (inst.operands[0].reg == REG_LR, 9391 _("r14 not allowed here")); 9392 } 9393 inst.instruction |= inst.operands[0].reg << 12; 9394 inst.instruction |= inst.operands[1].reg << 8; 9395 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE); 9396 9397} 9398 9399static void 9400do_t_ldstt (void) 9401{ 9402 inst.instruction |= inst.operands[0].reg << 12; 9403 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE); 9404} 9405 9406static void 9407do_t_mla (void) 9408{ 9409 inst.instruction |= inst.operands[0].reg << 8; 9410 inst.instruction |= inst.operands[1].reg << 16; 9411 inst.instruction |= inst.operands[2].reg; 9412 inst.instruction |= inst.operands[3].reg << 12; 9413} 9414 9415static void 9416do_t_mlal (void) 9417{ 9418 inst.instruction |= inst.operands[0].reg << 12; 9419 inst.instruction |= inst.operands[1].reg << 8; 9420 inst.instruction |= inst.operands[2].reg << 16; 9421 inst.instruction |= inst.operands[3].reg; 9422} 
9423 9424static void 9425do_t_mov_cmp (void) 9426{ 9427 if (unified_syntax) 9428 { 9429 int r0off = (inst.instruction == T_MNEM_mov 9430 || inst.instruction == T_MNEM_movs) ? 8 : 16; 9431 unsigned long opcode; 9432 bfd_boolean narrow; 9433 bfd_boolean low_regs; 9434 9435 low_regs = (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7); 9436 opcode = inst.instruction; 9437 if (current_it_mask) 9438 narrow = opcode != T_MNEM_movs; 9439 else 9440 narrow = opcode != T_MNEM_movs || low_regs; 9441 if (inst.size_req == 4 9442 || inst.operands[1].shifted) 9443 narrow = FALSE; 9444 9445 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */ 9446 if (opcode == T_MNEM_movs && inst.operands[1].isreg 9447 && !inst.operands[1].shifted 9448 && inst.operands[0].reg == REG_PC 9449 && inst.operands[1].reg == REG_LR) 9450 { 9451 inst.instruction = T2_SUBS_PC_LR; 9452 return; 9453 } 9454 9455 if (!inst.operands[1].isreg) 9456 { 9457 /* Immediate operand. */ 9458 if (current_it_mask == 0 && opcode == T_MNEM_mov) 9459 narrow = 0; 9460 if (low_regs && narrow) 9461 { 9462 inst.instruction = THUMB_OP16 (opcode); 9463 inst.instruction |= inst.operands[0].reg << 8; 9464 if (inst.size_req == 2) 9465 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM; 9466 else 9467 inst.relax = opcode; 9468 } 9469 else 9470 { 9471 inst.instruction = THUMB_OP32 (inst.instruction); 9472 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000; 9473 inst.instruction |= inst.operands[0].reg << r0off; 9474 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE; 9475 } 9476 } 9477 else if (inst.operands[1].shifted && inst.operands[1].immisreg 9478 && (inst.instruction == T_MNEM_mov 9479 || inst.instruction == T_MNEM_movs)) 9480 { 9481 /* Register shifts are encoded as separate shift instructions. 
*/ 9482 bfd_boolean flags = (inst.instruction == T_MNEM_movs); 9483 9484 if (current_it_mask) 9485 narrow = !flags; 9486 else 9487 narrow = flags; 9488 9489 if (inst.size_req == 4) 9490 narrow = FALSE; 9491 9492 if (!low_regs || inst.operands[1].imm > 7) 9493 narrow = FALSE; 9494 9495 if (inst.operands[0].reg != inst.operands[1].reg) 9496 narrow = FALSE; 9497 9498 switch (inst.operands[1].shift_kind) 9499 { 9500 case SHIFT_LSL: 9501 opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl); 9502 break; 9503 case SHIFT_ASR: 9504 opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr); 9505 break; 9506 case SHIFT_LSR: 9507 opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr); 9508 break; 9509 case SHIFT_ROR: 9510 opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror); 9511 break; 9512 default: 9513 abort(); 9514 } 9515 9516 inst.instruction = opcode; 9517 if (narrow) 9518 { 9519 inst.instruction |= inst.operands[0].reg; 9520 inst.instruction |= inst.operands[1].imm << 3; 9521 } 9522 else 9523 { 9524 if (flags) 9525 inst.instruction |= CONDS_BIT; 9526 9527 inst.instruction |= inst.operands[0].reg << 8; 9528 inst.instruction |= inst.operands[1].reg << 16; 9529 inst.instruction |= inst.operands[1].imm; 9530 } 9531 } 9532 else if (!narrow) 9533 { 9534 /* Some mov with immediate shift have narrow variants. 9535 Register shifts are handled above. 
*/ 9536 if (low_regs && inst.operands[1].shifted 9537 && (inst.instruction == T_MNEM_mov 9538 || inst.instruction == T_MNEM_movs)) 9539 { 9540 if (current_it_mask) 9541 narrow = (inst.instruction == T_MNEM_mov); 9542 else 9543 narrow = (inst.instruction == T_MNEM_movs); 9544 } 9545 9546 if (narrow) 9547 { 9548 switch (inst.operands[1].shift_kind) 9549 { 9550 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break; 9551 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break; 9552 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break; 9553 default: narrow = FALSE; break; 9554 } 9555 } 9556 9557 if (narrow) 9558 { 9559 inst.instruction |= inst.operands[0].reg; 9560 inst.instruction |= inst.operands[1].reg << 3; 9561 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT; 9562 } 9563 else 9564 { 9565 inst.instruction = THUMB_OP32 (inst.instruction); 9566 inst.instruction |= inst.operands[0].reg << r0off; 9567 encode_thumb32_shifted_operand (1); 9568 } 9569 } 9570 else 9571 switch (inst.instruction) 9572 { 9573 case T_MNEM_mov: 9574 inst.instruction = T_OPCODE_MOV_HR; 9575 inst.instruction |= (inst.operands[0].reg & 0x8) << 4; 9576 inst.instruction |= (inst.operands[0].reg & 0x7); 9577 inst.instruction |= inst.operands[1].reg << 3; 9578 break; 9579 9580 case T_MNEM_movs: 9581 /* We know we have low registers at this point. 9582 Generate ADD Rd, Rs, #0. 
*/ 9583 inst.instruction = T_OPCODE_ADD_I3; 9584 inst.instruction |= inst.operands[0].reg; 9585 inst.instruction |= inst.operands[1].reg << 3; 9586 break; 9587 9588 case T_MNEM_cmp: 9589 if (low_regs) 9590 { 9591 inst.instruction = T_OPCODE_CMP_LR; 9592 inst.instruction |= inst.operands[0].reg; 9593 inst.instruction |= inst.operands[1].reg << 3; 9594 } 9595 else 9596 { 9597 inst.instruction = T_OPCODE_CMP_HR; 9598 inst.instruction |= (inst.operands[0].reg & 0x8) << 4; 9599 inst.instruction |= (inst.operands[0].reg & 0x7); 9600 inst.instruction |= inst.operands[1].reg << 3; 9601 } 9602 break; 9603 } 9604 return; 9605 } 9606 9607 inst.instruction = THUMB_OP16 (inst.instruction); 9608 if (inst.operands[1].isreg) 9609 { 9610 if (inst.operands[0].reg < 8 && inst.operands[1].reg < 8) 9611 { 9612 /* A move of two lowregs is encoded as ADD Rd, Rs, #0 9613 since a MOV instruction produces unpredictable results. */ 9614 if (inst.instruction == T_OPCODE_MOV_I8) 9615 inst.instruction = T_OPCODE_ADD_I3; 9616 else 9617 inst.instruction = T_OPCODE_CMP_LR; 9618 9619 inst.instruction |= inst.operands[0].reg; 9620 inst.instruction |= inst.operands[1].reg << 3; 9621 } 9622 else 9623 { 9624 if (inst.instruction == T_OPCODE_MOV_I8) 9625 inst.instruction = T_OPCODE_MOV_HR; 9626 else 9627 inst.instruction = T_OPCODE_CMP_HR; 9628 do_t_cpy (); 9629 } 9630 } 9631 else 9632 { 9633 constraint (inst.operands[0].reg > 7, 9634 _("only lo regs allowed with immediate")); 9635 inst.instruction |= inst.operands[0].reg << 8; 9636 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM; 9637 } 9638} 9639 9640static void 9641do_t_mov16 (void) 9642{ 9643 bfd_vma imm; 9644 bfd_boolean top; 9645 9646 top = (inst.instruction & 0x00800000) != 0; 9647 if (inst.reloc.type == BFD_RELOC_ARM_MOVW) 9648 { 9649 constraint (top, _(":lower16: not allowed this instruction")); 9650 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW; 9651 } 9652 else if (inst.reloc.type == BFD_RELOC_ARM_MOVT) 9653 { 9654 constraint (!top, _(":upper16: not 
allowed this instruction")); 9655 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT; 9656 } 9657 9658 inst.instruction |= inst.operands[0].reg << 8; 9659 if (inst.reloc.type == BFD_RELOC_UNUSED) 9660 { 9661 imm = inst.reloc.exp.X_add_number; 9662 inst.instruction |= (imm & 0xf000) << 4; 9663 inst.instruction |= (imm & 0x0800) << 15; 9664 inst.instruction |= (imm & 0x0700) << 4; 9665 inst.instruction |= (imm & 0x00ff); 9666 } 9667} 9668 9669static void 9670do_t_mvn_tst (void) 9671{ 9672 if (unified_syntax) 9673 { 9674 int r0off = (inst.instruction == T_MNEM_mvn 9675 || inst.instruction == T_MNEM_mvns) ? 8 : 16; 9676 bfd_boolean narrow; 9677 9678 if (inst.size_req == 4 9679 || inst.instruction > 0xffff 9680 || inst.operands[1].shifted 9681 || inst.operands[0].reg > 7 || inst.operands[1].reg > 7) 9682 narrow = FALSE; 9683 else if (inst.instruction == T_MNEM_cmn) 9684 narrow = TRUE; 9685 else if (THUMB_SETS_FLAGS (inst.instruction)) 9686 narrow = (current_it_mask == 0); 9687 else 9688 narrow = (current_it_mask != 0); 9689 9690 if (!inst.operands[1].isreg) 9691 { 9692 /* For an immediate, we always generate a 32-bit opcode; 9693 section relaxation will shrink it later if possible. */ 9694 if (inst.instruction < 0xffff) 9695 inst.instruction = THUMB_OP32 (inst.instruction); 9696 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000; 9697 inst.instruction |= inst.operands[0].reg << r0off; 9698 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE; 9699 } 9700 else 9701 { 9702 /* See if we can do this with a 16-bit instruction. 
*/ 9703 if (narrow) 9704 { 9705 inst.instruction = THUMB_OP16 (inst.instruction); 9706 inst.instruction |= inst.operands[0].reg; 9707 inst.instruction |= inst.operands[1].reg << 3; 9708 } 9709 else 9710 { 9711 constraint (inst.operands[1].shifted 9712 && inst.operands[1].immisreg, 9713 _("shift must be constant")); 9714 if (inst.instruction < 0xffff) 9715 inst.instruction = THUMB_OP32 (inst.instruction); 9716 inst.instruction |= inst.operands[0].reg << r0off; 9717 encode_thumb32_shifted_operand (1); 9718 } 9719 } 9720 } 9721 else 9722 { 9723 constraint (inst.instruction > 0xffff 9724 || inst.instruction == T_MNEM_mvns, BAD_THUMB32); 9725 constraint (!inst.operands[1].isreg || inst.operands[1].shifted, 9726 _("unshifted register required")); 9727 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7, 9728 BAD_HIREG); 9729 9730 inst.instruction = THUMB_OP16 (inst.instruction); 9731 inst.instruction |= inst.operands[0].reg; 9732 inst.instruction |= inst.operands[1].reg << 3; 9733 } 9734} 9735 9736static void 9737do_t_mrs (void) 9738{ 9739 int flags; 9740 9741 if (do_vfp_nsyn_mrs () == SUCCESS) 9742 return; 9743 9744 flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT); 9745 if (flags == 0) 9746 { 9747 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m), 9748 _("selected processor does not support " 9749 "requested special purpose register")); 9750 } 9751 else 9752 { 9753 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1), 9754 _("selected processor does not support " 9755 "requested special purpose register %x")); 9756 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. 
*/ 9757 constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f), 9758 _("'CPSR' or 'SPSR' expected")); 9759 } 9760 9761 inst.instruction |= inst.operands[0].reg << 8; 9762 inst.instruction |= (flags & SPSR_BIT) >> 2; 9763 inst.instruction |= inst.operands[1].imm & 0xff; 9764} 9765 9766static void 9767do_t_msr (void) 9768{ 9769 int flags; 9770 9771 if (do_vfp_nsyn_msr () == SUCCESS) 9772 return; 9773 9774 constraint (!inst.operands[1].isreg, 9775 _("Thumb encoding does not support an immediate here")); 9776 flags = inst.operands[0].imm; 9777 if (flags & ~0xff) 9778 { 9779 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1), 9780 _("selected processor does not support " 9781 "requested special purpose register")); 9782 } 9783 else 9784 { 9785 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m), 9786 _("selected processor does not support " 9787 "requested special purpose register")); 9788 flags |= PSR_f; 9789 } 9790 inst.instruction |= (flags & SPSR_BIT) >> 2; 9791 inst.instruction |= (flags & ~SPSR_BIT) >> 8; 9792 inst.instruction |= (flags & 0xff); 9793 inst.instruction |= inst.operands[1].reg << 16; 9794} 9795 9796static void 9797do_t_mul (void) 9798{ 9799 if (!inst.operands[2].present) 9800 inst.operands[2].reg = inst.operands[0].reg; 9801 9802 /* There is no 32-bit MULS and no 16-bit MUL. 
*/ 9803 if (unified_syntax && inst.instruction == T_MNEM_mul) 9804 { 9805 inst.instruction = THUMB_OP32 (inst.instruction); 9806 inst.instruction |= inst.operands[0].reg << 8; 9807 inst.instruction |= inst.operands[1].reg << 16; 9808 inst.instruction |= inst.operands[2].reg << 0; 9809 } 9810 else 9811 { 9812 constraint (!unified_syntax 9813 && inst.instruction == T_MNEM_muls, BAD_THUMB32); 9814 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7, 9815 BAD_HIREG); 9816 9817 inst.instruction = THUMB_OP16 (inst.instruction); 9818 inst.instruction |= inst.operands[0].reg; 9819 9820 if (inst.operands[0].reg == inst.operands[1].reg) 9821 inst.instruction |= inst.operands[2].reg << 3; 9822 else if (inst.operands[0].reg == inst.operands[2].reg) 9823 inst.instruction |= inst.operands[1].reg << 3; 9824 else 9825 constraint (1, _("dest must overlap one source register")); 9826 } 9827} 9828 9829static void 9830do_t_mull (void) 9831{ 9832 inst.instruction |= inst.operands[0].reg << 12; 9833 inst.instruction |= inst.operands[1].reg << 8; 9834 inst.instruction |= inst.operands[2].reg << 16; 9835 inst.instruction |= inst.operands[3].reg; 9836 9837 if (inst.operands[0].reg == inst.operands[1].reg) 9838 as_tsktsk (_("rdhi and rdlo must be different")); 9839} 9840 9841static void 9842do_t_nop (void) 9843{ 9844 if (unified_syntax) 9845 { 9846 if (inst.size_req == 4 || inst.operands[0].imm > 15) 9847 { 9848 inst.instruction = THUMB_OP32 (inst.instruction); 9849 inst.instruction |= inst.operands[0].imm; 9850 } 9851 else 9852 { 9853 inst.instruction = THUMB_OP16 (inst.instruction); 9854 inst.instruction |= inst.operands[0].imm << 4; 9855 } 9856 } 9857 else 9858 { 9859 constraint (inst.operands[0].present, 9860 _("Thumb does not support NOP with hints")); 9861 inst.instruction = 0x46c0; 9862 } 9863} 9864 9865static void 9866do_t_neg (void) 9867{ 9868 if (unified_syntax) 9869 { 9870 bfd_boolean narrow; 9871 9872 if (THUMB_SETS_FLAGS (inst.instruction)) 9873 narrow = 
(current_it_mask == 0); 9874 else 9875 narrow = (current_it_mask != 0); 9876 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7) 9877 narrow = FALSE; 9878 if (inst.size_req == 4) 9879 narrow = FALSE; 9880 9881 if (!narrow) 9882 { 9883 inst.instruction = THUMB_OP32 (inst.instruction); 9884 inst.instruction |= inst.operands[0].reg << 8; 9885 inst.instruction |= inst.operands[1].reg << 16; 9886 } 9887 else 9888 { 9889 inst.instruction = THUMB_OP16 (inst.instruction); 9890 inst.instruction |= inst.operands[0].reg; 9891 inst.instruction |= inst.operands[1].reg << 3; 9892 } 9893 } 9894 else 9895 { 9896 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7, 9897 BAD_HIREG); 9898 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32); 9899 9900 inst.instruction = THUMB_OP16 (inst.instruction); 9901 inst.instruction |= inst.operands[0].reg; 9902 inst.instruction |= inst.operands[1].reg << 3; 9903 } 9904} 9905 9906static void 9907do_t_pkhbt (void) 9908{ 9909 inst.instruction |= inst.operands[0].reg << 8; 9910 inst.instruction |= inst.operands[1].reg << 16; 9911 inst.instruction |= inst.operands[2].reg; 9912 if (inst.operands[3].present) 9913 { 9914 unsigned int val = inst.reloc.exp.X_add_number; 9915 constraint (inst.reloc.exp.X_op != O_constant, 9916 _("expression too complex")); 9917 inst.instruction |= (val & 0x1c) << 10; 9918 inst.instruction |= (val & 0x03) << 6; 9919 } 9920} 9921 9922static void 9923do_t_pkhtb (void) 9924{ 9925 if (!inst.operands[3].present) 9926 inst.instruction &= ~0x00000020; 9927 do_t_pkhbt (); 9928} 9929 9930static void 9931do_t_pld (void) 9932{ 9933 encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE); 9934} 9935 9936static void 9937do_t_push_pop (void) 9938{ 9939 unsigned mask; 9940 9941 constraint (inst.operands[0].writeback, 9942 _("push/pop do not support {reglist}^")); 9943 constraint (inst.reloc.type != BFD_RELOC_UNUSED, 9944 _("expression too complex")); 9945 9946 mask = inst.operands[0].imm; 9947 if ((mask 
& ~0xff) == 0) 9948 inst.instruction = THUMB_OP16 (inst.instruction) | mask; 9949 else if ((inst.instruction == T_MNEM_push 9950 && (mask & ~0xff) == 1 << REG_LR) 9951 || (inst.instruction == T_MNEM_pop 9952 && (mask & ~0xff) == 1 << REG_PC)) 9953 { 9954 inst.instruction = THUMB_OP16 (inst.instruction); 9955 inst.instruction |= THUMB_PP_PC_LR; 9956 inst.instruction |= mask & 0xff; 9957 } 9958 else if (unified_syntax) 9959 { 9960 inst.instruction = THUMB_OP32 (inst.instruction); 9961 encode_thumb2_ldmstm(13, mask, TRUE); 9962 } 9963 else 9964 { 9965 inst.error = _("invalid register list to push/pop instruction"); 9966 return; 9967 } 9968} 9969 9970static void 9971do_t_rbit (void) 9972{ 9973 inst.instruction |= inst.operands[0].reg << 8; 9974 inst.instruction |= inst.operands[1].reg << 16; 9975} 9976 9977static void 9978do_t_rd_rm (void) 9979{ 9980 inst.instruction |= inst.operands[0].reg << 8; 9981 inst.instruction |= inst.operands[1].reg; 9982} 9983 9984static void 9985do_t_rev (void) 9986{ 9987 if (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7 9988 && inst.size_req != 4) 9989 { 9990 inst.instruction = THUMB_OP16 (inst.instruction); 9991 inst.instruction |= inst.operands[0].reg; 9992 inst.instruction |= inst.operands[1].reg << 3; 9993 } 9994 else if (unified_syntax) 9995 { 9996 inst.instruction = THUMB_OP32 (inst.instruction); 9997 inst.instruction |= inst.operands[0].reg << 8; 9998 inst.instruction |= inst.operands[1].reg << 16; 9999 inst.instruction |= inst.operands[1].reg; 10000 } 10001 else 10002 inst.error = BAD_HIREG; 10003} 10004 10005static void 10006do_t_rsb (void) 10007{ 10008 int Rd, Rs; 10009 10010 Rd = inst.operands[0].reg; 10011 Rs = (inst.operands[1].present 10012 ? 
inst.operands[1].reg /* Rd, Rs, foo */ 10013 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */ 10014 10015 inst.instruction |= Rd << 8; 10016 inst.instruction |= Rs << 16; 10017 if (!inst.operands[2].isreg) 10018 { 10019 bfd_boolean narrow; 10020 10021 if ((inst.instruction & 0x00100000) != 0) 10022 narrow = (current_it_mask == 0); 10023 else 10024 narrow = (current_it_mask != 0); 10025 10026 if (Rd > 7 || Rs > 7) 10027 narrow = FALSE; 10028 10029 if (inst.size_req == 4 || !unified_syntax) 10030 narrow = FALSE; 10031 10032 if (inst.reloc.exp.X_op != O_constant 10033 || inst.reloc.exp.X_add_number != 0) 10034 narrow = FALSE; 10035 10036 /* Turn rsb #0 into 16-bit neg. We should probably do this via 10037 relaxation, but it doesn't seem worth the hassle. */ 10038 if (narrow) 10039 { 10040 inst.reloc.type = BFD_RELOC_UNUSED; 10041 inst.instruction = THUMB_OP16 (T_MNEM_negs); 10042 inst.instruction |= Rs << 3; 10043 inst.instruction |= Rd; 10044 } 10045 else 10046 { 10047 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000; 10048 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE; 10049 } 10050 } 10051 else 10052 encode_thumb32_shifted_operand (2); 10053} 10054 10055static void 10056do_t_setend (void) 10057{ 10058 constraint (current_it_mask, BAD_NOT_IT); 10059 if (inst.operands[0].imm) 10060 inst.instruction |= 0x8; 10061} 10062 10063static void 10064do_t_shift (void) 10065{ 10066 if (!inst.operands[1].present) 10067 inst.operands[1].reg = inst.operands[0].reg; 10068 10069 if (unified_syntax) 10070 { 10071 bfd_boolean narrow; 10072 int shift_kind; 10073 10074 switch (inst.instruction) 10075 { 10076 case T_MNEM_asr: 10077 case T_MNEM_asrs: shift_kind = SHIFT_ASR; break; 10078 case T_MNEM_lsl: 10079 case T_MNEM_lsls: shift_kind = SHIFT_LSL; break; 10080 case T_MNEM_lsr: 10081 case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break; 10082 case T_MNEM_ror: 10083 case T_MNEM_rors: shift_kind = SHIFT_ROR; break; 10084 default: abort (); 10085 } 10086 10087 if 
(THUMB_SETS_FLAGS (inst.instruction)) 10088 narrow = (current_it_mask == 0); 10089 else 10090 narrow = (current_it_mask != 0); 10091 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7) 10092 narrow = FALSE; 10093 if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR) 10094 narrow = FALSE; 10095 if (inst.operands[2].isreg 10096 && (inst.operands[1].reg != inst.operands[0].reg 10097 || inst.operands[2].reg > 7)) 10098 narrow = FALSE; 10099 if (inst.size_req == 4) 10100 narrow = FALSE; 10101 10102 if (!narrow) 10103 { 10104 if (inst.operands[2].isreg) 10105 { 10106 inst.instruction = THUMB_OP32 (inst.instruction); 10107 inst.instruction |= inst.operands[0].reg << 8; 10108 inst.instruction |= inst.operands[1].reg << 16; 10109 inst.instruction |= inst.operands[2].reg; 10110 } 10111 else 10112 { 10113 inst.operands[1].shifted = 1; 10114 inst.operands[1].shift_kind = shift_kind; 10115 inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction) 10116 ? T_MNEM_movs : T_MNEM_mov); 10117 inst.instruction |= inst.operands[0].reg << 8; 10118 encode_thumb32_shifted_operand (1); 10119 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. 
*/ 10120 inst.reloc.type = BFD_RELOC_UNUSED; 10121 } 10122 } 10123 else 10124 { 10125 if (inst.operands[2].isreg) 10126 { 10127 switch (shift_kind) 10128 { 10129 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break; 10130 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break; 10131 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break; 10132 case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break; 10133 default: abort (); 10134 } 10135 10136 inst.instruction |= inst.operands[0].reg; 10137 inst.instruction |= inst.operands[2].reg << 3; 10138 } 10139 else 10140 { 10141 switch (shift_kind) 10142 { 10143 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break; 10144 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break; 10145 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break; 10146 default: abort (); 10147 } 10148 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT; 10149 inst.instruction |= inst.operands[0].reg; 10150 inst.instruction |= inst.operands[1].reg << 3; 10151 } 10152 } 10153 } 10154 else 10155 { 10156 constraint (inst.operands[0].reg > 7 10157 || inst.operands[1].reg > 7, BAD_HIREG); 10158 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32); 10159 10160 if (inst.operands[2].isreg) /* Rd, {Rs,} Rn */ 10161 { 10162 constraint (inst.operands[2].reg > 7, BAD_HIREG); 10163 constraint (inst.operands[0].reg != inst.operands[1].reg, 10164 _("source1 and dest must be same register")); 10165 10166 switch (inst.instruction) 10167 { 10168 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break; 10169 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break; 10170 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break; 10171 case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break; 10172 default: abort (); 10173 } 10174 10175 inst.instruction |= inst.operands[0].reg; 10176 inst.instruction |= inst.operands[2].reg << 3; 10177 } 10178 else 10179 { 10180 switch (inst.instruction) 10181 { 10182 case T_MNEM_asr: inst.instruction = 
T_OPCODE_ASR_I; break; 10183 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break; 10184 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break; 10185 case T_MNEM_ror: inst.error = _("ror #imm not supported"); return; 10186 default: abort (); 10187 } 10188 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT; 10189 inst.instruction |= inst.operands[0].reg; 10190 inst.instruction |= inst.operands[1].reg << 3; 10191 } 10192 } 10193} 10194 10195static void 10196do_t_simd (void) 10197{ 10198 inst.instruction |= inst.operands[0].reg << 8; 10199 inst.instruction |= inst.operands[1].reg << 16; 10200 inst.instruction |= inst.operands[2].reg; 10201} 10202 10203static void 10204do_t_smc (void) 10205{ 10206 unsigned int value = inst.reloc.exp.X_add_number; 10207 constraint (inst.reloc.exp.X_op != O_constant, 10208 _("expression too complex")); 10209 inst.reloc.type = BFD_RELOC_UNUSED; 10210 inst.instruction |= (value & 0xf000) >> 12; 10211 inst.instruction |= (value & 0x0ff0); 10212 inst.instruction |= (value & 0x000f) << 16; 10213} 10214 10215static void 10216do_t_ssat (void) 10217{ 10218 inst.instruction |= inst.operands[0].reg << 8; 10219 inst.instruction |= inst.operands[1].imm - 1; 10220 inst.instruction |= inst.operands[2].reg << 16; 10221 10222 if (inst.operands[3].present) 10223 { 10224 constraint (inst.reloc.exp.X_op != O_constant, 10225 _("expression too complex")); 10226 10227 if (inst.reloc.exp.X_add_number != 0) 10228 { 10229 if (inst.operands[3].shift_kind == SHIFT_ASR) 10230 inst.instruction |= 0x00200000; /* sh bit */ 10231 inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10; 10232 inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6; 10233 } 10234 inst.reloc.type = BFD_RELOC_UNUSED; 10235 } 10236} 10237 10238static void 10239do_t_ssat16 (void) 10240{ 10241 inst.instruction |= inst.operands[0].reg << 8; 10242 inst.instruction |= inst.operands[1].imm - 1; 10243 inst.instruction |= inst.operands[2].reg << 16; 10244} 10245 10246static void 
10247do_t_strex (void) 10248{ 10249 constraint (!inst.operands[2].isreg || !inst.operands[2].preind 10250 || inst.operands[2].postind || inst.operands[2].writeback 10251 || inst.operands[2].immisreg || inst.operands[2].shifted 10252 || inst.operands[2].negative, 10253 BAD_ADDR_MODE); 10254 10255 inst.instruction |= inst.operands[0].reg << 8; 10256 inst.instruction |= inst.operands[1].reg << 12; 10257 inst.instruction |= inst.operands[2].reg << 16; 10258 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8; 10259} 10260 10261static void 10262do_t_strexd (void) 10263{ 10264 if (!inst.operands[2].present) 10265 inst.operands[2].reg = inst.operands[1].reg + 1; 10266 10267 constraint (inst.operands[0].reg == inst.operands[1].reg 10268 || inst.operands[0].reg == inst.operands[2].reg 10269 || inst.operands[0].reg == inst.operands[3].reg 10270 || inst.operands[1].reg == inst.operands[2].reg, 10271 BAD_OVERLAP); 10272 10273 inst.instruction |= inst.operands[0].reg; 10274 inst.instruction |= inst.operands[1].reg << 12; 10275 inst.instruction |= inst.operands[2].reg << 8; 10276 inst.instruction |= inst.operands[3].reg << 16; 10277} 10278 10279static void 10280do_t_sxtah (void) 10281{ 10282 inst.instruction |= inst.operands[0].reg << 8; 10283 inst.instruction |= inst.operands[1].reg << 16; 10284 inst.instruction |= inst.operands[2].reg; 10285 inst.instruction |= inst.operands[3].imm << 4; 10286} 10287 10288static void 10289do_t_sxth (void) 10290{ 10291 if (inst.instruction <= 0xffff && inst.size_req != 4 10292 && inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7 10293 && (!inst.operands[2].present || inst.operands[2].imm == 0)) 10294 { 10295 inst.instruction = THUMB_OP16 (inst.instruction); 10296 inst.instruction |= inst.operands[0].reg; 10297 inst.instruction |= inst.operands[1].reg << 3; 10298 } 10299 else if (unified_syntax) 10300 { 10301 if (inst.instruction <= 0xffff) 10302 inst.instruction = THUMB_OP32 (inst.instruction); 10303 inst.instruction |= inst.operands[0].reg << 
8; 10304 inst.instruction |= inst.operands[1].reg; 10305 inst.instruction |= inst.operands[2].imm << 4; 10306 } 10307 else 10308 { 10309 constraint (inst.operands[2].present && inst.operands[2].imm != 0, 10310 _("Thumb encoding does not support rotation")); 10311 constraint (1, BAD_HIREG); 10312 } 10313} 10314 10315static void 10316do_t_swi (void) 10317{ 10318 inst.reloc.type = BFD_RELOC_ARM_SWI; 10319} 10320 10321static void 10322do_t_tb (void) 10323{ 10324 int half; 10325 10326 half = (inst.instruction & 0x10) != 0; 10327 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH); 10328 constraint (inst.operands[0].immisreg, 10329 _("instruction requires register index")); 10330 constraint (inst.operands[0].imm == 15, 10331 _("PC is not a valid index register")); 10332 constraint (!half && inst.operands[0].shifted, 10333 _("instruction does not allow shifted index")); 10334 inst.instruction |= (inst.operands[0].reg << 16) | inst.operands[0].imm; 10335} 10336 10337static void 10338do_t_usat (void) 10339{ 10340 inst.instruction |= inst.operands[0].reg << 8; 10341 inst.instruction |= inst.operands[1].imm; 10342 inst.instruction |= inst.operands[2].reg << 16; 10343 10344 if (inst.operands[3].present) 10345 { 10346 constraint (inst.reloc.exp.X_op != O_constant, 10347 _("expression too complex")); 10348 if (inst.reloc.exp.X_add_number != 0) 10349 { 10350 if (inst.operands[3].shift_kind == SHIFT_ASR) 10351 inst.instruction |= 0x00200000; /* sh bit */ 10352 10353 inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10; 10354 inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6; 10355 } 10356 inst.reloc.type = BFD_RELOC_UNUSED; 10357 } 10358} 10359 10360static void 10361do_t_usat16 (void) 10362{ 10363 inst.instruction |= inst.operands[0].reg << 8; 10364 inst.instruction |= inst.operands[1].imm; 10365 inst.instruction |= inst.operands[2].reg << 16; 10366} 10367 10368/* Neon instruction encoder helpers. 
*/

/* Encodings for the different types for various Neon opcodes.  */

/* An "invalid" code for the following tables.  */
#define N_INV -1u

/* One row of NEON_ENC_TAB: base encodings for the integer,
   float-or-polynomial, and scalar-or-immediate variants of an
   overloaded mnemonic (N_INV where a variant does not exist).  */
struct neon_tab_entry
{
  unsigned integer;
  unsigned float_or_poly;
  unsigned scalar_or_imm;
};

/* Map overloaded Neon opcodes to their respective encodings.  */
#define NEON_ENC_TAB						\
  X(vabd,	0x0000700, 0x1200d00, N_INV),			\
  X(vmax,	0x0000600, 0x0000f00, N_INV),			\
  X(vmin,	0x0000610, 0x0200f00, N_INV),			\
  X(vpadd,	0x0000b10, 0x1000d00, N_INV),			\
  X(vpmax,	0x0000a00, 0x1000f00, N_INV),			\
  X(vpmin,	0x0000a10, 0x1200f00, N_INV),			\
  X(vadd,	0x0000800, 0x0000d00, N_INV),			\
  X(vsub,	0x1000800, 0x0200d00, N_INV),			\
  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),		\
  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),		\
  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),		\
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed.  */		\
  X(vclt,	0x0000300, 0x1200e00, 0x1b10200),		\
  X(vcle,	0x0000310, 0x1000e00, 0x1b10180),		\
  X(vmla,	0x0000900, 0x0000d10, 0x0800040),		\
  X(vmls,	0x1000900, 0x0200d10, 0x0800440),		\
  X(vmul,	0x0000910, 0x1000d10, 0x0800840),		\
  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal,	0x0800800, N_INV,     0x0800240),		\
  X(vmlsl,	0x0800a00, N_INV,     0x0800640),		\
  X(vqdmlal,	0x0800900, N_INV,     0x0800340),		\
  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),		\
  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),		\
  X(vqdmulh,	0x0000b00, N_INV,     0x0800c40),		\
  X(vqrdmulh,	0x1000b00, N_INV,     0x0800d40),		\
  X(vshl,	0x0000400, N_INV,     0x0800510),		\
  X(vqshl,	0x0000410, N_INV,     0x0800710),		\
  X(vand,	0x0000110, N_INV,     0x0800030),		\
  X(vbic,	0x0100110, N_INV,     0x0800030),		\
  X(veor,	0x1000110, N_INV,     N_INV),			\
  X(vorn,	0x0300110, N_INV,     0x0800010),		\
  X(vorr,	0x0200110, N_INV,     0x0800010),		\
  X(vmvn,	0x1b00580, N_INV,     0x0800030),		\
  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
  X(vcvt,	0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
  X(vdup,	0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */	\
  X(vld1,	0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1,	0x0000000, 0x0800000, N_INV),			\
  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),		\
  X(vst2,	0x0000100, 0x0800100, N_INV),			\
  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),		\
  X(vst3,	0x0000200, 0x0800200, N_INV),			\
  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),		\
  X(vst4,	0x0000300, 0x0800300, N_INV),			\
  X(vmovn,	0x1b20200, N_INV,     N_INV),			\
  X(vtrn,	0x1b20080, N_INV,     N_INV),			\
  X(vqmovn,	0x1b20200, N_INV,     N_INV),			\
  X(vqmovun,	0x1b20240, N_INV,     N_INV),			\
  X(vnmul,	0xe200a40, 0xe200b40, N_INV),			\
  X(vnmla,	0xe000a40, 0xe000b40, N_INV),			\
  X(vnmls,	0xe100a40, 0xe100b40, N_INV),			\
  X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),			\
  X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),			\
  X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),			\
  X(vcmpez,	0xeb50ac0, 0xeb50bc0, N_INV)

/* N_MNEM_<opc> enumerators, one per NEON_ENC_TAB row, used to index
   neon_enc_tab below.  */
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};

static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};

/* Accessors for the table above.  The low 28 bits of X select the row;
   the SINGLE/DOUBLE forms additionally preserve X's top nibble
   (condition field) in the result.  */
#define NEON_ENC_INTEGER(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_SINGLE(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))

/* Define shapes for instruction operands.  The following mnemonic characters
   are used in this table:

     F - VFP S<n> register
     D - Neon D<n> register
     Q - Neon Q<n> register
     I - Immediate
     S - Scalar
     R - ARM register
     L - D<n> register list

   This table is used to generate various data:
     - enumerations of the form NS_DDR to be used as arguments to
       neon_select_shape.
     - a table classifying shapes into single, double, quad, mixed.
     - a table used to drive neon_select_shape.  */

#define NEON_SHAPE_DEF			\
  X(3, (D, D, D), DOUBLE),		\
  X(3, (Q, Q, Q), QUAD),		\
  X(3, (D, D, I), DOUBLE),		\
  X(3, (Q, Q, I), QUAD),		\
  X(3, (D, D, S), DOUBLE),		\
  X(3, (Q, Q, S), QUAD),		\
  X(2, (D, D), DOUBLE),			\
  X(2, (Q, Q), QUAD),			\
  X(2, (D, S), DOUBLE),			\
  X(2, (Q, S), QUAD),			\
  X(2, (D, R), DOUBLE),			\
  X(2, (Q, R), QUAD),			\
  X(2, (D, I), DOUBLE),			\
  X(2, (Q, I), QUAD),			\
  X(3, (D, L, D), DOUBLE),		\
  X(2, (D, Q), MIXED),			\
  X(2, (Q, D), MIXED),			\
  X(3, (D, Q, I), MIXED),		\
  X(3, (Q, D, I), MIXED),		\
  X(3, (Q, D, D), MIXED),		\
  X(3, (D, Q, Q), MIXED),		\
  X(3, (Q, Q, D), MIXED),		\
  X(3, (Q, D, S), MIXED),		\
  X(3, (D, Q, S), MIXED),		\
  X(4, (D, D, D, I), DOUBLE),		\
  X(4, (Q, Q, Q, I), QUAD),		\
  X(2, (F, F), SINGLE),			\
  X(3, (F, F, F), SINGLE),		\
  X(2, (F, I), SINGLE),			\
  X(2, (F, D), MIXED),			\
  X(2, (D, F), MIXED),			\
  X(3, (F, F, I), MIXED),		\
  X(4, (R, R, F, F), SINGLE),		\
  X(4, (F, F, R, R), SINGLE),		\
  X(3, (D, R, R), DOUBLE),		\
  X(3, (R, R, D), DOUBLE),		\
  X(2, (S, R), SINGLE),			\
  X(2, (R, S), SINGLE),			\
  X(2, (F, R), SINGLE),			\
  X(2, (R, F), SINGLE)

/* Expansion 1: NS_<shape> enumerators (e.g. NS_DDR).  */
#define S2(A,B)		NS_##A##B
#define S3(A,B,C)	NS_##A##B##C
#define S4(A,B,C,D)	NS_##A##B##C##D

#define X(N, L, C) S##N L

enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL
};

#undef X
#undef S2
#undef S3
#undef S4

enum neon_shape_class
{
  SC_SINGLE,
  SC_DOUBLE,
  SC_QUAD,
  SC_MIXED
};

/* Expansion 2: class of each shape, indexed by enum neon_shape.  */
#define X(N, L, C) SC_##C

static enum neon_shape_class neon_shape_class[] =
{
  NEON_SHAPE_DEF
};

#undef X

/* Per-element kinds, mirroring the mnemonic characters documented
   above NEON_SHAPE_DEF.  */
enum neon_shape_el
{
  SE_F,
  SE_D,
  SE_Q,
  SE_I,
  SE_S,
  SE_R,
  SE_L
};

/* Register widths of above.  */
static unsigned neon_shape_el_size[] =
{
  32,
  64,
  128,
  0,
  32,
  32,
  0
};

/* Expansion 3: element count plus per-element kinds for each shape.  */
struct neon_shape_info
{
  unsigned els;
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];
};

#define S2(A,B)		{ SE_##A, SE_##B }
#define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }

#define X(N, L, C) { N, S##N L }

static struct neon_shape_info neon_shape_tab[] =
{
  NEON_SHAPE_DEF
};

#undef X
#undef S2
#undef S3
#undef S4

/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit.  If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.  */

enum neon_type_mask
{
  N_S8   = 0x000001,
  N_S16  = 0x000002,
  N_S32  = 0x000004,
  N_S64  = 0x000008,
  N_U8   = 0x000010,
  N_U16  = 0x000020,
  N_U32  = 0x000040,
  N_U64  = 0x000080,
  N_I8   = 0x000100,
  N_I16  = 0x000200,
  N_I32  = 0x000400,
  N_I64  = 0x000800,
  N_8    = 0x001000,
  N_16   = 0x002000,
  N_32   = 0x004000,
  N_64   = 0x008000,
  N_P8   = 0x010000,
  N_P16  = 0x020000,
  N_F32  = 0x040000,
  N_F64  = 0x080000,
  N_KEY  = 0x100000, /* key element (main type specifier).  */
  N_EQK  = 0x200000, /* given operand has the same type & size as the key.  */
  N_VFP  = 0x400000, /* VFP mode: operand size must match register width.  */
  N_DBL  = 0x000001, /* if N_EQK, this operand is twice the size.  */
  N_HLF  = 0x000002, /* if N_EQK, this operand is half the size.  */
  N_SGN  = 0x000004, /* if N_EQK, this operand is forced to be signed.
*/ 10642 N_UNS = 0x000008, /* if N_EQK, this operand is forced to be unsigned. */ 10643 N_INT = 0x000010, /* if N_EQK, this operand is forced to be integer. */ 10644 N_FLT = 0x000020, /* if N_EQK, this operand is forced to be float. */ 10645 N_SIZ = 0x000040, /* if N_EQK, this operand is forced to be size-only. */ 10646 N_UTYP = 0, 10647 N_MAX_NONSPECIAL = N_F64 10648}; 10649 10650#define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ) 10651 10652#define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64) 10653#define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32) 10654#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64) 10655#define N_SUF_32 (N_SU_32 | N_F32) 10656#define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64) 10657#define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32) 10658 10659/* Pass this as the first type argument to neon_check_type to ignore types 10660 altogether. */ 10661#define N_IGNORE_TYPE (N_KEY | N_EQK) 10662 10663/* Select a "shape" for the current instruction (describing register types or 10664 sizes) from a list of alternatives. Return NS_NULL if the current instruction 10665 doesn't fit. For non-polymorphic shapes, checking is usually done as a 10666 function of operand parsing, so this function doesn't need to be called. 10667 Shapes should be listed in order of decreasing length. */ 10668 10669static enum neon_shape 10670neon_select_shape (enum neon_shape shape, ...) 10671{ 10672 va_list ap; 10673 enum neon_shape first_shape = shape; 10674 10675 /* Fix missing optional operands. FIXME: we don't know at this point how 10676 many arguments we should have, so this makes the assumption that we have 10677 > 1. This is true of all current Neon opcodes, I think, but may not be 10678 true in the future. 
*/ 10679 if (!inst.operands[1].present) 10680 inst.operands[1] = inst.operands[0]; 10681 10682 va_start (ap, shape); 10683 10684 for (; shape != NS_NULL; shape = va_arg (ap, int)) 10685 { 10686 unsigned j; 10687 int matches = 1; 10688 10689 for (j = 0; j < neon_shape_tab[shape].els; j++) 10690 { 10691 if (!inst.operands[j].present) 10692 { 10693 matches = 0; 10694 break; 10695 } 10696 10697 switch (neon_shape_tab[shape].el[j]) 10698 { 10699 case SE_F: 10700 if (!(inst.operands[j].isreg 10701 && inst.operands[j].isvec 10702 && inst.operands[j].issingle 10703 && !inst.operands[j].isquad)) 10704 matches = 0; 10705 break; 10706 10707 case SE_D: 10708 if (!(inst.operands[j].isreg 10709 && inst.operands[j].isvec 10710 && !inst.operands[j].isquad 10711 && !inst.operands[j].issingle)) 10712 matches = 0; 10713 break; 10714 10715 case SE_R: 10716 if (!(inst.operands[j].isreg 10717 && !inst.operands[j].isvec)) 10718 matches = 0; 10719 break; 10720 10721 case SE_Q: 10722 if (!(inst.operands[j].isreg 10723 && inst.operands[j].isvec 10724 && inst.operands[j].isquad 10725 && !inst.operands[j].issingle)) 10726 matches = 0; 10727 break; 10728 10729 case SE_I: 10730 if (!(!inst.operands[j].isreg 10731 && !inst.operands[j].isscalar)) 10732 matches = 0; 10733 break; 10734 10735 case SE_S: 10736 if (!(!inst.operands[j].isreg 10737 && inst.operands[j].isscalar)) 10738 matches = 0; 10739 break; 10740 10741 case SE_L: 10742 break; 10743 } 10744 } 10745 if (matches) 10746 break; 10747 } 10748 10749 va_end (ap); 10750 10751 if (shape == NS_NULL && first_shape != NS_NULL) 10752 first_error (_("invalid instruction shape")); 10753 10754 return shape; 10755} 10756 10757/* True if SHAPE is predominantly a quadword operation (most of the time, this 10758 means the Q bit should be set). 
*/ 10759 10760static int 10761neon_quad (enum neon_shape shape) 10762{ 10763 return neon_shape_class[shape] == SC_QUAD; 10764} 10765 10766static void 10767neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type, 10768 unsigned *g_size) 10769{ 10770 /* Allow modification to be made to types which are constrained to be 10771 based on the key element, based on bits set alongside N_EQK. */ 10772 if ((typebits & N_EQK) != 0) 10773 { 10774 if ((typebits & N_HLF) != 0) 10775 *g_size /= 2; 10776 else if ((typebits & N_DBL) != 0) 10777 *g_size *= 2; 10778 if ((typebits & N_SGN) != 0) 10779 *g_type = NT_signed; 10780 else if ((typebits & N_UNS) != 0) 10781 *g_type = NT_unsigned; 10782 else if ((typebits & N_INT) != 0) 10783 *g_type = NT_integer; 10784 else if ((typebits & N_FLT) != 0) 10785 *g_type = NT_float; 10786 else if ((typebits & N_SIZ) != 0) 10787 *g_type = NT_untyped; 10788 } 10789} 10790 10791/* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key" 10792 operand type, i.e. the single type specified in a Neon instruction when it 10793 is the only one given. */ 10794 10795static struct neon_type_el 10796neon_type_promote (struct neon_type_el *key, unsigned thisarg) 10797{ 10798 struct neon_type_el dest = *key; 10799 10800 assert ((thisarg & N_EQK) != 0); 10801 10802 neon_modify_type_size (thisarg, &dest.type, &dest.size); 10803 10804 return dest; 10805} 10806 10807/* Convert Neon type and size into compact bitmask representation. 
*/ 10808 10809static enum neon_type_mask 10810type_chk_of_el_type (enum neon_el_type type, unsigned size) 10811{ 10812 switch (type) 10813 { 10814 case NT_untyped: 10815 switch (size) 10816 { 10817 case 8: return N_8; 10818 case 16: return N_16; 10819 case 32: return N_32; 10820 case 64: return N_64; 10821 default: ; 10822 } 10823 break; 10824 10825 case NT_integer: 10826 switch (size) 10827 { 10828 case 8: return N_I8; 10829 case 16: return N_I16; 10830 case 32: return N_I32; 10831 case 64: return N_I64; 10832 default: ; 10833 } 10834 break; 10835 10836 case NT_float: 10837 switch (size) 10838 { 10839 case 32: return N_F32; 10840 case 64: return N_F64; 10841 default: ; 10842 } 10843 break; 10844 10845 case NT_poly: 10846 switch (size) 10847 { 10848 case 8: return N_P8; 10849 case 16: return N_P16; 10850 default: ; 10851 } 10852 break; 10853 10854 case NT_signed: 10855 switch (size) 10856 { 10857 case 8: return N_S8; 10858 case 16: return N_S16; 10859 case 32: return N_S32; 10860 case 64: return N_S64; 10861 default: ; 10862 } 10863 break; 10864 10865 case NT_unsigned: 10866 switch (size) 10867 { 10868 case 8: return N_U8; 10869 case 16: return N_U16; 10870 case 32: return N_U32; 10871 case 64: return N_U64; 10872 default: ; 10873 } 10874 break; 10875 10876 default: ; 10877 } 10878 10879 return N_UTYP; 10880} 10881 10882/* Convert compact Neon bitmask type representation to a type and size. Only 10883 handles the case where a single bit is set in the mask. 
*/ 10884 10885static int 10886el_type_of_type_chk (enum neon_el_type *type, unsigned *size, 10887 enum neon_type_mask mask) 10888{ 10889 if ((mask & N_EQK) != 0) 10890 return FAIL; 10891 10892 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0) 10893 *size = 8; 10894 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_P16)) != 0) 10895 *size = 16; 10896 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0) 10897 *size = 32; 10898 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64)) != 0) 10899 *size = 64; 10900 else 10901 return FAIL; 10902 10903 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0) 10904 *type = NT_signed; 10905 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0) 10906 *type = NT_unsigned; 10907 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0) 10908 *type = NT_integer; 10909 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0) 10910 *type = NT_untyped; 10911 else if ((mask & (N_P8 | N_P16)) != 0) 10912 *type = NT_poly; 10913 else if ((mask & (N_F32 | N_F64)) != 0) 10914 *type = NT_float; 10915 else 10916 return FAIL; 10917 10918 return SUCCESS; 10919} 10920 10921/* Modify a bitmask of allowed types. This is only needed for type 10922 relaxation. */ 10923 10924static unsigned 10925modify_types_allowed (unsigned allowed, unsigned mods) 10926{ 10927 unsigned size; 10928 enum neon_el_type type; 10929 unsigned destmask; 10930 int i; 10931 10932 destmask = 0; 10933 10934 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1) 10935 { 10936 if (el_type_of_type_chk (&type, &size, allowed & i) == SUCCESS) 10937 { 10938 neon_modify_type_size (mods, &type, &size); 10939 destmask |= type_chk_of_el_type (type, size); 10940 } 10941 } 10942 10943 return destmask; 10944} 10945 10946/* Check type and return type classification. 
10947 The manual states (paraphrase): If one datatype is given, it indicates the 10948 type given in: 10949 - the second operand, if there is one 10950 - the operand, if there is no second operand 10951 - the result, if there are no operands. 10952 This isn't quite good enough though, so we use a concept of a "key" datatype 10953 which is set on a per-instruction basis, which is the one which matters when 10954 only one data type is written. 10955 Note: this function has side-effects (e.g. filling in missing operands). All 10956 Neon instructions should call it before performing bit encoding. */ 10957 10958static struct neon_type_el 10959neon_check_type (unsigned els, enum neon_shape ns, ...) 10960{ 10961 va_list ap; 10962 unsigned i, pass, key_el = 0; 10963 unsigned types[NEON_MAX_TYPE_ELS]; 10964 enum neon_el_type k_type = NT_invtype; 10965 unsigned k_size = -1u; 10966 struct neon_type_el badtype = {NT_invtype, -1}; 10967 unsigned key_allowed = 0; 10968 10969 /* Optional registers in Neon instructions are always (not) in operand 1. 10970 Fill in the missing operand here, if it was omitted. */ 10971 if (els > 1 && !inst.operands[1].present) 10972 inst.operands[1] = inst.operands[0]; 10973 10974 /* Suck up all the varargs. */ 10975 va_start (ap, ns); 10976 for (i = 0; i < els; i++) 10977 { 10978 unsigned thisarg = va_arg (ap, unsigned); 10979 if (thisarg == N_IGNORE_TYPE) 10980 { 10981 va_end (ap); 10982 return badtype; 10983 } 10984 types[i] = thisarg; 10985 if ((thisarg & N_KEY) != 0) 10986 key_el = i; 10987 } 10988 va_end (ap); 10989 10990 if (inst.vectype.elems > 0) 10991 for (i = 0; i < els; i++) 10992 if (inst.operands[i].vectype.type != NT_invtype) 10993 { 10994 first_error (_("types specified in both the mnemonic and operands")); 10995 return badtype; 10996 } 10997 10998 /* Duplicate inst.vectype elements here as necessary. 
10999 FIXME: No idea if this is exactly the same as the ARM assembler, 11000 particularly when an insn takes one register and one non-register 11001 operand. */ 11002 if (inst.vectype.elems == 1 && els > 1) 11003 { 11004 unsigned j; 11005 inst.vectype.elems = els; 11006 inst.vectype.el[key_el] = inst.vectype.el[0]; 11007 for (j = 0; j < els; j++) 11008 if (j != key_el) 11009 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el], 11010 types[j]); 11011 } 11012 else if (inst.vectype.elems == 0 && els > 0) 11013 { 11014 unsigned j; 11015 /* No types were given after the mnemonic, so look for types specified 11016 after each operand. We allow some flexibility here; as long as the 11017 "key" operand has a type, we can infer the others. */ 11018 for (j = 0; j < els; j++) 11019 if (inst.operands[j].vectype.type != NT_invtype) 11020 inst.vectype.el[j] = inst.operands[j].vectype; 11021 11022 if (inst.operands[key_el].vectype.type != NT_invtype) 11023 { 11024 for (j = 0; j < els; j++) 11025 if (inst.operands[j].vectype.type == NT_invtype) 11026 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el], 11027 types[j]); 11028 } 11029 else 11030 { 11031 first_error (_("operand types can't be inferred")); 11032 return badtype; 11033 } 11034 } 11035 else if (inst.vectype.elems != els) 11036 { 11037 first_error (_("type specifier has the wrong number of parts")); 11038 return badtype; 11039 } 11040 11041 for (pass = 0; pass < 2; pass++) 11042 { 11043 for (i = 0; i < els; i++) 11044 { 11045 unsigned thisarg = types[i]; 11046 unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0) 11047 ? modify_types_allowed (key_allowed, thisarg) : thisarg; 11048 enum neon_el_type g_type = inst.vectype.el[i].type; 11049 unsigned g_size = inst.vectype.el[i].size; 11050 11051 /* Decay more-specific signed & unsigned types to sign-insensitive 11052 integer types if sign-specific variants are unavailable. 
*/ 11053 if ((g_type == NT_signed || g_type == NT_unsigned) 11054 && (types_allowed & N_SU_ALL) == 0) 11055 g_type = NT_integer; 11056 11057 /* If only untyped args are allowed, decay any more specific types to 11058 them. Some instructions only care about signs for some element 11059 sizes, so handle that properly. */ 11060 if ((g_size == 8 && (types_allowed & N_8) != 0) 11061 || (g_size == 16 && (types_allowed & N_16) != 0) 11062 || (g_size == 32 && (types_allowed & N_32) != 0) 11063 || (g_size == 64 && (types_allowed & N_64) != 0)) 11064 g_type = NT_untyped; 11065 11066 if (pass == 0) 11067 { 11068 if ((thisarg & N_KEY) != 0) 11069 { 11070 k_type = g_type; 11071 k_size = g_size; 11072 key_allowed = thisarg & ~N_KEY; 11073 } 11074 } 11075 else 11076 { 11077 if ((thisarg & N_VFP) != 0) 11078 { 11079 enum neon_shape_el regshape = neon_shape_tab[ns].el[i]; 11080 unsigned regwidth = neon_shape_el_size[regshape], match; 11081 11082 /* In VFP mode, operands must match register widths. If we 11083 have a key operand, use its width, else use the width of 11084 the current operand. 
*/ 11085 if (k_size != -1u) 11086 match = k_size; 11087 else 11088 match = g_size; 11089 11090 if (regwidth != match) 11091 { 11092 first_error (_("operand size must match register width")); 11093 return badtype; 11094 } 11095 } 11096 11097 if ((thisarg & N_EQK) == 0) 11098 { 11099 unsigned given_type = type_chk_of_el_type (g_type, g_size); 11100 11101 if ((given_type & types_allowed) == 0) 11102 { 11103 first_error (_("bad type in Neon instruction")); 11104 return badtype; 11105 } 11106 } 11107 else 11108 { 11109 enum neon_el_type mod_k_type = k_type; 11110 unsigned mod_k_size = k_size; 11111 neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size); 11112 if (g_type != mod_k_type || g_size != mod_k_size) 11113 { 11114 first_error (_("inconsistent types in Neon instruction")); 11115 return badtype; 11116 } 11117 } 11118 } 11119 } 11120 } 11121 11122 return inst.vectype.el[key_el]; 11123} 11124 11125/* Neon-style VFP instruction forwarding. */ 11126 11127/* Thumb VFP instructions have 0xE in the condition field. */ 11128 11129static void 11130do_vfp_cond_or_thumb (void) 11131{ 11132 if (thumb_mode) 11133 inst.instruction |= 0xe0000000; 11134 else 11135 inst.instruction |= inst.cond << 28; 11136} 11137 11138/* Look up and encode a simple mnemonic, for use as a helper function for the 11139 Neon-style VFP syntax. This avoids duplication of bits of the insns table, 11140 etc. It is assumed that operand parsing has already been done, and that the 11141 operands are in the form expected by the given opcode (this isn't necessarily 11142 the same as the form in which they were parsed, hence some massaging must 11143 take place before this function is called). 11144 Checks current arch version against that in the looked-up opcode. 
*/ 11145 11146static void 11147do_vfp_nsyn_opcode (const char *opname) 11148{ 11149 const struct asm_opcode *opcode; 11150 11151 opcode = hash_find (arm_ops_hsh, opname); 11152 11153 if (!opcode) 11154 abort (); 11155 11156 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, 11157 thumb_mode ? *opcode->tvariant : *opcode->avariant), 11158 _(BAD_FPU)); 11159 11160 if (thumb_mode) 11161 { 11162 inst.instruction = opcode->tvalue; 11163 opcode->tencode (); 11164 } 11165 else 11166 { 11167 inst.instruction = (inst.cond << 28) | opcode->avalue; 11168 opcode->aencode (); 11169 } 11170} 11171 11172static void 11173do_vfp_nsyn_add_sub (enum neon_shape rs) 11174{ 11175 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd; 11176 11177 if (rs == NS_FFF) 11178 { 11179 if (is_add) 11180 do_vfp_nsyn_opcode ("fadds"); 11181 else 11182 do_vfp_nsyn_opcode ("fsubs"); 11183 } 11184 else 11185 { 11186 if (is_add) 11187 do_vfp_nsyn_opcode ("faddd"); 11188 else 11189 do_vfp_nsyn_opcode ("fsubd"); 11190 } 11191} 11192 11193/* Check operand types to see if this is a VFP instruction, and if so call 11194 PFN (). 
*/ 11195 11196static int 11197try_vfp_nsyn (int args, void (*pfn) (enum neon_shape)) 11198{ 11199 enum neon_shape rs; 11200 struct neon_type_el et; 11201 11202 switch (args) 11203 { 11204 case 2: 11205 rs = neon_select_shape (NS_FF, NS_DD, NS_NULL); 11206 et = neon_check_type (2, rs, 11207 N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP); 11208 break; 11209 11210 case 3: 11211 rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL); 11212 et = neon_check_type (3, rs, 11213 N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP); 11214 break; 11215 11216 default: 11217 abort (); 11218 } 11219 11220 if (et.type != NT_invtype) 11221 { 11222 pfn (rs); 11223 return SUCCESS; 11224 } 11225 else 11226 inst.error = NULL; 11227 11228 return FAIL; 11229} 11230 11231static void 11232do_vfp_nsyn_mla_mls (enum neon_shape rs) 11233{ 11234 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla; 11235 11236 if (rs == NS_FFF) 11237 { 11238 if (is_mla) 11239 do_vfp_nsyn_opcode ("fmacs"); 11240 else 11241 do_vfp_nsyn_opcode ("fmscs"); 11242 } 11243 else 11244 { 11245 if (is_mla) 11246 do_vfp_nsyn_opcode ("fmacd"); 11247 else 11248 do_vfp_nsyn_opcode ("fmscd"); 11249 } 11250} 11251 11252static void 11253do_vfp_nsyn_mul (enum neon_shape rs) 11254{ 11255 if (rs == NS_FFF) 11256 do_vfp_nsyn_opcode ("fmuls"); 11257 else 11258 do_vfp_nsyn_opcode ("fmuld"); 11259} 11260 11261static void 11262do_vfp_nsyn_abs_neg (enum neon_shape rs) 11263{ 11264 int is_neg = (inst.instruction & 0x80) != 0; 11265 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY); 11266 11267 if (rs == NS_FF) 11268 { 11269 if (is_neg) 11270 do_vfp_nsyn_opcode ("fnegs"); 11271 else 11272 do_vfp_nsyn_opcode ("fabss"); 11273 } 11274 else 11275 { 11276 if (is_neg) 11277 do_vfp_nsyn_opcode ("fnegd"); 11278 else 11279 do_vfp_nsyn_opcode ("fabsd"); 11280 } 11281} 11282 11283/* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision 11284 insns belong to Neon, and are handled elsewhere. 
*/ 11285 11286static void 11287do_vfp_nsyn_ldm_stm (int is_dbmode) 11288{ 11289 int is_ldm = (inst.instruction & (1 << 20)) != 0; 11290 if (is_ldm) 11291 { 11292 if (is_dbmode) 11293 do_vfp_nsyn_opcode ("fldmdbs"); 11294 else 11295 do_vfp_nsyn_opcode ("fldmias"); 11296 } 11297 else 11298 { 11299 if (is_dbmode) 11300 do_vfp_nsyn_opcode ("fstmdbs"); 11301 else 11302 do_vfp_nsyn_opcode ("fstmias"); 11303 } 11304} 11305 11306static void 11307do_vfp_nsyn_sqrt (void) 11308{ 11309 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL); 11310 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP); 11311 11312 if (rs == NS_FF) 11313 do_vfp_nsyn_opcode ("fsqrts"); 11314 else 11315 do_vfp_nsyn_opcode ("fsqrtd"); 11316} 11317 11318static void 11319do_vfp_nsyn_div (void) 11320{ 11321 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL); 11322 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP, 11323 N_F32 | N_F64 | N_KEY | N_VFP); 11324 11325 if (rs == NS_FFF) 11326 do_vfp_nsyn_opcode ("fdivs"); 11327 else 11328 do_vfp_nsyn_opcode ("fdivd"); 11329} 11330 11331static void 11332do_vfp_nsyn_nmul (void) 11333{ 11334 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL); 11335 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP, 11336 N_F32 | N_F64 | N_KEY | N_VFP); 11337 11338 if (rs == NS_FFF) 11339 { 11340 inst.instruction = NEON_ENC_SINGLE (inst.instruction); 11341 do_vfp_sp_dyadic (); 11342 } 11343 else 11344 { 11345 inst.instruction = NEON_ENC_DOUBLE (inst.instruction); 11346 do_vfp_dp_rd_rn_rm (); 11347 } 11348 do_vfp_cond_or_thumb (); 11349} 11350 11351static void 11352do_vfp_nsyn_cmp (void) 11353{ 11354 if (inst.operands[1].isreg) 11355 { 11356 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL); 11357 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP); 11358 11359 if (rs == NS_FF) 11360 { 11361 inst.instruction = NEON_ENC_SINGLE (inst.instruction); 11362 do_vfp_sp_monadic (); 11363 } 11364 else 
11365 { 11366 inst.instruction = NEON_ENC_DOUBLE (inst.instruction); 11367 do_vfp_dp_rd_rm (); 11368 } 11369 } 11370 else 11371 { 11372 enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL); 11373 neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK); 11374 11375 switch (inst.instruction & 0x0fffffff) 11376 { 11377 case N_MNEM_vcmp: 11378 inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp; 11379 break; 11380 case N_MNEM_vcmpe: 11381 inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe; 11382 break; 11383 default: 11384 abort (); 11385 } 11386 11387 if (rs == NS_FI) 11388 { 11389 inst.instruction = NEON_ENC_SINGLE (inst.instruction); 11390 do_vfp_sp_compare_z (); 11391 } 11392 else 11393 { 11394 inst.instruction = NEON_ENC_DOUBLE (inst.instruction); 11395 do_vfp_dp_rd (); 11396 } 11397 } 11398 do_vfp_cond_or_thumb (); 11399} 11400 11401static void 11402nsyn_insert_sp (void) 11403{ 11404 inst.operands[1] = inst.operands[0]; 11405 memset (&inst.operands[0], '\0', sizeof (inst.operands[0])); 11406 inst.operands[0].reg = 13; 11407 inst.operands[0].isreg = 1; 11408 inst.operands[0].writeback = 1; 11409 inst.operands[0].present = 1; 11410} 11411 11412static void 11413do_vfp_nsyn_push (void) 11414{ 11415 nsyn_insert_sp (); 11416 if (inst.operands[1].issingle) 11417 do_vfp_nsyn_opcode ("fstmdbs"); 11418 else 11419 do_vfp_nsyn_opcode ("fstmdbd"); 11420} 11421 11422static void 11423do_vfp_nsyn_pop (void) 11424{ 11425 nsyn_insert_sp (); 11426 if (inst.operands[1].issingle) 11427 do_vfp_nsyn_opcode ("fldmias"); 11428 else 11429 do_vfp_nsyn_opcode ("fldmiad"); 11430} 11431 11432/* Fix up Neon data-processing instructions, ORing in the correct bits for 11433 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */ 11434 11435static unsigned 11436neon_dp_fixup (unsigned i) 11437{ 11438 if (thumb_mode) 11439 { 11440 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. 
*/ 11441 if (i & (1 << 24)) 11442 i |= 1 << 28; 11443 11444 i &= ~(1 << 24); 11445 11446 i |= 0xef000000; 11447 } 11448 else 11449 i |= 0xf2000000; 11450 11451 return i; 11452} 11453 11454/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3 11455 (0, 1, 2, 3). */ 11456 11457static unsigned 11458neon_logbits (unsigned x) 11459{ 11460 return ffs (x) - 4; 11461} 11462 11463#define LOW4(R) ((R) & 0xf) 11464#define HI1(R) (((R) >> 4) & 1) 11465 11466/* Encode insns with bit pattern: 11467 11468 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0| 11469 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm | 11470 11471 SIZE is passed in bits. -1 means size field isn't changed, in case it has a 11472 different meaning for some instruction. */ 11473 11474static void 11475neon_three_same (int isquad, int ubit, int size) 11476{ 11477 inst.instruction |= LOW4 (inst.operands[0].reg) << 12; 11478 inst.instruction |= HI1 (inst.operands[0].reg) << 22; 11479 inst.instruction |= LOW4 (inst.operands[1].reg) << 16; 11480 inst.instruction |= HI1 (inst.operands[1].reg) << 7; 11481 inst.instruction |= LOW4 (inst.operands[2].reg); 11482 inst.instruction |= HI1 (inst.operands[2].reg) << 5; 11483 inst.instruction |= (isquad != 0) << 6; 11484 inst.instruction |= (ubit != 0) << 24; 11485 if (size != -1) 11486 inst.instruction |= neon_logbits (size) << 20; 11487 11488 inst.instruction = neon_dp_fixup (inst.instruction); 11489} 11490 11491/* Encode instructions of the form: 11492 11493 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0| 11494 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm | 11495 11496 Don't write size if SIZE == -1. 
*/ 11497 11498static void 11499neon_two_same (int qbit, int ubit, int size) 11500{ 11501 inst.instruction |= LOW4 (inst.operands[0].reg) << 12; 11502 inst.instruction |= HI1 (inst.operands[0].reg) << 22; 11503 inst.instruction |= LOW4 (inst.operands[1].reg); 11504 inst.instruction |= HI1 (inst.operands[1].reg) << 5; 11505 inst.instruction |= (qbit != 0) << 6; 11506 inst.instruction |= (ubit != 0) << 24; 11507 11508 if (size != -1) 11509 inst.instruction |= neon_logbits (size) << 18; 11510 11511 inst.instruction = neon_dp_fixup (inst.instruction); 11512} 11513 11514/* Neon instruction encoders, in approximate order of appearance. */ 11515 11516static void 11517do_neon_dyadic_i_su (void) 11518{ 11519 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL); 11520 struct neon_type_el et = neon_check_type (3, rs, 11521 N_EQK, N_EQK, N_SU_32 | N_KEY); 11522 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size); 11523} 11524 11525static void 11526do_neon_dyadic_i64_su (void) 11527{ 11528 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL); 11529 struct neon_type_el et = neon_check_type (3, rs, 11530 N_EQK, N_EQK, N_SU_ALL | N_KEY); 11531 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size); 11532} 11533 11534static void 11535neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et, 11536 unsigned immbits) 11537{ 11538 unsigned size = et.size >> 3; 11539 inst.instruction |= LOW4 (inst.operands[0].reg) << 12; 11540 inst.instruction |= HI1 (inst.operands[0].reg) << 22; 11541 inst.instruction |= LOW4 (inst.operands[1].reg); 11542 inst.instruction |= HI1 (inst.operands[1].reg) << 5; 11543 inst.instruction |= (isquad != 0) << 6; 11544 inst.instruction |= immbits << 16; 11545 inst.instruction |= (size >> 3) << 7; 11546 inst.instruction |= (size & 0x7) << 19; 11547 if (write_ubit) 11548 inst.instruction |= (uval != 0) << 24; 11549 11550 inst.instruction = neon_dp_fixup (inst.instruction); 11551} 11552 11553static 
void
do_neon_shl_imm (void)
{
  /* VSHL: either an immediate-shift form (last operand is an immediate)
     or a 3-register form.  */
  if (!inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
      inst.instruction = NEON_ENC_IMMED (inst.instruction);
      neon_imm_shift (FALSE, 0, neon_quad (rs), et, inst.operands[2].imm);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
        N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* VSHL/VQSHL 3-register variants have syntax such as:
	   vshl.xx Dd, Dm, Dn
	 whereas other 3-register operations encoded by neon_three_same have
	 syntax like:
	   vadd.xx Dd, Dn, Dm
	 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
	 here.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}

/* VQSHL: saturating shift, immediate or 3-register form.  Unlike plain
   VSHL above, the immediate form writes the U bit from the signedness of
   the element type.  */

static void
do_neon_qshl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);

      inst.instruction = NEON_ENC_IMMED (inst.instruction);
      neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
                      inst.operands[2].imm);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
        N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* See note in do_neon_shl_imm.
 */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}

/* VRSHL (rounding shift): 3-register only.  As with VSHL, the shift-count
   register comes last in the source syntax, so swap operands 1 and 2 before
   encoding with neon_three_same.  */

static void
do_neon_rshl (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_SU_ALL | N_KEY);
  unsigned int tmp;

  tmp = inst.operands[2].reg;
  inst.operands[2].reg = inst.operands[1].reg;
  inst.operands[1].reg = tmp;
  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
}

/* Compute the cmode field for a logic-immediate instruction (VBIC/VORR and
   their pseudo-ops) for the given element SIZE, writing the 8-bit encoded
   immediate through IMMBITS.  Returns FAIL (via first_error) when the
   immediate cannot be represented.  */

static int
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
{
  /* Handle .I8 pseudo-instructions.  */
  if (size == 8)
    {
      /* Unfortunately, this will make everything apart from zero out-of-range.
	 FIXME is this the intended semantics? There doesn't seem much point in
	 accepting .I8 if so.
 */
      /* Replicate the byte so the .I8 value can be treated as .I16.  */
      immediate |= immediate << 8;
      size = 16;
    }

  if (size >= 32)
    {
      /* 32-bit element: the immediate must fit in a single byte lane;
	 the cmode value selects which lane.  */
      if (immediate == (immediate & 0x000000ff))
	{
	  *immbits = immediate;
	  return 0x1;
	}
      else if (immediate == (immediate & 0x0000ff00))
	{
	  *immbits = immediate >> 8;
	  return 0x3;
	}
      else if (immediate == (immediate & 0x00ff0000))
	{
	  *immbits = immediate >> 16;
	  return 0x5;
	}
      else if (immediate == (immediate & 0xff000000))
	{
	  *immbits = immediate >> 24;
	  return 0x7;
	}
      /* Fall back to a 16-bit encoding if the two halfwords repeat.  */
      if ((immediate & 0xffff) != (immediate >> 16))
	goto bad_immediate;
      immediate &= 0xffff;
    }

  /* 16-bit element: the byte may sit in either halfword lane.  */
  if (immediate == (immediate & 0x000000ff))
    {
      *immbits = immediate;
      return 0x9;
    }
  else if (immediate == (immediate & 0x0000ff00))
    {
      *immbits = immediate >> 8;
      return 0xb;
    }

  bad_immediate:
  first_error (_("immediate value out of range"));
  return FAIL;
}

/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
	 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
	 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
	 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
}

/* For immediate of above form, return 0bABCD.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
	 | ((imm & 0x01000000) >> 21);
}

/* Compress quarter-float representation to 0b...000 abcdefgh.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
}

/* Returns CMODE.
   IMMBITS [7:0] is set to bits suitable for inserting into
   the instruction. *OP is passed as the initial value of the op field, and
   may be set to a different value depending on the constant (i.e.
   "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
   MVN). If the immediate looks like a repeated pattern then also
   try smaller element sizes.  */

static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
                         unsigned *immbits, int *op, int size,
                         enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  /* Quarter-precision float immediates use cmode 0xf (32-bit only, and not
     with MVN).  */
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
        return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      /* 64-bit: each byte must be all-ones or all-zeroes (cmode 0xe with
	 OP forced to 1), otherwise fall through treating the value as a
	 repeated 32-bit pattern.  */
      if (neon_bits_same_in_bytes (immhi)
          && neon_bits_same_in_bytes (immlo))
	{
	  if (*op == 1)
	    return FAIL;
	  *immbits = (neon_squash_bits (immhi) << 4)
		     | neon_squash_bits (immlo);
	  *op = 1;
	  return 0xe;
	}

      if (immhi != immlo)
	return FAIL;
    }

  if (size >= 32)
    {
      /* 32-bit element: single byte in any lane, or the "byte | ones"
	 cmode 0xc/0xd forms.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x0;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0x2;
	}
      else if (immlo == (immlo & 0x00ff0000))
	{
	  *immbits = immlo >> 16;
	  return 0x4;
	}
      else if (immlo == (immlo & 0xff000000))
	{
	  *immbits = immlo >> 24;
	  return 0x6;
	}
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
	{
	  *immbits = (immlo >> 8) & 0xff;
	  return 0xc;
	}
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
	{
	  *immbits = (immlo >> 16) & 0xff;
	  return
 0xd;
	}

      /* Try a 16-bit encoding if the two halfwords repeat.  */
      if ((immlo & 0xffff) != (immlo >> 16))
	return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      /* 16-bit element: byte in either halfword lane.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x8;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0xa;
	}

      /* Try an 8-bit encoding if the two bytes repeat.  */
      if ((immlo & 0xff) != (immlo >> 8))
	return FAIL;
      immlo &= 0xff;
    }

  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
	return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}

/* Write immediate bits [7:0] to the following locations:

  |28/24|23     19|18 16|15                    4|3     0|
  |  a  |x x x x x|b c d|x x x x x x x x x x x x|e f g h|

  This function is used by VMOV/VMVN/VORR/VBIC.  */

static void
neon_write_immbits (unsigned immbits)
{
  inst.instruction |= immbits & 0xf;
  inst.instruction |= ((immbits >> 4) & 0x7) << 16;
  inst.instruction |= ((immbits >> 7) & 0x1) << 24;
}

/* Invert low-order SIZE bits of XHI:XLO.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  /* Either pointer may be NULL, in which case that half is ignored.  */
  unsigned immlo = xlo ? *xlo : 0;
  unsigned immhi = xhi ? *xhi : 0;

  switch (size)
    {
    case 8:
      immlo = (~immlo) & 0xff;
      break;

    case 16:
      immlo = (~immlo) & 0xffff;
      break;

    case 64:
      immhi = (~immhi) & 0xffffffff;
      /* fall through.
 */

    case 32:
      immlo = (~immlo) & 0xffffffff;
      break;

    default:
      abort ();
    }

  if (xlo)
    *xlo = immlo;

  if (xhi)
    *xhi = immhi;
}

/* Encode the bitwise-logic instructions (VAND/VBIC/VORR/VORN/VEOR...):
   either a 3-register form, or a register + immediate form where VAND and
   VORN are pseudo-instructions realised as VBIC/VORR of the inverted
   immediate.  */

static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
        N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
      enum neon_opc opcode = inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;

      if (et.type == NT_invtype)
        return;

      inst.instruction = NEON_ENC_IMMED (inst.instruction);

      immbits = inst.operands[1].imm;
      if (et.size == 64)
	{
	  /* .i64 is a pseudo-op, so the immediate must be a repeating
	     pattern.  */
	  if (immbits != (inst.operands[1].regisimm ?
			  inst.operands[1].reg : 0))
	    {
	      /* Set immbits to an invalid constant.  */
	      immbits = 0xdeadbeef;
	    }
	}

      switch (opcode)
	{
	case N_MNEM_vbic:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorr:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vand:
	  /* Pseudo-instruction for VBIC.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorn:
	  /* Pseudo-instruction for VORR.
 */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	default:
	  abort ();
	}

      if (cmode == FAIL)
	return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      inst.instruction = neon_dp_fixup (inst.instruction);
    }
}

/* VBIF/VBIT/VBSL bitfield-select instructions: untyped 3-register ops.  */

static void
do_neon_bitfield (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_IGNORE_TYPE);
  neon_three_same (neon_quad (rs), 0, -1);
}

/* Shared encoder for 3-register dyadic operations that have both integer
   and float forms.  UBIT_MEANING gives the element type for which the U bit
   is set in the integer encoding; TYPES is the permitted type mask;
   DESTBITS is OR'd into the destination type constraint.  */

static void
neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
                  unsigned destbits)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
                                            types | N_KEY);
  if (et.type == NT_float)
    {
      inst.instruction = NEON_ENC_FLOAT (inst.instruction);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
    }
}

static void
do_neon_dyadic_if_su (void)
{
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}

static void
do_neon_dyadic_if_su_d (void)
{
  /* This version only allow D registers, but that constraint is enforced during
     operand parsing so we don't need to do anything extra here.  */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}

static void
do_neon_dyadic_if_i_d (void)
{
  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args.
 */
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}

/* Flags for vfp_or_neon_is_neon, below: which checks to perform.  */

enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,
  NEON_CHECK_ARCH = 2
};

/* Call this function if an instruction which may have belonged to the VFP or
   Neon instruction sets, but turned out to be a Neon instruction (due to the
   operand types involved, etc.). We have to check and/or fix-up a couple of
   things:

     - Make sure the user hasn't attempted to make a Neon instruction
       conditional.
     - Alter the value in the condition code field if necessary.
     - Make sure that the arch supports Neon instructions.

   Which of these operations take place depends on bits from enum
   vfp_or_neon_is_neon_bits.

   WARNING: This function has side effects! If NEON_CHECK_CC is used and the
   current instruction's condition is COND_ALWAYS, the condition field is
   changed to inst.uncond_value. This is necessary because instructions shared
   between VFP and Neon may be conditional for the VFP variants only, and the
   unconditional Neon version must have, e.g., 0xF in the condition field.  */

static int
vfp_or_neon_is_neon (unsigned check)
{
  /* Conditions are always legal in Thumb mode (IT blocks).
*/ 12029 if (!thumb_mode && (check & NEON_CHECK_CC)) 12030 { 12031 if (inst.cond != COND_ALWAYS) 12032 { 12033 first_error (_(BAD_COND)); 12034 return FAIL; 12035 } 12036 if (inst.uncond_value != -1) 12037 inst.instruction |= inst.uncond_value << 28; 12038 } 12039 12040 if ((check & NEON_CHECK_ARCH) 12041 && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)) 12042 { 12043 first_error (_(BAD_FPU)); 12044 return FAIL; 12045 } 12046 12047 return SUCCESS; 12048} 12049 12050static void 12051do_neon_addsub_if_i (void) 12052{ 12053 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS) 12054 return; 12055 12056 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL) 12057 return; 12058 12059 /* The "untyped" case can't happen. Do this to stop the "U" bit being 12060 affected if we specify unsigned args. */ 12061 neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0); 12062} 12063 12064/* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the 12065 result to be: 12066 V<op> A,B (A is operand 0, B is operand 2) 12067 to mean: 12068 V<op> A,B,A 12069 not: 12070 V<op> A,B,B 12071 so handle that case specially. */ 12072 12073static void 12074neon_exchange_operands (void) 12075{ 12076 void *scratch = alloca (sizeof (inst.operands[0])); 12077 if (inst.operands[1].present) 12078 { 12079 /* Swap operands[1] and operands[2]. 
 */
      memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
      inst.operands[1] = inst.operands[2];
      memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
    }
  else
    {
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[0];
    }
}

/* Encode comparison instructions: 3-register form (optionally with the
   operands exchanged, for the inverted comparisons), or comparison against
   an immediate (which, from the encoding below, is #0).  REGTYPES/IMMTYPES
   are the permitted element types for each form.  */

static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  if (inst.operands[2].isreg)
    {
      if (invert)
        neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
        N_EQK | N_SIZ, immtypes | N_KEY);

      inst.instruction = NEON_ENC_IMMED (inst.instruction);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= (et.type == NT_float) << 10;
      inst.instruction |= neon_logbits (et.size) << 18;

      inst.instruction = neon_dp_fixup (inst.instruction);
    }
}

static void
do_neon_cmp (void)
{
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
}

static void
do_neon_cmp_inv (void)
{
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
}

static void
do_neon_ceq (void)
{
  neon_compare (N_IF_32, N_IF_32, FALSE);
}

/* For multiply instructions, we have the possibility of 16-bit or 32-bit
   scalars, which are encoded in 5 bits, M : Rm.
   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
   index in M.
 */

/* Pack SCALAR (a register + element index pair) into the 5-bit M:Rm field
   for a multiply of ELSIZE-bit elements, reporting an error when the
   register or index exceeds the encodable range.  */

static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  switch (elsize)
    {
    case 16:
      /* Register in Rm[2:0], index in M:Rm[3].  */
      if (regno > 7 || elno > 3)
        goto bad_scalar;
      return regno | (elno << 3);

    case 32:
      /* Register in Rm[3:0], index in M.  */
      if (regno > 15 || elno > 1)
        goto bad_scalar;
      return regno | (elno << 4);

    default:
    bad_scalar:
      first_error (_("scalar out of range for multiply instruction"));
    }

  return 0;
}

/* Encode multiply / multiply-accumulate scalar instructions.  */

static void
neon_mul_mac (struct neon_type_el et, int ubit)
{
  unsigned scalar;

  /* Give a more helpful error message if we have an invalid type.  */
  if (et.type == NT_invtype)
    return;

  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (scalar);
  inst.instruction |= HI1 (scalar) << 5;
  inst.instruction |= (et.type == NT_float) << 8;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= (ubit != 0) << 24;

  inst.instruction = neon_dp_fixup (inst.instruction);
}

/* VMLA/VMLS: VFP form, scalar form, or plain 3-register Neon form.  */

static void
do_neon_mac_maybe_scalar (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
        N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
      inst.instruction = NEON_ENC_SCALAR (inst.instruction);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      /* The "untyped" case can't happen. Do this to stop the "U" bit being
	 affected if we specify unsigned args.  */
      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
    }
}

/* VTST: 3-register, any 8/16/32-bit element type.  */

static void
do_neon_tst (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
  neon_three_same (neon_quad (rs), 0, et.size);
}

/* VMUL with 3 registers allows the P8 type. The scalar version supports the
   same types as the MAC equivalents. The polynomial type for this instruction
   is encoded the same as the integer type.  */

static void
do_neon_mul (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar ();
  else
    neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
}

/* VQDMULH/VQRDMULH: saturating doubling multiply, scalar or 3-register
   form; signed 16/32-bit elements only.  */

static void
do_neon_qdmulh (void)
{
  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
        N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      inst.instruction = NEON_ENC_SCALAR (inst.instruction);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
        N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      /* The U bit (rounding) comes from bit mask.
 */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}

/* VACGE/VACGT: absolute compare, F32 only.  */

static void
do_neon_fcmp_absolute (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
  /* Size field comes from bit mask.  */
  neon_three_same (neon_quad (rs), 1, -1);
}

/* Inverted absolute compare (VACLE/VACLT pseudo-forms): swap the source
   operands, then encode as the non-inverted instruction.  */

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}

/* VRECPS/VRSQRTS step instructions, F32 only.  */

static void
do_neon_step (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
  neon_three_same (neon_quad (rs), 0, -1);
}

/* VABS/VNEG: try the VFP encoding first, else encode the two-register Neon
   form with the float bit at bit 10 and the size field at bits [19:18].  */

static void
do_neon_abs_neg (void)
{
  enum neon_shape rs;
  struct neon_type_el et;

  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= (et.type == NT_float) << 10;
  inst.instruction |= neon_logbits (et.size) << 18;

  inst.instruction = neon_dp_fixup (inst.instruction);
}

/* VSLI (shift left and insert): the shift amount must be strictly less than
   the element width.  */

static void
do_neon_sli (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range
 for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}

/* VSRI (shift right and insert): shift amount is 1..size, and is encoded
   as size - imm.  */

static void
do_neon_sri (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
}

/* VQSHLU: saturating shift left with unsigned result, signed operands.  */

static void
do_neon_qshlu_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for shift"));
  /* Only encodes the 'U present' variant of the instruction.
     In this case, signed types have OP (bit 8) set to 0.
     Unsigned types have OP set to 1.  */
  inst.instruction |= (et.type == NT_unsigned) << 8;
  /* The rest of the bits are the same as other immediate shifts.  */
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}

/* VQMOVN: saturating narrowing move.  */

static void
do_neon_qmovn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  /* Saturating move where operands can be signed or unsigned, and the
     destination has the same signedness.  */
  inst.instruction = NEON_ENC_INTEGER (inst.instruction);
  if (et.type == NT_unsigned)
    inst.instruction |= 0xc0;
  else
    inst.instruction |= 0x80;
  neon_two_same (0, 1, et.size / 2);
}

static void
do_neon_qmovun (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  /* Saturating move with unsigned results.
 Operands must be signed.  */
  inst.instruction = NEON_ENC_INTEGER (inst.instruction);
  neon_two_same (0, 1, et.size / 2);
}

/* VQ{R}SHRN: saturating shift right and narrow; #0 is a synonym for
   VQMOVN.  */

static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}

/* VQ{R}SHRUN: as above but with an unsigned result from signed operands.  */

static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>.
 */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1.  */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}

/* VMOVN: narrowing move (no saturation).  */

static void
do_neon_movn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  inst.instruction = NEON_ENC_INTEGER (inst.instruction);
  neon_two_same (0, 1, et.size / 2);
}

/* V{R}SHRN: shift right and narrow; #0 is a synonym for VMOVN.  */

static void
do_neon_rshift_narrow (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* If immediate is zero then we are a pseudo-instruction for
     VMOVN.I<size> <Dd>, <Qm> */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vmovn;
      do_neon_movn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for narrowing operation"));
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
}

/* VSHLL: shift left long.  A shift equal to the element size selects the
   dedicated maximum-shift encoding.  */

static void
do_neon_shll (void)
{
  /* FIXME: Type checking when lengthening.  */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant.
 */
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      inst.instruction = neon_dp_fixup (inst.instruction);
    }
  else
    {
      /* A more-specific type check for non-max versions.  */
      et = neon_check_type (2, NS_QDI,
        N_EQK | N_DBL, N_SU_32 | N_KEY);
      inst.instruction = NEON_ENC_IMMED (inst.instruction);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}

/* Check the various types for the VCVT instruction, and return which version
   the current instruction is.  */

static int
neon_cvt_flavour (enum neon_shape rs)
{
/* Probe one source/destination type combination; on a match, clear any
   pending type error and return its flavour number C.  */
#define CVT_VAR(C,X,Y) \
  et = neon_check_type (2, rs, whole_reg | (X), whole_reg | (Y)); \
  if (et.type != NT_invtype) \
    { \
      inst.error = NULL; \
      return (C); \
    }
  struct neon_type_el et;
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
                        || rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register. Thus the
     "source" and "destination" registers must have the same width. Hack that
     here by making the size equal to the key (wider, in this case) operand.  */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  /* Neon flavours 0-3.  */
  CVT_VAR (0, N_S32, N_F32);
  CVT_VAR (1, N_U32, N_F32);
  CVT_VAR (2, N_F32, N_S32);
  CVT_VAR (3, N_F32, N_U32);

  whole_reg = N_VFP;

  /* VFP instructions.
*/ 12536 CVT_VAR (4, N_F32, N_F64); 12537 CVT_VAR (5, N_F64, N_F32); 12538 CVT_VAR (6, N_S32, N_F64 | key); 12539 CVT_VAR (7, N_U32, N_F64 | key); 12540 CVT_VAR (8, N_F64 | key, N_S32); 12541 CVT_VAR (9, N_F64 | key, N_U32); 12542 /* VFP instructions with bitshift. */ 12543 CVT_VAR (10, N_F32 | key, N_S16); 12544 CVT_VAR (11, N_F32 | key, N_U16); 12545 CVT_VAR (12, N_F64 | key, N_S16); 12546 CVT_VAR (13, N_F64 | key, N_U16); 12547 CVT_VAR (14, N_S16, N_F32 | key); 12548 CVT_VAR (15, N_U16, N_F32 | key); 12549 CVT_VAR (16, N_S16, N_F64 | key); 12550 CVT_VAR (17, N_U16, N_F64 | key); 12551 12552 return -1; 12553#undef CVT_VAR 12554} 12555 12556/* Neon-syntax VFP conversions. */ 12557 12558static void 12559do_vfp_nsyn_cvt (enum neon_shape rs, int flavour) 12560{ 12561 const char *opname = 0; 12562 12563 if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI) 12564 { 12565 /* Conversions with immediate bitshift. */ 12566 const char *enc[] = 12567 { 12568 "ftosls", 12569 "ftouls", 12570 "fsltos", 12571 "fultos", 12572 NULL, 12573 NULL, 12574 "ftosld", 12575 "ftould", 12576 "fsltod", 12577 "fultod", 12578 "fshtos", 12579 "fuhtos", 12580 "fshtod", 12581 "fuhtod", 12582 "ftoshs", 12583 "ftouhs", 12584 "ftoshd", 12585 "ftouhd" 12586 }; 12587 12588 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc)) 12589 { 12590 opname = enc[flavour]; 12591 constraint (inst.operands[0].reg != inst.operands[1].reg, 12592 _("operands 0 and 1 must be the same register")); 12593 inst.operands[1] = inst.operands[2]; 12594 memset (&inst.operands[2], '\0', sizeof (inst.operands[2])); 12595 } 12596 } 12597 else 12598 { 12599 /* Conversions without bitshift. 
*/ 12600 const char *enc[] = 12601 { 12602 "ftosis", 12603 "ftouis", 12604 "fsitos", 12605 "fuitos", 12606 "fcvtsd", 12607 "fcvtds", 12608 "ftosid", 12609 "ftouid", 12610 "fsitod", 12611 "fuitod" 12612 }; 12613 12614 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc)) 12615 opname = enc[flavour]; 12616 } 12617 12618 if (opname) 12619 do_vfp_nsyn_opcode (opname); 12620} 12621 12622static void 12623do_vfp_nsyn_cvtz (void) 12624{ 12625 enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL); 12626 int flavour = neon_cvt_flavour (rs); 12627 const char *enc[] = 12628 { 12629 "ftosizs", 12630 "ftouizs", 12631 NULL, 12632 NULL, 12633 NULL, 12634 NULL, 12635 "ftosizd", 12636 "ftouizd" 12637 }; 12638 12639 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc) && enc[flavour]) 12640 do_vfp_nsyn_opcode (enc[flavour]); 12641} 12642 12643static void 12644do_neon_cvt (void) 12645{ 12646 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ, 12647 NS_FD, NS_DF, NS_FF, NS_NULL); 12648 int flavour = neon_cvt_flavour (rs); 12649 12650 /* VFP rather than Neon conversions. */ 12651 if (flavour >= 4) 12652 { 12653 do_vfp_nsyn_cvt (rs, flavour); 12654 return; 12655 } 12656 12657 switch (rs) 12658 { 12659 case NS_DDI: 12660 case NS_QQI: 12661 { 12662 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL) 12663 return; 12664 12665 /* Fixed-point conversion with #0 immediate is encoded as an 12666 integer conversion. 
*/ 12667 if (inst.operands[2].present && inst.operands[2].imm == 0) 12668 goto int_encode; 12669 unsigned immbits = 32 - inst.operands[2].imm; 12670 unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 }; 12671 inst.instruction = NEON_ENC_IMMED (inst.instruction); 12672 if (flavour != -1) 12673 inst.instruction |= enctab[flavour]; 12674 inst.instruction |= LOW4 (inst.operands[0].reg) << 12; 12675 inst.instruction |= HI1 (inst.operands[0].reg) << 22; 12676 inst.instruction |= LOW4 (inst.operands[1].reg); 12677 inst.instruction |= HI1 (inst.operands[1].reg) << 5; 12678 inst.instruction |= neon_quad (rs) << 6; 12679 inst.instruction |= 1 << 21; 12680 inst.instruction |= immbits << 16; 12681 12682 inst.instruction = neon_dp_fixup (inst.instruction); 12683 } 12684 break; 12685 12686 case NS_DD: 12687 case NS_QQ: 12688 int_encode: 12689 { 12690 unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 }; 12691 12692 inst.instruction = NEON_ENC_INTEGER (inst.instruction); 12693 12694 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL) 12695 return; 12696 12697 if (flavour != -1) 12698 inst.instruction |= enctab[flavour]; 12699 12700 inst.instruction |= LOW4 (inst.operands[0].reg) << 12; 12701 inst.instruction |= HI1 (inst.operands[0].reg) << 22; 12702 inst.instruction |= LOW4 (inst.operands[1].reg); 12703 inst.instruction |= HI1 (inst.operands[1].reg) << 5; 12704 inst.instruction |= neon_quad (rs) << 6; 12705 inst.instruction |= 2 << 18; 12706 12707 inst.instruction = neon_dp_fixup (inst.instruction); 12708 } 12709 break; 12710 12711 default: 12712 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). 
*/ 12713 do_vfp_nsyn_cvt (rs, flavour); 12714 } 12715} 12716 12717static void 12718neon_move_immediate (void) 12719{ 12720 enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL); 12721 struct neon_type_el et = neon_check_type (2, rs, 12722 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK); 12723 unsigned immlo, immhi = 0, immbits; 12724 int op, cmode, float_p; 12725 12726 constraint (et.type == NT_invtype, 12727 _("operand size must be specified for immediate VMOV")); 12728 12729 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */ 12730 op = (inst.instruction & (1 << 5)) != 0; 12731 12732 immlo = inst.operands[1].imm; 12733 if (inst.operands[1].regisimm) 12734 immhi = inst.operands[1].reg; 12735 12736 constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0, 12737 _("immediate has bits set outside the operand size")); 12738 12739 float_p = inst.operands[1].immisfloat; 12740 12741 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op, 12742 et.size, et.type)) == FAIL) 12743 { 12744 /* Invert relevant bits only. */ 12745 neon_invert_size (&immlo, &immhi, et.size); 12746 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable 12747 with one or the other; those cases are caught by 12748 neon_cmode_for_move_imm. 
*/ 12749 op = !op; 12750 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, 12751 &op, et.size, et.type)) == FAIL) 12752 { 12753 first_error (_("immediate out of range")); 12754 return; 12755 } 12756 } 12757 12758 inst.instruction &= ~(1 << 5); 12759 inst.instruction |= op << 5; 12760 12761 inst.instruction |= LOW4 (inst.operands[0].reg) << 12; 12762 inst.instruction |= HI1 (inst.operands[0].reg) << 22; 12763 inst.instruction |= neon_quad (rs) << 6; 12764 inst.instruction |= cmode << 8; 12765 12766 neon_write_immbits (immbits); 12767} 12768 12769static void 12770do_neon_mvn (void) 12771{ 12772 if (inst.operands[1].isreg) 12773 { 12774 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL); 12775 12776 inst.instruction = NEON_ENC_INTEGER (inst.instruction); 12777 inst.instruction |= LOW4 (inst.operands[0].reg) << 12; 12778 inst.instruction |= HI1 (inst.operands[0].reg) << 22; 12779 inst.instruction |= LOW4 (inst.operands[1].reg); 12780 inst.instruction |= HI1 (inst.operands[1].reg) << 5; 12781 inst.instruction |= neon_quad (rs) << 6; 12782 } 12783 else 12784 { 12785 inst.instruction = NEON_ENC_IMMED (inst.instruction); 12786 neon_move_immediate (); 12787 } 12788 12789 inst.instruction = neon_dp_fixup (inst.instruction); 12790} 12791 12792/* Encode instructions of form: 12793 12794 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0| 12795 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | 12796 12797*/ 12798 12799static void 12800neon_mixed_length (struct neon_type_el et, unsigned size) 12801{ 12802 inst.instruction |= LOW4 (inst.operands[0].reg) << 12; 12803 inst.instruction |= HI1 (inst.operands[0].reg) << 22; 12804 inst.instruction |= LOW4 (inst.operands[1].reg) << 16; 12805 inst.instruction |= HI1 (inst.operands[1].reg) << 7; 12806 inst.instruction |= LOW4 (inst.operands[2].reg); 12807 inst.instruction |= HI1 (inst.operands[2].reg) << 5; 12808 inst.instruction |= (et.type == NT_unsigned) << 24; 12809 inst.instruction |= neon_logbits (size) 
				      << 20;

  inst.instruction = neon_dp_fixup (inst.instruction);
}

/* Encode a lengthening dyadic operation (Qd = Dn op Dm).  */

static void
do_neon_dyadic_long (void)
{
  /* FIXME: Type checking for lengthening op.  */
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}

/* Encode VABAL (absolute difference and accumulate, long).  */

static void
do_neon_abal (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}

/* Encode a long multiply(-accumulate) that may take either a scalar or a
   register as the second source operand.  NOTE(review): the parameter names
   appear inverted relative to their use -- REGTYPES constrains the scalar
   variant and SCALARTYPES the register variant below.  Behavior matches the
   callers' argument order; only the naming is misleading.  */

static void
neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
{
  if (inst.operands[2].isscalar)
    {
      struct neon_type_el et = neon_check_type (3, NS_QDS,
        N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
      inst.instruction = NEON_ENC_SCALAR (inst.instruction);
      neon_mul_mac (et, et.type == NT_unsigned);
    }
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
        N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      neon_mixed_length (et, et.size);
    }
}

/* Encode VMLAL/VMLSL and friends: scalar form excludes 8-bit types.  */

static void
do_neon_mac_maybe_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}

/* Encode a widening dyadic operation (Qd = Qn op Dm).  */

static void
do_neon_dyadic_wide (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QQD,
    N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}

/* Encode a narrowing dyadic operation (Dd = Qn op Qm).  */

static void
do_neon_dyadic_narrow (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
  /* Operand sign is unimportant, and the U bit is part of the opcode,
     so force the operand type to integer.  */
  et.type = NT_integer;
  neon_mixed_length (et, et.size / 2);
}

/* Encode VQDMLAL/VQDMLSL/VQDMULL scalar forms (signed 16/32 only).  */

static void
do_neon_mul_sat_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}

/* Encode VMULL, which additionally allows the polynomial (p8) type.  */

static void
do_neon_vmull (void)
{
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar_long ();
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
        N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY);
      if (et.type == NT_poly)
        inst.instruction = NEON_ENC_POLY (inst.instruction);
      else
        inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      /* For polynomial encoding, size field must be 0b00 and the U bit must be
         zero. Should be OK as-is.  */
      neon_mixed_length (et, et.size);
    }
}

/* Encode VEXT.  The byte index is the element index scaled by element size.  */

static void
do_neon_ext (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  /* Scale the element index into a byte index.  */
  unsigned imm = (inst.operands[3].imm * et.size) / 8;
  constraint (imm >= (neon_quad (rs) ? 16 : 8), _("shift out of range"));
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= imm << 8;

  inst.instruction = neon_dp_fixup (inst.instruction);
}

/* Encode VREV16/VREV32/VREV64.  */

static void
do_neon_rev (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  unsigned op = (inst.instruction >> 7) & 3;
  /* N (width of reversed regions) is encoded as part of the bitmask. We
     extract it here to check the elements to be reversed are smaller.
     Otherwise we'd get a reserved instruction.  */
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ?
								    64 : 0;
  assert (elsize != 0);
  constraint (et.size >= elsize,
              _("elements must be smaller than reversal region"));
  neon_two_same (neon_quad (rs), 1, et.size);
}

/* Encode VDUP, either from a scalar (Dn[x]) or from an ARM register.  */

static void
do_neon_dup (void)
{
  if (inst.operands[1].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
        N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      /* Scalar index, shifted into position above the size bits.  */
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
        return;

      inst.instruction = NEON_ENC_SCALAR (inst.instruction);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Index and size together form the imm4 field at bits [19:16].  */
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      inst.instruction = neon_dp_fixup (inst.instruction);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
        N_8 | N_16 | N_32 | N_KEY, N_EQK);
      /* Duplicate ARM register to lanes of vector.  */
      inst.instruction = NEON_ENC_ARMREG (inst.instruction);
      switch (et.size)
        {
        case 8:  inst.instruction |= 0x400000; break;
        case 16: inst.instruction |= 0x000020; break;
        case 32: inst.instruction |= 0x000000; break;
        default: break;
        }
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
         variants, except for the condition field.  */
      do_vfp_cond_or_thumb ();
    }
}

/* VMOV has particularly many variations. It can be one of:
     0. VMOV<c><q> <Qd>, <Qm>
     1. VMOV<c><q> <Dd>, <Dm>
   (Register operations, which are VORR with Rm = Rn.)
     2. VMOV<c><q>.<dt> <Qd>, #<imm>
     3. VMOV<c><q>.<dt> <Dd>, #<imm>
   (Immediate loads.)
     4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
   (ARM register to scalar.)
     5. VMOV<c><q> <Dm>, <Rd>, <Rn>
   (Two ARM registers to vector.)
     6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
   (Scalar to ARM register.)
     7. VMOV<c><q> <Rd>, <Rn>, <Dm>
   (Vector to two ARM registers.)
     8. VMOV.F32 <Sd>, <Sm>
     9. VMOV.F64 <Dd>, <Dm>
   (VFP register moves.)
    10. VMOV.F32 <Sd>, #imm
    11. VMOV.F64 <Dd>, #imm
   (VFP float immediate load.)
    12. VMOV <Rd>, <Sm>
   (VFP single to ARM reg.)
    13. VMOV <Sd>, <Rm>
   (ARM reg to VFP single.)
    14. VMOV <Rd>, <Re>, <Sn>, <Sm>
   (Two ARM regs to two VFP singles.)
    15. VMOV <Sd>, <Se>, <Rn>, <Rm>
   (Two VFP singles to two ARM regs.)

   These cases can be disambiguated using neon_select_shape, except cases 1/9
   and 3/11 which depend on the operand type too.

   All the encoded bits are hardcoded by this function.

   Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
   Cases 5, 7 may be used with VFPv2 and above.

   FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
   can specify a type where it doesn't make sense to, and is ignored).  */

static void
do_neon_mov (void)
{
  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
    NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
    NS_NULL);
  struct neon_type_el et;
  const char *ldconst = 0;

  switch (rs)
    {
    case NS_DD:  /* case 1/9.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* It is not an error here if no type is given.  */
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
        {
          do_vfp_nsyn_opcode ("fcpyd");
          break;
        }
      /* fall through.  */

    case NS_QQ:  /* case 0/1.  */
      {
        if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
          return;
        /* The architecture manual I have doesn't explicitly state which
           value the U bit should have for register->register moves, but
           the equivalent VORR instruction has U = 0, so do that.  */
        inst.instruction = 0x0200110;
        inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
        inst.instruction |= HI1 (inst.operands[0].reg) << 22;
        inst.instruction |= LOW4 (inst.operands[1].reg);
        inst.instruction |= HI1 (inst.operands[1].reg) << 5;
        /* VORR with Rn = Rm: source register encoded twice.  */
        inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
        inst.instruction |= HI1 (inst.operands[1].reg) << 7;
        inst.instruction |= neon_quad (rs) << 6;

        inst.instruction = neon_dp_fixup (inst.instruction);
      }
      break;

    case NS_DI:  /* case 3/11.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* As for NS_DD: no type suffix is acceptable here.  */
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
        {
          /* case 11 (fconstd).  */
          ldconst = "fconstd";
          goto encode_fconstd;
        }
      /* fall through.  */

    case NS_QI:  /* case 2/3.  */
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
        return;
      inst.instruction = 0x0800010;
      neon_move_immediate ();
      inst.instruction = neon_dp_fixup (inst.instruction);
      break;

    case NS_SR:  /* case 4.  */
      {
        unsigned bcdebits = 0;
        struct neon_type_el et = neon_check_type (2, NS_NULL,
          N_8 | N_16 | N_32 | N_KEY, N_EQK);
        int logsize = neon_logbits (et.size);
        unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
        unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

        constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
                    _(BAD_FPU));
        /* Only 32-bit transfers without Neon (see function comment).  */
        constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
                    && et.size != 32, _(BAD_FPU));
        constraint (et.type == NT_invtype, _("bad type for scalar"));
        constraint (x >= 64 / et.size, _("scalar index out of range"));

        /* Base opc1/opc2 bits for the element size; the scalar index is
           folded in below.  */
        switch (et.size)
          {
          case 8:  bcdebits = 0x8; break;
          case 16: bcdebits = 0x1; break;
          case 32: bcdebits = 0x0; break;
          default: ;
          }

        bcdebits |= x << logsize;

        inst.instruction = 0xe000b10;
        do_vfp_cond_or_thumb ();
        inst.instruction |= LOW4 (dn) << 16;
        inst.instruction |= HI1 (dn) << 7;
        inst.instruction |= inst.operands[1].reg << 12;
        inst.instruction |= (bcdebits & 3) << 5;
        inst.instruction |= (bcdebits >> 2) << 21;
      }
      break;

    case NS_DRR:  /* case 5 (fmdrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
                  _(BAD_FPU));

      inst.instruction = 0xc400b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= LOW4 (inst.operands[0].reg);
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
      inst.instruction |= inst.operands[1].reg << 12;
      inst.instruction |= inst.operands[2].reg << 16;
      break;

    case NS_RS:  /* case 6.  */
      {
        struct neon_type_el et = neon_check_type (2, NS_NULL,
          N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
        unsigned logsize = neon_logbits (et.size);
        unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
        unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
        unsigned abcdebits = 0;

        constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
                    _(BAD_FPU));
        constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
                    && et.size != 32, _(BAD_FPU));
        constraint (et.type == NT_invtype, _("bad type for scalar"));
        constraint (x >= 64 / et.size, _("scalar index out of range"));

        /* opc1/opc2 bits: narrow transfers also encode signedness.  */
        switch (et.size)
          {
          case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
          case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
          case 32: abcdebits = 0x00; break;
          default: ;
          }

        abcdebits |= x << logsize;
        inst.instruction = 0xe100b10;
        do_vfp_cond_or_thumb ();
        inst.instruction |= LOW4 (dn) << 16;
        inst.instruction |= HI1 (dn) << 7;
        inst.instruction |= inst.operands[0].reg << 12;
        inst.instruction |= (abcdebits & 3) << 5;
        inst.instruction |= (abcdebits >> 2) << 21;
      }
      break;

    case NS_RRD:  /* case 7 (fmrrd).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
                  _(BAD_FPU));

      inst.instruction = 0xc500b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      break;

    case NS_FF:  /* case 8 (fcpys).  */
      do_vfp_nsyn_opcode ("fcpys");
      break;

    case NS_FI:  /* case 10 (fconsts).  */
      ldconst = "fconsts";
    encode_fconstd:
      /* Only immediates representable in the 8-bit "quarter float" format
         can be encoded; anything else is an error.  */
      if (is_quarter_float (inst.operands[1].imm))
        {
          inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
          do_vfp_nsyn_opcode (ldconst);
        }
      else
        first_error (_("immediate out of range"));
      break;

    case NS_RF:  /* case 12 (fmrs).  */
      do_vfp_nsyn_opcode ("fmrs");
      break;

    case NS_FR:  /* case 13 (fmsr).  */
      do_vfp_nsyn_opcode ("fmsr");
      break;

    /* The encoders for the fmrrs and fmsrr instructions expect three operands
       (one of which is a list), but we have parsed four. Do some fiddling to
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
       expect.  */
    case NS_RRFF:  /* case 14 (fmrrs).  */
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
                  _("VFP registers must be adjacent"));
      inst.operands[2].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmrrs");
      break;

    case NS_FFRR:  /* case 15 (fmsrr).  */
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
                  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    default:
      abort ();
    }
}

/* Encode V{R}SHR with a rounding-style (subtracted) immediate.  */

static void
do_neon_rshift_round_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
  int imm = inst.operands[2].imm;

  /* imm == 0 case is encoded as VMOV for V{R}SHR.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      do_neon_mov ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
              _("immediate out of range for shift"));
  neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
                  et.size - imm);
}

/* Encode VMOVL (lengthening move).  */

static void
do_neon_movl (void)
{
  struct neon_type_el et = neon_check_type (2, NS_QD,
    N_EQK | N_DBL, N_SU_32 | N_KEY);
  unsigned sizebits = et.size >> 3;
  inst.instruction |= sizebits << 19;
  neon_two_same (0, et.type == NT_unsigned, -1);
}

/* Encode VTRN (transpose elements).  */

static void
do_neon_trn (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  inst.instruction = NEON_ENC_INTEGER (inst.instruction);
  neon_two_same (neon_quad (rs), 1, et.size);
}

/* Encode VZIP/VUZP.  */

static void
do_neon_zip_uzp (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  if (rs == NS_DD && et.size == 32)
    {
      /* Special case: encode as VTRN.32 <Dd>, <Dm>.
	 */
      inst.instruction = N_MNEM_vtrn;
      do_neon_trn ();
      return;
    }
  neon_two_same (neon_quad (rs), 1, et.size);
}

/* Encode VQABS/VQNEG (saturating absolute/negate, signed only).  */

static void
do_neon_sat_abs_neg (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}

/* Encode VPADDL/VPADAL (pairwise add long).  */

static void
do_neon_pair_long (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
  /* Unsigned is encoded in OP field (bit 7) for these instruction.  */
  inst.instruction |= (et.type == NT_unsigned) << 7;
  neon_two_same (neon_quad (rs), 1, et.size);
}

/* Encode VRECPE/VRSQRTE (reciprocal estimates, f32 or u32).  */

static void
do_neon_recip_est (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
  /* Bit 8 selects the float variant.  */
  inst.instruction |= (et.type == NT_float) << 8;
  neon_two_same (neon_quad (rs), 1, et.size);
}

/* Encode VCLS (count leading sign bits).  */

static void
do_neon_cls (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}

/* Encode VCLZ (count leading zeros).  */

static void
do_neon_clz (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}

/* Encode VCNT (population count, 8-bit elements only).  */

static void
do_neon_cnt (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_INT, N_8 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}

/* Encode VSWP (untyped register swap).  */

static void
do_neon_swp (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  neon_two_same (neon_quad (rs), 1, -1);
}

/* Encode VTBL/VTBX (table lookup with a 1-4 register list).  */

static void
do_neon_tbl_tbx (void)
{
  unsigned listlenbits;
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);

  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
    {
      first_error (_("bad list length for table lookup"));
      return;
    }

  listlenbits = inst.operands[1].imm - 1;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= listlenbits << 8;

  inst.instruction = neon_dp_fixup (inst.instruction);
}

/* Encode VLDM/VSTM (double-precision register list forms; single forms are
   re-dispatched to the traditional VFP encoders).  */

static void
do_neon_ldm_stm (void)
{
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  /* Each double register occupies two words in the offset field.  */
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
              _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
              _("register list must contain at least 1 and at most 16 "
                "registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}

/* Encode VLDR/VSTR by re-dispatching to the matching VFP load/store.  */

static void
do_neon_ldr_str (void)
{
  /* The L bit distinguishes load from store.  */
  int is_ldr = (inst.instruction & (1 << 20)) != 0;

  if (inst.operands[0].issingle)
    {
      if (is_ldr)
        do_vfp_nsyn_opcode ("flds");
      else
        do_vfp_nsyn_opcode ("fsts");
    }
  else
    {
      if (is_ldr)
        do_vfp_nsyn_opcode ("fldd");
      else
        do_vfp_nsyn_opcode ("fstd");
    }
}

/* "interleave" version also handles non-interleaving register VLD1/VST1
   instructions.  */

static void
do_neon_ld_st_interleave (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL,
    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7,  -1, 0xa,  -1, 0x6,  -1, 0x2,  -1, /* VLD1 / VST1.  */
       -1,  -1, 0x8, 0x9,  -1,  -1, 0x3,  -1, /* VLD2 / VST2.  */
       -1,  -1,  -1,  -1, 0x4, 0x5,  -1,  -1, /* VLD3 / VST3.  */
       -1,  -1,  -1,  -1,  -1,  -1, 0x0, 0x1  /* VLD4 / VST4.
						  */
    };
  int typebits;

  if (et.type == NT_invtype)
    return;

  /* Alignment hints: 64, 128 or 256 bits; 3-register lists cannot take
     128/256-bit alignment.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
        if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
          goto bad_alignment;
        alignbits = 2;
        break;
      case 256:
        if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
          goto bad_alignment;
        alignbits = 3;
        break;
      default:
      bad_alignment:
        first_error (_("bad alignment"));
        return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
        | (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));

  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}

/* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
   *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
   otherwise. The variable arguments are a list of pairs of legal (size, align)
   values, terminated with -1.  */

static int
neon_alignment_bit (int size, int align, int *do_align, ...)
{
  va_list ap;
  int result = FAIL, thissize, thisalign;

  /* No alignment specified at all: nothing to check, nothing to encode.  */
  if (!inst.operands[1].immisalign)
    {
      *do_align = 0;
      return SUCCESS;
    }

  va_start (ap, do_align);

  /* Walk the (size, align) pairs until a match or the -1 sentinel.  */
  do
    {
      thissize = va_arg (ap, int);
      if (thissize == -1)
        break;
      thisalign = va_arg (ap, int);

      if (size == thissize && align == thisalign)
        result = SUCCESS;
    }
  while (result != SUCCESS);

  va_end (ap);

  if (result == SUCCESS)
    *do_align = 1;
  else
    first_error (_("unsupported alignment for instruction"));

  return result;
}

/* Encode single n-element structure to/from one lane VLD<n>/VST<n>
   instructions.  */

static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;
  int logsize = neon_logbits (et.size);
  int align = inst.operands[1].imm >> 8;
  /* <n> of VLD<n>/VST<n>, minus one, from bits [9:8] of the bitmask.  */
  int n = (inst.instruction >> 8) & 3;
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
              _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
              _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
              && et.size == 8,
              _("stride of 2 unavailable when element size is 8"));

  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
                                       32, 32, -1);
      if (align_good == FAIL)
        return;
      if (do_align)
        {
          unsigned alignbits = 0;
          switch (et.size)
            {
            case 16: alignbits = 0x1; break;
            case 32: alignbits = 0x3; break;
            default: ;
            }
          inst.instruction |= alignbits << 4;
        }
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
                                       32, 64, -1);
      if (align_good == FAIL)
        return;
      if (do_align)
        inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
                  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
                                       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
        return;
      if (do_align)
        {
          unsigned alignbits = 0;
          switch (et.size)
            {
            case 8:  alignbits = 0x1; break;
            case 16: alignbits = 0x1; break;
            case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
            default: ;
            }
          inst.instruction |= alignbits << 4;
        }
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}

/* Encode single n-element structure to all lanes VLD<n> instructions.  */

static void
do_neon_ld_dup (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;

  if (et.type == NT_invtype)
    return;

  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
                                       &do_align, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
        return;
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
        {
        case 1: break;
        case 2: inst.instruction |= 1 << 5; break;
        default: first_error (_("bad list length")); return;
        }
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
                                       &do_align, 8, 16, 16, 32, 32, 64, -1);
      if (align_good == FAIL)
        return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
                  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
        inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3.  */
      constraint (inst.operands[1].immisalign,
                  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
                  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
        inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
        int align = inst.operands[1].imm >> 8;
        align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
                                         16, 64, 32, 64, 32, 128, -1);
        if (align_good == FAIL)
          return;
        constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
                    _("bad list length"));
        if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
          inst.instruction |= 1 << 5;
        /* 32-bit elements with 128-bit alignment use a special size
           encoding.  */
        if (et.size == 32 && align == 128)
          inst.instruction |= 0x3 << 6;
        else
          inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  inst.instruction |= do_align << 4;
}

/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
   apart from bits [11:4].  */

static void
do_neon_ldx_stx (void)
{
  switch (NEON_LANE (inst.operands[0].imm))
    {
    case NEON_INTERLEAVE_LANES:
      inst.instruction = NEON_ENC_INTERLV (inst.instruction);
      do_neon_ld_st_interleave ();
      break;

    case NEON_ALL_LANES:
      inst.instruction = NEON_ENC_DUP (inst.instruction);
      do_neon_ld_dup ();
      break;

    default:
      inst.instruction = NEON_ENC_LANE (inst.instruction);
      do_neon_ld_st_lane ();
    }

  /* L bit comes from bit mask.
*/ 13722 inst.instruction |= LOW4 (inst.operands[0].reg) << 12; 13723 inst.instruction |= HI1 (inst.operands[0].reg) << 22; 13724 inst.instruction |= inst.operands[1].reg << 16; 13725 13726 if (inst.operands[1].postind) 13727 { 13728 int postreg = inst.operands[1].imm & 0xf; 13729 constraint (!inst.operands[1].immisreg, 13730 _("post-index must be a register")); 13731 constraint (postreg == 0xd || postreg == 0xf, 13732 _("bad register for post-index")); 13733 inst.instruction |= postreg; 13734 } 13735 else if (inst.operands[1].writeback) 13736 { 13737 inst.instruction |= 0xd; 13738 } 13739 else 13740 inst.instruction |= 0xf; 13741 13742 if (thumb_mode) 13743 inst.instruction |= 0xf9000000; 13744 else 13745 inst.instruction |= 0xf4000000; 13746} 13747 13748 13749/* Overall per-instruction processing. */ 13750 13751/* We need to be able to fix up arbitrary expressions in some statements. 13752 This is so that we can handle symbols that are an arbitrary distance from 13753 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask), 13754 which returns part of an address in a form which will be valid for 13755 a data instruction. We do this by pushing the expression into a symbol 13756 in the expr_section, and creating a fix for that. */ 13757 13758static void 13759fix_new_arm (fragS * frag, 13760 int where, 13761 short int size, 13762 expressionS * exp, 13763 int pc_rel, 13764 int reloc) 13765{ 13766 fixS * new_fix; 13767 13768 switch (exp->X_op) 13769 { 13770 case O_constant: 13771 case O_symbol: 13772 case O_add: 13773 case O_subtract: 13774 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc); 13775 break; 13776 13777 default: 13778 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0, 13779 pc_rel, reloc); 13780 break; 13781 } 13782 13783 /* Mark whether the fix is to a THUMB instruction, or an ARM 13784 instruction. 
*/ 13785 new_fix->tc_fix_data = thumb_mode; 13786} 13787 13788/* Create a frg for an instruction requiring relaxation. */ 13789static void 13790output_relax_insn (void) 13791{ 13792 char * to; 13793 symbolS *sym; 13794 int offset; 13795 13796 /* The size of the instruction is unknown, so tie the debug info to the 13797 start of the instruction. */ 13798 dwarf2_emit_insn (0); 13799 13800 switch (inst.reloc.exp.X_op) 13801 { 13802 case O_symbol: 13803 sym = inst.reloc.exp.X_add_symbol; 13804 offset = inst.reloc.exp.X_add_number; 13805 break; 13806 case O_constant: 13807 sym = NULL; 13808 offset = inst.reloc.exp.X_add_number; 13809 break; 13810 default: 13811 sym = make_expr_symbol (&inst.reloc.exp); 13812 offset = 0; 13813 break; 13814 } 13815 to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE, 13816 inst.relax, sym, offset, NULL/*offset, opcode*/); 13817 md_number_to_chars (to, inst.instruction, THUMB_SIZE); 13818} 13819 13820/* Write a 32-bit thumb instruction to buf. */ 13821static void 13822put_thumb32_insn (char * buf, unsigned long insn) 13823{ 13824 md_number_to_chars (buf, insn >> 16, THUMB_SIZE); 13825 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE); 13826} 13827 13828static void 13829output_inst (const char * str) 13830{ 13831 char * to = NULL; 13832 13833 if (inst.error) 13834 { 13835 as_bad ("%s -- `%s'", inst.error, str); 13836 return; 13837 } 13838 if (inst.relax) { 13839 output_relax_insn(); 13840 return; 13841 } 13842 if (inst.size == 0) 13843 return; 13844 13845 to = frag_more (inst.size); 13846 13847 if (thumb_mode && (inst.size > THUMB_SIZE)) 13848 { 13849 assert (inst.size == (2 * THUMB_SIZE)); 13850 put_thumb32_insn (to, inst.instruction); 13851 } 13852 else if (inst.size > INSN_SIZE) 13853 { 13854 assert (inst.size == (2 * INSN_SIZE)); 13855 md_number_to_chars (to, inst.instruction, INSN_SIZE); 13856 md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE); 13857 } 13858 else 13859 md_number_to_chars (to, 
inst.instruction, inst.size); 13860 13861 if (inst.reloc.type != BFD_RELOC_UNUSED) 13862 fix_new_arm (frag_now, to - frag_now->fr_literal, 13863 inst.size, & inst.reloc.exp, inst.reloc.pc_rel, 13864 inst.reloc.type); 13865 13866 dwarf2_emit_insn (inst.size); 13867} 13868 13869/* Tag values used in struct asm_opcode's tag field. */ 13870enum opcode_tag 13871{ 13872 OT_unconditional, /* Instruction cannot be conditionalized. 13873 The ARM condition field is still 0xE. */ 13874 OT_unconditionalF, /* Instruction cannot be conditionalized 13875 and carries 0xF in its ARM condition field. */ 13876 OT_csuffix, /* Instruction takes a conditional suffix. */ 13877 OT_csuffixF, /* Some forms of the instruction take a conditional 13878 suffix, others place 0xF where the condition field 13879 would be. */ 13880 OT_cinfix3, /* Instruction takes a conditional infix, 13881 beginning at character index 3. (In 13882 unified mode, it becomes a suffix.) */ 13883 OT_cinfix3_deprecated, /* The same as OT_cinfix3. This is used for 13884 tsts, cmps, cmns, and teqs. */ 13885 OT_cinfix3_legacy, /* Legacy instruction takes a conditional infix at 13886 character index 3, even in unified mode. Used for 13887 legacy instructions where suffix and infix forms 13888 may be ambiguous. */ 13889 OT_csuf_or_in3, /* Instruction takes either a conditional 13890 suffix or an infix at character index 3. */ 13891 OT_odd_infix_unc, /* This is the unconditional variant of an 13892 instruction that takes a conditional infix 13893 at an unusual position. In unified mode, 13894 this variant will accept a suffix. */ 13895 OT_odd_infix_0 /* Values greater than or equal to OT_odd_infix_0 13896 are the conditional variants of instructions that 13897 take conditional infixes in unusual positions. 13898 The infix appears at character index 13899 (tag - OT_odd_infix_0). These are not accepted 13900 in unified mode. 
*/ 13901}; 13902 13903/* Subroutine of md_assemble, responsible for looking up the primary 13904 opcode from the mnemonic the user wrote. STR points to the 13905 beginning of the mnemonic. 13906 13907 This is not simply a hash table lookup, because of conditional 13908 variants. Most instructions have conditional variants, which are 13909 expressed with a _conditional affix_ to the mnemonic. If we were 13910 to encode each conditional variant as a literal string in the opcode 13911 table, it would have approximately 20,000 entries. 13912 13913 Most mnemonics take this affix as a suffix, and in unified syntax, 13914 'most' is upgraded to 'all'. However, in the divided syntax, some 13915 instructions take the affix as an infix, notably the s-variants of 13916 the arithmetic instructions. Of those instructions, all but six 13917 have the infix appear after the third character of the mnemonic. 13918 13919 Accordingly, the algorithm for looking up primary opcodes given 13920 an identifier is: 13921 13922 1. Look up the identifier in the opcode table. 13923 If we find a match, go to step U. 13924 13925 2. Look up the last two characters of the identifier in the 13926 conditions table. If we find a match, look up the first N-2 13927 characters of the identifier in the opcode table. If we 13928 find a match, go to step CE. 13929 13930 3. Look up the fourth and fifth characters of the identifier in 13931 the conditions table. If we find a match, extract those 13932 characters from the identifier, and look up the remaining 13933 characters in the opcode table. If we find a match, go 13934 to step CM. 13935 13936 4. Fail. 13937 13938 U. Examine the tag field of the opcode structure, in case this is 13939 one of the six instructions with its conditional infix in an 13940 unusual place. If it is, the tag tells us where to find the 13941 infix; look it up in the conditions table and set inst.cond 13942 accordingly. Otherwise, this is an unconditional instruction. 
13943 Again set inst.cond accordingly. Return the opcode structure. 13944 13945 CE. Examine the tag field to make sure this is an instruction that 13946 should receive a conditional suffix. If it is not, fail. 13947 Otherwise, set inst.cond from the suffix we already looked up, 13948 and return the opcode structure. 13949 13950 CM. Examine the tag field to make sure this is an instruction that 13951 should receive a conditional infix after the third character. 13952 If it is not, fail. Otherwise, undo the edits to the current 13953 line of input and proceed as for case CE. */ 13954 13955static const struct asm_opcode * 13956opcode_lookup (char **str) 13957{ 13958 char *end, *base; 13959 char *affix; 13960 const struct asm_opcode *opcode; 13961 const struct asm_cond *cond; 13962 char save[2]; 13963 bfd_boolean neon_supported; 13964 13965 neon_supported = ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1); 13966 13967 /* Scan up to the end of the mnemonic, which must end in white space, 13968 '.' (in unified mode, or for Neon instructions), or end of string. */ 13969 for (base = end = *str; *end != '\0'; end++) 13970 if (*end == ' ' || ((unified_syntax || neon_supported) && *end == '.')) 13971 break; 13972 13973 if (end == base) 13974 return 0; 13975 13976 /* Handle a possible width suffix and/or Neon type suffix. */ 13977 if (end[0] == '.') 13978 { 13979 int offset = 2; 13980 13981 /* The .w and .n suffixes are only valid if the unified syntax is in 13982 use. */ 13983 if (unified_syntax && end[1] == 'w') 13984 inst.size_req = 4; 13985 else if (unified_syntax && end[1] == 'n') 13986 inst.size_req = 2; 13987 else 13988 offset = 0; 13989 13990 inst.vectype.elems = 0; 13991 13992 *str = end + offset; 13993 13994 if (end[offset] == '.') 13995 { 13996 /* See if we have a Neon type suffix (possible in either unified or 13997 non-unified ARM syntax mode). 
*/ 13998 if (parse_neon_type (&inst.vectype, str) == FAIL) 13999 return 0; 14000 } 14001 else if (end[offset] != '\0' && end[offset] != ' ') 14002 return 0; 14003 } 14004 else 14005 *str = end; 14006 14007 /* Look for unaffixed or special-case affixed mnemonic. */ 14008 opcode = hash_find_n (arm_ops_hsh, base, end - base); 14009 if (opcode) 14010 { 14011 /* step U */ 14012 if (opcode->tag < OT_odd_infix_0) 14013 { 14014 inst.cond = COND_ALWAYS; 14015 return opcode; 14016 } 14017 14018 if (unified_syntax) 14019 as_warn (_("conditional infixes are deprecated in unified syntax")); 14020 affix = base + (opcode->tag - OT_odd_infix_0); 14021 cond = hash_find_n (arm_cond_hsh, affix, 2); 14022 assert (cond); 14023 14024 inst.cond = cond->value; 14025 return opcode; 14026 } 14027 14028 /* Cannot have a conditional suffix on a mnemonic of less than two 14029 characters. */ 14030 if (end - base < 3) 14031 return 0; 14032 14033 /* Look for suffixed mnemonic. */ 14034 affix = end - 2; 14035 cond = hash_find_n (arm_cond_hsh, affix, 2); 14036 opcode = hash_find_n (arm_ops_hsh, base, affix - base); 14037 if (opcode && cond) 14038 { 14039 /* step CE */ 14040 switch (opcode->tag) 14041 { 14042 case OT_cinfix3_legacy: 14043 /* Ignore conditional suffixes matched on infix only mnemonics. 
*/ 14044 break; 14045 14046 case OT_cinfix3: 14047 case OT_cinfix3_deprecated: 14048 case OT_odd_infix_unc: 14049 if (!unified_syntax) 14050 return 0; 14051 /* else fall through */ 14052 14053 case OT_csuffix: 14054 case OT_csuffixF: 14055 case OT_csuf_or_in3: 14056 inst.cond = cond->value; 14057 return opcode; 14058 14059 case OT_unconditional: 14060 case OT_unconditionalF: 14061 if (thumb_mode) 14062 { 14063 inst.cond = cond->value; 14064 } 14065 else 14066 { 14067 /* delayed diagnostic */ 14068 inst.error = BAD_COND; 14069 inst.cond = COND_ALWAYS; 14070 } 14071 return opcode; 14072 14073 default: 14074 return 0; 14075 } 14076 } 14077 14078 /* Cannot have a usual-position infix on a mnemonic of less than 14079 six characters (five would be a suffix). */ 14080 if (end - base < 6) 14081 return 0; 14082 14083 /* Look for infixed mnemonic in the usual position. */ 14084 affix = base + 3; 14085 cond = hash_find_n (arm_cond_hsh, affix, 2); 14086 if (!cond) 14087 return 0; 14088 14089 memcpy (save, affix, 2); 14090 memmove (affix, affix + 2, (end - affix) - 2); 14091 opcode = hash_find_n (arm_ops_hsh, base, (end - base) - 2); 14092 memmove (affix + 2, affix, (end - affix) - 2); 14093 memcpy (affix, save, 2); 14094 14095 if (opcode 14096 && (opcode->tag == OT_cinfix3 14097 || opcode->tag == OT_cinfix3_deprecated 14098 || opcode->tag == OT_csuf_or_in3 14099 || opcode->tag == OT_cinfix3_legacy)) 14100 { 14101 /* step CM */ 14102 if (unified_syntax 14103 && (opcode->tag == OT_cinfix3 14104 || opcode->tag == OT_cinfix3_deprecated)) 14105 as_warn (_("conditional infixes are deprecated in unified syntax")); 14106 14107 inst.cond = cond->value; 14108 return opcode; 14109 } 14110 14111 return 0; 14112} 14113 14114void 14115md_assemble (char *str) 14116{ 14117 char *p = str; 14118 const struct asm_opcode * opcode; 14119 14120 /* Align the previous label if needed. 
*/ 14121 if (last_label_seen != NULL) 14122 { 14123 symbol_set_frag (last_label_seen, frag_now); 14124 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ()); 14125 S_SET_SEGMENT (last_label_seen, now_seg); 14126 } 14127 14128 memset (&inst, '\0', sizeof (inst)); 14129 inst.reloc.type = BFD_RELOC_UNUSED; 14130 14131 opcode = opcode_lookup (&p); 14132 if (!opcode) 14133 { 14134 /* It wasn't an instruction, but it might be a register alias of 14135 the form alias .req reg, or a Neon .dn/.qn directive. */ 14136 if (!create_register_alias (str, p) 14137 && !create_neon_reg_alias (str, p)) 14138 as_bad (_("bad instruction `%s'"), str); 14139 14140 return; 14141 } 14142 14143 if (opcode->tag == OT_cinfix3_deprecated) 14144 as_warn (_("s suffix on comparison instruction is deprecated")); 14145 14146 /* The value which unconditional instructions should have in place of the 14147 condition field. */ 14148 inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1; 14149 14150 if (thumb_mode) 14151 { 14152 arm_feature_set variant; 14153 14154 variant = cpu_variant; 14155 /* Only allow coprocessor instructions on Thumb-2 capable devices. */ 14156 if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2)) 14157 ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard); 14158 /* Check that this instruction is supported for this CPU. */ 14159 if (!opcode->tvariant 14160 || (thumb_mode == 1 14161 && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant))) 14162 { 14163 as_bad (_("selected processor does not support `%s'"), str); 14164 return; 14165 } 14166 if (inst.cond != COND_ALWAYS && !unified_syntax 14167 && opcode->tencode != do_t_branch) 14168 { 14169 as_bad (_("Thumb does not support conditional execution")); 14170 return; 14171 } 14172 14173 if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2) && !inst.size_req) 14174 { 14175 /* Implicit require narrow instructions on Thumb-1. This avoids 14176 relaxation accidentally introducing Thumb-2 instructions. 
*/ 14177 if (opcode->tencode != do_t_blx && opcode->tencode != do_t_branch23) 14178 inst.size_req = 2; 14179 } 14180 14181 /* Check conditional suffixes. */ 14182 if (current_it_mask) 14183 { 14184 int cond; 14185 cond = current_cc ^ ((current_it_mask >> 4) & 1) ^ 1; 14186 current_it_mask <<= 1; 14187 current_it_mask &= 0x1f; 14188 /* The BKPT instruction is unconditional even in an IT block. */ 14189 if (!inst.error 14190 && cond != inst.cond && opcode->tencode != do_t_bkpt) 14191 { 14192 as_bad (_("incorrect condition in IT block")); 14193 return; 14194 } 14195 } 14196 else if (inst.cond != COND_ALWAYS && opcode->tencode != do_t_branch) 14197 { 14198 as_bad (_("thumb conditional instrunction not in IT block")); 14199 return; 14200 } 14201 14202 mapping_state (MAP_THUMB); 14203 inst.instruction = opcode->tvalue; 14204 14205 if (!parse_operands (p, opcode->operands)) 14206 opcode->tencode (); 14207 14208 /* Clear current_it_mask at the end of an IT block. */ 14209 if (current_it_mask == 0x10) 14210 current_it_mask = 0; 14211 14212 if (!(inst.error || inst.relax)) 14213 { 14214 assert (inst.instruction < 0xe800 || inst.instruction > 0xffff); 14215 inst.size = (inst.instruction > 0xffff ? 4 : 2); 14216 if (inst.size_req && inst.size_req != inst.size) 14217 { 14218 as_bad (_("cannot honor width suffix -- `%s'"), str); 14219 return; 14220 } 14221 } 14222 14223 /* Something has gone badly wrong if we try to relax a fixed size 14224 instruction. */ 14225 assert (inst.size_req == 0 || !inst.relax); 14226 14227 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, 14228 *opcode->tvariant); 14229 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly 14230 set those bits when Thumb-2 32-bit instructions are seen. ie. 14231 anything other than bl/blx. 14232 This is overly pessimistic for relaxable instructions. 
*/ 14233 if ((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800) 14234 || inst.relax) 14235 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, 14236 arm_ext_v6t2); 14237 } 14238 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)) 14239 { 14240 /* Check that this instruction is supported for this CPU. */ 14241 if (!opcode->avariant || 14242 !ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)) 14243 { 14244 as_bad (_("selected processor does not support `%s'"), str); 14245 return; 14246 } 14247 if (inst.size_req) 14248 { 14249 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str); 14250 return; 14251 } 14252 14253 mapping_state (MAP_ARM); 14254 inst.instruction = opcode->avalue; 14255 if (opcode->tag == OT_unconditionalF) 14256 inst.instruction |= 0xF << 28; 14257 else 14258 inst.instruction |= inst.cond << 28; 14259 inst.size = INSN_SIZE; 14260 if (!parse_operands (p, opcode->operands)) 14261 opcode->aencode (); 14262 /* Arm mode bx is marked as both v4T and v5 because it's still required 14263 on a hypothetical non-thumb v5 core. */ 14264 if (ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v4t) 14265 || ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v5)) 14266 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t); 14267 else 14268 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, 14269 *opcode->avariant); 14270 } 14271 else 14272 { 14273 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor " 14274 "-- `%s'"), str); 14275 return; 14276 } 14277 output_inst (str); 14278} 14279 14280/* Various frobbings of labels and their addresses. 
*/ 14281 14282void 14283arm_start_line_hook (void) 14284{ 14285 last_label_seen = NULL; 14286} 14287 14288void 14289arm_frob_label (symbolS * sym) 14290{ 14291 last_label_seen = sym; 14292 14293 ARM_SET_THUMB (sym, thumb_mode); 14294 14295#if defined OBJ_COFF || defined OBJ_ELF 14296 ARM_SET_INTERWORK (sym, support_interwork); 14297#endif 14298 14299 /* Note - do not allow local symbols (.Lxxx) to be labeled 14300 as Thumb functions. This is because these labels, whilst 14301 they exist inside Thumb code, are not the entry points for 14302 possible ARM->Thumb calls. Also, these labels can be used 14303 as part of a computed goto or switch statement. eg gcc 14304 can generate code that looks like this: 14305 14306 ldr r2, [pc, .Laaa] 14307 lsl r3, r3, #2 14308 ldr r2, [r3, r2] 14309 mov pc, r2 14310 14311 .Lbbb: .word .Lxxx 14312 .Lccc: .word .Lyyy 14313 ..etc... 14314 .Laaa: .word Lbbb 14315 14316 The first instruction loads the address of the jump table. 14317 The second instruction converts a table index into a byte offset. 14318 The third instruction gets the jump address out of the table. 14319 The fourth instruction performs the jump. 14320 14321 If the address stored at .Laaa is that of a symbol which has the 14322 Thumb_Func bit set, then the linker will arrange for this address 14323 to have the bottom bit set, which in turn would mean that the 14324 address computation performed by the third instruction would end 14325 up with the bottom bit set. Since the ARM is capable of unaligned 14326 word loads, the instruction would then load the incorrect address 14327 out of the jump table, and chaos would ensue. */ 14328 if (label_is_thumb_function_name 14329 && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L') 14330 && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0) 14331 { 14332 /* When the address of a Thumb function is taken the bottom 14333 bit of that address should be set. 
This will allow 14334 interworking between Arm and Thumb functions to work 14335 correctly. */ 14336 14337 THUMB_SET_FUNC (sym, 1); 14338 14339 label_is_thumb_function_name = FALSE; 14340 } 14341 14342 dwarf2_emit_label (sym); 14343} 14344 14345int 14346arm_data_in_code (void) 14347{ 14348 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5)) 14349 { 14350 *input_line_pointer = '/'; 14351 input_line_pointer += 5; 14352 *input_line_pointer = 0; 14353 return 1; 14354 } 14355 14356 return 0; 14357} 14358 14359char * 14360arm_canonicalize_symbol_name (char * name) 14361{ 14362 int len; 14363 14364 if (thumb_mode && (len = strlen (name)) > 5 14365 && streq (name + len - 5, "/data")) 14366 *(name + len - 5) = 0; 14367 14368 return name; 14369} 14370 14371/* Table of all register names defined by default. The user can 14372 define additional names with .req. Note that all register names 14373 should appear in both upper and lowercase variants. Some registers 14374 also have mixed-case names. 
*/ 14375 14376#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 } 14377#define REGNUM(p,n,t) REGDEF(p##n, n, t) 14378#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t) 14379#define REGSET(p,t) \ 14380 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \ 14381 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \ 14382 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \ 14383 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t) 14384#define REGSETH(p,t) \ 14385 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \ 14386 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \ 14387 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \ 14388 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t) 14389#define REGSET2(p,t) \ 14390 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \ 14391 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \ 14392 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \ 14393 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t) 14394 14395static const struct reg_entry reg_names[] = 14396{ 14397 /* ARM integer registers. */ 14398 REGSET(r, RN), REGSET(R, RN), 14399 14400 /* ATPCS synonyms. */ 14401 REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN), 14402 REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN), 14403 REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN), 14404 14405 REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN), 14406 REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN), 14407 REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN), 14408 14409 /* Well-known aliases. 
*/ 14410 REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN), 14411 REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN), 14412 14413 REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN), 14414 REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN), 14415 14416 /* Coprocessor numbers. */ 14417 REGSET(p, CP), REGSET(P, CP), 14418 14419 /* Coprocessor register numbers. The "cr" variants are for backward 14420 compatibility. */ 14421 REGSET(c, CN), REGSET(C, CN), 14422 REGSET(cr, CN), REGSET(CR, CN), 14423 14424 /* FPA registers. */ 14425 REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN), 14426 REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN), 14427 14428 REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN), 14429 REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN), 14430 14431 /* VFP SP registers. */ 14432 REGSET(s,VFS), REGSET(S,VFS), 14433 REGSETH(s,VFS), REGSETH(S,VFS), 14434 14435 /* VFP DP Registers. */ 14436 REGSET(d,VFD), REGSET(D,VFD), 14437 /* Extra Neon DP registers. */ 14438 REGSETH(d,VFD), REGSETH(D,VFD), 14439 14440 /* Neon QP registers. */ 14441 REGSET2(q,NQ), REGSET2(Q,NQ), 14442 14443 /* VFP control registers. */ 14444 REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC), 14445 REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC), 14446 REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC), 14447 REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC), 14448 REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC), 14449 REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC), 14450 14451 /* Maverick DSP coprocessor registers. 
*/ 14452 REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX), 14453 REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX), 14454 14455 REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX), 14456 REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX), 14457 REGDEF(dspsc,0,DSPSC), 14458 14459 REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX), 14460 REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX), 14461 REGDEF(DSPSC,0,DSPSC), 14462 14463 /* iWMMXt data registers - p0, c0-15. */ 14464 REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR), 14465 14466 /* iWMMXt control registers - p1, c0-3. */ 14467 REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC), 14468 REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC), 14469 REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC), 14470 REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC), 14471 14472 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */ 14473 REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG), 14474 REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG), 14475 REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG), 14476 REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG), 14477 14478 /* XScale accumulator registers. */ 14479 REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE), 14480}; 14481#undef REGDEF 14482#undef REGNUM 14483#undef REGSET 14484 14485/* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled 14486 within psr_required_here. */ 14487static const struct asm_psr psrs[] = 14488{ 14489 /* Backward compatibility notation. Note that "all" is no longer 14490 truly all possible PSR bits. */ 14491 {"all", PSR_c | PSR_f}, 14492 {"flg", PSR_f}, 14493 {"ctl", PSR_c}, 14494 14495 /* Individual flags. */ 14496 {"f", PSR_f}, 14497 {"c", PSR_c}, 14498 {"x", PSR_x}, 14499 {"s", PSR_s}, 14500 /* Combinations of flags. 
*/
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};

/* Table of V7M psr names.  */
static const struct asm_psr v7m_psrs[] =
{
  {"apsr",	  0 }, {"APSR",		0 },
  {"iapsr",	  1 }, {"IAPSR",	1 },
  {"eapsr",	  2 }, {"EAPSR",	2 },
  {"psr",	  3 }, {"PSR",		3 },
  {"xpsr",	  3 }, {"XPSR",		3 }, {"xPSR",	  3 },
  {"ipsr",	  5 }, {"IPSR",		5 },
  {"epsr",	  6 }, {"EPSR",		6 },
  {"iepsr",	  7 }, {"IEPSR",	7 },
  {"msp",	  8 }, {"MSP",		8 },
  {"psp",	  9 }, {"PSP",		9 },
  {"primask",	  16}, {"PRIMASK",	16},
  {"basepri",	  17}, {"BASEPRI",	17},
  {"basepri_max", 18}, {"BASEPRI_MAX",	18},
  {"faultmask",	  19}, {"FAULTMASK",	19},
  {"control",	  20}, {"CONTROL",	20}
};

/* Table of all shift-in-operand names.	 */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX }
};

/* Table of all explicit relocation names.
*/ 14595#ifdef OBJ_ELF 14596static struct reloc_entry reloc_names[] = 14597{ 14598 { "got", BFD_RELOC_ARM_GOT32 }, { "GOT", BFD_RELOC_ARM_GOT32 }, 14599 { "gotoff", BFD_RELOC_ARM_GOTOFF }, { "GOTOFF", BFD_RELOC_ARM_GOTOFF }, 14600 { "plt", BFD_RELOC_ARM_PLT32 }, { "PLT", BFD_RELOC_ARM_PLT32 }, 14601 { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 }, 14602 { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 }, 14603 { "sbrel", BFD_RELOC_ARM_SBREL32 }, { "SBREL", BFD_RELOC_ARM_SBREL32 }, 14604 { "tlsgd", BFD_RELOC_ARM_TLS_GD32}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32}, 14605 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32}, 14606 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32}, 14607 { "gottpoff",BFD_RELOC_ARM_TLS_IE32}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32}, 14608 { "tpoff", BFD_RELOC_ARM_TLS_LE32}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32} 14609}; 14610#endif 14611 14612/* Table of all conditional affixes. 0xF is not defined as a condition code. */ 14613static const struct asm_cond conds[] = 14614{ 14615 {"eq", 0x0}, 14616 {"ne", 0x1}, 14617 {"cs", 0x2}, {"hs", 0x2}, 14618 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3}, 14619 {"mi", 0x4}, 14620 {"pl", 0x5}, 14621 {"vs", 0x6}, 14622 {"vc", 0x7}, 14623 {"hi", 0x8}, 14624 {"ls", 0x9}, 14625 {"ge", 0xa}, 14626 {"lt", 0xb}, 14627 {"gt", 0xc}, 14628 {"le", 0xd}, 14629 {"al", 0xe} 14630}; 14631 14632static struct asm_barrier_opt barrier_opt_names[] = 14633{ 14634 { "sy", 0xf }, 14635 { "un", 0x7 }, 14636 { "st", 0xe }, 14637 { "unst", 0x6 } 14638}; 14639 14640/* Table of ARM-format instructions. */ 14641 14642/* Macros for gluing together operand strings. N.B. In all cases 14643 other than OPS0, the trailing OP_stop comes from default 14644 zero-initialization of the unspecified elements of the array. 
*/ 14645#define OPS0() { OP_stop, } 14646#define OPS1(a) { OP_##a, } 14647#define OPS2(a,b) { OP_##a,OP_##b, } 14648#define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, } 14649#define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, } 14650#define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, } 14651#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, } 14652 14653/* These macros abstract out the exact format of the mnemonic table and 14654 save some repeated characters. */ 14655 14656/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */ 14657#define TxCE(mnem, op, top, nops, ops, ae, te) \ 14658 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \ 14659 THUMB_VARIANT, do_##ae, do_##te } 14660 14661/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for 14662 a T_MNEM_xyz enumerator. */ 14663#define TCE(mnem, aop, top, nops, ops, ae, te) \ 14664 TxCE(mnem, aop, 0x##top, nops, ops, ae, te) 14665#define tCE(mnem, aop, top, nops, ops, ae, te) \ 14666 TxCE(mnem, aop, T_MNEM_##top, nops, ops, ae, te) 14667 14668/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional 14669 infix after the third character. 
*/ 14670#define TxC3(mnem, op, top, nops, ops, ae, te) \ 14671 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \ 14672 THUMB_VARIANT, do_##ae, do_##te } 14673#define TxC3w(mnem, op, top, nops, ops, ae, te) \ 14674 { #mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \ 14675 THUMB_VARIANT, do_##ae, do_##te } 14676#define TC3(mnem, aop, top, nops, ops, ae, te) \ 14677 TxC3(mnem, aop, 0x##top, nops, ops, ae, te) 14678#define TC3w(mnem, aop, top, nops, ops, ae, te) \ 14679 TxC3w(mnem, aop, 0x##top, nops, ops, ae, te) 14680#define tC3(mnem, aop, top, nops, ops, ae, te) \ 14681 TxC3(mnem, aop, T_MNEM_##top, nops, ops, ae, te) 14682#define tC3w(mnem, aop, top, nops, ops, ae, te) \ 14683 TxC3w(mnem, aop, T_MNEM_##top, nops, ops, ae, te) 14684 14685/* Mnemonic with a conditional infix in an unusual place. Each and every variant has to 14686 appear in the condition table. */ 14687#define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \ 14688 { #m1 #m2 #m3, OPS##nops ops, sizeof(#m2) == 1 ? 
OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \ 14689 0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te } 14690 14691#define TxCM(m1, m2, op, top, nops, ops, ae, te) \ 14692 TxCM_(m1, , m2, op, top, nops, ops, ae, te), \ 14693 TxCM_(m1, eq, m2, op, top, nops, ops, ae, te), \ 14694 TxCM_(m1, ne, m2, op, top, nops, ops, ae, te), \ 14695 TxCM_(m1, cs, m2, op, top, nops, ops, ae, te), \ 14696 TxCM_(m1, hs, m2, op, top, nops, ops, ae, te), \ 14697 TxCM_(m1, cc, m2, op, top, nops, ops, ae, te), \ 14698 TxCM_(m1, ul, m2, op, top, nops, ops, ae, te), \ 14699 TxCM_(m1, lo, m2, op, top, nops, ops, ae, te), \ 14700 TxCM_(m1, mi, m2, op, top, nops, ops, ae, te), \ 14701 TxCM_(m1, pl, m2, op, top, nops, ops, ae, te), \ 14702 TxCM_(m1, vs, m2, op, top, nops, ops, ae, te), \ 14703 TxCM_(m1, vc, m2, op, top, nops, ops, ae, te), \ 14704 TxCM_(m1, hi, m2, op, top, nops, ops, ae, te), \ 14705 TxCM_(m1, ls, m2, op, top, nops, ops, ae, te), \ 14706 TxCM_(m1, ge, m2, op, top, nops, ops, ae, te), \ 14707 TxCM_(m1, lt, m2, op, top, nops, ops, ae, te), \ 14708 TxCM_(m1, gt, m2, op, top, nops, ops, ae, te), \ 14709 TxCM_(m1, le, m2, op, top, nops, ops, ae, te), \ 14710 TxCM_(m1, al, m2, op, top, nops, ops, ae, te) 14711 14712#define TCM(m1,m2, aop, top, nops, ops, ae, te) \ 14713 TxCM(m1,m2, aop, 0x##top, nops, ops, ae, te) 14714#define tCM(m1,m2, aop, top, nops, ops, ae, te) \ 14715 TxCM(m1,m2, aop, T_MNEM_##top, nops, ops, ae, te) 14716 14717/* Mnemonic that cannot be conditionalized. The ARM condition-code 14718 field is still 0xE. Many of the Thumb variants can be executed 14719 conditionally, so this is checked separately. */ 14720#define TUE(mnem, op, top, nops, ops, ae, te) \ 14721 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \ 14722 THUMB_VARIANT, do_##ae, do_##te } 14723 14724/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM 14725 condition code field. 
*/ 14726#define TUF(mnem, op, top, nops, ops, ae, te) \ 14727 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \ 14728 THUMB_VARIANT, do_##ae, do_##te } 14729 14730/* ARM-only variants of all the above. */ 14731#define CE(mnem, op, nops, ops, ae) \ 14732 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL } 14733 14734#define C3(mnem, op, nops, ops, ae) \ 14735 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL } 14736 14737/* Legacy mnemonics that always have conditional infix after the third 14738 character. */ 14739#define CL(mnem, op, nops, ops, ae) \ 14740 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \ 14741 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL } 14742 14743/* Coprocessor instructions. Isomorphic between Arm and Thumb-2. */ 14744#define cCE(mnem, op, nops, ops, ae) \ 14745 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae } 14746 14747/* Legacy coprocessor instructions where conditional infix and conditional 14748 suffix are ambiguous. For consistency this includes all FPA instructions, 14749 not just the potentially ambiguous ones. */ 14750#define cCL(mnem, op, nops, ops, ae) \ 14751 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \ 14752 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae } 14753 14754/* Coprocessor, takes either a suffix or a position-3 infix 14755 (for an FPA corner case). */ 14756#define C3E(mnem, op, nops, ops, ae) \ 14757 { #mnem, OPS##nops ops, OT_csuf_or_in3, \ 14758 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae } 14759 14760#define xCM_(m1, m2, m3, op, nops, ops, ae) \ 14761 { #m1 #m2 #m3, OPS##nops ops, \ 14762 sizeof(#m2) == 1 ? 
OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \ 14763 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL } 14764 14765#define CM(m1, m2, op, nops, ops, ae) \ 14766 xCM_(m1, , m2, op, nops, ops, ae), \ 14767 xCM_(m1, eq, m2, op, nops, ops, ae), \ 14768 xCM_(m1, ne, m2, op, nops, ops, ae), \ 14769 xCM_(m1, cs, m2, op, nops, ops, ae), \ 14770 xCM_(m1, hs, m2, op, nops, ops, ae), \ 14771 xCM_(m1, cc, m2, op, nops, ops, ae), \ 14772 xCM_(m1, ul, m2, op, nops, ops, ae), \ 14773 xCM_(m1, lo, m2, op, nops, ops, ae), \ 14774 xCM_(m1, mi, m2, op, nops, ops, ae), \ 14775 xCM_(m1, pl, m2, op, nops, ops, ae), \ 14776 xCM_(m1, vs, m2, op, nops, ops, ae), \ 14777 xCM_(m1, vc, m2, op, nops, ops, ae), \ 14778 xCM_(m1, hi, m2, op, nops, ops, ae), \ 14779 xCM_(m1, ls, m2, op, nops, ops, ae), \ 14780 xCM_(m1, ge, m2, op, nops, ops, ae), \ 14781 xCM_(m1, lt, m2, op, nops, ops, ae), \ 14782 xCM_(m1, gt, m2, op, nops, ops, ae), \ 14783 xCM_(m1, le, m2, op, nops, ops, ae), \ 14784 xCM_(m1, al, m2, op, nops, ops, ae) 14785 14786#define UE(mnem, op, nops, ops, ae) \ 14787 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL } 14788 14789#define UF(mnem, op, nops, ops, ae) \ 14790 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL } 14791 14792/* Neon data-processing. ARM versions are unconditional with cond=0xf. 14793 The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we 14794 use the same encoding function for each. */ 14795#define NUF(mnem, op, nops, ops, enc) \ 14796 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \ 14797 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc } 14798 14799/* Neon data processing, version which indirects through neon_enc_tab for 14800 the various overloaded versions of opcodes. 
*/ 14801#define nUF(mnem, op, nops, ops, enc) \ 14802 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM_##op, N_MNEM_##op, \ 14803 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc } 14804 14805/* Neon insn with conditional suffix for the ARM version, non-overloaded 14806 version. */ 14807#define NCE_tag(mnem, op, nops, ops, enc, tag) \ 14808 { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \ 14809 THUMB_VARIANT, do_##enc, do_##enc } 14810 14811#define NCE(mnem, op, nops, ops, enc) \ 14812 NCE_tag(mnem, op, nops, ops, enc, OT_csuffix) 14813 14814#define NCEF(mnem, op, nops, ops, enc) \ 14815 NCE_tag(mnem, op, nops, ops, enc, OT_csuffixF) 14816 14817/* Neon insn with conditional suffix for the ARM version, overloaded types. */ 14818#define nCE_tag(mnem, op, nops, ops, enc, tag) \ 14819 { #mnem, OPS##nops ops, tag, N_MNEM_##op, N_MNEM_##op, \ 14820 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc } 14821 14822#define nCE(mnem, op, nops, ops, enc) \ 14823 nCE_tag(mnem, op, nops, ops, enc, OT_csuffix) 14824 14825#define nCEF(mnem, op, nops, ops, enc) \ 14826 nCE_tag(mnem, op, nops, ops, enc, OT_csuffixF) 14827 14828#define do_0 0 14829 14830/* Thumb-only, unconditional. */ 14831#define UT(mnem, op, nops, ops, te) TUE(mnem, 0, op, nops, ops, 0, te) 14832 14833static const struct asm_opcode insns[] = 14834{ 14835#define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. 
*/ 14836#define THUMB_VARIANT &arm_ext_v4t 14837 tCE(and, 0000000, and, 3, (RR, oRR, SH), arit, t_arit3c), 14838 tC3(ands, 0100000, ands, 3, (RR, oRR, SH), arit, t_arit3c), 14839 tCE(eor, 0200000, eor, 3, (RR, oRR, SH), arit, t_arit3c), 14840 tC3(eors, 0300000, eors, 3, (RR, oRR, SH), arit, t_arit3c), 14841 tCE(sub, 0400000, sub, 3, (RR, oRR, SH), arit, t_add_sub), 14842 tC3(subs, 0500000, subs, 3, (RR, oRR, SH), arit, t_add_sub), 14843 tCE(add, 0800000, add, 3, (RR, oRR, SHG), arit, t_add_sub), 14844 tC3(adds, 0900000, adds, 3, (RR, oRR, SHG), arit, t_add_sub), 14845 tCE(adc, 0a00000, adc, 3, (RR, oRR, SH), arit, t_arit3c), 14846 tC3(adcs, 0b00000, adcs, 3, (RR, oRR, SH), arit, t_arit3c), 14847 tCE(sbc, 0c00000, sbc, 3, (RR, oRR, SH), arit, t_arit3), 14848 tC3(sbcs, 0d00000, sbcs, 3, (RR, oRR, SH), arit, t_arit3), 14849 tCE(orr, 1800000, orr, 3, (RR, oRR, SH), arit, t_arit3c), 14850 tC3(orrs, 1900000, orrs, 3, (RR, oRR, SH), arit, t_arit3c), 14851 tCE(bic, 1c00000, bic, 3, (RR, oRR, SH), arit, t_arit3), 14852 tC3(bics, 1d00000, bics, 3, (RR, oRR, SH), arit, t_arit3), 14853 14854 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism 14855 for setting PSR flag bits. They are obsolete in V6 and do not 14856 have Thumb equivalents. 
*/ 14857 tCE(tst, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst), 14858 tC3w(tsts, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst), 14859 CL(tstp, 110f000, 2, (RR, SH), cmp), 14860 tCE(cmp, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp), 14861 tC3w(cmps, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp), 14862 CL(cmpp, 150f000, 2, (RR, SH), cmp), 14863 tCE(cmn, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst), 14864 tC3w(cmns, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst), 14865 CL(cmnp, 170f000, 2, (RR, SH), cmp), 14866 14867 tCE(mov, 1a00000, mov, 2, (RR, SH), mov, t_mov_cmp), 14868 tC3(movs, 1b00000, movs, 2, (RR, SH), mov, t_mov_cmp), 14869 tCE(mvn, 1e00000, mvn, 2, (RR, SH), mov, t_mvn_tst), 14870 tC3(mvns, 1f00000, mvns, 2, (RR, SH), mov, t_mvn_tst), 14871 14872 tCE(ldr, 4100000, ldr, 2, (RR, ADDRGLDR),ldst, t_ldst), 14873 tC3(ldrb, 4500000, ldrb, 2, (RR, ADDRGLDR),ldst, t_ldst), 14874 tCE(str, 4000000, str, 2, (RR, ADDRGLDR),ldst, t_ldst), 14875 tC3(strb, 4400000, strb, 2, (RR, ADDRGLDR),ldst, t_ldst), 14876 14877 tCE(stm, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm), 14878 tC3(stmia, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm), 14879 tC3(stmea, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm), 14880 tCE(ldm, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm), 14881 tC3(ldmia, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm), 14882 tC3(ldmfd, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm), 14883 14884 TCE(swi, f000000, df00, 1, (EXPi), swi, t_swi), 14885 TCE(svc, f000000, df00, 1, (EXPi), swi, t_swi), 14886 tCE(b, a000000, b, 1, (EXPr), branch, t_branch), 14887 TCE(bl, b000000, f000f800, 1, (EXPr), bl, t_branch23), 14888 14889 /* Pseudo ops. */ 14890 tCE(adr, 28f0000, adr, 2, (RR, EXP), adr, t_adr), 14891 C3(adrl, 28f0000, 2, (RR, EXP), adrl), 14892 tCE(nop, 1a00000, nop, 1, (oI255c), nop, t_nop), 14893 14894 /* Thumb-compatibility pseudo ops. 
*/ 14895 tCE(lsl, 1a00000, lsl, 3, (RR, oRR, SH), shift, t_shift), 14896 tC3(lsls, 1b00000, lsls, 3, (RR, oRR, SH), shift, t_shift), 14897 tCE(lsr, 1a00020, lsr, 3, (RR, oRR, SH), shift, t_shift), 14898 tC3(lsrs, 1b00020, lsrs, 3, (RR, oRR, SH), shift, t_shift), 14899 tCE(asr, 1a00040, asr, 3, (RR, oRR, SH), shift, t_shift), 14900 tC3(asrs, 1b00040, asrs, 3, (RR, oRR, SH), shift, t_shift), 14901 tCE(ror, 1a00060, ror, 3, (RR, oRR, SH), shift, t_shift), 14902 tC3(rors, 1b00060, rors, 3, (RR, oRR, SH), shift, t_shift), 14903 tCE(neg, 2600000, neg, 2, (RR, RR), rd_rn, t_neg), 14904 tC3(negs, 2700000, negs, 2, (RR, RR), rd_rn, t_neg), 14905 tCE(push, 92d0000, push, 1, (REGLST), push_pop, t_push_pop), 14906 tCE(pop, 8bd0000, pop, 1, (REGLST), push_pop, t_push_pop), 14907 14908 /* These may simplify to neg. */ 14909 TCE(rsb, 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb), 14910 TC3(rsbs, 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb), 14911 14912 TCE(rrx, 1a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rd_rm), 14913 TCE(rrxs, 1b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rd_rm), 14914 14915#undef THUMB_VARIANT 14916#define THUMB_VARIANT &arm_ext_v6 14917 TCE(cpy, 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy), 14918 14919 /* V1 instructions with no Thumb analogue prior to V6T2. 
*/ 14920#undef THUMB_VARIANT 14921#define THUMB_VARIANT &arm_ext_v6t2 14922 TCE(teq, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst), 14923 TC3w(teqs, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst), 14924 CL(teqp, 130f000, 2, (RR, SH), cmp), 14925 14926 TC3(ldrt, 4300000, f8500e00, 2, (RR, ADDR), ldstt, t_ldstt), 14927 TC3(ldrbt, 4700000, f8100e00, 2, (RR, ADDR), ldstt, t_ldstt), 14928 TC3(strt, 4200000, f8400e00, 2, (RR, ADDR), ldstt, t_ldstt), 14929 TC3(strbt, 4600000, f8000e00, 2, (RR, ADDR), ldstt, t_ldstt), 14930 14931 TC3(stmdb, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm), 14932 TC3(stmfd, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm), 14933 14934 TC3(ldmdb, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm), 14935 TC3(ldmea, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm), 14936 14937 /* V1 instructions with no Thumb analogue at all. */ 14938 CE(rsc, 0e00000, 3, (RR, oRR, SH), arit), 14939 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit), 14940 14941 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm), 14942 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm), 14943 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm), 14944 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm), 14945 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm), 14946 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm), 14947 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm), 14948 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm), 14949 14950#undef ARM_VARIANT 14951#define ARM_VARIANT &arm_ext_v2 /* ARM 2 - multiplies. */ 14952#undef THUMB_VARIANT 14953#define THUMB_VARIANT &arm_ext_v4t 14954 tCE(mul, 0000090, mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul), 14955 tC3(muls, 0100090, muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul), 14956 14957#undef THUMB_VARIANT 14958#define THUMB_VARIANT &arm_ext_v6t2 14959 TCE(mla, 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla), 14960 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas), 14961 14962 /* Generic coprocessor instructions. 
*/ 14963 TCE(cdp, e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp), 14964 TCE(ldc, c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc), 14965 TC3(ldcl, c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc), 14966 TCE(stc, c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc), 14967 TC3(stcl, c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc), 14968 TCE(mcr, e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg), 14969 TCE(mrc, e100010, ee100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg), 14970 14971#undef ARM_VARIANT 14972#define ARM_VARIANT &arm_ext_v2s /* ARM 3 - swp instructions. */ 14973 CE(swp, 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn), 14974 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn), 14975 14976#undef ARM_VARIANT 14977#define ARM_VARIANT &arm_ext_v3 /* ARM 6 Status register instructions. */ 14978 TCE(mrs, 10f0000, f3ef8000, 2, (APSR_RR, RVC_PSR), mrs, t_mrs), 14979 TCE(msr, 120f000, f3808000, 2, (RVC_PSR, RR_EXi), msr, t_msr), 14980 14981#undef ARM_VARIANT 14982#define ARM_VARIANT &arm_ext_v3m /* ARM 7M long multiplies. */ 14983 TCE(smull, 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull), 14984 CM(smull,s, 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull), 14985 TCE(umull, 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull), 14986 CM(umull,s, 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull), 14987 TCE(smlal, 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull), 14988 CM(smlal,s, 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull), 14989 TCE(umlal, 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull), 14990 CM(umlal,s, 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull), 14991 14992#undef ARM_VARIANT 14993#define ARM_VARIANT &arm_ext_v4 /* ARM Architecture 4. 
*/ 14994#undef THUMB_VARIANT 14995#define THUMB_VARIANT &arm_ext_v4t 14996 tC3(ldrh, 01000b0, ldrh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst), 14997 tC3(strh, 00000b0, strh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst), 14998 tC3(ldrsh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst), 14999 tC3(ldrsb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst), 15000 tCM(ld,sh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst), 15001 tCM(ld,sb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst), 15002 15003#undef ARM_VARIANT 15004#define ARM_VARIANT &arm_ext_v4t_5 15005 /* ARM Architecture 4T. */ 15006 /* Note: bx (and blx) are required on V5, even if the processor does 15007 not support Thumb. */ 15008 TCE(bx, 12fff10, 4700, 1, (RR), bx, t_bx), 15009 15010#undef ARM_VARIANT 15011#define ARM_VARIANT &arm_ext_v5 /* ARM Architecture 5T. */ 15012#undef THUMB_VARIANT 15013#define THUMB_VARIANT &arm_ext_v5t 15014 /* Note: blx has 2 variants; the .value coded here is for 15015 BLX(2). Only this variant has conditional execution. */ 15016 TCE(blx, 12fff30, 4780, 1, (RR_EXr), blx, t_blx), 15017 TUE(bkpt, 1200070, be00, 1, (oIffffb), bkpt, t_bkpt), 15018 15019#undef THUMB_VARIANT 15020#define THUMB_VARIANT &arm_ext_v6t2 15021 TCE(clz, 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz), 15022 TUF(ldc2, c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc), 15023 TUF(ldc2l, c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc), 15024 TUF(stc2, c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc), 15025 TUF(stc2l, c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc), 15026 TUF(cdp2, e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp), 15027 TUF(mcr2, e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg), 15028 TUF(mrc2, e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg), 15029 15030#undef ARM_VARIANT 15031#define ARM_VARIANT &arm_ext_v5exp /* ARM Architecture 5TExP. 
*/ 15032 TCE(smlabb, 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla), 15033 TCE(smlatb, 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla), 15034 TCE(smlabt, 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla), 15035 TCE(smlatt, 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla), 15036 15037 TCE(smlawb, 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla), 15038 TCE(smlawt, 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla), 15039 15040 TCE(smlalbb, 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal), 15041 TCE(smlaltb, 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal), 15042 TCE(smlalbt, 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal), 15043 TCE(smlaltt, 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal), 15044 15045 TCE(smulbb, 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), 15046 TCE(smultb, 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), 15047 TCE(smulbt, 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), 15048 TCE(smultt, 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), 15049 15050 TCE(smulwb, 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), 15051 TCE(smulwt, 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), 15052 15053 TCE(qadd, 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn), 15054 TCE(qdadd, 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn), 15055 TCE(qsub, 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn), 15056 TCE(qdsub, 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn), 15057 15058#undef ARM_VARIANT 15059#define ARM_VARIANT &arm_ext_v5e /* ARM Architecture 5TE. 
*/ 15060 TUF(pld, 450f000, f810f000, 1, (ADDR), pld, t_pld), 15061 TC3(ldrd, 00000d0, e8500000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd), 15062 TC3(strd, 00000f0, e8400000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd), 15063 15064 TCE(mcrr, c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c), 15065 TCE(mrrc, c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c), 15066 15067#undef ARM_VARIANT 15068#define ARM_VARIANT &arm_ext_v5j /* ARM Architecture 5TEJ. */ 15069 TCE(bxj, 12fff20, f3c08f00, 1, (RR), bxj, t_bxj), 15070 15071#undef ARM_VARIANT 15072#define ARM_VARIANT &arm_ext_v6 /* ARM V6. */ 15073#undef THUMB_VARIANT 15074#define THUMB_VARIANT &arm_ext_v6 15075 TUF(cpsie, 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi), 15076 TUF(cpsid, 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi), 15077 tCE(rev, 6bf0f30, rev, 2, (RRnpc, RRnpc), rd_rm, t_rev), 15078 tCE(rev16, 6bf0fb0, rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev), 15079 tCE(revsh, 6ff0fb0, revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev), 15080 tCE(sxth, 6bf0070, sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth), 15081 tCE(uxth, 6ff0070, uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth), 15082 tCE(sxtb, 6af0070, sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth), 15083 tCE(uxtb, 6ef0070, uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth), 15084 TUF(setend, 1010000, b650, 1, (ENDI), setend, t_setend), 15085 15086#undef THUMB_VARIANT 15087#define THUMB_VARIANT &arm_ext_v6t2 15088 TCE(ldrex, 1900f9f, e8500f00, 2, (RRnpc, ADDR), ldrex, t_ldrex), 15089 TCE(strex, 1800f90, e8400000, 3, (RRnpc, RRnpc, ADDR), strex, t_strex), 15090 TUF(mcrr2, c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c), 15091 TUF(mrrc2, c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c), 15092 15093 TCE(ssat, 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat), 15094 TCE(usat, 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat), 15095 15096/* ARM V6 not included in 
V7M (eg. integer SIMD). */ 15097#undef THUMB_VARIANT 15098#define THUMB_VARIANT &arm_ext_v6_notm 15099 TUF(cps, 1020000, f3af8100, 1, (I31b), imm0, t_cps), 15100 TCE(pkhbt, 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt), 15101 TCE(pkhtb, 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb), 15102 TCE(qadd16, 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15103 TCE(qadd8, 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15104 TCE(qaddsubx, 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15105 TCE(qsub16, 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15106 TCE(qsub8, 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15107 TCE(qsubaddx, 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15108 TCE(sadd16, 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15109 TCE(sadd8, 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15110 TCE(saddsubx, 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15111 TCE(shadd16, 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15112 TCE(shadd8, 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15113 TCE(shaddsubx, 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15114 TCE(shsub16, 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15115 TCE(shsub8, 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15116 TCE(shsubaddx, 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15117 TCE(ssub16, 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15118 TCE(ssub8, 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15119 TCE(ssubaddx, 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15120 TCE(uadd16, 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15121 TCE(uadd8, 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15122 TCE(uaddsubx, 
6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15123 TCE(uhadd16, 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15124 TCE(uhadd8, 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15125 TCE(uhaddsubx, 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15126 TCE(uhsub16, 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15127 TCE(uhsub8, 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15128 TCE(uhsubaddx, 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15129 TCE(uqadd16, 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15130 TCE(uqadd8, 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15131 TCE(uqaddsubx, 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15132 TCE(uqsub16, 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15133 TCE(uqsub8, 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15134 TCE(uqsubaddx, 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15135 TCE(usub16, 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15136 TCE(usub8, 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15137 TCE(usubaddx, 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15138 TUF(rfeia, 8900a00, e990c000, 1, (RRw), rfe, rfe), 15139 UF(rfeib, 9900a00, 1, (RRw), rfe), 15140 UF(rfeda, 8100a00, 1, (RRw), rfe), 15141 TUF(rfedb, 9100a00, e810c000, 1, (RRw), rfe, rfe), 15142 TUF(rfefd, 8900a00, e990c000, 1, (RRw), rfe, rfe), 15143 UF(rfefa, 9900a00, 1, (RRw), rfe), 15144 UF(rfeea, 8100a00, 1, (RRw), rfe), 15145 TUF(rfeed, 9100a00, e810c000, 1, (RRw), rfe, rfe), 15146 TCE(sxtah, 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah), 15147 TCE(sxtab16, 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah), 15148 TCE(sxtab, 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah), 15149 TCE(sxtb16, 68f0070, fa2ff080, 3, 
(RRnpc, RRnpc, oROR), sxth, t_sxth), 15150 TCE(uxtah, 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah), 15151 TCE(uxtab16, 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah), 15152 TCE(uxtab, 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah), 15153 TCE(uxtb16, 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth), 15154 TCE(sel, 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 15155 TCE(smlad, 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla), 15156 TCE(smladx, 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla), 15157 TCE(smlald, 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal), 15158 TCE(smlaldx, 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal), 15159 TCE(smlsd, 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla), 15160 TCE(smlsdx, 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla), 15161 TCE(smlsld, 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal), 15162 TCE(smlsldx, 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal), 15163 TCE(smmla, 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla), 15164 TCE(smmlar, 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla), 15165 TCE(smmls, 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla), 15166 TCE(smmlsr, 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla), 15167 TCE(smmul, 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), 15168 TCE(smmulr, 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), 15169 TCE(smuad, 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), 15170 TCE(smuadx, 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), 15171 TCE(smusd, 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), 15172 TCE(smusdx, 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), 15173 TUF(srsia, 8c00500, e980c000, 2, (oRRw, I31w), srs, srs), 15174 UF(srsib, 
9c00500, 2, (oRRw, I31w), srs), 15175 UF(srsda, 8400500, 2, (oRRw, I31w), srs), 15176 TUF(srsdb, 9400500, e800c000, 2, (oRRw, I31w), srs, srs), 15177 TCE(ssat16, 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16), 15178 TCE(umaal, 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal), 15179 TCE(usad8, 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), 15180 TCE(usada8, 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla), 15181 TCE(usat16, 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16), 15182 15183#undef ARM_VARIANT 15184#define ARM_VARIANT &arm_ext_v6k 15185#undef THUMB_VARIANT 15186#define THUMB_VARIANT &arm_ext_v6k 15187 tCE(yield, 320f001, yield, 0, (), noargs, t_hint), 15188 tCE(wfe, 320f002, wfe, 0, (), noargs, t_hint), 15189 tCE(wfi, 320f003, wfi, 0, (), noargs, t_hint), 15190 tCE(sev, 320f004, sev, 0, (), noargs, t_hint), 15191 15192#undef THUMB_VARIANT 15193#define THUMB_VARIANT &arm_ext_v6_notm 15194 TCE(ldrexd, 1b00f9f, e8d0007f, 3, (RRnpc, oRRnpc, RRnpcb), ldrexd, t_ldrexd), 15195 TCE(strexd, 1a00f90, e8c00070, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb), strexd, t_strexd), 15196 15197#undef THUMB_VARIANT 15198#define THUMB_VARIANT &arm_ext_v6t2 15199 TCE(ldrexb, 1d00f9f, e8d00f4f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn), 15200 TCE(ldrexh, 1f00f9f, e8d00f5f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn), 15201 TCE(strexb, 1c00f90, e8c00f40, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn), 15202 TCE(strexh, 1e00f90, e8c00f50, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn), 15203 TUF(clrex, 57ff01f, f3bf8f2f, 0, (), noargs, noargs), 15204 15205#undef ARM_VARIANT 15206#define ARM_VARIANT &arm_ext_v6z 15207 TCE(smc, 1600070, f7f08000, 1, (EXPi), smc, t_smc), 15208 15209#undef ARM_VARIANT 15210#define ARM_VARIANT &arm_ext_v6t2 15211 TCE(bfc, 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc), 15212 TCE(bfi, 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi), 15213 TCE(sbfx, 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx), 
15214 TCE(ubfx, 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx), 15215 15216 TCE(mls, 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla), 15217 TCE(movw, 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16), 15218 TCE(movt, 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16), 15219 TCE(rbit, 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit), 15220 15221 TC3(ldrht, 03000b0, f8300e00, 2, (RR, ADDR), ldsttv4, t_ldstt), 15222 TC3(ldrsht, 03000f0, f9300e00, 2, (RR, ADDR), ldsttv4, t_ldstt), 15223 TC3(ldrsbt, 03000d0, f9100e00, 2, (RR, ADDR), ldsttv4, t_ldstt), 15224 TC3(strht, 02000b0, f8200e00, 2, (RR, ADDR), ldsttv4, t_ldstt), 15225 15226 UT(cbnz, b900, 2, (RR, EXP), t_cbz), 15227 UT(cbz, b100, 2, (RR, EXP), t_cbz), 15228 /* ARM does not really have an IT instruction, so always allow it. */ 15229#undef ARM_VARIANT 15230#define ARM_VARIANT &arm_ext_v1 15231 TUE(it, 0, bf08, 1, (COND), it, t_it), 15232 TUE(itt, 0, bf0c, 1, (COND), it, t_it), 15233 TUE(ite, 0, bf04, 1, (COND), it, t_it), 15234 TUE(ittt, 0, bf0e, 1, (COND), it, t_it), 15235 TUE(itet, 0, bf06, 1, (COND), it, t_it), 15236 TUE(itte, 0, bf0a, 1, (COND), it, t_it), 15237 TUE(itee, 0, bf02, 1, (COND), it, t_it), 15238 TUE(itttt, 0, bf0f, 1, (COND), it, t_it), 15239 TUE(itett, 0, bf07, 1, (COND), it, t_it), 15240 TUE(ittet, 0, bf0b, 1, (COND), it, t_it), 15241 TUE(iteet, 0, bf03, 1, (COND), it, t_it), 15242 TUE(ittte, 0, bf0d, 1, (COND), it, t_it), 15243 TUE(itete, 0, bf05, 1, (COND), it, t_it), 15244 TUE(ittee, 0, bf09, 1, (COND), it, t_it), 15245 TUE(iteee, 0, bf01, 1, (COND), it, t_it), 15246 15247 /* Thumb2 only instructions. */ 15248#undef ARM_VARIANT 15249#define ARM_VARIANT NULL 15250 15251 TCE(addw, 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w), 15252 TCE(subw, 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w), 15253 TCE(tbb, 0, e8d0f000, 1, (TB), 0, t_tb), 15254 TCE(tbh, 0, e8d0f010, 1, (TB), 0, t_tb), 15255 15256 /* Thumb-2 hardware division instructions (R and M profiles only). 
*/ 15257#undef THUMB_VARIANT 15258#define THUMB_VARIANT &arm_ext_div 15259 TCE(sdiv, 0, fb90f0f0, 3, (RR, oRR, RR), 0, t_div), 15260 TCE(udiv, 0, fbb0f0f0, 3, (RR, oRR, RR), 0, t_div), 15261 15262 /* ARM V7 instructions. */ 15263#undef ARM_VARIANT 15264#define ARM_VARIANT &arm_ext_v7 15265#undef THUMB_VARIANT 15266#define THUMB_VARIANT &arm_ext_v7 15267 TUF(pli, 450f000, f910f000, 1, (ADDR), pli, t_pld), 15268 TCE(dbg, 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg), 15269 TUF(dmb, 57ff050, f3bf8f50, 1, (oBARRIER), barrier, t_barrier), 15270 TUF(dsb, 57ff040, f3bf8f40, 1, (oBARRIER), barrier, t_barrier), 15271 TUF(isb, 57ff060, f3bf8f60, 1, (oBARRIER), barrier, t_barrier), 15272 15273#undef ARM_VARIANT 15274#define ARM_VARIANT &fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */ 15275 cCE(wfs, e200110, 1, (RR), rd), 15276 cCE(rfs, e300110, 1, (RR), rd), 15277 cCE(wfc, e400110, 1, (RR), rd), 15278 cCE(rfc, e500110, 1, (RR), rd), 15279 15280 cCL(ldfs, c100100, 2, (RF, ADDRGLDC), rd_cpaddr), 15281 cCL(ldfd, c108100, 2, (RF, ADDRGLDC), rd_cpaddr), 15282 cCL(ldfe, c500100, 2, (RF, ADDRGLDC), rd_cpaddr), 15283 cCL(ldfp, c508100, 2, (RF, ADDRGLDC), rd_cpaddr), 15284 15285 cCL(stfs, c000100, 2, (RF, ADDRGLDC), rd_cpaddr), 15286 cCL(stfd, c008100, 2, (RF, ADDRGLDC), rd_cpaddr), 15287 cCL(stfe, c400100, 2, (RF, ADDRGLDC), rd_cpaddr), 15288 cCL(stfp, c408100, 2, (RF, ADDRGLDC), rd_cpaddr), 15289 15290 cCL(mvfs, e008100, 2, (RF, RF_IF), rd_rm), 15291 cCL(mvfsp, e008120, 2, (RF, RF_IF), rd_rm), 15292 cCL(mvfsm, e008140, 2, (RF, RF_IF), rd_rm), 15293 cCL(mvfsz, e008160, 2, (RF, RF_IF), rd_rm), 15294 cCL(mvfd, e008180, 2, (RF, RF_IF), rd_rm), 15295 cCL(mvfdp, e0081a0, 2, (RF, RF_IF), rd_rm), 15296 cCL(mvfdm, e0081c0, 2, (RF, RF_IF), rd_rm), 15297 cCL(mvfdz, e0081e0, 2, (RF, RF_IF), rd_rm), 15298 cCL(mvfe, e088100, 2, (RF, RF_IF), rd_rm), 15299 cCL(mvfep, e088120, 2, (RF, RF_IF), rd_rm), 15300 cCL(mvfem, e088140, 2, (RF, RF_IF), rd_rm), 15301 cCL(mvfez, e088160, 2, (RF, RF_IF), rd_rm), 
15302 15303 cCL(mnfs, e108100, 2, (RF, RF_IF), rd_rm), 15304 cCL(mnfsp, e108120, 2, (RF, RF_IF), rd_rm), 15305 cCL(mnfsm, e108140, 2, (RF, RF_IF), rd_rm), 15306 cCL(mnfsz, e108160, 2, (RF, RF_IF), rd_rm), 15307 cCL(mnfd, e108180, 2, (RF, RF_IF), rd_rm), 15308 cCL(mnfdp, e1081a0, 2, (RF, RF_IF), rd_rm), 15309 cCL(mnfdm, e1081c0, 2, (RF, RF_IF), rd_rm), 15310 cCL(mnfdz, e1081e0, 2, (RF, RF_IF), rd_rm), 15311 cCL(mnfe, e188100, 2, (RF, RF_IF), rd_rm), 15312 cCL(mnfep, e188120, 2, (RF, RF_IF), rd_rm), 15313 cCL(mnfem, e188140, 2, (RF, RF_IF), rd_rm), 15314 cCL(mnfez, e188160, 2, (RF, RF_IF), rd_rm), 15315 15316 cCL(abss, e208100, 2, (RF, RF_IF), rd_rm), 15317 cCL(abssp, e208120, 2, (RF, RF_IF), rd_rm), 15318 cCL(abssm, e208140, 2, (RF, RF_IF), rd_rm), 15319 cCL(abssz, e208160, 2, (RF, RF_IF), rd_rm), 15320 cCL(absd, e208180, 2, (RF, RF_IF), rd_rm), 15321 cCL(absdp, e2081a0, 2, (RF, RF_IF), rd_rm), 15322 cCL(absdm, e2081c0, 2, (RF, RF_IF), rd_rm), 15323 cCL(absdz, e2081e0, 2, (RF, RF_IF), rd_rm), 15324 cCL(abse, e288100, 2, (RF, RF_IF), rd_rm), 15325 cCL(absep, e288120, 2, (RF, RF_IF), rd_rm), 15326 cCL(absem, e288140, 2, (RF, RF_IF), rd_rm), 15327 cCL(absez, e288160, 2, (RF, RF_IF), rd_rm), 15328 15329 cCL(rnds, e308100, 2, (RF, RF_IF), rd_rm), 15330 cCL(rndsp, e308120, 2, (RF, RF_IF), rd_rm), 15331 cCL(rndsm, e308140, 2, (RF, RF_IF), rd_rm), 15332 cCL(rndsz, e308160, 2, (RF, RF_IF), rd_rm), 15333 cCL(rndd, e308180, 2, (RF, RF_IF), rd_rm), 15334 cCL(rnddp, e3081a0, 2, (RF, RF_IF), rd_rm), 15335 cCL(rnddm, e3081c0, 2, (RF, RF_IF), rd_rm), 15336 cCL(rnddz, e3081e0, 2, (RF, RF_IF), rd_rm), 15337 cCL(rnde, e388100, 2, (RF, RF_IF), rd_rm), 15338 cCL(rndep, e388120, 2, (RF, RF_IF), rd_rm), 15339 cCL(rndem, e388140, 2, (RF, RF_IF), rd_rm), 15340 cCL(rndez, e388160, 2, (RF, RF_IF), rd_rm), 15341 15342 cCL(sqts, e408100, 2, (RF, RF_IF), rd_rm), 15343 cCL(sqtsp, e408120, 2, (RF, RF_IF), rd_rm), 15344 cCL(sqtsm, e408140, 2, (RF, RF_IF), rd_rm), 15345 cCL(sqtsz, e408160, 2, (RF, 
RF_IF), rd_rm), 15346 cCL(sqtd, e408180, 2, (RF, RF_IF), rd_rm), 15347 cCL(sqtdp, e4081a0, 2, (RF, RF_IF), rd_rm), 15348 cCL(sqtdm, e4081c0, 2, (RF, RF_IF), rd_rm), 15349 cCL(sqtdz, e4081e0, 2, (RF, RF_IF), rd_rm), 15350 cCL(sqte, e488100, 2, (RF, RF_IF), rd_rm), 15351 cCL(sqtep, e488120, 2, (RF, RF_IF), rd_rm), 15352 cCL(sqtem, e488140, 2, (RF, RF_IF), rd_rm), 15353 cCL(sqtez, e488160, 2, (RF, RF_IF), rd_rm), 15354 15355 cCL(logs, e508100, 2, (RF, RF_IF), rd_rm), 15356 cCL(logsp, e508120, 2, (RF, RF_IF), rd_rm), 15357 cCL(logsm, e508140, 2, (RF, RF_IF), rd_rm), 15358 cCL(logsz, e508160, 2, (RF, RF_IF), rd_rm), 15359 cCL(logd, e508180, 2, (RF, RF_IF), rd_rm), 15360 cCL(logdp, e5081a0, 2, (RF, RF_IF), rd_rm), 15361 cCL(logdm, e5081c0, 2, (RF, RF_IF), rd_rm), 15362 cCL(logdz, e5081e0, 2, (RF, RF_IF), rd_rm), 15363 cCL(loge, e588100, 2, (RF, RF_IF), rd_rm), 15364 cCL(logep, e588120, 2, (RF, RF_IF), rd_rm), 15365 cCL(logem, e588140, 2, (RF, RF_IF), rd_rm), 15366 cCL(logez, e588160, 2, (RF, RF_IF), rd_rm), 15367 15368 cCL(lgns, e608100, 2, (RF, RF_IF), rd_rm), 15369 cCL(lgnsp, e608120, 2, (RF, RF_IF), rd_rm), 15370 cCL(lgnsm, e608140, 2, (RF, RF_IF), rd_rm), 15371 cCL(lgnsz, e608160, 2, (RF, RF_IF), rd_rm), 15372 cCL(lgnd, e608180, 2, (RF, RF_IF), rd_rm), 15373 cCL(lgndp, e6081a0, 2, (RF, RF_IF), rd_rm), 15374 cCL(lgndm, e6081c0, 2, (RF, RF_IF), rd_rm), 15375 cCL(lgndz, e6081e0, 2, (RF, RF_IF), rd_rm), 15376 cCL(lgne, e688100, 2, (RF, RF_IF), rd_rm), 15377 cCL(lgnep, e688120, 2, (RF, RF_IF), rd_rm), 15378 cCL(lgnem, e688140, 2, (RF, RF_IF), rd_rm), 15379 cCL(lgnez, e688160, 2, (RF, RF_IF), rd_rm), 15380 15381 cCL(exps, e708100, 2, (RF, RF_IF), rd_rm), 15382 cCL(expsp, e708120, 2, (RF, RF_IF), rd_rm), 15383 cCL(expsm, e708140, 2, (RF, RF_IF), rd_rm), 15384 cCL(expsz, e708160, 2, (RF, RF_IF), rd_rm), 15385 cCL(expd, e708180, 2, (RF, RF_IF), rd_rm), 15386 cCL(expdp, e7081a0, 2, (RF, RF_IF), rd_rm), 15387 cCL(expdm, e7081c0, 2, (RF, RF_IF), rd_rm), 15388 cCL(expdz, e7081e0, 
2, (RF, RF_IF), rd_rm), 15389 cCL(expe, e788100, 2, (RF, RF_IF), rd_rm), 15390 cCL(expep, e788120, 2, (RF, RF_IF), rd_rm), 15391 cCL(expem, e788140, 2, (RF, RF_IF), rd_rm), 15392 cCL(expez, e788160, 2, (RF, RF_IF), rd_rm), /* was "expdz": a typo that duplicated the double-precision round-to-zero entry (e7081e0) and left the extended-precision round-to-zero form of "expe" unencodable; suffix pattern s/sp/sm/sz, d/dp/dm/dz, e/ep/em/ez matches every other FPA monadic group */ 15393 15394 cCL(sins, e808100, 2, (RF, RF_IF), rd_rm), 15395 cCL(sinsp, e808120, 2, (RF, RF_IF), rd_rm), 15396 cCL(sinsm, e808140, 2, (RF, RF_IF), rd_rm), 15397 cCL(sinsz, e808160, 2, (RF, RF_IF), rd_rm), 15398 cCL(sind, e808180, 2, (RF, RF_IF), rd_rm), 15399 cCL(sindp, e8081a0, 2, (RF, RF_IF), rd_rm), 15400 cCL(sindm, e8081c0, 2, (RF, RF_IF), rd_rm), 15401 cCL(sindz, e8081e0, 2, (RF, RF_IF), rd_rm), 15402 cCL(sine, e888100, 2, (RF, RF_IF), rd_rm), 15403 cCL(sinep, e888120, 2, (RF, RF_IF), rd_rm), 15404 cCL(sinem, e888140, 2, (RF, RF_IF), rd_rm), 15405 cCL(sinez, e888160, 2, (RF, RF_IF), rd_rm), 15406 15407 cCL(coss, e908100, 2, (RF, RF_IF), rd_rm), 15408 cCL(cossp, e908120, 2, (RF, RF_IF), rd_rm), 15409 cCL(cossm, e908140, 2, (RF, RF_IF), rd_rm), 15410 cCL(cossz, e908160, 2, (RF, RF_IF), rd_rm), 15411 cCL(cosd, e908180, 2, (RF, RF_IF), rd_rm), 15412 cCL(cosdp, e9081a0, 2, (RF, RF_IF), rd_rm), 15413 cCL(cosdm, e9081c0, 2, (RF, RF_IF), rd_rm), 15414 cCL(cosdz, e9081e0, 2, (RF, RF_IF), rd_rm), 15415 cCL(cose, e988100, 2, (RF, RF_IF), rd_rm), 15416 cCL(cosep, e988120, 2, (RF, RF_IF), rd_rm), 15417 cCL(cosem, e988140, 2, (RF, RF_IF), rd_rm), 15418 cCL(cosez, e988160, 2, (RF, RF_IF), rd_rm), 15419 15420 cCL(tans, ea08100, 2, (RF, RF_IF), rd_rm), 15421 cCL(tansp, ea08120, 2, (RF, RF_IF), rd_rm), 15422 cCL(tansm, ea08140, 2, (RF, RF_IF), rd_rm), 15423 cCL(tansz, ea08160, 2, (RF, RF_IF), rd_rm), 15424 cCL(tand, ea08180, 2, (RF, RF_IF), rd_rm), 15425 cCL(tandp, ea081a0, 2, (RF, RF_IF), rd_rm), 15426 cCL(tandm, ea081c0, 2, (RF, RF_IF), rd_rm), 15427 cCL(tandz, ea081e0, 2, (RF, RF_IF), rd_rm), 15428 cCL(tane, ea88100, 2, (RF, RF_IF), rd_rm), 15429 cCL(tanep, ea88120, 2, (RF, RF_IF), rd_rm), 15430 cCL(tanem, ea88140, 2, (RF, RF_IF), rd_rm), 15431 cCL(tanez,
ea88160, 2, (RF, RF_IF), rd_rm), 15432 15433 cCL(asns, eb08100, 2, (RF, RF_IF), rd_rm), 15434 cCL(asnsp, eb08120, 2, (RF, RF_IF), rd_rm), 15435 cCL(asnsm, eb08140, 2, (RF, RF_IF), rd_rm), 15436 cCL(asnsz, eb08160, 2, (RF, RF_IF), rd_rm), 15437 cCL(asnd, eb08180, 2, (RF, RF_IF), rd_rm), 15438 cCL(asndp, eb081a0, 2, (RF, RF_IF), rd_rm), 15439 cCL(asndm, eb081c0, 2, (RF, RF_IF), rd_rm), 15440 cCL(asndz, eb081e0, 2, (RF, RF_IF), rd_rm), 15441 cCL(asne, eb88100, 2, (RF, RF_IF), rd_rm), 15442 cCL(asnep, eb88120, 2, (RF, RF_IF), rd_rm), 15443 cCL(asnem, eb88140, 2, (RF, RF_IF), rd_rm), 15444 cCL(asnez, eb88160, 2, (RF, RF_IF), rd_rm), 15445 15446 cCL(acss, ec08100, 2, (RF, RF_IF), rd_rm), 15447 cCL(acssp, ec08120, 2, (RF, RF_IF), rd_rm), 15448 cCL(acssm, ec08140, 2, (RF, RF_IF), rd_rm), 15449 cCL(acssz, ec08160, 2, (RF, RF_IF), rd_rm), 15450 cCL(acsd, ec08180, 2, (RF, RF_IF), rd_rm), 15451 cCL(acsdp, ec081a0, 2, (RF, RF_IF), rd_rm), 15452 cCL(acsdm, ec081c0, 2, (RF, RF_IF), rd_rm), 15453 cCL(acsdz, ec081e0, 2, (RF, RF_IF), rd_rm), 15454 cCL(acse, ec88100, 2, (RF, RF_IF), rd_rm), 15455 cCL(acsep, ec88120, 2, (RF, RF_IF), rd_rm), 15456 cCL(acsem, ec88140, 2, (RF, RF_IF), rd_rm), 15457 cCL(acsez, ec88160, 2, (RF, RF_IF), rd_rm), 15458 15459 cCL(atns, ed08100, 2, (RF, RF_IF), rd_rm), 15460 cCL(atnsp, ed08120, 2, (RF, RF_IF), rd_rm), 15461 cCL(atnsm, ed08140, 2, (RF, RF_IF), rd_rm), 15462 cCL(atnsz, ed08160, 2, (RF, RF_IF), rd_rm), 15463 cCL(atnd, ed08180, 2, (RF, RF_IF), rd_rm), 15464 cCL(atndp, ed081a0, 2, (RF, RF_IF), rd_rm), 15465 cCL(atndm, ed081c0, 2, (RF, RF_IF), rd_rm), 15466 cCL(atndz, ed081e0, 2, (RF, RF_IF), rd_rm), 15467 cCL(atne, ed88100, 2, (RF, RF_IF), rd_rm), 15468 cCL(atnep, ed88120, 2, (RF, RF_IF), rd_rm), 15469 cCL(atnem, ed88140, 2, (RF, RF_IF), rd_rm), 15470 cCL(atnez, ed88160, 2, (RF, RF_IF), rd_rm), 15471 15472 cCL(urds, ee08100, 2, (RF, RF_IF), rd_rm), 15473 cCL(urdsp, ee08120, 2, (RF, RF_IF), rd_rm), 15474 cCL(urdsm, ee08140, 2, (RF, RF_IF), rd_rm), 
15475 cCL(urdsz, ee08160, 2, (RF, RF_IF), rd_rm), 15476 cCL(urdd, ee08180, 2, (RF, RF_IF), rd_rm), 15477 cCL(urddp, ee081a0, 2, (RF, RF_IF), rd_rm), 15478 cCL(urddm, ee081c0, 2, (RF, RF_IF), rd_rm), 15479 cCL(urddz, ee081e0, 2, (RF, RF_IF), rd_rm), 15480 cCL(urde, ee88100, 2, (RF, RF_IF), rd_rm), 15481 cCL(urdep, ee88120, 2, (RF, RF_IF), rd_rm), 15482 cCL(urdem, ee88140, 2, (RF, RF_IF), rd_rm), 15483 cCL(urdez, ee88160, 2, (RF, RF_IF), rd_rm), 15484 15485 cCL(nrms, ef08100, 2, (RF, RF_IF), rd_rm), 15486 cCL(nrmsp, ef08120, 2, (RF, RF_IF), rd_rm), 15487 cCL(nrmsm, ef08140, 2, (RF, RF_IF), rd_rm), 15488 cCL(nrmsz, ef08160, 2, (RF, RF_IF), rd_rm), 15489 cCL(nrmd, ef08180, 2, (RF, RF_IF), rd_rm), 15490 cCL(nrmdp, ef081a0, 2, (RF, RF_IF), rd_rm), 15491 cCL(nrmdm, ef081c0, 2, (RF, RF_IF), rd_rm), 15492 cCL(nrmdz, ef081e0, 2, (RF, RF_IF), rd_rm), 15493 cCL(nrme, ef88100, 2, (RF, RF_IF), rd_rm), 15494 cCL(nrmep, ef88120, 2, (RF, RF_IF), rd_rm), 15495 cCL(nrmem, ef88140, 2, (RF, RF_IF), rd_rm), 15496 cCL(nrmez, ef88160, 2, (RF, RF_IF), rd_rm), 15497 15498 cCL(adfs, e000100, 3, (RF, RF, RF_IF), rd_rn_rm), 15499 cCL(adfsp, e000120, 3, (RF, RF, RF_IF), rd_rn_rm), 15500 cCL(adfsm, e000140, 3, (RF, RF, RF_IF), rd_rn_rm), 15501 cCL(adfsz, e000160, 3, (RF, RF, RF_IF), rd_rn_rm), 15502 cCL(adfd, e000180, 3, (RF, RF, RF_IF), rd_rn_rm), 15503 cCL(adfdp, e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm), 15504 cCL(adfdm, e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm), 15505 cCL(adfdz, e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm), 15506 cCL(adfe, e080100, 3, (RF, RF, RF_IF), rd_rn_rm), 15507 cCL(adfep, e080120, 3, (RF, RF, RF_IF), rd_rn_rm), 15508 cCL(adfem, e080140, 3, (RF, RF, RF_IF), rd_rn_rm), 15509 cCL(adfez, e080160, 3, (RF, RF, RF_IF), rd_rn_rm), 15510 15511 cCL(sufs, e200100, 3, (RF, RF, RF_IF), rd_rn_rm), 15512 cCL(sufsp, e200120, 3, (RF, RF, RF_IF), rd_rn_rm), 15513 cCL(sufsm, e200140, 3, (RF, RF, RF_IF), rd_rn_rm), 15514 cCL(sufsz, e200160, 3, (RF, RF, RF_IF), rd_rn_rm), 15515 cCL(sufd, e200180, 3, 
(RF, RF, RF_IF), rd_rn_rm), 15516 cCL(sufdp, e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm), 15517 cCL(sufdm, e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm), 15518 cCL(sufdz, e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm), 15519 cCL(sufe, e280100, 3, (RF, RF, RF_IF), rd_rn_rm), 15520 cCL(sufep, e280120, 3, (RF, RF, RF_IF), rd_rn_rm), 15521 cCL(sufem, e280140, 3, (RF, RF, RF_IF), rd_rn_rm), 15522 cCL(sufez, e280160, 3, (RF, RF, RF_IF), rd_rn_rm), 15523 15524 cCL(rsfs, e300100, 3, (RF, RF, RF_IF), rd_rn_rm), 15525 cCL(rsfsp, e300120, 3, (RF, RF, RF_IF), rd_rn_rm), 15526 cCL(rsfsm, e300140, 3, (RF, RF, RF_IF), rd_rn_rm), 15527 cCL(rsfsz, e300160, 3, (RF, RF, RF_IF), rd_rn_rm), 15528 cCL(rsfd, e300180, 3, (RF, RF, RF_IF), rd_rn_rm), 15529 cCL(rsfdp, e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm), 15530 cCL(rsfdm, e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm), 15531 cCL(rsfdz, e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm), 15532 cCL(rsfe, e380100, 3, (RF, RF, RF_IF), rd_rn_rm), 15533 cCL(rsfep, e380120, 3, (RF, RF, RF_IF), rd_rn_rm), 15534 cCL(rsfem, e380140, 3, (RF, RF, RF_IF), rd_rn_rm), 15535 cCL(rsfez, e380160, 3, (RF, RF, RF_IF), rd_rn_rm), 15536 15537 cCL(mufs, e100100, 3, (RF, RF, RF_IF), rd_rn_rm), 15538 cCL(mufsp, e100120, 3, (RF, RF, RF_IF), rd_rn_rm), 15539 cCL(mufsm, e100140, 3, (RF, RF, RF_IF), rd_rn_rm), 15540 cCL(mufsz, e100160, 3, (RF, RF, RF_IF), rd_rn_rm), 15541 cCL(mufd, e100180, 3, (RF, RF, RF_IF), rd_rn_rm), 15542 cCL(mufdp, e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm), 15543 cCL(mufdm, e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm), 15544 cCL(mufdz, e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm), 15545 cCL(mufe, e180100, 3, (RF, RF, RF_IF), rd_rn_rm), 15546 cCL(mufep, e180120, 3, (RF, RF, RF_IF), rd_rn_rm), 15547 cCL(mufem, e180140, 3, (RF, RF, RF_IF), rd_rn_rm), 15548 cCL(mufez, e180160, 3, (RF, RF, RF_IF), rd_rn_rm), 15549 15550 cCL(dvfs, e400100, 3, (RF, RF, RF_IF), rd_rn_rm), 15551 cCL(dvfsp, e400120, 3, (RF, RF, RF_IF), rd_rn_rm), 15552 cCL(dvfsm, e400140, 3, (RF, RF, RF_IF), rd_rn_rm), 15553 cCL(dvfsz, 
e400160, 3, (RF, RF, RF_IF), rd_rn_rm), 15554 cCL(dvfd, e400180, 3, (RF, RF, RF_IF), rd_rn_rm), 15555 cCL(dvfdp, e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm), 15556 cCL(dvfdm, e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm), 15557 cCL(dvfdz, e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm), 15558 cCL(dvfe, e480100, 3, (RF, RF, RF_IF), rd_rn_rm), 15559 cCL(dvfep, e480120, 3, (RF, RF, RF_IF), rd_rn_rm), 15560 cCL(dvfem, e480140, 3, (RF, RF, RF_IF), rd_rn_rm), 15561 cCL(dvfez, e480160, 3, (RF, RF, RF_IF), rd_rn_rm), 15562 15563 cCL(rdfs, e500100, 3, (RF, RF, RF_IF), rd_rn_rm), 15564 cCL(rdfsp, e500120, 3, (RF, RF, RF_IF), rd_rn_rm), 15565 cCL(rdfsm, e500140, 3, (RF, RF, RF_IF), rd_rn_rm), 15566 cCL(rdfsz, e500160, 3, (RF, RF, RF_IF), rd_rn_rm), 15567 cCL(rdfd, e500180, 3, (RF, RF, RF_IF), rd_rn_rm), 15568 cCL(rdfdp, e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm), 15569 cCL(rdfdm, e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm), 15570 cCL(rdfdz, e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm), 15571 cCL(rdfe, e580100, 3, (RF, RF, RF_IF), rd_rn_rm), 15572 cCL(rdfep, e580120, 3, (RF, RF, RF_IF), rd_rn_rm), 15573 cCL(rdfem, e580140, 3, (RF, RF, RF_IF), rd_rn_rm), 15574 cCL(rdfez, e580160, 3, (RF, RF, RF_IF), rd_rn_rm), 15575 15576 cCL(pows, e600100, 3, (RF, RF, RF_IF), rd_rn_rm), 15577 cCL(powsp, e600120, 3, (RF, RF, RF_IF), rd_rn_rm), 15578 cCL(powsm, e600140, 3, (RF, RF, RF_IF), rd_rn_rm), 15579 cCL(powsz, e600160, 3, (RF, RF, RF_IF), rd_rn_rm), 15580 cCL(powd, e600180, 3, (RF, RF, RF_IF), rd_rn_rm), 15581 cCL(powdp, e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm), 15582 cCL(powdm, e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm), 15583 cCL(powdz, e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm), 15584 cCL(powe, e680100, 3, (RF, RF, RF_IF), rd_rn_rm), 15585 cCL(powep, e680120, 3, (RF, RF, RF_IF), rd_rn_rm), 15586 cCL(powem, e680140, 3, (RF, RF, RF_IF), rd_rn_rm), 15587 cCL(powez, e680160, 3, (RF, RF, RF_IF), rd_rn_rm), 15588 15589 cCL(rpws, e700100, 3, (RF, RF, RF_IF), rd_rn_rm), 15590 cCL(rpwsp, e700120, 3, (RF, RF, RF_IF), rd_rn_rm), 15591 
cCL(rpwsm, e700140, 3, (RF, RF, RF_IF), rd_rn_rm), 15592 cCL(rpwsz, e700160, 3, (RF, RF, RF_IF), rd_rn_rm), 15593 cCL(rpwd, e700180, 3, (RF, RF, RF_IF), rd_rn_rm), 15594 cCL(rpwdp, e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm), 15595 cCL(rpwdm, e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm), 15596 cCL(rpwdz, e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm), 15597 cCL(rpwe, e780100, 3, (RF, RF, RF_IF), rd_rn_rm), 15598 cCL(rpwep, e780120, 3, (RF, RF, RF_IF), rd_rn_rm), 15599 cCL(rpwem, e780140, 3, (RF, RF, RF_IF), rd_rn_rm), 15600 cCL(rpwez, e780160, 3, (RF, RF, RF_IF), rd_rn_rm), 15601 15602 cCL(rmfs, e800100, 3, (RF, RF, RF_IF), rd_rn_rm), 15603 cCL(rmfsp, e800120, 3, (RF, RF, RF_IF), rd_rn_rm), 15604 cCL(rmfsm, e800140, 3, (RF, RF, RF_IF), rd_rn_rm), 15605 cCL(rmfsz, e800160, 3, (RF, RF, RF_IF), rd_rn_rm), 15606 cCL(rmfd, e800180, 3, (RF, RF, RF_IF), rd_rn_rm), 15607 cCL(rmfdp, e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm), 15608 cCL(rmfdm, e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm), 15609 cCL(rmfdz, e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm), 15610 cCL(rmfe, e880100, 3, (RF, RF, RF_IF), rd_rn_rm), 15611 cCL(rmfep, e880120, 3, (RF, RF, RF_IF), rd_rn_rm), 15612 cCL(rmfem, e880140, 3, (RF, RF, RF_IF), rd_rn_rm), 15613 cCL(rmfez, e880160, 3, (RF, RF, RF_IF), rd_rn_rm), 15614 15615 cCL(fmls, e900100, 3, (RF, RF, RF_IF), rd_rn_rm), 15616 cCL(fmlsp, e900120, 3, (RF, RF, RF_IF), rd_rn_rm), 15617 cCL(fmlsm, e900140, 3, (RF, RF, RF_IF), rd_rn_rm), 15618 cCL(fmlsz, e900160, 3, (RF, RF, RF_IF), rd_rn_rm), 15619 cCL(fmld, e900180, 3, (RF, RF, RF_IF), rd_rn_rm), 15620 cCL(fmldp, e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm), 15621 cCL(fmldm, e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm), 15622 cCL(fmldz, e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm), 15623 cCL(fmle, e980100, 3, (RF, RF, RF_IF), rd_rn_rm), 15624 cCL(fmlep, e980120, 3, (RF, RF, RF_IF), rd_rn_rm), 15625 cCL(fmlem, e980140, 3, (RF, RF, RF_IF), rd_rn_rm), 15626 cCL(fmlez, e980160, 3, (RF, RF, RF_IF), rd_rn_rm), 15627 15628 cCL(fdvs, ea00100, 3, (RF, RF, RF_IF), rd_rn_rm), 
15629 cCL(fdvsp, ea00120, 3, (RF, RF, RF_IF), rd_rn_rm), 15630 cCL(fdvsm, ea00140, 3, (RF, RF, RF_IF), rd_rn_rm), 15631 cCL(fdvsz, ea00160, 3, (RF, RF, RF_IF), rd_rn_rm), 15632 cCL(fdvd, ea00180, 3, (RF, RF, RF_IF), rd_rn_rm), 15633 cCL(fdvdp, ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm), 15634 cCL(fdvdm, ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm), 15635 cCL(fdvdz, ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm), 15636 cCL(fdve, ea80100, 3, (RF, RF, RF_IF), rd_rn_rm), 15637 cCL(fdvep, ea80120, 3, (RF, RF, RF_IF), rd_rn_rm), 15638 cCL(fdvem, ea80140, 3, (RF, RF, RF_IF), rd_rn_rm), 15639 cCL(fdvez, ea80160, 3, (RF, RF, RF_IF), rd_rn_rm), 15640 15641 cCL(frds, eb00100, 3, (RF, RF, RF_IF), rd_rn_rm), 15642 cCL(frdsp, eb00120, 3, (RF, RF, RF_IF), rd_rn_rm), 15643 cCL(frdsm, eb00140, 3, (RF, RF, RF_IF), rd_rn_rm), 15644 cCL(frdsz, eb00160, 3, (RF, RF, RF_IF), rd_rn_rm), 15645 cCL(frdd, eb00180, 3, (RF, RF, RF_IF), rd_rn_rm), 15646 cCL(frddp, eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm), 15647 cCL(frddm, eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm), 15648 cCL(frddz, eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm), 15649 cCL(frde, eb80100, 3, (RF, RF, RF_IF), rd_rn_rm), 15650 cCL(frdep, eb80120, 3, (RF, RF, RF_IF), rd_rn_rm), 15651 cCL(frdem, eb80140, 3, (RF, RF, RF_IF), rd_rn_rm), 15652 cCL(frdez, eb80160, 3, (RF, RF, RF_IF), rd_rn_rm), 15653 15654 cCL(pols, ec00100, 3, (RF, RF, RF_IF), rd_rn_rm), 15655 cCL(polsp, ec00120, 3, (RF, RF, RF_IF), rd_rn_rm), 15656 cCL(polsm, ec00140, 3, (RF, RF, RF_IF), rd_rn_rm), 15657 cCL(polsz, ec00160, 3, (RF, RF, RF_IF), rd_rn_rm), 15658 cCL(pold, ec00180, 3, (RF, RF, RF_IF), rd_rn_rm), 15659 cCL(poldp, ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm), 15660 cCL(poldm, ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm), 15661 cCL(poldz, ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm), 15662 cCL(pole, ec80100, 3, (RF, RF, RF_IF), rd_rn_rm), 15663 cCL(polep, ec80120, 3, (RF, RF, RF_IF), rd_rn_rm), 15664 cCL(polem, ec80140, 3, (RF, RF, RF_IF), rd_rn_rm), 15665 cCL(polez, ec80160, 3, (RF, RF, RF_IF), rd_rn_rm), 
15666 15667 cCE(cmf, e90f110, 2, (RF, RF_IF), fpa_cmp), 15668 C3E(cmfe, ed0f110, 2, (RF, RF_IF), fpa_cmp), 15669 cCE(cnf, eb0f110, 2, (RF, RF_IF), fpa_cmp), 15670 C3E(cnfe, ef0f110, 2, (RF, RF_IF), fpa_cmp), 15671 15672 cCL(flts, e000110, 2, (RF, RR), rn_rd), 15673 cCL(fltsp, e000130, 2, (RF, RR), rn_rd), 15674 cCL(fltsm, e000150, 2, (RF, RR), rn_rd), 15675 cCL(fltsz, e000170, 2, (RF, RR), rn_rd), 15676 cCL(fltd, e000190, 2, (RF, RR), rn_rd), 15677 cCL(fltdp, e0001b0, 2, (RF, RR), rn_rd), 15678 cCL(fltdm, e0001d0, 2, (RF, RR), rn_rd), 15679 cCL(fltdz, e0001f0, 2, (RF, RR), rn_rd), 15680 cCL(flte, e080110, 2, (RF, RR), rn_rd), 15681 cCL(fltep, e080130, 2, (RF, RR), rn_rd), 15682 cCL(fltem, e080150, 2, (RF, RR), rn_rd), 15683 cCL(fltez, e080170, 2, (RF, RR), rn_rd), 15684 15685 /* The implementation of the FIX instruction is broken on some 15686 assemblers, in that it accepts a precision specifier as well as a 15687 rounding specifier, despite the fact that this is meaningless. 15688 To be more compatible, we accept it as well, though of course it 15689 does not set any bits. */ 15690 cCE(fix, e100110, 2, (RR, RF), rd_rm), 15691 cCL(fixp, e100130, 2, (RR, RF), rd_rm), 15692 cCL(fixm, e100150, 2, (RR, RF), rd_rm), 15693 cCL(fixz, e100170, 2, (RR, RF), rd_rm), 15694 cCL(fixsp, e100130, 2, (RR, RF), rd_rm), 15695 cCL(fixsm, e100150, 2, (RR, RF), rd_rm), 15696 cCL(fixsz, e100170, 2, (RR, RF), rd_rm), 15697 cCL(fixdp, e100130, 2, (RR, RF), rd_rm), 15698 cCL(fixdm, e100150, 2, (RR, RF), rd_rm), 15699 cCL(fixdz, e100170, 2, (RR, RF), rd_rm), 15700 cCL(fixep, e100130, 2, (RR, RF), rd_rm), 15701 cCL(fixem, e100150, 2, (RR, RF), rd_rm), 15702 cCL(fixez, e100170, 2, (RR, RF), rd_rm), 15703 15704 /* Instructions that were new with the real FPA, call them V2. 
*/ 15705#undef ARM_VARIANT 15706#define ARM_VARIANT &fpu_fpa_ext_v2 15707 cCE(lfm, c100200, 3, (RF, I4b, ADDR), fpa_ldmstm), 15708 cCL(lfmfd, c900200, 3, (RF, I4b, ADDR), fpa_ldmstm), 15709 cCL(lfmea, d100200, 3, (RF, I4b, ADDR), fpa_ldmstm), 15710 cCE(sfm, c000200, 3, (RF, I4b, ADDR), fpa_ldmstm), 15711 cCL(sfmfd, d000200, 3, (RF, I4b, ADDR), fpa_ldmstm), 15712 cCL(sfmea, c800200, 3, (RF, I4b, ADDR), fpa_ldmstm), 15713 15714#undef ARM_VARIANT 15715#define ARM_VARIANT &fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */ 15716 /* Moves and type conversions. */ 15717 cCE(fcpys, eb00a40, 2, (RVS, RVS), vfp_sp_monadic), 15718 cCE(fmrs, e100a10, 2, (RR, RVS), vfp_reg_from_sp), 15719 cCE(fmsr, e000a10, 2, (RVS, RR), vfp_sp_from_reg), 15720 cCE(fmstat, ef1fa10, 0, (), noargs), 15721 cCE(fsitos, eb80ac0, 2, (RVS, RVS), vfp_sp_monadic), 15722 cCE(fuitos, eb80a40, 2, (RVS, RVS), vfp_sp_monadic), 15723 cCE(ftosis, ebd0a40, 2, (RVS, RVS), vfp_sp_monadic), 15724 cCE(ftosizs, ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic), 15725 cCE(ftouis, ebc0a40, 2, (RVS, RVS), vfp_sp_monadic), 15726 cCE(ftouizs, ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic), 15727 cCE(fmrx, ef00a10, 2, (RR, RVC), rd_rn), 15728 cCE(fmxr, ee00a10, 2, (RVC, RR), rn_rd), 15729 15730 /* Memory operations. 
*/ 15731 cCE(flds, d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst), 15732 cCE(fsts, d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst), 15733 cCE(fldmias, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia), 15734 cCE(fldmfds, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia), 15735 cCE(fldmdbs, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb), 15736 cCE(fldmeas, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb), 15737 cCE(fldmiax, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia), 15738 cCE(fldmfdx, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia), 15739 cCE(fldmdbx, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb), 15740 cCE(fldmeax, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb), 15741 cCE(fstmias, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia), 15742 cCE(fstmeas, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia), 15743 cCE(fstmdbs, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb), 15744 cCE(fstmfds, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb), 15745 cCE(fstmiax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia), 15746 cCE(fstmeax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia), 15747 cCE(fstmdbx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb), 15748 cCE(fstmfdx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb), 15749 15750 /* Monadic operations. */ 15751 cCE(fabss, eb00ac0, 2, (RVS, RVS), vfp_sp_monadic), 15752 cCE(fnegs, eb10a40, 2, (RVS, RVS), vfp_sp_monadic), 15753 cCE(fsqrts, eb10ac0, 2, (RVS, RVS), vfp_sp_monadic), 15754 15755 /* Dyadic operations. */ 15756 cCE(fadds, e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic), 15757 cCE(fsubs, e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic), 15758 cCE(fmuls, e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic), 15759 cCE(fdivs, e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic), 15760 cCE(fmacs, e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic), 15761 cCE(fmscs, e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic), 15762 cCE(fnmuls, e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic), 15763 cCE(fnmacs, e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic), 15764 cCE(fnmscs, e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic), 15765 15766 /* Comparisons. 
*/ 15767 cCE(fcmps, eb40a40, 2, (RVS, RVS), vfp_sp_monadic), 15768 cCE(fcmpzs, eb50a40, 1, (RVS), vfp_sp_compare_z), 15769 cCE(fcmpes, eb40ac0, 2, (RVS, RVS), vfp_sp_monadic), 15770 cCE(fcmpezs, eb50ac0, 1, (RVS), vfp_sp_compare_z), 15771 15772#undef ARM_VARIANT 15773#define ARM_VARIANT &fpu_vfp_ext_v1 /* VFP V1 (Double precision). */ 15774 /* Moves and type conversions. */ 15775 cCE(fcpyd, eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm), 15776 cCE(fcvtds, eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt), 15777 cCE(fcvtsd, eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt), 15778 cCE(fmdhr, e200b10, 2, (RVD, RR), vfp_dp_rn_rd), 15779 cCE(fmdlr, e000b10, 2, (RVD, RR), vfp_dp_rn_rd), 15780 cCE(fmrdh, e300b10, 2, (RR, RVD), vfp_dp_rd_rn), 15781 cCE(fmrdl, e100b10, 2, (RR, RVD), vfp_dp_rd_rn), 15782 cCE(fsitod, eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt), 15783 cCE(fuitod, eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt), 15784 cCE(ftosid, ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt), 15785 cCE(ftosizd, ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt), 15786 cCE(ftouid, ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt), 15787 cCE(ftouizd, ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt), 15788 15789 /* Memory operations. */ 15790 cCE(fldd, d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst), 15791 cCE(fstd, d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst), 15792 cCE(fldmiad, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia), 15793 cCE(fldmfdd, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia), 15794 cCE(fldmdbd, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb), 15795 cCE(fldmead, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb), 15796 cCE(fstmiad, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia), 15797 cCE(fstmead, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia), 15798 cCE(fstmdbd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb), 15799 cCE(fstmfdd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb), 15800 15801 /* Monadic operations. 
*/ 15802 cCE(fabsd, eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm), 15803 cCE(fnegd, eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm), 15804 cCE(fsqrtd, eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm), 15805 15806 /* Dyadic operations. */ 15807 cCE(faddd, e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), 15808 cCE(fsubd, e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), 15809 cCE(fmuld, e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), 15810 cCE(fdivd, e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), 15811 cCE(fmacd, e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), 15812 cCE(fmscd, e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), 15813 cCE(fnmuld, e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), 15814 cCE(fnmacd, e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), 15815 cCE(fnmscd, e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), 15816 15817 /* Comparisons. */ 15818 cCE(fcmpd, eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm), 15819 cCE(fcmpzd, eb50b40, 1, (RVD), vfp_dp_rd), 15820 cCE(fcmped, eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm), 15821 cCE(fcmpezd, eb50bc0, 1, (RVD), vfp_dp_rd), 15822 15823#undef ARM_VARIANT 15824#define ARM_VARIANT &fpu_vfp_ext_v2 15825 cCE(fmsrr, c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2), 15826 cCE(fmrrs, c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2), 15827 cCE(fmdrr, c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn), 15828 cCE(fmrrd, c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm), 15829 15830/* Instructions which may belong to either the Neon or VFP instruction sets. 15831 Individual encoder functions perform additional architecture checks. */ 15832#undef ARM_VARIANT 15833#define ARM_VARIANT &fpu_vfp_ext_v1xd 15834#undef THUMB_VARIANT 15835#define THUMB_VARIANT &fpu_vfp_ext_v1xd 15836 /* These mnemonics are unique to VFP. 
*/ 15837 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt), 15838 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div), 15839 nCE(vnmul, vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul), 15840 nCE(vnmla, vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul), 15841 nCE(vnmls, vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul), 15842 nCE(vcmp, vcmp, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp), 15843 nCE(vcmpe, vcmpe, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp), 15844 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push), 15845 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop), 15846 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz), 15847 15848 /* Mnemonics shared by Neon and VFP. */ 15849 nCEF(vmul, vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul), 15850 nCEF(vmla, vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar), 15851 nCEF(vmls, vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar), 15852 15853 nCEF(vadd, vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i), 15854 nCEF(vsub, vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i), 15855 15856 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg), 15857 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg), 15858 15859 NCE(vldm, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm), 15860 NCE(vldmia, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm), 15861 NCE(vldmdb, d100b00, 2, (RRw, VRSDLST), neon_ldm_stm), 15862 NCE(vstm, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm), 15863 NCE(vstmia, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm), 15864 NCE(vstmdb, d000b00, 2, (RRw, VRSDLST), neon_ldm_stm), 15865 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str), 15866 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str), 15867 15868 nCEF(vcvt, vcvt, 3, (RNSDQ, RNSDQ, oI32b), neon_cvt), 15869 15870 /* NOTE: All VMOV encoding is special-cased! 
*/ 15871 NCE(vmov, 0, 1, (VMOV), neon_mov), 15872 NCE(vmovq, 0, 1, (VMOV), neon_mov), 15873 15874#undef THUMB_VARIANT 15875#define THUMB_VARIANT &fpu_neon_ext_v1 15876#undef ARM_VARIANT 15877#define ARM_VARIANT &fpu_neon_ext_v1 15878 /* Data processing with three registers of the same length. */ 15879 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */ 15880 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su), 15881 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su), 15882 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su), 15883 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su), 15884 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su), 15885 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su), 15886 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su), 15887 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su), 15888 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */ 15889 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su), 15890 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su), 15891 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su), 15892 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su), 15893 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl), 15894 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl), 15895 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl), 15896 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl), 15897 /* If not immediate, fall back to neon_dyadic_i64_su. 15898 shl_imm should accept I8 I16 I32 I64, 15899 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */ 15900 nUF(vshl, vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm), 15901 nUF(vshlq, vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm), 15902 nUF(vqshl, vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm), 15903 nUF(vqshlq, vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm), 15904 /* Logic ops, types optional & ignored. 
*/ 15905 nUF(vand, vand, 2, (RNDQ, NILO), neon_logic), 15906 nUF(vandq, vand, 2, (RNQ, NILO), neon_logic), 15907 nUF(vbic, vbic, 2, (RNDQ, NILO), neon_logic), 15908 nUF(vbicq, vbic, 2, (RNQ, NILO), neon_logic), 15909 nUF(vorr, vorr, 2, (RNDQ, NILO), neon_logic), 15910 nUF(vorrq, vorr, 2, (RNQ, NILO), neon_logic), 15911 nUF(vorn, vorn, 2, (RNDQ, NILO), neon_logic), 15912 nUF(vornq, vorn, 2, (RNQ, NILO), neon_logic), 15913 nUF(veor, veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic), 15914 nUF(veorq, veor, 3, (RNQ, oRNQ, RNQ), neon_logic), 15915 /* Bitfield ops, untyped. */ 15916 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield), 15917 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield), 15918 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield), 15919 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield), 15920 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield), 15921 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield), 15922 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */ 15923 nUF(vabd, vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su), 15924 nUF(vabdq, vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su), 15925 nUF(vmax, vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su), 15926 nUF(vmaxq, vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su), 15927 nUF(vmin, vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su), 15928 nUF(vminq, vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su), 15929 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall 15930 back to neon_dyadic_if_su. 
*/ 15931 nUF(vcge, vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp), 15932 nUF(vcgeq, vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp), 15933 nUF(vcgt, vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp), 15934 nUF(vcgtq, vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp), 15935 nUF(vclt, vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv), 15936 nUF(vcltq, vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv), 15937 nUF(vcle, vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv), 15938 nUF(vcleq, vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv), 15939 /* Comparison. Type I8 I16 I32 F32. */ 15940 nUF(vceq, vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq), 15941 nUF(vceqq, vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq), 15942 /* As above, D registers only. */ 15943 nUF(vpmax, vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d), 15944 nUF(vpmin, vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d), 15945 /* Int and float variants, signedness unimportant. */ 15946 nUF(vmlaq, vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar), 15947 nUF(vmlsq, vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar), 15948 nUF(vpadd, vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d), 15949 /* Add/sub take types I8 I16 I32 I64 F32. */ 15950 nUF(vaddq, vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i), 15951 nUF(vsubq, vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i), 15952 /* vtst takes sizes 8, 16, 32. */ 15953 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst), 15954 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst), 15955 /* VMUL takes I8 I16 I32 F32 P8. */ 15956 nUF(vmulq, vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul), 15957 /* VQD{R}MULH takes S16 S32. 
*/ 15958 nUF(vqdmulh, vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh), 15959 nUF(vqdmulhq, vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh), 15960 nUF(vqrdmulh, vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh), 15961 nUF(vqrdmulhq, vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh), 15962 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute), 15963 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute), 15964 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute), 15965 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute), 15966 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv), 15967 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv), 15968 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv), 15969 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv), 15970 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step), 15971 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step), 15972 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step), 15973 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step), 15974 15975 /* Two address, int/float. Types S8 S16 S32 F32. */ 15976 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg), 15977 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg), 15978 15979 /* Data processing with two registers and a shift amount. */ 15980 /* Right shifts, and variants with rounding. 15981 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. 
*/ 15982 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm), 15983 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm), 15984 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm), 15985 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm), 15986 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm), 15987 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm), 15988 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm), 15989 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm), 15990 /* Shift and insert. Sizes accepted 8 16 32 64. */ 15991 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli), 15992 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli), 15993 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri), 15994 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri), 15995 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */ 15996 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm), 15997 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm), 15998 /* Right shift immediate, saturating & narrowing, with rounding variants. 15999 Types accepted S16 S32 S64 U16 U32 U64. */ 16000 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow), 16001 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow), 16002 /* As above, unsigned. Types accepted S16 S32 S64. */ 16003 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u), 16004 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u), 16005 /* Right shift narrowing. Types accepted I16 I32 I64. */ 16006 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow), 16007 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow), 16008 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */ 16009 nUF(vshll, vshll, 3, (RNQ, RND, I32), neon_shll), 16010 /* CVT with optional immediate for fixed-point variant. 
*/ 16011 nUF(vcvtq, vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt), 16012 16013 nUF(vmvn, vmvn, 2, (RNDQ, RNDQ_IMVNb), neon_mvn), 16014 nUF(vmvnq, vmvn, 2, (RNQ, RNDQ_IMVNb), neon_mvn), 16015 16016 /* Data processing, three registers of different lengths. */ 16017 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */ 16018 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal), 16019 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long), 16020 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long), 16021 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long), 16022 /* If not scalar, fall back to neon_dyadic_long. 16023 Vector types as above, scalar types S16 S32 U16 U32. */ 16024 nUF(vmlal, vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long), 16025 nUF(vmlsl, vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long), 16026 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */ 16027 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide), 16028 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide), 16029 /* Dyadic, narrowing insns. Types I16 I32 I64. */ 16030 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow), 16031 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow), 16032 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow), 16033 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow), 16034 /* Saturating doubling multiplies. Types S16 S32. */ 16035 nUF(vqdmlal, vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long), 16036 nUF(vqdmlsl, vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long), 16037 nUF(vqdmull, vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long), 16038 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types 16039 S16 S32 U16 U32. */ 16040 nUF(vmull, vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull), 16041 16042 /* Extract. Size 8. 
*/ 16043 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext), 16044 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext), 16045 16046 /* Two registers, miscellaneous. */ 16047 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */ 16048 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev), 16049 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev), 16050 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev), 16051 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev), 16052 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev), 16053 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev), 16054 /* Vector replicate. Sizes 8 16 32. */ 16055 nCE(vdup, vdup, 2, (RNDQ, RR_RNSC), neon_dup), 16056 nCE(vdupq, vdup, 2, (RNQ, RR_RNSC), neon_dup), 16057 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */ 16058 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl), 16059 /* VMOVN. Types I16 I32 I64. */ 16060 nUF(vmovn, vmovn, 2, (RND, RNQ), neon_movn), 16061 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */ 16062 nUF(vqmovn, vqmovn, 2, (RND, RNQ), neon_qmovn), 16063 /* VQMOVUN. Types S16 S32 S64. */ 16064 nUF(vqmovun, vqmovun, 2, (RND, RNQ), neon_qmovun), 16065 /* VZIP / VUZP. Sizes 8 16 32. */ 16066 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp), 16067 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp), 16068 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp), 16069 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp), 16070 /* VQABS / VQNEG. Types S8 S16 S32. */ 16071 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg), 16072 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg), 16073 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg), 16074 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg), 16075 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. 
*/ 16076 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long), 16077 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long), 16078 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long), 16079 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long), 16080 /* Reciprocal estimates. Types U32 F32. */ 16081 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est), 16082 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est), 16083 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est), 16084 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est), 16085 /* VCLS. Types S8 S16 S32. */ 16086 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls), 16087 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls), 16088 /* VCLZ. Types I8 I16 I32. */ 16089 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz), 16090 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz), 16091 /* VCNT. Size 8. */ 16092 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt), 16093 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt), 16094 /* Two address, untyped. */ 16095 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp), 16096 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp), 16097 /* VTRN. Sizes 8 16 32. */ 16098 nUF(vtrn, vtrn, 2, (RNDQ, RNDQ), neon_trn), 16099 nUF(vtrnq, vtrn, 2, (RNQ, RNQ), neon_trn), 16100 16101 /* Table lookup. Size 8. */ 16102 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx), 16103 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx), 16104 16105#undef THUMB_VARIANT 16106#define THUMB_VARIANT &fpu_vfp_v3_or_neon_ext 16107#undef ARM_VARIANT 16108#define ARM_VARIANT &fpu_vfp_v3_or_neon_ext 16109 /* Neon element/structure load/store. 
*/ 16110 nUF(vld1, vld1, 2, (NSTRLST, ADDR), neon_ldx_stx), 16111 nUF(vst1, vst1, 2, (NSTRLST, ADDR), neon_ldx_stx), 16112 nUF(vld2, vld2, 2, (NSTRLST, ADDR), neon_ldx_stx), 16113 nUF(vst2, vst2, 2, (NSTRLST, ADDR), neon_ldx_stx), 16114 nUF(vld3, vld3, 2, (NSTRLST, ADDR), neon_ldx_stx), 16115 nUF(vst3, vst3, 2, (NSTRLST, ADDR), neon_ldx_stx), 16116 nUF(vld4, vld4, 2, (NSTRLST, ADDR), neon_ldx_stx), 16117 nUF(vst4, vst4, 2, (NSTRLST, ADDR), neon_ldx_stx), 16118 16119#undef THUMB_VARIANT 16120#define THUMB_VARIANT &fpu_vfp_ext_v3 16121#undef ARM_VARIANT 16122#define ARM_VARIANT &fpu_vfp_ext_v3 16123 cCE(fconsts, eb00a00, 2, (RVS, I255), vfp_sp_const), 16124 cCE(fconstd, eb00b00, 2, (RVD, I255), vfp_dp_const), 16125 cCE(fshtos, eba0a40, 2, (RVS, I16z), vfp_sp_conv_16), 16126 cCE(fshtod, eba0b40, 2, (RVD, I16z), vfp_dp_conv_16), 16127 cCE(fsltos, eba0ac0, 2, (RVS, I32), vfp_sp_conv_32), 16128 cCE(fsltod, eba0bc0, 2, (RVD, I32), vfp_dp_conv_32), 16129 cCE(fuhtos, ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16), 16130 cCE(fuhtod, ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16), 16131 cCE(fultos, ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32), 16132 cCE(fultod, ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32), 16133 cCE(ftoshs, ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16), 16134 cCE(ftoshd, ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16), 16135 cCE(ftosls, ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32), 16136 cCE(ftosld, ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32), 16137 cCE(ftouhs, ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16), 16138 cCE(ftouhd, ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16), 16139 cCE(ftouls, ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32), 16140 cCE(ftould, ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32), 16141 16142#undef THUMB_VARIANT 16143#undef ARM_VARIANT 16144#define ARM_VARIANT &arm_cext_xscale /* Intel XScale extensions. 
*/ 16145 cCE(mia, e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia), 16146 cCE(miaph, e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia), 16147 cCE(miabb, e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia), 16148 cCE(miabt, e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia), 16149 cCE(miatb, e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia), 16150 cCE(miatt, e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia), 16151 cCE(mar, c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar), 16152 cCE(mra, c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra), 16153 16154#undef ARM_VARIANT 16155#define ARM_VARIANT &arm_cext_iwmmxt /* Intel Wireless MMX technology. */ 16156 cCE(tandcb, e13f130, 1, (RR), iwmmxt_tandorc), 16157 cCE(tandch, e53f130, 1, (RR), iwmmxt_tandorc), 16158 cCE(tandcw, e93f130, 1, (RR), iwmmxt_tandorc), 16159 cCE(tbcstb, e400010, 2, (RIWR, RR), rn_rd), 16160 cCE(tbcsth, e400050, 2, (RIWR, RR), rn_rd), 16161 cCE(tbcstw, e400090, 2, (RIWR, RR), rn_rd), 16162 cCE(textrcb, e130170, 2, (RR, I7), iwmmxt_textrc), 16163 cCE(textrch, e530170, 2, (RR, I7), iwmmxt_textrc), 16164 cCE(textrcw, e930170, 2, (RR, I7), iwmmxt_textrc), 16165 cCE(textrmub, e100070, 3, (RR, RIWR, I7), iwmmxt_textrm), 16166 cCE(textrmuh, e500070, 3, (RR, RIWR, I7), iwmmxt_textrm), 16167 cCE(textrmuw, e900070, 3, (RR, RIWR, I7), iwmmxt_textrm), 16168 cCE(textrmsb, e100078, 3, (RR, RIWR, I7), iwmmxt_textrm), 16169 cCE(textrmsh, e500078, 3, (RR, RIWR, I7), iwmmxt_textrm), 16170 cCE(textrmsw, e900078, 3, (RR, RIWR, I7), iwmmxt_textrm), 16171 cCE(tinsrb, e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr), 16172 cCE(tinsrh, e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr), 16173 cCE(tinsrw, e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr), 16174 cCE(tmcr, e000110, 2, (RIWC_RIWG, RR), rn_rd), 16175 cCE(tmcrr, c400000, 3, (RIWR, RR, RR), rm_rd_rn), 16176 cCE(tmia, e200010, 3, (RIWR, RR, RR), iwmmxt_tmia), 16177 cCE(tmiaph, e280010, 3, (RIWR, RR, RR), iwmmxt_tmia), 16178 cCE(tmiabb, e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia), 16179 cCE(tmiabt, e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia), 16180 cCE(tmiatb, 
e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia), 16181 cCE(tmiatt, e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia), 16182 cCE(tmovmskb, e100030, 2, (RR, RIWR), rd_rn), 16183 cCE(tmovmskh, e500030, 2, (RR, RIWR), rd_rn), 16184 cCE(tmovmskw, e900030, 2, (RR, RIWR), rd_rn), 16185 cCE(tmrc, e100110, 2, (RR, RIWC_RIWG), rd_rn), 16186 cCE(tmrrc, c500000, 3, (RR, RR, RIWR), rd_rn_rm), 16187 cCE(torcb, e13f150, 1, (RR), iwmmxt_tandorc), 16188 cCE(torch, e53f150, 1, (RR), iwmmxt_tandorc), 16189 cCE(torcw, e93f150, 1, (RR), iwmmxt_tandorc), 16190 cCE(waccb, e0001c0, 2, (RIWR, RIWR), rd_rn), 16191 cCE(wacch, e4001c0, 2, (RIWR, RIWR), rd_rn), 16192 cCE(waccw, e8001c0, 2, (RIWR, RIWR), rd_rn), 16193 cCE(waddbss, e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16194 cCE(waddb, e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16195 cCE(waddbus, e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16196 cCE(waddhss, e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16197 cCE(waddh, e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16198 cCE(waddhus, e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16199 cCE(waddwss, eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16200 cCE(waddw, e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16201 cCE(waddwus, e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16202 cCE(waligni, e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni), 16203 cCE(walignr0, e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16204 cCE(walignr1, e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16205 cCE(walignr2, ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16206 cCE(walignr3, eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16207 cCE(wand, e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16208 cCE(wandn, e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16209 cCE(wavg2b, e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16210 cCE(wavg2br, e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16211 cCE(wavg2h, ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16212 cCE(wavg2hr, ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16213 cCE(wcmpeqb, e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16214 cCE(wcmpeqh, 
e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16215 cCE(wcmpeqw, e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16216 cCE(wcmpgtub, e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16217 cCE(wcmpgtuh, e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16218 cCE(wcmpgtuw, e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16219 cCE(wcmpgtsb, e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16220 cCE(wcmpgtsh, e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16221 cCE(wcmpgtsw, eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16222 cCE(wldrb, c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh), 16223 cCE(wldrh, c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh), 16224 cCE(wldrw, c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw), 16225 cCE(wldrd, c500100, 2, (RIWR, ADDR), iwmmxt_wldstd), 16226 cCE(wmacs, e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16227 cCE(wmacsz, e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16228 cCE(wmacu, e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16229 cCE(wmacuz, e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16230 cCE(wmadds, ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16231 cCE(wmaddu, e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16232 cCE(wmaxsb, e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16233 cCE(wmaxsh, e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16234 cCE(wmaxsw, ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16235 cCE(wmaxub, e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16236 cCE(wmaxuh, e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16237 cCE(wmaxuw, e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16238 cCE(wminsb, e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16239 cCE(wminsh, e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16240 cCE(wminsw, eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16241 cCE(wminub, e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16242 cCE(wminuh, e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16243 cCE(wminuw, e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16244 cCE(wmov, e000000, 2, (RIWR, RIWR), iwmmxt_wmov), 16245 cCE(wmulsm, e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16246 cCE(wmulsl, e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 
16247 cCE(wmulum, e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16248 cCE(wmulul, e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16249 cCE(wor, e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16250 cCE(wpackhss, e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16251 cCE(wpackhus, e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16252 cCE(wpackwss, eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16253 cCE(wpackwus, e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16254 cCE(wpackdss, ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16255 cCE(wpackdus, ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16256 cCE(wrorh, e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), 16257 cCE(wrorhg, e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), 16258 cCE(wrorw, eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), 16259 cCE(wrorwg, eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), 16260 cCE(wrord, ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), 16261 cCE(wrordg, ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), 16262 cCE(wsadb, e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16263 cCE(wsadbz, e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16264 cCE(wsadh, e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16265 cCE(wsadhz, e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16266 cCE(wshufh, e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh), 16267 cCE(wsllh, e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), 16268 cCE(wsllhg, e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), 16269 cCE(wsllw, e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), 16270 cCE(wsllwg, e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), 16271 cCE(wslld, ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), 16272 cCE(wslldg, ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), 16273 cCE(wsrah, e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), 16274 cCE(wsrahg, e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), 16275 cCE(wsraw, e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), 16276 cCE(wsrawg, e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), 16277 cCE(wsrad, ec00040, 3, 
(RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), 16278 cCE(wsradg, ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), 16279 cCE(wsrlh, e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), 16280 cCE(wsrlhg, e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), 16281 cCE(wsrlw, ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), 16282 cCE(wsrlwg, ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), 16283 cCE(wsrld, ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), 16284 cCE(wsrldg, ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), 16285 cCE(wstrb, c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh), 16286 cCE(wstrh, c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh), 16287 cCE(wstrw, c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw), 16288 cCE(wstrd, c400100, 2, (RIWR, ADDR), iwmmxt_wldstd), 16289 cCE(wsubbss, e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16290 cCE(wsubb, e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16291 cCE(wsubbus, e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16292 cCE(wsubhss, e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16293 cCE(wsubh, e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16294 cCE(wsubhus, e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16295 cCE(wsubwss, eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16296 cCE(wsubw, e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16297 cCE(wsubwus, e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16298 cCE(wunpckehub,e0000c0, 2, (RIWR, RIWR), rd_rn), 16299 cCE(wunpckehuh,e4000c0, 2, (RIWR, RIWR), rd_rn), 16300 cCE(wunpckehuw,e8000c0, 2, (RIWR, RIWR), rd_rn), 16301 cCE(wunpckehsb,e2000c0, 2, (RIWR, RIWR), rd_rn), 16302 cCE(wunpckehsh,e6000c0, 2, (RIWR, RIWR), rd_rn), 16303 cCE(wunpckehsw,ea000c0, 2, (RIWR, RIWR), rd_rn), 16304 cCE(wunpckihb, e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16305 cCE(wunpckihh, e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16306 cCE(wunpckihw, e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16307 cCE(wunpckelub,e0000e0, 2, (RIWR, RIWR), rd_rn), 16308 cCE(wunpckeluh,e4000e0, 2, (RIWR, RIWR), rd_rn), 16309 cCE(wunpckeluw,e8000e0, 2, (RIWR, RIWR), 
rd_rn), 16310 cCE(wunpckelsb,e2000e0, 2, (RIWR, RIWR), rd_rn), 16311 cCE(wunpckelsh,e6000e0, 2, (RIWR, RIWR), rd_rn), 16312 cCE(wunpckelsw,ea000e0, 2, (RIWR, RIWR), rd_rn), 16313 cCE(wunpckilb, e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16314 cCE(wunpckilh, e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16315 cCE(wunpckilw, e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16316 cCE(wxor, e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16317 cCE(wzero, e300000, 1, (RIWR), iwmmxt_wzero), 16318 16319#undef ARM_VARIANT 16320#define ARM_VARIANT &arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */ 16321 cCE(torvscb, e13f190, 1, (RR), iwmmxt_tandorc), 16322 cCE(torvsch, e53f190, 1, (RR), iwmmxt_tandorc), 16323 cCE(torvscw, e93f190, 1, (RR), iwmmxt_tandorc), 16324 cCE(wabsb, e2001c0, 2, (RIWR, RIWR), rd_rn), 16325 cCE(wabsh, e6001c0, 2, (RIWR, RIWR), rd_rn), 16326 cCE(wabsw, ea001c0, 2, (RIWR, RIWR), rd_rn), 16327 cCE(wabsdiffb, e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16328 cCE(wabsdiffh, e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16329 cCE(wabsdiffw, e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16330 cCE(waddbhusl, e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16331 cCE(waddbhusm, e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16332 cCE(waddhc, e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16333 cCE(waddwc, ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16334 cCE(waddsubhx, ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16335 cCE(wavg4, e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16336 cCE(wavg4r, e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16337 cCE(wmaddsn, ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16338 cCE(wmaddsx, eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16339 cCE(wmaddun, ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16340 cCE(wmaddux, e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16341 cCE(wmerge, e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge), 16342 cCE(wmiabb, e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16343 cCE(wmiabt, e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16344 cCE(wmiatb, 
e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16345 cCE(wmiatt, e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16346 cCE(wmiabbn, e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16347 cCE(wmiabtn, e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16348 cCE(wmiatbn, e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16349 cCE(wmiattn, e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16350 cCE(wmiawbb, e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16351 cCE(wmiawbt, e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16352 cCE(wmiawtb, ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16353 cCE(wmiawtt, eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16354 cCE(wmiawbbn, ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16355 cCE(wmiawbtn, ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16356 cCE(wmiawtbn, ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16357 cCE(wmiawttn, ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16358 cCE(wmulsmr, ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16359 cCE(wmulumr, ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16360 cCE(wmulwumr, ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16361 cCE(wmulwsmr, ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16362 cCE(wmulwum, ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16363 cCE(wmulwsm, ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16364 cCE(wmulwl, eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16365 cCE(wqmiabb, e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16366 cCE(wqmiabt, e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16367 cCE(wqmiatb, ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16368 cCE(wqmiatt, eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16369 cCE(wqmiabbn, ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16370 cCE(wqmiabtn, ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16371 cCE(wqmiatbn, ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16372 cCE(wqmiattn, ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16373 cCE(wqmulm, e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16374 cCE(wqmulmr, e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16375 cCE(wqmulwm, ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16376 cCE(wqmulwmr, 
ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16377 cCE(wsubaddhx, ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 16378 16379#undef ARM_VARIANT 16380#define ARM_VARIANT &arm_cext_maverick /* Cirrus Maverick instructions. */ 16381 cCE(cfldrs, c100400, 2, (RMF, ADDRGLDC), rd_cpaddr), 16382 cCE(cfldrd, c500400, 2, (RMD, ADDRGLDC), rd_cpaddr), 16383 cCE(cfldr32, c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr), 16384 cCE(cfldr64, c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr), 16385 cCE(cfstrs, c000400, 2, (RMF, ADDRGLDC), rd_cpaddr), 16386 cCE(cfstrd, c400400, 2, (RMD, ADDRGLDC), rd_cpaddr), 16387 cCE(cfstr32, c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr), 16388 cCE(cfstr64, c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr), 16389 cCE(cfmvsr, e000450, 2, (RMF, RR), rn_rd), 16390 cCE(cfmvrs, e100450, 2, (RR, RMF), rd_rn), 16391 cCE(cfmvdlr, e000410, 2, (RMD, RR), rn_rd), 16392 cCE(cfmvrdl, e100410, 2, (RR, RMD), rd_rn), 16393 cCE(cfmvdhr, e000430, 2, (RMD, RR), rn_rd), 16394 cCE(cfmvrdh, e100430, 2, (RR, RMD), rd_rn), 16395 cCE(cfmv64lr, e000510, 2, (RMDX, RR), rn_rd), 16396 cCE(cfmvr64l, e100510, 2, (RR, RMDX), rd_rn), 16397 cCE(cfmv64hr, e000530, 2, (RMDX, RR), rn_rd), 16398 cCE(cfmvr64h, e100530, 2, (RR, RMDX), rd_rn), 16399 cCE(cfmval32, e200440, 2, (RMAX, RMFX), rd_rn), 16400 cCE(cfmv32al, e100440, 2, (RMFX, RMAX), rd_rn), 16401 cCE(cfmvam32, e200460, 2, (RMAX, RMFX), rd_rn), 16402 cCE(cfmv32am, e100460, 2, (RMFX, RMAX), rd_rn), 16403 cCE(cfmvah32, e200480, 2, (RMAX, RMFX), rd_rn), 16404 cCE(cfmv32ah, e100480, 2, (RMFX, RMAX), rd_rn), 16405 cCE(cfmva32, e2004a0, 2, (RMAX, RMFX), rd_rn), 16406 cCE(cfmv32a, e1004a0, 2, (RMFX, RMAX), rd_rn), 16407 cCE(cfmva64, e2004c0, 2, (RMAX, RMDX), rd_rn), 16408 cCE(cfmv64a, e1004c0, 2, (RMDX, RMAX), rd_rn), 16409 cCE(cfmvsc32, e2004e0, 2, (RMDS, RMDX), mav_dspsc), 16410 cCE(cfmv32sc, e1004e0, 2, (RMDX, RMDS), rd), 16411 cCE(cfcpys, e000400, 2, (RMF, RMF), rd_rn), 16412 cCE(cfcpyd, e000420, 2, (RMD, RMD), rd_rn), 16413 cCE(cfcvtsd, e000460, 2, (RMD, RMF), rd_rn), 
16414 cCE(cfcvtds, e000440, 2, (RMF, RMD), rd_rn), 16415 cCE(cfcvt32s, e000480, 2, (RMF, RMFX), rd_rn), 16416 cCE(cfcvt32d, e0004a0, 2, (RMD, RMFX), rd_rn), 16417 cCE(cfcvt64s, e0004c0, 2, (RMF, RMDX), rd_rn), 16418 cCE(cfcvt64d, e0004e0, 2, (RMD, RMDX), rd_rn), 16419 cCE(cfcvts32, e100580, 2, (RMFX, RMF), rd_rn), 16420 cCE(cfcvtd32, e1005a0, 2, (RMFX, RMD), rd_rn), 16421 cCE(cftruncs32,e1005c0, 2, (RMFX, RMF), rd_rn), 16422 cCE(cftruncd32,e1005e0, 2, (RMFX, RMD), rd_rn), 16423 cCE(cfrshl32, e000550, 3, (RMFX, RMFX, RR), mav_triple), 16424 cCE(cfrshl64, e000570, 3, (RMDX, RMDX, RR), mav_triple), 16425 cCE(cfsh32, e000500, 3, (RMFX, RMFX, I63s), mav_shift), 16426 cCE(cfsh64, e200500, 3, (RMDX, RMDX, I63s), mav_shift), 16427 cCE(cfcmps, e100490, 3, (RR, RMF, RMF), rd_rn_rm), 16428 cCE(cfcmpd, e1004b0, 3, (RR, RMD, RMD), rd_rn_rm), 16429 cCE(cfcmp32, e100590, 3, (RR, RMFX, RMFX), rd_rn_rm), 16430 cCE(cfcmp64, e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm), 16431 cCE(cfabss, e300400, 2, (RMF, RMF), rd_rn), 16432 cCE(cfabsd, e300420, 2, (RMD, RMD), rd_rn), 16433 cCE(cfnegs, e300440, 2, (RMF, RMF), rd_rn), 16434 cCE(cfnegd, e300460, 2, (RMD, RMD), rd_rn), 16435 cCE(cfadds, e300480, 3, (RMF, RMF, RMF), rd_rn_rm), 16436 cCE(cfaddd, e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm), 16437 cCE(cfsubs, e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm), 16438 cCE(cfsubd, e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm), 16439 cCE(cfmuls, e100400, 3, (RMF, RMF, RMF), rd_rn_rm), 16440 cCE(cfmuld, e100420, 3, (RMD, RMD, RMD), rd_rn_rm), 16441 cCE(cfabs32, e300500, 2, (RMFX, RMFX), rd_rn), 16442 cCE(cfabs64, e300520, 2, (RMDX, RMDX), rd_rn), 16443 cCE(cfneg32, e300540, 2, (RMFX, RMFX), rd_rn), 16444 cCE(cfneg64, e300560, 2, (RMDX, RMDX), rd_rn), 16445 cCE(cfadd32, e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm), 16446 cCE(cfadd64, e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm), 16447 cCE(cfsub32, e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm), 16448 cCE(cfsub64, e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm), 16449 cCE(cfmul32, e100500, 3, 
(RMFX, RMFX, RMFX), rd_rn_rm), 16450 cCE(cfmul64, e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm), 16451 cCE(cfmac32, e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm), 16452 cCE(cfmsc32, e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm), 16453 cCE(cfmadd32, e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad), 16454 cCE(cfmsub32, e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad), 16455 cCE(cfmadda32, e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad), 16456 cCE(cfmsuba32, e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad), 16457}; 16458#undef ARM_VARIANT 16459#undef THUMB_VARIANT 16460#undef TCE 16461#undef TCM 16462#undef TUE 16463#undef TUF 16464#undef TCC 16465#undef cCE 16466#undef cCL 16467#undef C3E 16468#undef CE 16469#undef CM 16470#undef UE 16471#undef UF 16472#undef UT 16473#undef NUF 16474#undef nUF 16475#undef NCE 16476#undef nCE 16477#undef OPS0 16478#undef OPS1 16479#undef OPS2 16480#undef OPS3 16481#undef OPS4 16482#undef OPS5 16483#undef OPS6 16484#undef do_0 16485 16486/* MD interface: bits in the object file. */ 16487 16488/* Turn an integer of n bytes (in val) into a stream of bytes appropriate 16489 for use in the a.out file, and stores them in the array pointed to by buf. 16490 This knows about the endian-ness of the target machine and does 16491 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte) 16492 2 (short) and 4 (long) Floating numbers are put out as a series of 16493 LITTLENUMS (shorts, here at least). 
*/ 16494 16495void 16496md_number_to_chars (char * buf, valueT val, int n) 16497{ 16498 if (target_big_endian) 16499 number_to_chars_bigendian (buf, val, n); 16500 else 16501 number_to_chars_littleendian (buf, val, n); 16502} 16503 16504static valueT 16505md_chars_to_number (char * buf, int n) 16506{ 16507 valueT result = 0; 16508 unsigned char * where = (unsigned char *) buf; 16509 16510 if (target_big_endian) 16511 { 16512 while (n--) 16513 { 16514 result <<= 8; 16515 result |= (*where++ & 255); 16516 } 16517 } 16518 else 16519 { 16520 while (n--) 16521 { 16522 result <<= 8; 16523 result |= (where[n] & 255); 16524 } 16525 } 16526 16527 return result; 16528} 16529 16530/* MD interface: Sections. */ 16531 16532/* Estimate the size of a frag before relaxing. Assume everything fits in 16533 2 bytes. */ 16534 16535int 16536md_estimate_size_before_relax (fragS * fragp, 16537 segT segtype ATTRIBUTE_UNUSED) 16538{ 16539 fragp->fr_var = 2; 16540 return 2; 16541} 16542 16543/* Convert a machine dependent frag. 
*/ 16544 16545void 16546md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp) 16547{ 16548 unsigned long insn; 16549 unsigned long old_op; 16550 char *buf; 16551 expressionS exp; 16552 fixS *fixp; 16553 int reloc_type; 16554 int pc_rel; 16555 int opcode; 16556 16557 buf = fragp->fr_literal + fragp->fr_fix; 16558 16559 old_op = bfd_get_16(abfd, buf); 16560 if (fragp->fr_symbol) { 16561 exp.X_op = O_symbol; 16562 exp.X_add_symbol = fragp->fr_symbol; 16563 } else { 16564 exp.X_op = O_constant; 16565 } 16566 exp.X_add_number = fragp->fr_offset; 16567 opcode = fragp->fr_subtype; 16568 switch (opcode) 16569 { 16570 case T_MNEM_ldr_pc: 16571 case T_MNEM_ldr_pc2: 16572 case T_MNEM_ldr_sp: 16573 case T_MNEM_str_sp: 16574 case T_MNEM_ldr: 16575 case T_MNEM_ldrb: 16576 case T_MNEM_ldrh: 16577 case T_MNEM_str: 16578 case T_MNEM_strb: 16579 case T_MNEM_strh: 16580 if (fragp->fr_var == 4) 16581 { 16582 insn = THUMB_OP32(opcode); 16583 if ((old_op >> 12) == 4 || (old_op >> 12) == 9) 16584 { 16585 insn |= (old_op & 0x700) << 4; 16586 } 16587 else 16588 { 16589 insn |= (old_op & 7) << 12; 16590 insn |= (old_op & 0x38) << 13; 16591 } 16592 insn |= 0x00000c00; 16593 put_thumb32_insn (buf, insn); 16594 reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM; 16595 } 16596 else 16597 { 16598 reloc_type = BFD_RELOC_ARM_THUMB_OFFSET; 16599 } 16600 pc_rel = (opcode == T_MNEM_ldr_pc2); 16601 break; 16602 case T_MNEM_adr: 16603 if (fragp->fr_var == 4) 16604 { 16605 insn = THUMB_OP32 (opcode); 16606 insn |= (old_op & 0xf0) << 4; 16607 put_thumb32_insn (buf, insn); 16608 reloc_type = BFD_RELOC_ARM_T32_ADD_PC12; 16609 } 16610 else 16611 { 16612 reloc_type = BFD_RELOC_ARM_THUMB_ADD; 16613 exp.X_add_number -= 4; 16614 } 16615 pc_rel = 1; 16616 break; 16617 case T_MNEM_mov: 16618 case T_MNEM_movs: 16619 case T_MNEM_cmp: 16620 case T_MNEM_cmn: 16621 if (fragp->fr_var == 4) 16622 { 16623 int r0off = (opcode == T_MNEM_mov 16624 || opcode == T_MNEM_movs) ? 
0 : 8; 16625 insn = THUMB_OP32 (opcode); 16626 insn = (insn & 0xe1ffffff) | 0x10000000; 16627 insn |= (old_op & 0x700) << r0off; 16628 put_thumb32_insn (buf, insn); 16629 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE; 16630 } 16631 else 16632 { 16633 reloc_type = BFD_RELOC_ARM_THUMB_IMM; 16634 } 16635 pc_rel = 0; 16636 break; 16637 case T_MNEM_b: 16638 if (fragp->fr_var == 4) 16639 { 16640 insn = THUMB_OP32(opcode); 16641 put_thumb32_insn (buf, insn); 16642 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25; 16643 } 16644 else 16645 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12; 16646 pc_rel = 1; 16647 break; 16648 case T_MNEM_bcond: 16649 if (fragp->fr_var == 4) 16650 { 16651 insn = THUMB_OP32(opcode); 16652 insn |= (old_op & 0xf00) << 14; 16653 put_thumb32_insn (buf, insn); 16654 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20; 16655 } 16656 else 16657 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9; 16658 pc_rel = 1; 16659 break; 16660 case T_MNEM_add_sp: 16661 case T_MNEM_add_pc: 16662 case T_MNEM_inc_sp: 16663 case T_MNEM_dec_sp: 16664 if (fragp->fr_var == 4) 16665 { 16666 /* ??? Choose between add and addw. 
*/ 16667 insn = THUMB_OP32 (opcode); 16668 insn |= (old_op & 0xf0) << 4; 16669 put_thumb32_insn (buf, insn); 16670 if (opcode == T_MNEM_add_pc) 16671 reloc_type = BFD_RELOC_ARM_T32_IMM12; 16672 else 16673 reloc_type = BFD_RELOC_ARM_T32_ADD_IMM; 16674 } 16675 else 16676 reloc_type = BFD_RELOC_ARM_THUMB_ADD; 16677 pc_rel = 0; 16678 break; 16679 16680 case T_MNEM_addi: 16681 case T_MNEM_addis: 16682 case T_MNEM_subi: 16683 case T_MNEM_subis: 16684 if (fragp->fr_var == 4) 16685 { 16686 insn = THUMB_OP32 (opcode); 16687 insn |= (old_op & 0xf0) << 4; 16688 insn |= (old_op & 0xf) << 16; 16689 put_thumb32_insn (buf, insn); 16690 if (insn & (1 << 20)) 16691 reloc_type = BFD_RELOC_ARM_T32_ADD_IMM; 16692 else 16693 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE; 16694 } 16695 else 16696 reloc_type = BFD_RELOC_ARM_THUMB_ADD; 16697 pc_rel = 0; 16698 break; 16699 default: 16700 abort(); 16701 } 16702 fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel, 16703 reloc_type); 16704 fixp->fx_file = fragp->fr_file; 16705 fixp->fx_line = fragp->fr_line; 16706 fragp->fr_fix += fragp->fr_var; 16707} 16708 16709/* Return the size of a relaxable immediate operand instruction. 16710 SHIFT and SIZE specify the form of the allowable immediate. */ 16711static int 16712relax_immediate (fragS *fragp, int size, int shift) 16713{ 16714 offsetT offset; 16715 offsetT mask; 16716 offsetT low; 16717 16718 /* ??? Should be able to do better than this. */ 16719 if (fragp->fr_symbol) 16720 return 4; 16721 16722 low = (1 << shift) - 1; 16723 mask = (1 << (shift + size)) - (1 << shift); 16724 offset = fragp->fr_offset; 16725 /* Force misaligned offsets to 32-bit variant. */ 16726 if (offset & low) 16727 return 4; 16728 if (offset & ~mask) 16729 return 4; 16730 return 2; 16731} 16732 16733/* Get the address of a symbol during relaxation. 
*/ 16734static addressT 16735relaxed_symbol_addr(fragS *fragp, long stretch) 16736{ 16737 fragS *sym_frag; 16738 addressT addr; 16739 symbolS *sym; 16740 16741 sym = fragp->fr_symbol; 16742 sym_frag = symbol_get_frag (sym); 16743 know (S_GET_SEGMENT (sym) != absolute_section 16744 || sym_frag == &zero_address_frag); 16745 addr = S_GET_VALUE (sym) + fragp->fr_offset; 16746 16747 /* If frag has yet to be reached on this pass, assume it will 16748 move by STRETCH just as we did. If this is not so, it will 16749 be because some frag between grows, and that will force 16750 another pass. */ 16751 16752 if (stretch != 0 16753 && sym_frag->relax_marker != fragp->relax_marker) 16754 addr += stretch; 16755 16756 return addr; 16757} 16758 16759/* Return the size of a relaxable adr pseudo-instruction or PC-relative 16760 load. */ 16761static int 16762relax_adr (fragS *fragp, asection *sec, long stretch) 16763{ 16764 addressT addr; 16765 offsetT val; 16766 16767 /* Assume worst case for symbols not known to be in the same section. */ 16768 if (!S_IS_DEFINED(fragp->fr_symbol) 16769 || sec != S_GET_SEGMENT (fragp->fr_symbol)) 16770 return 4; 16771 16772 val = relaxed_symbol_addr(fragp, stretch); 16773 addr = fragp->fr_address + fragp->fr_fix; 16774 addr = (addr + 4) & ~3; 16775 /* Force misaligned targets to 32-bit variant. */ 16776 if (val & 3) 16777 return 4; 16778 val -= addr; 16779 if (val < 0 || val > 1020) 16780 return 4; 16781 return 2; 16782} 16783 16784/* Return the size of a relaxable add/sub immediate instruction. */ 16785static int 16786relax_addsub (fragS *fragp, asection *sec) 16787{ 16788 char *buf; 16789 int op; 16790 16791 buf = fragp->fr_literal + fragp->fr_fix; 16792 op = bfd_get_16(sec->owner, buf); 16793 if ((op & 0xf) == ((op >> 4) & 0xf)) 16794 return relax_immediate (fragp, 8, 0); 16795 else 16796 return relax_immediate (fragp, 3, 0); 16797} 16798 16799 16800/* Return the size of a relaxable branch instruction. 
BITS is the 16801 size of the offset field in the narrow instruction. */ 16802 16803static int 16804relax_branch (fragS *fragp, asection *sec, int bits, long stretch) 16805{ 16806 addressT addr; 16807 offsetT val; 16808 offsetT limit; 16809 16810 /* Assume worst case for symbols not known to be in the same section. */ 16811 if (!S_IS_DEFINED(fragp->fr_symbol) 16812 || sec != S_GET_SEGMENT (fragp->fr_symbol)) 16813 return 4; 16814 16815 val = relaxed_symbol_addr(fragp, stretch); 16816 addr = fragp->fr_address + fragp->fr_fix + 4; 16817 val -= addr; 16818 16819 /* Offset is a signed value *2 */ 16820 limit = 1 << bits; 16821 if (val >= limit || val < -limit) 16822 return 4; 16823 return 2; 16824} 16825 16826 16827/* Relax a machine dependent frag. This returns the amount by which 16828 the current size of the frag should change. */ 16829 16830int 16831arm_relax_frag (asection *sec, fragS *fragp, long stretch) 16832{ 16833 int oldsize; 16834 int newsize; 16835 16836 oldsize = fragp->fr_var; 16837 switch (fragp->fr_subtype) 16838 { 16839 case T_MNEM_ldr_pc2: 16840 newsize = relax_adr(fragp, sec, stretch); 16841 break; 16842 case T_MNEM_ldr_pc: 16843 case T_MNEM_ldr_sp: 16844 case T_MNEM_str_sp: 16845 newsize = relax_immediate(fragp, 8, 2); 16846 break; 16847 case T_MNEM_ldr: 16848 case T_MNEM_str: 16849 newsize = relax_immediate(fragp, 5, 2); 16850 break; 16851 case T_MNEM_ldrh: 16852 case T_MNEM_strh: 16853 newsize = relax_immediate(fragp, 5, 1); 16854 break; 16855 case T_MNEM_ldrb: 16856 case T_MNEM_strb: 16857 newsize = relax_immediate(fragp, 5, 0); 16858 break; 16859 case T_MNEM_adr: 16860 newsize = relax_adr(fragp, sec, stretch); 16861 break; 16862 case T_MNEM_mov: 16863 case T_MNEM_movs: 16864 case T_MNEM_cmp: 16865 case T_MNEM_cmn: 16866 newsize = relax_immediate(fragp, 8, 0); 16867 break; 16868 case T_MNEM_b: 16869 newsize = relax_branch(fragp, sec, 11, stretch); 16870 break; 16871 case T_MNEM_bcond: 16872 newsize = relax_branch(fragp, sec, 8, stretch); 16873 
break; 16874 case T_MNEM_add_sp: 16875 case T_MNEM_add_pc: 16876 newsize = relax_immediate (fragp, 8, 2); 16877 break; 16878 case T_MNEM_inc_sp: 16879 case T_MNEM_dec_sp: 16880 newsize = relax_immediate (fragp, 7, 2); 16881 break; 16882 case T_MNEM_addi: 16883 case T_MNEM_addis: 16884 case T_MNEM_subi: 16885 case T_MNEM_subis: 16886 newsize = relax_addsub (fragp, sec); 16887 break; 16888 default: 16889 abort(); 16890 } 16891 16892 fragp->fr_var = newsize; 16893 /* Freeze wide instructions that are at or before the same location as 16894 in the previous pass. This avoids infinite loops. 16895 Don't freeze them unconditionally because targets may be artificialy 16896 misaligned by the expansion of preceeding frags. */ 16897 if (stretch <= 0 && newsize > 2) 16898 { 16899 md_convert_frag (sec->owner, sec, fragp); 16900 frag_wane(fragp); 16901 } 16902 16903 return newsize - oldsize; 16904} 16905 16906/* Round up a section size to the appropriate boundary. */ 16907 16908valueT 16909md_section_align (segT segment ATTRIBUTE_UNUSED, 16910 valueT size) 16911{ 16912#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT)) 16913 if (OUTPUT_FLAVOR == bfd_target_aout_flavour) 16914 { 16915 /* For a.out, force the section size to be aligned. If we don't do 16916 this, BFD will align it for us, but it will not write out the 16917 final bytes of the section. This may be a bug in BFD, but it is 16918 easier to fix it here since that is how the other a.out targets 16919 work. */ 16920 int align; 16921 16922 align = bfd_get_section_alignment (stdoutput, segment); 16923 size = ((size + (1 << align) - 1) & ((valueT) -1 << align)); 16924 } 16925#endif 16926 16927 return size; 16928} 16929 16930/* This is called from HANDLE_ALIGN in write.c. Fill in the contents 16931 of an rs_align_code fragment. 
*/ 16932 16933void 16934arm_handle_align (fragS * fragP) 16935{ 16936 static char const arm_noop[4] = { 0x00, 0x00, 0xa0, 0xe1 }; 16937 static char const thumb_noop[2] = { 0xc0, 0x46 }; 16938 static char const arm_bigend_noop[4] = { 0xe1, 0xa0, 0x00, 0x00 }; 16939 static char const thumb_bigend_noop[2] = { 0x46, 0xc0 }; 16940 16941 int bytes, fix, noop_size; 16942 char * p; 16943 const char * noop; 16944 16945 if (fragP->fr_type != rs_align_code) 16946 return; 16947 16948 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix; 16949 p = fragP->fr_literal + fragP->fr_fix; 16950 fix = 0; 16951 16952 if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE) 16953 bytes &= MAX_MEM_FOR_RS_ALIGN_CODE; 16954 16955 if (fragP->tc_frag_data) 16956 { 16957 if (target_big_endian) 16958 noop = thumb_bigend_noop; 16959 else 16960 noop = thumb_noop; 16961 noop_size = sizeof (thumb_noop); 16962 } 16963 else 16964 { 16965 if (target_big_endian) 16966 noop = arm_bigend_noop; 16967 else 16968 noop = arm_noop; 16969 noop_size = sizeof (arm_noop); 16970 } 16971 16972 if (bytes & (noop_size - 1)) 16973 { 16974 fix = bytes & (noop_size - 1); 16975 memset (p, 0, fix); 16976 p += fix; 16977 bytes -= fix; 16978 } 16979 16980 while (bytes >= noop_size) 16981 { 16982 memcpy (p, noop, noop_size); 16983 p += noop_size; 16984 bytes -= noop_size; 16985 fix += noop_size; 16986 } 16987 16988 fragP->fr_fix += fix; 16989 fragP->fr_var = noop_size; 16990} 16991 16992/* Called from md_do_align. Used to create an alignment 16993 frag in a code section. */ 16994 16995void 16996arm_frag_align_code (int n, int max) 16997{ 16998 char * p; 16999 17000 /* We assume that there will never be a requirement 17001 to support alignments greater than 32 bytes. 
*/ 17002 if (max > MAX_MEM_FOR_RS_ALIGN_CODE) 17003 as_fatal (_("alignments greater than 32 bytes not supported in .text sections.")); 17004 17005 p = frag_var (rs_align_code, 17006 MAX_MEM_FOR_RS_ALIGN_CODE, 17007 1, 17008 (relax_substateT) max, 17009 (symbolS *) NULL, 17010 (offsetT) n, 17011 (char *) NULL); 17012 *p = 0; 17013} 17014 17015/* Perform target specific initialisation of a frag. */ 17016 17017void 17018arm_init_frag (fragS * fragP) 17019{ 17020 /* Record whether this frag is in an ARM or a THUMB area. */ 17021 fragP->tc_frag_data = thumb_mode; 17022} 17023 17024#ifdef OBJ_ELF 17025/* When we change sections we need to issue a new mapping symbol. */ 17026 17027void 17028arm_elf_change_section (void) 17029{ 17030 flagword flags; 17031 segment_info_type *seginfo; 17032 17033 /* Link an unlinked unwind index table section to the .text section. */ 17034 if (elf_section_type (now_seg) == SHT_ARM_EXIDX 17035 && elf_linked_to_section (now_seg) == NULL) 17036 elf_linked_to_section (now_seg) = text_section; 17037 17038 if (!SEG_NORMAL (now_seg)) 17039 return; 17040 17041 flags = bfd_get_section_flags (stdoutput, now_seg); 17042 17043 /* We can ignore sections that only contain debug info. */ 17044 if ((flags & SEC_ALLOC) == 0) 17045 return; 17046 17047 seginfo = seg_info (now_seg); 17048 mapstate = seginfo->tc_segment_info_data.mapstate; 17049 marked_pr_dependency = seginfo->tc_segment_info_data.marked_pr_dependency; 17050} 17051 17052int 17053arm_elf_section_type (const char * str, size_t len) 17054{ 17055 if (len == 5 && strncmp (str, "exidx", 5) == 0) 17056 return SHT_ARM_EXIDX; 17057 17058 return -1; 17059} 17060 17061/* Code to deal with unwinding tables. */ 17062 17063static void add_unwind_adjustsp (offsetT); 17064 17065/* Cenerate and deferred unwind frame offset. 
*/ 17066 17067static void 17068flush_pending_unwind (void) 17069{ 17070 offsetT offset; 17071 17072 offset = unwind.pending_offset; 17073 unwind.pending_offset = 0; 17074 if (offset != 0) 17075 add_unwind_adjustsp (offset); 17076} 17077 17078/* Add an opcode to this list for this function. Two-byte opcodes should 17079 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse 17080 order. */ 17081 17082static void 17083add_unwind_opcode (valueT op, int length) 17084{ 17085 /* Add any deferred stack adjustment. */ 17086 if (unwind.pending_offset) 17087 flush_pending_unwind (); 17088 17089 unwind.sp_restored = 0; 17090 17091 if (unwind.opcode_count + length > unwind.opcode_alloc) 17092 { 17093 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE; 17094 if (unwind.opcodes) 17095 unwind.opcodes = xrealloc (unwind.opcodes, 17096 unwind.opcode_alloc); 17097 else 17098 unwind.opcodes = xmalloc (unwind.opcode_alloc); 17099 } 17100 while (length > 0) 17101 { 17102 length--; 17103 unwind.opcodes[unwind.opcode_count] = op & 0xff; 17104 op >>= 8; 17105 unwind.opcode_count++; 17106 } 17107} 17108 17109/* Add unwind opcodes to adjust the stack pointer. */ 17110 17111static void 17112add_unwind_adjustsp (offsetT offset) 17113{ 17114 valueT op; 17115 17116 if (offset > 0x200) 17117 { 17118 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */ 17119 char bytes[5]; 17120 int n; 17121 valueT o; 17122 17123 /* Long form: 0xb2, uleb128. */ 17124 /* This might not fit in a word so add the individual bytes, 17125 remembering the list is built in reverse order. */ 17126 o = (valueT) ((offset - 0x204) >> 2); 17127 if (o == 0) 17128 add_unwind_opcode (0, 1); 17129 17130 /* Calculate the uleb128 encoding of the offset. */ 17131 n = 0; 17132 while (o) 17133 { 17134 bytes[n] = o & 0x7f; 17135 o >>= 7; 17136 if (o) 17137 bytes[n] |= 0x80; 17138 n++; 17139 } 17140 /* Add the insn. 
*/ 17141 for (; n; n--) 17142 add_unwind_opcode (bytes[n - 1], 1); 17143 add_unwind_opcode (0xb2, 1); 17144 } 17145 else if (offset > 0x100) 17146 { 17147 /* Two short opcodes. */ 17148 add_unwind_opcode (0x3f, 1); 17149 op = (offset - 0x104) >> 2; 17150 add_unwind_opcode (op, 1); 17151 } 17152 else if (offset > 0) 17153 { 17154 /* Short opcode. */ 17155 op = (offset - 4) >> 2; 17156 add_unwind_opcode (op, 1); 17157 } 17158 else if (offset < 0) 17159 { 17160 offset = -offset; 17161 while (offset > 0x100) 17162 { 17163 add_unwind_opcode (0x7f, 1); 17164 offset -= 0x100; 17165 } 17166 op = ((offset - 4) >> 2) | 0x40; 17167 add_unwind_opcode (op, 1); 17168 } 17169} 17170 17171/* Finish the list of unwind opcodes for this function. */ 17172static void 17173finish_unwind_opcodes (void) 17174{ 17175 valueT op; 17176 17177 if (unwind.fp_used) 17178 { 17179 /* Adjust sp as necessary. */ 17180 unwind.pending_offset += unwind.fp_offset - unwind.frame_size; 17181 flush_pending_unwind (); 17182 17183 /* After restoring sp from the frame pointer. */ 17184 op = 0x90 | unwind.fp_reg; 17185 add_unwind_opcode (op, 1); 17186 } 17187 else 17188 flush_pending_unwind (); 17189} 17190 17191 17192/* Start an exception table entry. If idx is nonzero this is an index table 17193 entry. 
*/ 17194 17195static void 17196start_unwind_section (const segT text_seg, int idx) 17197{ 17198 const char * text_name; 17199 const char * prefix; 17200 const char * prefix_once; 17201 const char * group_name; 17202 size_t prefix_len; 17203 size_t text_len; 17204 char * sec_name; 17205 size_t sec_name_len; 17206 int type; 17207 int flags; 17208 int linkonce; 17209 17210 if (idx) 17211 { 17212 prefix = ELF_STRING_ARM_unwind; 17213 prefix_once = ELF_STRING_ARM_unwind_once; 17214 type = SHT_ARM_EXIDX; 17215 } 17216 else 17217 { 17218 prefix = ELF_STRING_ARM_unwind_info; 17219 prefix_once = ELF_STRING_ARM_unwind_info_once; 17220 type = SHT_PROGBITS; 17221 } 17222 17223 text_name = segment_name (text_seg); 17224 if (streq (text_name, ".text")) 17225 text_name = ""; 17226 17227 if (strncmp (text_name, ".gnu.linkonce.t.", 17228 strlen (".gnu.linkonce.t.")) == 0) 17229 { 17230 prefix = prefix_once; 17231 text_name += strlen (".gnu.linkonce.t."); 17232 } 17233 17234 prefix_len = strlen (prefix); 17235 text_len = strlen (text_name); 17236 sec_name_len = prefix_len + text_len; 17237 sec_name = xmalloc (sec_name_len + 1); 17238 memcpy (sec_name, prefix, prefix_len); 17239 memcpy (sec_name + prefix_len, text_name, text_len); 17240 sec_name[prefix_len + text_len] = '\0'; 17241 17242 flags = SHF_ALLOC; 17243 linkonce = 0; 17244 group_name = 0; 17245 17246 /* Handle COMDAT group. */ 17247 if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0) 17248 { 17249 group_name = elf_group_name (text_seg); 17250 if (group_name == NULL) 17251 { 17252 as_bad ("Group section `%s' has no group signature", 17253 segment_name (text_seg)); 17254 ignore_rest_of_line (); 17255 return; 17256 } 17257 flags |= SHF_GROUP; 17258 linkonce = 1; 17259 } 17260 17261 obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0); 17262 17263 /* Set the setion link for index tables. 
*/ 17264 if (idx) 17265 elf_linked_to_section (now_seg) = text_seg; 17266} 17267 17268 17269/* Start an unwind table entry. HAVE_DATA is nonzero if we have additional 17270 personality routine data. Returns zero, or the index table value for 17271 and inline entry. */ 17272 17273static valueT 17274create_unwind_entry (int have_data) 17275{ 17276 int size; 17277 addressT where; 17278 char *ptr; 17279 /* The current word of data. */ 17280 valueT data; 17281 /* The number of bytes left in this word. */ 17282 int n; 17283 17284 finish_unwind_opcodes (); 17285 17286 /* Remember the current text section. */ 17287 unwind.saved_seg = now_seg; 17288 unwind.saved_subseg = now_subseg; 17289 17290 start_unwind_section (now_seg, 0); 17291 17292 if (unwind.personality_routine == NULL) 17293 { 17294 if (unwind.personality_index == -2) 17295 { 17296 if (have_data) 17297 as_bad (_("handerdata in cantunwind frame")); 17298 return 1; /* EXIDX_CANTUNWIND. */ 17299 } 17300 17301 /* Use a default personality routine if none is specified. */ 17302 if (unwind.personality_index == -1) 17303 { 17304 if (unwind.opcode_count > 3) 17305 unwind.personality_index = 1; 17306 else 17307 unwind.personality_index = 0; 17308 } 17309 17310 /* Space for the personality routine entry. */ 17311 if (unwind.personality_index == 0) 17312 { 17313 if (unwind.opcode_count > 3) 17314 as_bad (_("too many unwind opcodes for personality routine 0")); 17315 17316 if (!have_data) 17317 { 17318 /* All the data is inline in the index table. */ 17319 data = 0x80; 17320 n = 3; 17321 while (unwind.opcode_count > 0) 17322 { 17323 unwind.opcode_count--; 17324 data = (data << 8) | unwind.opcodes[unwind.opcode_count]; 17325 n--; 17326 } 17327 17328 /* Pad with "finish" opcodes. */ 17329 while (n--) 17330 data = (data << 8) | 0xb0; 17331 17332 return data; 17333 } 17334 size = 0; 17335 } 17336 else 17337 /* We get two opcodes "free" in the first word. 
*/ 17338 size = unwind.opcode_count - 2; 17339 } 17340 else 17341 /* An extra byte is required for the opcode count. */ 17342 size = unwind.opcode_count + 1; 17343 17344 size = (size + 3) >> 2; 17345 if (size > 0xff) 17346 as_bad (_("too many unwind opcodes")); 17347 17348 frag_align (2, 0, 0); 17349 record_alignment (now_seg, 2); 17350 unwind.table_entry = expr_build_dot (); 17351 17352 /* Allocate the table entry. */ 17353 ptr = frag_more ((size << 2) + 4); 17354 memset(ptr, 0, (size << 2) + 4); 17355 where = frag_now_fix () - ((size << 2) + 4); 17356 17357 switch (unwind.personality_index) 17358 { 17359 case -1: 17360 /* ??? Should this be a PLT generating relocation? */ 17361 /* Custom personality routine. */ 17362 fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1, 17363 BFD_RELOC_ARM_PREL31); 17364 17365 where += 4; 17366 ptr += 4; 17367 17368 /* Set the first byte to the number of additional words. */ 17369 data = size - 1; 17370 n = 3; 17371 break; 17372 17373 /* ABI defined personality routines. */ 17374 case 0: 17375 /* Three opcodes bytes are packed into the first word. */ 17376 data = 0x80; 17377 n = 3; 17378 break; 17379 17380 case 1: 17381 case 2: 17382 /* The size and first two opcode bytes go in the first word. */ 17383 data = ((0x80 + unwind.personality_index) << 8) | size; 17384 n = 2; 17385 break; 17386 17387 default: 17388 /* Should never happen. */ 17389 abort (); 17390 } 17391 17392 /* Pack the opcodes into words (MSB first), reversing the list at the same 17393 time. */ 17394 while (unwind.opcode_count > 0) 17395 { 17396 if (n == 0) 17397 { 17398 md_number_to_chars (ptr, data, 4); 17399 ptr += 4; 17400 n = 4; 17401 data = 0; 17402 } 17403 unwind.opcode_count--; 17404 n--; 17405 data = (data << 8) | unwind.opcodes[unwind.opcode_count]; 17406 } 17407 17408 /* Finish off the last word. */ 17409 if (n < 4) 17410 { 17411 /* Pad with "finish" opcodes. 
*/ 17412 while (n--) 17413 data = (data << 8) | 0xb0; 17414 17415 md_number_to_chars (ptr, data, 4); 17416 } 17417 17418 if (!have_data) 17419 { 17420 /* Add an empty descriptor if there is no user-specified data. */ 17421 ptr = frag_more (4); 17422 md_number_to_chars (ptr, 0, 4); 17423 } 17424 17425 return 0; 17426} 17427 17428 17429/* Initialize the DWARF-2 unwind information for this procedure. */ 17430 17431void 17432tc_arm_frame_initial_instructions (void) 17433{ 17434 cfi_add_CFA_def_cfa (REG_SP, 0); 17435} 17436#endif /* OBJ_ELF */ 17437 17438/* Convert REGNAME to a DWARF-2 register number. */ 17439 17440int 17441tc_arm_regname_to_dw2regnum (char *regname) 17442{ 17443 int reg = arm_reg_parse (®name, REG_TYPE_RN); 17444 17445 if (reg == FAIL) 17446 return -1; 17447 17448 return reg; 17449} 17450 17451#ifdef TE_PE 17452void 17453tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size) 17454{ 17455 expressionS expr; 17456 17457 expr.X_op = O_secrel; 17458 expr.X_add_symbol = symbol; 17459 expr.X_add_number = 0; 17460 emit_expr (&expr, size); 17461} 17462#endif 17463 17464/* MD interface: Symbol and relocation handling. */ 17465 17466/* Return the address within the segment that a PC-relative fixup is 17467 relative to. For ARM, PC-relative fixups applied to instructions 17468 are generally relative to the location of the fixup plus 8 bytes. 17469 Thumb branches are offset by 4, and Thumb loads relative to PC 17470 require special handling. */ 17471 17472long 17473md_pcrel_from_section (fixS * fixP, segT seg) 17474{ 17475 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address; 17476 17477 /* If this is pc-relative and we are going to emit a relocation 17478 then we just want to put out any pipeline compensation that the linker 17479 will need. Otherwise we want to use the calculated base. 17480 For WinCE we skip the bias for externals as well, since this 17481 is how the MS ARM-CE assembler behaves and we want to be compatible. 
*/ 17482 if (fixP->fx_pcrel 17483 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg) 17484 || (arm_force_relocation (fixP) 17485#ifdef TE_WINCE 17486 && !S_IS_EXTERNAL (fixP->fx_addsy) 17487#endif 17488 ))) 17489 base = 0; 17490 17491 switch (fixP->fx_r_type) 17492 { 17493 /* PC relative addressing on the Thumb is slightly odd as the 17494 bottom two bits of the PC are forced to zero for the 17495 calculation. This happens *after* application of the 17496 pipeline offset. However, Thumb adrl already adjusts for 17497 this, so we need not do it again. */ 17498 case BFD_RELOC_ARM_THUMB_ADD: 17499 return base & ~3; 17500 17501 case BFD_RELOC_ARM_THUMB_OFFSET: 17502 case BFD_RELOC_ARM_T32_OFFSET_IMM: 17503 case BFD_RELOC_ARM_T32_ADD_PC12: 17504 case BFD_RELOC_ARM_T32_CP_OFF_IMM: 17505 return (base + 4) & ~3; 17506 17507 /* Thumb branches are simply offset by +4. */ 17508 case BFD_RELOC_THUMB_PCREL_BRANCH7: 17509 case BFD_RELOC_THUMB_PCREL_BRANCH9: 17510 case BFD_RELOC_THUMB_PCREL_BRANCH12: 17511 case BFD_RELOC_THUMB_PCREL_BRANCH20: 17512 case BFD_RELOC_THUMB_PCREL_BRANCH23: 17513 case BFD_RELOC_THUMB_PCREL_BRANCH25: 17514 case BFD_RELOC_THUMB_PCREL_BLX: 17515 return base + 4; 17516 17517 /* ARM mode branches are offset by +8. However, the Windows CE 17518 loader expects the relocation not to take this into account. */ 17519 case BFD_RELOC_ARM_PCREL_BRANCH: 17520 case BFD_RELOC_ARM_PCREL_CALL: 17521 case BFD_RELOC_ARM_PCREL_JUMP: 17522 case BFD_RELOC_ARM_PCREL_BLX: 17523 case BFD_RELOC_ARM_PLT32: 17524#ifdef TE_WINCE 17525 /* When handling fixups immediately, because we have already 17526 discovered the value of a symbol, or the address of the frag involved 17527 we must account for the offset by +8, as the OS loader will never see the reloc. 17528 see fixup_segment() in write.c 17529 The S_IS_EXTERNAL test handles the case of global symbols. 17530 Those need the calculated base, not just the pipe compensation the linker will need. 
*/ 17531 if (fixP->fx_pcrel 17532 && fixP->fx_addsy != NULL 17533 && (S_GET_SEGMENT (fixP->fx_addsy) == seg) 17534 && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP))) 17535 return base + 8; 17536 return base; 17537#else 17538 return base + 8; 17539#endif 17540 17541 /* ARM mode loads relative to PC are also offset by +8. Unlike 17542 branches, the Windows CE loader *does* expect the relocation 17543 to take this into account. */ 17544 case BFD_RELOC_ARM_OFFSET_IMM: 17545 case BFD_RELOC_ARM_OFFSET_IMM8: 17546 case BFD_RELOC_ARM_HWLITERAL: 17547 case BFD_RELOC_ARM_LITERAL: 17548 case BFD_RELOC_ARM_CP_OFF_IMM: 17549 return base + 8; 17550 17551 17552 /* Other PC-relative relocations are un-offset. */ 17553 default: 17554 return base; 17555 } 17556} 17557 17558/* Under ELF we need to default _GLOBAL_OFFSET_TABLE. 17559 Otherwise we have no need to default values of symbols. */ 17560 17561symbolS * 17562md_undefined_symbol (char * name ATTRIBUTE_UNUSED) 17563{ 17564#ifdef OBJ_ELF 17565 if (name[0] == '_' && name[1] == 'G' 17566 && streq (name, GLOBAL_OFFSET_TABLE_NAME)) 17567 { 17568 if (!GOT_symbol) 17569 { 17570 if (symbol_find (name)) 17571 as_bad ("GOT already in the symbol table"); 17572 17573 GOT_symbol = symbol_new (name, undefined_section, 17574 (valueT) 0, & zero_address_frag); 17575 } 17576 17577 return GOT_symbol; 17578 } 17579#endif 17580 17581 return 0; 17582} 17583 17584/* Subroutine of md_apply_fix. Check to see if an immediate can be 17585 computed as two separate immediate values, added together. We 17586 already know that this value cannot be computed by just one ARM 17587 instruction. 
*/ 17588 17589static unsigned int 17590validate_immediate_twopart (unsigned int val, 17591 unsigned int * highpart) 17592{ 17593 unsigned int a; 17594 unsigned int i; 17595 17596 for (i = 0; i < 32; i += 2) 17597 if (((a = rotate_left (val, i)) & 0xff) != 0) 17598 { 17599 if (a & 0xff00) 17600 { 17601 if (a & ~ 0xffff) 17602 continue; 17603 * highpart = (a >> 8) | ((i + 24) << 7); 17604 } 17605 else if (a & 0xff0000) 17606 { 17607 if (a & 0xff000000) 17608 continue; 17609 * highpart = (a >> 16) | ((i + 16) << 7); 17610 } 17611 else 17612 { 17613 assert (a & 0xff000000); 17614 * highpart = (a >> 24) | ((i + 8) << 7); 17615 } 17616 17617 return (a & 0xff) | (i << 7); 17618 } 17619 17620 return FAIL; 17621} 17622 17623static int 17624validate_offset_imm (unsigned int val, int hwse) 17625{ 17626 if ((hwse && val > 255) || val > 4095) 17627 return FAIL; 17628 return val; 17629} 17630 17631/* Subroutine of md_apply_fix. Do those data_ops which can take a 17632 negative immediate constant by altering the instruction. A bit of 17633 a hack really. 17634 MOV <-> MVN 17635 AND <-> BIC 17636 ADC <-> SBC 17637 by inverting the second operand, and 17638 ADD <-> SUB 17639 CMP <-> CMN 17640 by negating the second operand. */ 17641 17642static int 17643negate_data_op (unsigned long * instruction, 17644 unsigned long value) 17645{ 17646 int op, new_inst; 17647 unsigned long negated, inverted; 17648 17649 negated = encode_arm_immediate (-value); 17650 inverted = encode_arm_immediate (~value); 17651 17652 op = (*instruction >> DATA_OP_SHIFT) & 0xf; 17653 switch (op) 17654 { 17655 /* First negates. 
*/ 17656 case OPCODE_SUB: /* ADD <-> SUB */ 17657 new_inst = OPCODE_ADD; 17658 value = negated; 17659 break; 17660 17661 case OPCODE_ADD: 17662 new_inst = OPCODE_SUB; 17663 value = negated; 17664 break; 17665 17666 case OPCODE_CMP: /* CMP <-> CMN */ 17667 new_inst = OPCODE_CMN; 17668 value = negated; 17669 break; 17670 17671 case OPCODE_CMN: 17672 new_inst = OPCODE_CMP; 17673 value = negated; 17674 break; 17675 17676 /* Now Inverted ops. */ 17677 case OPCODE_MOV: /* MOV <-> MVN */ 17678 new_inst = OPCODE_MVN; 17679 value = inverted; 17680 break; 17681 17682 case OPCODE_MVN: 17683 new_inst = OPCODE_MOV; 17684 value = inverted; 17685 break; 17686 17687 case OPCODE_AND: /* AND <-> BIC */ 17688 new_inst = OPCODE_BIC; 17689 value = inverted; 17690 break; 17691 17692 case OPCODE_BIC: 17693 new_inst = OPCODE_AND; 17694 value = inverted; 17695 break; 17696 17697 case OPCODE_ADC: /* ADC <-> SBC */ 17698 new_inst = OPCODE_SBC; 17699 value = inverted; 17700 break; 17701 17702 case OPCODE_SBC: 17703 new_inst = OPCODE_ADC; 17704 value = inverted; 17705 break; 17706 17707 /* We cannot do anything. */ 17708 default: 17709 return FAIL; 17710 } 17711 17712 if (value == (unsigned) FAIL) 17713 return FAIL; 17714 17715 *instruction &= OPCODE_MASK; 17716 *instruction |= new_inst << DATA_OP_SHIFT; 17717 return value; 17718} 17719 17720/* Like negate_data_op, but for Thumb-2. */ 17721 17722static unsigned int 17723thumb32_negate_data_op (offsetT *instruction, unsigned int value) 17724{ 17725 int op, new_inst; 17726 int rd; 17727 unsigned int negated, inverted; 17728 17729 negated = encode_thumb32_immediate (-value); 17730 inverted = encode_thumb32_immediate (~value); 17731 17732 rd = (*instruction >> 8) & 0xf; 17733 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf; 17734 switch (op) 17735 { 17736 /* ADD <-> SUB. Includes CMP <-> CMN. 
*/ 17737 case T2_OPCODE_SUB: 17738 new_inst = T2_OPCODE_ADD; 17739 value = negated; 17740 break; 17741 17742 case T2_OPCODE_ADD: 17743 new_inst = T2_OPCODE_SUB; 17744 value = negated; 17745 break; 17746 17747 /* ORR <-> ORN. Includes MOV <-> MVN. */ 17748 case T2_OPCODE_ORR: 17749 new_inst = T2_OPCODE_ORN; 17750 value = inverted; 17751 break; 17752 17753 case T2_OPCODE_ORN: 17754 new_inst = T2_OPCODE_ORR; 17755 value = inverted; 17756 break; 17757 17758 /* AND <-> BIC. TST has no inverted equivalent. */ 17759 case T2_OPCODE_AND: 17760 new_inst = T2_OPCODE_BIC; 17761 if (rd == 15) 17762 value = FAIL; 17763 else 17764 value = inverted; 17765 break; 17766 17767 case T2_OPCODE_BIC: 17768 new_inst = T2_OPCODE_AND; 17769 value = inverted; 17770 break; 17771 17772 /* ADC <-> SBC */ 17773 case T2_OPCODE_ADC: 17774 new_inst = T2_OPCODE_SBC; 17775 value = inverted; 17776 break; 17777 17778 case T2_OPCODE_SBC: 17779 new_inst = T2_OPCODE_ADC; 17780 value = inverted; 17781 break; 17782 17783 /* We cannot do anything. */ 17784 default: 17785 return FAIL; 17786 } 17787 17788 if (value == (unsigned int)FAIL) 17789 return FAIL; 17790 17791 *instruction &= T2_OPCODE_MASK; 17792 *instruction |= new_inst << T2_DATA_OP_SHIFT; 17793 return value; 17794} 17795 17796/* Read a 32-bit thumb instruction from buf. */ 17797static unsigned long 17798get_thumb32_insn (char * buf) 17799{ 17800 unsigned long insn; 17801 insn = md_chars_to_number (buf, THUMB_SIZE) << 16; 17802 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE); 17803 17804 return insn; 17805} 17806 17807 17808/* We usually want to set the low bit on the address of thumb function 17809 symbols. In particular .word foo - . should have the low bit set. 17810 Generic code tries to fold the difference of two symbols to 17811 a constant. Prevent this and force a relocation when the first symbols 17812 is a thumb function. 
*/ 17813int 17814arm_optimize_expr (expressionS *l, operatorT op, expressionS *r) 17815{ 17816 if (op == O_subtract 17817 && l->X_op == O_symbol 17818 && r->X_op == O_symbol 17819 && THUMB_IS_FUNC (l->X_add_symbol)) 17820 { 17821 l->X_op = O_subtract; 17822 l->X_op_symbol = r->X_add_symbol; 17823 l->X_add_number -= r->X_add_number; 17824 return 1; 17825 } 17826 /* Process as normal. */ 17827 return 0; 17828} 17829 17830void 17831md_apply_fix (fixS * fixP, 17832 valueT * valP, 17833 segT seg) 17834{ 17835 offsetT value = * valP; 17836 offsetT newval; 17837 unsigned int newimm; 17838 unsigned long temp; 17839 int sign; 17840 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal; 17841 17842 assert (fixP->fx_r_type <= BFD_RELOC_UNUSED); 17843 17844 /* Note whether this will delete the relocation. */ 17845 17846 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel) 17847 fixP->fx_done = 1; 17848 17849 /* On a 64-bit host, silently truncate 'value' to 32 bits for 17850 consistency with the behavior on 32-bit hosts. Remember value 17851 for emit_reloc. */ 17852 value &= 0xffffffff; 17853 value ^= 0x80000000; 17854 value -= 0x80000000; 17855 17856 *valP = value; 17857 fixP->fx_addnumber = value; 17858 17859 /* Same treatment for fixP->fx_offset. */ 17860 fixP->fx_offset &= 0xffffffff; 17861 fixP->fx_offset ^= 0x80000000; 17862 fixP->fx_offset -= 0x80000000; 17863 17864 switch (fixP->fx_r_type) 17865 { 17866 case BFD_RELOC_NONE: 17867 /* This will need to go in the object file. */ 17868 fixP->fx_done = 0; 17869 break; 17870 17871 case BFD_RELOC_ARM_IMMEDIATE: 17872 /* We claim that this fixup has been processed here, 17873 even if in fact we generate an error because we do 17874 not have a reloc for it, so tc_gen_reloc will reject it. */ 17875 fixP->fx_done = 1; 17876 17877 if (fixP->fx_addsy 17878 && ! 
S_IS_DEFINED (fixP->fx_addsy)) 17879 { 17880 as_bad_where (fixP->fx_file, fixP->fx_line, 17881 _("undefined symbol %s used as an immediate value"), 17882 S_GET_NAME (fixP->fx_addsy)); 17883 break; 17884 } 17885 17886 newimm = encode_arm_immediate (value); 17887 temp = md_chars_to_number (buf, INSN_SIZE); 17888 17889 /* If the instruction will fail, see if we can fix things up by 17890 changing the opcode. */ 17891 if (newimm == (unsigned int) FAIL 17892 && (newimm = negate_data_op (&temp, value)) == (unsigned int) FAIL) 17893 { 17894 as_bad_where (fixP->fx_file, fixP->fx_line, 17895 _("invalid constant (%lx) after fixup"), 17896 (unsigned long) value); 17897 break; 17898 } 17899 17900 newimm |= (temp & 0xfffff000); 17901 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE); 17902 break; 17903 17904 case BFD_RELOC_ARM_ADRL_IMMEDIATE: 17905 { 17906 unsigned int highpart = 0; 17907 unsigned int newinsn = 0xe1a00000; /* nop. */ 17908 17909 newimm = encode_arm_immediate (value); 17910 temp = md_chars_to_number (buf, INSN_SIZE); 17911 17912 /* If the instruction will fail, see if we can fix things up by 17913 changing the opcode. */ 17914 if (newimm == (unsigned int) FAIL 17915 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL) 17916 { 17917 /* No ? OK - try using two ADD instructions to generate 17918 the value. */ 17919 newimm = validate_immediate_twopart (value, & highpart); 17920 17921 /* Yes - then make sure that the second instruction is 17922 also an add. */ 17923 if (newimm != (unsigned int) FAIL) 17924 newinsn = temp; 17925 /* Still No ? Try using a negated value. */ 17926 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL) 17927 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT; 17928 /* Otherwise - give up. 
*/ 17929 else 17930 { 17931 as_bad_where (fixP->fx_file, fixP->fx_line, 17932 _("unable to compute ADRL instructions for PC offset of 0x%lx"), 17933 (long) value); 17934 break; 17935 } 17936 17937 /* Replace the first operand in the 2nd instruction (which 17938 is the PC) with the destination register. We have 17939 already added in the PC in the first instruction and we 17940 do not want to do it again. */ 17941 newinsn &= ~ 0xf0000; 17942 newinsn |= ((newinsn & 0x0f000) << 4); 17943 } 17944 17945 newimm |= (temp & 0xfffff000); 17946 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE); 17947 17948 highpart |= (newinsn & 0xfffff000); 17949 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE); 17950 } 17951 break; 17952 17953 case BFD_RELOC_ARM_OFFSET_IMM: 17954 if (!fixP->fx_done && seg->use_rela_p) 17955 value = 0; 17956 17957 case BFD_RELOC_ARM_LITERAL: 17958 sign = value >= 0; 17959 17960 if (value < 0) 17961 value = - value; 17962 17963 if (validate_offset_imm (value, 0) == FAIL) 17964 { 17965 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL) 17966 as_bad_where (fixP->fx_file, fixP->fx_line, 17967 _("invalid literal constant: pool needs to be closer")); 17968 else 17969 as_bad_where (fixP->fx_file, fixP->fx_line, 17970 _("bad immediate value for offset (%ld)"), 17971 (long) value); 17972 break; 17973 } 17974 17975 newval = md_chars_to_number (buf, INSN_SIZE); 17976 newval &= 0xff7ff000; 17977 newval |= value | (sign ? 
INDEX_UP : 0); 17978 md_number_to_chars (buf, newval, INSN_SIZE); 17979 break; 17980 17981 case BFD_RELOC_ARM_OFFSET_IMM8: 17982 case BFD_RELOC_ARM_HWLITERAL: 17983 sign = value >= 0; 17984 17985 if (value < 0) 17986 value = - value; 17987 17988 if (validate_offset_imm (value, 1) == FAIL) 17989 { 17990 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL) 17991 as_bad_where (fixP->fx_file, fixP->fx_line, 17992 _("invalid literal constant: pool needs to be closer")); 17993 else 17994 as_bad (_("bad immediate value for 8-bit offset (%ld)"), 17995 (long) value); 17996 break; 17997 } 17998 17999 newval = md_chars_to_number (buf, INSN_SIZE); 18000 newval &= 0xff7ff0f0; 18001 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0); 18002 md_number_to_chars (buf, newval, INSN_SIZE); 18003 break; 18004 18005 case BFD_RELOC_ARM_T32_OFFSET_U8: 18006 if (value < 0 || value > 1020 || value % 4 != 0) 18007 as_bad_where (fixP->fx_file, fixP->fx_line, 18008 _("bad immediate value for offset (%ld)"), (long) value); 18009 value /= 4; 18010 18011 newval = md_chars_to_number (buf+2, THUMB_SIZE); 18012 newval |= value; 18013 md_number_to_chars (buf+2, newval, THUMB_SIZE); 18014 break; 18015 18016 case BFD_RELOC_ARM_T32_OFFSET_IMM: 18017 /* This is a complicated relocation used for all varieties of Thumb32 18018 load/store instruction with immediate offset: 18019 18020 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit, 18021 *4, optional writeback(W) 18022 (doubleword load/store) 18023 18024 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel 18025 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit 18026 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction) 18027 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit 18028 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit 18029 18030 Uppercase letters indicate bits that are already encoded at 18031 this point. Lowercase letters are our problem. 
For the 18032 second block of instructions, the secondary opcode nybble 18033 (bits 8..11) is present, and bit 23 is zero, even if this is 18034 a PC-relative operation. */ 18035 newval = md_chars_to_number (buf, THUMB_SIZE); 18036 newval <<= 16; 18037 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE); 18038 18039 if ((newval & 0xf0000000) == 0xe0000000) 18040 { 18041 /* Doubleword load/store: 8-bit offset, scaled by 4. */ 18042 if (value >= 0) 18043 newval |= (1 << 23); 18044 else 18045 value = -value; 18046 if (value % 4 != 0) 18047 { 18048 as_bad_where (fixP->fx_file, fixP->fx_line, 18049 _("offset not a multiple of 4")); 18050 break; 18051 } 18052 value /= 4; 18053 if (value > 0xff) 18054 { 18055 as_bad_where (fixP->fx_file, fixP->fx_line, 18056 _("offset out of range")); 18057 break; 18058 } 18059 newval &= ~0xff; 18060 } 18061 else if ((newval & 0x000f0000) == 0x000f0000) 18062 { 18063 /* PC-relative, 12-bit offset. */ 18064 if (value >= 0) 18065 newval |= (1 << 23); 18066 else 18067 value = -value; 18068 if (value > 0xfff) 18069 { 18070 as_bad_where (fixP->fx_file, fixP->fx_line, 18071 _("offset out of range")); 18072 break; 18073 } 18074 newval &= ~0xfff; 18075 } 18076 else if ((newval & 0x00000100) == 0x00000100) 18077 { 18078 /* Writeback: 8-bit, +/- offset. */ 18079 if (value >= 0) 18080 newval |= (1 << 9); 18081 else 18082 value = -value; 18083 if (value > 0xff) 18084 { 18085 as_bad_where (fixP->fx_file, fixP->fx_line, 18086 _("offset out of range")); 18087 break; 18088 } 18089 newval &= ~0xff; 18090 } 18091 else if ((newval & 0x00000f00) == 0x00000e00) 18092 { 18093 /* T-instruction: positive 8-bit offset. */ 18094 if (value < 0 || value > 0xff) 18095 { 18096 as_bad_where (fixP->fx_file, fixP->fx_line, 18097 _("offset out of range")); 18098 break; 18099 } 18100 newval &= ~0xff; 18101 newval |= value; 18102 } 18103 else 18104 { 18105 /* Positive 12-bit or negative 8-bit offset. 
*/ 18106 int limit; 18107 if (value >= 0) 18108 { 18109 newval |= (1 << 23); 18110 limit = 0xfff; 18111 } 18112 else 18113 { 18114 value = -value; 18115 limit = 0xff; 18116 } 18117 if (value > limit) 18118 { 18119 as_bad_where (fixP->fx_file, fixP->fx_line, 18120 _("offset out of range")); 18121 break; 18122 } 18123 newval &= ~limit; 18124 } 18125 18126 newval |= value; 18127 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE); 18128 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE); 18129 break; 18130 18131 case BFD_RELOC_ARM_SHIFT_IMM: 18132 newval = md_chars_to_number (buf, INSN_SIZE); 18133 if (((unsigned long) value) > 32 18134 || (value == 32 18135 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60))) 18136 { 18137 as_bad_where (fixP->fx_file, fixP->fx_line, 18138 _("shift expression is too large")); 18139 break; 18140 } 18141 18142 if (value == 0) 18143 /* Shifts of zero must be done as lsl. */ 18144 newval &= ~0x60; 18145 else if (value == 32) 18146 value = 0; 18147 newval &= 0xfffff07f; 18148 newval |= (value & 0x1f) << 7; 18149 md_number_to_chars (buf, newval, INSN_SIZE); 18150 break; 18151 18152 case BFD_RELOC_ARM_T32_IMMEDIATE: 18153 case BFD_RELOC_ARM_T32_ADD_IMM: 18154 case BFD_RELOC_ARM_T32_IMM12: 18155 case BFD_RELOC_ARM_T32_ADD_PC12: 18156 /* We claim that this fixup has been processed here, 18157 even if in fact we generate an error because we do 18158 not have a reloc for it, so tc_gen_reloc will reject it. */ 18159 fixP->fx_done = 1; 18160 18161 if (fixP->fx_addsy 18162 && ! 
S_IS_DEFINED (fixP->fx_addsy)) 18163 { 18164 as_bad_where (fixP->fx_file, fixP->fx_line, 18165 _("undefined symbol %s used as an immediate value"), 18166 S_GET_NAME (fixP->fx_addsy)); 18167 break; 18168 } 18169 18170 newval = md_chars_to_number (buf, THUMB_SIZE); 18171 newval <<= 16; 18172 newval |= md_chars_to_number (buf+2, THUMB_SIZE); 18173 18174 newimm = FAIL; 18175 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE 18176 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM) 18177 { 18178 newimm = encode_thumb32_immediate (value); 18179 if (newimm == (unsigned int) FAIL) 18180 newimm = thumb32_negate_data_op (&newval, value); 18181 } 18182 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE 18183 && newimm == (unsigned int) FAIL) 18184 { 18185 /* Turn add/sum into addw/subw. */ 18186 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM) 18187 newval = (newval & 0xfeffffff) | 0x02000000; 18188 18189 /* 12 bit immediate for addw/subw. */ 18190 if (value < 0) 18191 { 18192 value = -value; 18193 newval ^= 0x00a00000; 18194 } 18195 if (value > 0xfff) 18196 newimm = (unsigned int) FAIL; 18197 else 18198 newimm = value; 18199 } 18200 18201 if (newimm == (unsigned int)FAIL) 18202 { 18203 as_bad_where (fixP->fx_file, fixP->fx_line, 18204 _("invalid constant (%lx) after fixup"), 18205 (unsigned long) value); 18206 break; 18207 } 18208 18209 newval |= (newimm & 0x800) << 15; 18210 newval |= (newimm & 0x700) << 4; 18211 newval |= (newimm & 0x0ff); 18212 18213 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE); 18214 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE); 18215 break; 18216 18217 case BFD_RELOC_ARM_SMC: 18218 if (((unsigned long) value) > 0xffff) 18219 as_bad_where (fixP->fx_file, fixP->fx_line, 18220 _("invalid smc expression")); 18221 newval = md_chars_to_number (buf, INSN_SIZE); 18222 newval |= (value & 0xf) | ((value & 0xfff0) << 4); 18223 md_number_to_chars (buf, newval, INSN_SIZE); 18224 break; 18225 18226 case 
BFD_RELOC_ARM_SWI: 18227 if (fixP->tc_fix_data != 0) 18228 { 18229 if (((unsigned long) value) > 0xff) 18230 as_bad_where (fixP->fx_file, fixP->fx_line, 18231 _("invalid swi expression")); 18232 newval = md_chars_to_number (buf, THUMB_SIZE); 18233 newval |= value; 18234 md_number_to_chars (buf, newval, THUMB_SIZE); 18235 } 18236 else 18237 { 18238 if (((unsigned long) value) > 0x00ffffff) 18239 as_bad_where (fixP->fx_file, fixP->fx_line, 18240 _("invalid swi expression")); 18241 newval = md_chars_to_number (buf, INSN_SIZE); 18242 newval |= value; 18243 md_number_to_chars (buf, newval, INSN_SIZE); 18244 } 18245 break; 18246 18247 case BFD_RELOC_ARM_MULTI: 18248 if (((unsigned long) value) > 0xffff) 18249 as_bad_where (fixP->fx_file, fixP->fx_line, 18250 _("invalid expression in load/store multiple")); 18251 newval = value | md_chars_to_number (buf, INSN_SIZE); 18252 md_number_to_chars (buf, newval, INSN_SIZE); 18253 break; 18254 18255#ifdef OBJ_ELF 18256 case BFD_RELOC_ARM_PCREL_CALL: 18257 newval = md_chars_to_number (buf, INSN_SIZE); 18258 if ((newval & 0xf0000000) == 0xf0000000) 18259 temp = 1; 18260 else 18261 temp = 3; 18262 goto arm_branch_common; 18263 18264 case BFD_RELOC_ARM_PCREL_JUMP: 18265 case BFD_RELOC_ARM_PLT32: 18266#endif 18267 case BFD_RELOC_ARM_PCREL_BRANCH: 18268 temp = 3; 18269 goto arm_branch_common; 18270 18271 case BFD_RELOC_ARM_PCREL_BLX: 18272 temp = 1; 18273 arm_branch_common: 18274 /* We are going to store value (shifted right by two) in the 18275 instruction, in a 24 bit, signed field. Bits 26 through 32 either 18276 all clear or all set and bit 0 must be clear. For B/BL bit 1 must 18277 also be be clear. 
*/ 18278 if (value & temp) 18279 as_bad_where (fixP->fx_file, fixP->fx_line, 18280 _("misaligned branch destination")); 18281 if ((value & (offsetT)0xfe000000) != (offsetT)0 18282 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000) 18283 as_bad_where (fixP->fx_file, fixP->fx_line, 18284 _("branch out of range")); 18285 18286 if (fixP->fx_done || !seg->use_rela_p) 18287 { 18288 newval = md_chars_to_number (buf, INSN_SIZE); 18289 newval |= (value >> 2) & 0x00ffffff; 18290 /* Set the H bit on BLX instructions. */ 18291 if (temp == 1) 18292 { 18293 if (value & 2) 18294 newval |= 0x01000000; 18295 else 18296 newval &= ~0x01000000; 18297 } 18298 md_number_to_chars (buf, newval, INSN_SIZE); 18299 } 18300 break; 18301 18302 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */ 18303 /* CBZ can only branch forward. */ 18304 18305 /* Attempts to use CBZ to branch to the next instruction 18306 (which, strictly speaking, are prohibited) will be turned into 18307 no-ops. 18308 18309 FIXME: It may be better to remove the instruction completely and 18310 perform relaxation. */ 18311 if (value == -2) 18312 { 18313 newval = md_chars_to_number (buf, THUMB_SIZE); 18314 newval = 0xbf00; /* NOP encoding T1 */ 18315 md_number_to_chars (buf, newval, THUMB_SIZE); 18316 } 18317 else 18318 { 18319 if (value & ~0x7e) 18320 as_bad_where (fixP->fx_file, fixP->fx_line, 18321 _("branch out of range")); 18322 18323 if (fixP->fx_done || !seg->use_rela_p) 18324 { 18325 newval = md_chars_to_number (buf, THUMB_SIZE); 18326 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3); 18327 md_number_to_chars (buf, newval, THUMB_SIZE); 18328 } 18329 } 18330 break; 18331 18332 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. 
*/ 18333 if ((value & ~0xff) && ((value & ~0xff) != ~0xff)) 18334 as_bad_where (fixP->fx_file, fixP->fx_line, 18335 _("branch out of range")); 18336 18337 if (fixP->fx_done || !seg->use_rela_p) 18338 { 18339 newval = md_chars_to_number (buf, THUMB_SIZE); 18340 newval |= (value & 0x1ff) >> 1; 18341 md_number_to_chars (buf, newval, THUMB_SIZE); 18342 } 18343 break; 18344 18345 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */ 18346 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff)) 18347 as_bad_where (fixP->fx_file, fixP->fx_line, 18348 _("branch out of range")); 18349 18350 if (fixP->fx_done || !seg->use_rela_p) 18351 { 18352 newval = md_chars_to_number (buf, THUMB_SIZE); 18353 newval |= (value & 0xfff) >> 1; 18354 md_number_to_chars (buf, newval, THUMB_SIZE); 18355 } 18356 break; 18357 18358 case BFD_RELOC_THUMB_PCREL_BRANCH20: 18359 if ((value & ~0x1fffff) && ((value & ~0x1fffff) != ~0x1fffff)) 18360 as_bad_where (fixP->fx_file, fixP->fx_line, 18361 _("conditional branch out of range")); 18362 18363 if (fixP->fx_done || !seg->use_rela_p) 18364 { 18365 offsetT newval2; 18366 addressT S, J1, J2, lo, hi; 18367 18368 S = (value & 0x00100000) >> 20; 18369 J2 = (value & 0x00080000) >> 19; 18370 J1 = (value & 0x00040000) >> 18; 18371 hi = (value & 0x0003f000) >> 12; 18372 lo = (value & 0x00000ffe) >> 1; 18373 18374 newval = md_chars_to_number (buf, THUMB_SIZE); 18375 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE); 18376 newval |= (S << 10) | hi; 18377 newval2 |= (J1 << 13) | (J2 << 11) | lo; 18378 md_number_to_chars (buf, newval, THUMB_SIZE); 18379 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE); 18380 } 18381 break; 18382 18383 case BFD_RELOC_THUMB_PCREL_BLX: 18384 case BFD_RELOC_THUMB_PCREL_BRANCH23: 18385 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff)) 18386 as_bad_where (fixP->fx_file, fixP->fx_line, 18387 _("branch out of range")); 18388 18389 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX) 18390 /* For a 
BLX instruction, make sure that the relocation is rounded up 18391 to a word boundary. This follows the semantics of the instruction 18392 which specifies that bit 1 of the target address will come from bit 18393 1 of the base address. */ 18394 value = (value + 1) & ~ 1; 18395 18396 if (fixP->fx_done || !seg->use_rela_p) 18397 { 18398 offsetT newval2; 18399 18400 newval = md_chars_to_number (buf, THUMB_SIZE); 18401 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE); 18402 newval |= (value & 0x7fffff) >> 12; 18403 newval2 |= (value & 0xfff) >> 1; 18404 md_number_to_chars (buf, newval, THUMB_SIZE); 18405 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE); 18406 } 18407 break; 18408 18409 case BFD_RELOC_THUMB_PCREL_BRANCH25: 18410 if ((value & ~0x1ffffff) && ((value & ~0x1ffffff) != ~0x1ffffff)) 18411 as_bad_where (fixP->fx_file, fixP->fx_line, 18412 _("branch out of range")); 18413 18414 if (fixP->fx_done || !seg->use_rela_p) 18415 { 18416 offsetT newval2; 18417 addressT S, I1, I2, lo, hi; 18418 18419 S = (value & 0x01000000) >> 24; 18420 I1 = (value & 0x00800000) >> 23; 18421 I2 = (value & 0x00400000) >> 22; 18422 hi = (value & 0x003ff000) >> 12; 18423 lo = (value & 0x00000ffe) >> 1; 18424 18425 I1 = !(I1 ^ S); 18426 I2 = !(I2 ^ S); 18427 18428 newval = md_chars_to_number (buf, THUMB_SIZE); 18429 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE); 18430 newval |= (S << 10) | hi; 18431 newval2 |= (I1 << 13) | (I2 << 11) | lo; 18432 md_number_to_chars (buf, newval, THUMB_SIZE); 18433 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE); 18434 } 18435 break; 18436 18437 case BFD_RELOC_8: 18438 if (fixP->fx_done || !seg->use_rela_p) 18439 md_number_to_chars (buf, value, 1); 18440 break; 18441 18442 case BFD_RELOC_16: 18443 if (fixP->fx_done || !seg->use_rela_p) 18444 md_number_to_chars (buf, value, 2); 18445 break; 18446 18447#ifdef OBJ_ELF 18448 case BFD_RELOC_ARM_TLS_GD32: 18449 case BFD_RELOC_ARM_TLS_LE32: 18450 case 
BFD_RELOC_ARM_TLS_IE32: 18451 case BFD_RELOC_ARM_TLS_LDM32: 18452 case BFD_RELOC_ARM_TLS_LDO32: 18453 S_SET_THREAD_LOCAL (fixP->fx_addsy); 18454 /* fall through */ 18455 18456 case BFD_RELOC_ARM_GOT32: 18457 case BFD_RELOC_ARM_GOTOFF: 18458 case BFD_RELOC_ARM_TARGET2: 18459 if (fixP->fx_done || !seg->use_rela_p) 18460 md_number_to_chars (buf, 0, 4); 18461 break; 18462#endif 18463 18464 case BFD_RELOC_RVA: 18465 case BFD_RELOC_32: 18466 case BFD_RELOC_ARM_TARGET1: 18467 case BFD_RELOC_ARM_ROSEGREL32: 18468 case BFD_RELOC_ARM_SBREL32: 18469 case BFD_RELOC_32_PCREL: 18470#ifdef TE_PE 18471 case BFD_RELOC_32_SECREL: 18472#endif 18473 if (fixP->fx_done || !seg->use_rela_p) 18474#ifdef TE_WINCE 18475 /* For WinCE we only do this for pcrel fixups. */ 18476 if (fixP->fx_done || fixP->fx_pcrel) 18477#endif 18478 md_number_to_chars (buf, value, 4); 18479 break; 18480 18481#ifdef OBJ_ELF 18482 case BFD_RELOC_ARM_PREL31: 18483 if (fixP->fx_done || !seg->use_rela_p) 18484 { 18485 newval = md_chars_to_number (buf, 4) & 0x80000000; 18486 if ((value ^ (value >> 1)) & 0x40000000) 18487 { 18488 as_bad_where (fixP->fx_file, fixP->fx_line, 18489 _("rel31 relocation overflow")); 18490 } 18491 newval |= value & 0x7fffffff; 18492 md_number_to_chars (buf, newval, 4); 18493 } 18494 break; 18495#endif 18496 18497 case BFD_RELOC_ARM_CP_OFF_IMM: 18498 case BFD_RELOC_ARM_T32_CP_OFF_IMM: 18499 if (value < -1023 || value > 1023 || (value & 3)) 18500 as_bad_where (fixP->fx_file, fixP->fx_line, 18501 _("co-processor offset out of range")); 18502 cp_off_common: 18503 sign = value >= 0; 18504 if (value < 0) 18505 value = -value; 18506 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM 18507 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2) 18508 newval = md_chars_to_number (buf, INSN_SIZE); 18509 else 18510 newval = get_thumb32_insn (buf); 18511 newval &= 0xff7fff00; 18512 newval |= (value >> 2) | (sign ? 
INDEX_UP : 0); 18513 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM 18514 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2) 18515 md_number_to_chars (buf, newval, INSN_SIZE); 18516 else 18517 put_thumb32_insn (buf, newval); 18518 break; 18519 18520 case BFD_RELOC_ARM_CP_OFF_IMM_S2: 18521 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2: 18522 if (value < -255 || value > 255) 18523 as_bad_where (fixP->fx_file, fixP->fx_line, 18524 _("co-processor offset out of range")); 18525 value *= 4; 18526 goto cp_off_common; 18527 18528 case BFD_RELOC_ARM_THUMB_OFFSET: 18529 newval = md_chars_to_number (buf, THUMB_SIZE); 18530 /* Exactly what ranges, and where the offset is inserted depends 18531 on the type of instruction, we can establish this from the 18532 top 4 bits. */ 18533 switch (newval >> 12) 18534 { 18535 case 4: /* PC load. */ 18536 /* Thumb PC loads are somewhat odd, bit 1 of the PC is 18537 forced to zero for these loads; md_pcrel_from has already 18538 compensated for this. */ 18539 if (value & 3) 18540 as_bad_where (fixP->fx_file, fixP->fx_line, 18541 _("invalid offset, target not word aligned (0x%08lX)"), 18542 (((unsigned long) fixP->fx_frag->fr_address 18543 + (unsigned long) fixP->fx_where) & ~3) 18544 + (unsigned long) value); 18545 18546 if (value & ~0x3fc) 18547 as_bad_where (fixP->fx_file, fixP->fx_line, 18548 _("invalid offset, value too big (0x%08lX)"), 18549 (long) value); 18550 18551 newval |= value >> 2; 18552 break; 18553 18554 case 9: /* SP load/store. */ 18555 if (value & ~0x3fc) 18556 as_bad_where (fixP->fx_file, fixP->fx_line, 18557 _("invalid offset, value too big (0x%08lX)"), 18558 (long) value); 18559 newval |= value >> 2; 18560 break; 18561 18562 case 6: /* Word load/store. */ 18563 if (value & ~0x7c) 18564 as_bad_where (fixP->fx_file, fixP->fx_line, 18565 _("invalid offset, value too big (0x%08lX)"), 18566 (long) value); 18567 newval |= value << 4; /* 6 - 2. */ 18568 break; 18569 18570 case 7: /* Byte load/store. 
*/ 18571 if (value & ~0x1f) 18572 as_bad_where (fixP->fx_file, fixP->fx_line, 18573 _("invalid offset, value too big (0x%08lX)"), 18574 (long) value); 18575 newval |= value << 6; 18576 break; 18577 18578 case 8: /* Halfword load/store. */ 18579 if (value & ~0x3e) 18580 as_bad_where (fixP->fx_file, fixP->fx_line, 18581 _("invalid offset, value too big (0x%08lX)"), 18582 (long) value); 18583 newval |= value << 5; /* 6 - 1. */ 18584 break; 18585 18586 default: 18587 as_bad_where (fixP->fx_file, fixP->fx_line, 18588 "Unable to process relocation for thumb opcode: %lx", 18589 (unsigned long) newval); 18590 break; 18591 } 18592 md_number_to_chars (buf, newval, THUMB_SIZE); 18593 break; 18594 18595 case BFD_RELOC_ARM_THUMB_ADD: 18596 /* This is a complicated relocation, since we use it for all of 18597 the following immediate relocations: 18598 18599 3bit ADD/SUB 18600 8bit ADD/SUB 18601 9bit ADD/SUB SP word-aligned 18602 10bit ADD PC/SP word-aligned 18603 18604 The type of instruction being processed is encoded in the 18605 instruction field: 18606 18607 0x8000 SUB 18608 0x00F0 Rd 18609 0x000F Rs 18610 */ 18611 newval = md_chars_to_number (buf, THUMB_SIZE); 18612 { 18613 int rd = (newval >> 4) & 0xf; 18614 int rs = newval & 0xf; 18615 int subtract = !!(newval & 0x8000); 18616 18617 /* Check for HI regs, only very restricted cases allowed: 18618 Adjusting SP, and using PC or SP to get an address. */ 18619 if ((rd > 7 && (rd != REG_SP || rs != REG_SP)) 18620 || (rs > 7 && rs != REG_SP && rs != REG_PC)) 18621 as_bad_where (fixP->fx_file, fixP->fx_line, 18622 _("invalid Hi register with immediate")); 18623 18624 /* If value is negative, choose the opposite instruction. 
*/ 18625 if (value < 0) 18626 { 18627 value = -value; 18628 subtract = !subtract; 18629 if (value < 0) 18630 as_bad_where (fixP->fx_file, fixP->fx_line, 18631 _("immediate value out of range")); 18632 } 18633 18634 if (rd == REG_SP) 18635 { 18636 if (value & ~0x1fc) 18637 as_bad_where (fixP->fx_file, fixP->fx_line, 18638 _("invalid immediate for stack address calculation")); 18639 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST; 18640 newval |= value >> 2; 18641 } 18642 else if (rs == REG_PC || rs == REG_SP) 18643 { 18644 if (subtract || value & ~0x3fc) 18645 as_bad_where (fixP->fx_file, fixP->fx_line, 18646 _("invalid immediate for address calculation (value = 0x%08lX)"), 18647 (unsigned long) value); 18648 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP); 18649 newval |= rd << 8; 18650 newval |= value >> 2; 18651 } 18652 else if (rs == rd) 18653 { 18654 if (value & ~0xff) 18655 as_bad_where (fixP->fx_file, fixP->fx_line, 18656 _("immediate value out of range")); 18657 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8; 18658 newval |= (rd << 8) | value; 18659 } 18660 else 18661 { 18662 if (value & ~0x7) 18663 as_bad_where (fixP->fx_file, fixP->fx_line, 18664 _("immediate value out of range")); 18665 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3; 18666 newval |= rd | (rs << 3) | (value << 6); 18667 } 18668 } 18669 md_number_to_chars (buf, newval, THUMB_SIZE); 18670 break; 18671 18672 case BFD_RELOC_ARM_THUMB_IMM: 18673 newval = md_chars_to_number (buf, THUMB_SIZE); 18674 if (value < 0 || value > 255) 18675 as_bad_where (fixP->fx_file, fixP->fx_line, 18676 _("invalid immediate: %ld is too large"), 18677 (long) value); 18678 newval |= value; 18679 md_number_to_chars (buf, newval, THUMB_SIZE); 18680 break; 18681 18682 case BFD_RELOC_ARM_THUMB_SHIFT: 18683 /* 5bit shift value (0..32). LSL cannot take 32. 
*/ 18684 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f; 18685 temp = newval & 0xf800; 18686 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I)) 18687 as_bad_where (fixP->fx_file, fixP->fx_line, 18688 _("invalid shift value: %ld"), (long) value); 18689 /* Shifts of zero must be encoded as LSL. */ 18690 if (value == 0) 18691 newval = (newval & 0x003f) | T_OPCODE_LSL_I; 18692 /* Shifts of 32 are encoded as zero. */ 18693 else if (value == 32) 18694 value = 0; 18695 newval |= value << 6; 18696 md_number_to_chars (buf, newval, THUMB_SIZE); 18697 break; 18698 18699 case BFD_RELOC_VTABLE_INHERIT: 18700 case BFD_RELOC_VTABLE_ENTRY: 18701 fixP->fx_done = 0; 18702 return; 18703 18704 case BFD_RELOC_ARM_MOVW: 18705 case BFD_RELOC_ARM_MOVT: 18706 case BFD_RELOC_ARM_THUMB_MOVW: 18707 case BFD_RELOC_ARM_THUMB_MOVT: 18708 if (fixP->fx_done || !seg->use_rela_p) 18709 { 18710 /* REL format relocations are limited to a 16-bit addend. */ 18711 if (!fixP->fx_done) 18712 { 18713 if (value < -0x1000 || value > 0xffff) 18714 as_bad_where (fixP->fx_file, fixP->fx_line, 18715 _("offset too big")); 18716 } 18717 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT 18718 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT) 18719 { 18720 value >>= 16; 18721 } 18722 18723 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW 18724 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT) 18725 { 18726 newval = get_thumb32_insn (buf); 18727 newval &= 0xfbf08f00; 18728 newval |= (value & 0xf000) << 4; 18729 newval |= (value & 0x0800) << 15; 18730 newval |= (value & 0x0700) << 4; 18731 newval |= (value & 0x00ff); 18732 put_thumb32_insn (buf, newval); 18733 } 18734 else 18735 { 18736 newval = md_chars_to_number (buf, 4); 18737 newval &= 0xfff0f000; 18738 newval |= value & 0x0fff; 18739 newval |= (value & 0xf000) << 4; 18740 md_number_to_chars (buf, newval, 4); 18741 } 18742 } 18743 return; 18744 18745 case BFD_RELOC_ARM_ALU_PC_G0_NC: 18746 case BFD_RELOC_ARM_ALU_PC_G0: 18747 case 
BFD_RELOC_ARM_ALU_PC_G1_NC: 18748 case BFD_RELOC_ARM_ALU_PC_G1: 18749 case BFD_RELOC_ARM_ALU_PC_G2: 18750 case BFD_RELOC_ARM_ALU_SB_G0_NC: 18751 case BFD_RELOC_ARM_ALU_SB_G0: 18752 case BFD_RELOC_ARM_ALU_SB_G1_NC: 18753 case BFD_RELOC_ARM_ALU_SB_G1: 18754 case BFD_RELOC_ARM_ALU_SB_G2: 18755 assert (!fixP->fx_done); 18756 if (!seg->use_rela_p) 18757 { 18758 bfd_vma insn; 18759 bfd_vma encoded_addend; 18760 bfd_vma addend_abs = abs (value); 18761 18762 /* Check that the absolute value of the addend can be 18763 expressed as an 8-bit constant plus a rotation. */ 18764 encoded_addend = encode_arm_immediate (addend_abs); 18765 if (encoded_addend == (unsigned int) FAIL) 18766 as_bad_where (fixP->fx_file, fixP->fx_line, 18767 _("the offset 0x%08lX is not representable"), 18768 (unsigned long) addend_abs); 18769 18770 /* Extract the instruction. */ 18771 insn = md_chars_to_number (buf, INSN_SIZE); 18772 18773 /* If the addend is positive, use an ADD instruction. 18774 Otherwise use a SUB. Take care not to destroy the S bit. */ 18775 insn &= 0xff1fffff; 18776 if (value < 0) 18777 insn |= 1 << 22; 18778 else 18779 insn |= 1 << 23; 18780 18781 /* Place the encoded addend into the first 12 bits of the 18782 instruction. */ 18783 insn &= 0xfffff000; 18784 insn |= encoded_addend; 18785 18786 /* Update the instruction. */ 18787 md_number_to_chars (buf, insn, INSN_SIZE); 18788 } 18789 break; 18790 18791 case BFD_RELOC_ARM_LDR_PC_G0: 18792 case BFD_RELOC_ARM_LDR_PC_G1: 18793 case BFD_RELOC_ARM_LDR_PC_G2: 18794 case BFD_RELOC_ARM_LDR_SB_G0: 18795 case BFD_RELOC_ARM_LDR_SB_G1: 18796 case BFD_RELOC_ARM_LDR_SB_G2: 18797 assert (!fixP->fx_done); 18798 if (!seg->use_rela_p) 18799 { 18800 bfd_vma insn; 18801 bfd_vma addend_abs = abs (value); 18802 18803 /* Check that the absolute value of the addend can be 18804 encoded in 12 bits. 
*/ 18805 if (addend_abs >= 0x1000) 18806 as_bad_where (fixP->fx_file, fixP->fx_line, 18807 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"), 18808 (unsigned long) addend_abs); 18809 18810 /* Extract the instruction. */ 18811 insn = md_chars_to_number (buf, INSN_SIZE); 18812 18813 /* If the addend is negative, clear bit 23 of the instruction. 18814 Otherwise set it. */ 18815 if (value < 0) 18816 insn &= ~(1 << 23); 18817 else 18818 insn |= 1 << 23; 18819 18820 /* Place the absolute value of the addend into the first 12 bits 18821 of the instruction. */ 18822 insn &= 0xfffff000; 18823 insn |= addend_abs; 18824 18825 /* Update the instruction. */ 18826 md_number_to_chars (buf, insn, INSN_SIZE); 18827 } 18828 break; 18829 18830 case BFD_RELOC_ARM_LDRS_PC_G0: 18831 case BFD_RELOC_ARM_LDRS_PC_G1: 18832 case BFD_RELOC_ARM_LDRS_PC_G2: 18833 case BFD_RELOC_ARM_LDRS_SB_G0: 18834 case BFD_RELOC_ARM_LDRS_SB_G1: 18835 case BFD_RELOC_ARM_LDRS_SB_G2: 18836 assert (!fixP->fx_done); 18837 if (!seg->use_rela_p) 18838 { 18839 bfd_vma insn; 18840 bfd_vma addend_abs = abs (value); 18841 18842 /* Check that the absolute value of the addend can be 18843 encoded in 8 bits. */ 18844 if (addend_abs >= 0x100) 18845 as_bad_where (fixP->fx_file, fixP->fx_line, 18846 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"), 18847 (unsigned long) addend_abs); 18848 18849 /* Extract the instruction. */ 18850 insn = md_chars_to_number (buf, INSN_SIZE); 18851 18852 /* If the addend is negative, clear bit 23 of the instruction. 18853 Otherwise set it. */ 18854 if (value < 0) 18855 insn &= ~(1 << 23); 18856 else 18857 insn |= 1 << 23; 18858 18859 /* Place the first four bits of the absolute value of the addend 18860 into the first 4 bits of the instruction, and the remaining 18861 four into bits 8 .. 11. */ 18862 insn &= 0xfffff0f0; 18863 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4); 18864 18865 /* Update the instruction. 
*/ 18866 md_number_to_chars (buf, insn, INSN_SIZE); 18867 } 18868 break; 18869 18870 case BFD_RELOC_ARM_LDC_PC_G0: 18871 case BFD_RELOC_ARM_LDC_PC_G1: 18872 case BFD_RELOC_ARM_LDC_PC_G2: 18873 case BFD_RELOC_ARM_LDC_SB_G0: 18874 case BFD_RELOC_ARM_LDC_SB_G1: 18875 case BFD_RELOC_ARM_LDC_SB_G2: 18876 assert (!fixP->fx_done); 18877 if (!seg->use_rela_p) 18878 { 18879 bfd_vma insn; 18880 bfd_vma addend_abs = abs (value); 18881 18882 /* Check that the absolute value of the addend is a multiple of 18883 four and, when divided by four, fits in 8 bits. */ 18884 if (addend_abs & 0x3) 18885 as_bad_where (fixP->fx_file, fixP->fx_line, 18886 _("bad offset 0x%08lX (must be word-aligned)"), 18887 (unsigned long) addend_abs); 18888 18889 if ((addend_abs >> 2) > 0xff) 18890 as_bad_where (fixP->fx_file, fixP->fx_line, 18891 _("bad offset 0x%08lX (must be an 8-bit number of words)"), 18892 (unsigned long) addend_abs); 18893 18894 /* Extract the instruction. */ 18895 insn = md_chars_to_number (buf, INSN_SIZE); 18896 18897 /* If the addend is negative, clear bit 23 of the instruction. 18898 Otherwise set it. */ 18899 if (value < 0) 18900 insn &= ~(1 << 23); 18901 else 18902 insn |= 1 << 23; 18903 18904 /* Place the addend (divided by four) into the first eight 18905 bits of the instruction. */ 18906 insn &= 0xfffffff0; 18907 insn |= addend_abs >> 2; 18908 18909 /* Update the instruction. */ 18910 md_number_to_chars (buf, insn, INSN_SIZE); 18911 } 18912 break; 18913 18914 case BFD_RELOC_UNUSED: 18915 default: 18916 as_bad_where (fixP->fx_file, fixP->fx_line, 18917 _("bad relocation fixup type (%d)"), fixP->fx_r_type); 18918 } 18919} 18920 18921/* Translate internal representation of relocation info to BFD target 18922 format. 
*/

arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;

  reloc = xmalloc (sizeof (arelent));

  reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  if (fixp->fx_pcrel)
    {
      if (section->use_rela_p)
	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
	fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_8_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_16_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_32_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_THUMB_PCREL_BLX:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then a literal has
	 been referenced across a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("literal referenced across section boundary"));
      return NULL;

#ifdef OBJ_ELF
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_LDM32:
      /* BFD will include the symbol's address in the addend.
	 But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;
#endif

    case BFD_RELOC_ARM_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      if (section->use_rela_p)
	{
	  code = fixp->fx_r_type;
	  break;
	}

      if (fixp->fx_addsy != NULL
	  && !S_IS_DEFINED (fixp->fx_addsy)
	  && S_IS_LOCAL (fixp->fx_addsy))
	{
	  as_bad_where (fixp->fx_file, fixp->fx_line,
			_("undefined local label `%s'"),
			S_GET_NAME (fixp->fx_addsy));
	  return NULL;
	}

      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;

    default:
      {
	/* Build a human-readable name for the diagnostic below.  */
	char * type;

	switch (fixp->fx_r_type)
	  {
	  case BFD_RELOC_NONE:		     type = "NONE";	    break;
	  case BFD_RELOC_ARM_OFFSET_IMM8:    type = "OFFSET_IMM8";  break;
	  case BFD_RELOC_ARM_SHIFT_IMM:	     type = "SHIFT_IMM";    break;
	  case BFD_RELOC_ARM_SMC:	     type = "SMC";	    break;
	  case BFD_RELOC_ARM_SWI:	     type = "SWI";	    break;
	  case BFD_RELOC_ARM_MULTI:	     type = "MULTI";	    break;
	  case BFD_RELOC_ARM_CP_OFF_IMM:     type = "CP_OFF_IMM";   break;
	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_ADD:	     type = "THUMB_ADD";    break;
	  case BFD_RELOC_ARM_THUMB_SHIFT:    type = "THUMB_SHIFT";  break;
	  case BFD_RELOC_ARM_THUMB_IMM:	     type = "THUMB_IMM";    break;
	  case BFD_RELOC_ARM_THUMB_OFFSET:   type = "THUMB_OFFSET"; break;
	  default:			     type = _("<unknown>"); break;
	  }
	as_bad_where (fixp->fx_file, fixp->fx_line,
		      _("cannot represent %s relocation in this object file format"),
		      type);
	return NULL;
      }
    }

#ifdef OBJ_ELF
  /* A 32-bit reference to the GOT symbol itself is rewritten as GOTPC.  */
  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_ARM_GOTPC;
      reloc->addend = fixp->fx_offset = reloc->address;
    }
#endif

  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);

  if (reloc->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent %s relocation in this object file format"),
		    bfd_get_reloc_code_name (code));
      return NULL;
    }

  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
     vtable entry to be used in the relocation's section offset.  */
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    reloc->address = fixp->fx_offset;

  return reloc;
}

/* This fix_new is called by cons via TC_CONS_FIX_NEW.  */

void
cons_fix_new_arm (fragS *	frag,
		  int		where,
		  int		size,
		  expressionS * exp)
{
  bfd_reloc_code_real_type type;
  int pcrel = 0;

  /* Pick a reloc.
     FIXME: @@ Should look at CPU word size.  */
  switch (size)
    {
    case 1:
      type = BFD_RELOC_8;
      break;
    case 2:
      type = BFD_RELOC_16;
      break;
    case 4:
    default:
      type = BFD_RELOC_32;
      break;
    case 8:
      type = BFD_RELOC_64;
      break;
    }

#ifdef TE_PE
  if (exp->X_op == O_secrel)
    {
      exp->X_op = O_symbol;
      type = BFD_RELOC_32_SECREL;
    }
#endif

  fix_new_exp (frag, where, (int) size, exp, pcrel, type);
}

#if defined OBJ_COFF || defined OBJ_ELF
void
arm_validate_fix (fixS * fixP)
{
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
      && fixP->fx_addsy != NULL
      && S_IS_DEFINED (fixP->fx_addsy)
      && ! THUMB_IS_FUNC (fixP->fx_addsy))
    {
      fixP->fx_addsy = find_real_start (fixP->fx_addsy);
    }
}
#endif

/* Decide whether a fixup must be emitted as a relocation for the linker
   (non-zero) rather than being resolved by the assembler (zero).  */

int
arm_force_relocation (struct fix * fixp)
{
#if defined (OBJ_COFF) && defined (TE_PE)
  if (fixp->fx_r_type == BFD_RELOC_RVA)
    return 1;
#endif

  /* Resolve these relocations even if the symbol is extern or weak.  */
  if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12)
    return 0;

  /* Always leave these relocations for the linker.  */
  if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
       && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
      || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
    return 1;

  /* Always generate relocations against function symbols.  */
  if (fixp->fx_r_type == BFD_RELOC_32
      && fixp->fx_addsy
      && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
    return 1;

  return generic_force_reloc (fixp);
}

#if defined (OBJ_ELF) || defined (OBJ_COFF)
/* Relocations against function names must be left unadjusted,
   so that the linker can use this information to generate interworking
   stubs.  The MIPS version of this function
   also prevents relocations that are mips-16 specific, but I do not
   know why it does this.

   FIXME:
   There is one other problem that ought to be addressed here, but
   which currently is not: Taking the address of a label (rather
   than a function) and then later jumping to that address.  Such
   addresses also ought to have their bottom bit set (assuming that
   they reside in Thumb code), but at the moment they will not.  */

bfd_boolean
arm_fix_adjustable (fixS * fixP)
{
  if (fixP->fx_addsy == NULL)
    return 1;

  /* Preserve relocations against symbols with function type.  */
  if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
    return 0;

  if (THUMB_IS_FUNC (fixP->fx_addsy)
      && fixP->fx_subsy == NULL)
    return 0;

  /* We need the symbol name for the VTABLE entries.  */
  if (	 fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
      || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    return 0;

  /* Don't allow symbols to be discarded on GOT related relocs.  */
  if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
      || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
    return 0;

  /* Similarly for group relocations.  */
  if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
       && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
      || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
    return 0;

  return 1;
}
#endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */

#ifdef OBJ_ELF

/* Return the ELF target-format name for the configured variant.  */

const char *
elf32_arm_target_format (void)
{
#ifdef TE_SYMBIAN
  return (target_big_endian
	  ? "elf32-bigarm-symbian"
	  : "elf32-littlearm-symbian");
#elif defined (TE_VXWORKS)
  return (target_big_endian
	  ? "elf32-bigarm-vxworks"
	  : "elf32-littlearm-vxworks");
#else
  if (target_big_endian)
    return "elf32-bigarm";
  else
    return "elf32-littlearm";
#endif
}

void
armelf_frob_symbol (symbolS * symp,
		    int *     puntp)
{
  elf_frob_symbol (symp, puntp);
}
#endif

/* MD interface: Finalization.	*/

/* A good place to do this, although this was probably not intended
   for this kind of use.  We need to dump the literal pool before
   references are made to a null symbol pointer.  */

void
arm_cleanup (void)
{
  literal_pool * pool;

  for (pool = list_of_pools; pool; pool = pool->next)
    {
      /* Put it at the end of the relevant section.  */
      subseg_set (pool->section, pool->sub_section);
#ifdef OBJ_ELF
      arm_elf_change_section ();
#endif
      s_ltorg (0);
    }
}

/* Adjust the symbol table.  This marks Thumb symbols as distinct from
   ARM ones.  */

void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
						BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_TFUNC);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }
#endif
}

/* MD interface: Initialization.  */

/* Pre-convert the textual floating-point constants into internal
   flonum form; aborts if any of them fails to parse.  */

static void
set_constant_flonums (void)
{
  int i;

  for (i = 0; i < NUM_FLOAT_VALS; i++)
    if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
      abort ();
}

/* Auto-select Thumb mode if it's the only available instruction set for the
   given architecture.	*/

static void
autoselect_thumb_from_cpu_variant (void)
{
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    opcode_select (16);
}

/* Called once by GAS at start-up: build the opcode/operand hash tables,
   resolve the CPU/FPU selection from the command line, set the BFD
   private flags and record the machine number.  */

void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  if (	 (arm_ops_hsh = hash_new ()) == NULL
      || (arm_cond_hsh = hash_new ()) == NULL
      || (arm_shift_hsh = hash_new ()) == NULL
      || (arm_psr_hsh = hash_new ()) == NULL
      || (arm_v7m_psr_hsh = hash_new ()) == NULL
      || (arm_reg_hsh = hash_new ()) == NULL
      || (arm_reloc_hsh = hash_new ()) == NULL
      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
    hash_insert (arm_ops_hsh, insns[i].template, (PTR) (insns + i));
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_cond_hsh, conds[i].template, (PTR) (conds + i));
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
    hash_insert (arm_shift_hsh, shift_names[i].name, (PTR) (shift_names + i));
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_psr_hsh, psrs[i].template, (PTR) (psrs + i));
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template, (PTR) (v7m_psrs + i));
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
    hash_insert (arm_reg_hsh, reg_names[i].name, (PTR) (reg_names + i));
  for (i = 0;
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
       i++)
    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template,
		 (PTR) (barrier_opt_names + i));
#ifdef OBJ_ELF
  for (i = 0; i < sizeof (reloc_names) / sizeof (struct reloc_entry); i++)
    hash_insert (arm_reloc_hsh, reloc_names[i].name, (PTR) (reloc_names + i));
#endif

  set_constant_flonums ();

  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Use of legacy options with new options is faulted.  */
  if (legacy_cpu)
    {
      if (mcpu_cpu_opt || march_cpu_opt)
	as_bad (_("use of old and new-style options to set CPU type"));

      mcpu_cpu_opt = legacy_cpu;
    }
  else if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (legacy_fpu)
    {
      if (mfpu_opt)
	as_bad (_("use of old and new-style options to set FPU type"));

      mfpu_opt = legacy_fpu;
    }
  else if (!mfpu_opt)
    {
#if !(defined (TE_LINUX) || defined (TE_NetBSD) || defined (TE_VXWORKS))
      /* Some environments specify a default FPU.  If they don't, infer it
	 from the processor.  */
      if (mcpu_fpu_opt)
	mfpu_opt = mcpu_fpu_opt;
      else
	mfpu_opt = march_fpu_opt;
#else
      mfpu_opt = &fpu_default;
#endif
    }

  if (!mfpu_opt)
    {
      if (mcpu_cpu_opt != NULL)
	mfpu_opt = &fpu_default;
      else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
	mfpu_opt = &fpu_arch_vfp_v2;
      else
	mfpu_opt = &fpu_arch_fpa;
    }

#ifdef CPU_DEFAULT
  if (!mcpu_cpu_opt)
    {
      mcpu_cpu_opt = &cpu_default;
      selected_cpu = cpu_default;
    }
#else
  if (mcpu_cpu_opt)
    selected_cpu = *mcpu_cpu_opt;
  else
    mcpu_cpu_opt = &arm_arch_any;
#endif

  ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);

  autoselect_thumb_from_cpu_variant ();

  arm_arch_used = thumb_arch_used = arm_arch_none;

#if defined OBJ_COFF || defined OBJ_ELF
  {
    unsigned int flags = 0;

#if defined OBJ_ELF
    flags = meabi_flags;

    switch (meabi_flags)
      {
      case EF_ARM_EABI_UNKNOWN:
#endif
	/* Set the flags in the private structure.  */
	if (uses_apcs_26)      flags |= F_APCS26;
	if (support_interwork) flags |= F_INTERWORK;
	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
	if (pic_code)	       flags |= F_PIC;
	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
	  flags |= F_SOFT_FLOAT;

	switch (mfloat_abi_opt)
	  {
	  case ARM_FLOAT_ABI_SOFT:
	  case ARM_FLOAT_ABI_SOFTFP:
	    flags |= F_SOFT_FLOAT;
	    break;

	  case ARM_FLOAT_ABI_HARD:
	    if (flags & F_SOFT_FLOAT)
	      as_bad (_("hard-float conflicts with specified fpu"));
	    break;
	  }

	/* Using pure-endian doubles (even if soft-float).  */
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	  flags |= F_VFP_FLOAT;

#if defined OBJ_ELF
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
	  flags |= EF_ARM_MAVERICK_FLOAT;
	break;

      case EF_ARM_EABI_VER4:
      case EF_ARM_EABI_VER5:
	/* No additional flags to set.	*/
	break;

      default:
	abort ();
      }
#endif
    bfd_set_private_flags (stdoutput, flags);

    /* We have run out of flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.	*/
    if (atpcs)
      {
	asection * sec;

	sec = bfd_make_section (stdoutput, ".arm.atpcs");

	if (sec != NULL)
	  {
	    bfd_set_section_flags
	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
	    bfd_set_section_size (stdoutput, sec, 0);
	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
	  }
      }
  }
#endif

  /* Record the CPU type as well.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
    mach = bfd_mach_arm_iWMMXt2;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
    mach = bfd_mach_arm_iWMMXt;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
    mach = bfd_mach_arm_XScale;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
    mach = bfd_mach_arm_ep9312;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
    mach = bfd_mach_arm_5TE;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_5T;
      else
	mach = bfd_mach_arm_5;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_4T;
      else
	mach = bfd_mach_arm_4;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
    mach = bfd_mach_arm_3M;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
    mach = bfd_mach_arm_3;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
    mach = bfd_mach_arm_2a;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
    mach = bfd_mach_arm_2;
  else
    mach = bfd_mach_arm_unknown;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}

/* Command line processing.  */

/* md_parse_option
      Invocation line includes a switch not recognized by the base assembler.
      See if it's a processor-specific option.

      This routine is somewhat complicated by the need for backwards
      compatibility (since older releases of gcc can't be changed).
      The new options try to make the interface as compatible as
      possible with GCC.
      New options (supported) are:

	      -mcpu=<cpu name>		 Assemble for selected processor
	      -march=<architecture name> Assemble for selected architecture
	      -mfpu=<fpu architecture>	 Assemble for selected FPU.
	      -EB/-mbig-endian		 Big-endian
	      -EL/-mlittle-endian	 Little-endian
	      -k			 Generate PIC code
	      -mthumb			 Start in Thumb mode
	      -mthumb-interwork		 Code supports ARM/Thumb interworking

      For now we will also provide support for:

	      -mapcs-32			 32-bit Program counter
	      -mapcs-26			 26-bit Program counter
	      -mapcs-float		 Floats passed in FP registers
	      -mapcs-reentrant		 Reentrant code
	      -matpcs
      (sometime these will probably be replaced with -mapcs=<list of options>
      and -matpcs=<list of options>)

      The remaining options are only supported for backwards compatibility.
      Cpu variants, the arm part is optional:
	      -m[arm]1			 Currently not supported.
	      -m[arm]2, -m[arm]250	 Arm 2 and Arm 250 processor
	      -m[arm]3			 Arm 3 processor
	      -m[arm]6[xx],		 Arm 6 processors
	      -m[arm]7[xx][t][[d]m]	 Arm 7 processors
	      -m[arm]8[10]		 Arm 8 processors
	      -m[arm]9[20][tdmi]	 Arm 9 processors
	      -mstrongarm[110[0]]	 StrongARM processors
	      -mxscale			 XScale processors
	      -m[arm]v[2345[t[e]]]	 Arm architectures
	      -mall			 All (except the ARM1)
      FP variants:
	      -mfpa10, -mfpa11		 FPA10 and 11 co-processor instructions
	      -mfpe-old			 (No float load/store multiples)
	      -mvfpxd			 VFP Single precision
	      -mvfp			 All VFP
	      -mno-fpu			 Disable all floating point instructions

      The following CPU names are recognized:
      arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
      arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
      arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
      arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
      arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
      arm10t arm10e, arm1020t,
arm1020e, arm10200e, 19735 strongarm, strongarm110, strongarm1100, strongarm1110, xscale. 19736 19737 */ 19738 19739const char * md_shortopts = "m:k"; 19740 19741#ifdef ARM_BI_ENDIAN 19742#define OPTION_EB (OPTION_MD_BASE + 0) 19743#define OPTION_EL (OPTION_MD_BASE + 1) 19744#else 19745#if TARGET_BYTES_BIG_ENDIAN 19746#define OPTION_EB (OPTION_MD_BASE + 0) 19747#else 19748#define OPTION_EL (OPTION_MD_BASE + 1) 19749#endif 19750#endif 19751 19752struct option md_longopts[] = 19753{ 19754#ifdef OPTION_EB 19755 {"EB", no_argument, NULL, OPTION_EB}, 19756#endif 19757#ifdef OPTION_EL 19758 {"EL", no_argument, NULL, OPTION_EL}, 19759#endif 19760 {NULL, no_argument, NULL, 0} 19761}; 19762 19763size_t md_longopts_size = sizeof (md_longopts); 19764 19765struct arm_option_table 19766{ 19767 char *option; /* Option name to match. */ 19768 char *help; /* Help information. */ 19769 int *var; /* Variable to change. */ 19770 int value; /* What to change it to. */ 19771 char *deprecated; /* If non-null, print this message. */ 19772}; 19773 19774struct arm_option_table arm_opts[] = 19775{ 19776 {"k", N_("generate PIC code"), &pic_code, 1, NULL}, 19777 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL}, 19778 {"mthumb-interwork", N_("support ARM/Thumb interworking"), 19779 &support_interwork, 1, NULL}, 19780 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL}, 19781 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL}, 19782 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float, 19783 1, NULL}, 19784 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL}, 19785 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL}, 19786 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL}, 19787 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0, 19788 NULL}, 19789 19790 /* These are recognized by the assembler, but have no affect on code. 
*/ 19791 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL}, 19792 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL}, 19793 {NULL, NULL, NULL, 0, NULL} 19794}; 19795 19796struct arm_legacy_option_table 19797{ 19798 char *option; /* Option name to match. */ 19799 const arm_feature_set **var; /* Variable to change. */ 19800 const arm_feature_set value; /* What to change it to. */ 19801 char *deprecated; /* If non-null, print this message. */ 19802}; 19803 19804const struct arm_legacy_option_table arm_legacy_opts[] = 19805{ 19806 /* DON'T add any new processors to this list -- we want the whole list 19807 to go away... Add them to the processors table instead. */ 19808 {"marm1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")}, 19809 {"m1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")}, 19810 {"marm2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")}, 19811 {"m2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")}, 19812 {"marm250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")}, 19813 {"m250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")}, 19814 {"marm3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")}, 19815 {"m3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")}, 19816 {"marm6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")}, 19817 {"m6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")}, 19818 {"marm600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")}, 19819 {"m600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")}, 19820 {"marm610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")}, 19821 {"m610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")}, 19822 {"marm620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")}, 19823 {"m620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")}, 19824 {"marm7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")}, 19825 {"m7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")}, 19826 {"marm70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")}, 19827 {"m70", &legacy_cpu, ARM_ARCH_V3, N_("use 
-mcpu=arm70")}, 19828 {"marm700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")}, 19829 {"m700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")}, 19830 {"marm700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")}, 19831 {"m700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")}, 19832 {"marm710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")}, 19833 {"m710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")}, 19834 {"marm710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")}, 19835 {"m710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")}, 19836 {"marm720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")}, 19837 {"m720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")}, 19838 {"marm7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")}, 19839 {"m7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")}, 19840 {"marm7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")}, 19841 {"m7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")}, 19842 {"marm7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")}, 19843 {"m7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")}, 19844 {"marm7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")}, 19845 {"m7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")}, 19846 {"marm7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")}, 19847 {"m7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")}, 19848 {"marm7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")}, 19849 {"m7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")}, 19850 {"marm7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")}, 19851 {"m7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")}, 19852 {"marm7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")}, 19853 {"m7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")}, 19854 {"marm7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")}, 19855 {"m7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")}, 19856 {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use 
-mcpu=arm7tdmi")}, 19857 {"m7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")}, 19858 {"marm710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")}, 19859 {"m710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")}, 19860 {"marm720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")}, 19861 {"m720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")}, 19862 {"marm740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")}, 19863 {"m740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")}, 19864 {"marm8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")}, 19865 {"m8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")}, 19866 {"marm810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")}, 19867 {"m810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")}, 19868 {"marm9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")}, 19869 {"m9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")}, 19870 {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")}, 19871 {"m9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")}, 19872 {"marm920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")}, 19873 {"m920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")}, 19874 {"marm940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")}, 19875 {"m940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")}, 19876 {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")}, 19877 {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4, 19878 N_("use -mcpu=strongarm110")}, 19879 {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4, 19880 N_("use -mcpu=strongarm1100")}, 19881 {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4, 19882 N_("use -mcpu=strongarm1110")}, 19883 {"mxscale", &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")}, 19884 {"miwmmxt", &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")}, 19885 {"mall", &legacy_cpu, ARM_ANY, N_("use -mcpu=all")}, 19886 19887 /* Architecture variants -- don't add any more to this list either. 
*/ 19888 {"mv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")}, 19889 {"marmv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")}, 19890 {"mv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")}, 19891 {"marmv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")}, 19892 {"mv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")}, 19893 {"marmv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")}, 19894 {"mv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")}, 19895 {"marmv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")}, 19896 {"mv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")}, 19897 {"marmv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")}, 19898 {"mv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")}, 19899 {"marmv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")}, 19900 {"mv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")}, 19901 {"marmv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")}, 19902 {"mv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")}, 19903 {"marmv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")}, 19904 {"mv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")}, 19905 {"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")}, 19906 19907 /* Floating point variants -- don't add any more to this list either. */ 19908 {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")}, 19909 {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")}, 19910 {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")}, 19911 {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE, 19912 N_("use either -mfpu=softfpa or -mfpu=softvfp")}, 19913 19914 {NULL, NULL, ARM_ARCH_NONE, NULL} 19915}; 19916 19917struct arm_cpu_option_table 19918{ 19919 char *name; 19920 const arm_feature_set value; 19921 /* For some CPUs we assume an FPU unless the user explicitly sets 19922 -mfpu=... 
*/ 19923 const arm_feature_set default_fpu; 19924 /* The canonical name of the CPU, or NULL to use NAME converted to upper 19925 case. */ 19926 const char *canonical_name; 19927}; 19928 19929/* This list should, at a minimum, contain all the cpu names 19930 recognized by GCC. */ 19931static const struct arm_cpu_option_table arm_cpus[] = 19932{ 19933 {"all", ARM_ANY, FPU_ARCH_FPA, NULL}, 19934 {"arm1", ARM_ARCH_V1, FPU_ARCH_FPA, NULL}, 19935 {"arm2", ARM_ARCH_V2, FPU_ARCH_FPA, NULL}, 19936 {"arm250", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL}, 19937 {"arm3", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL}, 19938 {"arm6", ARM_ARCH_V3, FPU_ARCH_FPA, NULL}, 19939 {"arm60", ARM_ARCH_V3, FPU_ARCH_FPA, NULL}, 19940 {"arm600", ARM_ARCH_V3, FPU_ARCH_FPA, NULL}, 19941 {"arm610", ARM_ARCH_V3, FPU_ARCH_FPA, NULL}, 19942 {"arm620", ARM_ARCH_V3, FPU_ARCH_FPA, NULL}, 19943 {"arm7", ARM_ARCH_V3, FPU_ARCH_FPA, NULL}, 19944 {"arm7m", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL}, 19945 {"arm7d", ARM_ARCH_V3, FPU_ARCH_FPA, NULL}, 19946 {"arm7dm", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL}, 19947 {"arm7di", ARM_ARCH_V3, FPU_ARCH_FPA, NULL}, 19948 {"arm7dmi", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL}, 19949 {"arm70", ARM_ARCH_V3, FPU_ARCH_FPA, NULL}, 19950 {"arm700", ARM_ARCH_V3, FPU_ARCH_FPA, NULL}, 19951 {"arm700i", ARM_ARCH_V3, FPU_ARCH_FPA, NULL}, 19952 {"arm710", ARM_ARCH_V3, FPU_ARCH_FPA, NULL}, 19953 {"arm710t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL}, 19954 {"arm720", ARM_ARCH_V3, FPU_ARCH_FPA, NULL}, 19955 {"arm720t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL}, 19956 {"arm740t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL}, 19957 {"arm710c", ARM_ARCH_V3, FPU_ARCH_FPA, NULL}, 19958 {"arm7100", ARM_ARCH_V3, FPU_ARCH_FPA, NULL}, 19959 {"arm7500", ARM_ARCH_V3, FPU_ARCH_FPA, NULL}, 19960 {"arm7500fe", ARM_ARCH_V3, FPU_ARCH_FPA, NULL}, 19961 {"arm7t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL}, 19962 {"arm7tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL}, 19963 {"arm7tdmi-s", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL}, 19964 {"arm8", ARM_ARCH_V4, FPU_ARCH_FPA, NULL}, 19965 {"arm810", 
ARM_ARCH_V4, FPU_ARCH_FPA, NULL}, 19966 {"strongarm", ARM_ARCH_V4, FPU_ARCH_FPA, NULL}, 19967 {"strongarm1", ARM_ARCH_V4, FPU_ARCH_FPA, NULL}, 19968 {"strongarm110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL}, 19969 {"strongarm1100", ARM_ARCH_V4, FPU_ARCH_FPA, NULL}, 19970 {"strongarm1110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL}, 19971 {"arm9", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL}, 19972 {"arm920", ARM_ARCH_V4T, FPU_ARCH_FPA, "ARM920T"}, 19973 {"arm920t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL}, 19974 {"arm922t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL}, 19975 {"arm940t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL}, 19976 {"arm9tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL}, 19977 /* For V5 or later processors we default to using VFP; but the user 19978 should really set the FPU type explicitly. */ 19979 {"arm9e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL}, 19980 {"arm9e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL}, 19981 {"arm926ej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"}, 19982 {"arm926ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"}, 19983 {"arm926ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL}, 19984 {"arm946e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL}, 19985 {"arm946e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM946E-S"}, 19986 {"arm946e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL}, 19987 {"arm966e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL}, 19988 {"arm966e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM966E-S"}, 19989 {"arm966e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL}, 19990 {"arm968e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL}, 19991 {"arm10t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL}, 19992 {"arm10tdmi", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL}, 19993 {"arm10e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL}, 19994 {"arm1020", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM1020E"}, 19995 {"arm1020t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL}, 19996 {"arm1020e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL}, 19997 {"arm1022e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL}, 19998 {"arm1026ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM1026EJ-S"}, 19999 {"arm1026ej-s", 
ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL}, 20000 {"arm1136js", ARM_ARCH_V6, FPU_NONE, "ARM1136J-S"}, 20001 {"arm1136j-s", ARM_ARCH_V6, FPU_NONE, NULL}, 20002 {"arm1136jfs", ARM_ARCH_V6, FPU_ARCH_VFP_V2, "ARM1136JF-S"}, 20003 {"arm1136jf-s", ARM_ARCH_V6, FPU_ARCH_VFP_V2, NULL}, 20004 {"mpcore", ARM_ARCH_V6K, FPU_ARCH_VFP_V2, NULL}, 20005 {"mpcorenovfp", ARM_ARCH_V6K, FPU_NONE, NULL}, 20006 {"arm1156t2-s", ARM_ARCH_V6T2, FPU_NONE, NULL}, 20007 {"arm1156t2f-s", ARM_ARCH_V6T2, FPU_ARCH_VFP_V2, NULL}, 20008 {"arm1176jz-s", ARM_ARCH_V6ZK, FPU_NONE, NULL}, 20009 {"arm1176jzf-s", ARM_ARCH_V6ZK, FPU_ARCH_VFP_V2, NULL}, 20010 {"cortex-a8", ARM_ARCH_V7A, ARM_FEATURE(0, FPU_VFP_V3 20011 | FPU_NEON_EXT_V1), 20012 NULL}, 20013 {"cortex-a9", ARM_ARCH_V7A, ARM_FEATURE(0, FPU_VFP_V3 20014 | FPU_NEON_EXT_V1), 20015 NULL}, 20016 {"cortex-r4", ARM_ARCH_V7R, FPU_NONE, NULL}, 20017 {"cortex-m3", ARM_ARCH_V7M, FPU_NONE, NULL}, 20018 /* ??? XSCALE is really an architecture. */ 20019 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL}, 20020 /* ??? iwmmxt is not a processor. */ 20021 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL}, 20022 {"iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL}, 20023 {"i80200", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL}, 20024 /* Maverick */ 20025 {"ep9312", ARM_FEATURE(ARM_AEXT_V4T, ARM_CEXT_MAVERICK), FPU_ARCH_MAVERICK, "ARM920T"}, 20026 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL} 20027}; 20028 20029struct arm_arch_option_table 20030{ 20031 char *name; 20032 const arm_feature_set value; 20033 const arm_feature_set default_fpu; 20034}; 20035 20036/* This list should, at a minimum, contain all the architecture names 20037 recognized by GCC. 
*/ 20038static const struct arm_arch_option_table arm_archs[] = 20039{ 20040 {"all", ARM_ANY, FPU_ARCH_FPA}, 20041 {"armv1", ARM_ARCH_V1, FPU_ARCH_FPA}, 20042 {"armv2", ARM_ARCH_V2, FPU_ARCH_FPA}, 20043 {"armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA}, 20044 {"armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA}, 20045 {"armv3", ARM_ARCH_V3, FPU_ARCH_FPA}, 20046 {"armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA}, 20047 {"armv4", ARM_ARCH_V4, FPU_ARCH_FPA}, 20048 {"armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA}, 20049 {"armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA}, 20050 {"armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA}, 20051 {"armv5", ARM_ARCH_V5, FPU_ARCH_VFP}, 20052 {"armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP}, 20053 {"armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP}, 20054 {"armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP}, 20055 {"armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP}, 20056 {"armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP}, 20057 {"armv6", ARM_ARCH_V6, FPU_ARCH_VFP}, 20058 {"armv6j", ARM_ARCH_V6, FPU_ARCH_VFP}, 20059 {"armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP}, 20060 {"armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP}, 20061 {"armv6zk", ARM_ARCH_V6ZK, FPU_ARCH_VFP}, 20062 {"armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP}, 20063 {"armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP}, 20064 {"armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP}, 20065 {"armv6zkt2", ARM_ARCH_V6ZKT2, FPU_ARCH_VFP}, 20066 {"armv7", ARM_ARCH_V7, FPU_ARCH_VFP}, 20067 /* The official spelling of the ARMv7 profile variants is the dashed form. 20068 Accept the non-dashed form for compatibility with old toolchains. 
*/ 20069 {"armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP}, 20070 {"armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP}, 20071 {"armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP}, 20072 {"armv7-a", ARM_ARCH_V7A, FPU_ARCH_VFP}, 20073 {"armv7-r", ARM_ARCH_V7R, FPU_ARCH_VFP}, 20074 {"armv7-m", ARM_ARCH_V7M, FPU_ARCH_VFP}, 20075 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP}, 20076 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP}, 20077 {"iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP}, 20078 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE} 20079}; 20080 20081/* ISA extensions in the co-processor space. */ 20082struct arm_option_cpu_value_table 20083{ 20084 char *name; 20085 const arm_feature_set value; 20086}; 20087 20088static const struct arm_option_cpu_value_table arm_extensions[] = 20089{ 20090 {"maverick", ARM_FEATURE (0, ARM_CEXT_MAVERICK)}, 20091 {"xscale", ARM_FEATURE (0, ARM_CEXT_XSCALE)}, 20092 {"iwmmxt", ARM_FEATURE (0, ARM_CEXT_IWMMXT)}, 20093 {"iwmmxt2", ARM_FEATURE (0, ARM_CEXT_IWMMXT2)}, 20094 {NULL, ARM_ARCH_NONE} 20095}; 20096 20097/* This list should, at a minimum, contain all the fpu names 20098 recognized by GCC. */ 20099static const struct arm_option_cpu_value_table arm_fpus[] = 20100{ 20101 {"softfpa", FPU_NONE}, 20102 {"fpe", FPU_ARCH_FPE}, 20103 {"fpe2", FPU_ARCH_FPE}, 20104 {"fpe3", FPU_ARCH_FPA}, /* Third release supports LFM/SFM. 
*/ 20105 {"fpa", FPU_ARCH_FPA}, 20106 {"fpa10", FPU_ARCH_FPA}, 20107 {"fpa11", FPU_ARCH_FPA}, 20108 {"arm7500fe", FPU_ARCH_FPA}, 20109 {"softvfp", FPU_ARCH_VFP}, 20110 {"softvfp+vfp", FPU_ARCH_VFP_V2}, 20111 {"vfp", FPU_ARCH_VFP_V2}, 20112 {"vfpv2", FPU_ARCH_VFP_V2}, 20113 {"vfp9", FPU_ARCH_VFP_V2}, 20114 {"vfp3", FPU_ARCH_VFP_V3}, 20115 {"vfpv3", FPU_ARCH_VFP_V3}, 20116 {"vfp10", FPU_ARCH_VFP_V2}, 20117 {"vfp10-r0", FPU_ARCH_VFP_V1}, 20118 {"vfpxd", FPU_ARCH_VFP_V1xD}, 20119 {"arm1020t", FPU_ARCH_VFP_V1}, 20120 {"arm1020e", FPU_ARCH_VFP_V2}, 20121 {"arm1136jfs", FPU_ARCH_VFP_V2}, 20122 {"arm1136jf-s", FPU_ARCH_VFP_V2}, 20123 {"maverick", FPU_ARCH_MAVERICK}, 20124 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1}, 20125 {NULL, ARM_ARCH_NONE} 20126}; 20127 20128struct arm_option_value_table 20129{ 20130 char *name; 20131 long value; 20132}; 20133 20134static const struct arm_option_value_table arm_float_abis[] = 20135{ 20136 {"hard", ARM_FLOAT_ABI_HARD}, 20137 {"softfp", ARM_FLOAT_ABI_SOFTFP}, 20138 {"soft", ARM_FLOAT_ABI_SOFT}, 20139 {NULL, 0} 20140}; 20141 20142#ifdef OBJ_ELF 20143/* We only know how to output GNU and ver 4/5 (AAELF) formats. */ 20144static const struct arm_option_value_table arm_eabis[] = 20145{ 20146 {"gnu", EF_ARM_EABI_UNKNOWN}, 20147 {"4", EF_ARM_EABI_VER4}, 20148 {"5", EF_ARM_EABI_VER5}, 20149 {NULL, 0} 20150}; 20151#endif 20152 20153struct arm_long_option_table 20154{ 20155 char * option; /* Substring to match. */ 20156 char * help; /* Help information. */ 20157 int (* func) (char * subopt); /* Function to decode sub-option. */ 20158 char * deprecated; /* If non-null, print this message. */ 20159}; 20160 20161static int 20162arm_parse_extension (char * str, const arm_feature_set **opt_p) 20163{ 20164 arm_feature_set *ext_set = xmalloc (sizeof (arm_feature_set)); 20165 20166 /* Copy the feature set, so that we can modify it. 
*/ 20167 *ext_set = **opt_p; 20168 *opt_p = ext_set; 20169 20170 while (str != NULL && *str != 0) 20171 { 20172 const struct arm_option_cpu_value_table * opt; 20173 char * ext; 20174 int optlen; 20175 20176 if (*str != '+') 20177 { 20178 as_bad (_("invalid architectural extension")); 20179 return 0; 20180 } 20181 20182 str++; 20183 ext = strchr (str, '+'); 20184 20185 if (ext != NULL) 20186 optlen = ext - str; 20187 else 20188 optlen = strlen (str); 20189 20190 if (optlen == 0) 20191 { 20192 as_bad (_("missing architectural extension")); 20193 return 0; 20194 } 20195 20196 for (opt = arm_extensions; opt->name != NULL; opt++) 20197 if (strncmp (opt->name, str, optlen) == 0) 20198 { 20199 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value); 20200 break; 20201 } 20202 20203 if (opt->name == NULL) 20204 { 20205 as_bad (_("unknown architectural extnsion `%s'"), str); 20206 return 0; 20207 } 20208 20209 str = ext; 20210 }; 20211 20212 return 1; 20213} 20214 20215static int 20216arm_parse_cpu (char * str) 20217{ 20218 const struct arm_cpu_option_table * opt; 20219 char * ext = strchr (str, '+'); 20220 int optlen; 20221 20222 if (ext != NULL) 20223 optlen = ext - str; 20224 else 20225 optlen = strlen (str); 20226 20227 if (optlen == 0) 20228 { 20229 as_bad (_("missing cpu name `%s'"), str); 20230 return 0; 20231 } 20232 20233 for (opt = arm_cpus; opt->name != NULL; opt++) 20234 if (strncmp (opt->name, str, optlen) == 0) 20235 { 20236 mcpu_cpu_opt = &opt->value; 20237 mcpu_fpu_opt = &opt->default_fpu; 20238 if (opt->canonical_name) 20239 strcpy(selected_cpu_name, opt->canonical_name); 20240 else 20241 { 20242 int i; 20243 for (i = 0; i < optlen; i++) 20244 selected_cpu_name[i] = TOUPPER (opt->name[i]); 20245 selected_cpu_name[i] = 0; 20246 } 20247 20248 if (ext != NULL) 20249 return arm_parse_extension (ext, &mcpu_cpu_opt); 20250 20251 return 1; 20252 } 20253 20254 as_bad (_("unknown cpu `%s'"), str); 20255 return 0; 20256} 20257 20258static int 20259arm_parse_arch 
(char * str) 20260{ 20261 const struct arm_arch_option_table *opt; 20262 char *ext = strchr (str, '+'); 20263 int optlen; 20264 20265 if (ext != NULL) 20266 optlen = ext - str; 20267 else 20268 optlen = strlen (str); 20269 20270 if (optlen == 0) 20271 { 20272 as_bad (_("missing architecture name `%s'"), str); 20273 return 0; 20274 } 20275 20276 for (opt = arm_archs; opt->name != NULL; opt++) 20277 if (streq (opt->name, str)) 20278 { 20279 march_cpu_opt = &opt->value; 20280 march_fpu_opt = &opt->default_fpu; 20281 strcpy(selected_cpu_name, opt->name); 20282 20283 if (ext != NULL) 20284 return arm_parse_extension (ext, &march_cpu_opt); 20285 20286 return 1; 20287 } 20288 20289 as_bad (_("unknown architecture `%s'\n"), str); 20290 return 0; 20291} 20292 20293static int 20294arm_parse_fpu (char * str) 20295{ 20296 const struct arm_option_cpu_value_table * opt; 20297 20298 for (opt = arm_fpus; opt->name != NULL; opt++) 20299 if (streq (opt->name, str)) 20300 { 20301 mfpu_opt = &opt->value; 20302 return 1; 20303 } 20304 20305 as_bad (_("unknown floating point format `%s'\n"), str); 20306 return 0; 20307} 20308 20309static int 20310arm_parse_float_abi (char * str) 20311{ 20312 const struct arm_option_value_table * opt; 20313 20314 for (opt = arm_float_abis; opt->name != NULL; opt++) 20315 if (streq (opt->name, str)) 20316 { 20317 mfloat_abi_opt = opt->value; 20318 return 1; 20319 } 20320 20321 as_bad (_("unknown floating point abi `%s'\n"), str); 20322 return 0; 20323} 20324 20325#ifdef OBJ_ELF 20326static int 20327arm_parse_eabi (char * str) 20328{ 20329 const struct arm_option_value_table *opt; 20330 20331 for (opt = arm_eabis; opt->name != NULL; opt++) 20332 if (streq (opt->name, str)) 20333 { 20334 meabi_flags = opt->value; 20335 return 1; 20336 } 20337 as_bad (_("unknown EABI `%s'\n"), str); 20338 return 0; 20339} 20340#endif 20341 20342struct arm_long_option_table arm_long_opts[] = 20343{ 20344 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"), 20345 
arm_parse_cpu, NULL}, 20346 {"march=", N_("<arch name>\t assemble for architecture <arch name>"), 20347 arm_parse_arch, NULL}, 20348 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"), 20349 arm_parse_fpu, NULL}, 20350 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"), 20351 arm_parse_float_abi, NULL}, 20352#ifdef OBJ_ELF 20353 {"meabi=", N_("<ver>\t assemble for eabi version <ver>"), 20354 arm_parse_eabi, NULL}, 20355#endif 20356 {NULL, NULL, 0, NULL} 20357}; 20358 20359int 20360md_parse_option (int c, char * arg) 20361{ 20362 struct arm_option_table *opt; 20363 const struct arm_legacy_option_table *fopt; 20364 struct arm_long_option_table *lopt; 20365 20366 switch (c) 20367 { 20368#ifdef OPTION_EB 20369 case OPTION_EB: 20370 target_big_endian = 1; 20371 break; 20372#endif 20373 20374#ifdef OPTION_EL 20375 case OPTION_EL: 20376 target_big_endian = 0; 20377 break; 20378#endif 20379 20380 case 'a': 20381 /* Listing option. Just ignore these, we don't support additional 20382 ones. */ 20383 return 0; 20384 20385 default: 20386 for (opt = arm_opts; opt->option != NULL; opt++) 20387 { 20388 if (c == opt->option[0] 20389 && ((arg == NULL && opt->option[1] == 0) 20390 || streq (arg, opt->option + 1))) 20391 { 20392#if WARN_DEPRECATED 20393 /* If the option is deprecated, tell the user. */ 20394 if (opt->deprecated != NULL) 20395 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, 20396 arg ? arg : "", _(opt->deprecated)); 20397#endif 20398 20399 if (opt->var != NULL) 20400 *opt->var = opt->value; 20401 20402 return 1; 20403 } 20404 } 20405 20406 for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++) 20407 { 20408 if (c == fopt->option[0] 20409 && ((arg == NULL && fopt->option[1] == 0) 20410 || streq (arg, fopt->option + 1))) 20411 { 20412#if WARN_DEPRECATED 20413 /* If the option is deprecated, tell the user. */ 20414 if (fopt->deprecated != NULL) 20415 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, 20416 arg ? 
arg : "", _(fopt->deprecated)); 20417#endif 20418 20419 if (fopt->var != NULL) 20420 *fopt->var = &fopt->value; 20421 20422 return 1; 20423 } 20424 } 20425 20426 for (lopt = arm_long_opts; lopt->option != NULL; lopt++) 20427 { 20428 /* These options are expected to have an argument. */ 20429 if (c == lopt->option[0] 20430 && arg != NULL 20431 && strncmp (arg, lopt->option + 1, 20432 strlen (lopt->option + 1)) == 0) 20433 { 20434#if WARN_DEPRECATED 20435 /* If the option is deprecated, tell the user. */ 20436 if (lopt->deprecated != NULL) 20437 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg, 20438 _(lopt->deprecated)); 20439#endif 20440 20441 /* Call the sup-option parser. */ 20442 return lopt->func (arg + strlen (lopt->option) - 1); 20443 } 20444 } 20445 20446 return 0; 20447 } 20448 20449 return 1; 20450} 20451 20452void 20453md_show_usage (FILE * fp) 20454{ 20455 struct arm_option_table *opt; 20456 struct arm_long_option_table *lopt; 20457 20458 fprintf (fp, _(" ARM-specific assembler options:\n")); 20459 20460 for (opt = arm_opts; opt->option != NULL; opt++) 20461 if (opt->help != NULL) 20462 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help)); 20463 20464 for (lopt = arm_long_opts; lopt->option != NULL; lopt++) 20465 if (lopt->help != NULL) 20466 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help)); 20467 20468#ifdef OPTION_EB 20469 fprintf (fp, _("\ 20470 -EB assemble code for a big-endian cpu\n")); 20471#endif 20472 20473#ifdef OPTION_EL 20474 fprintf (fp, _("\ 20475 -EL assemble code for a little-endian cpu\n")); 20476#endif 20477} 20478 20479 20480#ifdef OBJ_ELF 20481typedef struct 20482{ 20483 int val; 20484 arm_feature_set flags; 20485} cpu_arch_ver_table; 20486 20487/* Mapping from CPU features to EABI CPU arch values. Table must be sorted 20488 least features first. 
*/ 20489static const cpu_arch_ver_table cpu_arch_ver[] = 20490{ 20491 {1, ARM_ARCH_V4}, 20492 {2, ARM_ARCH_V4T}, 20493 {3, ARM_ARCH_V5}, 20494 {4, ARM_ARCH_V5TE}, 20495 {5, ARM_ARCH_V5TEJ}, 20496 {6, ARM_ARCH_V6}, 20497 {7, ARM_ARCH_V6Z}, 20498 {8, ARM_ARCH_V6K}, 20499 {9, ARM_ARCH_V6T2}, 20500 {10, ARM_ARCH_V7A}, 20501 {10, ARM_ARCH_V7R}, 20502 {10, ARM_ARCH_V7M}, 20503 {0, ARM_ARCH_NONE} 20504}; 20505 20506/* Set the public EABI object attributes. */ 20507static void 20508aeabi_set_public_attributes (void) 20509{ 20510 int arch; 20511 arm_feature_set flags; 20512 arm_feature_set tmp; 20513 const cpu_arch_ver_table *p; 20514 20515 /* Choose the architecture based on the capabilities of the requested cpu 20516 (if any) and/or the instructions actually used. */ 20517 ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used); 20518 ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt); 20519 ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu); 20520 /*Allow the user to override the reported architecture. */ 20521 if (object_arch) 20522 { 20523 ARM_CLEAR_FEATURE (flags, flags, arm_arch_any); 20524 ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch); 20525 } 20526 20527 tmp = flags; 20528 arch = 0; 20529 for (p = cpu_arch_ver; p->val; p++) 20530 { 20531 if (ARM_CPU_HAS_FEATURE (tmp, p->flags)) 20532 { 20533 arch = p->val; 20534 ARM_CLEAR_FEATURE (tmp, tmp, p->flags); 20535 } 20536 } 20537 20538 /* Tag_CPU_name. */ 20539 if (selected_cpu_name[0]) 20540 { 20541 char *p; 20542 20543 p = selected_cpu_name; 20544 if (strncmp(p, "armv", 4) == 0) 20545 { 20546 int i; 20547 20548 p += 4; 20549 for (i = 0; p[i]; i++) 20550 p[i] = TOUPPER (p[i]); 20551 } 20552 bfd_elf_add_proc_attr_string (stdoutput, 5, p); 20553 } 20554 /* Tag_CPU_arch. */ 20555 bfd_elf_add_proc_attr_int (stdoutput, 6, arch); 20556 /* Tag_CPU_arch_profile. 
*/ 20557 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)) 20558 bfd_elf_add_proc_attr_int (stdoutput, 7, 'A'); 20559 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r)) 20560 bfd_elf_add_proc_attr_int (stdoutput, 7, 'R'); 20561 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m)) 20562 bfd_elf_add_proc_attr_int (stdoutput, 7, 'M'); 20563 /* Tag_ARM_ISA_use. */ 20564 if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_full)) 20565 bfd_elf_add_proc_attr_int (stdoutput, 8, 1); 20566 /* Tag_THUMB_ISA_use. */ 20567 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_full)) 20568 bfd_elf_add_proc_attr_int (stdoutput, 9, 20569 ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2) ? 2 : 1); 20570 /* Tag_VFP_arch. */ 20571 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v3) 20572 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v3)) 20573 bfd_elf_add_proc_attr_int (stdoutput, 10, 3); 20574 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v2) 20575 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v2)) 20576 bfd_elf_add_proc_attr_int (stdoutput, 10, 2); 20577 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1) 20578 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1) 20579 || ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1xd) 20580 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1xd)) 20581 bfd_elf_add_proc_attr_int (stdoutput, 10, 1); 20582 /* Tag_WMMX_arch. */ 20583 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_cext_iwmmxt) 20584 || ARM_CPU_HAS_FEATURE (arm_arch_used, arm_cext_iwmmxt)) 20585 bfd_elf_add_proc_attr_int (stdoutput, 11, 1); 20586 /* Tag_NEON_arch. */ 20587 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_neon_ext_v1) 20588 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_neon_ext_v1)) 20589 bfd_elf_add_proc_attr_int (stdoutput, 12, 1); 20590} 20591 20592/* Add the default contents for the .ARM.attributes section. 
*/ 20593void 20594arm_md_end (void) 20595{ 20596 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4) 20597 return; 20598 20599 aeabi_set_public_attributes (); 20600} 20601#endif /* OBJ_ELF */ 20602 20603 20604/* Parse a .cpu directive. */ 20605 20606static void 20607s_arm_cpu (int ignored ATTRIBUTE_UNUSED) 20608{ 20609 const struct arm_cpu_option_table *opt; 20610 char *name; 20611 char saved_char; 20612 20613 name = input_line_pointer; 20614 while (*input_line_pointer && !ISSPACE(*input_line_pointer)) 20615 input_line_pointer++; 20616 saved_char = *input_line_pointer; 20617 *input_line_pointer = 0; 20618 20619 /* Skip the first "all" entry. */ 20620 for (opt = arm_cpus + 1; opt->name != NULL; opt++) 20621 if (streq (opt->name, name)) 20622 { 20623 mcpu_cpu_opt = &opt->value; 20624 selected_cpu = opt->value; 20625 if (opt->canonical_name) 20626 strcpy(selected_cpu_name, opt->canonical_name); 20627 else 20628 { 20629 int i; 20630 for (i = 0; opt->name[i]; i++) 20631 selected_cpu_name[i] = TOUPPER (opt->name[i]); 20632 selected_cpu_name[i] = 0; 20633 } 20634 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt); 20635 *input_line_pointer = saved_char; 20636 demand_empty_rest_of_line (); 20637 return; 20638 } 20639 as_bad (_("unknown cpu `%s'"), name); 20640 *input_line_pointer = saved_char; 20641 ignore_rest_of_line (); 20642} 20643 20644 20645/* Parse a .arch directive. */ 20646 20647static void 20648s_arm_arch (int ignored ATTRIBUTE_UNUSED) 20649{ 20650 const struct arm_arch_option_table *opt; 20651 char saved_char; 20652 char *name; 20653 20654 name = input_line_pointer; 20655 while (*input_line_pointer && !ISSPACE(*input_line_pointer)) 20656 input_line_pointer++; 20657 saved_char = *input_line_pointer; 20658 *input_line_pointer = 0; 20659 20660 /* Skip the first "all" entry. 
*/ 20661 for (opt = arm_archs + 1; opt->name != NULL; opt++) 20662 if (streq (opt->name, name)) 20663 { 20664 mcpu_cpu_opt = &opt->value; 20665 selected_cpu = opt->value; 20666 strcpy(selected_cpu_name, opt->name); 20667 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt); 20668 *input_line_pointer = saved_char; 20669 demand_empty_rest_of_line (); 20670 return; 20671 } 20672 20673 as_bad (_("unknown architecture `%s'\n"), name); 20674 *input_line_pointer = saved_char; 20675 ignore_rest_of_line (); 20676} 20677 20678 20679/* Parse a .object_arch directive. */ 20680 20681static void 20682s_arm_object_arch (int ignored ATTRIBUTE_UNUSED) 20683{ 20684 const struct arm_arch_option_table *opt; 20685 char saved_char; 20686 char *name; 20687 20688 name = input_line_pointer; 20689 while (*input_line_pointer && !ISSPACE(*input_line_pointer)) 20690 input_line_pointer++; 20691 saved_char = *input_line_pointer; 20692 *input_line_pointer = 0; 20693 20694 /* Skip the first "all" entry. */ 20695 for (opt = arm_archs + 1; opt->name != NULL; opt++) 20696 if (streq (opt->name, name)) 20697 { 20698 object_arch = &opt->value; 20699 *input_line_pointer = saved_char; 20700 demand_empty_rest_of_line (); 20701 return; 20702 } 20703 20704 as_bad (_("unknown architecture `%s'\n"), name); 20705 *input_line_pointer = saved_char; 20706 ignore_rest_of_line (); 20707} 20708 20709 20710/* Parse a .fpu directive. 
*/ 20711 20712static void 20713s_arm_fpu (int ignored ATTRIBUTE_UNUSED) 20714{ 20715 const struct arm_option_cpu_value_table *opt; 20716 char saved_char; 20717 char *name; 20718 20719 name = input_line_pointer; 20720 while (*input_line_pointer && !ISSPACE(*input_line_pointer)) 20721 input_line_pointer++; 20722 saved_char = *input_line_pointer; 20723 *input_line_pointer = 0; 20724 20725 for (opt = arm_fpus; opt->name != NULL; opt++) 20726 if (streq (opt->name, name)) 20727 { 20728 mfpu_opt = &opt->value; 20729 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt); 20730 *input_line_pointer = saved_char; 20731 demand_empty_rest_of_line (); 20732 return; 20733 } 20734 20735 as_bad (_("unknown floating point format `%s'\n"), name); 20736 *input_line_pointer = saved_char; 20737 ignore_rest_of_line (); 20738} 20739 20740/* Copy symbol information. */ 20741void 20742arm_copy_symbol_attributes (symbolS *dest, symbolS *src) 20743{ 20744 ARM_GET_FLAG (dest) = ARM_GET_FLAG (src); 20745} 20746