1/* tc-arm.c -- Assemble for the ARM
2   Copyright (C) 1994-2015 Free Software Foundation, Inc.
3   Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4	Modified by David Taylor (dtaylor@armltd.co.uk)
5	Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6	Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7	Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
8
9   This file is part of GAS, the GNU Assembler.
10
11   GAS is free software; you can redistribute it and/or modify
12   it under the terms of the GNU General Public License as published by
13   the Free Software Foundation; either version 3, or (at your option)
14   any later version.
15
16   GAS is distributed in the hope that it will be useful,
17   but WITHOUT ANY WARRANTY; without even the implied warranty of
18   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
19   GNU General Public License for more details.
20
21   You should have received a copy of the GNU General Public License
22   along with GAS; see the file COPYING.  If not, write to the Free
23   Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
24   02110-1301, USA.  */
25
26#include "as.h"
27#include <limits.h>
28#include <stdarg.h>
29#define	 NO_RELOC 0
30#include "safe-ctype.h"
31#include "subsegs.h"
32#include "obstack.h"
33#include "libiberty.h"
34#include "opcode/arm.h"
35
36#ifdef OBJ_ELF
37#include "elf/arm.h"
38#include "dw2gencfi.h"
39#endif
40
41#include "dwarf2dbg.h"
42
43#ifdef OBJ_ELF
44/* Must be at least the size of the largest unwind opcode (currently two).  */
45#define ARM_OPCODE_CHUNK_SIZE 8
46
47/* This structure holds the unwinding state.  */
48
49static struct
50{
51  symbolS *	  proc_start;
52  symbolS *	  table_entry;
53  symbolS *	  personality_routine;
54  int		  personality_index;
55  /* The segment containing the function.  */
56  segT		  saved_seg;
57  subsegT	  saved_subseg;
58  /* Opcodes generated from this function.  */
59  unsigned char * opcodes;
60  int		  opcode_count;
61  int		  opcode_alloc;
62  /* The number of bytes pushed to the stack.  */
63  offsetT	  frame_size;
64  /* We don't add stack adjustment opcodes immediately so that we can merge
65     multiple adjustments.  We can also omit the final adjustment
66     when using a frame pointer.  */
67  offsetT	  pending_offset;
68  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
69     hold the reg+offset to use when restoring sp from a frame pointer.	 */
70  offsetT	  fp_offset;
71  int		  fp_reg;
72  /* Nonzero if an unwind_setfp directive has been seen.  */
73  unsigned	  fp_used:1;
74  /* Nonzero if the last opcode restores sp from fp_reg.  */
75  unsigned	  sp_restored:1;
76} unwind;
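/* Illustrative note (not part of the original source): this state is driven
   by the EHABI unwinding directives.  A minimal sketch of a typical use:

	.fnstart
	push	{r4, fp, lr}
	.save	{r4, fp, lr}	@ recorded in the opcodes and frame_size
	.setfp	fp, sp, #8	@ records fp_reg/fp_offset and sets fp_used
	add	fp, sp, #8
	@ ... function body ...
	.fnend			@ flushes the accumulated unwind opcodes

   The exact opcodes emitted depend on the directives seen; see the directive
   handlers further down in this file.  */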
77
78#endif /* OBJ_ELF */
79
80/* Results from operand parsing worker functions.  */
81
82typedef enum
83{
84  PARSE_OPERAND_SUCCESS,
85  PARSE_OPERAND_FAIL,
86  PARSE_OPERAND_FAIL_NO_BACKTRACK
87} parse_operand_result;
88
89enum arm_float_abi
90{
91  ARM_FLOAT_ABI_HARD,
92  ARM_FLOAT_ABI_SOFTFP,
93  ARM_FLOAT_ABI_SOFT
94};
95
96/* Types of processor to assemble for.	*/
97#ifndef CPU_DEFAULT
98/* The code that was here used to select a default CPU depending on compiler
99   pre-defines which were only present when doing native builds, thus
100   changing gas' default behaviour depending upon the build host.
101
102   If you have a target that requires a default CPU option then you
103   should define CPU_DEFAULT here.  */
104#endif
105
106#ifndef FPU_DEFAULT
107# ifdef TE_LINUX
108#  define FPU_DEFAULT FPU_ARCH_FPA
109# elif defined (TE_NetBSD)
110#  ifdef OBJ_ELF
111#   define FPU_DEFAULT FPU_ARCH_VFP	/* Soft-float, but VFP order.  */
112#  else
113    /* Legacy a.out format.  */
114#   define FPU_DEFAULT FPU_ARCH_FPA	/* Soft-float, but FPA order.  */
115#  endif
116# elif defined (TE_VXWORKS)
117#  define FPU_DEFAULT FPU_ARCH_VFP	/* Soft-float, VFP order.  */
118# else
119   /* For backwards compatibility, default to FPA.  */
120#  define FPU_DEFAULT FPU_ARCH_FPA
121# endif
122#endif /* ifndef FPU_DEFAULT */
123
124#define streq(a, b)	      (strcmp (a, b) == 0)
125
126static arm_feature_set cpu_variant;
127static arm_feature_set arm_arch_used;
128static arm_feature_set thumb_arch_used;
129
130/* Flags stored in private area of BFD structure.  */
131static int uses_apcs_26	     = FALSE;
132static int atpcs	     = FALSE;
133static int support_interwork = FALSE;
134static int uses_apcs_float   = FALSE;
135static int pic_code	     = FALSE;
136static int fix_v4bx	     = FALSE;
137/* Warn on using deprecated features.  */
138static int warn_on_deprecated = TRUE;
139
140/* Understand CodeComposer Studio assembly syntax.  */
141bfd_boolean codecomposer_syntax = FALSE;
142
143/* Variables that we set while parsing command-line options.  Once all
144   options have been read we re-process these values to set the real
145   assembly flags.  */
146static const arm_feature_set *legacy_cpu = NULL;
147static const arm_feature_set *legacy_fpu = NULL;
148
149static const arm_feature_set *mcpu_cpu_opt = NULL;
150static const arm_feature_set *mcpu_fpu_opt = NULL;
151static const arm_feature_set *march_cpu_opt = NULL;
152static const arm_feature_set *march_fpu_opt = NULL;
153static const arm_feature_set *mfpu_opt = NULL;
154static const arm_feature_set *object_arch = NULL;
155
156/* Constants for known architecture features.  */
157static const arm_feature_set fpu_default = FPU_DEFAULT;
158static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
159static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
160static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
161static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
162static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
163static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
164static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
165static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
166
167#ifdef CPU_DEFAULT
168static const arm_feature_set cpu_default = CPU_DEFAULT;
169#endif
170
171static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
172static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V2);
173static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
174static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
175static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
176static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
177static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
178static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
179static const arm_feature_set arm_ext_v4t_5 =
180  ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
181static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
182static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
183static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
184static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
185static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
186static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
187static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
188static const arm_feature_set arm_ext_v6m = ARM_FEATURE_CORE_LOW (ARM_EXT_V6M);
189static const arm_feature_set arm_ext_v6_notm =
190  ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
191static const arm_feature_set arm_ext_v6_dsp =
192  ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
193static const arm_feature_set arm_ext_barrier =
194  ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
195static const arm_feature_set arm_ext_msr =
196  ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
197static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
198static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
199static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
200static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
201static const arm_feature_set arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
202static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
203static const arm_feature_set arm_ext_m =
204  ARM_FEATURE_CORE_LOW (ARM_EXT_V6M | ARM_EXT_OS | ARM_EXT_V7M);
205static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
206static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
207static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
208static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
209static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
210static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN);
211
212static const arm_feature_set arm_arch_any = ARM_ANY;
213static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1, -1);
214static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
215static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
216static const arm_feature_set arm_arch_v6m_only = ARM_ARCH_V6M_ONLY;
217
218static const arm_feature_set arm_cext_iwmmxt2 =
219  ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
220static const arm_feature_set arm_cext_iwmmxt =
221  ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
222static const arm_feature_set arm_cext_xscale =
223  ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
224static const arm_feature_set arm_cext_maverick =
225  ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);
226static const arm_feature_set fpu_fpa_ext_v1 =
227  ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
228static const arm_feature_set fpu_fpa_ext_v2 =
229  ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
230static const arm_feature_set fpu_vfp_ext_v1xd =
231  ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
232static const arm_feature_set fpu_vfp_ext_v1 =
233  ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
234static const arm_feature_set fpu_vfp_ext_v2 =
235  ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
236static const arm_feature_set fpu_vfp_ext_v3xd =
237  ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
238static const arm_feature_set fpu_vfp_ext_v3 =
239  ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
240static const arm_feature_set fpu_vfp_ext_d32 =
241  ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
242static const arm_feature_set fpu_neon_ext_v1 =
243  ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
244static const arm_feature_set fpu_vfp_v3_or_neon_ext =
245  ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
246static const arm_feature_set fpu_vfp_fp16 =
247  ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
248static const arm_feature_set fpu_neon_ext_fma =
249  ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
250static const arm_feature_set fpu_vfp_ext_fma =
251  ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
252static const arm_feature_set fpu_vfp_ext_armv8 =
253  ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
254static const arm_feature_set fpu_vfp_ext_armv8xd =
255  ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
256static const arm_feature_set fpu_neon_ext_armv8 =
257  ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
258static const arm_feature_set fpu_crypto_ext_armv8 =
259  ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
260static const arm_feature_set crc_ext_armv8 =
261  ARM_FEATURE_COPROC (CRC_EXT_ARMV8);
262static const arm_feature_set fpu_neon_ext_v8_1 =
263  ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8 | FPU_NEON_EXT_RDMA);
264
265static int mfloat_abi_opt = -1;
266/* Record user cpu selection for object attributes.  */
267static arm_feature_set selected_cpu = ARM_ARCH_NONE;
268/* Must be long enough to hold any of the names in arm_cpus.  */
269static char selected_cpu_name[20];
270
271extern FLONUM_TYPE generic_floating_point_number;
272
273/* Return TRUE if no CPU was selected on the command line.  */
274static bfd_boolean
275no_cpu_selected (void)
276{
277  return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none);
278}
279
280#ifdef OBJ_ELF
281# ifdef EABI_DEFAULT
282static int meabi_flags = EABI_DEFAULT;
283# else
284static int meabi_flags = EF_ARM_EABI_UNKNOWN;
285# endif
286
287static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
288
289bfd_boolean
290arm_is_eabi (void)
291{
292  return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
293}
294#endif
295
296#ifdef OBJ_ELF
297/* Pre-defined "_GLOBAL_OFFSET_TABLE_"	*/
298symbolS * GOT_symbol;
299#endif
300
301/* 0: assemble for ARM,
302   1: assemble for Thumb,
303   2: assemble for Thumb even though target CPU does not support thumb
304      instructions.  */
305static int thumb_mode = 0;
306/* A value distinct from the possible values for thumb_mode that we
307   can use to record whether thumb_mode has been copied into the
308   tc_frag_data field of a frag.  */
309#define MODE_RECORDED (1 << 4)
310
311/* Specifies the intrinsic IT insn behavior mode.  */
312enum implicit_it_mode
313{
314  IMPLICIT_IT_MODE_NEVER  = 0x00,
315  IMPLICIT_IT_MODE_ARM    = 0x01,
316  IMPLICIT_IT_MODE_THUMB  = 0x02,
317  IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
318};
319static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
320
321/* If unified_syntax is true, we are processing the new unified
322   ARM/Thumb syntax.  Important differences from the old ARM mode:
323
324     - Immediate operands do not require a # prefix.
325     - Conditional affixes always appear at the end of the
326       instruction.  (For backward compatibility, those instructions
327       that formerly had them in the middle, continue to accept them
328       there.)
329     - The IT instruction may appear, and if it does is validated
330       against subsequent conditional affixes.  It does not generate
331       machine code.
332
333   Important differences from the old Thumb mode:
334
335     - Immediate operands do not require a # prefix.
336     - Most of the V6T2 instructions are only available in unified mode.
337     - The .N and .W suffixes are recognized and honored (it is an error
338       if they cannot be honored).
339     - All instructions set the flags if and only if they have an 's' affix.
340     - Conditional affixes may be used.  They are validated against
341       preceding IT instructions.  Unlike ARM mode, you cannot use a
342       conditional affix except in the scope of an IT instruction.  */
343
344static bfd_boolean unified_syntax = FALSE;
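/* Illustrative example (not part of the original source).  Under
   ".syntax unified" the condition always trails the mnemonic and Thumb
   code needs an explicit IT instruction:

	.syntax unified
	it	eq
	addeq	r0, r0, 1	@ the '#' on the immediate is optional

   whereas the old divided ARM syntax also accepts infix conditions such as
   "ldreqb r0, [r1]" (unified spelling: "ldrbeq").  */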
345
346/* An immediate operand can start with #, and ld*, st*, pld operands
347   can contain [ and ].  We need to tell APP not to elide whitespace
348   before a [, which can appear as the first operand for pld.
349   Likewise, a { can appear as the first operand for push, pop, vld*, etc.  */
350const char arm_symbol_chars[] = "#[]{}";
351
352enum neon_el_type
353{
354  NT_invtype,
355  NT_untyped,
356  NT_integer,
357  NT_float,
358  NT_poly,
359  NT_signed,
360  NT_unsigned
361};
362
363struct neon_type_el
364{
365  enum neon_el_type type;
366  unsigned size;
367};
368
369#define NEON_MAX_TYPE_ELS 4
370
371struct neon_type
372{
373  struct neon_type_el el[NEON_MAX_TYPE_ELS];
374  unsigned elems;
375};
376
377enum it_instruction_type
378{
379   OUTSIDE_IT_INSN,
380   INSIDE_IT_INSN,
381   INSIDE_IT_LAST_INSN,
382   IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
383			      if inside, should be the last one.  */
384   NEUTRAL_IT_INSN,        /* This could be either inside or outside,
385			      i.e. BKPT and NOP.  */
386   IT_INSN                 /* The IT insn has been parsed.  */
387};
388
389/* The maximum number of operands we need.  */
390#define ARM_IT_MAX_OPERANDS 6
391
392struct arm_it
393{
394  const char *	error;
395  unsigned long instruction;
396  int		size;
397  int		size_req;
398  int		cond;
399  /* "uncond_value" is set to the value in place of the conditional field in
400     unconditional versions of the instruction, or -1 if nothing is
401     appropriate.  */
402  int		uncond_value;
403  struct neon_type vectype;
404  /* This does not indicate an actual NEON instruction, only that
405     the mnemonic accepts neon-style type suffixes.  */
406  int		is_neon;
407  /* Set to the opcode if the instruction needs relaxation.
408     Zero if the instruction is not relaxed.  */
409  unsigned long	relax;
410  struct
411  {
412    bfd_reloc_code_real_type type;
413    expressionS		     exp;
414    int			     pc_rel;
415  } reloc;
416
417  enum it_instruction_type it_insn_type;
418
419  struct
420  {
421    unsigned reg;
422    signed int imm;
423    struct neon_type_el vectype;
424    unsigned present	: 1;  /* Operand present.  */
425    unsigned isreg	: 1;  /* Operand was a register.  */
426    unsigned immisreg	: 1;  /* .imm field is a second register.  */
427    unsigned isscalar   : 1;  /* Operand is a (Neon) scalar.  */
428    unsigned immisalign : 1;  /* Immediate is an alignment specifier.  */
429    unsigned immisfloat : 1;  /* Immediate was parsed as a float.  */
430    /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
431       instructions. This allows us to disambiguate ARM <-> vector insns.  */
432    unsigned regisimm   : 1;  /* 64-bit immediate, reg forms high 32 bits.  */
433    unsigned isvec      : 1;  /* Is a single, double or quad VFP/Neon reg.  */
434    unsigned isquad     : 1;  /* Operand is Neon quad-precision register.  */
435    unsigned issingle   : 1;  /* Operand is VFP single-precision register.  */
436    unsigned hasreloc	: 1;  /* Operand has relocation suffix.  */
437    unsigned writeback	: 1;  /* Operand has trailing !  */
438    unsigned preind	: 1;  /* Preindexed address.  */
439    unsigned postind	: 1;  /* Postindexed address.  */
440    unsigned negative	: 1;  /* Index register was negated.  */
441    unsigned shifted	: 1;  /* Shift applied to operation.  */
442    unsigned shift_kind : 3;  /* Shift operation (enum shift_kind).  */
443  } operands[ARM_IT_MAX_OPERANDS];
444};
445
446static struct arm_it inst;
447
448#define NUM_FLOAT_VALS 8
449
450const char * fp_const[] =
451{
452  "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
453};
454
455/* Number of littlenums required to hold an extended precision number.	*/
456#define MAX_LITTLENUMS 6
457
458LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
459
460#define FAIL	(-1)
461#define SUCCESS (0)
462
463#define SUFF_S 1
464#define SUFF_D 2
465#define SUFF_E 3
466#define SUFF_P 4
467
468#define CP_T_X	 0x00008000
469#define CP_T_Y	 0x00400000
470
471#define CONDS_BIT	 0x00100000
472#define LOAD_BIT	 0x00100000
473
474#define DOUBLE_LOAD_FLAG 0x00000001
475
476struct asm_cond
477{
478  const char *	 template_name;
479  unsigned long  value;
480};
481
482#define COND_ALWAYS 0xE
483
484struct asm_psr
485{
486  const char *   template_name;
487  unsigned long  field;
488};
489
490struct asm_barrier_opt
491{
492  const char *    template_name;
493  unsigned long   value;
494  const arm_feature_set arch;
495};
496
497/* The bit that distinguishes CPSR and SPSR.  */
498#define SPSR_BIT   (1 << 22)
499
500/* The individual PSR flag bits.  */
501#define PSR_c	(1 << 16)
502#define PSR_x	(1 << 17)
503#define PSR_s	(1 << 18)
504#define PSR_f	(1 << 19)
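/* Illustrative example (not part of the original source): the MSR operand
   "cpsr_fc" selects PSR_f | PSR_c, while "spsr_cxsf" selects all four field
   bits with SPSR_BIT set as well.  */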
505
506struct reloc_entry
507{
508  char *                    name;
509  bfd_reloc_code_real_type  reloc;
510};
511
512enum vfp_reg_pos
513{
514  VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
515  VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
516};
517
518enum vfp_ldstm_type
519{
520  VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
521};
522
523/* Bits for DEFINED field in neon_typed_alias.  */
524#define NTA_HASTYPE  1
525#define NTA_HASINDEX 2
526
527struct neon_typed_alias
528{
529  unsigned char        defined;
530  unsigned char        index;
531  struct neon_type_el  eltype;
532};
533
534/* ARM register categories.  This includes coprocessor numbers and various
535   architecture extensions' registers.	*/
536enum arm_reg_type
537{
538  REG_TYPE_RN,
539  REG_TYPE_CP,
540  REG_TYPE_CN,
541  REG_TYPE_FN,
542  REG_TYPE_VFS,
543  REG_TYPE_VFD,
544  REG_TYPE_NQ,
545  REG_TYPE_VFSD,
546  REG_TYPE_NDQ,
547  REG_TYPE_NSDQ,
548  REG_TYPE_VFC,
549  REG_TYPE_MVF,
550  REG_TYPE_MVD,
551  REG_TYPE_MVFX,
552  REG_TYPE_MVDX,
553  REG_TYPE_MVAX,
554  REG_TYPE_DSPSC,
555  REG_TYPE_MMXWR,
556  REG_TYPE_MMXWC,
557  REG_TYPE_MMXWCG,
558  REG_TYPE_XSCALE,
559  REG_TYPE_RNB
560};
561
562/* Structure for a hash table entry for a register.
563   If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
564   information which states whether a vector type or index is specified (for a
565   register alias created with .dn or .qn). Otherwise NEON should be NULL.  */
566struct reg_entry
567{
568  const char *               name;
569  unsigned int               number;
570  unsigned char              type;
571  unsigned char              builtin;
572  struct neon_typed_alias *  neon;
573};
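/* Illustrative example (not part of the original source), loosely following
   the documented .dn/.qn usage:

	x	.dn	d2.f32
	y	.dn	d3.f32
	z	.dn	d4.f32[1]
	vmul	x, y, z		@ equivalent to vmul.f32 d2, d3, d4[1]

   Here "x" and "y" get NEON aliases with NTA_HASTYPE set, and "z"
   additionally has NTA_HASINDEX with index 1.  */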
574
575/* Diagnostics used when we don't get a register of the expected type.	*/
576const char * const reg_expected_msgs[] =
577{
578  N_("ARM register expected"),
579  N_("bad or missing co-processor number"),
580  N_("co-processor register expected"),
581  N_("FPA register expected"),
582  N_("VFP single precision register expected"),
583  N_("VFP/Neon double precision register expected"),
584  N_("Neon quad precision register expected"),
585  N_("VFP single or double precision register expected"),
586  N_("Neon double or quad precision register expected"),
587  N_("VFP single, double or Neon quad precision register expected"),
588  N_("VFP system register expected"),
589  N_("Maverick MVF register expected"),
590  N_("Maverick MVD register expected"),
591  N_("Maverick MVFX register expected"),
592  N_("Maverick MVDX register expected"),
593  N_("Maverick MVAX register expected"),
594  N_("Maverick DSPSC register expected"),
595  N_("iWMMXt data register expected"),
596  N_("iWMMXt control register expected"),
597  N_("iWMMXt scalar register expected"),
598  N_("XScale accumulator register expected"),
599};
600
601/* Some well known registers that we refer to directly elsewhere.  */
602#define REG_R12	12
603#define REG_SP	13
604#define REG_LR	14
605#define REG_PC	15
606
607/* ARM instructions take 4 bytes in the object file, Thumb instructions
608   take 2:  */
609#define INSN_SIZE	4
610
611struct asm_opcode
612{
613  /* Basic string to match.  */
614  const char * template_name;
615
616  /* Parameters to instruction.	 */
617  unsigned int operands[8];
618
619  /* Conditional tag - see opcode_lookup.  */
620  unsigned int tag : 4;
621
622  /* Basic instruction code.  */
623  unsigned int avalue : 28;
624
625  /* Thumb-format instruction code.  */
626  unsigned int tvalue;
627
628  /* Which architecture variant provides this instruction.  */
629  const arm_feature_set * avariant;
630  const arm_feature_set * tvariant;
631
632  /* Function to call to encode instruction in ARM format.  */
633  void (* aencode) (void);
634
635  /* Function to call to encode instruction in Thumb format.  */
636  void (* tencode) (void);
637};
638
639/* Defines for various bits that we will want to toggle.  */
640#define INST_IMMEDIATE	0x02000000
641#define OFFSET_REG	0x02000000
642#define HWOFFSET_IMM	0x00400000
643#define SHIFT_BY_REG	0x00000010
644#define PRE_INDEX	0x01000000
645#define INDEX_UP	0x00800000
646#define WRITE_BACK	0x00200000
647#define LDM_TYPE_2_OR_3	0x00400000
648#define CPSI_MMOD	0x00020000
649
650#define LITERAL_MASK	0xf000f000
651#define OPCODE_MASK	0xfe1fffff
652#define V4_STR_BIT	0x00000020
653#define VLDR_VMOV_SAME	0x0040f000
654
655#define T2_SUBS_PC_LR	0xf3de8f00
656
657#define DATA_OP_SHIFT	21
658
659#define T2_OPCODE_MASK	0xfe1fffff
660#define T2_DATA_OP_SHIFT 21
661
662#define A_COND_MASK         0xf0000000
663#define A_PUSH_POP_OP_MASK  0x0fff0000
664
665/* Opcodes for pushing/popping registers to/from the stack.  */
666#define A1_OPCODE_PUSH    0x092d0000
667#define A2_OPCODE_PUSH    0x052d0004
668#define A2_OPCODE_POP     0x049d0004
669
670/* Codes to distinguish the arithmetic instructions.  */
671#define OPCODE_AND	0
672#define OPCODE_EOR	1
673#define OPCODE_SUB	2
674#define OPCODE_RSB	3
675#define OPCODE_ADD	4
676#define OPCODE_ADC	5
677#define OPCODE_SBC	6
678#define OPCODE_RSC	7
679#define OPCODE_TST	8
680#define OPCODE_TEQ	9
681#define OPCODE_CMP	10
682#define OPCODE_CMN	11
683#define OPCODE_ORR	12
684#define OPCODE_MOV	13
685#define OPCODE_BIC	14
686#define OPCODE_MVN	15
687
688#define T2_OPCODE_AND	0
689#define T2_OPCODE_BIC	1
690#define T2_OPCODE_ORR	2
691#define T2_OPCODE_ORN	3
692#define T2_OPCODE_EOR	4
693#define T2_OPCODE_ADD	8
694#define T2_OPCODE_ADC	10
695#define T2_OPCODE_SBC	11
696#define T2_OPCODE_SUB	13
697#define T2_OPCODE_RSB	14
698
699#define T_OPCODE_MUL 0x4340
700#define T_OPCODE_TST 0x4200
701#define T_OPCODE_CMN 0x42c0
702#define T_OPCODE_NEG 0x4240
703#define T_OPCODE_MVN 0x43c0
704
705#define T_OPCODE_ADD_R3	0x1800
706#define T_OPCODE_SUB_R3 0x1a00
707#define T_OPCODE_ADD_HI 0x4400
708#define T_OPCODE_ADD_ST 0xb000
709#define T_OPCODE_SUB_ST 0xb080
710#define T_OPCODE_ADD_SP 0xa800
711#define T_OPCODE_ADD_PC 0xa000
712#define T_OPCODE_ADD_I8 0x3000
713#define T_OPCODE_SUB_I8 0x3800
714#define T_OPCODE_ADD_I3 0x1c00
715#define T_OPCODE_SUB_I3 0x1e00
716
717#define T_OPCODE_ASR_R	0x4100
718#define T_OPCODE_LSL_R	0x4080
719#define T_OPCODE_LSR_R	0x40c0
720#define T_OPCODE_ROR_R	0x41c0
721#define T_OPCODE_ASR_I	0x1000
722#define T_OPCODE_LSL_I	0x0000
723#define T_OPCODE_LSR_I	0x0800
724
725#define T_OPCODE_MOV_I8	0x2000
726#define T_OPCODE_CMP_I8 0x2800
727#define T_OPCODE_CMP_LR 0x4280
728#define T_OPCODE_MOV_HR 0x4600
729#define T_OPCODE_CMP_HR 0x4500
730
731#define T_OPCODE_LDR_PC 0x4800
732#define T_OPCODE_LDR_SP 0x9800
733#define T_OPCODE_STR_SP 0x9000
734#define T_OPCODE_LDR_IW 0x6800
735#define T_OPCODE_STR_IW 0x6000
736#define T_OPCODE_LDR_IH 0x8800
737#define T_OPCODE_STR_IH 0x8000
738#define T_OPCODE_LDR_IB 0x7800
739#define T_OPCODE_STR_IB 0x7000
740#define T_OPCODE_LDR_RW 0x5800
741#define T_OPCODE_STR_RW 0x5000
742#define T_OPCODE_LDR_RH 0x5a00
743#define T_OPCODE_STR_RH 0x5200
744#define T_OPCODE_LDR_RB 0x5c00
745#define T_OPCODE_STR_RB 0x5400
746
747#define T_OPCODE_PUSH	0xb400
748#define T_OPCODE_POP	0xbc00
749
750#define T_OPCODE_BRANCH 0xe000
751
752#define THUMB_SIZE	2	/* Size of thumb instruction.  */
753#define THUMB_PP_PC_LR 0x0100
754#define THUMB_LOAD_BIT 0x0800
755#define THUMB2_LOAD_BIT 0x00100000
756
757#define BAD_ARGS	_("bad arguments to instruction")
758#define BAD_SP          _("r13 not allowed here")
759#define BAD_PC		_("r15 not allowed here")
760#define BAD_COND	_("instruction cannot be conditional")
761#define BAD_OVERLAP	_("registers may not be the same")
762#define BAD_HIREG	_("lo register required")
763#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
764#define BAD_ADDR_MODE   _("instruction does not accept this addressing mode")
765#define BAD_BRANCH	_("branch must be last instruction in IT block")
766#define BAD_NOT_IT	_("instruction not allowed in IT block")
767#define BAD_FPU		_("selected FPU does not support instruction")
768#define BAD_OUT_IT 	_("thumb conditional instruction should be in IT block")
769#define BAD_IT_COND	_("incorrect condition in IT block")
770#define BAD_IT_IT 	_("IT falling in the range of a previous IT block")
771#define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
772#define BAD_PC_ADDRESSING \
773	_("cannot use register index with PC-relative addressing")
774#define BAD_PC_WRITEBACK \
775	_("cannot use writeback with PC-relative addressing")
776#define BAD_RANGE     _("branch out of range")
777#define UNPRED_REG(R)	_("using " R " results in unpredictable behaviour")
778
779static struct hash_control * arm_ops_hsh;
780static struct hash_control * arm_cond_hsh;
781static struct hash_control * arm_shift_hsh;
782static struct hash_control * arm_psr_hsh;
783static struct hash_control * arm_v7m_psr_hsh;
784static struct hash_control * arm_reg_hsh;
785static struct hash_control * arm_reloc_hsh;
786static struct hash_control * arm_barrier_opt_hsh;
787
788/* Stuff needed to resolve the label ambiguity
789   As:
790     ...
791     label:   <insn>
792   may differ from:
793     ...
794     label:
795	      <insn>  */
796
797symbolS *  last_label_seen;
798static int label_is_thumb_function_name = FALSE;
799
800/* Literal pool structure.  Held on a per-section
801   and per-sub-section basis.  */
802
803#define MAX_LITERAL_POOL_SIZE 1024
804typedef struct literal_pool
805{
806  expressionS	         literals [MAX_LITERAL_POOL_SIZE];
807  unsigned int	         next_free_entry;
808  unsigned int	         id;
809  symbolS *	         symbol;
810  segT		         section;
811  subsegT	         sub_section;
812#ifdef OBJ_ELF
813  struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
814#endif
815  struct literal_pool *  next;
816  unsigned int		 alignment;
817} literal_pool;
818
819/* Pointer to a linked list of literal pools.  */
820literal_pool * list_of_pools = NULL;
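/* Illustrative example (not part of the original source): a constant that
   cannot be encoded directly, e.g.

	ldr	r0, =0x12345678

   adds an entry to the pool for the current (section, sub-section) and is
   assembled as a PC-relative load; the pooled word is emitted at the next
   .pool/.ltorg directive or when the pool must otherwise be flushed.  */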
821
822typedef enum asmfunc_states
823{
824  OUTSIDE_ASMFUNC,
825  WAITING_ASMFUNC_NAME,
826  WAITING_ENDASMFUNC
827} asmfunc_states;
828
829static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;
830
831#ifdef OBJ_ELF
832#  define now_it seg_info (now_seg)->tc_segment_info_data.current_it
833#else
834static struct current_it now_it;
835#endif
836
837static inline int
838now_it_compatible (int cond)
839{
840  return (cond & ~1) == (now_it.cc & ~1);
841}
842
843static inline int
844conditional_insn (void)
845{
846  return inst.cond != COND_ALWAYS;
847}
848
849static int in_it_block (void);
850
851static int handle_it_state (void);
852
853static void force_automatic_it_block_close (void);
854
855static void it_fsm_post_encode (void);
856
857#define set_it_insn_type(type)			\
858  do						\
859    {						\
860      inst.it_insn_type = type;			\
861      if (handle_it_state () == FAIL)		\
862	return;					\
863    }						\
864  while (0)
865
866#define set_it_insn_type_nonvoid(type, failret) \
867  do						\
868    {                                           \
869      inst.it_insn_type = type;			\
870      if (handle_it_state () == FAIL)		\
871	return failret;				\
872    }						\
873  while(0)
874
875#define set_it_insn_type_last()				\
876  do							\
877    {							\
878      if (inst.cond == COND_ALWAYS)			\
879	set_it_insn_type (IF_INSIDE_IT_LAST_INSN);	\
880      else						\
881	set_it_insn_type (INSIDE_IT_LAST_INSN);		\
882    }							\
883  while (0)
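/* Illustrative sketch only (do_t_example is a made-up name, not part of the
   original source): a Thumb encoder typically begins by declaring how the
   instruction interacts with an IT block, for example:

	static void
	do_t_example (void)
	{
	  set_it_insn_type (NEUTRAL_IT_INSN);	(returns early on FAIL)
	  ... encode the instruction ...
	}

   it_fsm_post_encode is then used to advance the IT state after encoding.  */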
884
885/* Pure syntax.	 */
886
887/* This array holds the chars that always start a comment.  If the
888   pre-processor is disabled, these aren't very useful.	 */
889char arm_comment_chars[] = "@";
890
891/* This array holds the chars that only start a comment at the beginning of
892   a line.  If the line seems to have the form '# 123 filename'
893   .line and .file directives will appear in the pre-processed output.	*/
894/* Note that input_file.c hand checks for '#' at the beginning of the
895   first line of the input file.  This is because the compiler outputs
896   #NO_APP at the beginning of its output.  */
897/* Also note that comments like this one will always work.  */
898const char line_comment_chars[] = "#";
899
900char arm_line_separator_chars[] = ";";
901
902/* Chars that can be used to separate mant
903   from exp in floating point numbers.	*/
904const char EXP_CHARS[] = "eE";
905
906/* Chars that mean this number is a floating point constant.  */
907/* As in 0f12.456  */
908/* or	 0d1.2345e12  */
909
910const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
911
912/* Prefix characters that indicate the start of an immediate
913   value.  */
914#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
915
916/* Separator character handling.  */
917
918#define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)
919
920static inline int
921skip_past_char (char ** str, char c)
922{
923  /* PR gas/14987: Allow for whitespace before the expected character.  */
924  skip_whitespace (*str);
925
926  if (**str == c)
927    {
928      (*str)++;
929      return SUCCESS;
930    }
931  else
932    return FAIL;
933}
934
935#define skip_past_comma(str) skip_past_char (str, ',')
936
937/* Arithmetic expressions (possibly involving symbols).	 */
938
939/* Return TRUE if anything in the expression is a bignum.  */
940
941static int
942walk_no_bignums (symbolS * sp)
943{
944  if (symbol_get_value_expression (sp)->X_op == O_big)
945    return 1;
946
947  if (symbol_get_value_expression (sp)->X_add_symbol)
948    {
949      return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
950	      || (symbol_get_value_expression (sp)->X_op_symbol
951		  && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
952    }
953
954  return 0;
955}
956
957static int in_my_get_expression = 0;
958
959/* Third argument to my_get_expression.	 */
960#define GE_NO_PREFIX 0
961#define GE_IMM_PREFIX 1
962#define GE_OPT_PREFIX 2
963/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
964   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
965#define GE_OPT_PREFIX_BIG 3
966
967static int
968my_get_expression (expressionS * ep, char ** str, int prefix_mode)
969{
970  char * save_in;
971  segT	 seg;
972
973  /* In unified syntax, all prefixes are optional.  */
974  if (unified_syntax)
975    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
976		  : GE_OPT_PREFIX;
977
978  switch (prefix_mode)
979    {
980    case GE_NO_PREFIX: break;
981    case GE_IMM_PREFIX:
982      if (!is_immediate_prefix (**str))
983	{
984	  inst.error = _("immediate expression requires a # prefix");
985	  return FAIL;
986	}
987      (*str)++;
988      break;
989    case GE_OPT_PREFIX:
990    case GE_OPT_PREFIX_BIG:
991      if (is_immediate_prefix (**str))
992	(*str)++;
993      break;
994    default: abort ();
995    }
996
997  memset (ep, 0, sizeof (expressionS));
998
999  save_in = input_line_pointer;
1000  input_line_pointer = *str;
1001  in_my_get_expression = 1;
1002  seg = expression (ep);
1003  in_my_get_expression = 0;
1004
1005  if (ep->X_op == O_illegal || ep->X_op == O_absent)
1006    {
1007      /* We found a bad or missing expression in md_operand().  */
1008      *str = input_line_pointer;
1009      input_line_pointer = save_in;
1010      if (inst.error == NULL)
1011	inst.error = (ep->X_op == O_absent
1012		      ? _("missing expression") :_("bad expression"));
1013      return 1;
1014    }
1015
1016#ifdef OBJ_AOUT
1017  if (seg != absolute_section
1018      && seg != text_section
1019      && seg != data_section
1020      && seg != bss_section
1021      && seg != undefined_section)
1022    {
1023      inst.error = _("bad segment");
1024      *str = input_line_pointer;
1025      input_line_pointer = save_in;
1026      return 1;
1027    }
1028#else
1029  (void) seg;
1030#endif
1031
1032  /* Get rid of any bignums now, so that we don't generate an error for which
1033     we can't establish a line number later on.	 Big numbers are never valid
1034     in instructions, which is where this routine is always called.  */
1035  if (prefix_mode != GE_OPT_PREFIX_BIG
1036      && (ep->X_op == O_big
1037	  || (ep->X_add_symbol
1038	      && (walk_no_bignums (ep->X_add_symbol)
1039		  || (ep->X_op_symbol
1040		      && walk_no_bignums (ep->X_op_symbol))))))
1041    {
1042      inst.error = _("invalid constant");
1043      *str = input_line_pointer;
1044      input_line_pointer = save_in;
1045      return 1;
1046    }
1047
1048  *str = input_line_pointer;
1049  input_line_pointer = save_in;
1050  return 0;
1051}
1052
1053/* Turn a string in input_line_pointer into a floating point constant
1054   of type TYPE, and store the appropriate bytes in *LITP.  The number
1055   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
1056   returned, or NULL on OK.
1057
1058   Note that fp constants aren't represented in the normal way on the ARM.
1059   In big endian mode, things are as expected.	However, in little endian
1060   mode fp constants are big-endian word-wise, and little-endian byte-wise
1061   within the words.  For example, (double) 1.1 in big endian mode is
1062   the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1063   the byte sequence 99 99 f1 3f 9a 99 99 99.
1064
1065   ??? The format of 12 byte floats is uncertain according to gcc's arm.h.  */
1066
1067char *
1068md_atof (int type, char * litP, int * sizeP)
1069{
1070  int prec;
1071  LITTLENUM_TYPE words[MAX_LITTLENUMS];
1072  char *t;
1073  int i;
1074
1075  switch (type)
1076    {
1077    case 'f':
1078    case 'F':
1079    case 's':
1080    case 'S':
1081      prec = 2;
1082      break;
1083
1084    case 'd':
1085    case 'D':
1086    case 'r':
1087    case 'R':
1088      prec = 4;
1089      break;
1090
1091    case 'x':
1092    case 'X':
1093      prec = 5;
1094      break;
1095
1096    case 'p':
1097    case 'P':
1098      prec = 5;
1099      break;
1100
1101    default:
1102      *sizeP = 0;
1103      return _("Unrecognized or unsupported floating point constant");
1104    }
1105
1106  t = atof_ieee (input_line_pointer, type, words);
1107  if (t)
1108    input_line_pointer = t;
1109  *sizeP = prec * sizeof (LITTLENUM_TYPE);
1110
1111  if (target_big_endian)
1112    {
1113      for (i = 0; i < prec; i++)
1114	{
1115	  md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1116	  litP += sizeof (LITTLENUM_TYPE);
1117	}
1118    }
1119  else
1120    {
1121      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
1122	for (i = prec - 1; i >= 0; i--)
1123	  {
1124	    md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1125	    litP += sizeof (LITTLENUM_TYPE);
1126	  }
1127      else
1128	/* For a 4 byte float the order of elements in `words' is 1 0.
1129	   For an 8 byte float the order is 1 0 3 2.  */
1130	for (i = 0; i < prec; i += 2)
1131	  {
1132	    md_number_to_chars (litP, (valueT) words[i + 1],
1133				sizeof (LITTLENUM_TYPE));
1134	    md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
1135				(valueT) words[i], sizeof (LITTLENUM_TYPE));
1136	    litP += 2 * sizeof (LITTLENUM_TYPE);
1137	  }
1138    }
1139
1140  return NULL;
1141}
1142
1143/* We handle all bad expressions here, so that we can report the faulty
1144   instruction in the error message.  */
1145void
1146md_operand (expressionS * exp)
1147{
1148  if (in_my_get_expression)
1149    exp->X_op = O_illegal;
1150}
1151
1152/* Immediate values.  */
1153
1154/* Generic immediate-value read function for use in directives.
1155   Accepts anything that 'expression' can fold to a constant.
1156   *val receives the number.  */
1157#ifdef OBJ_ELF
1158static int
1159immediate_for_directive (int *val)
1160{
1161  expressionS exp;
1162  exp.X_op = O_illegal;
1163
1164  if (is_immediate_prefix (*input_line_pointer))
1165    {
1166      input_line_pointer++;
1167      expression (&exp);
1168    }
1169
1170  if (exp.X_op != O_constant)
1171    {
1172      as_bad (_("expected #constant"));
1173      ignore_rest_of_line ();
1174      return FAIL;
1175    }
1176  *val = exp.X_add_number;
1177  return SUCCESS;
1178}
1179#endif
1180
1181/* Register parsing.  */
1182
1183/* Generic register parser.  CCP points to what should be the
1184   beginning of a register name.  If it is indeed a valid register
1185   name, advance CCP over it and return the reg_entry structure;
1186   otherwise return NULL.  Does not issue diagnostics.	*/
1187
1188static struct reg_entry *
1189arm_reg_parse_multi (char **ccp)
1190{
1191  char *start = *ccp;
1192  char *p;
1193  struct reg_entry *reg;
1194
1195  skip_whitespace (start);
1196
1197#ifdef REGISTER_PREFIX
1198  if (*start != REGISTER_PREFIX)
1199    return NULL;
1200  start++;
1201#endif
1202#ifdef OPTIONAL_REGISTER_PREFIX
1203  if (*start == OPTIONAL_REGISTER_PREFIX)
1204    start++;
1205#endif
1206
1207  p = start;
1208  if (!ISALPHA (*p) || !is_name_beginner (*p))
1209    return NULL;
1210
1211  do
1212    p++;
1213  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1214
1215  reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1216
1217  if (!reg)
1218    return NULL;
1219
1220  *ccp = p;
1221  return reg;
1222}
1223
1224static int
1225arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1226		    enum arm_reg_type type)
1227{
1228  /* Alternative syntaxes are accepted for a few register classes.  */
1229  switch (type)
1230    {
1231    case REG_TYPE_MVF:
1232    case REG_TYPE_MVD:
1233    case REG_TYPE_MVFX:
1234    case REG_TYPE_MVDX:
1235      /* Generic coprocessor register names are allowed for these.  */
1236      if (reg && reg->type == REG_TYPE_CN)
1237	return reg->number;
1238      break;
1239
1240    case REG_TYPE_CP:
1241      /* For backward compatibility, a bare number is valid here.  */
1242      {
1243	unsigned long processor = strtoul (start, ccp, 10);
1244	if (*ccp != start && processor <= 15)
1245	  return processor;
1246      }
1247
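      /* Fall through.  */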
1248    case REG_TYPE_MMXWC:
1249      /* WC includes WCG.  ??? I'm not sure this is true for all
1250	 instructions that take WC registers.  */
1251      if (reg && reg->type == REG_TYPE_MMXWCG)
1252	return reg->number;
1253      break;
1254
1255    default:
1256      break;
1257    }
1258
1259  return FAIL;
1260}
1261
1262/* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1263   return value is the register number or FAIL.  */
1264
1265static int
1266arm_reg_parse (char **ccp, enum arm_reg_type type)
1267{
1268  char *start = *ccp;
1269  struct reg_entry *reg = arm_reg_parse_multi (ccp);
1270  int ret;
1271
1272  /* Do not allow a scalar (reg+index) to parse as a register.  */
1273  if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1274    return FAIL;
1275
1276  if (reg && reg->type == type)
1277    return reg->number;
1278
1279  if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1280    return ret;
1281
1282  *ccp = start;
1283  return FAIL;
1284}
1285
1286/* Parse a Neon type specifier. *STR should point at the leading '.'
1287   character. Does no verification at this stage that the type fits the opcode
1288   properly. E.g.,
1289
1290     .i32.i32.s16
1291     .s32.f32
1292     .u16
1293
1294   Can all be legally parsed by this function.
1295
1296   Fills in neon_type struct pointer with parsed information, and updates STR
1297   to point after the parsed type specifier. Returns SUCCESS if this was a legal
1298   type, FAIL if not.  */
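/* For illustration (not part of the original source): parsing ".s32.f32"
   yields elems == 2 with el[0] == { NT_signed, 32 } and
   el[1] == { NT_float, 32 }; a bare ".f" is recorded as { NT_float, 32 }.  */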
1299
1300static int
1301parse_neon_type (struct neon_type *type, char **str)
1302{
1303  char *ptr = *str;
1304
1305  if (type)
1306    type->elems = 0;
1307
1308  while (type->elems < NEON_MAX_TYPE_ELS)
1309    {
1310      enum neon_el_type thistype = NT_untyped;
1311      unsigned thissize = -1u;
1312
1313      if (*ptr != '.')
1314	break;
1315
1316      ptr++;
1317
1318      /* Just a size without an explicit type.  */
1319      if (ISDIGIT (*ptr))
1320	goto parsesize;
1321
1322      switch (TOLOWER (*ptr))
1323	{
1324	case 'i': thistype = NT_integer; break;
1325	case 'f': thistype = NT_float; break;
1326	case 'p': thistype = NT_poly; break;
1327	case 's': thistype = NT_signed; break;
1328	case 'u': thistype = NT_unsigned; break;
1329	case 'd':
1330	  thistype = NT_float;
1331	  thissize = 64;
1332	  ptr++;
1333	  goto done;
1334	default:
1335	  as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1336	  return FAIL;
1337	}
1338
1339      ptr++;
1340
1341      /* .f is an abbreviation for .f32.  */
1342      if (thistype == NT_float && !ISDIGIT (*ptr))
1343	thissize = 32;
1344      else
1345	{
1346	parsesize:
1347	  thissize = strtoul (ptr, &ptr, 10);
1348
1349	  if (thissize != 8 && thissize != 16 && thissize != 32
1350	      && thissize != 64)
1351	    {
1352	      as_bad (_("bad size %d in type specifier"), thissize);
1353	      return FAIL;
1354	    }
1355	}
1356
1357      done:
1358      if (type)
1359	{
1360	  type->el[type->elems].type = thistype;
1361	  type->el[type->elems].size = thissize;
1362	  type->elems++;
1363	}
1364    }
1365
1366  /* Empty/missing type is not a successful parse.  */
1367  if (type->elems == 0)
1368    return FAIL;
1369
1370  *str = ptr;
1371
1372  return SUCCESS;
1373}
1374
1375/* Errors may be set multiple times during parsing or bit encoding
1376   (particularly in the Neon bits), but usually the earliest error which is set
1377   will be the most meaningful. Avoid overwriting it with later (cascading)
1378   errors by calling this function.  */
1379
1380static void
1381first_error (const char *err)
1382{
1383  if (!inst.error)
1384    inst.error = err;
1385}
1386
1387/* Parse a single type, e.g. ".s32", leading period included.  */
1388static int
1389parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1390{
1391  char *str = *ccp;
1392  struct neon_type optype;
1393
1394  if (*str == '.')
1395    {
1396      if (parse_neon_type (&optype, &str) == SUCCESS)
1397	{
1398	  if (optype.elems == 1)
1399	    *vectype = optype.el[0];
1400	  else
1401	    {
1402	      first_error (_("only one type should be specified for operand"));
1403	      return FAIL;
1404	    }
1405	}
1406      else
1407	{
1408	  first_error (_("vector type expected"));
1409	  return FAIL;
1410	}
1411    }
1412  else
1413    return FAIL;
1414
1415  *ccp = str;
1416
1417  return SUCCESS;
1418}
1419
1420/* Special values for lane indices (which normally have a range of 0-7); both
1421   still fit into a 4-bit integer.  */
1422
1423#define NEON_ALL_LANES		15
1424#define NEON_INTERLEAVE_LANES	14
1425
1426/* Parse either a register or a scalar, with an optional type. Return the
1427   register number, and optionally fill in the actual type of the register
1428   when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1429   type/index information in *TYPEINFO.  */
1430
1431static int
1432parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
1433			   enum arm_reg_type *rtype,
1434			   struct neon_typed_alias *typeinfo)
1435{
1436  char *str = *ccp;
1437  struct reg_entry *reg = arm_reg_parse_multi (&str);
1438  struct neon_typed_alias atype;
1439  struct neon_type_el parsetype;
1440
1441  atype.defined = 0;
1442  atype.index = -1;
1443  atype.eltype.type = NT_invtype;
1444  atype.eltype.size = -1;
1445
1446  /* Try alternate syntax for some types of register. Note these are mutually
1447     exclusive with the Neon syntax extensions.  */
1448  if (reg == NULL)
1449    {
1450      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
1451      if (altreg != FAIL)
1452	*ccp = str;
1453      if (typeinfo)
1454	*typeinfo = atype;
1455      return altreg;
1456    }
1457
1458  /* Undo polymorphism when a set of register types may be accepted.  */
1459  if ((type == REG_TYPE_NDQ
1460       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
1461      || (type == REG_TYPE_VFSD
1462	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
1463      || (type == REG_TYPE_NSDQ
1464	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
1465	      || reg->type == REG_TYPE_NQ))
1466      || (type == REG_TYPE_MMXWC
1467	  && (reg->type == REG_TYPE_MMXWCG)))
1468    type = (enum arm_reg_type) reg->type;
1469
1470  if (type != reg->type)
1471    return FAIL;
1472
1473  if (reg->neon)
1474    atype = *reg->neon;
1475
1476  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
1477    {
1478      if ((atype.defined & NTA_HASTYPE) != 0)
1479	{
1480	  first_error (_("can't redefine type for operand"));
1481	  return FAIL;
1482	}
1483      atype.defined |= NTA_HASTYPE;
1484      atype.eltype = parsetype;
1485    }
1486
1487  if (skip_past_char (&str, '[') == SUCCESS)
1488    {
1489      if (type != REG_TYPE_VFD)
1490	{
1491	  first_error (_("only D registers may be indexed"));
1492	  return FAIL;
1493	}
1494
1495      if ((atype.defined & NTA_HASINDEX) != 0)
1496	{
1497	  first_error (_("can't change index for operand"));
1498	  return FAIL;
1499	}
1500
1501      atype.defined |= NTA_HASINDEX;
1502
1503      if (skip_past_char (&str, ']') == SUCCESS)
1504	atype.index = NEON_ALL_LANES;
1505      else
1506	{
1507	  expressionS exp;
1508
1509	  my_get_expression (&exp, &str, GE_NO_PREFIX);
1510
1511	  if (exp.X_op != O_constant)
1512	    {
1513	      first_error (_("constant expression required"));
1514	      return FAIL;
1515	    }
1516
1517	  if (skip_past_char (&str, ']') == FAIL)
1518	    return FAIL;
1519
1520	  atype.index = exp.X_add_number;
1521	}
1522    }
1523
1524  if (typeinfo)
1525    *typeinfo = atype;
1526
1527  if (rtype)
1528    *rtype = type;
1529
1530  *ccp = str;
1531
1532  return reg->number;
1533}
1534
1535/* Like arm_reg_parse, but allow the following extra features:
1536    - If RTYPE is non-zero, return the (possibly restricted) type of the
1537      register (e.g. Neon double or quad reg when either has been requested).
1538    - If this is a Neon vector type with additional type information, fill
1539      in the struct pointed to by VECTYPE (if non-NULL).
1540   This function will fault on encountering a scalar.  */
1541
1542static int
1543arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1544		     enum arm_reg_type *rtype, struct neon_type_el *vectype)
1545{
1546  struct neon_typed_alias atype;
1547  char *str = *ccp;
1548  int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1549
1550  if (reg == FAIL)
1551    return FAIL;
1552
1553  /* Do not allow regname(... to parse as a register.  */
1554  if (*str == '(')
1555    return FAIL;
1556
1557  /* Do not allow a scalar (reg+index) to parse as a register.  */
1558  if ((atype.defined & NTA_HASINDEX) != 0)
1559    {
1560      first_error (_("register operand expected, but got scalar"));
1561      return FAIL;
1562    }
1563
1564  if (vectype)
1565    *vectype = atype.eltype;
1566
1567  *ccp = str;
1568
1569  return reg;
1570}
1571
1572#define NEON_SCALAR_REG(X)	((X) >> 4)
1573#define NEON_SCALAR_INDEX(X)	((X) & 15)
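/* Illustrative example (not part of the original source): the scalar parser
   below encodes d5[2] as 5 * 16 + 2 == 0x52, so NEON_SCALAR_REG (0x52) == 5
   and NEON_SCALAR_INDEX (0x52) == 2.  */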
1574
1575/* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1576   have enough information to be able to do a good job bounds-checking. So, we
1577   just do easy checks here, and do further checks later.  */
1578
1579static int
1580parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1581{
1582  int reg;
1583  char *str = *ccp;
1584  struct neon_typed_alias atype;
1585
1586  reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1587
1588  if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1589    return FAIL;
1590
1591  if (atype.index == NEON_ALL_LANES)
1592    {
1593      first_error (_("scalar must have an index"));
1594      return FAIL;
1595    }
1596  else if (atype.index >= 64 / elsize)
1597    {
1598      first_error (_("scalar index out of range"));
1599      return FAIL;
1600    }
1601
1602  if (type)
1603    *type = atype.eltype;
1604
1605  *ccp = str;
1606
1607  return reg * 16 + atype.index;
1608}
1609
1610/* Parse an ARM register list.  Returns the bitmask, or FAIL.  */
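/* For illustration (not part of the original source): "{r0, r2-r4}" parses
   to the mask 0x1d (bits 0, 2, 3 and 4 set).  */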
1611
1612static long
1613parse_reg_list (char ** strp)
1614{
1615  char * str = * strp;
1616  long	 range = 0;
1617  int	 another_range;
1618
1619  /* We come back here if we get ranges concatenated by '+' or '|'.  */
1620  do
1621    {
1622      skip_whitespace (str);
1623
1624      another_range = 0;
1625
1626      if (*str == '{')
1627	{
1628	  int in_range = 0;
1629	  int cur_reg = -1;
1630
1631	  str++;
1632	  do
1633	    {
1634	      int reg;
1635
1636	      if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1637		{
1638		  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1639		  return FAIL;
1640		}
1641
1642	      if (in_range)
1643		{
1644		  int i;
1645
1646		  if (reg <= cur_reg)
1647		    {
1648		      first_error (_("bad range in register list"));
1649		      return FAIL;
1650		    }
1651
1652		  for (i = cur_reg + 1; i < reg; i++)
1653		    {
1654		      if (range & (1 << i))
1655			as_tsktsk
1656			  (_("Warning: duplicated register (r%d) in register list"),
1657			   i);
1658		      else
1659			range |= 1 << i;
1660		    }
1661		  in_range = 0;
1662		}
1663
1664	      if (range & (1 << reg))
1665		as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1666			   reg);
1667	      else if (reg <= cur_reg)
1668		as_tsktsk (_("Warning: register range not in ascending order"));
1669
1670	      range |= 1 << reg;
1671	      cur_reg = reg;
1672	    }
1673	  while (skip_past_comma (&str) != FAIL
1674		 || (in_range = 1, *str++ == '-'));
1675	  str--;
1676
1677	  if (skip_past_char (&str, '}') == FAIL)
1678	    {
1679	      first_error (_("missing `}'"));
1680	      return FAIL;
1681	    }
1682	}
1683      else
1684	{
1685	  expressionS exp;
1686
1687	  if (my_get_expression (&exp, &str, GE_NO_PREFIX))
1688	    return FAIL;
1689
1690	  if (exp.X_op == O_constant)
1691	    {
1692	      if (exp.X_add_number
1693		  != (exp.X_add_number & 0x0000ffff))
1694		{
1695		  inst.error = _("invalid register mask");
1696		  return FAIL;
1697		}
1698
1699	      if ((range & exp.X_add_number) != 0)
1700		{
1701		  int regno = range & exp.X_add_number;
1702
1703		  regno &= -regno;
1704		  regno = (1 << regno) - 1;
1705		  as_tsktsk
1706		    (_("Warning: duplicated register (r%d) in register list"),
1707		     regno);
1708		}
1709
1710	      range |= exp.X_add_number;
1711	    }
1712	  else
1713	    {
1714	      if (inst.reloc.type != 0)
1715		{
1716		  inst.error = _("expression too complex");
1717		  return FAIL;
1718		}
1719
1720	      memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
1721	      inst.reloc.type = BFD_RELOC_ARM_MULTI;
1722	      inst.reloc.pc_rel = 0;
1723	    }
1724	}
1725
1726      if (*str == '|' || *str == '+')
1727	{
1728	  str++;
1729	  another_range = 1;
1730	}
1731    }
1732  while (another_range);
1733
1734  *strp = str;
1735  return range;
1736}
1737
1738/* Types of registers in a list.  */
1739
1740enum reg_list_els
1741{
1742  REGLIST_VFP_S,
1743  REGLIST_VFP_D,
1744  REGLIST_NEON_D
1745};
1746
1747/* Parse a VFP register list.  If the string is invalid return FAIL.
1748   Otherwise return the number of registers, and set PBASE to the first
1749   register.  Parses registers of type ETYPE.
1750   If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1751     - Q registers can be used to specify pairs of D registers
1752     - { } can be omitted from around a singleton register list
1753	 FIXME: This is not implemented, as it would require backtracking in
1754	 some cases, e.g.:
1755	   vtbl.8 d3,d4,d5
1756	 This could be done (the meaning isn't really ambiguous), but doesn't
1757	 fit in well with the current parsing framework.
1758     - 32 D registers may be used (also true for VFPv3).
1759   FIXME: Types are ignored in these register lists, which is probably a
1760   bug.  */
1761
1762static int
1763parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
1764{
1765  char *str = *ccp;
1766  int base_reg;
1767  int new_base;
1768  enum arm_reg_type regtype = (enum arm_reg_type) 0;
1769  int max_regs = 0;
1770  int count = 0;
1771  int warned = 0;
1772  unsigned long mask = 0;
1773  int i;
1774
1775  if (skip_past_char (&str, '{') == FAIL)
1776    {
1777      inst.error = _("expecting {");
1778      return FAIL;
1779    }
1780
1781  switch (etype)
1782    {
1783    case REGLIST_VFP_S:
1784      regtype = REG_TYPE_VFS;
1785      max_regs = 32;
1786      break;
1787
1788    case REGLIST_VFP_D:
1789      regtype = REG_TYPE_VFD;
1790      break;
1791
1792    case REGLIST_NEON_D:
1793      regtype = REG_TYPE_NDQ;
1794      break;
1795    }
1796
1797  if (etype != REGLIST_VFP_S)
1798    {
1799      /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
1800      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
1801	{
1802	  max_regs = 32;
1803	  if (thumb_mode)
1804	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
1805				    fpu_vfp_ext_d32);
1806	  else
1807	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
1808				    fpu_vfp_ext_d32);
1809	}
1810      else
1811	max_regs = 16;
1812    }
1813
1814  base_reg = max_regs;
1815
1816  do
1817    {
1818      int setmask = 1, addregs = 1;
1819
1820      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);
1821
1822      if (new_base == FAIL)
1823	{
1824	  first_error (_(reg_expected_msgs[regtype]));
1825	  return FAIL;
1826	}
1827
1828      if (new_base >= max_regs)
1829	{
1830	  first_error (_("register out of range in list"));
1831	  return FAIL;
1832	}
1833
1834      /* Note: a value of 2 * n is returned for the register Q<n>.  */
1835      if (regtype == REG_TYPE_NQ)
1836	{
1837	  setmask = 3;
1838	  addregs = 2;
1839	}
1840
1841      if (new_base < base_reg)
1842	base_reg = new_base;
1843
1844      if (mask & (setmask << new_base))
1845	{
1846	  first_error (_("invalid register list"));
1847	  return FAIL;
1848	}
1849
1850      if ((mask >> new_base) != 0 && ! warned)
1851	{
1852	  as_tsktsk (_("register list not in ascending order"));
1853	  warned = 1;
1854	}
1855
1856      mask |= setmask << new_base;
1857      count += addregs;
1858
1859      if (*str == '-') /* We have the start of a range expression.  */
1860	{
1861	  int high_range;
1862
1863	  str++;
1864
1865	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
1866	      == FAIL)
1867	    {
1868	      inst.error = gettext (reg_expected_msgs[regtype]);
1869	      return FAIL;
1870	    }
1871
1872	  if (high_range >= max_regs)
1873	    {
1874	      first_error (_("register out of range in list"));
1875	      return FAIL;
1876	    }
1877
1878	  if (regtype == REG_TYPE_NQ)
1879	    high_range = high_range + 1;
1880
1881	  if (high_range <= new_base)
1882	    {
1883	      inst.error = _("register range not in ascending order");
1884	      return FAIL;
1885	    }
1886
1887	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
1888	    {
1889	      if (mask & (setmask << new_base))
1890		{
1891		  inst.error = _("invalid register list");
1892		  return FAIL;
1893		}
1894
1895	      mask |= setmask << new_base;
1896	      count += addregs;
1897	    }
1898	}
1899    }
1900  while (skip_past_comma (&str) != FAIL);
1901
1902  str++;
1903
1904  /* Sanity check -- should have raised a parse error above.  */
1905  if (count == 0 || count > max_regs)
1906    abort ();
1907
1908  *pbase = base_reg;
1909
1910  /* Final test -- the registers must be consecutive.  */
1911  mask >>= base_reg;
1912  for (i = 0; i < count; i++)
1913    {
1914      if ((mask & (1u << i)) == 0)
1915	{
1916	  inst.error = _("non-contiguous register range");
1917	  return FAIL;
1918	}
1919    }
1920
1921  *ccp = str;
1922
1923  return count;
1924}
1925
1926/* True if two alias types are the same.  */
1927
1928static bfd_boolean
1929neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1930{
1931  if (!a && !b)
1932    return TRUE;
1933
1934  if (!a || !b)
1935    return FALSE;
1936
1937  if (a->defined != b->defined)
1938    return FALSE;
1939
1940  if ((a->defined & NTA_HASTYPE) != 0
1941      && (a->eltype.type != b->eltype.type
1942	  || a->eltype.size != b->eltype.size))
1943    return FALSE;
1944
1945  if ((a->defined & NTA_HASINDEX) != 0
1946      && (a->index != b->index))
1947    return FALSE;
1948
1949  return TRUE;
1950}
1951
1952/* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1953   The base register is put in *PBASE.
1954   The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1955   the return value.
1956   The register stride (minus one) is put in bit 4 of the return value.
1957   Bits [6:5] encode the list length (minus one).
1958   The type of the list elements is put in *ELTYPE, if non-NULL.  */
1959
1960#define NEON_LANE(X)		((X) & 0xf)
1961#define NEON_REG_STRIDE(X)	((((X) >> 4) & 1) + 1)
1962#define NEON_REGLIST_LENGTH(X)	((((X) >> 5) & 3) + 1)
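
/* Worked example (illustrative): for the element list "{d0[2], d2[2]}",
   parse_neon_el_struct_list below yields lane 2, register stride 2 and
   list length 2, i.e. the return value 0x32, which the macros above
   decode as NEON_LANE = 2, NEON_REG_STRIDE = 2, NEON_REGLIST_LENGTH = 2.  */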
1963
1964static int
1965parse_neon_el_struct_list (char **str, unsigned *pbase,
1966			   struct neon_type_el *eltype)
1967{
1968  char *ptr = *str;
1969  int base_reg = -1;
1970  int reg_incr = -1;
1971  int count = 0;
1972  int lane = -1;
1973  int leading_brace = 0;
1974  enum arm_reg_type rtype = REG_TYPE_NDQ;
1975  const char *const incr_error = _("register stride must be 1 or 2");
1976  const char *const type_error = _("mismatched element/structure types in list");
1977  struct neon_typed_alias firsttype;
1978
1979  if (skip_past_char (&ptr, '{') == SUCCESS)
1980    leading_brace = 1;
1981
1982  do
1983    {
1984      struct neon_typed_alias atype;
1985      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);
1986
1987      if (getreg == FAIL)
1988	{
1989	  first_error (_(reg_expected_msgs[rtype]));
1990	  return FAIL;
1991	}
1992
1993      if (base_reg == -1)
1994	{
1995	  base_reg = getreg;
1996	  if (rtype == REG_TYPE_NQ)
1997	    {
1998	      reg_incr = 1;
1999	    }
2000	  firsttype = atype;
2001	}
2002      else if (reg_incr == -1)
2003	{
2004	  reg_incr = getreg - base_reg;
2005	  if (reg_incr < 1 || reg_incr > 2)
2006	    {
2007	      first_error (_(incr_error));
2008	      return FAIL;
2009	    }
2010	}
2011      else if (getreg != base_reg + reg_incr * count)
2012	{
2013	  first_error (_(incr_error));
2014	  return FAIL;
2015	}
2016
2017      if (! neon_alias_types_same (&atype, &firsttype))
2018	{
2019	  first_error (_(type_error));
2020	  return FAIL;
2021	}
2022
2023      /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
2024	 modes.  */
2025      if (ptr[0] == '-')
2026	{
2027	  struct neon_typed_alias htype;
2028	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
2029	  if (lane == -1)
2030	    lane = NEON_INTERLEAVE_LANES;
2031	  else if (lane != NEON_INTERLEAVE_LANES)
2032	    {
2033	      first_error (_(type_error));
2034	      return FAIL;
2035	    }
2036	  if (reg_incr == -1)
2037	    reg_incr = 1;
2038	  else if (reg_incr != 1)
2039	    {
2040	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
2041	      return FAIL;
2042	    }
2043	  ptr++;
2044	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
2045	  if (hireg == FAIL)
2046	    {
2047	      first_error (_(reg_expected_msgs[rtype]));
2048	      return FAIL;
2049	    }
2050	  if (! neon_alias_types_same (&htype, &firsttype))
2051	    {
2052	      first_error (_(type_error));
2053	      return FAIL;
2054	    }
2055	  count += hireg + dregs - getreg;
2056	  continue;
2057	}
2058
2059      /* If we're using Q registers, we can't use [] or [n] syntax.  */
2060      if (rtype == REG_TYPE_NQ)
2061	{
2062	  count += 2;
2063	  continue;
2064	}
2065
2066      if ((atype.defined & NTA_HASINDEX) != 0)
2067	{
2068	  if (lane == -1)
2069	    lane = atype.index;
2070	  else if (lane != atype.index)
2071	    {
2072	      first_error (_(type_error));
2073	      return FAIL;
2074	    }
2075	}
2076      else if (lane == -1)
2077	lane = NEON_INTERLEAVE_LANES;
2078      else if (lane != NEON_INTERLEAVE_LANES)
2079	{
2080	  first_error (_(type_error));
2081	  return FAIL;
2082	}
2083      count++;
2084    }
2085  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);
2086
2087  /* No lane set by [x]. We must be interleaving structures.  */
2088  if (lane == -1)
2089    lane = NEON_INTERLEAVE_LANES;
2090
2091  /* Sanity check.  */
2092  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
2093      || (count > 1 && reg_incr == -1))
2094    {
2095      first_error (_("error parsing element/structure list"));
2096      return FAIL;
2097    }
2098
2099  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
2100    {
2101      first_error (_("expected }"));
2102      return FAIL;
2103    }
2104
2105  if (reg_incr == -1)
2106    reg_incr = 1;
2107
2108  if (eltype)
2109    *eltype = firsttype.eltype;
2110
2111  *pbase = base_reg;
2112  *str = ptr;
2113
2114  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
2115}
2116
2117/* Parse an explicit relocation suffix on an expression.  This is
2118   either nothing, or a word in parentheses.  Note that if !OBJ_ELF,
2119   arm_reloc_hsh contains no entries, so this function can only
2120   succeed if there is no () after the word.  Returns -1 on error,
2121   BFD_RELOC_UNUSED if there wasn't any suffix.	 */
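
/* For example (ELF only, illustrative; "sym" is just a placeholder):
   ".word sym(got)" or ".word sym(target2)" attach the named relocation
   to the constant emitted by s_arm_elf_cons below; the accepted names
   are whatever has been entered into arm_reloc_hsh.  */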
2122
2123static int
2124parse_reloc (char **str)
2125{
2126  struct reloc_entry *r;
2127  char *p, *q;
2128
2129  if (**str != '(')
2130    return BFD_RELOC_UNUSED;
2131
2132  p = *str + 1;
2133  q = p;
2134
2135  while (*q && *q != ')' && *q != ',')
2136    q++;
2137  if (*q != ')')
2138    return -1;
2139
2140  if ((r = (struct reloc_entry *)
2141       hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2142    return -1;
2143
2144  *str = q + 1;
2145  return r->reloc;
2146}
2147
2148/* Directives: register aliases.  */
2149
2150static struct reg_entry *
2151insert_reg_alias (char *str, unsigned number, int type)
2152{
2153  struct reg_entry *new_reg;
2154  const char *name;
2155
2156  if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2157    {
2158      if (new_reg->builtin)
2159	as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2160
2161      /* Only warn about a redefinition if it's not defined as the
2162	 same register.	 */
2163      else if (new_reg->number != number || new_reg->type != type)
2164	as_warn (_("ignoring redefinition of register alias '%s'"), str);
2165
2166      return NULL;
2167    }
2168
2169  name = xstrdup (str);
2170  new_reg = (struct reg_entry *) xmalloc (sizeof (struct reg_entry));
2171
2172  new_reg->name = name;
2173  new_reg->number = number;
2174  new_reg->type = type;
2175  new_reg->builtin = FALSE;
2176  new_reg->neon = NULL;
2177
2178  if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2179    abort ();
2180
2181  return new_reg;
2182}
2183
2184static void
2185insert_neon_reg_alias (char *str, int number, int type,
2186		       struct neon_typed_alias *atype)
2187{
2188  struct reg_entry *reg = insert_reg_alias (str, number, type);
2189
2190  if (!reg)
2191    {
2192      first_error (_("attempt to redefine typed alias"));
2193      return;
2194    }
2195
2196  if (atype)
2197    {
2198      reg->neon = (struct neon_typed_alias *)
2199	  xmalloc (sizeof (struct neon_typed_alias));
2200      *reg->neon = *atype;
2201    }
2202}
2203
2204/* Look for the .req directive.	 This is of the form:
2205
2206	new_register_name .req existing_register_name
2207
2208   If we find one, or if it looks sufficiently like one that we want to
2209   handle any error here, return TRUE.  Otherwise return FALSE.  */
2210
2211static bfd_boolean
2212create_register_alias (char * newname, char *p)
2213{
2214  struct reg_entry *old;
2215  char *oldname, *nbuf;
2216  size_t nlen;
2217
2218  /* The input scrubber ensures that whitespace after the mnemonic is
2219     collapsed to single spaces.  */
2220  oldname = p;
2221  if (strncmp (oldname, " .req ", 6) != 0)
2222    return FALSE;
2223
2224  oldname += 6;
2225  if (*oldname == '\0')
2226    return FALSE;
2227
2228  old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
2229  if (!old)
2230    {
2231      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
2232      return TRUE;
2233    }
2234
2235  /* If TC_CASE_SENSITIVE is defined, then newname already points to
2236     the desired alias name, and p points to its end.  If not, then
2237     the desired alias name is in the global original_case_string.  */
2238#ifdef TC_CASE_SENSITIVE
2239  nlen = p - newname;
2240#else
2241  newname = original_case_string;
2242  nlen = strlen (newname);
2243#endif
2244
2245  nbuf = (char *) alloca (nlen + 1);
2246  memcpy (nbuf, newname, nlen);
2247  nbuf[nlen] = '\0';
2248
2249  /* Create aliases under the new name as stated; an all-lowercase
2250     version of the new name; and an all-uppercase version of the new
2251     name.  */
2252  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
2253    {
2254      for (p = nbuf; *p; p++)
2255	*p = TOUPPER (*p);
2256
2257      if (strncmp (nbuf, newname, nlen))
2258	{
2259	  /* If this attempt to create an additional alias fails, do not bother
2260	     trying to create the all-lower case alias.  We will fail and issue
2261	     a second, duplicate error message.  This situation arises when the
2262	     programmer does something like:
2263	       foo .req r0
2264	       Foo .req r1
2265	     The second .req creates the "Foo" alias but then fails to create
2266	     the artificial FOO alias because it has already been created by the
2267	     first .req.  */
2268	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
2269	    return TRUE;
2270	}
2271
2272      for (p = nbuf; *p; p++)
2273	*p = TOLOWER (*p);
2274
2275      if (strncmp (nbuf, newname, nlen))
2276	insert_reg_alias (nbuf, old->number, old->type);
2277    }
2278
2279  return TRUE;
2280}
2281
2282/* Create a Neon typed/indexed register alias using directives, e.g.:
2283     X .dn d5.s32[1]
2284     Y .qn 6.s16
2285     Z .dn d7
2286     T .dn Z[0]
2287   These typed registers can be used instead of the types specified after the
2288   Neon mnemonic, so long as all operands given have types. Types can also be
2289   specified directly, e.g.:
2290     vadd d0.s32, d1.s32, d2.s32  */
2291
2292static bfd_boolean
2293create_neon_reg_alias (char *newname, char *p)
2294{
2295  enum arm_reg_type basetype;
2296  struct reg_entry *basereg;
2297  struct reg_entry mybasereg;
2298  struct neon_type ntype;
2299  struct neon_typed_alias typeinfo;
2300  char *namebuf, *nameend ATTRIBUTE_UNUSED;
2301  int namelen;
2302
2303  typeinfo.defined = 0;
2304  typeinfo.eltype.type = NT_invtype;
2305  typeinfo.eltype.size = -1;
2306  typeinfo.index = -1;
2307
2308  nameend = p;
2309
2310  if (strncmp (p, " .dn ", 5) == 0)
2311    basetype = REG_TYPE_VFD;
2312  else if (strncmp (p, " .qn ", 5) == 0)
2313    basetype = REG_TYPE_NQ;
2314  else
2315    return FALSE;
2316
2317  p += 5;
2318
2319  if (*p == '\0')
2320    return FALSE;
2321
2322  basereg = arm_reg_parse_multi (&p);
2323
2324  if (basereg && basereg->type != basetype)
2325    {
2326      as_bad (_("bad type for register"));
2327      return FALSE;
2328    }
2329
2330  if (basereg == NULL)
2331    {
2332      expressionS exp;
2333      /* Try parsing as an integer.  */
2334      my_get_expression (&exp, &p, GE_NO_PREFIX);
2335      if (exp.X_op != O_constant)
2336	{
2337	  as_bad (_("expression must be constant"));
2338	  return FALSE;
2339	}
2340      basereg = &mybasereg;
2341      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
2342						  : exp.X_add_number;
2343      basereg->neon = 0;
2344    }
2345
2346  if (basereg->neon)
2347    typeinfo = *basereg->neon;
2348
2349  if (parse_neon_type (&ntype, &p) == SUCCESS)
2350    {
2351      /* We got a type.  */
2352      if (typeinfo.defined & NTA_HASTYPE)
2353	{
2354	  as_bad (_("can't redefine the type of a register alias"));
2355	  return FALSE;
2356	}
2357
2358      typeinfo.defined |= NTA_HASTYPE;
2359      if (ntype.elems != 1)
2360	{
2361	  as_bad (_("you must specify a single type only"));
2362	  return FALSE;
2363	}
2364      typeinfo.eltype = ntype.el[0];
2365    }
2366
2367  if (skip_past_char (&p, '[') == SUCCESS)
2368    {
2369      expressionS exp;
2370      /* We got a scalar index.  */
2371
2372      if (typeinfo.defined & NTA_HASINDEX)
2373	{
2374	  as_bad (_("can't redefine the index of a scalar alias"));
2375	  return FALSE;
2376	}
2377
2378      my_get_expression (&exp, &p, GE_NO_PREFIX);
2379
2380      if (exp.X_op != O_constant)
2381	{
2382	  as_bad (_("scalar index must be constant"));
2383	  return FALSE;
2384	}
2385
2386      typeinfo.defined |= NTA_HASINDEX;
2387      typeinfo.index = exp.X_add_number;
2388
2389      if (skip_past_char (&p, ']') == FAIL)
2390	{
2391	  as_bad (_("expecting ]"));
2392	  return FALSE;
2393	}
2394    }
2395
2396  /* If TC_CASE_SENSITIVE is defined, then newname already points to
2397     the desired alias name, and p points to its end.  If not, then
2398     the desired alias name is in the global original_case_string.  */
2399#ifdef TC_CASE_SENSITIVE
2400  namelen = nameend - newname;
2401#else
2402  newname = original_case_string;
2403  namelen = strlen (newname);
2404#endif
2405
2406  namebuf = (char *) alloca (namelen + 1);
2407  strncpy (namebuf, newname, namelen);
2408  namebuf[namelen] = '\0';
2409
2410  insert_neon_reg_alias (namebuf, basereg->number, basetype,
2411			 typeinfo.defined != 0 ? &typeinfo : NULL);
2412
2413  /* Insert name in all uppercase.  */
2414  for (p = namebuf; *p; p++)
2415    *p = TOUPPER (*p);
2416
2417  if (strncmp (namebuf, newname, namelen))
2418    insert_neon_reg_alias (namebuf, basereg->number, basetype,
2419			   typeinfo.defined != 0 ? &typeinfo : NULL);
2420
2421  /* Insert name in all lowercase.  */
2422  for (p = namebuf; *p; p++)
2423    *p = TOLOWER (*p);
2424
2425  if (strncmp (namebuf, newname, namelen))
2426    insert_neon_reg_alias (namebuf, basereg->number, basetype,
2427			   typeinfo.defined != 0 ? &typeinfo : NULL);
2428
2429  return TRUE;
2430}
2431
2432/* Should never be called, as .req goes between the alias and the
2433   register name, not at the beginning of the line.  */
2434
2435static void
2436s_req (int a ATTRIBUTE_UNUSED)
2437{
2438  as_bad (_("invalid syntax for .req directive"));
2439}
2440
2441static void
2442s_dn (int a ATTRIBUTE_UNUSED)
2443{
2444  as_bad (_("invalid syntax for .dn directive"));
2445}
2446
2447static void
2448s_qn (int a ATTRIBUTE_UNUSED)
2449{
2450  as_bad (_("invalid syntax for .qn directive"));
2451}
2452
2453/* The .unreq directive deletes an alias which was previously defined
2454   by .req.  For example:
2455
2456       my_alias .req r11
2457       .unreq my_alias	  */
2458
2459static void
2460s_unreq (int a ATTRIBUTE_UNUSED)
2461{
2462  char * name;
2463  char saved_char;
2464
2465  name = input_line_pointer;
2466
2467  while (*input_line_pointer != 0
2468	 && *input_line_pointer != ' '
2469	 && *input_line_pointer != '\n')
2470    ++input_line_pointer;
2471
2472  saved_char = *input_line_pointer;
2473  *input_line_pointer = 0;
2474
2475  if (!*name)
2476    as_bad (_("invalid syntax for .unreq directive"));
2477  else
2478    {
2479      struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2480							      name);
2481
2482      if (!reg)
2483	as_bad (_("unknown register alias '%s'"), name);
2484      else if (reg->builtin)
2485	as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2486		 name);
2487      else
2488	{
2489	  char * p;
2490	  char * nbuf;
2491
2492	  hash_delete (arm_reg_hsh, name, FALSE);
2493	  free ((char *) reg->name);
2494	  if (reg->neon)
2495	    free (reg->neon);
2496	  free (reg);
2497
2498	  /* Also locate the all upper case and all lower case versions.
2499	     Do not complain if we cannot find one or the other as it
2500	     was probably deleted above.  */
2501
2502	  nbuf = strdup (name);
2503	  for (p = nbuf; *p; p++)
2504	    *p = TOUPPER (*p);
2505	  reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2506	  if (reg)
2507	    {
2508	      hash_delete (arm_reg_hsh, nbuf, FALSE);
2509	      free ((char *) reg->name);
2510	      if (reg->neon)
2511		free (reg->neon);
2512	      free (reg);
2513	    }
2514
2515	  for (p = nbuf; *p; p++)
2516	    *p = TOLOWER (*p);
2517	  reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2518	  if (reg)
2519	    {
2520	      hash_delete (arm_reg_hsh, nbuf, FALSE);
2521	      free ((char *) reg->name);
2522	      if (reg->neon)
2523		free (reg->neon);
2524	      free (reg);
2525	    }
2526
2527	  free (nbuf);
2528	}
2529    }
2530
2531  *input_line_pointer = saved_char;
2532  demand_empty_rest_of_line ();
2533}
2534
2535/* Directives: Instruction set selection.  */
2536
2537#ifdef OBJ_ELF
2538/* This code is to handle mapping symbols as defined in the ARM ELF spec.
2539   (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2540   Note that previously, $a and $t had type STT_FUNC (BSF_FUNCTION flag),
2541   and $d had type STT_OBJECT (BSF_OBJECT flag).  Now all three are untyped.  */
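
/* For instance (illustrative), a section holding ARM code followed by a
   literal pool and then Thumb code would carry the mapping symbols
   $a (at the ARM code), $d (at the pool data) and $t (at the Thumb code),
   so that disassemblers and other tools can tell code from data.  */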
2542
2543/* Create a new mapping symbol for the transition to STATE.  */
2544
2545static void
2546make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
2547{
2548  symbolS * symbolP;
2549  const char * symname;
2550  int type;
2551
2552  switch (state)
2553    {
2554    case MAP_DATA:
2555      symname = "$d";
2556      type = BSF_NO_FLAGS;
2557      break;
2558    case MAP_ARM:
2559      symname = "$a";
2560      type = BSF_NO_FLAGS;
2561      break;
2562    case MAP_THUMB:
2563      symname = "$t";
2564      type = BSF_NO_FLAGS;
2565      break;
2566    default:
2567      abort ();
2568    }
2569
2570  symbolP = symbol_new (symname, now_seg, value, frag);
2571  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
2572
2573  switch (state)
2574    {
2575    case MAP_ARM:
2576      THUMB_SET_FUNC (symbolP, 0);
2577      ARM_SET_THUMB (symbolP, 0);
2578      ARM_SET_INTERWORK (symbolP, support_interwork);
2579      break;
2580
2581    case MAP_THUMB:
2582      THUMB_SET_FUNC (symbolP, 1);
2583      ARM_SET_THUMB (symbolP, 1);
2584      ARM_SET_INTERWORK (symbolP, support_interwork);
2585      break;
2586
2587    case MAP_DATA:
2588    default:
2589      break;
2590    }
2591
2592  /* Save the mapping symbols for future reference.  Also check that
2593     we do not place two mapping symbols at the same offset within a
2594     frag.  We'll handle overlap between frags in
2595     check_mapping_symbols.
2596
2597     If .fill or another data-filling directive generates zero-sized data,
2598     the mapping symbol for the following code will have the same value
2599     as the one generated for the data filling directive.  In this case,
2600     we replace the old symbol with the new one at the same address.  */
2601  if (value == 0)
2602    {
2603      if (frag->tc_frag_data.first_map != NULL)
2604	{
2605	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
2606	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
2607	}
2608      frag->tc_frag_data.first_map = symbolP;
2609    }
2610  if (frag->tc_frag_data.last_map != NULL)
2611    {
2612      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
2613      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
2614	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
2615    }
2616  frag->tc_frag_data.last_map = symbolP;
2617}
2618
2619/* We must sometimes convert a region marked as code to data during
2620   code alignment, if an odd number of bytes have to be padded.  The
2621   code mapping symbol is pushed to an aligned address.  */
2622
2623static void
2624insert_data_mapping_symbol (enum mstate state,
2625			    valueT value, fragS *frag, offsetT bytes)
2626{
2627  /* If there was already a mapping symbol, remove it.  */
2628  if (frag->tc_frag_data.last_map != NULL
2629      && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
2630    {
2631      symbolS *symp = frag->tc_frag_data.last_map;
2632
2633      if (value == 0)
2634	{
2635	  know (frag->tc_frag_data.first_map == symp);
2636	  frag->tc_frag_data.first_map = NULL;
2637	}
2638      frag->tc_frag_data.last_map = NULL;
2639      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
2640    }
2641
2642  make_mapping_symbol (MAP_DATA, value, frag);
2643  make_mapping_symbol (state, value + bytes, frag);
2644}
2645
2646static void mapping_state_2 (enum mstate state, int max_chars);
2647
2648/* Set the mapping state to STATE.  Only call this when about to
2649   emit some STATE bytes to the file.  */
2650
2651#define TRANSITION(from, to) (mapstate == (from) && state == (to))
2652void
2653mapping_state (enum mstate state)
2654{
2655  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2656
2657  if (mapstate == state)
2658    /* The mapping symbol has already been emitted.
2659       There is nothing else to do.  */
2660    return;
2661
2662  if (state == MAP_ARM || state == MAP_THUMB)
2663    /*  PR gas/12931
2664	All ARM instructions require 4-byte alignment.
2665	(Almost) all Thumb instructions require 2-byte alignment.
2666
2667	When emitting instructions into any section, mark the section
2668	appropriately.
2669
2670	Some Thumb instructions are alignment-sensitive modulo 4 bytes,
2671	but themselves require 2-byte alignment; this applies to some
2672	PC-relative forms.  However, these cases will involve implicit
2673	literal pool generation or an explicit .align >=2, both of
2674	which will cause the section to be marked with sufficient
2675	alignment.  Thus, we don't handle those cases here.  */
2676    record_alignment (now_seg, state == MAP_ARM ? 2 : 1);
2677
2678  if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
2679    /* This case will be evaluated later.  */
2680    return;
2681
2682  mapping_state_2 (state, 0);
2683}
2684
2685/* Same as mapping_state, but MAX_CHARS bytes have already been
2686   allocated.  Put the mapping symbol that far back.  */
2687
2688static void
2689mapping_state_2 (enum mstate state, int max_chars)
2690{
2691  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2692
2693  if (!SEG_NORMAL (now_seg))
2694    return;
2695
2696  if (mapstate == state)
2697    /* The mapping symbol has already been emitted.
2698       There is nothing else to do.  */
2699    return;
2700
2701  if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
2702	  || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
2703    {
2704      struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
2705      const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);
2706
2707      if (add_symbol)
2708	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
2709    }
2710
2711  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2712  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
2713}
2714#undef TRANSITION
2715#else
2716#define mapping_state(x) ((void)0)
2717#define mapping_state_2(x, y) ((void)0)
2718#endif
2719
2720/* Find the real, Thumb encoded start of a Thumb function.  */
2721
2722#ifdef OBJ_COFF
2723static symbolS *
2724find_real_start (symbolS * symbolP)
2725{
2726  char *       real_start;
2727  const char * name = S_GET_NAME (symbolP);
2728  symbolS *    new_target;
2729
2730  /* This definition must agree with the one in gcc/config/arm/thumb.c.	 */
2731#define STUB_NAME ".real_start_of"
2732
2733  if (name == NULL)
2734    abort ();
2735
2736  /* The compiler may generate BL instructions to local labels because
2737     it needs to perform a branch to a far away location. These labels
2738     do not have a corresponding ".real_start_of" label.  We check
2739     both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2740     the ".real_start_of" convention for nonlocal branches.  */
2741  if (S_IS_LOCAL (symbolP) || name[0] == '.')
2742    return symbolP;
2743
2744  real_start = ACONCAT ((STUB_NAME, name, NULL));
2745  new_target = symbol_find (real_start);
2746
2747  if (new_target == NULL)
2748    {
2749      as_warn (_("Failed to find real start of function: %s\n"), name);
2750      new_target = symbolP;
2751    }
2752
2753  return new_target;
2754}
2755#endif
2756
2757static void
2758opcode_select (int width)
2759{
2760  switch (width)
2761    {
2762    case 16:
2763      if (! thumb_mode)
2764	{
2765	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2766	    as_bad (_("selected processor does not support THUMB opcodes"));
2767
2768	  thumb_mode = 1;
2769	  /* No need to force the alignment, since we will have been
2770	     coming from ARM mode, which is word-aligned.  */
2771	  record_alignment (now_seg, 1);
2772	}
2773      break;
2774
2775    case 32:
2776      if (thumb_mode)
2777	{
2778	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2779	    as_bad (_("selected processor does not support ARM opcodes"));
2780
2781	  thumb_mode = 0;
2782
2783	  if (!need_pass_2)
2784	    frag_align (2, 0, 0);
2785
2786	  record_alignment (now_seg, 1);
2787	}
2788      break;
2789
2790    default:
2791      as_bad (_("invalid instruction size selected (%d)"), width);
2792    }
2793}
2794
2795static void
2796s_arm (int ignore ATTRIBUTE_UNUSED)
2797{
2798  opcode_select (32);
2799  demand_empty_rest_of_line ();
2800}
2801
2802static void
2803s_thumb (int ignore ATTRIBUTE_UNUSED)
2804{
2805  opcode_select (16);
2806  demand_empty_rest_of_line ();
2807}
2808
2809static void
2810s_code (int unused ATTRIBUTE_UNUSED)
2811{
2812  int temp;
2813
2814  temp = get_absolute_expression ();
2815  switch (temp)
2816    {
2817    case 16:
2818    case 32:
2819      opcode_select (temp);
2820      break;
2821
2822    default:
2823      as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2824    }
2825}
2826
2827static void
2828s_force_thumb (int ignore ATTRIBUTE_UNUSED)
2829{
2830  /* If we are not already in Thumb mode, go into it, EVEN if
2831     the target processor does not support Thumb instructions.
2832     This is used by gcc/config/arm/lib1funcs.asm, for example,
2833     to compile interworking support functions even if the
2834     target processor does not support interworking.	*/
2835  if (! thumb_mode)
2836    {
2837      thumb_mode = 2;
2838      record_alignment (now_seg, 1);
2839    }
2840
2841  demand_empty_rest_of_line ();
2842}
2843
2844static void
2845s_thumb_func (int ignore ATTRIBUTE_UNUSED)
2846{
2847  s_thumb (0);
2848
2849  /* The following label is the name/address of the start of a Thumb function.
2850     We need to know this for the interworking support.	 */
2851  label_is_thumb_function_name = TRUE;
2852}
2853
2854/* Perform a .set directive, but also mark the alias as
2855   being a thumb function.  */
2856
2857static void
2858s_thumb_set (int equiv)
2859{
2860  /* XXX the following is a duplicate of the code for s_set() in read.c
2861     We cannot just call that code as we need to get at the symbol that
2862     is created.  */
2863  char *    name;
2864  char	    delim;
2865  char *    end_name;
2866  symbolS * symbolP;
2867
2868  /* Especial apologies for the random logic:
2869     This just grew, and could be parsed much more simply!
2870     Dean - in haste.  */
2871  delim	    = get_symbol_name (& name);
2872  end_name  = input_line_pointer;
2873  (void) restore_line_pointer (delim);
2874
2875  if (*input_line_pointer != ',')
2876    {
2877      *end_name = 0;
2878      as_bad (_("expected comma after name \"%s\""), name);
2879      *end_name = delim;
2880      ignore_rest_of_line ();
2881      return;
2882    }
2883
2884  input_line_pointer++;
2885  *end_name = 0;
2886
2887  if (name[0] == '.' && name[1] == '\0')
2888    {
2889      /* XXX - this should not happen to .thumb_set.  */
2890      abort ();
2891    }
2892
2893  if ((symbolP = symbol_find (name)) == NULL
2894      && (symbolP = md_undefined_symbol (name)) == NULL)
2895    {
2896#ifndef NO_LISTING
2897      /* When doing symbol listings, play games with dummy fragments living
2898	 outside the normal fragment chain to record the file and line info
2899	 for this symbol.  */
2900      if (listing & LISTING_SYMBOLS)
2901	{
2902	  extern struct list_info_struct * listing_tail;
2903	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));
2904
2905	  memset (dummy_frag, 0, sizeof (fragS));
2906	  dummy_frag->fr_type = rs_fill;
2907	  dummy_frag->line = listing_tail;
2908	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
2909	  dummy_frag->fr_symbol = symbolP;
2910	}
2911      else
2912#endif
2913	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);
2914
2915#ifdef OBJ_COFF
2916      /* "set" symbols are local unless otherwise specified.  */
2917      SF_SET_LOCAL (symbolP);
2918#endif /* OBJ_COFF  */
2919    }				/* Make a new symbol.  */
2920
2921  symbol_table_insert (symbolP);
2922
2923  * end_name = delim;
2924
2925  if (equiv
2926      && S_IS_DEFINED (symbolP)
2927      && S_GET_SEGMENT (symbolP) != reg_section)
2928    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));
2929
2930  pseudo_set (symbolP);
2931
2932  demand_empty_rest_of_line ();
2933
2934  /* XXX Now we come to the Thumb specific bit of code.	 */
2935
2936  THUMB_SET_FUNC (symbolP, 1);
2937  ARM_SET_THUMB (symbolP, 1);
2938#if defined OBJ_ELF || defined OBJ_COFF
2939  ARM_SET_INTERWORK (symbolP, support_interwork);
2940#endif
2941}
2942
2943/* Directives: Mode selection.  */
2944
2945/* .syntax [unified|divided] - choose the new unified syntax
2946   (same for Arm and Thumb encoding, modulo slight differences in what
2947   can be represented) or the old divergent syntax for each mode.  */
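
/* For example (illustrative): under ".syntax unified" a flag-setting Thumb
   add must be written explicitly, e.g. "adds r0, r0, #1", whereas the old
   divided Thumb syntax wrote "add r0, #1" and set the flags implicitly.  */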
2948static void
2949s_syntax (int unused ATTRIBUTE_UNUSED)
2950{
2951  char *name, delim;
2952
2953  delim = get_symbol_name (& name);
2954
2955  if (!strcasecmp (name, "unified"))
2956    unified_syntax = TRUE;
2957  else if (!strcasecmp (name, "divided"))
2958    unified_syntax = FALSE;
2959  else
2960    {
2961      as_bad (_("unrecognized syntax mode \"%s\""), name);
2962      return;
2963    }
2964  (void) restore_line_pointer (delim);
2965  demand_empty_rest_of_line ();
2966}
2967
2968/* Directives: sectioning and alignment.  */
2969
2970static void
2971s_bss (int ignore ATTRIBUTE_UNUSED)
2972{
2973  /* We don't support putting frags in the BSS segment; we fake it by
2974     marking in_bss, then looking at s_skip for clues.	*/
2975  subseg_set (bss_section, 0);
2976  demand_empty_rest_of_line ();
2977
2978#ifdef md_elf_section_change_hook
2979  md_elf_section_change_hook ();
2980#endif
2981}
2982
2983static void
2984s_even (int ignore ATTRIBUTE_UNUSED)
2985{
2986  /* Never make frag if expect extra pass.  */
2987  if (!need_pass_2)
2988    frag_align (1, 0, 0);
2989
2990  record_alignment (now_seg, 1);
2991
2992  demand_empty_rest_of_line ();
2993}
2994
2995/* Directives: CodeComposer Studio.  */
2996
2997/*  .ref  (for CodeComposer Studio syntax only).  */
2998static void
2999s_ccs_ref (int unused ATTRIBUTE_UNUSED)
3000{
3001  if (codecomposer_syntax)
3002    ignore_rest_of_line ();
3003  else
3004    as_bad (_(".ref pseudo-op only available with -mccs flag."));
3005}
3006
3007/*  If name is not NULL, then it is used for marking the beginning of a
3008    function, whereas if it is NULL then it marks the end of the function.  */
3009static void
3010asmfunc_debug (const char * name)
3011{
3012  static const char * last_name = NULL;
3013
3014  if (name != NULL)
3015    {
3016      gas_assert (last_name == NULL);
3017      last_name = name;
3018
3019      if (debug_type == DEBUG_STABS)
3020         stabs_generate_asm_func (name, name);
3021    }
3022  else
3023    {
3024      gas_assert (last_name != NULL);
3025
3026      if (debug_type == DEBUG_STABS)
3027        stabs_generate_asm_endfunc (last_name, last_name);
3028
3029      last_name = NULL;
3030    }
3031}
3032
3033static void
3034s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
3035{
3036  if (codecomposer_syntax)
3037    {
3038      switch (asmfunc_state)
3039	{
3040	case OUTSIDE_ASMFUNC:
3041	  asmfunc_state = WAITING_ASMFUNC_NAME;
3042	  break;
3043
3044	case WAITING_ASMFUNC_NAME:
3045	  as_bad (_(".asmfunc repeated."));
3046	  break;
3047
3048	case WAITING_ENDASMFUNC:
3049	  as_bad (_(".asmfunc without function."));
3050	  break;
3051	}
3052      demand_empty_rest_of_line ();
3053    }
3054  else
3055    as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3056}
3057
3058static void
3059s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
3060{
3061  if (codecomposer_syntax)
3062    {
3063      switch (asmfunc_state)
3064	{
3065	case OUTSIDE_ASMFUNC:
3066	  as_bad (_(".endasmfunc without a .asmfunc."));
3067	  break;
3068
3069	case WAITING_ASMFUNC_NAME:
3070	  as_bad (_(".endasmfunc without function."));
3071	  break;
3072
3073	case WAITING_ENDASMFUNC:
3074	  asmfunc_state = OUTSIDE_ASMFUNC;
3075	  asmfunc_debug (NULL);
3076	  break;
3077	}
3078      demand_empty_rest_of_line ();
3079    }
3080  else
3081    as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3082}
3083
3084static void
3085s_ccs_def (int name)
3086{
3087  if (codecomposer_syntax)
3088    s_globl (name);
3089  else
3090    as_bad (_(".def pseudo-op only available with -mccs flag."));
3091}
3092
3093/* Directives: Literal pools.  */
3094
3095static literal_pool *
3096find_literal_pool (void)
3097{
3098  literal_pool * pool;
3099
3100  for (pool = list_of_pools; pool != NULL; pool = pool->next)
3101    {
3102      if (pool->section == now_seg
3103	  && pool->sub_section == now_subseg)
3104	break;
3105    }
3106
3107  return pool;
3108}
3109
3110static literal_pool *
3111find_or_make_literal_pool (void)
3112{
3113  /* Next literal pool ID number.  */
3114  static unsigned int latest_pool_num = 1;
3115  literal_pool *      pool;
3116
3117  pool = find_literal_pool ();
3118
3119  if (pool == NULL)
3120    {
3121      /* Create a new pool.  */
3122      pool = (literal_pool *) xmalloc (sizeof (* pool));
3123      if (! pool)
3124	return NULL;
3125
3126      pool->next_free_entry = 0;
3127      pool->section	    = now_seg;
3128      pool->sub_section	    = now_subseg;
3129      pool->next	    = list_of_pools;
3130      pool->symbol	    = NULL;
3131      pool->alignment	    = 2;
3132
3133      /* Add it to the list.  */
3134      list_of_pools = pool;
3135    }
3136
3137  /* New pools, and emptied pools, will have a NULL symbol.  */
3138  if (pool->symbol == NULL)
3139    {
3140      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3141				    (valueT) 0, &zero_address_frag);
3142      pool->id = latest_pool_num ++;
3143    }
3144
3145  /* Done.  */
3146  return pool;
3147}
3148
3149/* Add the literal in the global 'inst'
3150   structure to the relevant literal pool.  */
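
/* Illustrative example: for "ldr r0, =0x12345678" the constant cannot be
   encoded directly in a data-processing instruction, so it is added here to
   the current literal pool and the ldr becomes a PC-relative load from that
   pool (typically flushed by a later .ltorg/.pool or at the end of
   assembly).  */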
3151
3152static int
3153add_to_lit_pool (unsigned int nbytes)
3154{
3155#define PADDING_SLOT 0x1
3156#define LIT_ENTRY_SIZE_MASK 0xFF
3157  literal_pool * pool;
3158  unsigned int entry, pool_size = 0;
3159  bfd_boolean padding_slot_p = FALSE;
3160  unsigned imm1 = 0;
3161  unsigned imm2 = 0;
3162
3163  if (nbytes == 8)
3164    {
3165      imm1 = inst.operands[1].imm;
3166      imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
3167	       : inst.reloc.exp.X_unsigned ? 0
3168	       : ((bfd_int64_t) inst.operands[1].imm) >> 32);
3169      if (target_big_endian)
3170	{
3171	  imm1 = imm2;
3172	  imm2 = inst.operands[1].imm;
3173	}
3174    }
3175
3176  pool = find_or_make_literal_pool ();
3177
3178  /* Check if this literal value is already in the pool.  */
3179  for (entry = 0; entry < pool->next_free_entry; entry ++)
3180    {
3181      if (nbytes == 4)
3182	{
3183	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
3184	      && (inst.reloc.exp.X_op == O_constant)
3185	      && (pool->literals[entry].X_add_number
3186		  == inst.reloc.exp.X_add_number)
3187	      && (pool->literals[entry].X_md == nbytes)
3188	      && (pool->literals[entry].X_unsigned
3189		  == inst.reloc.exp.X_unsigned))
3190	    break;
3191
3192	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
3193	      && (inst.reloc.exp.X_op == O_symbol)
3194	      && (pool->literals[entry].X_add_number
3195		  == inst.reloc.exp.X_add_number)
3196	      && (pool->literals[entry].X_add_symbol
3197		  == inst.reloc.exp.X_add_symbol)
3198	      && (pool->literals[entry].X_op_symbol
3199		  == inst.reloc.exp.X_op_symbol)
3200	      && (pool->literals[entry].X_md == nbytes))
3201	    break;
3202	}
3203      else if ((nbytes == 8)
3204	       && !(pool_size & 0x7)
3205	       && ((entry + 1) != pool->next_free_entry)
3206	       && (pool->literals[entry].X_op == O_constant)
3207	       && (pool->literals[entry].X_add_number == (offsetT) imm1)
3208	       && (pool->literals[entry].X_unsigned
3209		   == inst.reloc.exp.X_unsigned)
3210	       && (pool->literals[entry + 1].X_op == O_constant)
3211	       && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
3212	       && (pool->literals[entry + 1].X_unsigned
3213		   == inst.reloc.exp.X_unsigned))
3214	break;
3215
3216      padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
3217      if (padding_slot_p && (nbytes == 4))
3218	break;
3219
3220      pool_size += 4;
3221    }
3222
3223  /* Do we need to create a new entry?	*/
3224  if (entry == pool->next_free_entry)
3225    {
3226      if (entry >= MAX_LITERAL_POOL_SIZE)
3227	{
3228	  inst.error = _("literal pool overflow");
3229	  return FAIL;
3230	}
3231
3232      if (nbytes == 8)
3233	{
3234	  /* For 8-byte entries, we align to an 8-byte boundary,
3235	     and split it into two 4-byte entries, because on a 32-bit
3236	     host 8-byte constants are treated as bignums and thus
3237	     saved in "generic_bignum", which will be overwritten
3238	     by later assignments.
3239
3240	     We also need to make sure there is enough space for
3241	     the split.
3242
3243	     We also check to make sure the literal operand is a
3244	     constant number.  */
3245	  if (!(inst.reloc.exp.X_op == O_constant
3246	        || inst.reloc.exp.X_op == O_big))
3247	    {
3248	      inst.error = _("invalid type for literal pool");
3249	      return FAIL;
3250	    }
3251	  else if (pool_size & 0x7)
3252	    {
3253	      if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
3254		{
3255		  inst.error = _("literal pool overflow");
3256		  return FAIL;
3257		}
3258
3259	      pool->literals[entry] = inst.reloc.exp;
3260	      pool->literals[entry].X_add_number = 0;
3261	      pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
3262	      pool->next_free_entry += 1;
3263	      pool_size += 4;
3264	    }
3265	  else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
3266	    {
3267	      inst.error = _("literal pool overflow");
3268	      return FAIL;
3269	    }
3270
3271	  pool->literals[entry] = inst.reloc.exp;
3272	  pool->literals[entry].X_op = O_constant;
3273	  pool->literals[entry].X_add_number = imm1;
3274	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
3275	  pool->literals[entry++].X_md = 4;
3276	  pool->literals[entry] = inst.reloc.exp;
3277	  pool->literals[entry].X_op = O_constant;
3278	  pool->literals[entry].X_add_number = imm2;
3279	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
3280	  pool->literals[entry].X_md = 4;
3281	  pool->alignment = 3;
3282	  pool->next_free_entry += 1;
3283	}
3284      else
3285	{
3286	  pool->literals[entry] = inst.reloc.exp;
3287	  pool->literals[entry].X_md = 4;
3288	}
3289
3290#ifdef OBJ_ELF
3291      /* PR ld/12974: Record the location of the first source line to reference
3292	 this entry in the literal pool.  If it turns out during linking that the
3293	 symbol does not exist we will be able to give an accurate line number for
3294	 the (first use of the) missing reference.  */
3295      if (debug_type == DEBUG_DWARF2)
3296	dwarf2_where (pool->locs + entry);
3297#endif
3298      pool->next_free_entry += 1;
3299    }
3300  else if (padding_slot_p)
3301    {
3302      pool->literals[entry] = inst.reloc.exp;
3303      pool->literals[entry].X_md = nbytes;
3304    }
3305
3306  inst.reloc.exp.X_op	      = O_symbol;
3307  inst.reloc.exp.X_add_number = pool_size;
3308  inst.reloc.exp.X_add_symbol = pool->symbol;
3309
3310  return SUCCESS;
3311}
3312
3313bfd_boolean
3314tc_start_label_without_colon (void)
3315{
3316  bfd_boolean ret = TRUE;
3317
3318  if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
3319    {
3320      const char *label = input_line_pointer;
3321
3322      while (!is_end_of_line[(int) label[-1]])
3323	--label;
3324
3325      if (*label == '.')
3326	{
3327	  as_bad (_("Invalid label '%s'"), label);
3328	  ret = FALSE;
3329	}
3330
3331      asmfunc_debug (label);
3332
3333      asmfunc_state = WAITING_ENDASMFUNC;
3334    }
3335
3336  return ret;
3337}
3338
3339/* Can't use symbol_new here, so we have to create a symbol and then at
3340   a later date assign it a value.  That's what these functions do.  */
3341
3342static void
3343symbol_locate (symbolS *    symbolP,
3344	       const char * name,	/* It is copied, the caller can modify.	 */
3345	       segT	    segment,	/* Segment identifier (SEG_<something>).  */
3346	       valueT	    valu,	/* Symbol value.  */
3347	       fragS *	    frag)	/* Associated fragment.	 */
3348{
3349  size_t name_length;
3350  char * preserved_copy_of_name;
3351
3352  name_length = strlen (name) + 1;   /* +1 for \0.  */
3353  obstack_grow (&notes, name, name_length);
3354  preserved_copy_of_name = (char *) obstack_finish (&notes);
3355
3356#ifdef tc_canonicalize_symbol_name
3357  preserved_copy_of_name =
3358    tc_canonicalize_symbol_name (preserved_copy_of_name);
3359#endif
3360
3361  S_SET_NAME (symbolP, preserved_copy_of_name);
3362
3363  S_SET_SEGMENT (symbolP, segment);
3364  S_SET_VALUE (symbolP, valu);
3365  symbol_clear_list_pointers (symbolP);
3366
3367  symbol_set_frag (symbolP, frag);
3368
3369  /* Link to end of symbol chain.  */
3370  {
3371    extern int symbol_table_frozen;
3372
3373    if (symbol_table_frozen)
3374      abort ();
3375  }
3376
3377  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
3378
3379  obj_symbol_new_hook (symbolP);
3380
3381#ifdef tc_symbol_new_hook
3382  tc_symbol_new_hook (symbolP);
3383#endif
3384
3385#ifdef DEBUG_SYMS
3386  verify_symbol_chain (symbol_rootP, symbol_lastP);
3387#endif /* DEBUG_SYMS  */
3388}
3389
3390static void
3391s_ltorg (int ignored ATTRIBUTE_UNUSED)
3392{
3393  unsigned int entry;
3394  literal_pool * pool;
3395  char sym_name[20];
3396
3397  pool = find_literal_pool ();
3398  if (pool == NULL
3399      || pool->symbol == NULL
3400      || pool->next_free_entry == 0)
3401    return;
3402
3403  /* Align the pool, since it will be accessed with (at least) word loads.
3404     Only make a frag if we have to.  */
3405  if (!need_pass_2)
3406    frag_align (pool->alignment, 0, 0);
3407
3408  record_alignment (now_seg, 2);
3409
3410#ifdef OBJ_ELF
3411  seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
3412  make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
3413#endif
3414  sprintf (sym_name, "$$lit_\002%x", pool->id);
3415
3416  symbol_locate (pool->symbol, sym_name, now_seg,
3417		 (valueT) frag_now_fix (), frag_now);
3418  symbol_table_insert (pool->symbol);
3419
3420  ARM_SET_THUMB (pool->symbol, thumb_mode);
3421
3422#if defined OBJ_COFF || defined OBJ_ELF
3423  ARM_SET_INTERWORK (pool->symbol, support_interwork);
3424#endif
3425
3426  for (entry = 0; entry < pool->next_free_entry; entry ++)
3427    {
3428#ifdef OBJ_ELF
3429      if (debug_type == DEBUG_DWARF2)
3430	dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
3431#endif
3432      /* First output the expression in the instruction to the pool.  */
3433      emit_expr (&(pool->literals[entry]),
3434		 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
3435    }
3436
3437  /* Mark the pool as empty.  */
3438  pool->next_free_entry = 0;
3439  pool->symbol = NULL;
3440}
3441
3442#ifdef OBJ_ELF
3443/* Forward declarations for functions below, in the MD interface
3444   section.  */
3445static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3446static valueT create_unwind_entry (int);
3447static void start_unwind_section (const segT, int);
3448static void add_unwind_opcode (valueT, int);
3449static void flush_pending_unwind (void);
3450
3451/* Directives: Data.  */
3452
3453static void
3454s_arm_elf_cons (int nbytes)
3455{
3456  expressionS exp;
3457
3458#ifdef md_flush_pending_output
3459  md_flush_pending_output ();
3460#endif
3461
3462  if (is_it_end_of_statement ())
3463    {
3464      demand_empty_rest_of_line ();
3465      return;
3466    }
3467
3468#ifdef md_cons_align
3469  md_cons_align (nbytes);
3470#endif
3471
3472  mapping_state (MAP_DATA);
3473  do
3474    {
3475      int reloc;
3476      char *base = input_line_pointer;
3477
3478      expression (& exp);
3479
3480      if (exp.X_op != O_symbol)
3481	emit_expr (&exp, (unsigned int) nbytes);
3482      else
3483	{
3484	  char *before_reloc = input_line_pointer;
3485	  reloc = parse_reloc (&input_line_pointer);
3486	  if (reloc == -1)
3487	    {
3488	      as_bad (_("unrecognized relocation suffix"));
3489	      ignore_rest_of_line ();
3490	      return;
3491	    }
3492	  else if (reloc == BFD_RELOC_UNUSED)
3493	    emit_expr (&exp, (unsigned int) nbytes);
3494	  else
3495	    {
3496	      reloc_howto_type *howto = (reloc_howto_type *)
3497		  bfd_reloc_type_lookup (stdoutput,
3498					 (bfd_reloc_code_real_type) reloc);
3499	      int size = bfd_get_reloc_size (howto);
3500
3501	      if (reloc == BFD_RELOC_ARM_PLT32)
3502		{
3503		  as_bad (_("(plt) is only valid on branch targets"));
3504		  reloc = BFD_RELOC_UNUSED;
3505		  size = 0;
3506		}
3507
3508	      if (size > nbytes)
3509		as_bad (_("%s relocations do not fit in %d bytes"),
3510			howto->name, nbytes);
3511	      else
3512		{
3513		  /* We've parsed an expression stopping at O_symbol.
3514		     But there may be more expression left now that we
3515		     have parsed the relocation marker.  Parse it again.
3516		     XXX Surely there is a cleaner way to do this.  */
3517		  char *p = input_line_pointer;
3518		  int offset;
3519		  char *save_buf = (char *) alloca (input_line_pointer - base);
3520		  memcpy (save_buf, base, input_line_pointer - base);
3521		  memmove (base + (input_line_pointer - before_reloc),
3522			   base, before_reloc - base);
3523
3524		  input_line_pointer = base + (input_line_pointer-before_reloc);
3525		  expression (&exp);
3526		  memcpy (base, save_buf, p - base);
3527
3528		  offset = nbytes - size;
3529		  p = frag_more (nbytes);
3530		  memset (p, 0, nbytes);
3531		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
3532			       size, &exp, 0, (enum bfd_reloc_code_real) reloc);
3533		}
3534	    }
3535	}
3536    }
3537  while (*input_line_pointer++ == ',');
3538
3539  /* Put terminator back into stream.  */
3540  input_line_pointer --;
3541  demand_empty_rest_of_line ();
3542}
3543
3544/* Emit an expression containing a 32-bit thumb instruction.
3545   Implementation based on put_thumb32_insn.  */
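
/* For example (illustrative): a 32-bit Thumb encoding such as 0xf3af8000
   (nop.w) is emitted as two 16-bit halves, high halfword 0xf3af first and
   low halfword 0x8000 second, matching the memory order of Thumb-2
   instructions on a little-endian target.  */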
3546
3547static void
3548emit_thumb32_expr (expressionS * exp)
3549{
3550  expressionS exp_high = *exp;
3551
3552  exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3553  emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3554  exp->X_add_number &= 0xffff;
3555  emit_expr (exp, (unsigned int) THUMB_SIZE);
3556}
3557
3558/*  Guess the instruction size based on the opcode.  */
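
/* Illustrative examples: 0x4770 (bx lr) is below 0xe800 and is sized as a
   16-bit instruction; 0xf3af8000 (nop.w) is at least 0xe8000000 and is
   sized as a 32-bit instruction; values in between (e.g. 0xe800) are
   ambiguous and make the function below return 0.  */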
3559
3560static int
3561thumb_insn_size (int opcode)
3562{
3563  if ((unsigned int) opcode < 0xe800u)
3564    return 2;
3565  else if ((unsigned int) opcode >= 0xe8000000u)
3566    return 4;
3567  else
3568    return 0;
3569}
3570
3571static bfd_boolean
3572emit_insn (expressionS *exp, int nbytes)
3573{
3574  int size = 0;
3575
3576  if (exp->X_op == O_constant)
3577    {
3578      size = nbytes;
3579
3580      if (size == 0)
3581	size = thumb_insn_size (exp->X_add_number);
3582
3583      if (size != 0)
3584	{
3585	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
3586	    {
3587	      as_bad (_(".inst.n operand too big. "\
3588			"Use .inst.w instead"));
3589	      size = 0;
3590	    }
3591	  else
3592	    {
3593	      if (now_it.state == AUTOMATIC_IT_BLOCK)
3594		set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
3595	      else
3596		set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);
3597
3598	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
3599		emit_thumb32_expr (exp);
3600	      else
3601		emit_expr (exp, (unsigned int) size);
3602
3603	      it_fsm_post_encode ();
3604	    }
3605	}
3606      else
3607	as_bad (_("cannot determine Thumb instruction size. "	\
3608		  "Use .inst.n/.inst.w instead"));
3609    }
3610  else
3611    as_bad (_("constant expression required"));
3612
3613  return (size != 0);
3614}
3615
3616/* Like s_arm_elf_cons but do not use md_cons_align and
3617   set the mapping state to MAP_ARM/MAP_THUMB.  */
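
/* Typical (illustrative) uses of the directive handled below:
     .inst   0xe1a00000    @ one 32-bit ARM instruction (mov r0, r0)
     .inst.n 0x4770        @ one 16-bit Thumb instruction (bx lr)
     .inst.w 0xf3af8000    @ one 32-bit Thumb instruction (nop.w)  */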
3618
3619static void
3620s_arm_elf_inst (int nbytes)
3621{
3622  if (is_it_end_of_statement ())
3623    {
3624      demand_empty_rest_of_line ();
3625      return;
3626    }
3627
3628  /* Calling mapping_state () here will not change ARM/THUMB,
3629     but will ensure not to be in DATA state.  */
3630
3631  if (thumb_mode)
3632    mapping_state (MAP_THUMB);
3633  else
3634    {
3635      if (nbytes != 0)
3636	{
3637	  as_bad (_("width suffixes are invalid in ARM mode"));
3638	  ignore_rest_of_line ();
3639	  return;
3640	}
3641
3642      nbytes = 4;
3643
3644      mapping_state (MAP_ARM);
3645    }
3646
3647  do
3648    {
3649      expressionS exp;
3650
3651      expression (& exp);
3652
3653      if (! emit_insn (& exp, nbytes))
3654	{
3655	  ignore_rest_of_line ();
3656	  return;
3657	}
3658    }
3659  while (*input_line_pointer++ == ',');
3660
3661  /* Put terminator back into stream.  */
3662  input_line_pointer --;
3663  demand_empty_rest_of_line ();
3664}
3665
3666/* Parse a .rel31 directive.  */
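
/* The expected form is ".rel31 <0|1>, expression" (illustrative reading of
   the parsing below): a 31-bit place-relative word is emitted for the
   expression, with bit 31 forced to the given flag value.  */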
3667
3668static void
3669s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
3670{
3671  expressionS exp;
3672  char *p;
3673  valueT highbit;
3674
3675  highbit = 0;
3676  if (*input_line_pointer == '1')
3677    highbit = 0x80000000;
3678  else if (*input_line_pointer != '0')
3679    as_bad (_("expected 0 or 1"));
3680
3681  input_line_pointer++;
3682  if (*input_line_pointer != ',')
3683    as_bad (_("missing comma"));
3684  input_line_pointer++;
3685
3686#ifdef md_flush_pending_output
3687  md_flush_pending_output ();
3688#endif
3689
3690#ifdef md_cons_align
3691  md_cons_align (4);
3692#endif
3693
3694  mapping_state (MAP_DATA);
3695
3696  expression (&exp);
3697
3698  p = frag_more (4);
3699  md_number_to_chars (p, highbit, 4);
3700  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
3701	       BFD_RELOC_ARM_PREL31);
3702
3703  demand_empty_rest_of_line ();
3704}
3705
3706/* Directives: AEABI stack-unwind tables.  */
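
/* A typical (illustrative) use of these directives around a function:
     .fnstart
     .save   {r4, lr}       @ record the registers pushed on entry
     .setfp  fp, sp, #0     @ optional: a frame pointer was established
     ...                    @ function body
     .fnend
   .personality, .personalityindex, .handlerdata and .cantunwind may also
   appear between .fnstart and .fnend, as handled below.  */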
3707
3708/* Parse an unwind_fnstart directive.  Simply records the current location.  */
3709
3710static void
3711s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3712{
3713  demand_empty_rest_of_line ();
3714  if (unwind.proc_start)
3715    {
3716      as_bad (_("duplicate .fnstart directive"));
3717      return;
3718    }
3719
3720  /* Mark the start of the function.  */
3721  unwind.proc_start = expr_build_dot ();
3722
3723  /* Reset the rest of the unwind info.	 */
3724  unwind.opcode_count = 0;
3725  unwind.table_entry = NULL;
3726  unwind.personality_routine = NULL;
3727  unwind.personality_index = -1;
3728  unwind.frame_size = 0;
3729  unwind.fp_offset = 0;
3730  unwind.fp_reg = REG_SP;
3731  unwind.fp_used = 0;
3732  unwind.sp_restored = 0;
3733}
3734
3735
3736/* Parse a handlerdata directive.  Creates the exception handling table entry
3737   for the function.  */
3738
3739static void
3740s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3741{
3742  demand_empty_rest_of_line ();
3743  if (!unwind.proc_start)
3744    as_bad (MISSING_FNSTART);
3745
3746  if (unwind.table_entry)
3747    as_bad (_("duplicate .handlerdata directive"));
3748
3749  create_unwind_entry (1);
3750}
3751
3752/* Parse an unwind_fnend directive.  Generates the index table entry.  */
3753
3754static void
3755s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
3756{
3757  long where;
3758  char *ptr;
3759  valueT val;
3760  unsigned int marked_pr_dependency;
3761
3762  demand_empty_rest_of_line ();
3763
3764  if (!unwind.proc_start)
3765    {
3766      as_bad (_(".fnend directive without .fnstart"));
3767      return;
3768    }
3769
3770  /* Add eh table entry.  */
3771  if (unwind.table_entry == NULL)
3772    val = create_unwind_entry (0);
3773  else
3774    val = 0;
3775
3776  /* Add index table entry.  This is two words.	 */
3777  start_unwind_section (unwind.saved_seg, 1);
3778  frag_align (2, 0, 0);
3779  record_alignment (now_seg, 2);
3780
3781  ptr = frag_more (8);
3782  memset (ptr, 0, 8);
3783  where = frag_now_fix () - 8;
3784
3785  /* Self relative offset of the function start.  */
3786  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
3787	   BFD_RELOC_ARM_PREL31);
3788
3789  /* Indicate dependency on EHABI-defined personality routines to the
3790     linker, if it hasn't been done already.  */
3791  marked_pr_dependency
3792    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
3793  if (unwind.personality_index >= 0 && unwind.personality_index < 3
3794      && !(marked_pr_dependency & (1 << unwind.personality_index)))
3795    {
3796      static const char *const name[] =
3797	{
3798	  "__aeabi_unwind_cpp_pr0",
3799	  "__aeabi_unwind_cpp_pr1",
3800	  "__aeabi_unwind_cpp_pr2"
3801	};
3802      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
3803      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
3804      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
3805	|= 1 << unwind.personality_index;
3806    }
3807
3808  if (val)
3809    /* Inline exception table entry.  */
3810    md_number_to_chars (ptr + 4, val, 4);
3811  else
3812    /* Self relative offset of the table entry.	 */
3813    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
3814	     BFD_RELOC_ARM_PREL31);
3815
3816  /* Restore the original section.  */
3817  subseg_set (unwind.saved_seg, unwind.saved_subseg);
3818
3819  unwind.proc_start = NULL;
3820}
3821
3822
3823/* Parse an unwind_cantunwind directive.  */
3824
3825static void
3826s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3827{
3828  demand_empty_rest_of_line ();
3829  if (!unwind.proc_start)
3830    as_bad (MISSING_FNSTART);
3831
3832  if (unwind.personality_routine || unwind.personality_index != -1)
3833    as_bad (_("personality routine specified for cantunwind frame"));
3834
3835  unwind.personality_index = -2;
3836}
3837
3838
3839/* Parse a personalityindex directive.	*/
3840
3841static void
3842s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3843{
3844  expressionS exp;
3845
3846  if (!unwind.proc_start)
3847    as_bad (MISSING_FNSTART);
3848
3849  if (unwind.personality_routine || unwind.personality_index != -1)
3850    as_bad (_("duplicate .personalityindex directive"));
3851
3852  expression (&exp);
3853
3854  if (exp.X_op != O_constant
3855      || exp.X_add_number < 0 || exp.X_add_number > 15)
3856    {
3857      as_bad (_("bad personality routine number"));
3858      ignore_rest_of_line ();
3859      return;
3860    }
3861
3862  unwind.personality_index = exp.X_add_number;
3863
3864  demand_empty_rest_of_line ();
3865}
3866
3867
3868/* Parse a personality directive.  */
3869
3870static void
3871s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
3872{
3873  char *name, *p, c;
3874
3875  if (!unwind.proc_start)
3876    as_bad (MISSING_FNSTART);
3877
3878  if (unwind.personality_routine || unwind.personality_index != -1)
3879    as_bad (_("duplicate .personality directive"));
3880
3881  c = get_symbol_name (& name);
3882  p = input_line_pointer;
3883  if (c == '"')
3884    ++ input_line_pointer;
3885  unwind.personality_routine = symbol_find_or_make (name);
3886  *p = c;
3887  demand_empty_rest_of_line ();
3888}
3889
3890
3891/* Parse a directive saving core registers.  */
3892
3893static void
3894s_arm_unwind_save_core (void)
3895{
3896  valueT op;
3897  long range;
3898  int n;
3899
3900  range = parse_reg_list (&input_line_pointer);
3901  if (range == FAIL)
3902    {
3903      as_bad (_("expected register list"));
3904      ignore_rest_of_line ();
3905      return;
3906    }
3907
3908  demand_empty_rest_of_line ();
3909
3910  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3911     into .unwind_save {..., sp...}.  We aren't bothered about the value of
3912     ip because it is clobbered by calls.  */
3913  if (unwind.sp_restored && unwind.fp_reg == 12
3914      && (range & 0x3000) == 0x1000)
3915    {
3916      unwind.opcode_count--;
3917      unwind.sp_restored = 0;
3918      range = (range | 0x2000) & ~0x1000;
3919      unwind.pending_offset = 0;
3920    }
3921
3922  /* Pop r4-r15.  */
3923  if (range & 0xfff0)
3924    {
3925      /* See if we can use the short opcodes.  These pop a block of up to 8
3926	 registers starting with r4, plus maybe r14.  */
3927      for (n = 0; n < 8; n++)
3928	{
3929	  /* Break at the first non-saved register.	 */
3930	  if ((range & (1 << (n + 4))) == 0)
3931	    break;
3932	}
3933      /* See if there are any other bits set.  */
3934      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
3935	{
3936	  /* Use the long form.  */
3937	  op = 0x8000 | ((range >> 4) & 0xfff);
3938	  add_unwind_opcode (op, 2);
3939	}
3940      else
3941	{
3942	  /* Use the short form.  */
3943	  if (range & 0x4000)
3944	    op = 0xa8; /* Pop r14.	*/
3945	  else
3946	    op = 0xa0; /* Do not pop r14.  */
3947	  op |= (n - 1);
3948	  add_unwind_opcode (op, 1);
3949	}
3950    }
3951
3952  /* Pop r0-r3.	 */
3953  if (range & 0xf)
3954    {
3955      op = 0xb100 | (range & 0xf);
3956      add_unwind_opcode (op, 2);
3957    }
3958
3959  /* Record the number of bytes pushed.	 */
3960  for (n = 0; n < 16; n++)
3961    {
3962      if (range & (1 << n))
3963	unwind.frame_size += 4;
3964    }
3965}
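
/* For example, ".save {r4-r6, lr}" sets bits 4-6 and 14 of RANGE (0x4070),
   takes the short form above and should emit the single opcode byte 0xaa
   ("pop r4-r6, r14"), adding 16 bytes to frame_size.  A sparse list such as
   "{r4, r8, lr}" instead falls back to the two-byte long form 0x8411 (pop
   under register mask).  */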
3966
3967
3968/* Parse a directive saving FPA registers.  */
3969
3970static void
3971s_arm_unwind_save_fpa (int reg)
3972{
3973  expressionS exp;
3974  int num_regs;
3975  valueT op;
3976
3977  /* Get the number of registers to transfer.  */
3978  if (skip_past_comma (&input_line_pointer) != FAIL)
3979    expression (&exp);
3980  else
3981    exp.X_op = O_illegal;
3982
3983  if (exp.X_op != O_constant)
3984    {
3985      as_bad (_("expected , <constant>"));
3986      ignore_rest_of_line ();
3987      return;
3988    }
3989
3990  num_regs = exp.X_add_number;
3991
3992  if (num_regs < 1 || num_regs > 4)
3993    {
3994      as_bad (_("number of registers must be in the range [1:4]"));
3995      ignore_rest_of_line ();
3996      return;
3997    }
3998
3999  demand_empty_rest_of_line ();
4000
4001  if (reg == 4)
4002    {
4003      /* Short form.  */
4004      op = 0xb4 | (num_regs - 1);
4005      add_unwind_opcode (op, 1);
4006    }
4007  else
4008    {
4009      /* Long form.  */
4010      op = 0xc800 | (reg << 4) | (num_regs - 1);
4011      add_unwind_opcode (op, 2);
4012    }
4013  unwind.frame_size += num_regs * 12;
4014}
4015
4016
4017/* Parse a directive saving VFP registers for ARMv6 and above.  */
4018
4019static void
4020s_arm_unwind_save_vfp_armv6 (void)
4021{
4022  int count;
4023  unsigned int start;
4024  valueT op;
4025  int num_vfpv3_regs = 0;
4026  int num_regs_below_16;
4027
4028  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
4029  if (count == FAIL)
4030    {
4031      as_bad (_("expected register list"));
4032      ignore_rest_of_line ();
4033      return;
4034    }
4035
4036  demand_empty_rest_of_line ();
4037
4038  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
4039     than FSTMX/FLDMX-style ones).  */
4040
4041  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
4042  if (start >= 16)
4043    num_vfpv3_regs = count;
4044  else if (start + count > 16)
4045    num_vfpv3_regs = start + count - 16;
4046
4047  if (num_vfpv3_regs > 0)
4048    {
4049      int start_offset = start > 16 ? start - 16 : 0;
4050      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
4051      add_unwind_opcode (op, 2);
4052    }
4053
4054  /* Generate opcode for registers numbered in the range 0 .. 15.  */
4055  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
4056  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
4057  if (num_regs_below_16 > 0)
4058    {
4059      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
4060      add_unwind_opcode (op, 2);
4061    }
4062
4063  unwind.frame_size += count * 8;
4064}
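
/* For example, ".vsave {d8-d11}" should produce the single opcode 0xc983
   (pop D8-D11, VPUSH/FSTMD form) and add 32 bytes to frame_size, while
   ".vsave {d14-d17}" is split into 0xc9e1 (D14-D15) plus 0xc801 (D16-D17).  */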
4065
4066
4067/* Parse a directive saving VFP registers for pre-ARMv6.  */
4068
4069static void
4070s_arm_unwind_save_vfp (void)
4071{
4072  int count;
4073  unsigned int reg;
4074  valueT op;
4075
4076  count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
4077  if (count == FAIL)
4078    {
4079      as_bad (_("expected register list"));
4080      ignore_rest_of_line ();
4081      return;
4082    }
4083
4084  demand_empty_rest_of_line ();
4085
4086  if (reg == 8)
4087    {
4088      /* Short form.  */
4089      op = 0xb8 | (count - 1);
4090      add_unwind_opcode (op, 1);
4091    }
4092  else
4093    {
4094      /* Long form.  */
4095      op = 0xb300 | (reg << 4) | (count - 1);
4096      add_unwind_opcode (op, 2);
4097    }
4098  unwind.frame_size += count * 8 + 4;
4099}
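
/* For example, ".save {d8-d11}" should produce the short-form opcode 0xbb
   and add 4 * 8 + 4 = 36 bytes to frame_size; the extra word accounts for
   the format word that FSTMFDX stores alongside the registers.  */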
4100
4101
4102/* Parse a directive saving iWMMXt data registers.  */
4103
4104static void
4105s_arm_unwind_save_mmxwr (void)
4106{
4107  int reg;
4108  int hi_reg;
4109  int i;
4110  unsigned mask = 0;
4111  valueT op;
4112
4113  if (*input_line_pointer == '{')
4114    input_line_pointer++;
4115
4116  do
4117    {
4118      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4119
4120      if (reg == FAIL)
4121	{
4122	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4123	  goto error;
4124	}
4125
4126      if (mask >> reg)
4127	as_tsktsk (_("register list not in ascending order"));
4128      mask |= 1 << reg;
4129
4130      if (*input_line_pointer == '-')
4131	{
4132	  input_line_pointer++;
4133	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4134	  if (hi_reg == FAIL)
4135	    {
4136	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4137	      goto error;
4138	    }
4139	  else if (reg >= hi_reg)
4140	    {
4141	      as_bad (_("bad register range"));
4142	      goto error;
4143	    }
4144	  for (; reg < hi_reg; reg++)
4145	    mask |= 1 << reg;
4146	}
4147    }
4148  while (skip_past_comma (&input_line_pointer) != FAIL);
4149
4150  skip_past_char (&input_line_pointer, '}');
4151
4152  demand_empty_rest_of_line ();
4153
4154  /* Generate any deferred opcodes because we're going to be looking at
4155     the list.	*/
4156  flush_pending_unwind ();
4157
4158  for (i = 0; i < 16; i++)
4159    {
4160      if (mask & (1 << i))
4161	unwind.frame_size += 8;
4162    }
4163
4164  /* Attempt to combine with a previous opcode.	 We do this because gcc
4165     likes to output separate unwind directives for a single block of
4166     registers.	 */
4167  if (unwind.opcode_count > 0)
4168    {
4169      i = unwind.opcodes[unwind.opcode_count - 1];
4170      if ((i & 0xf8) == 0xc0)
4171	{
4172	  i &= 7;
4173	  /* Only merge if the blocks are contiguous.  */
4174	  if (i < 6)
4175	    {
4176	      if ((mask & 0xfe00) == (1 << 9))
4177		{
4178		  mask |= ((1 << (i + 11)) - 1) & 0xfc00;
4179		  unwind.opcode_count--;
4180		}
4181	    }
4182	  else if (i == 6 && unwind.opcode_count >= 2)
4183	    {
4184	      i = unwind.opcodes[unwind.opcode_count - 2];
4185	      reg = i >> 4;
4186	      i &= 0xf;
4187
4188	      op = reg > 0 ? 0xffff << (reg - 1) : 0;	/* Avoid shifting by -1.  */
4189	      if (reg > 0
4190		  && ((mask & op) == (1u << (reg - 1))))
4191		{
4192		  op = (1 << (reg + i + 1)) - 1;
4193		  op &= ~((1 << reg) - 1);
4194		  mask |= op;
4195		  unwind.opcode_count -= 2;
4196		}
4197	    }
4198	}
4199    }
4200
4201  hi_reg = 15;
4202  /* We want to generate opcodes in the order the registers have been
4203     saved, i.e. descending order.  */
4204  for (reg = 15; reg >= -1; reg--)
4205    {
4206      /* Save registers in blocks.  */
4207      if (reg < 0
4208	  || !(mask & (1 << reg)))
4209	{
4210	  /* We found an unsaved reg.  Generate opcodes to save the
4211	     preceding block.	*/
4212	  if (reg != hi_reg)
4213	    {
4214	      if (reg == 9)
4215		{
4216		  /* Short form.  */
4217		  op = 0xc0 | (hi_reg - 10);
4218		  add_unwind_opcode (op, 1);
4219		}
4220	      else
4221		{
4222		  /* Long form.	 */
4223		  op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
4224		  add_unwind_opcode (op, 2);
4225		}
4226	    }
4227	  hi_reg = reg - 1;
4228	}
4229    }
4230
4231  return;
4232error:
4233  ignore_rest_of_line ();
4234}
4235
4236static void
4237s_arm_unwind_save_mmxwcg (void)
4238{
4239  int reg;
4240  int hi_reg;
4241  unsigned mask = 0;
4242  valueT op;
4243
4244  if (*input_line_pointer == '{')
4245    input_line_pointer++;
4246
4247  skip_whitespace (input_line_pointer);
4248
4249  do
4250    {
4251      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4252
4253      if (reg == FAIL)
4254	{
4255	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4256	  goto error;
4257	}
4258
4259      reg -= 8;
4260      if (mask >> reg)
4261	as_tsktsk (_("register list not in ascending order"));
4262      mask |= 1 << reg;
4263
4264      if (*input_line_pointer == '-')
4265	{
4266	  input_line_pointer++;
4267	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4268	  if (hi_reg == FAIL)
4269	    {
4270	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4271	      goto error;
4272	    }
4273	  else if (reg >= hi_reg)
4274	    {
4275	      as_bad (_("bad register range"));
4276	      goto error;
4277	    }
4278	  for (; reg < hi_reg; reg++)
4279	    mask |= 1 << reg;
4280	}
4281    }
4282  while (skip_past_comma (&input_line_pointer) != FAIL);
4283
4284  skip_past_char (&input_line_pointer, '}');
4285
4286  demand_empty_rest_of_line ();
4287
4288  /* Generate any deferred opcodes because we're going to be looking at
4289     the list.	*/
4290  flush_pending_unwind ();
4291
4292  for (reg = 0; reg < 16; reg++)
4293    {
4294      if (mask & (1 << reg))
4295	unwind.frame_size += 4;
4296    }
4297  op = 0xc700 | mask;
4298  add_unwind_opcode (op, 2);
4299  return;
4300error:
4301  ignore_rest_of_line ();
4302}
4303
4304
4305/* Parse an unwind_save directive.
4306   If the argument is non-zero, this is a .vsave directive.  */
4307
4308static void
4309s_arm_unwind_save (int arch_v6)
4310{
4311  char *peek;
4312  struct reg_entry *reg;
4313  bfd_boolean had_brace = FALSE;
4314
4315  if (!unwind.proc_start)
4316    as_bad (MISSING_FNSTART);
4317
4318  /* Figure out what sort of save we have.  */
4319  peek = input_line_pointer;
4320
4321  if (*peek == '{')
4322    {
4323      had_brace = TRUE;
4324      peek++;
4325    }
4326
4327  reg = arm_reg_parse_multi (&peek);
4328
4329  if (!reg)
4330    {
4331      as_bad (_("register expected"));
4332      ignore_rest_of_line ();
4333      return;
4334    }
4335
4336  switch (reg->type)
4337    {
4338    case REG_TYPE_FN:
4339      if (had_brace)
4340	{
4341	  as_bad (_("FPA .unwind_save does not take a register list"));
4342	  ignore_rest_of_line ();
4343	  return;
4344	}
4345      input_line_pointer = peek;
4346      s_arm_unwind_save_fpa (reg->number);
4347      return;
4348
4349    case REG_TYPE_RN:
4350      s_arm_unwind_save_core ();
4351      return;
4352
4353    case REG_TYPE_VFD:
4354      if (arch_v6)
4355	s_arm_unwind_save_vfp_armv6 ();
4356      else
4357	s_arm_unwind_save_vfp ();
4358      return;
4359
4360    case REG_TYPE_MMXWR:
4361      s_arm_unwind_save_mmxwr ();
4362      return;
4363
4364    case REG_TYPE_MMXWCG:
4365      s_arm_unwind_save_mmxwcg ();
4366      return;
4367
4368    default:
4369      as_bad (_(".unwind_save does not support this kind of register"));
4370      ignore_rest_of_line ();
4371    }
4372}
4373
4374
4375/* Parse an unwind_movsp directive.  */
4376
4377static void
4378s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
4379{
4380  int reg;
4381  valueT op;
4382  int offset;
4383
4384  if (!unwind.proc_start)
4385    as_bad (MISSING_FNSTART);
4386
4387  reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4388  if (reg == FAIL)
4389    {
4390      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
4391      ignore_rest_of_line ();
4392      return;
4393    }
4394
4395  /* Optional constant.	 */
4396  if (skip_past_comma (&input_line_pointer) != FAIL)
4397    {
4398      if (immediate_for_directive (&offset) == FAIL)
4399	return;
4400    }
4401  else
4402    offset = 0;
4403
4404  demand_empty_rest_of_line ();
4405
4406  if (reg == REG_SP || reg == REG_PC)
4407    {
4408      as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4409      return;
4410    }
4411
4412  if (unwind.fp_reg != REG_SP)
4413    as_bad (_("unexpected .unwind_movsp directive"));
4414
4415  /* Generate opcode to restore the value.  */
4416  op = 0x90 | reg;
4417  add_unwind_opcode (op, 1);
4418
4419  /* Record the information for later.	*/
4420  unwind.fp_reg = reg;
4421  unwind.fp_offset = unwind.frame_size - offset;
4422  unwind.sp_restored = 1;
4423}
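
/* For example, ".movsp r7" emits the opcode 0x97 ("vsp = r7") and records
   the register and current frame offset in unwind.fp_reg/unwind.fp_offset,
   so that the frame can later be unwound relative to r7.  */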
4424
4425/* Parse an unwind_pad directive.  */
4426
4427static void
4428s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4429{
4430  int offset;
4431
4432  if (!unwind.proc_start)
4433    as_bad (MISSING_FNSTART);
4434
4435  if (immediate_for_directive (&offset) == FAIL)
4436    return;
4437
4438  if (offset & 3)
4439    {
4440      as_bad (_("stack increment must be multiple of 4"));
4441      ignore_rest_of_line ();
4442      return;
4443    }
4444
4445  /* Don't generate any opcodes, just record the details for later.  */
4446  unwind.frame_size += offset;
4447  unwind.pending_offset += offset;
4448
4449  demand_empty_rest_of_line ();
4450}
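
/* For example, ".pad #16" adds 16 to both frame_size and pending_offset;
   no opcode is emitted here.  When the pending adjustment is eventually
   flushed it typically becomes a single "vsp = vsp + constant" opcode
   (0x03 for a 16-byte increment).  */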
4451
4452/* Parse an unwind_setfp directive.  */
4453
4454static void
4455s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4456{
4457  int sp_reg;
4458  int fp_reg;
4459  int offset;
4460
4461  if (!unwind.proc_start)
4462    as_bad (MISSING_FNSTART);
4463
4464  fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4465  if (skip_past_comma (&input_line_pointer) == FAIL)
4466    sp_reg = FAIL;
4467  else
4468    sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4469
4470  if (fp_reg == FAIL || sp_reg == FAIL)
4471    {
4472      as_bad (_("expected <reg>, <reg>"));
4473      ignore_rest_of_line ();
4474      return;
4475    }
4476
4477  /* Optional constant.	 */
4478  if (skip_past_comma (&input_line_pointer) != FAIL)
4479    {
4480      if (immediate_for_directive (&offset) == FAIL)
4481	return;
4482    }
4483  else
4484    offset = 0;
4485
4486  demand_empty_rest_of_line ();
4487
4488  if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4489    {
4490      as_bad (_("register must be either sp or set by a previous "
4491		"unwind_movsp directive"));
4492      return;
4493    }
4494
4495  /* Don't generate any opcodes, just record the information for later.	 */
4496  unwind.fp_reg = fp_reg;
4497  unwind.fp_used = 1;
4498  if (sp_reg == REG_SP)
4499    unwind.fp_offset = unwind.frame_size - offset;
4500  else
4501    unwind.fp_offset -= offset;
4502}
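
/* For example, ".setfp fp, sp, #8" following "add fp, sp, #8" records
   fp_reg = r11 and fp_offset = frame_size - 8; the corresponding restore
   opcodes are only generated later, when the pending state is flushed.  */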
4503
4504/* Parse an unwind_raw directive.  */
4505
4506static void
4507s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4508{
4509  expressionS exp;
4510  /* This is an arbitrary limit.	 */
4511  unsigned char op[16];
4512  int count;
4513
4514  if (!unwind.proc_start)
4515    as_bad (MISSING_FNSTART);
4516
4517  expression (&exp);
4518  if (exp.X_op == O_constant
4519      && skip_past_comma (&input_line_pointer) != FAIL)
4520    {
4521      unwind.frame_size += exp.X_add_number;
4522      expression (&exp);
4523    }
4524  else
4525    exp.X_op = O_illegal;
4526
4527  if (exp.X_op != O_constant)
4528    {
4529      as_bad (_("expected <offset>, <opcode>"));
4530      ignore_rest_of_line ();
4531      return;
4532    }
4533
4534  count = 0;
4535
4536  /* Parse the opcode.	*/
4537  for (;;)
4538    {
4539      if (count >= 16)
4540	{
4541	  as_bad (_("unwind opcode too long"));
4542	  ignore_rest_of_line ();
	  return;
4543	}
4544      if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4545	{
4546	  as_bad (_("invalid unwind opcode"));
4547	  ignore_rest_of_line ();
4548	  return;
4549	}
4550      op[count++] = exp.X_add_number;
4551
4552      /* Parse the next byte.  */
4553      if (skip_past_comma (&input_line_pointer) == FAIL)
4554	break;
4555
4556      expression (&exp);
4557    }
4558
4559  /* Add the opcode bytes in reverse order.  */
4560  while (count--)
4561    add_unwind_opcode (op[count], 1);
4562
4563  demand_empty_rest_of_line ();
4564}
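
/* For example, ".unwind_raw 4, 0xb1, 0x01" records a 4-byte stack
   adjustment and inserts the raw opcode bytes 0xb1, 0x01 (an EHABI
   "pop {r0}") into the opcode stream unchanged.  */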
4565
4566
4567/* Parse a .eabi_attribute directive.  */
4568
4569static void
4570s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4571{
4572  int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
4573
4574  if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4575    attributes_set_explicitly[tag] = 1;
4576}
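
/* For example, ".eabi_attribute 20, 1" sets build attribute tag 20
   (Tag_ABI_FP_denormal, assuming the usual AEABI numbering) and marks it as
   explicitly set, so that it is not later overridden by inferred defaults.  */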
4577
4578/* Emit a tls fix for the symbol.  */
4579
4580static void
4581s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
4582{
4583  char *p;
4584  expressionS exp;
4585#ifdef md_flush_pending_output
4586  md_flush_pending_output ();
4587#endif
4588
4589#ifdef md_cons_align
4590  md_cons_align (4);
4591#endif
4592
4593  /* Since we're just labelling the code, there's no need to define a
4594     mapping symbol.  */
4595  expression (&exp);
4596  p = obstack_next_free (&frchain_now->frch_obstack);
4597  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
4598	       thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
4599	       : BFD_RELOC_ARM_TLS_DESCSEQ);
4600}
4601#endif /* OBJ_ELF */
4602
4603static void s_arm_arch (int);
4604static void s_arm_object_arch (int);
4605static void s_arm_cpu (int);
4606static void s_arm_fpu (int);
4607static void s_arm_arch_extension (int);
4608
4609#ifdef TE_PE
4610
4611static void
4612pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4613{
4614  expressionS exp;
4615
4616  do
4617    {
4618      expression (&exp);
4619      if (exp.X_op == O_symbol)
4620	exp.X_op = O_secrel;
4621
4622      emit_expr (&exp, 4);
4623    }
4624  while (*input_line_pointer++ == ',');
4625
4626  input_line_pointer--;
4627  demand_empty_rest_of_line ();
4628}
4629#endif /* TE_PE */
4630
4631/* This table describes all the machine specific pseudo-ops the assembler
4632   has to support.  The fields are:
4633     pseudo-op name without dot
4634     function to call to execute this pseudo-op
4635     Integer arg to pass to the function.  */
4636
4637const pseudo_typeS md_pseudo_table[] =
4638{
4639  /* Never called because '.req' does not start a line.	 */
4640  { "req",	   s_req,	  0 },
4641  /* Following two are likewise never called.  */
4642  { "dn",	   s_dn,          0 },
4643  { "qn",          s_qn,          0 },
4644  { "unreq",	   s_unreq,	  0 },
4645  { "bss",	   s_bss,	  0 },
4646  { "align",	   s_align_ptwo,  2 },
4647  { "arm",	   s_arm,	  0 },
4648  { "thumb",	   s_thumb,	  0 },
4649  { "code",	   s_code,	  0 },
4650  { "force_thumb", s_force_thumb, 0 },
4651  { "thumb_func",  s_thumb_func,  0 },
4652  { "thumb_set",   s_thumb_set,	  0 },
4653  { "even",	   s_even,	  0 },
4654  { "ltorg",	   s_ltorg,	  0 },
4655  { "pool",	   s_ltorg,	  0 },
4656  { "syntax",	   s_syntax,	  0 },
4657  { "cpu",	   s_arm_cpu,	  0 },
4658  { "arch",	   s_arm_arch,	  0 },
4659  { "object_arch", s_arm_object_arch,	0 },
4660  { "fpu",	   s_arm_fpu,	  0 },
4661  { "arch_extension", s_arm_arch_extension, 0 },
4662#ifdef OBJ_ELF
4663  { "word",	        s_arm_elf_cons, 4 },
4664  { "long",	        s_arm_elf_cons, 4 },
4665  { "inst.n",           s_arm_elf_inst, 2 },
4666  { "inst.w",           s_arm_elf_inst, 4 },
4667  { "inst",             s_arm_elf_inst, 0 },
4668  { "rel31",	        s_arm_rel31,	  0 },
4669  { "fnstart",		s_arm_unwind_fnstart,	0 },
4670  { "fnend",		s_arm_unwind_fnend,	0 },
4671  { "cantunwind",	s_arm_unwind_cantunwind, 0 },
4672  { "personality",	s_arm_unwind_personality, 0 },
4673  { "personalityindex",	s_arm_unwind_personalityindex, 0 },
4674  { "handlerdata",	s_arm_unwind_handlerdata, 0 },
4675  { "save",		s_arm_unwind_save,	0 },
4676  { "vsave",		s_arm_unwind_save,	1 },
4677  { "movsp",		s_arm_unwind_movsp,	0 },
4678  { "pad",		s_arm_unwind_pad,	0 },
4679  { "setfp",		s_arm_unwind_setfp,	0 },
4680  { "unwind_raw",	s_arm_unwind_raw,	0 },
4681  { "eabi_attribute",	s_arm_eabi_attribute,	0 },
4682  { "tlsdescseq",	s_arm_tls_descseq,      0 },
4683#else
4684  { "word",	   cons, 4},
4685
4686  /* These are used for dwarf.  */
4687  {"2byte", cons, 2},
4688  {"4byte", cons, 4},
4689  {"8byte", cons, 8},
4690  /* These are used for dwarf2.  */
4691  { "file", (void (*) (int)) dwarf2_directive_file, 0 },
4692  { "loc",  dwarf2_directive_loc,  0 },
4693  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
4694#endif
4695  { "extend",	   float_cons, 'x' },
4696  { "ldouble",	   float_cons, 'x' },
4697  { "packed",	   float_cons, 'p' },
4698#ifdef TE_PE
4699  {"secrel32", pe_directive_secrel, 0},
4700#endif
4701
4702  /* These are for compatibility with CodeComposer Studio.  */
4703  {"ref",          s_ccs_ref,        0},
4704  {"def",          s_ccs_def,        0},
4705  {"asmfunc",      s_ccs_asmfunc,    0},
4706  {"endasmfunc",   s_ccs_endasmfunc, 0},
4707
4708  { 0, 0, 0 }
4709};
4710
4711/* Parser functions used exclusively in instruction operands.  */
4712
4713/* Generic immediate-value read function for use in insn parsing.
4714   STR points to the beginning of the immediate (the leading #);
4715   VAL receives the value; if the value is outside [MIN, MAX]
4716   issue an error.  PREFIX_OPT is true if the immediate prefix is
4717   optional.  */
4718
4719static int
4720parse_immediate (char **str, int *val, int min, int max,
4721		 bfd_boolean prefix_opt)
4722{
4723  expressionS exp;
4724  my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4725  if (exp.X_op != O_constant)
4726    {
4727      inst.error = _("constant expression required");
4728      return FAIL;
4729    }
4730
4731  if (exp.X_add_number < min || exp.X_add_number > max)
4732    {
4733      inst.error = _("immediate value out of range");
4734      return FAIL;
4735    }
4736
4737  *val = exp.X_add_number;
4738  return SUCCESS;
4739}
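
/* For example, parse_immediate (&p, &val, 0, 31, FALSE) applied to "#12"
   stores 12 in VAL and returns SUCCESS; "#99" fails the range check and
   "#foo" fails the constant-expression check, both returning FAIL with
   inst.error set.  */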
4740
4741/* Less-generic immediate-value read function with the possibility of loading a
4742   big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4743   instructions. Puts the result directly in inst.operands[i].  */
4744
4745static int
4746parse_big_immediate (char **str, int i, expressionS *in_exp,
4747		     bfd_boolean allow_symbol_p)
4748{
4749  expressionS exp;
4750  expressionS *exp_p = in_exp ? in_exp : &exp;
4751  char *ptr = *str;
4752
4753  my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);
4754
4755  if (exp_p->X_op == O_constant)
4756    {
4757      inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
4758      /* If we're on a 64-bit host, then a 64-bit number can be returned using
4759	 O_constant.  We have to be careful not to break compilation for
4760	 32-bit X_add_number, though.  */
4761      if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
4762	{
4763	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.  */
4764	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
4765				  & 0xffffffff);
4766	  inst.operands[i].regisimm = 1;
4767	}
4768    }
4769  else if (exp_p->X_op == O_big
4770	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
4771    {
4772      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
4773
4774      /* Bignums have their least significant bits in
4775	 generic_bignum[0]. Make sure we put 32 bits in imm and
4776	 32 bits in reg,  in a (hopefully) portable way.  */
4777      gas_assert (parts != 0);
4778
4779      /* Make sure that the number is not too big.
4780	 PR 11972: Bignums can now be sign-extended to the
4781	 size of a .octa so check that the out of range bits
4782	 are all zero or all one.  */
4783      if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
4784	{
4785	  LITTLENUM_TYPE m = -1;
4786
4787	  if (generic_bignum[parts * 2] != 0
4788	      && generic_bignum[parts * 2] != m)
4789	    return FAIL;
4790
4791	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
4792	    if (generic_bignum[j] != generic_bignum[j-1])
4793	      return FAIL;
4794	}
4795
4796      inst.operands[i].imm = 0;
4797      for (j = 0; j < parts; j++, idx++)
4798	inst.operands[i].imm |= generic_bignum[idx]
4799				<< (LITTLENUM_NUMBER_OF_BITS * j);
4800      inst.operands[i].reg = 0;
4801      for (j = 0; j < parts; j++, idx++)
4802	inst.operands[i].reg |= generic_bignum[idx]
4803				<< (LITTLENUM_NUMBER_OF_BITS * j);
4804      inst.operands[i].regisimm = 1;
4805    }
4806  else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
4807    return FAIL;
4808
4809  *str = ptr;
4810
4811  return SUCCESS;
4812}
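
/* For example, given "#0x0123456789abcdef" this stores 0x89abcdef in
   inst.operands[i].imm and 0x01234567 in .reg and sets .regisimm, whether
   the value arrives as a 64-bit O_constant or as an O_big bignum.  */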
4813
4814/* Returns the pseudo-register number of an FPA immediate constant,
4815   or FAIL if there isn't a valid constant here.  */
4816
4817static int
4818parse_fpa_immediate (char ** str)
4819{
4820  LITTLENUM_TYPE words[MAX_LITTLENUMS];
4821  char *	 save_in;
4822  expressionS	 exp;
4823  int		 i;
4824  int		 j;
4825
4826  /* First try to match exact strings; this guarantees that some
4827     formats will work even for cross assembly.  */
4828
4829  for (i = 0; fp_const[i]; i++)
4830    {
4831      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
4832	{
4833	  char *start = *str;
4834
4835	  *str += strlen (fp_const[i]);
4836	  if (is_end_of_line[(unsigned char) **str])
4837	    return i + 8;
4838	  *str = start;
4839	}
4840    }
4841
4842  /* Just because we didn't get a match doesn't mean that the constant
4843     isn't valid, just that it is in a format that we don't
4844     automatically recognize.  Try parsing it with the standard
4845     expression routines.  */
4846
4847  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));
4848
4849  /* Look for a raw floating point number.  */
4850  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
4851      && is_end_of_line[(unsigned char) *save_in])
4852    {
4853      for (i = 0; i < NUM_FLOAT_VALS; i++)
4854	{
4855	  for (j = 0; j < MAX_LITTLENUMS; j++)
4856	    {
4857	      if (words[j] != fp_values[i][j])
4858		break;
4859	    }
4860
4861	  if (j == MAX_LITTLENUMS)
4862	    {
4863	      *str = save_in;
4864	      return i + 8;
4865	    }
4866	}
4867    }
4868
4869  /* Try to parse a more complex expression; this will probably fail
4870     unless the code uses a floating point prefix (e.g. "0f").  */
4871  save_in = input_line_pointer;
4872  input_line_pointer = *str;
4873  if (expression (&exp) == absolute_section
4874      && exp.X_op == O_big
4875      && exp.X_add_number < 0)
4876    {
4877      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4878	 Ditto for 15.	*/
4879#define X_PRECISION 5
4880#define E_PRECISION 15L
4881      if (gen_to_words (words, X_PRECISION, E_PRECISION) == 0)
4882	{
4883	  for (i = 0; i < NUM_FLOAT_VALS; i++)
4884	    {
4885	      for (j = 0; j < MAX_LITTLENUMS; j++)
4886		{
4887		  if (words[j] != fp_values[i][j])
4888		    break;
4889		}
4890
4891	      if (j == MAX_LITTLENUMS)
4892		{
4893		  *str = input_line_pointer;
4894		  input_line_pointer = save_in;
4895		  return i + 8;
4896		}
4897	    }
4898	}
4899    }
4900
4901  *str = input_line_pointer;
4902  input_line_pointer = save_in;
4903  inst.error = _("invalid FPA immediate expression");
4904  return FAIL;
4905}
4906
4907/* Returns 1 if a number has "quarter-precision" float format
4908   0baBbbbbbc defgh000 00000000 00000000.  */
4909
4910static int
4911is_quarter_float (unsigned imm)
4912{
4913  int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
4914  return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
4915}
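
/* For example, 2.0f (0x40000000) and -0.5f (0xbf000000) fit this format,
   whereas 0.1f (0x3dcccccd) does not because its low-order mantissa bits
   are non-zero.  */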
4916
4917
4918/* Detect the presence of a floating point or integer zero constant,
4919   i.e. #0.0 or #0.  */
4920
4921static bfd_boolean
4922parse_ifimm_zero (char **in)
4923{
4924  int error_code;
4925
4926  if (!is_immediate_prefix (**in))
4927    return FALSE;
4928
4929  ++*in;
4930
4931  /* Accept #0x0 as a synonym for #0.  */
4932  if (strncmp (*in, "0x", 2) == 0)
4933    {
4934      int val;
4935      if (parse_immediate (in, &val, 0, 0, TRUE) == FAIL)
4936        return FALSE;
4937      return TRUE;
4938    }
4939
4940  error_code = atof_generic (in, ".", EXP_CHARS,
4941                             &generic_floating_point_number);
4942
4943  if (!error_code
4944      && generic_floating_point_number.sign == '+'
4945      && (generic_floating_point_number.low
4946          > generic_floating_point_number.leader))
4947    return TRUE;
4948
4949  return FALSE;
4950}
4951
4952/* Parse an 8-bit "quarter-precision" floating point number of the form:
4953   0baBbbbbbc defgh000 00000000 00000000.
4954   The zero and minus-zero cases need special handling, since they can't be
4955   encoded in the "quarter-precision" float format, but can nonetheless be
4956   loaded as integer constants.  */
4957
4958static unsigned
4959parse_qfloat_immediate (char **ccp, int *immed)
4960{
4961  char *str = *ccp;
4962  char *fpnum;
4963  LITTLENUM_TYPE words[MAX_LITTLENUMS];
4964  int found_fpchar = 0;
4965
4966  skip_past_char (&str, '#');
4967
4968  /* We must not accidentally parse an integer as a floating-point number. Make
4969     sure that the value we parse is not an integer by checking for special
4970     characters '.' or 'e'.
4971     FIXME: This is a horrible hack, but doing better is tricky because type
4972     information isn't in a very usable state at parse time.  */
4973  fpnum = str;
4974  skip_whitespace (fpnum);
4975
4976  if (strncmp (fpnum, "0x", 2) == 0)
4977    return FAIL;
4978  else
4979    {
4980      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
4981	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
4982	  {
4983	    found_fpchar = 1;
4984	    break;
4985	  }
4986
4987      if (!found_fpchar)
4988	return FAIL;
4989    }
4990
4991  if ((str = atof_ieee (str, 's', words)) != NULL)
4992    {
4993      unsigned fpword = 0;
4994      int i;
4995
4996      /* Our FP word must be 32 bits (single-precision FP).  */
4997      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
4998	{
4999	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
5000	  fpword |= words[i];
5001	}
5002
5003      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
5004	*immed = fpword;
5005      else
5006	return FAIL;
5007
5008      *ccp = str;
5009
5010      return SUCCESS;
5011    }
5012
5013  return FAIL;
5014}
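
/* For example, "#0.5" yields *immed = 0x3f000000 and "#-0.0" yields
   0x80000000, while "#1.1" and plain integers such as "#2" are rejected
   here (the latter deliberately, so that integer parsing gets a chance).  */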
5015
5016/* Shift operands.  */
5017enum shift_kind
5018{
5019  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
5020};
5021
5022struct asm_shift_name
5023{
5024  const char	  *name;
5025  enum shift_kind  kind;
5026};
5027
5028/* Third argument to parse_shift.  */
5029enum parse_shift_mode
5030{
5031  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
5032  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.	*/
5033  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.	*/
5034  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.	 */
5035  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.	 */
5036};
5037
5038/* Parse a <shift> specifier on an ARM data processing instruction.
5039   This has three forms:
5040
5041     (LSL|LSR|ASL|ASR|ROR) Rs
5042     (LSL|LSR|ASL|ASR|ROR) #imm
5043     RRX
5044
5045   Note that ASL is assimilated to LSL in the instruction encoding, and
5046   RRX to ROR #0 (which cannot be written as such).  */
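
/* Examples of operands accepted here (where MODE permits them):
   "lsl #3", "LSR r2", "asr #1" and "rrx".  */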
5047
5048static int
5049parse_shift (char **str, int i, enum parse_shift_mode mode)
5050{
5051  const struct asm_shift_name *shift_name;
5052  enum shift_kind shift;
5053  char *s = *str;
5054  char *p = s;
5055  int reg;
5056
5057  for (p = *str; ISALPHA (*p); p++)
5058    ;
5059
5060  if (p == *str)
5061    {
5062      inst.error = _("shift expression expected");
5063      return FAIL;
5064    }
5065
5066  shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
5067							    p - *str);
5068
5069  if (shift_name == NULL)
5070    {
5071      inst.error = _("shift expression expected");
5072      return FAIL;
5073    }
5074
5075  shift = shift_name->kind;
5076
5077  switch (mode)
5078    {
5079    case NO_SHIFT_RESTRICT:
5080    case SHIFT_IMMEDIATE:   break;
5081
5082    case SHIFT_LSL_OR_ASR_IMMEDIATE:
5083      if (shift != SHIFT_LSL && shift != SHIFT_ASR)
5084	{
5085	  inst.error = _("'LSL' or 'ASR' required");
5086	  return FAIL;
5087	}
5088      break;
5089
5090    case SHIFT_LSL_IMMEDIATE:
5091      if (shift != SHIFT_LSL)
5092	{
5093	  inst.error = _("'LSL' required");
5094	  return FAIL;
5095	}
5096      break;
5097
5098    case SHIFT_ASR_IMMEDIATE:
5099      if (shift != SHIFT_ASR)
5100	{
5101	  inst.error = _("'ASR' required");
5102	  return FAIL;
5103	}
5104      break;
5105
5106    default: abort ();
5107    }
5108
5109  if (shift != SHIFT_RRX)
5110    {
5111      /* Whitespace can appear here if the next thing is a bare digit.	*/
5112      skip_whitespace (p);
5113
5114      if (mode == NO_SHIFT_RESTRICT
5115	  && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5116	{
5117	  inst.operands[i].imm = reg;
5118	  inst.operands[i].immisreg = 1;
5119	}
5120      else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5121	return FAIL;
5122    }
5123  inst.operands[i].shift_kind = shift;
5124  inst.operands[i].shifted = 1;
5125  *str = p;
5126  return SUCCESS;
5127}
5128
5129/* Parse a <shifter_operand> for an ARM data processing instruction:
5130
5131      #<immediate>
5132      #<immediate>, <rotate>
5133      <Rm>
5134      <Rm>, <shift>
5135
5136   where <shift> is defined by parse_shift above, and <rotate> is a
5137   multiple of 2 between 0 and 30.  Validation of immediate operands
5138   is deferred to md_apply_fix.  */
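
/* For example, "#255, 8" is the 8-bit constant 255 rotated right by 8 bits
   (i.e. 0xff000000), stored here as 255 | (8 << 7); a plain "#257" is left
   for md_apply_fix to validate or reject.  */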
5139
5140static int
5141parse_shifter_operand (char **str, int i)
5142{
5143  int value;
5144  expressionS exp;
5145
5146  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
5147    {
5148      inst.operands[i].reg = value;
5149      inst.operands[i].isreg = 1;
5150
5151      /* parse_shift will override this if appropriate */
5152      inst.reloc.exp.X_op = O_constant;
5153      inst.reloc.exp.X_add_number = 0;
5154
5155      if (skip_past_comma (str) == FAIL)
5156	return SUCCESS;
5157
5158      /* Shift operation on register.  */
5159      return parse_shift (str, i, NO_SHIFT_RESTRICT);
5160    }
5161
5162  if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
5163    return FAIL;
5164
5165  if (skip_past_comma (str) == SUCCESS)
5166    {
5167      /* #x, y -- i.e. explicit rotation by y.  */
5168      if (my_get_expression (&exp, str, GE_NO_PREFIX))
5169	return FAIL;
5170
5171      if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
5172	{
5173	  inst.error = _("constant expression expected");
5174	  return FAIL;
5175	}
5176
5177      value = exp.X_add_number;
5178      if (value < 0 || value > 30 || value % 2 != 0)
5179	{
5180	  inst.error = _("invalid rotation");
5181	  return FAIL;
5182	}
5183      if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
5184	{
5185	  inst.error = _("invalid constant");
5186	  return FAIL;
5187	}
5188
5189      /* Encode as specified.  */
5190      inst.operands[i].imm = inst.reloc.exp.X_add_number | value << 7;
5191      return SUCCESS;
5192    }
5193
5194  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
5195  inst.reloc.pc_rel = 0;
5196  return SUCCESS;
5197}
5198
5199/* Group relocation information.  Each entry in the table contains the
5200   textual name of the relocation as it may appear in assembler source,
5201   where it must be followed by a colon.
5202   Along with this textual name are the relocation codes to be used if
5203   the corresponding instruction is an ALU instruction (ADD or SUB only),
5204   an LDR, an LDRS, or an LDC.  */
5205
5206struct group_reloc_table_entry
5207{
5208  const char *name;
5209  int alu_code;
5210  int ldr_code;
5211  int ldrs_code;
5212  int ldc_code;
5213};
5214
5215typedef enum
5216{
5217  /* Varieties of non-ALU group relocation.  */
5218
5219  GROUP_LDR,
5220  GROUP_LDRS,
5221  GROUP_LDC
5222} group_reloc_type;
5223
5224static struct group_reloc_table_entry group_reloc_table[] =
5225  { /* Program counter relative: */
5226    { "pc_g0_nc",
5227      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
5228      0,				/* LDR */
5229      0,				/* LDRS */
5230      0 },				/* LDC */
5231    { "pc_g0",
5232      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
5233      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
5234      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
5235      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
5236    { "pc_g1_nc",
5237      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
5238      0,				/* LDR */
5239      0,				/* LDRS */
5240      0 },				/* LDC */
5241    { "pc_g1",
5242      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
5243      BFD_RELOC_ARM_LDR_PC_G1, 		/* LDR */
5244      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
5245      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
5246    { "pc_g2",
5247      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
5248      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
5249      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
5250      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
5251    /* Section base relative */
5252    { "sb_g0_nc",
5253      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
5254      0,				/* LDR */
5255      0,				/* LDRS */
5256      0 },				/* LDC */
5257    { "sb_g0",
5258      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
5259      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
5260      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
5261      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
5262    { "sb_g1_nc",
5263      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
5264      0,				/* LDR */
5265      0,				/* LDRS */
5266      0 },				/* LDC */
5267    { "sb_g1",
5268      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
5269      BFD_RELOC_ARM_LDR_SB_G1, 		/* LDR */
5270      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
5271      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
5272    { "sb_g2",
5273      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
5274      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
5275      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
5276      BFD_RELOC_ARM_LDC_SB_G2 }	};	/* LDC */
5277
5278/* Given the address of a pointer pointing to the textual name of a group
5279   relocation as may appear in assembler source, attempt to find its details
5280   in group_reloc_table.  The pointer will be updated to the character after
5281   the trailing colon.  On failure, FAIL will be returned; SUCCESS
5282   otherwise.  On success, *entry will be updated to point at the relevant
5283   group_reloc_table entry. */
5284
5285static int
5286find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5287{
5288  unsigned int i;
5289  for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5290    {
5291      int length = strlen (group_reloc_table[i].name);
5292
5293      if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5294	  && (*str)[length] == ':')
5295	{
5296	  *out = &group_reloc_table[i];
5297	  *str += (length + 1);
5298	  return SUCCESS;
5299	}
5300    }
5301
5302  return FAIL;
5303}
5304
5305/* Parse a <shifter_operand> for an ARM data processing instruction
5306   (as for parse_shifter_operand) where group relocations are allowed:
5307
5308      #<immediate>
5309      #<immediate>, <rotate>
5310      #:<group_reloc>:<expression>
5311      <Rm>
5312      <Rm>, <shift>
5313
5314   where <group_reloc> is one of the strings defined in group_reloc_table.
5315   The hashes are optional.
5316
5317   Everything else is as for parse_shifter_operand.  */
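
/* A sketch of the group-relocation form (illustrative only):
   "add r0, r0, #:sb_g0_nc:(foo)" selects the "sb_g0_nc" entry above and
   records its ALU relocation against the expression "foo"; the leading '#'
   may be omitted.  */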
5318
5319static parse_operand_result
5320parse_shifter_operand_group_reloc (char **str, int i)
5321{
5322  /* Determine if we have the sequence of characters #: or just :
5323     coming next.  If we do, then we check for a group relocation.
5324     If we don't, punt the whole lot to parse_shifter_operand.  */
5325
5326  if (((*str)[0] == '#' && (*str)[1] == ':')
5327      || (*str)[0] == ':')
5328    {
5329      struct group_reloc_table_entry *entry;
5330
5331      if ((*str)[0] == '#')
5332	(*str) += 2;
5333      else
5334	(*str)++;
5335
5336      /* Try to parse a group relocation.  Anything else is an error.  */
5337      if (find_group_reloc_table_entry (str, &entry) == FAIL)
5338	{
5339	  inst.error = _("unknown group relocation");
5340	  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5341	}
5342
5343      /* We now have the group relocation table entry corresponding to
5344	 the name in the assembler source.  Next, we parse the expression.  */
5345      if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
5346	return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5347
5348      /* Record the relocation type (always the ALU variant here).  */
5349      inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
5350      gas_assert (inst.reloc.type != 0);
5351
5352      return PARSE_OPERAND_SUCCESS;
5353    }
5354  else
5355    return parse_shifter_operand (str, i) == SUCCESS
5356	   ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
5357
5358  /* Never reached.  */
5359}
5360
5361/* Parse a Neon alignment expression.  Information is written to
5362   inst.operands[i].  We assume the initial ':' has been skipped.
5363
5364   align	.imm = align << 8, .immisalign=1, .preind=0  */
5365static parse_operand_result
5366parse_neon_alignment (char **str, int i)
5367{
5368  char *p = *str;
5369  expressionS exp;
5370
5371  my_get_expression (&exp, &p, GE_NO_PREFIX);
5372
5373  if (exp.X_op != O_constant)
5374    {
5375      inst.error = _("alignment must be constant");
5376      return PARSE_OPERAND_FAIL;
5377    }
5378
5379  inst.operands[i].imm = exp.X_add_number << 8;
5380  inst.operands[i].immisalign = 1;
5381  /* Alignments are not pre-indexes.  */
5382  inst.operands[i].preind = 0;
5383
5384  *str = p;
5385  return PARSE_OPERAND_SUCCESS;
5386}
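
/* For example, the ":64" in "[r0:64]" ends up here once the register has
   been parsed, and is recorded as .imm = 64 << 8 with .immisalign set.  */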
5387
5388/* Parse all forms of an ARM address expression.  Information is written
5389   to inst.operands[i] and/or inst.reloc.
5390
5391   Preindexed addressing (.preind=1):
5392
5393   [Rn, #offset]       .reg=Rn .reloc.exp=offset
5394   [Rn, +/-Rm]	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5395   [Rn, +/-Rm, shift]  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5396		       .shift_kind=shift .reloc.exp=shift_imm
5397
5398   These three may have a trailing ! which causes .writeback to be set also.
5399
5400   Postindexed addressing (.postind=1, .writeback=1):
5401
5402   [Rn], #offset       .reg=Rn .reloc.exp=offset
5403   [Rn], +/-Rm	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5404   [Rn], +/-Rm, shift  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5405		       .shift_kind=shift .reloc.exp=shift_imm
5406
5407   Unindexed addressing (.preind=0, .postind=0):
5408
5409   [Rn], {option}      .reg=Rn .imm=option .immisreg=0
5410
5411   Other:
5412
5413   [Rn]{!}	       shorthand for [Rn,#0]{!}
5414   =immediate	       .isreg=0 .reloc.exp=immediate
5415   label	       .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5416
5417  It is the caller's responsibility to check for addressing modes not
5418  supported by the instruction, and to set inst.reloc.type.  */
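
/* Examples of the forms listed above: "[r0, #4]", "[r1, r2, lsl #2]!",
   "[r3], #-8", "[r4], {5}", "=0x12345678" and a bare "label".  */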
5419
5420static parse_operand_result
5421parse_address_main (char **str, int i, int group_relocations,
5422		    group_reloc_type group_type)
5423{
5424  char *p = *str;
5425  int reg;
5426
5427  if (skip_past_char (&p, '[') == FAIL)
5428    {
5429      if (skip_past_char (&p, '=') == FAIL)
5430	{
5431	  /* Bare address - translate to PC-relative offset.  */
5432	  inst.reloc.pc_rel = 1;
5433	  inst.operands[i].reg = REG_PC;
5434	  inst.operands[i].isreg = 1;
5435	  inst.operands[i].preind = 1;
5436
5437	  if (my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX_BIG))
5438	    return PARSE_OPERAND_FAIL;
5439	}
5440      else if (parse_big_immediate (&p, i, &inst.reloc.exp,
5441				    /*allow_symbol_p=*/TRUE))
5442	return PARSE_OPERAND_FAIL;
5443
5444      *str = p;
5445      return PARSE_OPERAND_SUCCESS;
5446    }
5447
5448  /* PR gas/14887: Allow for whitespace after the opening bracket.  */
5449  skip_whitespace (p);
5450
5451  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5452    {
5453      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5454      return PARSE_OPERAND_FAIL;
5455    }
5456  inst.operands[i].reg = reg;
5457  inst.operands[i].isreg = 1;
5458
5459  if (skip_past_comma (&p) == SUCCESS)
5460    {
5461      inst.operands[i].preind = 1;
5462
5463      if (*p == '+') p++;
5464      else if (*p == '-') p++, inst.operands[i].negative = 1;
5465
5466      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5467	{
5468	  inst.operands[i].imm = reg;
5469	  inst.operands[i].immisreg = 1;
5470
5471	  if (skip_past_comma (&p) == SUCCESS)
5472	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
5473	      return PARSE_OPERAND_FAIL;
5474	}
5475      else if (skip_past_char (&p, ':') == SUCCESS)
5476	{
5477	  /* FIXME: '@' should be used here, but it's filtered out by generic
5478	     code before we get to see it here. This may be subject to
5479	     change.  */
5480	  parse_operand_result result = parse_neon_alignment (&p, i);
5481
5482	  if (result != PARSE_OPERAND_SUCCESS)
5483	    return result;
5484	}
5485      else
5486	{
5487	  if (inst.operands[i].negative)
5488	    {
5489	      inst.operands[i].negative = 0;
5490	      p--;
5491	    }
5492
5493	  if (group_relocations
5494	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
5495	    {
5496	      struct group_reloc_table_entry *entry;
5497
5498	      /* Skip over the #: or : sequence.  */
5499	      if (*p == '#')
5500		p += 2;
5501	      else
5502		p++;
5503
5504	      /* Try to parse a group relocation.  Anything else is an
5505		 error.  */
5506	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
5507		{
5508		  inst.error = _("unknown group relocation");
5509		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5510		}
5511
5512	      /* We now have the group relocation table entry corresponding to
5513		 the name in the assembler source.  Next, we parse the
5514		 expression.  */
5515	      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5516		return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5517
5518	      /* Record the relocation type.  */
5519	      switch (group_type)
5520		{
5521		  case GROUP_LDR:
5522		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
5523		    break;
5524
5525		  case GROUP_LDRS:
5526		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
5527		    break;
5528
5529		  case GROUP_LDC:
5530		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
5531		    break;
5532
5533		  default:
5534		    gas_assert (0);
5535		}
5536
5537	      if (inst.reloc.type == 0)
5538		{
5539		  inst.error = _("this group relocation is not allowed on this instruction");
5540		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5541		}
5542	    }
5543	  else
5544	    {
5545	      char *q = p;
5546	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5547		return PARSE_OPERAND_FAIL;
5548	      /* If the offset is 0, find out if it's a +0 or -0.  */
5549	      if (inst.reloc.exp.X_op == O_constant
5550		  && inst.reloc.exp.X_add_number == 0)
5551		{
5552		  skip_whitespace (q);
5553		  if (*q == '#')
5554		    {
5555		      q++;
5556		      skip_whitespace (q);
5557		    }
5558		  if (*q == '-')
5559		    inst.operands[i].negative = 1;
5560		}
5561	    }
5562	}
5563    }
5564  else if (skip_past_char (&p, ':') == SUCCESS)
5565    {
5566      /* FIXME: '@' should be used here, but it's filtered out by generic code
5567	 before we get to see it here. This may be subject to change.  */
5568      parse_operand_result result = parse_neon_alignment (&p, i);
5569
5570      if (result != PARSE_OPERAND_SUCCESS)
5571	return result;
5572    }
5573
5574  if (skip_past_char (&p, ']') == FAIL)
5575    {
5576      inst.error = _("']' expected");
5577      return PARSE_OPERAND_FAIL;
5578    }
5579
5580  if (skip_past_char (&p, '!') == SUCCESS)
5581    inst.operands[i].writeback = 1;
5582
5583  else if (skip_past_comma (&p) == SUCCESS)
5584    {
5585      if (skip_past_char (&p, '{') == SUCCESS)
5586	{
5587	  /* [Rn], {expr} - unindexed, with option */
5588	  if (parse_immediate (&p, &inst.operands[i].imm,
5589			       0, 255, TRUE) == FAIL)
5590	    return PARSE_OPERAND_FAIL;
5591
5592	  if (skip_past_char (&p, '}') == FAIL)
5593	    {
5594	      inst.error = _("'}' expected at end of 'option' field");
5595	      return PARSE_OPERAND_FAIL;
5596	    }
5597	  if (inst.operands[i].preind)
5598	    {
5599	      inst.error = _("cannot combine index with option");
5600	      return PARSE_OPERAND_FAIL;
5601	    }
5602	  *str = p;
5603	  return PARSE_OPERAND_SUCCESS;
5604	}
5605      else
5606	{
5607	  inst.operands[i].postind = 1;
5608	  inst.operands[i].writeback = 1;
5609
5610	  if (inst.operands[i].preind)
5611	    {
5612	      inst.error = _("cannot combine pre- and post-indexing");
5613	      return PARSE_OPERAND_FAIL;
5614	    }
5615
5616	  if (*p == '+') p++;
5617	  else if (*p == '-') p++, inst.operands[i].negative = 1;
5618
5619	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5620	    {
5621	      /* We might be using the immediate for alignment already. If we
5622		 are, OR the register number into the low-order bits.  */
5623	      if (inst.operands[i].immisalign)
5624		inst.operands[i].imm |= reg;
5625	      else
5626		inst.operands[i].imm = reg;
5627	      inst.operands[i].immisreg = 1;
5628
5629	      if (skip_past_comma (&p) == SUCCESS)
5630		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
5631		  return PARSE_OPERAND_FAIL;
5632	    }
5633	  else
5634	    {
5635	      char *q = p;
5636	      if (inst.operands[i].negative)
5637		{
5638		  inst.operands[i].negative = 0;
5639		  p--;
5640		}
5641	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5642		return PARSE_OPERAND_FAIL;
5643	      /* If the offset is 0, find out if it's a +0 or -0.  */
5644	      if (inst.reloc.exp.X_op == O_constant
5645		  && inst.reloc.exp.X_add_number == 0)
5646		{
5647		  skip_whitespace (q);
5648		  if (*q == '#')
5649		    {
5650		      q++;
5651		      skip_whitespace (q);
5652		    }
5653		  if (*q == '-')
5654		    inst.operands[i].negative = 1;
5655		}
5656	    }
5657	}
5658    }
5659
5660  /* If at this point neither .preind nor .postind is set, we have a
5661     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
5662  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
5663    {
5664      inst.operands[i].preind = 1;
5665      inst.reloc.exp.X_op = O_constant;
5666      inst.reloc.exp.X_add_number = 0;
5667    }
5668  *str = p;
5669  return PARSE_OPERAND_SUCCESS;
5670}
5671
5672static int
5673parse_address (char **str, int i)
5674{
5675  return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
5676	 ? SUCCESS : FAIL;
5677}
5678
5679static parse_operand_result
5680parse_address_group_reloc (char **str, int i, group_reloc_type type)
5681{
5682  return parse_address_main (str, i, 1, type);
5683}
5684
5685/* Parse an operand for a MOVW or MOVT instruction.  */
5686static int
5687parse_half (char **str)
5688{
5689  char * p;
5690
5691  p = *str;
5692  skip_past_char (&p, '#');
5693  if (strncasecmp (p, ":lower16:", 9) == 0)
5694    inst.reloc.type = BFD_RELOC_ARM_MOVW;
5695  else if (strncasecmp (p, ":upper16:", 9) == 0)
5696    inst.reloc.type = BFD_RELOC_ARM_MOVT;
5697
5698  if (inst.reloc.type != BFD_RELOC_UNUSED)
5699    {
5700      p += 9;
5701      skip_whitespace (p);
5702    }
5703
5704  if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5705    return FAIL;
5706
5707  if (inst.reloc.type == BFD_RELOC_UNUSED)
5708    {
5709      if (inst.reloc.exp.X_op != O_constant)
5710	{
5711	  inst.error = _("constant expression expected");
5712	  return FAIL;
5713	}
5714      if (inst.reloc.exp.X_add_number < 0
5715	  || inst.reloc.exp.X_add_number > 0xffff)
5716	{
5717	  inst.error = _("immediate value out of range");
5718	  return FAIL;
5719	}
5720    }
5721  *str = p;
5722  return SUCCESS;
5723}
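
/* For example, "movw r0, #:lower16:sym" and "movt r0, #:upper16:sym" reach
   here with the :lower16:/:upper16: prefixes selecting BFD_RELOC_ARM_MOVW
   or BFD_RELOC_ARM_MOVT; a plain "movw r0, #1234" must use a constant in
   the range 0..0xffff.  */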
5724
5725/* Miscellaneous. */
5726
5727/* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
5728   or a bitmask suitable to be or-ed into the ARM msr instruction.  */
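
/* Examples: "CPSR_fc" and "SPSR_cxsf" for A/R-profile MSR/MRS, "APSR_nzcvq"
   or "APSR_nzcvqg" for the APSR bit notation, and names such as "PRIMASK"
   or "BASEPRI" (looked up in arm_v7m_psr_hsh) for M-profile cores.  */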
5729static int
5730parse_psr (char **str, bfd_boolean lhs)
5731{
5732  char *p;
5733  unsigned long psr_field;
5734  const struct asm_psr *psr;
5735  char *start;
5736  bfd_boolean is_apsr = FALSE;
5737  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);
5738
5739  /* PR gas/12698:  If the user has specified -march=all then m_profile will
5740     be TRUE, but we want to ignore it in this case as we are building for any
5741     CPU type, including non-m variants.  */
5742  if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
5743    m_profile = FALSE;
5744
5745  /* CPSRs and SPSRs may now be written in lowercase.  This is just a
5746     convenience feature for ease of use and backwards compatibility.  */
5747  p = *str;
5748  if (strncasecmp (p, "SPSR", 4) == 0)
5749    {
5750      if (m_profile)
5751	goto unsupported_psr;
5752
5753      psr_field = SPSR_BIT;
5754    }
5755  else if (strncasecmp (p, "CPSR", 4) == 0)
5756    {
5757      if (m_profile)
5758	goto unsupported_psr;
5759
5760      psr_field = 0;
5761    }
5762  else if (strncasecmp (p, "APSR", 4) == 0)
5763    {
5764      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
5765	 and ARMv7-R architecture CPUs.  */
5766      is_apsr = TRUE;
5767      psr_field = 0;
5768    }
5769  else if (m_profile)
5770    {
5771      start = p;
5772      do
5773	p++;
5774      while (ISALNUM (*p) || *p == '_');
5775
5776      if (strncasecmp (start, "iapsr", 5) == 0
5777	  || strncasecmp (start, "eapsr", 5) == 0
5778	  || strncasecmp (start, "xpsr", 4) == 0
5779	  || strncasecmp (start, "psr", 3) == 0)
5780	p = start + strcspn (start, "rR") + 1;
5781
5782      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
5783						  p - start);
5784
5785      if (!psr)
5786	return FAIL;
5787
5788      /* If APSR is being written, a bitfield may be specified.  Note that
5789	 APSR itself is handled above.  */
5790      if (psr->field <= 3)
5791	{
5792	  psr_field = psr->field;
5793	  is_apsr = TRUE;
5794	  goto check_suffix;
5795	}
5796
5797      *str = p;
5798      /* M-profile MSR instructions have the mask field set to "10", except
5799	 *PSR variants which modify APSR, which may use a different mask (and
5800	 have been handled already).  Do that by setting the PSR_f field
5801	 here.  */
5802      return psr->field | (lhs ? PSR_f : 0);
5803    }
5804  else
5805    goto unsupported_psr;
5806
5807  p += 4;
5808check_suffix:
5809  if (*p == '_')
5810    {
5811      /* A suffix follows.  */
5812      p++;
5813      start = p;
5814
5815      do
5816	p++;
5817      while (ISALNUM (*p) || *p == '_');
5818
5819      if (is_apsr)
5820	{
5821	  /* APSR uses a notation for bits, rather than fields.  */
5822	  unsigned int nzcvq_bits = 0;
5823	  unsigned int g_bit = 0;
5824	  char *bit;
5825
5826	  for (bit = start; bit != p; bit++)
5827	    {
5828	      switch (TOLOWER (*bit))
5829		{
5830		case 'n':
5831		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
5832		  break;
5833
5834		case 'z':
5835		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
5836		  break;
5837
5838		case 'c':
5839		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
5840		  break;
5841
5842		case 'v':
5843		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
5844		  break;
5845
5846		case 'q':
5847		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
5848		  break;
5849
5850		case 'g':
5851		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
5852		  break;
5853
5854		default:
5855		  inst.error = _("unexpected bit specified after APSR");
5856		  return FAIL;
5857		}
5858	    }
5859
5860	  if (nzcvq_bits == 0x1f)
5861	    psr_field |= PSR_f;
5862
5863	  if (g_bit == 0x1)
5864	    {
5865	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
5866		{
5867		  inst.error = _("selected processor does not "
5868				 "support DSP extension");
5869		  return FAIL;
5870		}
5871
5872	      psr_field |= PSR_s;
5873	    }
5874
5875	  if ((nzcvq_bits & 0x20) != 0
5876	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
5877	      || (g_bit & 0x2) != 0)
5878	    {
5879	      inst.error = _("bad bitmask specified after APSR");
5880	      return FAIL;
5881	    }
5882	}
5883      else
5884	{
5885	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
5886						      p - start);
5887	  if (!psr)
5888	    goto error;
5889
5890	  psr_field |= psr->field;
5891	}
5892    }
5893  else
5894    {
5895      if (ISALNUM (*p))
5896	goto error;    /* Garbage after "[CS]PSR".  */
5897
5898      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
5899	 is deprecated, but allow it anyway.  */
5900      if (is_apsr && lhs)
5901	{
5902	  psr_field |= PSR_f;
5903	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
5904		       "deprecated"));
5905	}
5906      else if (!m_profile)
5907	/* These bits are never right for M-profile devices: don't set them
5908	   (only code paths which read/write APSR reach here).  */
5909	psr_field |= (PSR_c | PSR_f);
5910    }
5911  *str = p;
5912  return psr_field;
5913
5914 unsupported_psr:
5915  inst.error = _("selected processor does not support requested special "
5916		 "purpose register");
5917  return FAIL;
5918
5919 error:
5920  inst.error = _("flag for {c}psr instruction expected");
5921  return FAIL;
5922}
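
/* For instance, the parser above accepts MSR/MRS operands such as
   (illustrative input only):

	msr	CPSR_f, r0	@ A/R-profile, flags field only
	msr	SPSR_cxsf, r1	@ all four fields
	msr	APSR_nzcvq, r2	@ APSR bit notation (sets PSR_f)
	msr	APSR_nzcvqg, r3	@ the 'g' bit needs the DSP extension
	msr	PRIMASK, r0	@ M-profile register found via the hash

   Lowercase spellings of the register names are accepted as well.  */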
5923
5924/* Parse the flags argument to CPSI[ED].  Returns FAIL on error, or a
5925   value suitable for splatting into the AIF field of the instruction.	*/
5926
5927static int
5928parse_cps_flags (char **str)
5929{
5930  int val = 0;
5931  int saw_a_flag = 0;
5932  char *s = *str;
5933
5934  for (;;)
5935    switch (*s++)
5936      {
5937      case '\0': case ',':
5938	goto done;
5939
5940      case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
5941      case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
5942      case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
5943
5944      default:
5945	inst.error = _("unrecognized CPS flag");
5946	return FAIL;
5947      }
5948
5949 done:
5950  if (saw_a_flag == 0)
5951    {
5952      inst.error = _("missing CPS flags");
5953      return FAIL;
5954    }
5955
5956  *str = s - 1;
5957  return val;
5958}
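
/* Example inputs for the flag parser above (illustrative):

	cpsie	i		@ val == 0x2 (enable IRQs)
	cpsid	aif		@ val == 0x7 (disable aborts, IRQs, FIQs)
	cpsid	f, #17		@ parsing stops at the comma; mode follows

   At least one of a/i/f must be present, otherwise "missing CPS flags"
   is reported.  */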
5959
5960/* Parse an endian specifier ("BE" or "LE", case insensitive);
5961   returns 0 for big-endian, 1 for little-endian, FAIL for an error.  */
5962
5963static int
5964parse_endian_specifier (char **str)
5965{
5966  int little_endian;
5967  char *s = *str;
5968
5969  if (strncasecmp (s, "BE", 2) == 0)
5970    little_endian = 0;
5971  else if (strncasecmp (s, "LE", 2) == 0)
5972    little_endian = 1;
5973  else
5974    {
5975      inst.error = _("valid endian specifiers are be or le");
5976      return FAIL;
5977    }
5978
5979  if (ISALNUM (s[2]) || s[2] == '_')
5980    {
5981      inst.error = _("valid endian specifiers are be or le");
5982      return FAIL;
5983    }
5984
5985  *str = s + 2;
5986  return little_endian;
5987}
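
/* For example (illustrative):

	setend	be		@ returns 0 (big-endian)
	setend	LE		@ returns 1 (case-insensitive)
	setend	bex		@ rejected: garbage after the specifier  */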
5988
5989/* Parse a rotation specifier: ROR #0, #8, #16, #24.  Returns a value
5990   suitable for poking into the rotate field of an sxt or sxta
5991   instruction, or FAIL on error.  */
5992
5993static int
5994parse_ror (char **str)
5995{
5996  int rot;
5997  char *s = *str;
5998
5999  if (strncasecmp (s, "ROR", 3) == 0)
6000    s += 3;
6001  else
6002    {
6003      inst.error = _("missing rotation field after comma");
6004      return FAIL;
6005    }
6006
6007  if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
6008    return FAIL;
6009
6010  switch (rot)
6011    {
6012    case  0: *str = s; return 0x0;
6013    case  8: *str = s; return 0x1;
6014    case 16: *str = s; return 0x2;
6015    case 24: *str = s; return 0x3;
6016
6017    default:
6018      inst.error = _("rotation can only be 0, 8, 16, or 24");
6019      return FAIL;
6020    }
6021}
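
/* Worked example (illustrative): for "sxtab r0, r1, r2, ror #16" the
   text "ror #16" reaches parse_ror and yields 0x2 for the rotate
   field, while "ror #12" is rejected with "rotation can only be 0, 8,
   16, or 24".  */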
6022
6023/* Parse a conditional code (from conds[] below).  The value returned is in the
6024   range 0 .. 14, or FAIL.  */
6025static int
6026parse_cond (char **str)
6027{
6028  char *q;
6029  const struct asm_cond *c;
6030  int n;
6031  /* Condition codes are always 2 characters, so matching up to
6032     3 characters is sufficient.  */
6033  char cond[3];
6034
6035  q = *str;
6036  n = 0;
6037  while (ISALPHA (*q) && n < 3)
6038    {
6039      cond[n] = TOLOWER (*q);
6040      q++;
6041      n++;
6042    }
6043
6044  c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
6045  if (!c)
6046    {
6047      inst.error = _("condition required");
6048      return FAIL;
6049    }
6050
6051  *str = q;
6052  return c->value;
6053}
6054
6055/* If the given feature is available in the selected CPU, mark it as used.
6056   Returns TRUE iff the feature is available.  */
6057static bfd_boolean
6058mark_feature_used (const arm_feature_set *feature)
6059{
6060  /* Ensure the option is valid on the current architecture.  */
6061  if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
6062    return FALSE;
6063
6064  /* Add the appropriate architecture feature for the barrier
6065     option used.  */
6066  if (thumb_mode)
6067    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
6068  else
6069    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
6070
6071  return TRUE;
6072}
6073
6074/* Parse an option for a barrier instruction.  Returns the encoding for the
6075   option, or FAIL.  */
6076static int
6077parse_barrier (char **str)
6078{
6079  char *p, *q;
6080  const struct asm_barrier_opt *o;
6081
6082  p = q = *str;
6083  while (ISALPHA (*q))
6084    q++;
6085
6086  o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
6087						    q - p);
6088  if (!o)
6089    return FAIL;
6090
6091  if (!mark_feature_used (&o->arch))
6092    return FAIL;
6093
6094  *str = q;
6095  return o->value;
6096}
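
/* Typical barrier operands handled here (illustrative):

	dmb	ish		@ inner shareable, all accesses
	dsb	sy		@ full system barrier (SY is the default)
	isb	sy		@ ISB only accepts SY; see po_barrier_or_imm

   Options that require a missing architecture feature are rejected via
   mark_feature_used.  */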
6097
6098/* Parse the operands of a table branch instruction.  Similar to a memory
6099   operand.  */
6100static int
6101parse_tb (char **str)
6102{
6103  char * p = *str;
6104  int reg;
6105
6106  if (skip_past_char (&p, '[') == FAIL)
6107    {
6108      inst.error = _("'[' expected");
6109      return FAIL;
6110    }
6111
6112  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6113    {
6114      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6115      return FAIL;
6116    }
6117  inst.operands[0].reg = reg;
6118
6119  if (skip_past_comma (&p) == FAIL)
6120    {
6121      inst.error = _("',' expected");
6122      return FAIL;
6123    }
6124
6125  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6126    {
6127      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6128      return FAIL;
6129    }
6130  inst.operands[0].imm = reg;
6131
6132  if (skip_past_comma (&p) == SUCCESS)
6133    {
6134      if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
6135	return FAIL;
6136      if (inst.reloc.exp.X_add_number != 1)
6137	{
6138	  inst.error = _("invalid shift");
6139	  return FAIL;
6140	}
6141      inst.operands[0].shifted = 1;
6142    }
6143
6144  if (skip_past_char (&p, ']') == FAIL)
6145    {
6146      inst.error = _("']' expected");
6147      return FAIL;
6148    }
6149  *str = p;
6150  return SUCCESS;
6151}
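
/* Table branch operands parsed above, e.g. (illustrative):

	tbb	[r0, r1]		@ byte offsets
	tbh	[r0, r1, lsl #1]	@ halfword offsets; only LSL #1

   Any other shift is diagnosed as "invalid shift".  */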
6152
6153/* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
6154   information on the types the operands can take and how they are encoded.
6155   Up to four operands may be read; this function handles setting the
6156   ".present" field for each read operand itself.
6157   Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
6158   else returns FAIL.  */
6159
6160static int
6161parse_neon_mov (char **str, int *which_operand)
6162{
6163  int i = *which_operand, val;
6164  enum arm_reg_type rtype;
6165  char *ptr = *str;
6166  struct neon_type_el optype;
6167
6168  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
6169    {
6170      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
6171      inst.operands[i].reg = val;
6172      inst.operands[i].isscalar = 1;
6173      inst.operands[i].vectype = optype;
6174      inst.operands[i++].present = 1;
6175
6176      if (skip_past_comma (&ptr) == FAIL)
6177	goto wanted_comma;
6178
6179      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6180	goto wanted_arm;
6181
6182      inst.operands[i].reg = val;
6183      inst.operands[i].isreg = 1;
6184      inst.operands[i].present = 1;
6185    }
6186  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
6187	   != FAIL)
6188    {
6189      /* Cases 0, 1, 2, 3, 5 (D only).  */
6190      if (skip_past_comma (&ptr) == FAIL)
6191	goto wanted_comma;
6192
6193      inst.operands[i].reg = val;
6194      inst.operands[i].isreg = 1;
6195      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
6196      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
6197      inst.operands[i].isvec = 1;
6198      inst.operands[i].vectype = optype;
6199      inst.operands[i++].present = 1;
6200
6201      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
6202	{
6203	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
6204	     Case 13: VMOV <Sd>, <Rm>  */
6205	  inst.operands[i].reg = val;
6206	  inst.operands[i].isreg = 1;
6207	  inst.operands[i].present = 1;
6208
6209	  if (rtype == REG_TYPE_NQ)
6210	    {
6211	      first_error (_("can't use Neon quad register here"));
6212	      return FAIL;
6213	    }
6214	  else if (rtype != REG_TYPE_VFS)
6215	    {
6216	      i++;
6217	      if (skip_past_comma (&ptr) == FAIL)
6218		goto wanted_comma;
6219	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6220		goto wanted_arm;
6221	      inst.operands[i].reg = val;
6222	      inst.operands[i].isreg = 1;
6223	      inst.operands[i].present = 1;
6224	    }
6225	}
6226      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
6227					   &optype)) != FAIL)
6228	{
6229	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
6230	     Case 1: VMOV<c><q> <Dd>, <Dm>
6231	     Case 8: VMOV.F32 <Sd>, <Sm>
6232	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */
6233
6234	  inst.operands[i].reg = val;
6235	  inst.operands[i].isreg = 1;
6236	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
6237	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
6238	  inst.operands[i].isvec = 1;
6239	  inst.operands[i].vectype = optype;
6240	  inst.operands[i].present = 1;
6241
6242	  if (skip_past_comma (&ptr) == SUCCESS)
6243	    {
6244	      /* Case 15.  */
6245	      i++;
6246
6247	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6248		goto wanted_arm;
6249
6250	      inst.operands[i].reg = val;
6251	      inst.operands[i].isreg = 1;
6252	      inst.operands[i++].present = 1;
6253
6254	      if (skip_past_comma (&ptr) == FAIL)
6255		goto wanted_comma;
6256
6257	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6258		goto wanted_arm;
6259
6260	      inst.operands[i].reg = val;
6261	      inst.operands[i].isreg = 1;
6262	      inst.operands[i].present = 1;
6263	    }
6264	}
6265      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
6266	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
6267	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
6268	     Case 10: VMOV.F32 <Sd>, #<imm>
6269	     Case 11: VMOV.F64 <Dd>, #<imm>  */
6270	inst.operands[i].immisfloat = 1;
6271      else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
6272	       == SUCCESS)
6273	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
6274	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
6275	;
6276      else
6277	{
6278	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
6279	  return FAIL;
6280	}
6281    }
6282  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
6283    {
6284      /* Cases 6, 7.  */
6285      inst.operands[i].reg = val;
6286      inst.operands[i].isreg = 1;
6287      inst.operands[i++].present = 1;
6288
6289      if (skip_past_comma (&ptr) == FAIL)
6290	goto wanted_comma;
6291
6292      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
6293	{
6294	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
6295	  inst.operands[i].reg = val;
6296	  inst.operands[i].isscalar = 1;
6297	  inst.operands[i].present = 1;
6298	  inst.operands[i].vectype = optype;
6299	}
6300      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
6301	{
6302	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
6303	  inst.operands[i].reg = val;
6304	  inst.operands[i].isreg = 1;
6305	  inst.operands[i++].present = 1;
6306
6307	  if (skip_past_comma (&ptr) == FAIL)
6308	    goto wanted_comma;
6309
6310	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
6311	      == FAIL)
6312	    {
6313	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
6314	      return FAIL;
6315	    }
6316
6317	  inst.operands[i].reg = val;
6318	  inst.operands[i].isreg = 1;
6319	  inst.operands[i].isvec = 1;
6320	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
6321	  inst.operands[i].vectype = optype;
6322	  inst.operands[i].present = 1;
6323
6324	  if (rtype == REG_TYPE_VFS)
6325	    {
6326	      /* Case 14.  */
6327	      i++;
6328	      if (skip_past_comma (&ptr) == FAIL)
6329		goto wanted_comma;
6330	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
6331					      &optype)) == FAIL)
6332		{
6333		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
6334		  return FAIL;
6335		}
6336	      inst.operands[i].reg = val;
6337	      inst.operands[i].isreg = 1;
6338	      inst.operands[i].isvec = 1;
6339	      inst.operands[i].issingle = 1;
6340	      inst.operands[i].vectype = optype;
6341	      inst.operands[i].present = 1;
6342	    }
6343	}
6344      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
6345	       != FAIL)
6346	{
6347	  /* Case 13.  */
6348	  inst.operands[i].reg = val;
6349	  inst.operands[i].isreg = 1;
6350	  inst.operands[i].isvec = 1;
6351	  inst.operands[i].issingle = 1;
6352	  inst.operands[i].vectype = optype;
6353	  inst.operands[i].present = 1;
6354	}
6355    }
6356  else
6357    {
6358      first_error (_("parse error"));
6359      return FAIL;
6360    }
6361
6362  /* Successfully parsed the operands. Update args.  */
6363  *which_operand = i;
6364  *str = ptr;
6365  return SUCCESS;
6366
6367 wanted_comma:
6368  first_error (_("expected comma"));
6369  return FAIL;
6370
6371 wanted_arm:
6372  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
6373  return FAIL;
6374}
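
/* A few of the VMOV shapes the parser above distinguishes, keyed to the
   case numbers used in the comments (illustrative syntax only):

	vmov.32	d0[1], r2	@ case 4: scalar <- core register
	vmov	d0, r2, r3	@ case 5: D reg <- two core registers
	vmov	r2, r3, d0	@ case 7: two core registers <- D reg
	vmov.f32 s0, #1.0	@ case 10: quarter-precision float imm
	vmov.i32 q0, #0xff	@ case 2: integer immediate

   The encoding decisions themselves are made later in do_neon_mov.  */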
6375
6376/* Use this macro when the operand constraints are different
6377   for ARM and THUMB (e.g. ldrd).  */
6378#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
6379	((arm_operand) | ((thumb_operand) << 16))
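
/* For instance, MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp) packs both
   codes into one value; parse_operands later unpacks it with ">> 16"
   when assembling Thumb and "& 0xffff" when assembling ARM, so a single
   operand pattern can impose different constraints per instruction
   set.  */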
6380
6381/* Matcher codes for parse_operands.  */
6382enum operand_parse_code
6383{
6384  OP_stop,	/* end of line */
6385
6386  OP_RR,	/* ARM register */
6387  OP_RRnpc,	/* ARM register, not r15 */
6388  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
6389  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
6390  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
6391		   optional trailing ! */
6392  OP_RRw,	/* ARM register, not r15, optional trailing ! */
6393  OP_RCP,	/* Coprocessor number */
6394  OP_RCN,	/* Coprocessor register */
6395  OP_RF,	/* FPA register */
6396  OP_RVS,	/* VFP single precision register */
6397  OP_RVD,	/* VFP double precision register (0..15) */
6398  OP_RND,       /* Neon double precision register (0..31) */
6399  OP_RNQ,	/* Neon quad precision register */
6400  OP_RVSD,	/* VFP single or double precision register */
6401  OP_RNDQ,      /* Neon double or quad precision register */
6402  OP_RNSDQ,	/* Neon single, double or quad precision register */
6403  OP_RNSC,      /* Neon scalar D[X] */
6404  OP_RVC,	/* VFP control register */
6405  OP_RMF,	/* Maverick F register */
6406  OP_RMD,	/* Maverick D register */
6407  OP_RMFX,	/* Maverick FX register */
6408  OP_RMDX,	/* Maverick DX register */
6409  OP_RMAX,	/* Maverick AX register */
6410  OP_RMDS,	/* Maverick DSPSC register */
6411  OP_RIWR,	/* iWMMXt wR register */
6412  OP_RIWC,	/* iWMMXt wC register */
6413  OP_RIWG,	/* iWMMXt wCG register */
6414  OP_RXA,	/* XScale accumulator register */
6415
6416  OP_REGLST,	/* ARM register list */
6417  OP_VRSLST,	/* VFP single-precision register list */
6418  OP_VRDLST,	/* VFP double-precision register list */
6419  OP_VRSDLST,   /* VFP single or double-precision register list (& quad) */
6420  OP_NRDLST,    /* Neon double-precision register list (d0-d31, qN aliases) */
6421  OP_NSTRLST,   /* Neon element/structure list */
6422
6423  OP_RNDQ_I0,   /* Neon D or Q reg, or immediate zero.  */
6424  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
6425  OP_RSVD_FI0, /* VFP S or D reg, or floating point immediate zero.  */
6426  OP_RR_RNSC,   /* ARM reg or Neon scalar.  */
6427  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
6428  OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar.  */
6429  OP_RND_RNSC,  /* Neon D reg, or Neon scalar.  */
6430  OP_VMOV,      /* Neon VMOV operands.  */
6431  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
6432  OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift.  */
6433  OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */
6434
6435  OP_I0,        /* immediate zero */
6436  OP_I7,	/* immediate value 0 .. 7 */
6437  OP_I15,	/*		   0 .. 15 */
6438  OP_I16,	/*		   1 .. 16 */
6439  OP_I16z,      /*                 0 .. 16 */
6440  OP_I31,	/*		   0 .. 31 */
6441  OP_I31w,	/*		   0 .. 31, optional trailing ! */
6442  OP_I32,	/*		   1 .. 32 */
6443  OP_I32z,	/*		   0 .. 32 */
6444  OP_I63,	/*		   0 .. 63 */
6445  OP_I63s,	/*		 -64 .. 63 */
6446  OP_I64,	/*		   1 .. 64 */
6447  OP_I64z,	/*		   0 .. 64 */
6448  OP_I255,	/*		   0 .. 255 */
6449
6450  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
6451  OP_I7b,	/*			       0 .. 7 */
6452  OP_I15b,	/*			       0 .. 15 */
6453  OP_I31b,	/*			       0 .. 31 */
6454
6455  OP_SH,	/* shifter operand */
6456  OP_SHG,	/* shifter operand with possible group relocation */
6457  OP_ADDR,	/* Memory address expression (any mode) */
6458  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
6459  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
6460  OP_ADDRGLDC,  /* Mem addr expr (any mode) with possible LDC group reloc */
6461  OP_EXP,	/* arbitrary expression */
6462  OP_EXPi,	/* same, with optional immediate prefix */
6463  OP_EXPr,	/* same, with optional relocation suffix */
6464  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */
6465
6466  OP_CPSF,	/* CPS flags */
6467  OP_ENDI,	/* Endianness specifier */
6468  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
6469  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
6470  OP_COND,	/* conditional code */
6471  OP_TB,	/* Table branch.  */
6472
6473  OP_APSR_RR,   /* ARM register or "APSR_nzcv".  */
6474
6475  OP_RRnpc_I0,	/* ARM register or literal 0 */
6476  OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
6477  OP_RR_EXi,	/* ARM register or expression with imm prefix */
6478  OP_RF_IF,	/* FPA register or immediate */
6479  OP_RIWR_RIWC, /* iWMMXt R or C reg */
6480  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */
6481
6482  /* Optional operands.	 */
6483  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
6484  OP_oI31b,	 /*				0 .. 31 */
6485  OP_oI32b,      /*                             1 .. 32 */
6486  OP_oI32z,      /*                             0 .. 32 */
6487  OP_oIffffb,	 /*				0 .. 65535 */
6488  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */
6489
6490  OP_oRR,	 /* ARM register */
6491  OP_oRRnpc,	 /* ARM register, not the PC */
6492  OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
6493  OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
6494  OP_oRND,       /* Optional Neon double precision register */
6495  OP_oRNQ,       /* Optional Neon quad precision register */
6496  OP_oRNDQ,      /* Optional Neon double or quad precision register */
6497  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
6498  OP_oSHll,	 /* LSL immediate */
6499  OP_oSHar,	 /* ASR immediate */
6500  OP_oSHllar,	 /* LSL or ASR immediate */
6501  OP_oROR,	 /* ROR 0/8/16/24 */
6502  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */
6503
6504  /* Some pre-defined mixed (ARM/THUMB) operands.  */
6505  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
6506  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
6507  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),
6508
6509  OP_FIRST_OPTIONAL = OP_oI7b
6510};
6511
6512/* Generic instruction operand parser.	This does no encoding and no
6513   semantic validation; it merely squirrels values away in the inst
6514   structure.  Returns SUCCESS or FAIL depending on whether the
6515   specified grammar matched.  */
6516static int
6517parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
6518{
6519  unsigned const int *upat = pattern;
6520  char *backtrack_pos = 0;
6521  const char *backtrack_error = 0;
6522  int i, val = 0, backtrack_index = 0;
6523  enum arm_reg_type rtype;
6524  parse_operand_result result;
6525  unsigned int op_parse_code;
6526
6527#define po_char_or_fail(chr)			\
6528  do						\
6529    {						\
6530      if (skip_past_char (&str, chr) == FAIL)	\
6531	goto bad_args;				\
6532    }						\
6533  while (0)
6534
6535#define po_reg_or_fail(regtype)					\
6536  do								\
6537    {								\
6538      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
6539				 & inst.operands[i].vectype);	\
6540      if (val == FAIL)						\
6541	{							\
6542	  first_error (_(reg_expected_msgs[regtype]));		\
6543	  goto failure;						\
6544	}							\
6545      inst.operands[i].reg = val;				\
6546      inst.operands[i].isreg = 1;				\
6547      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
6548      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
6549      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
6550			     || rtype == REG_TYPE_VFD		\
6551			     || rtype == REG_TYPE_NQ);		\
6552    }								\
6553  while (0)
6554
6555#define po_reg_or_goto(regtype, label)				\
6556  do								\
6557    {								\
6558      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
6559				 & inst.operands[i].vectype);	\
6560      if (val == FAIL)						\
6561	goto label;						\
6562								\
6563      inst.operands[i].reg = val;				\
6564      inst.operands[i].isreg = 1;				\
6565      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
6566      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
6567      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
6568			     || rtype == REG_TYPE_VFD		\
6569			     || rtype == REG_TYPE_NQ);		\
6570    }								\
6571  while (0)
6572
6573#define po_imm_or_fail(min, max, popt)				\
6574  do								\
6575    {								\
6576      if (parse_immediate (&str, &val, min, max, popt) == FAIL)	\
6577	goto failure;						\
6578      inst.operands[i].imm = val;				\
6579    }								\
6580  while (0)
6581
6582#define po_scalar_or_goto(elsz, label)					\
6583  do									\
6584    {									\
6585      val = parse_scalar (& str, elsz, & inst.operands[i].vectype);	\
6586      if (val == FAIL)							\
6587	goto label;							\
6588      inst.operands[i].reg = val;					\
6589      inst.operands[i].isscalar = 1;					\
6590    }									\
6591  while (0)
6592
6593#define po_misc_or_fail(expr)			\
6594  do						\
6595    {						\
6596      if (expr)					\
6597	goto failure;				\
6598    }						\
6599  while (0)
6600
6601#define po_misc_or_fail_no_backtrack(expr)		\
6602  do							\
6603    {							\
6604      result = expr;					\
6605      if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)	\
6606	backtrack_pos = 0;				\
6607      if (result != PARSE_OPERAND_SUCCESS)		\
6608	goto failure;					\
6609    }							\
6610  while (0)
6611
6612#define po_barrier_or_imm(str)				   \
6613  do							   \
6614    {						 	   \
6615      val = parse_barrier (&str);			   \
6616      if (val == FAIL && ! ISALPHA (*str))		   \
6617	goto immediate;					   \
6618      if (val == FAIL					   \
6619	  /* ISB can only take SY as an option.  */	   \
6620	  || ((inst.instruction & 0xf0) == 0x60		   \
6621	       && val != 0xf))				   \
6622	{						   \
6623	   inst.error = _("invalid barrier type");	   \
6624	   backtrack_pos = 0;				   \
6625	   goto failure;				   \
6626	}						   \
6627    }							   \
6628  while (0)
6629
6630  skip_whitespace (str);
6631
6632  for (i = 0; upat[i] != OP_stop; i++)
6633    {
6634      op_parse_code = upat[i];
6635      if (op_parse_code >= 1<<16)
6636	op_parse_code = thumb ? (op_parse_code >> 16)
6637				: (op_parse_code & ((1<<16)-1));
6638
6639      if (op_parse_code >= OP_FIRST_OPTIONAL)
6640	{
6641	  /* Remember where we are in case we need to backtrack.  */
6642	  gas_assert (!backtrack_pos);
6643	  backtrack_pos = str;
6644	  backtrack_error = inst.error;
6645	  backtrack_index = i;
6646	}
6647
6648      if (i > 0 && (i > 1 || inst.operands[0].present))
6649	po_char_or_fail (',');
6650
6651      switch (op_parse_code)
6652	{
6653	  /* Registers */
6654	case OP_oRRnpc:
6655	case OP_oRRnpcsp:
6656	case OP_RRnpc:
6657	case OP_RRnpcsp:
6658	case OP_oRR:
6659	case OP_RR:    po_reg_or_fail (REG_TYPE_RN);	  break;
6660	case OP_RCP:   po_reg_or_fail (REG_TYPE_CP);	  break;
6661	case OP_RCN:   po_reg_or_fail (REG_TYPE_CN);	  break;
6662	case OP_RF:    po_reg_or_fail (REG_TYPE_FN);	  break;
6663	case OP_RVS:   po_reg_or_fail (REG_TYPE_VFS);	  break;
6664	case OP_RVD:   po_reg_or_fail (REG_TYPE_VFD);	  break;
6665	case OP_oRND:
6666	case OP_RND:   po_reg_or_fail (REG_TYPE_VFD);	  break;
6667	case OP_RVC:
6668	  po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
6669	  break;
6670	  /* Also accept generic coprocessor regs for unknown registers.  */
6671	  coproc_reg:
6672	  po_reg_or_fail (REG_TYPE_CN);
6673	  break;
6674	case OP_RMF:   po_reg_or_fail (REG_TYPE_MVF);	  break;
6675	case OP_RMD:   po_reg_or_fail (REG_TYPE_MVD);	  break;
6676	case OP_RMFX:  po_reg_or_fail (REG_TYPE_MVFX);	  break;
6677	case OP_RMDX:  po_reg_or_fail (REG_TYPE_MVDX);	  break;
6678	case OP_RMAX:  po_reg_or_fail (REG_TYPE_MVAX);	  break;
6679	case OP_RMDS:  po_reg_or_fail (REG_TYPE_DSPSC);	  break;
6680	case OP_RIWR:  po_reg_or_fail (REG_TYPE_MMXWR);	  break;
6681	case OP_RIWC:  po_reg_or_fail (REG_TYPE_MMXWC);	  break;
6682	case OP_RIWG:  po_reg_or_fail (REG_TYPE_MMXWCG);  break;
6683	case OP_RXA:   po_reg_or_fail (REG_TYPE_XSCALE);  break;
6684	case OP_oRNQ:
6685	case OP_RNQ:   po_reg_or_fail (REG_TYPE_NQ);      break;
6686	case OP_oRNDQ:
6687	case OP_RNDQ:  po_reg_or_fail (REG_TYPE_NDQ);     break;
6688	case OP_RVSD:  po_reg_or_fail (REG_TYPE_VFSD);    break;
6689	case OP_oRNSDQ:
6690	case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ);    break;
6691
6692	/* Neon scalar. Using an element size of 8 means that some invalid
6693	   scalars are accepted here, so deal with those in later code.  */
6694	case OP_RNSC:  po_scalar_or_goto (8, failure);    break;
6695
6696	case OP_RNDQ_I0:
6697	  {
6698	    po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
6699	    break;
6700	    try_imm0:
6701	    po_imm_or_fail (0, 0, TRUE);
6702	  }
6703	  break;
6704
6705	case OP_RVSD_I0:
6706	  po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
6707	  break;
6708
6709	case OP_RSVD_FI0:
6710	  {
6711	    po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
6712	    break;
6713	    try_ifimm0:
6714	    if (parse_ifimm_zero (&str))
6715	      inst.operands[i].imm = 0;
6716	    else
6717	    {
6718	      inst.error
6719	        = _("only floating point zero is allowed as immediate value");
6720	      goto failure;
6721	    }
6722	  }
6723	  break;
6724
6725	case OP_RR_RNSC:
6726	  {
6727	    po_scalar_or_goto (8, try_rr);
6728	    break;
6729	    try_rr:
6730	    po_reg_or_fail (REG_TYPE_RN);
6731	  }
6732	  break;
6733
6734	case OP_RNSDQ_RNSC:
6735	  {
6736	    po_scalar_or_goto (8, try_nsdq);
6737	    break;
6738	    try_nsdq:
6739	    po_reg_or_fail (REG_TYPE_NSDQ);
6740	  }
6741	  break;
6742
6743	case OP_RNDQ_RNSC:
6744	  {
6745	    po_scalar_or_goto (8, try_ndq);
6746	    break;
6747	    try_ndq:
6748	    po_reg_or_fail (REG_TYPE_NDQ);
6749	  }
6750	  break;
6751
6752	case OP_RND_RNSC:
6753	  {
6754	    po_scalar_or_goto (8, try_vfd);
6755	    break;
6756	    try_vfd:
6757	    po_reg_or_fail (REG_TYPE_VFD);
6758	  }
6759	  break;
6760
6761	case OP_VMOV:
6762	  /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6763	     not careful then bad things might happen.  */
6764	  po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
6765	  break;
6766
6767	case OP_RNDQ_Ibig:
6768	  {
6769	    po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
6770	    break;
6771	    try_immbig:
6772	    /* There's a possibility of getting a 64-bit immediate here, so
6773	       we need special handling.  */
6774	    if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
6775		== FAIL)
6776	      {
6777		inst.error = _("immediate value is out of range");
6778		goto failure;
6779	      }
6780	  }
6781	  break;
6782
6783	case OP_RNDQ_I63b:
6784	  {
6785	    po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
6786	    break;
6787	    try_shimm:
6788	    po_imm_or_fail (0, 63, TRUE);
6789	  }
6790	  break;
6791
6792	case OP_RRnpcb:
6793	  po_char_or_fail ('[');
6794	  po_reg_or_fail  (REG_TYPE_RN);
6795	  po_char_or_fail (']');
6796	  break;
6797
6798	case OP_RRnpctw:
6799	case OP_RRw:
6800	case OP_oRRw:
6801	  po_reg_or_fail (REG_TYPE_RN);
6802	  if (skip_past_char (&str, '!') == SUCCESS)
6803	    inst.operands[i].writeback = 1;
6804	  break;
6805
6806	  /* Immediates */
6807	case OP_I7:	 po_imm_or_fail (  0,	   7, FALSE);	break;
6808	case OP_I15:	 po_imm_or_fail (  0,	  15, FALSE);	break;
6809	case OP_I16:	 po_imm_or_fail (  1,	  16, FALSE);	break;
6810	case OP_I16z:	 po_imm_or_fail (  0,     16, FALSE);   break;
6811	case OP_I31:	 po_imm_or_fail (  0,	  31, FALSE);	break;
6812	case OP_I32:	 po_imm_or_fail (  1,	  32, FALSE);	break;
6813	case OP_I32z:	 po_imm_or_fail (  0,     32, FALSE);   break;
6814	case OP_I63s:	 po_imm_or_fail (-64,	  63, FALSE);	break;
6815	case OP_I63:	 po_imm_or_fail (  0,     63, FALSE);   break;
6816	case OP_I64:	 po_imm_or_fail (  1,     64, FALSE);   break;
6817	case OP_I64z:	 po_imm_or_fail (  0,     64, FALSE);   break;
6818	case OP_I255:	 po_imm_or_fail (  0,	 255, FALSE);	break;
6819
6820	case OP_I4b:	 po_imm_or_fail (  1,	   4, TRUE);	break;
6821	case OP_oI7b:
6822	case OP_I7b:	 po_imm_or_fail (  0,	   7, TRUE);	break;
6823	case OP_I15b:	 po_imm_or_fail (  0,	  15, TRUE);	break;
6824	case OP_oI31b:
6825	case OP_I31b:	 po_imm_or_fail (  0,	  31, TRUE);	break;
6826	case OP_oI32b:   po_imm_or_fail (  1,     32, TRUE);    break;
6827	case OP_oI32z:   po_imm_or_fail (  0,     32, TRUE);    break;
6828	case OP_oIffffb: po_imm_or_fail (  0, 0xffff, TRUE);	break;
6829
6830	  /* Immediate variants */
6831	case OP_oI255c:
6832	  po_char_or_fail ('{');
6833	  po_imm_or_fail (0, 255, TRUE);
6834	  po_char_or_fail ('}');
6835	  break;
6836
6837	case OP_I31w:
6838	  /* The expression parser chokes on a trailing !, so we have
6839	     to find it first and zap it.  */
6840	  {
6841	    char *s = str;
6842	    while (*s && *s != ',')
6843	      s++;
6844	    if (s[-1] == '!')
6845	      {
6846		s[-1] = '\0';
6847		inst.operands[i].writeback = 1;
6848	      }
6849	    po_imm_or_fail (0, 31, TRUE);
6850	    if (str == s - 1)
6851	      str = s;
6852	  }
6853	  break;
6854
6855	  /* Expressions */
6856	case OP_EXPi:	EXPi:
6857	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6858					      GE_OPT_PREFIX));
6859	  break;
6860
6861	case OP_EXP:
6862	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6863					      GE_NO_PREFIX));
6864	  break;
6865
6866	case OP_EXPr:	EXPr:
6867	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6868					      GE_NO_PREFIX));
6869	  if (inst.reloc.exp.X_op == O_symbol)
6870	    {
6871	      val = parse_reloc (&str);
6872	      if (val == -1)
6873		{
6874		  inst.error = _("unrecognized relocation suffix");
6875		  goto failure;
6876		}
6877	      else if (val != BFD_RELOC_UNUSED)
6878		{
6879		  inst.operands[i].imm = val;
6880		  inst.operands[i].hasreloc = 1;
6881		}
6882	    }
6883	  break;
6884
6885	  /* Operand for MOVW or MOVT.  */
6886	case OP_HALF:
6887	  po_misc_or_fail (parse_half (&str));
6888	  break;
6889
6890	  /* Register or expression.  */
6891	case OP_RR_EXr:	  po_reg_or_goto (REG_TYPE_RN, EXPr); break;
6892	case OP_RR_EXi:	  po_reg_or_goto (REG_TYPE_RN, EXPi); break;
6893
6894	  /* Register or immediate.  */
6895	case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0);   break;
6896	I0:		  po_imm_or_fail (0, 0, FALSE);	      break;
6897
6898	case OP_RF_IF:    po_reg_or_goto (REG_TYPE_FN, IF);   break;
6899	IF:
6900	  if (!is_immediate_prefix (*str))
6901	    goto bad_args;
6902	  str++;
6903	  val = parse_fpa_immediate (&str);
6904	  if (val == FAIL)
6905	    goto failure;
6906	  /* FPA immediates are encoded as registers 8-15.
6907	     parse_fpa_immediate has already applied the offset.  */
6908	  inst.operands[i].reg = val;
6909	  inst.operands[i].isreg = 1;
6910	  break;
6911
6912	case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
6913	I32z:		  po_imm_or_fail (0, 32, FALSE);	  break;
6914
6915	  /* Two kinds of register.  */
6916	case OP_RIWR_RIWC:
6917	  {
6918	    struct reg_entry *rege = arm_reg_parse_multi (&str);
6919	    if (!rege
6920		|| (rege->type != REG_TYPE_MMXWR
6921		    && rege->type != REG_TYPE_MMXWC
6922		    && rege->type != REG_TYPE_MMXWCG))
6923	      {
6924		inst.error = _("iWMMXt data or control register expected");
6925		goto failure;
6926	      }
6927	    inst.operands[i].reg = rege->number;
6928	    inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
6929	  }
6930	  break;
6931
6932	case OP_RIWC_RIWG:
6933	  {
6934	    struct reg_entry *rege = arm_reg_parse_multi (&str);
6935	    if (!rege
6936		|| (rege->type != REG_TYPE_MMXWC
6937		    && rege->type != REG_TYPE_MMXWCG))
6938	      {
6939		inst.error = _("iWMMXt control register expected");
6940		goto failure;
6941	      }
6942	    inst.operands[i].reg = rege->number;
6943	    inst.operands[i].isreg = 1;
6944	  }
6945	  break;
6946
6947	  /* Misc */
6948	case OP_CPSF:	 val = parse_cps_flags (&str);		break;
6949	case OP_ENDI:	 val = parse_endian_specifier (&str);	break;
6950	case OP_oROR:	 val = parse_ror (&str);		break;
6951	case OP_COND:	 val = parse_cond (&str);		break;
6952	case OP_oBARRIER_I15:
6953	  po_barrier_or_imm (str); break;
6954	  immediate:
6955	  if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
6956	    goto failure;
6957	  break;
6958
6959	case OP_wPSR:
6960	case OP_rPSR:
6961	  po_reg_or_goto (REG_TYPE_RNB, try_psr);
6962	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
6963	    {
6964	      inst.error = _("Banked registers are not available with this "
6965			     "architecture.");
6966	      goto failure;
6967	    }
6968	  break;
6969	  try_psr:
6970	  val = parse_psr (&str, op_parse_code == OP_wPSR);
6971	  break;
6972
6973	case OP_APSR_RR:
6974	  po_reg_or_goto (REG_TYPE_RN, try_apsr);
6975	  break;
6976	  try_apsr:
6977	  /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
6978	     instruction).  */
6979	  if (strncasecmp (str, "APSR_", 5) == 0)
6980	    {
6981	      unsigned found = 0;
6982	      str += 5;
6983	      while (found < 15)
6984		switch (*str++)
6985		  {
6986		  case 'c': found = (found & 1) ? 16 : found | 1; break;
6987		  case 'n': found = (found & 2) ? 16 : found | 2; break;
6988		  case 'z': found = (found & 4) ? 16 : found | 4; break;
6989		  case 'v': found = (found & 8) ? 16 : found | 8; break;
6990		  default: found = 16;
6991		  }
6992	      if (found != 15)
6993		goto failure;
6994	      inst.operands[i].isvec = 1;
6995	      /* APSR_nzcv is encoded in instructions as if it were the REG_PC.  */
6996	      inst.operands[i].reg = REG_PC;
6997	    }
6998	  else
6999	    goto failure;
7000	  break;
7001
7002	case OP_TB:
7003	  po_misc_or_fail (parse_tb (&str));
7004	  break;
7005
7006	  /* Register lists.  */
7007	case OP_REGLST:
7008	  val = parse_reg_list (&str);
7009	  if (*str == '^')
7010	    {
7011	      inst.operands[i].writeback = 1;
7012	      str++;
7013	    }
7014	  break;
7015
7016	case OP_VRSLST:
7017	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
7018	  break;
7019
7020	case OP_VRDLST:
7021	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
7022	  break;
7023
7024	case OP_VRSDLST:
7025	  /* Allow Q registers too.  */
7026	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7027				    REGLIST_NEON_D);
7028	  if (val == FAIL)
7029	    {
7030	      inst.error = NULL;
7031	      val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7032					REGLIST_VFP_S);
7033	      inst.operands[i].issingle = 1;
7034	    }
7035	  break;
7036
7037	case OP_NRDLST:
7038	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7039				    REGLIST_NEON_D);
7040	  break;
7041
7042	case OP_NSTRLST:
7043	  val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7044					   &inst.operands[i].vectype);
7045	  break;
7046
7047	  /* Addressing modes */
7048	case OP_ADDR:
7049	  po_misc_or_fail (parse_address (&str, i));
7050	  break;
7051
7052	case OP_ADDRGLDR:
7053	  po_misc_or_fail_no_backtrack (
7054	    parse_address_group_reloc (&str, i, GROUP_LDR));
7055	  break;
7056
7057	case OP_ADDRGLDRS:
7058	  po_misc_or_fail_no_backtrack (
7059	    parse_address_group_reloc (&str, i, GROUP_LDRS));
7060	  break;
7061
7062	case OP_ADDRGLDC:
7063	  po_misc_or_fail_no_backtrack (
7064	    parse_address_group_reloc (&str, i, GROUP_LDC));
7065	  break;
7066
7067	case OP_SH:
7068	  po_misc_or_fail (parse_shifter_operand (&str, i));
7069	  break;
7070
7071	case OP_SHG:
7072	  po_misc_or_fail_no_backtrack (
7073	    parse_shifter_operand_group_reloc (&str, i));
7074	  break;
7075
7076	case OP_oSHll:
7077	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
7078	  break;
7079
7080	case OP_oSHar:
7081	  po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
7082	  break;
7083
7084	case OP_oSHllar:
7085	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
7086	  break;
7087
7088	default:
7089	  as_fatal (_("unhandled operand code %d"), op_parse_code);
7090	}
7091
7092      /* Various value-based sanity checks and shared operations.  We
7093	 do not signal immediate failures for the register constraints;
7094	 this allows a syntax error to take precedence.	 */
7095      switch (op_parse_code)
7096	{
7097	case OP_oRRnpc:
7098	case OP_RRnpc:
7099	case OP_RRnpcb:
7100	case OP_RRw:
7101	case OP_oRRw:
7102	case OP_RRnpc_I0:
7103	  if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
7104	    inst.error = BAD_PC;
7105	  break;
7106
7107	case OP_oRRnpcsp:
7108	case OP_RRnpcsp:
7109	  if (inst.operands[i].isreg)
7110	    {
7111	      if (inst.operands[i].reg == REG_PC)
7112		inst.error = BAD_PC;
7113	      else if (inst.operands[i].reg == REG_SP)
7114		inst.error = BAD_SP;
7115	    }
7116	  break;
7117
7118	case OP_RRnpctw:
7119	  if (inst.operands[i].isreg
7120	      && inst.operands[i].reg == REG_PC
7121	      && (inst.operands[i].writeback || thumb))
7122	    inst.error = BAD_PC;
7123	  break;
7124
7125	case OP_CPSF:
7126	case OP_ENDI:
7127	case OP_oROR:
7128	case OP_wPSR:
7129	case OP_rPSR:
7130	case OP_COND:
7131	case OP_oBARRIER_I15:
7132	case OP_REGLST:
7133	case OP_VRSLST:
7134	case OP_VRDLST:
7135	case OP_VRSDLST:
7136	case OP_NRDLST:
7137	case OP_NSTRLST:
7138	  if (val == FAIL)
7139	    goto failure;
7140	  inst.operands[i].imm = val;
7141	  break;
7142
7143	default:
7144	  break;
7145	}
7146
7147      /* If we get here, this operand was successfully parsed.	*/
7148      inst.operands[i].present = 1;
7149      continue;
7150
7151    bad_args:
7152      inst.error = BAD_ARGS;
7153
7154    failure:
7155      if (!backtrack_pos)
7156	{
7157	  /* The parse routine should already have set inst.error, but set a
7158	     default here just in case.  */
7159	  if (!inst.error)
7160	    inst.error = _("syntax error");
7161	  return FAIL;
7162	}
7163
7164      /* Do not backtrack over a trailing optional argument that
7165	 absorbed some text.  We will only fail again, with the
7166	 'garbage following instruction' error message, which is
7167	 probably less helpful than the current one.  */
7168      if (backtrack_index == i && backtrack_pos != str
7169	  && upat[i+1] == OP_stop)
7170	{
7171	  if (!inst.error)
7172	    inst.error = _("syntax error");
7173	  return FAIL;
7174	}
7175
7176      /* Try again, skipping the optional argument at backtrack_pos.  */
7177      str = backtrack_pos;
7178      inst.error = backtrack_error;
7179      inst.operands[backtrack_index].present = 0;
7180      i = backtrack_index;
7181      backtrack_pos = 0;
7182    }
7183
7184  /* Check that we have parsed all the arguments.  */
7185  if (*str != '\0' && !inst.error)
7186    inst.error = _("garbage following instruction");
7187
7188  return inst.error ? FAIL : SUCCESS;
7189}
7190
7191#undef po_char_or_fail
7192#undef po_reg_or_fail
7193#undef po_reg_or_goto
7194#undef po_imm_or_fail
7195#undef po_scalar_or_goto
7196#undef po_barrier_or_imm
7197
7198/* Shorthand macro for instruction encoding functions issuing errors.  */
7199#define constraint(expr, err)			\
7200  do						\
7201    {						\
7202      if (expr)					\
7203	{					\
7204	  inst.error = err;			\
7205	  return;				\
7206	}					\
7207    }						\
7208  while (0)
7209
7210/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
7211   instructions are unpredictable if these registers are used.  This
7212   is the BadReg predicate in ARM's Thumb-2 documentation.  */
7213#define reject_bad_reg(reg)				\
7214  do							\
7215   if (reg == REG_SP || reg == REG_PC)			\
7216     {							\
7217       inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC;	\
7218       return;						\
7219     }							\
7220  while (0)
7221
7222/* If REG is R13 (the stack pointer), warn that its use is
7223   deprecated.  */
7224#define warn_deprecated_sp(reg)			\
7225  do						\
7226    if (warn_on_deprecated && reg == REG_SP)	\
7227       as_tsktsk (_("use of r13 is deprecated"));	\
7228  while (0)
7229
7230/* Functions for operand encoding.  ARM, then Thumb.  */
7231
7232#define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
7233
7234/* If VAL can be encoded in the immediate field of an ARM instruction,
7235   return the encoded form.  Otherwise, return FAIL.  */
7236
7237static unsigned int
7238encode_arm_immediate (unsigned int val)
7239{
7240  unsigned int a, i;
7241
7242  for (i = 0; i < 32; i += 2)
7243    if ((a = rotate_left (val, i)) <= 0xff)
7244      return a | (i << 7); /* 12-bit pack: [shift-cnt,const].  */
7245
7246  return FAIL;
7247}
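
/* Worked example: 0x3f0 is not itself an 8-bit value, but
   rotate_left (0x3f0, 28) == 0x3f, so encode_arm_immediate returns
   0x3f | (28 << 7) == 0xe3f, i.e. imm8 = 0x3f with rotation field 14
   (rotate right by 28).  A value such as 0x102 has no 8-bit rotated
   form and yields FAIL.  */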
7248
7249/* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7250   return the encoded form.  Otherwise, return FAIL.  */
7251static unsigned int
7252encode_thumb32_immediate (unsigned int val)
7253{
7254  unsigned int a, i;
7255
7256  if (val <= 0xff)
7257    return val;
7258
7259  for (i = 1; i <= 24; i++)
7260    {
7261      a = val >> i;
7262      if ((val & ~(0xff << i)) == 0)
7263	return ((val >> i) & 0x7f) | ((32 - i) << 7);
7264    }
7265
7266  a = val & 0xff;
7267  if (val == ((a << 16) | a))
7268    return 0x100 | a;
7269  if (val == ((a << 24) | (a << 16) | (a << 8) | a))
7270    return 0x300 | a;
7271
7272  a = val & 0xff00;
7273  if (val == ((a << 16) | a))
7274    return 0x200 | (a >> 8);
7275
7276  return FAIL;
7277}
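
/* Worked examples for the Thumb-2 modified-immediate patterns above:

     0x000000ab -> 0x0ab	(plain byte)
     0x00ab00ab -> 0x1ab	(byte replicated in both halfwords)
     0xab00ab00 -> 0x2ab	(byte replicated in the high halves)
     0xabababab -> 0x3ab	(byte replicated four times)
     0x00004500 -> 0xc8a	(0x8a rotated right by 25)

   Values matching none of these forms, e.g. 0x00012345, yield FAIL.  */
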
7278/* Encode a VFP SP or DP register number into inst.instruction.  */
7279
7280static void
7281encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
7282{
7283  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
7284      && reg > 15)
7285    {
7286      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
7287	{
7288	  if (thumb_mode)
7289	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
7290				    fpu_vfp_ext_d32);
7291	  else
7292	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
7293				    fpu_vfp_ext_d32);
7294	}
7295      else
7296	{
7297	  first_error (_("D register out of range for selected VFP version"));
7298	  return;
7299	}
7300    }
7301
7302  switch (pos)
7303    {
7304    case VFP_REG_Sd:
7305      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
7306      break;
7307
7308    case VFP_REG_Sn:
7309      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
7310      break;
7311
7312    case VFP_REG_Sm:
7313      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
7314      break;
7315
7316    case VFP_REG_Dd:
7317      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
7318      break;
7319
7320    case VFP_REG_Dn:
7321      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
7322      break;
7323
7324    case VFP_REG_Dm:
7325      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
7326      break;
7327
7328    default:
7329      abort ();
7330    }
7331}
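
/* Field placement examples (illustrative): S3 in the Sd position puts
   3 >> 1 == 1 into bits 15-12 and the low bit into bit 22 (the D bit);
   D17 in the Dm position puts 17 & 15 == 1 into bits 3-0 and
   17 >> 4 == 1 into bit 5 (the M bit).  D registers above 15 are only
   accepted when the selected FPU has the D32 extension.  */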
7332
7333/* Encode a <shift> in an ARM-format instruction.  The immediate,
7334   if any, is handled by md_apply_fix.	 */
7335static void
7336encode_arm_shift (int i)
7337{
7338  if (inst.operands[i].shift_kind == SHIFT_RRX)
7339    inst.instruction |= SHIFT_ROR << 5;
7340  else
7341    {
7342      inst.instruction |= inst.operands[i].shift_kind << 5;
7343      if (inst.operands[i].immisreg)
7344	{
7345	  inst.instruction |= SHIFT_BY_REG;
7346	  inst.instruction |= inst.operands[i].imm << 8;
7347	}
7348      else
7349	inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7350    }
7351}
7352
7353static void
7354encode_arm_shifter_operand (int i)
7355{
7356  if (inst.operands[i].isreg)
7357    {
7358      inst.instruction |= inst.operands[i].reg;
7359      encode_arm_shift (i);
7360    }
7361  else
7362    {
7363      inst.instruction |= INST_IMMEDIATE;
7364      if (inst.reloc.type != BFD_RELOC_ARM_IMMEDIATE)
7365	inst.instruction |= inst.operands[i].imm;
7366    }
7367}
7368
7369/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.  */
7370static void
7371encode_arm_addr_mode_common (int i, bfd_boolean is_t)
7372{
7373  /* PR 14260:
7374     Generate an error if the operand is not a register.  */
7375  constraint (!inst.operands[i].isreg,
7376	      _("Instruction does not support =N addresses"));
7377
7378  inst.instruction |= inst.operands[i].reg << 16;
7379
7380  if (inst.operands[i].preind)
7381    {
7382      if (is_t)
7383	{
7384	  inst.error = _("instruction does not accept preindexed addressing");
7385	  return;
7386	}
7387      inst.instruction |= PRE_INDEX;
7388      if (inst.operands[i].writeback)
7389	inst.instruction |= WRITE_BACK;
7390
7391    }
7392  else if (inst.operands[i].postind)
7393    {
7394      gas_assert (inst.operands[i].writeback);
7395      if (is_t)
7396	inst.instruction |= WRITE_BACK;
7397    }
7398  else /* unindexed - only for coprocessor */
7399    {
7400      inst.error = _("instruction does not accept unindexed addressing");
7401      return;
7402    }
7403
7404  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
7405      && (((inst.instruction & 0x000f0000) >> 16)
7406	  == ((inst.instruction & 0x0000f000) >> 12)))
7407    as_warn ((inst.instruction & LOAD_BIT)
7408	     ? _("destination register same as write-back base")
7409	     : _("source register same as write-back base"));
7410}
7411
7412/* inst.operands[i] was set up by parse_address.  Encode it into an
7413   ARM-format mode 2 load or store instruction.	 If is_t is true,
7414   reject forms that cannot be used with a T instruction (i.e. not
7415   post-indexed).  */
7416static void
7417encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
7418{
7419  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
7420
7421  encode_arm_addr_mode_common (i, is_t);
7422
7423  if (inst.operands[i].immisreg)
7424    {
7425      constraint ((inst.operands[i].imm == REG_PC
7426		   || (is_pc && inst.operands[i].writeback)),
7427		  BAD_PC_ADDRESSING);
7428      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
7429      inst.instruction |= inst.operands[i].imm;
7430      if (!inst.operands[i].negative)
7431	inst.instruction |= INDEX_UP;
7432      if (inst.operands[i].shifted)
7433	{
7434	  if (inst.operands[i].shift_kind == SHIFT_RRX)
7435	    inst.instruction |= SHIFT_ROR << 5;
7436	  else
7437	    {
7438	      inst.instruction |= inst.operands[i].shift_kind << 5;
7439	      inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7440	    }
7441	}
7442    }
7443  else /* immediate offset in inst.reloc */
7444    {
7445      if (is_pc && !inst.reloc.pc_rel)
7446	{
7447	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);
7448
7449	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
7450	     cannot use PC in addressing.
7451	     PC cannot be used in writeback addressing, either.  */
7452	  constraint ((is_t || inst.operands[i].writeback),
7453		      BAD_PC_ADDRESSING);
7454
7455	  /* Use of PC in str is deprecated for ARMv7.  */
7456	  if (warn_on_deprecated
7457	      && !is_load
7458	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
7459	    as_tsktsk (_("use of PC in this instruction is deprecated"));
7460	}
7461
7462      if (inst.reloc.type == BFD_RELOC_UNUSED)
7463	{
7464	  /* Prefer + for zero encoded value.  */
7465	  if (!inst.operands[i].negative)
7466	    inst.instruction |= INDEX_UP;
7467	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
7468	}
7469    }
7470}
7471
7472/* inst.operands[i] was set up by parse_address.  Encode it into an
7473   ARM-format mode 3 load or store instruction.	 Reject forms that
7474   cannot be used with such instructions.  If is_t is true, reject
7475   forms that cannot be used with a T instruction (i.e. not
7476   post-indexed).  */
7477static void
7478encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
7479{
7480  if (inst.operands[i].immisreg && inst.operands[i].shifted)
7481    {
7482      inst.error = _("instruction does not accept scaled register index");
7483      return;
7484    }
7485
7486  encode_arm_addr_mode_common (i, is_t);
7487
7488  if (inst.operands[i].immisreg)
7489    {
7490      constraint ((inst.operands[i].imm == REG_PC
7491		   || (is_t && inst.operands[i].reg == REG_PC)),
7492		  BAD_PC_ADDRESSING);
7493      constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
7494		  BAD_PC_WRITEBACK);
7495      inst.instruction |= inst.operands[i].imm;
7496      if (!inst.operands[i].negative)
7497	inst.instruction |= INDEX_UP;
7498    }
7499  else /* immediate offset in inst.reloc */
7500    {
7501      constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
7502		   && inst.operands[i].writeback),
7503		  BAD_PC_WRITEBACK);
7504      inst.instruction |= HWOFFSET_IMM;
7505      if (inst.reloc.type == BFD_RELOC_UNUSED)
7506	{
7507	  /* Prefer + for zero encoded value.  */
7508	  if (!inst.operands[i].negative)
7509	    inst.instruction |= INDEX_UP;
7510
7511	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
7512	}
7513    }
7514}
7515
7516/* Write immediate bits [7:0] to the following locations:
7517
7518  |28/24|23     19|18 16|15                    4|3     0|
7519  |  a  |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7520
7521  This function is used by VMOV/VMVN/VORR/VBIC.  */
7522
7523static void
7524neon_write_immbits (unsigned immbits)
7525{
7526  inst.instruction |= immbits & 0xf;
7527  inst.instruction |= ((immbits >> 4) & 0x7) << 16;
7528  inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
7529}
7530
7531/* Invert low-order SIZE bits of XHI:XLO.  */
7532
7533static void
7534neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
7535{
7536  unsigned immlo = xlo ? *xlo : 0;
7537  unsigned immhi = xhi ? *xhi : 0;
7538
7539  switch (size)
7540    {
7541    case 8:
7542      immlo = (~immlo) & 0xff;
7543      break;
7544
7545    case 16:
7546      immlo = (~immlo) & 0xffff;
7547      break;
7548
7549    case 64:
7550      immhi = (~immhi) & 0xffffffff;
7551      /* fall through.  */
7552
7553    case 32:
7554      immlo = (~immlo) & 0xffffffff;
7555      break;
7556
7557    default:
7558      abort ();
7559    }
7560
7561  if (xlo)
7562    *xlo = immlo;
7563
7564  if (xhi)
7565    *xhi = immhi;
7566}
7567
7568/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
7569   A, B, C, D.  */
7570
7571static int
7572neon_bits_same_in_bytes (unsigned imm)
7573{
7574  return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
7575	 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
7576	 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
7577	 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
7578}
7579
7580/* For an immediate of the above form, return 0bABCD.  */
7581
7582static unsigned
7583neon_squash_bits (unsigned imm)
7584{
7585  return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
7586	 | ((imm & 0x01000000) >> 21);
7587}
7588
7589/* Compress quarter-float representation to 0b...000 abcdefgh.  */
7590
7591static unsigned
7592neon_qfloat_bits (unsigned imm)
7593{
7594  return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
7595}
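
/* E.g. the single-precision pattern for 1.0f, 0x3f800000, squashes to
   ((0x3f800000 >> 19) & 0x7f) | ((0x3f800000 >> 24) & 0x80) == 0x70,
   the 8-bit immediate used for "vmov.f32 d0, #1.0".  */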
7596
7597/* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
7598   the instruction. *OP is passed as the initial value of the op field, and
7599   may be set to a different value depending on the constant (i.e.
7600   "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
7601   MVN).  If the immediate looks like a repeated pattern then also
7602   try smaller element sizes.  */
7603
7604static int
7605neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
7606			 unsigned *immbits, int *op, int size,
7607			 enum neon_el_type type)
7608{
7609  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
7610     float.  */
7611  if (type == NT_float && !float_p)
7612    return FAIL;
7613
7614  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
7615    {
7616      if (size != 32 || *op == 1)
7617	return FAIL;
7618      *immbits = neon_qfloat_bits (immlo);
7619      return 0xf;
7620    }
7621
7622  if (size == 64)
7623    {
7624      if (neon_bits_same_in_bytes (immhi)
7625	  && neon_bits_same_in_bytes (immlo))
7626	{
7627	  if (*op == 1)
7628	    return FAIL;
7629	  *immbits = (neon_squash_bits (immhi) << 4)
7630		     | neon_squash_bits (immlo);
7631	  *op = 1;
7632	  return 0xe;
7633	}
7634
7635      if (immhi != immlo)
7636	return FAIL;
7637    }
7638
7639  if (size >= 32)
7640    {
7641      if (immlo == (immlo & 0x000000ff))
7642	{
7643	  *immbits = immlo;
7644	  return 0x0;
7645	}
7646      else if (immlo == (immlo & 0x0000ff00))
7647	{
7648	  *immbits = immlo >> 8;
7649	  return 0x2;
7650	}
7651      else if (immlo == (immlo & 0x00ff0000))
7652	{
7653	  *immbits = immlo >> 16;
7654	  return 0x4;
7655	}
7656      else if (immlo == (immlo & 0xff000000))
7657	{
7658	  *immbits = immlo >> 24;
7659	  return 0x6;
7660	}
7661      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
7662	{
7663	  *immbits = (immlo >> 8) & 0xff;
7664	  return 0xc;
7665	}
7666      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
7667	{
7668	  *immbits = (immlo >> 16) & 0xff;
7669	  return 0xd;
7670	}
7671
7672      if ((immlo & 0xffff) != (immlo >> 16))
7673	return FAIL;
7674      immlo &= 0xffff;
7675    }
7676
7677  if (size >= 16)
7678    {
7679      if (immlo == (immlo & 0x000000ff))
7680	{
7681	  *immbits = immlo;
7682	  return 0x8;
7683	}
7684      else if (immlo == (immlo & 0x0000ff00))
7685	{
7686	  *immbits = immlo >> 8;
7687	  return 0xa;
7688	}
7689
7690      if ((immlo & 0xff) != (immlo >> 8))
7691	return FAIL;
7692      immlo &= 0xff;
7693    }
7694
7695  if (immlo == (immlo & 0x000000ff))
7696    {
7697      /* Don't allow MVN with 8-bit immediate.  */
7698      if (*op == 1)
7699	return FAIL;
7700      *immbits = immlo;
7701      return 0xe;
7702    }
7703
7704  return FAIL;
7705}
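
/* Illustrative uses of the above (assuming *OP == 0 and TYPE ==
   NT_invtype on entry):

     immlo = 0x000000ab, size = 32  ->  returns 0x0, *immbits = 0xab
     immlo = 0x0000ab00, size = 32  ->  returns 0x2, *immbits = 0xab
     immlo = 0x00ab00ab, size = 32  ->  no 32-bit case matches, but the
       two halfwords agree, so the 16-bit cases are tried and the result
       is cmode 0x8 with *immbits = 0xab.  */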
7706
7707#if defined BFD_HOST_64_BIT
7708/* Returns TRUE if double precision value V may be cast
7709   to single precision without loss of accuracy.  */
7710
7711static bfd_boolean
7712is_double_a_single (bfd_int64_t v)
7713{
7714  int exp = (int)((v >> 52) & 0x7FF);
7715  bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
7716
7717  return (exp == 0 || exp == 0x7FF
7718	  || (exp >= 1023 - 126 && exp <= 1023 + 127))
7719    && (mantissa & 0x1FFFFFFFl) == 0;
7720}
7721
7722/* Returns a double precision value cast to single precision
7723   (ignoring the least significant bits in exponent and mantissa).  */
7724
7725static int
7726double_to_single (bfd_int64_t v)
7727{
7728  int sign = (int) ((v >> 63) & 1l);
7729  int exp = (int) ((v >> 52) & 0x7FF);
7730  bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
7731
7732  if (exp == 0x7FF)
7733    exp = 0xFF;
7734  else
7735    {
7736      exp = exp - 1023 + 127;
7737      if (exp >= 0xFF)
7738	{
7739	  /* Infinity.  */
7740	  exp = 0x7F;
7741	  mantissa = 0;
7742	}
7743      else if (exp < 0)
7744	{
7745	  /* No denormalized numbers.  */
7746	  exp = 0;
7747	  mantissa = 0;
7748	}
7749    }
7750  mantissa >>= 29;
7751  return (sign << 31) | (exp << 23) | mantissa;
7752}
7753#endif /* BFD_HOST_64_BIT */
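
/* Illustrative values for the two helpers above: 1.0 as an IEEE double
   is 0x3ff0000000000000; is_double_a_single () accepts it (the exponent
   is in single-precision range and the low 29 mantissa bits are zero)
   and double_to_single () returns 0x3f800000, i.e. 1.0f.  */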
7754
7755enum lit_type
7756{
7757  CONST_THUMB,
7758  CONST_ARM,
7759  CONST_VEC
7760};
7761
7762static void do_vfp_nsyn_opcode (const char *);
7763
7764/* inst.reloc.exp describes an "=expr" load pseudo-operation.
7765   Determine whether it can be performed with a move instruction; if
7766   it can, convert inst.instruction to that move instruction and
7767   return TRUE; if it can't, convert inst.instruction to a literal-pool
7768   load and return FALSE.  If this is not a valid thing to do in the
7769   current context, set inst.error and return TRUE.
7770
7771   inst.operands[i] describes the destination register.	 */
7772
7773static bfd_boolean
7774move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
7775{
7776  unsigned long tbit;
7777  bfd_boolean thumb_p = (t == CONST_THUMB);
7778  bfd_boolean arm_p   = (t == CONST_ARM);
7779
7780  if (thumb_p)
7781    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
7782  else
7783    tbit = LOAD_BIT;
7784
7785  if ((inst.instruction & tbit) == 0)
7786    {
7787      inst.error = _("invalid pseudo operation");
7788      return TRUE;
7789    }
7790
7791  if (inst.reloc.exp.X_op != O_constant
7792      && inst.reloc.exp.X_op != O_symbol
7793      && inst.reloc.exp.X_op != O_big)
7794    {
7795      inst.error = _("constant expression expected");
7796      return TRUE;
7797    }
7798
7799  if (inst.reloc.exp.X_op == O_constant
7800      || inst.reloc.exp.X_op == O_big)
7801    {
7802#if defined BFD_HOST_64_BIT
7803      bfd_int64_t v;
7804#else
7805      offsetT v;
7806#endif
7807      if (inst.reloc.exp.X_op == O_big)
7808	{
7809	  LITTLENUM_TYPE w[X_PRECISION];
7810	  LITTLENUM_TYPE * l;
7811
7812	  if (inst.reloc.exp.X_add_number == -1)
7813	    {
7814	      gen_to_words (w, X_PRECISION, E_PRECISION);
7815	      l = w;
7816	      /* FIXME: Should we check words w[2..5] ?  */
7817	    }
7818	  else
7819	    l = generic_bignum;
7820
7821#if defined BFD_HOST_64_BIT
7822	  v =
7823	    ((((((((bfd_int64_t) l[3] & LITTLENUM_MASK)
7824		  << LITTLENUM_NUMBER_OF_BITS)
7825		 | ((bfd_int64_t) l[2] & LITTLENUM_MASK))
7826		<< LITTLENUM_NUMBER_OF_BITS)
7827	       | ((bfd_int64_t) l[1] & LITTLENUM_MASK))
7828	      << LITTLENUM_NUMBER_OF_BITS)
7829	     | ((bfd_int64_t) l[0] & LITTLENUM_MASK));
7830#else
7831	  v = ((l[1] & LITTLENUM_MASK) << LITTLENUM_NUMBER_OF_BITS)
7832	    |  (l[0] & LITTLENUM_MASK);
7833#endif
7834	}
7835      else
7836	v = inst.reloc.exp.X_add_number;
7837
7838      if (!inst.operands[i].issingle)
7839	{
7840	  if (thumb_p)
7841	    {
7842	      if ((v & ~0xFF) == 0)
7843		{
7844		  /* This can be done with a mov(1) instruction.  */
7845		  inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
7846		  inst.instruction |= v;
7847		  return TRUE;
7848		}
7849
7850	      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2))
7851		{
7852		  /* Check if on thumb2 it can be done with a mov.w or mvn.w
7853		     instruction.  */
7854		  unsigned int newimm;
7855		  bfd_boolean isNegated;
7856
7857		  newimm = encode_thumb32_immediate (v);
7858		  if (newimm != (unsigned int) FAIL)
7859		    isNegated = FALSE;
7860		  else
7861		    {
7862		      newimm = encode_thumb32_immediate (~v);
7863		      if (newimm != (unsigned int) FAIL)
7864			isNegated = TRUE;
7865		    }
7866
7867		  if (newimm != (unsigned int) FAIL)
7868		    {
7869		      inst.instruction = (0xf04f0000
7870					  | (inst.operands[i].reg << 8));
7871		      inst.instruction |= (isNegated ? 0x200000 : 0);
7872		      inst.instruction |= (newimm & 0x800) << 15;
7873		      inst.instruction |= (newimm & 0x700) << 4;
7874		      inst.instruction |= (newimm & 0x0ff);
7875		      return TRUE;
7876		    }
7877		  else if ((v & ~0xFFFF) == 0)
7878		    {
7879		      /* The number can be loaded with a mov.w instruction.  */
7880		      /* The number can be loaded with a MOVW instruction.  */
7881
7882		      inst.instruction = 0xf2400000;  /* MOVW.  */
7883		      inst.instruction |= (inst.operands[i].reg << 8);
7884		      inst.instruction |= (imm & 0xf000) << 4;
7885		      inst.instruction |= (imm & 0x0800) << 15;
7886		      inst.instruction |= (imm & 0x0700) << 4;
7887		      inst.instruction |= (imm & 0x00ff);
7888		      return TRUE;
7889		    }
7890		}
7891	    }
7892	  else if (arm_p)
7893	    {
7894	      int value = encode_arm_immediate (v);
7895
7896	      if (value != FAIL)
7897		{
7898		  /* This can be done with a mov instruction.  */
7899		  inst.instruction &= LITERAL_MASK;
7900		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
7901		  inst.instruction |= value & 0xfff;
7902		  return TRUE;
7903		}
7904
7905	      value = encode_arm_immediate (~ v);
7906	      if (value != FAIL)
7907		{
7908		  /* This can be done with a mvn instruction.  */
7909		  inst.instruction &= LITERAL_MASK;
7910		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
7911		  inst.instruction |= value & 0xfff;
7912		  return TRUE;
7913		}
7914	    }
7915	  else if (t == CONST_VEC)
7916	    {
7917	      int op = 0;
7918	      unsigned immbits = 0;
7919	      unsigned immlo = inst.operands[1].imm;
7920	      unsigned immhi = inst.operands[1].regisimm
7921		? inst.operands[1].reg
7922		: inst.reloc.exp.X_unsigned
7923		? 0
7924		: ((bfd_int64_t)((int) immlo)) >> 32;
7925	      int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
7926						   &op, 64, NT_invtype);
7927
7928	      if (cmode == FAIL)
7929		{
7930		  neon_invert_size (&immlo, &immhi, 64);
7931		  op = !op;
7932		  cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
7933						   &op, 64, NT_invtype);
7934		}
7935
7936	      if (cmode != FAIL)
7937		{
7938		  inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
7939		    | (1 << 23)
7940		    | (cmode << 8)
7941		    | (op << 5)
7942		    | (1 << 4);
7943
7944		  /* Fill other bits in vmov encoding for both thumb and arm.  */
7945		  if (thumb_mode)
7946		    inst.instruction |= (0x7U << 29) | (0xF << 24);
7947		  else
7948		    inst.instruction |= (0xFU << 28) | (0x1 << 25);
7949		  neon_write_immbits (immbits);
7950		  return TRUE;
7951		}
7952	    }
7953	}
7954
7955      if (t == CONST_VEC)
7956	{
7957	  /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant.  */
7958	  if (inst.operands[i].issingle
7959	      && is_quarter_float (inst.operands[1].imm)
7960	      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3xd))
7961	    {
7962	      inst.operands[1].imm =
7963		neon_qfloat_bits (v);
7964	      do_vfp_nsyn_opcode ("fconsts");
7965	      return TRUE;
7966	    }
7967
7968	  /* If our host does not support a 64-bit type then we cannot perform
7969	     the following optimization.  This means that there will be a
7970	     discrepancy between the output produced by an assembler built for
7971	     a 32-bit-only host and the output produced from a 64-bit host, but
7972	     this cannot be helped.  */
7973#if defined BFD_HOST_64_BIT
7974	  else if (!inst.operands[1].issingle
7975		   && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
7976	    {
7977	      if (is_double_a_single (v)
7978		  && is_quarter_float (double_to_single (v)))
7979		{
7980		  inst.operands[1].imm =
7981		    neon_qfloat_bits (double_to_single (v));
7982		  do_vfp_nsyn_opcode ("fconstd");
7983		  return TRUE;
7984		}
7985	    }
7986#endif
7987	}
7988    }
7989
7990  if (add_to_lit_pool ((!inst.operands[i].isvec
7991			|| inst.operands[i].issingle) ? 4 : 8) == FAIL)
7992    return TRUE;
7993
7994  inst.operands[1].reg = REG_PC;
7995  inst.operands[1].isreg = 1;
7996  inst.operands[1].preind = 1;
7997  inst.reloc.pc_rel = 1;
7998  inst.reloc.type = (thumb_p
7999		     ? BFD_RELOC_ARM_THUMB_OFFSET
8000		     : (mode_3
8001			? BFD_RELOC_ARM_HWLITERAL
8002			: BFD_RELOC_ARM_LITERAL));
8003  return FALSE;
8004}
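
/* Illustrative outcomes of the above in ARM state:

     ldr r0, =0x000000ff   ->  mov r0, #0xff       (immediate encodable)
     ldr r0, =0xffffff00   ->  mvn r0, #0xff       (inverted value fits)
     ldr r0, =0x12345678   ->  ldr r0, [pc, #...]  (literal pool entry)

   In Thumb-2 state a pattern such as 0x00aa00aa can be encoded directly
   with mov.w, and any value below 0x10000 with movw.  */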
8005
8006/* inst.operands[i] was set up by parse_address.  Encode it into an
8007   ARM-format instruction.  Reject all forms which cannot be encoded
8008   into a coprocessor load/store instruction.  If wb_ok is false,
8009   reject use of writeback; if unind_ok is false, reject use of
8010   unindexed addressing.  If reloc_override is not 0, use it instead
8011   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
8012   (in which case it is preserved).  */
8013
8014static int
8015encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
8016{
8017  if (!inst.operands[i].isreg)
8018    {
8019      /* PR 18256 */
8020      if (! inst.operands[0].isvec)
8021	{
8022	  inst.error = _("invalid co-processor operand");
8023	  return FAIL;
8024	}
8025      if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
8026	return SUCCESS;
8027    }
8028
8029  inst.instruction |= inst.operands[i].reg << 16;
8030
8031  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));
8032
8033  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
8034    {
8035      gas_assert (!inst.operands[i].writeback);
8036      if (!unind_ok)
8037	{
8038	  inst.error = _("instruction does not support unindexed addressing");
8039	  return FAIL;
8040	}
8041      inst.instruction |= inst.operands[i].imm;
8042      inst.instruction |= INDEX_UP;
8043      return SUCCESS;
8044    }
8045
8046  if (inst.operands[i].preind)
8047    inst.instruction |= PRE_INDEX;
8048
8049  if (inst.operands[i].writeback)
8050    {
8051      if (inst.operands[i].reg == REG_PC)
8052	{
8053	  inst.error = _("pc may not be used with write-back");
8054	  return FAIL;
8055	}
8056      if (!wb_ok)
8057	{
8058	  inst.error = _("instruction does not support writeback");
8059	  return FAIL;
8060	}
8061      inst.instruction |= WRITE_BACK;
8062    }
8063
8064  if (reloc_override)
8065    inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
8066  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
8067	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
8068	   && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
8069    {
8070      if (thumb_mode)
8071	inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
8072      else
8073	inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
8074    }
8075
8076  /* Prefer + for zero encoded value.  */
8077  if (!inst.operands[i].negative)
8078    inst.instruction |= INDEX_UP;
8079
8080  return SUCCESS;
8081}
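
/* Illustrative case: for "ldc p11, c0, [r2, #8]" the address operand is
   pre-indexed, so the code above puts r2 in bits 16-19 and sets
   PRE_INDEX and INDEX_UP; the byte offset (which must be a multiple of
   four) is left for the BFD_RELOC_ARM_CP_OFF_IMM fixup to insert.  */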
8082
8083/* Functions for instruction encoding, sorted by sub-architecture.
8084   First some generics; their names are taken from the conventional
8085   bit positions for register arguments in ARM format instructions.  */
8086
8087static void
8088do_noargs (void)
8089{
8090}
8091
8092static void
8093do_rd (void)
8094{
8095  inst.instruction |= inst.operands[0].reg << 12;
8096}
8097
8098static void
8099do_rd_rm (void)
8100{
8101  inst.instruction |= inst.operands[0].reg << 12;
8102  inst.instruction |= inst.operands[1].reg;
8103}
8104
8105static void
8106do_rm_rn (void)
8107{
8108  inst.instruction |= inst.operands[0].reg;
8109  inst.instruction |= inst.operands[1].reg << 16;
8110}
8111
8112static void
8113do_rd_rn (void)
8114{
8115  inst.instruction |= inst.operands[0].reg << 12;
8116  inst.instruction |= inst.operands[1].reg << 16;
8117}
8118
8119static void
8120do_rn_rd (void)
8121{
8122  inst.instruction |= inst.operands[0].reg << 16;
8123  inst.instruction |= inst.operands[1].reg << 12;
8124}
8125
8126static bfd_boolean
8127check_obsolete (const arm_feature_set *feature, const char *msg)
8128{
8129  if (ARM_CPU_IS_ANY (cpu_variant))
8130    {
8131      as_tsktsk ("%s", msg);
8132      return TRUE;
8133    }
8134  else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
8135    {
8136      as_bad ("%s", msg);
8137      return TRUE;
8138    }
8139
8140  return FALSE;
8141}
8142
8143static void
8144do_rd_rm_rn (void)
8145{
8146  unsigned Rn = inst.operands[2].reg;
8147  /* Enforce restrictions on SWP instruction.  */
8148  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
8149    {
8150      constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
8151		  _("Rn must not overlap other operands"));
8152
8153      /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
8154       */
8155      if (!check_obsolete (&arm_ext_v8,
8156			   _("swp{b} use is obsoleted for ARMv8 and later"))
8157	  && warn_on_deprecated
8158	  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
8159	as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
8160    }
8161
8162  inst.instruction |= inst.operands[0].reg << 12;
8163  inst.instruction |= inst.operands[1].reg;
8164  inst.instruction |= Rn << 16;
8165}
8166
8167static void
8168do_rd_rn_rm (void)
8169{
8170  inst.instruction |= inst.operands[0].reg << 12;
8171  inst.instruction |= inst.operands[1].reg << 16;
8172  inst.instruction |= inst.operands[2].reg;
8173}
8174
8175static void
8176do_rm_rd_rn (void)
8177{
8178  constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
8179  constraint (((inst.reloc.exp.X_op != O_constant
8180		&& inst.reloc.exp.X_op != O_illegal)
8181	       || inst.reloc.exp.X_add_number != 0),
8182	      BAD_ADDR_MODE);
8183  inst.instruction |= inst.operands[0].reg;
8184  inst.instruction |= inst.operands[1].reg << 12;
8185  inst.instruction |= inst.operands[2].reg << 16;
8186}
8187
8188static void
8189do_imm0 (void)
8190{
8191  inst.instruction |= inst.operands[0].imm;
8192}
8193
8194static void
8195do_rd_cpaddr (void)
8196{
8197  inst.instruction |= inst.operands[0].reg << 12;
8198  encode_arm_cp_address (1, TRUE, TRUE, 0);
8199}
8200
8201/* ARM instructions, in alphabetical order by function name (except
8202   that wrapper functions appear immediately after the function they
8203   wrap).  */
8204
8205/* This is a pseudo-op of the form "adr rd, label" to be converted
8206   into a relative address of the form "add rd, pc, #label-.-8".  */
8207
8208static void
8209do_adr (void)
8210{
8211  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */
8212
8213  /* Frag hacking will turn this into a sub instruction if the offset turns
8214     out to be negative.  */
8215  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
8216  inst.reloc.pc_rel = 1;
8217  inst.reloc.exp.X_add_number -= 8;
8218}
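
/* Illustrative fixup: if the label lies 12 bytes after the adr itself,
   the pc reads as ". + 8" at execute time, so the instruction ends up
   as "add rd, pc, #4".  */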
8219
8220/* This is a pseudo-op of the form "adrl rd, label" to be converted
8221   into a relative address of the form:
8222   add rd, pc, #low(label-.-8)
8223   add rd, rd, #high(label-.-8)  */
8224
8225static void
8226do_adrl (void)
8227{
8228  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */
8229
8230  /* Frag hacking will turn this into a sub instruction if the offset turns
8231     out to be negative.  */
8232  inst.reloc.type	       = BFD_RELOC_ARM_ADRL_IMMEDIATE;
8233  inst.reloc.pc_rel	       = 1;
8234  inst.size		       = INSN_SIZE * 2;
8235  inst.reloc.exp.X_add_number -= 8;
8236}
8237
8238static void
8239do_arit (void)
8240{
8241  if (!inst.operands[1].present)
8242    inst.operands[1].reg = inst.operands[0].reg;
8243  inst.instruction |= inst.operands[0].reg << 12;
8244  inst.instruction |= inst.operands[1].reg << 16;
8245  encode_arm_shifter_operand (2);
8246}
8247
8248static void
8249do_barrier (void)
8250{
8251  if (inst.operands[0].present)
8252    inst.instruction |= inst.operands[0].imm;
8253  else
8254    inst.instruction |= 0xf;
8255}
8256
8257static void
8258do_bfc (void)
8259{
8260  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8261  constraint (msb > 32, _("bit-field extends past end of register"));
8262  /* The instruction encoding stores the LSB and MSB,
8263     not the LSB and width.  */
8264  inst.instruction |= inst.operands[0].reg << 12;
8265  inst.instruction |= inst.operands[1].imm << 7;
8266  inst.instruction |= (msb - 1) << 16;
8267}
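
/* Illustrative encoding: "bfc r0, #8, #4" has lsb = 8 and width = 4,
   hence msb = 12; Rd goes in bits 12-15, the lsb (8) in bits 7-11 and
   msb - 1 (11) in bits 16-20.  */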
8268
8269static void
8270do_bfi (void)
8271{
8272  unsigned int msb;
8273
8274  /* #0 in second position is alternative syntax for bfc, which is
8275     the same instruction but with REG_PC in the Rm field.  */
8276  if (!inst.operands[1].isreg)
8277    inst.operands[1].reg = REG_PC;
8278
8279  msb = inst.operands[2].imm + inst.operands[3].imm;
8280  constraint (msb > 32, _("bit-field extends past end of register"));
8281  /* The instruction encoding stores the LSB and MSB,
8282     not the LSB and width.  */
8283  inst.instruction |= inst.operands[0].reg << 12;
8284  inst.instruction |= inst.operands[1].reg;
8285  inst.instruction |= inst.operands[2].imm << 7;
8286  inst.instruction |= (msb - 1) << 16;
8287}
8288
8289static void
8290do_bfx (void)
8291{
8292  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8293	      _("bit-field extends past end of register"));
8294  inst.instruction |= inst.operands[0].reg << 12;
8295  inst.instruction |= inst.operands[1].reg;
8296  inst.instruction |= inst.operands[2].imm << 7;
8297  inst.instruction |= (inst.operands[3].imm - 1) << 16;
8298}
8299
8300/* ARM V5 breakpoint instruction (argument parse)
8301     BKPT <16 bit unsigned immediate>
8302     Instruction is not conditional.
8303	The bit pattern given in insns[] has the COND_ALWAYS condition,
8304	and it is an error if the caller tried to override that.  */
8305
8306static void
8307do_bkpt (void)
8308{
8309  /* Top 12 of 16 bits to bits 19:8.  */
8310  inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
8311
8312  /* Bottom 4 of 16 bits to bits 3:0.  */
8313  inst.instruction |= inst.operands[0].imm & 0xf;
8314}
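
/* Illustrative encoding: "bkpt 0x1234" places 0x123 in bits 19:8 and
   0x4 in bits 3:0.  */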
8315
8316static void
8317encode_branch (int default_reloc)
8318{
8319  if (inst.operands[0].hasreloc)
8320    {
8321      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
8322		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
8323		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
8324      inst.reloc.type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
8325	? BFD_RELOC_ARM_PLT32
8326	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
8327    }
8328  else
8329    inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
8330  inst.reloc.pc_rel = 1;
8331}
8332
8333static void
8334do_branch (void)
8335{
8336#ifdef OBJ_ELF
8337  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
8338    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
8339  else
8340#endif
8341    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
8342}
8343
8344static void
8345do_bl (void)
8346{
8347#ifdef OBJ_ELF
8348  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
8349    {
8350      if (inst.cond == COND_ALWAYS)
8351	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
8352      else
8353	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
8354    }
8355  else
8356#endif
8357    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
8358}
8359
8360/* ARM V5 branch-link-exchange instruction (argument parse)
8361     BLX <target_addr>		ie BLX(1)
8362     BLX{<condition>} <Rm>	ie BLX(2)
8363   Unfortunately, there are two different opcodes for this mnemonic.
8364   So, the insns[].value is not used, and the code here zaps values
8365	into inst.instruction.
8366   Also, the <target_addr> can be 25 bits, hence has its own reloc.  */
8367
8368static void
8369do_blx (void)
8370{
8371  if (inst.operands[0].isreg)
8372    {
8373      /* Arg is a register; the opcode provided by insns[] is correct.
8374	 It is not illegal to do "blx pc", just useless.  */
8375      if (inst.operands[0].reg == REG_PC)
8376	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
8377
8378      inst.instruction |= inst.operands[0].reg;
8379    }
8380  else
8381    {
8382      /* Arg is an address; this instruction cannot be executed
8383	 conditionally, and the opcode must be adjusted.
8384	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
8385	 where we generate a BFD_RELOC_ARM_PCREL_CALL instead.  */
8386      constraint (inst.cond != COND_ALWAYS, BAD_COND);
8387      inst.instruction = 0xfa000000;
8388      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
8389    }
8390}
8391
8392static void
8393do_bx (void)
8394{
8395  bfd_boolean want_reloc;
8396
8397  if (inst.operands[0].reg == REG_PC)
8398    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
8399
8400  inst.instruction |= inst.operands[0].reg;
8401  /* Output R_ARM_V4BX relocations if this is an EABI object that looks like
8402     it is for ARMv4t or earlier.  */
8403  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
8404  if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
8405      want_reloc = TRUE;
8406
8407#ifdef OBJ_ELF
8408  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
8409#endif
8410    want_reloc = FALSE;
8411
8412  if (want_reloc)
8413    inst.reloc.type = BFD_RELOC_ARM_V4BX;
8414}
8415
8416
8417/* ARM v5TEJ.  Jump to Jazelle code.  */
8418
8419static void
8420do_bxj (void)
8421{
8422  if (inst.operands[0].reg == REG_PC)
8423    as_tsktsk (_("use of r15 in bxj is not really useful"));
8424
8425  inst.instruction |= inst.operands[0].reg;
8426}
8427
8428/* Co-processor data operation:
8429      CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8430      CDP2	<coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}	 */
8431static void
8432do_cdp (void)
8433{
8434  inst.instruction |= inst.operands[0].reg << 8;
8435  inst.instruction |= inst.operands[1].imm << 20;
8436  inst.instruction |= inst.operands[2].reg << 12;
8437  inst.instruction |= inst.operands[3].reg << 16;
8438  inst.instruction |= inst.operands[4].reg;
8439  inst.instruction |= inst.operands[5].imm << 5;
8440}
8441
8442static void
8443do_cmp (void)
8444{
8445  inst.instruction |= inst.operands[0].reg << 16;
8446  encode_arm_shifter_operand (1);
8447}
8448
8449/* Transfer between coprocessor and ARM registers.
8450   MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8451   MRC2
8452   MCR{cond}
8453   MCR2
8454
8455   No special properties.  */
8456
8457struct deprecated_coproc_regs_s
8458{
8459  unsigned cp;
8460  int opc1;
8461  unsigned crn;
8462  unsigned crm;
8463  int opc2;
8464  arm_feature_set deprecated;
8465  arm_feature_set obsoleted;
8466  const char *dep_msg;
8467  const char *obs_msg;
8468};
8469
8470#define DEPR_ACCESS_V8 \
8471  N_("This coprocessor register access is deprecated in ARMv8")
8472
8473/* Table of all deprecated coprocessor registers.  */
8474static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
8475{
8476    {15, 0, 7, 10, 5,					/* CP15DMB.  */
8477     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
8478     DEPR_ACCESS_V8, NULL},
8479    {15, 0, 7, 10, 4,					/* CP15DSB.  */
8480     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
8481     DEPR_ACCESS_V8, NULL},
8482    {15, 0, 7,  5, 4,					/* CP15ISB.  */
8483     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
8484     DEPR_ACCESS_V8, NULL},
8485    {14, 6, 1,  0, 0,					/* TEEHBR.  */
8486     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
8487     DEPR_ACCESS_V8, NULL},
8488    {14, 6, 0,  0, 0,					/* TEECR.  */
8489     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
8490     DEPR_ACCESS_V8, NULL},
8491};
8492
8493#undef DEPR_ACCESS_V8
8494
8495static const size_t deprecated_coproc_reg_count =
8496  sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
8497
8498static void
8499do_co_reg (void)
8500{
8501  unsigned Rd;
8502  size_t i;
8503
8504  Rd = inst.operands[2].reg;
8505  if (thumb_mode)
8506    {
8507      if (inst.instruction == 0xee000010
8508	  || inst.instruction == 0xfe000010)
8509	/* MCR, MCR2  */
8510	reject_bad_reg (Rd);
8511      else
8512	/* MRC, MRC2  */
8513	constraint (Rd == REG_SP, BAD_SP);
8514    }
8515  else
8516    {
8517      /* MCR */
8518      if (inst.instruction == 0xe000010)
8519	constraint (Rd == REG_PC, BAD_PC);
8520    }
8521
8522    for (i = 0; i < deprecated_coproc_reg_count; ++i)
8523      {
8524	const struct deprecated_coproc_regs_s *r =
8525	  deprecated_coproc_regs + i;
8526
8527	if (inst.operands[0].reg == r->cp
8528	    && inst.operands[1].imm == r->opc1
8529	    && inst.operands[3].reg == r->crn
8530	    && inst.operands[4].reg == r->crm
8531	    && inst.operands[5].imm == r->opc2)
8532	  {
8533	    if (! ARM_CPU_IS_ANY (cpu_variant)
8534		&& warn_on_deprecated
8535		&& ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
8536	      as_tsktsk ("%s", r->dep_msg);
8537	  }
8538      }
8539
8540  inst.instruction |= inst.operands[0].reg << 8;
8541  inst.instruction |= inst.operands[1].imm << 21;
8542  inst.instruction |= Rd << 12;
8543  inst.instruction |= inst.operands[3].reg << 16;
8544  inst.instruction |= inst.operands[4].reg;
8545  inst.instruction |= inst.operands[5].imm << 5;
8546}
8547
8548/* Transfer between coprocessor register and pair of ARM registers.
8549   MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8550   MCRR2
8551   MRRC{cond}
8552   MRRC2
8553
8554   Two XScale instructions are special cases of these:
8555
8556     MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8557     MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8558
8559   Result unpredictable if Rd or Rn is R15.  */
8560
8561static void
8562do_co_reg2c (void)
8563{
8564  unsigned Rd, Rn;
8565
8566  Rd = inst.operands[2].reg;
8567  Rn = inst.operands[3].reg;
8568
8569  if (thumb_mode)
8570    {
8571      reject_bad_reg (Rd);
8572      reject_bad_reg (Rn);
8573    }
8574  else
8575    {
8576      constraint (Rd == REG_PC, BAD_PC);
8577      constraint (Rn == REG_PC, BAD_PC);
8578    }
8579
8580  inst.instruction |= inst.operands[0].reg << 8;
8581  inst.instruction |= inst.operands[1].imm << 4;
8582  inst.instruction |= Rd << 12;
8583  inst.instruction |= Rn << 16;
8584  inst.instruction |= inst.operands[4].reg;
8585}
8586
8587static void
8588do_cpsi (void)
8589{
8590  inst.instruction |= inst.operands[0].imm << 6;
8591  if (inst.operands[1].present)
8592    {
8593      inst.instruction |= CPSI_MMOD;
8594      inst.instruction |= inst.operands[1].imm;
8595    }
8596}
8597
8598static void
8599do_dbg (void)
8600{
8601  inst.instruction |= inst.operands[0].imm;
8602}
8603
8604static void
8605do_div (void)
8606{
8607  unsigned Rd, Rn, Rm;
8608
8609  Rd = inst.operands[0].reg;
8610  Rn = (inst.operands[1].present
8611	? inst.operands[1].reg : Rd);
8612  Rm = inst.operands[2].reg;
8613
8614  constraint ((Rd == REG_PC), BAD_PC);
8615  constraint ((Rn == REG_PC), BAD_PC);
8616  constraint ((Rm == REG_PC), BAD_PC);
8617
8618  inst.instruction |= Rd << 16;
8619  inst.instruction |= Rn << 0;
8620  inst.instruction |= Rm << 8;
8621}
8622
8623static void
8624do_it (void)
8625{
8626  /* There is no IT instruction in ARM mode.  We
8627     process it to do the validation as if in
8628     thumb mode, just in case the code gets
8629     assembled for thumb using the unified syntax.  */
8630
8631  inst.size = 0;
8632  if (unified_syntax)
8633    {
8634      set_it_insn_type (IT_INSN);
8635      now_it.mask = (inst.instruction & 0xf) | 0x10;
8636      now_it.cc = inst.operands[0].imm;
8637    }
8638}
8639
8640/* If there is only one register in the register list,
8641   then return its register number.  Otherwise return -1.  */
8642static int
8643only_one_reg_in_list (int range)
8644{
8645  int i = ffs (range) - 1;
8646  return (i > 15 || range != (1 << i)) ? -1 : i;
8647}
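
/* Illustrative values: only_one_reg_in_list (0x0010) == 4 (r4 alone),
   while only_one_reg_in_list (0x0014) == -1 because the list holds
   more than one register.  */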
8648
8649static void
8650encode_ldmstm(int from_push_pop_mnem)
8651{
8652  int base_reg = inst.operands[0].reg;
8653  int range = inst.operands[1].imm;
8654  int one_reg;
8655
8656  inst.instruction |= base_reg << 16;
8657  inst.instruction |= range;
8658
8659  if (inst.operands[1].writeback)
8660    inst.instruction |= LDM_TYPE_2_OR_3;
8661
8662  if (inst.operands[0].writeback)
8663    {
8664      inst.instruction |= WRITE_BACK;
8665      /* Check for unpredictable uses of writeback.  */
8666      if (inst.instruction & LOAD_BIT)
8667	{
8668	  /* Not allowed in LDM type 2.	 */
8669	  if ((inst.instruction & LDM_TYPE_2_OR_3)
8670	      && ((range & (1 << REG_PC)) == 0))
8671	    as_warn (_("writeback of base register is UNPREDICTABLE"));
8672	  /* Only allowed if base reg not in list for other types.  */
8673	  else if (range & (1 << base_reg))
8674	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
8675	}
8676      else /* STM.  */
8677	{
8678	  /* Not allowed for type 2.  */
8679	  if (inst.instruction & LDM_TYPE_2_OR_3)
8680	    as_warn (_("writeback of base register is UNPREDICTABLE"));
8681	  /* Only allowed if base reg not in list, or first in list.  */
8682	  else if ((range & (1 << base_reg))
8683		   && (range & ((1 << base_reg) - 1)))
8684	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
8685	}
8686    }
8687
8688  /* If PUSH/POP has only one register, then use the A2 encoding.  */
8689  one_reg = only_one_reg_in_list (range);
8690  if (from_push_pop_mnem && one_reg >= 0)
8691    {
8692      int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;
8693
8694      inst.instruction &= A_COND_MASK;
8695      inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
8696      inst.instruction |= one_reg << 12;
8697    }
8698}
8699
8700static void
8701do_ldmstm (void)
8702{
8703  encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
8704}
8705
8706/* ARMv5TE load-consecutive (argument parse)
8707   Mode is like LDRH.
8708
8709     LDRccD R, mode
8710     STRccD R, mode.  */
8711
8712static void
8713do_ldrd (void)
8714{
8715  constraint (inst.operands[0].reg % 2 != 0,
8716	      _("first transfer register must be even"));
8717  constraint (inst.operands[1].present
8718	      && inst.operands[1].reg != inst.operands[0].reg + 1,
8719	      _("can only transfer two consecutive registers"));
8720  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
8721  constraint (!inst.operands[2].isreg, _("'[' expected"));
8722
8723  if (!inst.operands[1].present)
8724    inst.operands[1].reg = inst.operands[0].reg + 1;
8725
8726  /* encode_arm_addr_mode_3 will diagnose overlap between the base
8727     register and the first register written; we have to diagnose
8728     overlap between the base and the second register written here.  */
8729
8730  if (inst.operands[2].reg == inst.operands[1].reg
8731      && (inst.operands[2].writeback || inst.operands[2].postind))
8732    as_warn (_("base register written back, and overlaps "
8733	       "second transfer register"));
8734
8735  if (!(inst.instruction & V4_STR_BIT))
8736    {
8737      /* For an index-register load, the index register must not overlap the
8738	destination (even if not write-back).  */
8739      if (inst.operands[2].immisreg
8740	      && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
8741	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
8742	as_warn (_("index register overlaps transfer register"));
8743    }
8744  inst.instruction |= inst.operands[0].reg << 12;
8745  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
8746}
8747
8748static void
8749do_ldrex (void)
8750{
8751  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
8752	      || inst.operands[1].postind || inst.operands[1].writeback
8753	      || inst.operands[1].immisreg || inst.operands[1].shifted
8754	      || inst.operands[1].negative
8755	      /* This can arise if the programmer has written
8756		   strex rN, rM, foo
8757		 or if they have mistakenly used a register name as the last
8758		 operand,  eg:
8759		   strex rN, rM, rX
8760		 It is very difficult to distinguish between these two cases
8761		 because "rX" might actually be a label. ie the register
8762		 name has been occluded by a symbol of the same name. So we
8763		 just generate a general 'bad addressing mode' type error
8764		 message and leave it up to the programmer to discover the
8765		 true cause and fix their mistake.  */
8766	      || (inst.operands[1].reg == REG_PC),
8767	      BAD_ADDR_MODE);
8768
8769  constraint (inst.reloc.exp.X_op != O_constant
8770	      || inst.reloc.exp.X_add_number != 0,
8771	      _("offset must be zero in ARM encoding"));
8772
8773  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
8774
8775  inst.instruction |= inst.operands[0].reg << 12;
8776  inst.instruction |= inst.operands[1].reg << 16;
8777  inst.reloc.type = BFD_RELOC_UNUSED;
8778}
8779
8780static void
8781do_ldrexd (void)
8782{
8783  constraint (inst.operands[0].reg % 2 != 0,
8784	      _("even register required"));
8785  constraint (inst.operands[1].present
8786	      && inst.operands[1].reg != inst.operands[0].reg + 1,
8787	      _("can only load two consecutive registers"));
8788  /* If op 1 were present and equal to PC, this function wouldn't
8789     have been called in the first place.  */
8790  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
8791
8792  inst.instruction |= inst.operands[0].reg << 12;
8793  inst.instruction |= inst.operands[2].reg << 16;
8794}
8795
8796/* In both ARM and thumb state 'ldr pc, [pc, #imm]' with an immediate
8797   which is not a multiple of four is UNPREDICTABLE.  */
8798static void
8799check_ldr_r15_aligned (void)
8800{
8801  constraint (!(inst.operands[1].immisreg)
8802	      && (inst.operands[0].reg == REG_PC
8803	      && inst.operands[1].reg == REG_PC
8804	      && (inst.reloc.exp.X_add_number & 0x3)),
8805	      _("ldr to register 15 must be 4-byte aligned"));
8806}
8807
8808static void
8809do_ldst (void)
8810{
8811  inst.instruction |= inst.operands[0].reg << 12;
8812  if (!inst.operands[1].isreg)
8813    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
8814      return;
8815  encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
8816  check_ldr_r15_aligned ();
8817}
8818
8819static void
8820do_ldstt (void)
8821{
8822  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
8823     reject [Rn,...].  */
8824  if (inst.operands[1].preind)
8825    {
8826      constraint (inst.reloc.exp.X_op != O_constant
8827		  || inst.reloc.exp.X_add_number != 0,
8828		  _("this instruction requires a post-indexed address"));
8829
8830      inst.operands[1].preind = 0;
8831      inst.operands[1].postind = 1;
8832      inst.operands[1].writeback = 1;
8833    }
8834  inst.instruction |= inst.operands[0].reg << 12;
8835  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
8836}
8837
8838/* Halfword and signed-byte load/store operations.  */
8839
8840static void
8841do_ldstv4 (void)
8842{
8843  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
8844  inst.instruction |= inst.operands[0].reg << 12;
8845  if (!inst.operands[1].isreg)
8846    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
8847      return;
8848  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
8849}
8850
8851static void
8852do_ldsttv4 (void)
8853{
8854  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
8855     reject [Rn,...].  */
8856  if (inst.operands[1].preind)
8857    {
8858      constraint (inst.reloc.exp.X_op != O_constant
8859		  || inst.reloc.exp.X_add_number != 0,
8860		  _("this instruction requires a post-indexed address"));
8861
8862      inst.operands[1].preind = 0;
8863      inst.operands[1].postind = 1;
8864      inst.operands[1].writeback = 1;
8865    }
8866  inst.instruction |= inst.operands[0].reg << 12;
8867  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
8868}
8869
8870/* Co-processor register load/store.
8871   Format: <LDC|STC>{cond}[L] CP#,CRd,<address>	 */
8872static void
8873do_lstc (void)
8874{
8875  inst.instruction |= inst.operands[0].reg << 8;
8876  inst.instruction |= inst.operands[1].reg << 12;
8877  encode_arm_cp_address (2, TRUE, TRUE, 0);
8878}
8879
8880static void
8881do_mlas (void)
8882{
8883  /* This restriction does not apply to mls (nor to mla in v6 or later).  */
8884  if (inst.operands[0].reg == inst.operands[1].reg
8885      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
8886      && !(inst.instruction & 0x00400000))
8887    as_tsktsk (_("Rd and Rm should be different in mla"));
8888
8889  inst.instruction |= inst.operands[0].reg << 16;
8890  inst.instruction |= inst.operands[1].reg;
8891  inst.instruction |= inst.operands[2].reg << 8;
8892  inst.instruction |= inst.operands[3].reg << 12;
8893}
8894
8895static void
8896do_mov (void)
8897{
8898  inst.instruction |= inst.operands[0].reg << 12;
8899  encode_arm_shifter_operand (1);
8900}
8901
8902/* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>.	 */
8903static void
8904do_mov16 (void)
8905{
8906  bfd_vma imm;
8907  bfd_boolean top;
8908
8909  top = (inst.instruction & 0x00400000) != 0;
8910  constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
8911	      _(":lower16: not allowed in this instruction"));
8912  constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
8913	      _(":upper16: not allowed in this instruction"));
8914  inst.instruction |= inst.operands[0].reg << 12;
8915  if (inst.reloc.type == BFD_RELOC_UNUSED)
8916    {
8917      imm = inst.reloc.exp.X_add_number;
8918      /* The value is in two pieces: 0:11, 16:19.  */
8919      inst.instruction |= (imm & 0x00000fff);
8920      inst.instruction |= (imm & 0x0000f000) << 4;
8921    }
8922}
8923
8924static int
8925do_vfp_nsyn_mrs (void)
8926{
8927  if (inst.operands[0].isvec)
8928    {
8929      if (inst.operands[1].reg != 1)
8930	first_error (_("operand 1 must be FPSCR"));
8931      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
8932      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
8933      do_vfp_nsyn_opcode ("fmstat");
8934    }
8935  else if (inst.operands[1].isvec)
8936    do_vfp_nsyn_opcode ("fmrx");
8937  else
8938    return FAIL;
8939
8940  return SUCCESS;
8941}
8942
8943static int
8944do_vfp_nsyn_msr (void)
8945{
8946  if (inst.operands[0].isvec)
8947    do_vfp_nsyn_opcode ("fmxr");
8948  else
8949    return FAIL;
8950
8951  return SUCCESS;
8952}
8953
8954static void
8955do_vmrs (void)
8956{
8957  unsigned Rt = inst.operands[0].reg;
8958
8959  if (thumb_mode && Rt == REG_SP)
8960    {
8961      inst.error = BAD_SP;
8962      return;
8963    }
8964
8965  /* APSR_ sets isvec. All other refs to PC are illegal.  */
8966  if (!inst.operands[0].isvec && Rt == REG_PC)
8967    {
8968      inst.error = BAD_PC;
8969      return;
8970    }
8971
8972  /* If we get through parsing the register name, we just insert the number
8973     generated into the instruction without further validation.  */
8974  inst.instruction |= (inst.operands[1].reg << 16);
8975  inst.instruction |= (Rt << 12);
8976}
8977
8978static void
8979do_vmsr (void)
8980{
8981  unsigned Rt = inst.operands[1].reg;
8982
8983  if (thumb_mode)
8984    reject_bad_reg (Rt);
8985  else if (Rt == REG_PC)
8986    {
8987      inst.error = BAD_PC;
8988      return;
8989    }
8990
8991  /* If we get through parsing the register name, we just insert the number
8992     generated into the instruction without further validation.  */
8993  inst.instruction |= (inst.operands[0].reg << 16);
8994  inst.instruction |= (Rt << 12);
8995}
8996
8997static void
8998do_mrs (void)
8999{
9000  unsigned br;
9001
9002  if (do_vfp_nsyn_mrs () == SUCCESS)
9003    return;
9004
9005  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
9006  inst.instruction |= inst.operands[0].reg << 12;
9007
9008  if (inst.operands[1].isreg)
9009    {
9010      br = inst.operands[1].reg;
9011      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf000))
9012	as_bad (_("bad register for mrs"));
9013    }
9014  else
9015    {
9016      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
9017      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
9018		  != (PSR_c|PSR_f),
9019		  _("'APSR', 'CPSR' or 'SPSR' expected"));
9020      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
9021    }
9022
9023  inst.instruction |= br;
9024}
9025
9026/* Two possible forms:
9027      "{C|S}PSR_<field>, Rm",
9028      "{C|S}PSR_f, #expression".  */
9029
9030static void
9031do_msr (void)
9032{
9033  if (do_vfp_nsyn_msr () == SUCCESS)
9034    return;
9035
9036  inst.instruction |= inst.operands[0].imm;
9037  if (inst.operands[1].isreg)
9038    inst.instruction |= inst.operands[1].reg;
9039  else
9040    {
9041      inst.instruction |= INST_IMMEDIATE;
9042      inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
9043      inst.reloc.pc_rel = 0;
9044    }
9045}
9046
9047static void
9048do_mul (void)
9049{
9050  constraint (inst.operands[2].reg == REG_PC, BAD_PC);
9051
9052  if (!inst.operands[2].present)
9053    inst.operands[2].reg = inst.operands[0].reg;
9054  inst.instruction |= inst.operands[0].reg << 16;
9055  inst.instruction |= inst.operands[1].reg;
9056  inst.instruction |= inst.operands[2].reg << 8;
9057
9058  if (inst.operands[0].reg == inst.operands[1].reg
9059      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
9060    as_tsktsk (_("Rd and Rm should be different in mul"));
9061}
9062
9063/* Long Multiply Parser
9064   UMULL RdLo, RdHi, Rm, Rs
9065   SMULL RdLo, RdHi, Rm, Rs
9066   UMLAL RdLo, RdHi, Rm, Rs
9067   SMLAL RdLo, RdHi, Rm, Rs.  */
9068
9069static void
9070do_mull (void)
9071{
9072  inst.instruction |= inst.operands[0].reg << 12;
9073  inst.instruction |= inst.operands[1].reg << 16;
9074  inst.instruction |= inst.operands[2].reg;
9075  inst.instruction |= inst.operands[3].reg << 8;
9076
9077  /* rdhi and rdlo must be different.  */
9078  if (inst.operands[0].reg == inst.operands[1].reg)
9079    as_tsktsk (_("rdhi and rdlo must be different"));
9080
9081  /* rdhi, rdlo and rm must all be different before armv6.  */
9082  if ((inst.operands[0].reg == inst.operands[2].reg
9083      || inst.operands[1].reg == inst.operands[2].reg)
9084      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
9085    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
9086}
9087
9088static void
9089do_nop (void)
9090{
9091  if (inst.operands[0].present
9092      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
9093    {
9094      /* Architectural NOP hints are CPSR sets with no bits selected.  */
9095      inst.instruction &= 0xf0000000;
9096      inst.instruction |= 0x0320f000;
9097      if (inst.operands[0].present)
9098	inst.instruction |= inst.operands[0].imm;
9099    }
9100}
9101
9102/* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
9103   PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
9104   Condition defaults to COND_ALWAYS.
9105   Error if Rd, Rn or Rm are R15.  */
9106
9107static void
9108do_pkhbt (void)
9109{
9110  inst.instruction |= inst.operands[0].reg << 12;
9111  inst.instruction |= inst.operands[1].reg << 16;
9112  inst.instruction |= inst.operands[2].reg;
9113  if (inst.operands[3].present)
9114    encode_arm_shift (3);
9115}
9116
9117/* ARM V6 PKHTB (Argument Parse).  */
9118
9119static void
9120do_pkhtb (void)
9121{
9122  if (!inst.operands[3].present)
9123    {
9124      /* If the shift specifier is omitted, turn the instruction
9125	 into pkhbt rd, rm, rn. */
9126      inst.instruction &= 0xfff00010;
9127      inst.instruction |= inst.operands[0].reg << 12;
9128      inst.instruction |= inst.operands[1].reg;
9129      inst.instruction |= inst.operands[2].reg << 16;
9130    }
9131  else
9132    {
9133      inst.instruction |= inst.operands[0].reg << 12;
9134      inst.instruction |= inst.operands[1].reg << 16;
9135      inst.instruction |= inst.operands[2].reg;
9136      encode_arm_shift (3);
9137    }
9138}
9139
9140/* ARMv5TE: Preload-Cache
9141   MP Extensions: Preload for write
9142
9143    PLD(W) <addr_mode>
9144
9145  Syntactically, like LDR with B=1, W=0, L=1.  */
9146
9147static void
9148do_pld (void)
9149{
9150  constraint (!inst.operands[0].isreg,
9151	      _("'[' expected after PLD mnemonic"));
9152  constraint (inst.operands[0].postind,
9153	      _("post-indexed expression used in preload instruction"));
9154  constraint (inst.operands[0].writeback,
9155	      _("writeback used in preload instruction"));
9156  constraint (!inst.operands[0].preind,
9157	      _("unindexed addressing used in preload instruction"));
9158  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
9159}
9160
9161/* ARMv7: PLI <addr_mode>  */
9162static void
9163do_pli (void)
9164{
9165  constraint (!inst.operands[0].isreg,
9166	      _("'[' expected after PLI mnemonic"));
9167  constraint (inst.operands[0].postind,
9168	      _("post-indexed expression used in preload instruction"));
9169  constraint (inst.operands[0].writeback,
9170	      _("writeback used in preload instruction"));
9171  constraint (!inst.operands[0].preind,
9172	      _("unindexed addressing used in preload instruction"));
9173  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
9174  inst.instruction &= ~PRE_INDEX;
9175}
9176
9177static void
9178do_push_pop (void)
9179{
9180  constraint (inst.operands[0].writeback,
9181	      _("push/pop do not support {reglist}^"));
9182  inst.operands[1] = inst.operands[0];
9183  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
9184  inst.operands[0].isreg = 1;
9185  inst.operands[0].writeback = 1;
9186  inst.operands[0].reg = REG_SP;
9187  encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
9188}
9189
9190/* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9191   word at the specified address and the following word
9192   respectively.
9193   Unconditionally executed.
9194   Error if Rn is R15.	*/
9195
9196static void
9197do_rfe (void)
9198{
9199  inst.instruction |= inst.operands[0].reg << 16;
9200  if (inst.operands[0].writeback)
9201    inst.instruction |= WRITE_BACK;
9202}
9203
9204/* ARM V6 ssat (argument parse).  */
9205
9206static void
9207do_ssat (void)
9208{
9209  inst.instruction |= inst.operands[0].reg << 12;
9210  inst.instruction |= (inst.operands[1].imm - 1) << 16;
9211  inst.instruction |= inst.operands[2].reg;
9212
9213  if (inst.operands[3].present)
9214    encode_arm_shift (3);
9215}
9216
9217/* ARM V6 usat (argument parse).  */
9218
9219static void
9220do_usat (void)
9221{
9222  inst.instruction |= inst.operands[0].reg << 12;
9223  inst.instruction |= inst.operands[1].imm << 16;
9224  inst.instruction |= inst.operands[2].reg;
9225
9226  if (inst.operands[3].present)
9227    encode_arm_shift (3);
9228}
9229
9230/* ARM V6 ssat16 (argument parse).  */
9231
9232static void
9233do_ssat16 (void)
9234{
9235  inst.instruction |= inst.operands[0].reg << 12;
9236  inst.instruction |= ((inst.operands[1].imm - 1) << 16);
9237  inst.instruction |= inst.operands[2].reg;
9238}
9239
9240static void
9241do_usat16 (void)
9242{
9243  inst.instruction |= inst.operands[0].reg << 12;
9244  inst.instruction |= inst.operands[1].imm << 16;
9245  inst.instruction |= inst.operands[2].reg;
9246}
9247
9248/* ARM V6 SETEND (argument parse).  Sets the E bit in the CPSR while
9249   preserving the other bits.
9250
9251   setend <endian_specifier>, where <endian_specifier> is either
9252   BE or LE.  */
9253
9254static void
9255do_setend (void)
9256{
9257  if (warn_on_deprecated
9258      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
9259      as_tsktsk (_("setend use is deprecated for ARMv8"));
9260
9261  if (inst.operands[0].imm)
9262    inst.instruction |= 0x200;
9263}
9264
9265static void
9266do_shift (void)
9267{
9268  unsigned int Rm = (inst.operands[1].present
9269		     ? inst.operands[1].reg
9270		     : inst.operands[0].reg);
9271
9272  inst.instruction |= inst.operands[0].reg << 12;
9273  inst.instruction |= Rm;
9274  if (inst.operands[2].isreg)  /* Rd, {Rm,} Rs */
9275    {
9276      inst.instruction |= inst.operands[2].reg << 8;
9277      inst.instruction |= SHIFT_BY_REG;
9278      /* PR 12854: Error on extraneous shifts.  */
9279      constraint (inst.operands[2].shifted,
9280		  _("extraneous shift as part of operand to shift insn"));
9281    }
9282  else
9283    inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
9284}
9285
9286static void
9287do_smc (void)
9288{
9289  inst.reloc.type = BFD_RELOC_ARM_SMC;
9290  inst.reloc.pc_rel = 0;
9291}
9292
9293static void
9294do_hvc (void)
9295{
9296  inst.reloc.type = BFD_RELOC_ARM_HVC;
9297  inst.reloc.pc_rel = 0;
9298}
9299
9300static void
9301do_swi (void)
9302{
9303  inst.reloc.type = BFD_RELOC_ARM_SWI;
9304  inst.reloc.pc_rel = 0;
9305}
9306
9307static void
9308do_setpan (void)
9309{
9310  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
9311	      _("selected processor does not support SETPAN instruction"));
9312
9313  inst.instruction |= ((inst.operands[0].imm & 1) << 9);
9314}
9315
9316static void
9317do_t_setpan (void)
9318{
9319  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
9320	      _("selected processor does not support SETPAN instruction"));
9321
9322  inst.instruction |= (inst.operands[0].imm << 3);
9323}
9324
9325/* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9326   SMLAxy{cond} Rd,Rm,Rs,Rn
9327   SMLAWy{cond} Rd,Rm,Rs,Rn
9328   Error if any register is R15.  */
9329
9330static void
9331do_smla (void)
9332{
9333  inst.instruction |= inst.operands[0].reg << 16;
9334  inst.instruction |= inst.operands[1].reg;
9335  inst.instruction |= inst.operands[2].reg << 8;
9336  inst.instruction |= inst.operands[3].reg << 12;
9337}
9338
9339/* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9340   SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9341   Error if any register is R15.
9342   Warning if Rdlo == Rdhi.  */
9343
9344static void
9345do_smlal (void)
9346{
9347  inst.instruction |= inst.operands[0].reg << 12;
9348  inst.instruction |= inst.operands[1].reg << 16;
9349  inst.instruction |= inst.operands[2].reg;
9350  inst.instruction |= inst.operands[3].reg << 8;
9351
9352  if (inst.operands[0].reg == inst.operands[1].reg)
9353    as_tsktsk (_("rdhi and rdlo must be different"));
9354}
9355
9356/* ARM V5E (El Segundo) signed-multiply (argument parse)
9357   SMULxy{cond} Rd,Rm,Rs
9358   Error if any register is R15.  */
9359
9360static void
9361do_smul (void)
9362{
9363  inst.instruction |= inst.operands[0].reg << 16;
9364  inst.instruction |= inst.operands[1].reg;
9365  inst.instruction |= inst.operands[2].reg << 8;
9366}
9367
9368/* ARM V6 srs (argument parse).  The variable fields in the encoding are
9369   the same for both ARM and Thumb-2.  */
9370
9371static void
9372do_srs (void)
9373{
9374  int reg;
9375
9376  if (inst.operands[0].present)
9377    {
9378      reg = inst.operands[0].reg;
9379      constraint (reg != REG_SP, _("SRS base register must be r13"));
9380    }
9381  else
9382    reg = REG_SP;
9383
9384  inst.instruction |= reg << 16;
9385  inst.instruction |= inst.operands[1].imm;
9386  if (inst.operands[0].writeback || inst.operands[1].writeback)
9387    inst.instruction |= WRITE_BACK;
9388}
9389
9390/* ARM V6 strex (argument parse).  */
9391
9392static void
9393do_strex (void)
9394{
9395  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
9396	      || inst.operands[2].postind || inst.operands[2].writeback
9397	      || inst.operands[2].immisreg || inst.operands[2].shifted
9398	      || inst.operands[2].negative
9399	      /* See comment in do_ldrex().  */
9400	      || (inst.operands[2].reg == REG_PC),
9401	      BAD_ADDR_MODE);
9402
9403  constraint (inst.operands[0].reg == inst.operands[1].reg
9404	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
9405
9406  constraint (inst.reloc.exp.X_op != O_constant
9407	      || inst.reloc.exp.X_add_number != 0,
9408	      _("offset must be zero in ARM encoding"));
9409
9410  inst.instruction |= inst.operands[0].reg << 12;
9411  inst.instruction |= inst.operands[1].reg;
9412  inst.instruction |= inst.operands[2].reg << 16;
9413  inst.reloc.type = BFD_RELOC_UNUSED;
9414}
9415
9416static void
9417do_t_strexbh (void)
9418{
9419  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
9420	      || inst.operands[2].postind || inst.operands[2].writeback
9421	      || inst.operands[2].immisreg || inst.operands[2].shifted
9422	      || inst.operands[2].negative,
9423	      BAD_ADDR_MODE);
9424
9425  constraint (inst.operands[0].reg == inst.operands[1].reg
9426	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
9427
9428  do_rm_rd_rn ();
9429}
9430
9431static void
9432do_strexd (void)
9433{
9434  constraint (inst.operands[1].reg % 2 != 0,
9435	      _("even register required"));
9436  constraint (inst.operands[2].present
9437	      && inst.operands[2].reg != inst.operands[1].reg + 1,
9438	      _("can only store two consecutive registers"));
9439  /* If op 2 were present and equal to PC, this function wouldn't
9440     have been called in the first place.  */
9441  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));
9442
9443  constraint (inst.operands[0].reg == inst.operands[1].reg
9444	      || inst.operands[0].reg == inst.operands[1].reg + 1
9445	      || inst.operands[0].reg == inst.operands[3].reg,
9446	      BAD_OVERLAP);
9447
9448  inst.instruction |= inst.operands[0].reg << 12;
9449  inst.instruction |= inst.operands[1].reg;
9450  inst.instruction |= inst.operands[3].reg << 16;
9451}
9452
9453/* ARM V8 STRL.  */
9454static void
9455do_stlex (void)
9456{
9457  constraint (inst.operands[0].reg == inst.operands[1].reg
9458	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
9459
9460  do_rd_rm_rn ();
9461}
9462
9463static void
9464do_t_stlex (void)
9465{
9466  constraint (inst.operands[0].reg == inst.operands[1].reg
9467	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
9468
9469  do_rm_rd_rn ();
9470}
9471
9472/* ARM V6 SXTAH extracts a 16-bit value from a register, sign
9473   extends it to 32-bits, and adds the result to a value in another
9474   register.  You can specify a rotation by 0, 8, 16, or 24 bits
9475   before extracting the 16-bit value.
9476   SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
9477   Condition defaults to COND_ALWAYS.
9478   Error if any register uses R15.  */
9479
9480static void
9481do_sxtah (void)
9482{
9483  inst.instruction |= inst.operands[0].reg << 12;
9484  inst.instruction |= inst.operands[1].reg << 16;
9485  inst.instruction |= inst.operands[2].reg;
9486  inst.instruction |= inst.operands[3].imm << 10;
9487}
9488
9489/* ARM V6 SXTH.
9490
9491   SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
9492   Condition defaults to COND_ALWAYS.
9493   Error if any register uses R15.  */
9494
9495static void
9496do_sxth (void)
9497{
9498  inst.instruction |= inst.operands[0].reg << 12;
9499  inst.instruction |= inst.operands[1].reg;
9500  inst.instruction |= inst.operands[2].imm << 10;
9501}
9502
9503/* VFP instructions.  In a logical order: SP variant first, monad
9504   before dyad, arithmetic then move then load/store.  */
9505
9506static void
9507do_vfp_sp_monadic (void)
9508{
9509  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9510  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
9511}
9512
9513static void
9514do_vfp_sp_dyadic (void)
9515{
9516  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9517  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
9518  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
9519}
9520
9521static void
9522do_vfp_sp_compare_z (void)
9523{
9524  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9525}
9526
9527static void
9528do_vfp_dp_sp_cvt (void)
9529{
9530  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9531  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
9532}
9533
9534static void
9535do_vfp_sp_dp_cvt (void)
9536{
9537  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9538  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
9539}
9540
9541static void
9542do_vfp_reg_from_sp (void)
9543{
9544  inst.instruction |= inst.operands[0].reg << 12;
9545  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
9546}
9547
9548static void
9549do_vfp_reg2_from_sp2 (void)
9550{
9551  constraint (inst.operands[2].imm != 2,
9552	      _("only two consecutive VFP SP registers allowed here"));
9553  inst.instruction |= inst.operands[0].reg << 12;
9554  inst.instruction |= inst.operands[1].reg << 16;
9555  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
9556}
9557
9558static void
9559do_vfp_sp_from_reg (void)
9560{
9561  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
9562  inst.instruction |= inst.operands[1].reg << 12;
9563}
9564
9565static void
9566do_vfp_sp2_from_reg2 (void)
9567{
9568  constraint (inst.operands[0].imm != 2,
9569	      _("only two consecutive VFP SP registers allowed here"));
9570  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
9571  inst.instruction |= inst.operands[1].reg << 12;
9572  inst.instruction |= inst.operands[2].reg << 16;
9573}
9574
9575static void
9576do_vfp_sp_ldst (void)
9577{
9578  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9579  encode_arm_cp_address (1, FALSE, TRUE, 0);
9580}
9581
9582static void
9583do_vfp_dp_ldst (void)
9584{
9585  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9586  encode_arm_cp_address (1, FALSE, TRUE, 0);
9587}
9588
9589
9590static void
9591vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
9592{
9593  if (inst.operands[0].writeback)
9594    inst.instruction |= WRITE_BACK;
9595  else
9596    constraint (ldstm_type != VFP_LDSTMIA,
9597		_("this addressing mode requires base-register writeback"));
9598  inst.instruction |= inst.operands[0].reg << 16;
9599  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
9600  inst.instruction |= inst.operands[1].imm;
9601}
9602
9603static void
9604vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
9605{
9606  int count;
9607
9608  if (inst.operands[0].writeback)
9609    inst.instruction |= WRITE_BACK;
9610  else
9611    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
9612		_("this addressing mode requires base-register writeback"));
9613
9614  inst.instruction |= inst.operands[0].reg << 16;
9615  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
9616
9617  count = inst.operands[1].imm << 1;
9618  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
9619    count += 1;
9620
9621  inst.instruction |= count;
9622}
9623
9624static void
9625do_vfp_sp_ldstmia (void)
9626{
9627  vfp_sp_ldstm (VFP_LDSTMIA);
9628}
9629
9630static void
9631do_vfp_sp_ldstmdb (void)
9632{
9633  vfp_sp_ldstm (VFP_LDSTMDB);
9634}
9635
9636static void
9637do_vfp_dp_ldstmia (void)
9638{
9639  vfp_dp_ldstm (VFP_LDSTMIA);
9640}
9641
9642static void
9643do_vfp_dp_ldstmdb (void)
9644{
9645  vfp_dp_ldstm (VFP_LDSTMDB);
9646}
9647
9648static void
9649do_vfp_xp_ldstmia (void)
9650{
9651  vfp_dp_ldstm (VFP_LDSTMIAX);
9652}
9653
9654static void
9655do_vfp_xp_ldstmdb (void)
9656{
9657  vfp_dp_ldstm (VFP_LDSTMDBX);
9658}
9659
9660static void
9661do_vfp_dp_rd_rm (void)
9662{
9663  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9664  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
9665}
9666
9667static void
9668do_vfp_dp_rn_rd (void)
9669{
9670  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
9671  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
9672}
9673
9674static void
9675do_vfp_dp_rd_rn (void)
9676{
9677  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9678  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
9679}
9680
9681static void
9682do_vfp_dp_rd_rn_rm (void)
9683{
9684  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9685  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
9686  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
9687}
9688
9689static void
9690do_vfp_dp_rd (void)
9691{
9692  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9693}
9694
9695static void
9696do_vfp_dp_rm_rd_rn (void)
9697{
9698  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
9699  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
9700  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
9701}
9702
9703/* VFPv3 instructions.  */
9704static void
9705do_vfp_sp_const (void)
9706{
9707  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9708  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
9709  inst.instruction |= (inst.operands[1].imm & 0x0f);
9710}
9711
9712static void
9713do_vfp_dp_const (void)
9714{
9715  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9716  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
9717  inst.instruction |= (inst.operands[1].imm & 0x0f);
9718}
9719
9720static void
9721vfp_conv (int srcsize)
9722{
9723  int immbits = srcsize - inst.operands[1].imm;
9724
9725  if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
9726    {
9727      /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
9728	 i.e. immbits must be in range 0 - 16.  */
9729      inst.error = _("immediate value out of range, expected range [0, 16]");
9730      return;
9731    }
9732  else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
9733    {
9734      /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
9735	 i.e. immbits must be in range 0 - 31.  */
9736      inst.error = _("immediate value out of range, expected range [1, 32]");
9737      return;
9738    }
9739
9740  inst.instruction |= (immbits & 1) << 5;
9741  inst.instruction |= (immbits >> 1);
9742}
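/* Worked example for vfp_conv above (illustrative only): a 32-bit
   fixed-point conversion with #fbits == 16 gives immbits = 32 - 16 = 16,
   so bit 5 receives immbits & 1 == 0 and the low field receives
   immbits >> 1 == 8.  With #fbits == 15, immbits = 17, bit 5 becomes 1
   and the low field is again 8.  */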
9743
9744static void
9745do_vfp_sp_conv_16 (void)
9746{
9747  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9748  vfp_conv (16);
9749}
9750
9751static void
9752do_vfp_dp_conv_16 (void)
9753{
9754  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9755  vfp_conv (16);
9756}
9757
9758static void
9759do_vfp_sp_conv_32 (void)
9760{
9761  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9762  vfp_conv (32);
9763}
9764
9765static void
9766do_vfp_dp_conv_32 (void)
9767{
9768  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9769  vfp_conv (32);
9770}
9771
9772/* FPA instructions.  Also in a logical order.	*/
9773
9774static void
9775do_fpa_cmp (void)
9776{
9777  inst.instruction |= inst.operands[0].reg << 16;
9778  inst.instruction |= inst.operands[1].reg;
9779}
9780
9781static void
9782do_fpa_ldmstm (void)
9783{
9784  inst.instruction |= inst.operands[0].reg << 12;
9785  switch (inst.operands[1].imm)
9786    {
9787    case 1: inst.instruction |= CP_T_X;		 break;
9788    case 2: inst.instruction |= CP_T_Y;		 break;
9789    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
9790    case 4:					 break;
9791    default: abort ();
9792    }
9793
9794  if (inst.instruction & (PRE_INDEX | INDEX_UP))
9795    {
9796      /* The instruction specified "ea" or "fd", so we can only accept
9797	 [Rn]{!}.  The instruction does not really support stacking or
9798	 unstacking, so we have to emulate these by setting appropriate
9799	 bits and offsets.  */
9800      constraint (inst.reloc.exp.X_op != O_constant
9801		  || inst.reloc.exp.X_add_number != 0,
9802		  _("this instruction does not support indexing"));
9803
9804      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
9805	inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;
9806
9807      if (!(inst.instruction & INDEX_UP))
9808	inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;
9809
9810      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
9811	{
9812	  inst.operands[2].preind = 0;
9813	  inst.operands[2].postind = 1;
9814	}
9815    }
9816
9817  encode_arm_cp_address (2, TRUE, TRUE, 0);
9818}
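/* Illustrative note (not from the original sources): for the "ea"/"fd"
   forms handled above, each FPA register occupies 12 bytes, so a transfer
   of inst.operands[1].imm == 4 registers uses an offset of 4 * 12 = 48
   when pre-indexed or written back, negated when INDEX_UP is clear, and a
   post-indexed form is synthesised when writeback is requested without
   pre-indexing.  */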
9819
9820/* iWMMXt instructions: strictly in alphabetical order.	 */
9821
9822static void
9823do_iwmmxt_tandorc (void)
9824{
9825  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
9826}
9827
9828static void
9829do_iwmmxt_textrc (void)
9830{
9831  inst.instruction |= inst.operands[0].reg << 12;
9832  inst.instruction |= inst.operands[1].imm;
9833}
9834
9835static void
9836do_iwmmxt_textrm (void)
9837{
9838  inst.instruction |= inst.operands[0].reg << 12;
9839  inst.instruction |= inst.operands[1].reg << 16;
9840  inst.instruction |= inst.operands[2].imm;
9841}
9842
9843static void
9844do_iwmmxt_tinsr (void)
9845{
9846  inst.instruction |= inst.operands[0].reg << 16;
9847  inst.instruction |= inst.operands[1].reg << 12;
9848  inst.instruction |= inst.operands[2].imm;
9849}
9850
9851static void
9852do_iwmmxt_tmia (void)
9853{
9854  inst.instruction |= inst.operands[0].reg << 5;
9855  inst.instruction |= inst.operands[1].reg;
9856  inst.instruction |= inst.operands[2].reg << 12;
9857}
9858
9859static void
9860do_iwmmxt_waligni (void)
9861{
9862  inst.instruction |= inst.operands[0].reg << 12;
9863  inst.instruction |= inst.operands[1].reg << 16;
9864  inst.instruction |= inst.operands[2].reg;
9865  inst.instruction |= inst.operands[3].imm << 20;
9866}
9867
9868static void
9869do_iwmmxt_wmerge (void)
9870{
9871  inst.instruction |= inst.operands[0].reg << 12;
9872  inst.instruction |= inst.operands[1].reg << 16;
9873  inst.instruction |= inst.operands[2].reg;
9874  inst.instruction |= inst.operands[3].imm << 21;
9875}
9876
9877static void
9878do_iwmmxt_wmov (void)
9879{
9880  /* WMOV rD, rN is an alias for WOR rD, rN, rN.  */
9881  inst.instruction |= inst.operands[0].reg << 12;
9882  inst.instruction |= inst.operands[1].reg << 16;
9883  inst.instruction |= inst.operands[1].reg;
9884}
9885
9886static void
9887do_iwmmxt_wldstbh (void)
9888{
9889  int reloc;
9890  inst.instruction |= inst.operands[0].reg << 12;
9891  if (thumb_mode)
9892    reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
9893  else
9894    reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
9895  encode_arm_cp_address (1, TRUE, FALSE, reloc);
9896}
9897
9898static void
9899do_iwmmxt_wldstw (void)
9900{
9901  /* RIWR_RIWC clears .isreg for a control register.  */
9902  if (!inst.operands[0].isreg)
9903    {
9904      constraint (inst.cond != COND_ALWAYS, BAD_COND);
9905      inst.instruction |= 0xf0000000;
9906    }
9907
9908  inst.instruction |= inst.operands[0].reg << 12;
9909  encode_arm_cp_address (1, TRUE, TRUE, 0);
9910}
9911
9912static void
9913do_iwmmxt_wldstd (void)
9914{
9915  inst.instruction |= inst.operands[0].reg << 12;
9916  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
9917      && inst.operands[1].immisreg)
9918    {
9919      inst.instruction &= ~0x1a000ff;
9920      inst.instruction |= (0xfU << 28);
9921      if (inst.operands[1].preind)
9922	inst.instruction |= PRE_INDEX;
9923      if (!inst.operands[1].negative)
9924	inst.instruction |= INDEX_UP;
9925      if (inst.operands[1].writeback)
9926	inst.instruction |= WRITE_BACK;
9927      inst.instruction |= inst.operands[1].reg << 16;
9928      inst.instruction |= inst.reloc.exp.X_add_number << 4;
9929      inst.instruction |= inst.operands[1].imm;
9930    }
9931  else
9932    encode_arm_cp_address (1, TRUE, FALSE, 0);
9933}
9934
9935static void
9936do_iwmmxt_wshufh (void)
9937{
9938  inst.instruction |= inst.operands[0].reg << 12;
9939  inst.instruction |= inst.operands[1].reg << 16;
9940  inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
9941  inst.instruction |= (inst.operands[2].imm & 0x0f);
9942}
9943
9944static void
9945do_iwmmxt_wzero (void)
9946{
9947  /* WZERO reg is an alias for WANDN reg, reg, reg.  */
9948  inst.instruction |= inst.operands[0].reg;
9949  inst.instruction |= inst.operands[0].reg << 12;
9950  inst.instruction |= inst.operands[0].reg << 16;
9951}
9952
9953static void
9954do_iwmmxt_wrwrwr_or_imm5 (void)
9955{
9956  if (inst.operands[2].isreg)
9957    do_rd_rn_rm ();
9958  else {
9959    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
9960		_("immediate operand requires iWMMXt2"));
9961    do_rd_rn ();
9962    if (inst.operands[2].imm == 0)
9963      {
9964	switch ((inst.instruction >> 20) & 0xf)
9965	  {
9966	  case 4:
9967	  case 5:
9968	  case 6:
9969	  case 7:
9970	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
9971	    inst.operands[2].imm = 16;
9972	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
9973	    break;
9974	  case 8:
9975	  case 9:
9976	  case 10:
9977	  case 11:
9978	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
9979	    inst.operands[2].imm = 32;
9980	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
9981	    break;
9982	  case 12:
9983	  case 13:
9984	  case 14:
9985	  case 15:
9986	    {
9987	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
9988	      unsigned long wrn;
9989	      wrn = (inst.instruction >> 16) & 0xf;
9990	      inst.instruction &= 0xff0fff0f;
9991	      inst.instruction |= wrn;
9992	      /* Bail out here; the instruction is now assembled.  */
9993	      return;
9994	    }
9995	  }
9996      }
9997    /* Map 32 -> 0, etc.  */
9998    inst.operands[2].imm &= 0x1f;
9999    inst.instruction |= (0xfU << 28) | ((inst.operands[2].imm & 0x10) << 4)
			| (inst.operands[2].imm & 0xf);
10000  }
10001}
10002
10003/* Cirrus Maverick instructions.  Simple 2-, 3-, and 4-register
10004   operations first, then control, shift, and load/store.  */
10005
10006/* Insns like "foo X,Y,Z".  */
10007
10008static void
10009do_mav_triple (void)
10010{
10011  inst.instruction |= inst.operands[0].reg << 16;
10012  inst.instruction |= inst.operands[1].reg;
10013  inst.instruction |= inst.operands[2].reg << 12;
10014}
10015
10016/* Insns like "foo W,X,Y,Z".
10017    where W=MVAX[0:3] and X,Y,Z=MVFX[0:15].  */
10018
10019static void
10020do_mav_quad (void)
10021{
10022  inst.instruction |= inst.operands[0].reg << 5;
10023  inst.instruction |= inst.operands[1].reg << 12;
10024  inst.instruction |= inst.operands[2].reg << 16;
10025  inst.instruction |= inst.operands[3].reg;
10026}
10027
10028/* cfmvsc32<cond> DSPSC,MVDX[15:0].  */
10029static void
10030do_mav_dspsc (void)
10031{
10032  inst.instruction |= inst.operands[1].reg << 12;
10033}
10034
10035/* Maverick shift immediate instructions.
10036   cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
10037   cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0].  */
10038
10039static void
10040do_mav_shift (void)
10041{
10042  int imm = inst.operands[2].imm;
10043
10044  inst.instruction |= inst.operands[0].reg << 12;
10045  inst.instruction |= inst.operands[1].reg << 16;
10046
10047  /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
10048     Bits 5-7 of the insn should have bits 4-6 of the immediate.
10049     Bit 4 should be 0.	 */
10050  imm = (imm & 0xf) | ((imm & 0x70) << 1);
10051
10052  inst.instruction |= imm;
10053}
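/* Worked example for do_mav_shift above (illustrative only): for an
   immediate of 0x25 (binary 010 0101), the code computes
   (0x25 & 0xf) | ((0x25 & 0x70) << 1) == 0x05 | 0x40 == 0x45, i.e. bits
   0-3 and 5-7 of the encoding carry the immediate and bit 4 stays clear.  */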
10054
10055/* XScale instructions.	 Also sorted arithmetic before move.  */
10056
10057/* Xscale multiply-accumulate (argument parse)
10058     MIAcc   acc0,Rm,Rs
10059     MIAPHcc acc0,Rm,Rs
10060     MIAxycc acc0,Rm,Rs.  */
10061
10062static void
10063do_xsc_mia (void)
10064{
10065  inst.instruction |= inst.operands[1].reg;
10066  inst.instruction |= inst.operands[2].reg << 12;
10067}
10068
10069/* Xscale move-accumulator-register (argument parse)
10070
10071     MARcc   acc0,RdLo,RdHi.  */
10072
10073static void
10074do_xsc_mar (void)
10075{
10076  inst.instruction |= inst.operands[1].reg << 12;
10077  inst.instruction |= inst.operands[2].reg << 16;
10078}
10079
10080/* Xscale move-register-accumulator (argument parse)
10081
10082     MRAcc   RdLo,RdHi,acc0.  */
10083
10084static void
10085do_xsc_mra (void)
10086{
10087  constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
10088  inst.instruction |= inst.operands[0].reg << 12;
10089  inst.instruction |= inst.operands[1].reg << 16;
10090}
10091
10092/* Encoding functions relevant only to Thumb.  */
10093
10094/* inst.operands[i] is a shifted-register operand; encode
10095   it into inst.instruction in the format used by Thumb32.  */
10096
10097static void
10098encode_thumb32_shifted_operand (int i)
10099{
10100  unsigned int value = inst.reloc.exp.X_add_number;
10101  unsigned int shift = inst.operands[i].shift_kind;
10102
10103  constraint (inst.operands[i].immisreg,
10104	      _("shift by register not allowed in thumb mode"));
10105  inst.instruction |= inst.operands[i].reg;
10106  if (shift == SHIFT_RRX)
10107    inst.instruction |= SHIFT_ROR << 4;
10108  else
10109    {
10110      constraint (inst.reloc.exp.X_op != O_constant,
10111		  _("expression too complex"));
10112
10113      constraint (value > 32
10114		  || (value == 32 && (shift == SHIFT_LSL
10115				      || shift == SHIFT_ROR)),
10116		  _("shift expression is too large"));
10117
10118      if (value == 0)
10119	shift = SHIFT_LSL;
10120      else if (value == 32)
10121	value = 0;
10122
10123      inst.instruction |= shift << 4;
10124      inst.instruction |= (value & 0x1c) << 10;
10125      inst.instruction |= (value & 0x03) << 6;
10126    }
10127}
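/* Illustrative example for the routine above (not verified against the
   assembler): for a register shifted by LSR #5, the shift type is placed
   in bits [5:4] and the amount is split as (5 & 0x1c) << 10 into bits
   [14:12] and (5 & 0x03) << 6 into bits [7:6].  A constant amount of 32
   is encoded as 0 (accepted only for LSR and ASR), and an amount of 0 is
   always encoded as LSL #0.  */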
10128
10129
10130/* inst.operands[i] was set up by parse_address.  Encode it into a
10131   Thumb32 format load or store instruction.  Reject forms that cannot
10132   be used with such instructions.  If is_t is true, reject forms that
10133   cannot be used with a T instruction; if is_d is true, reject forms
10134   that cannot be used with a D instruction.  If it is a store insn,
10135   reject PC in Rn.  */
10136
10137static void
10138encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
10139{
10140  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
10141
10142  constraint (!inst.operands[i].isreg,
10143	      _("Instruction does not support =N addresses"));
10144
10145  inst.instruction |= inst.operands[i].reg << 16;
10146  if (inst.operands[i].immisreg)
10147    {
10148      constraint (is_pc, BAD_PC_ADDRESSING);
10149      constraint (is_t || is_d, _("cannot use register index with this instruction"));
10150      constraint (inst.operands[i].negative,
10151		  _("Thumb does not support negative register indexing"));
10152      constraint (inst.operands[i].postind,
10153		  _("Thumb does not support register post-indexing"));
10154      constraint (inst.operands[i].writeback,
10155		  _("Thumb does not support register indexing with writeback"));
10156      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
10157		  _("Thumb supports only LSL in shifted register indexing"));
10158
10159      inst.instruction |= inst.operands[i].imm;
10160      if (inst.operands[i].shifted)
10161	{
10162	  constraint (inst.reloc.exp.X_op != O_constant,
10163		      _("expression too complex"));
10164	  constraint (inst.reloc.exp.X_add_number < 0
10165		      || inst.reloc.exp.X_add_number > 3,
10166		      _("shift out of range"));
10167	  inst.instruction |= inst.reloc.exp.X_add_number << 4;
10168	}
10169      inst.reloc.type = BFD_RELOC_UNUSED;
10170    }
10171  else if (inst.operands[i].preind)
10172    {
10173      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
10174      constraint (is_t && inst.operands[i].writeback,
10175		  _("cannot use writeback with this instruction"));
10176      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
10177		  BAD_PC_ADDRESSING);
10178
10179      if (is_d)
10180	{
10181	  inst.instruction |= 0x01000000;
10182	  if (inst.operands[i].writeback)
10183	    inst.instruction |= 0x00200000;
10184	}
10185      else
10186	{
10187	  inst.instruction |= 0x00000c00;
10188	  if (inst.operands[i].writeback)
10189	    inst.instruction |= 0x00000100;
10190	}
10191      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
10192    }
10193  else if (inst.operands[i].postind)
10194    {
10195      gas_assert (inst.operands[i].writeback);
10196      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
10197      constraint (is_t, _("cannot use post-indexing with this instruction"));
10198
10199      if (is_d)
10200	inst.instruction |= 0x00200000;
10201      else
10202	inst.instruction |= 0x00000900;
10203      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
10204    }
10205  else /* unindexed - only for coprocessor */
10206    inst.error = _("instruction does not accept unindexed addressing");
10207}
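/* Illustrative examples for the routine above (not verified): a register
   index such as "[r1, r2, lsl #2]" is accepted and the shift amount (0-3)
   lands in bits [5:4], whereas "[r1, -r2]", a post-indexed register
   offset, or any shift other than LSL is rejected with the corresponding
   diagnostic.  */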
10208
10209/* Table of Thumb instructions which exist in both 16- and 32-bit
10210   encodings (the latter only in post-V6T2 cores).  The index is the
10211   value used in the insns table below.  When there is more than one
10212   possible 16-bit encoding for the instruction, this table always
10213   holds variant (1).
10214   Also contains several pseudo-instructions used during relaxation.  */
10215#define T16_32_TAB				\
10216  X(_adc,   4140, eb400000),			\
10217  X(_adcs,  4140, eb500000),			\
10218  X(_add,   1c00, eb000000),			\
10219  X(_adds,  1c00, eb100000),			\
10220  X(_addi,  0000, f1000000),			\
10221  X(_addis, 0000, f1100000),			\
10222  X(_add_pc,000f, f20f0000),			\
10223  X(_add_sp,000d, f10d0000),			\
10224  X(_adr,   000f, f20f0000),			\
10225  X(_and,   4000, ea000000),			\
10226  X(_ands,  4000, ea100000),			\
10227  X(_asr,   1000, fa40f000),			\
10228  X(_asrs,  1000, fa50f000),			\
10229  X(_b,     e000, f000b000),			\
10230  X(_bcond, d000, f0008000),			\
10231  X(_bic,   4380, ea200000),			\
10232  X(_bics,  4380, ea300000),			\
10233  X(_cmn,   42c0, eb100f00),			\
10234  X(_cmp,   2800, ebb00f00),			\
10235  X(_cpsie, b660, f3af8400),			\
10236  X(_cpsid, b670, f3af8600),			\
10237  X(_cpy,   4600, ea4f0000),			\
10238  X(_dec_sp,80dd, f1ad0d00),			\
10239  X(_eor,   4040, ea800000),			\
10240  X(_eors,  4040, ea900000),			\
10241  X(_inc_sp,00dd, f10d0d00),			\
10242  X(_ldmia, c800, e8900000),			\
10243  X(_ldr,   6800, f8500000),			\
10244  X(_ldrb,  7800, f8100000),			\
10245  X(_ldrh,  8800, f8300000),			\
10246  X(_ldrsb, 5600, f9100000),			\
10247  X(_ldrsh, 5e00, f9300000),			\
10248  X(_ldr_pc,4800, f85f0000),			\
10249  X(_ldr_pc2,4800, f85f0000),			\
10250  X(_ldr_sp,9800, f85d0000),			\
10251  X(_lsl,   0000, fa00f000),			\
10252  X(_lsls,  0000, fa10f000),			\
10253  X(_lsr,   0800, fa20f000),			\
10254  X(_lsrs,  0800, fa30f000),			\
10255  X(_mov,   2000, ea4f0000),			\
10256  X(_movs,  2000, ea5f0000),			\
10257  X(_mul,   4340, fb00f000),                     \
10258  X(_muls,  4340, ffffffff), /* no 32b muls */	\
10259  X(_mvn,   43c0, ea6f0000),			\
10260  X(_mvns,  43c0, ea7f0000),			\
10261  X(_neg,   4240, f1c00000), /* rsb #0 */	\
10262  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
10263  X(_orr,   4300, ea400000),			\
10264  X(_orrs,  4300, ea500000),			\
10265  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
10266  X(_push,  b400, e92d0000), /* stmdb sp!,... */	\
10267  X(_rev,   ba00, fa90f080),			\
10268  X(_rev16, ba40, fa90f090),			\
10269  X(_revsh, bac0, fa90f0b0),			\
10270  X(_ror,   41c0, fa60f000),			\
10271  X(_rors,  41c0, fa70f000),			\
10272  X(_sbc,   4180, eb600000),			\
10273  X(_sbcs,  4180, eb700000),			\
10274  X(_stmia, c000, e8800000),			\
10275  X(_str,   6000, f8400000),			\
10276  X(_strb,  7000, f8000000),			\
10277  X(_strh,  8000, f8200000),			\
10278  X(_str_sp,9000, f84d0000),			\
10279  X(_sub,   1e00, eba00000),			\
10280  X(_subs,  1e00, ebb00000),			\
10281  X(_subi,  8000, f1a00000),			\
10282  X(_subis, 8000, f1b00000),			\
10283  X(_sxtb,  b240, fa4ff080),			\
10284  X(_sxth,  b200, fa0ff080),			\
10285  X(_tst,   4200, ea100f00),			\
10286  X(_uxtb,  b2c0, fa5ff080),			\
10287  X(_uxth,  b280, fa1ff080),			\
10288  X(_nop,   bf00, f3af8000),			\
10289  X(_yield, bf10, f3af8001),			\
10290  X(_wfe,   bf20, f3af8002),			\
10291  X(_wfi,   bf30, f3af8003),			\
10292  X(_sev,   bf40, f3af8004),                    \
10293  X(_sevl,  bf50, f3af8005),			\
10294  X(_udf,   de00, f7f0a000)
10295
10296/* To catch errors in encoding functions, the codes are all offset by
10297   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
10298   as 16-bit instructions.  */
10299#define X(a,b,c) T_MNEM##a
10300enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
10301#undef X
10302
10303#define X(a,b,c) 0x##b
10304static const unsigned short thumb_op16[] = { T16_32_TAB };
10305#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
10306#undef X
10307
10308#define X(a,b,c) 0x##c
10309static const unsigned int thumb_op32[] = { T16_32_TAB };
10310#define THUMB_OP32(n)        (thumb_op32[(n) - (T16_32_OFFSET + 1)])
10311#define THUMB_SETS_FLAGS(n)  (THUMB_OP32 (n) & 0x00100000)
10312#undef X
10313#undef T16_32_TAB
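/* Example of how the tables above are used (illustrative only): T_MNEM_add
   is the third code after T16_32_OFFSET, so THUMB_OP16 (T_MNEM_add) yields
   0x1c00 and THUMB_OP32 (T_MNEM_add) yields 0xeb000000 from the _add entry;
   THUMB_SETS_FLAGS (T_MNEM_adds) is non-zero because bit 20 is set in its
   32-bit pattern 0xeb100000.  */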
10314
10315/* Thumb instruction encoders, in alphabetical order.  */
10316
10317/* ADDW or SUBW.  */
10318
10319static void
10320do_t_add_sub_w (void)
10321{
10322  int Rd, Rn;
10323
10324  Rd = inst.operands[0].reg;
10325  Rn = inst.operands[1].reg;
10326
10327  /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10328     is the SP-{plus,minus}-immediate form of the instruction.  */
10329  if (Rn == REG_SP)
10330    constraint (Rd == REG_PC, BAD_PC);
10331  else
10332    reject_bad_reg (Rd);
10333
10334  inst.instruction |= (Rn << 16) | (Rd << 8);
10335  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
10336}
10337
10338/* Parse an add or subtract instruction.  We get here with inst.instruction
10339   equalling any of THUMB_OPCODE_add, adds, sub, or subs.  */
10340
10341static void
10342do_t_add_sub (void)
10343{
10344  int Rd, Rs, Rn;
10345
10346  Rd = inst.operands[0].reg;
10347  Rs = (inst.operands[1].present
10348	? inst.operands[1].reg    /* Rd, Rs, foo */
10349	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
10350
10351  if (Rd == REG_PC)
10352    set_it_insn_type_last ();
10353
10354  if (unified_syntax)
10355    {
10356      bfd_boolean flags;
10357      bfd_boolean narrow;
10358      int opcode;
10359
10360      flags = (inst.instruction == T_MNEM_adds
10361	       || inst.instruction == T_MNEM_subs);
10362      if (flags)
10363	narrow = !in_it_block ();
10364      else
10365	narrow = in_it_block ();
10366      if (!inst.operands[2].isreg)
10367	{
10368	  int add;
10369
10370	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
10371
10372	  add = (inst.instruction == T_MNEM_add
10373		 || inst.instruction == T_MNEM_adds);
10374	  opcode = 0;
10375	  if (inst.size_req != 4)
10376	    {
10377	      /* Attempt to use a narrow opcode, with relaxation if
10378		 appropriate.  */
10379	      if (Rd == REG_SP && Rs == REG_SP && !flags)
10380		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
10381	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
10382		opcode = T_MNEM_add_sp;
10383	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
10384		opcode = T_MNEM_add_pc;
10385	      else if (Rd <= 7 && Rs <= 7 && narrow)
10386		{
10387		  if (flags)
10388		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
10389		  else
10390		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
10391		}
10392	      if (opcode)
10393		{
10394		  inst.instruction = THUMB_OP16(opcode);
10395		  inst.instruction |= (Rd << 4) | Rs;
10396		  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
10397		  if (inst.size_req != 2)
10398		    inst.relax = opcode;
10399		}
10400	      else
10401		constraint (inst.size_req == 2, BAD_HIREG);
10402	    }
10403	  if (inst.size_req == 4
10404	      || (inst.size_req != 2 && !opcode))
10405	    {
10406	      if (Rd == REG_PC)
10407		{
10408		  constraint (add, BAD_PC);
10409		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
10410			     _("only SUBS PC, LR, #const allowed"));
10411		  constraint (inst.reloc.exp.X_op != O_constant,
10412			      _("expression too complex"));
10413		  constraint (inst.reloc.exp.X_add_number < 0
10414			      || inst.reloc.exp.X_add_number > 0xff,
10415			     _("immediate value out of range"));
10416		  inst.instruction = T2_SUBS_PC_LR
10417				     | inst.reloc.exp.X_add_number;
10418		  inst.reloc.type = BFD_RELOC_UNUSED;
10419		  return;
10420		}
10421	      else if (Rs == REG_PC)
10422		{
10423		  /* Always use addw/subw.  */
10424		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
10425		  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
10426		}
10427	      else
10428		{
10429		  inst.instruction = THUMB_OP32 (inst.instruction);
10430		  inst.instruction = (inst.instruction & 0xe1ffffff)
10431				     | 0x10000000;
10432		  if (flags)
10433		    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10434		  else
10435		    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
10436		}
10437	      inst.instruction |= Rd << 8;
10438	      inst.instruction |= Rs << 16;
10439	    }
10440	}
10441      else
10442	{
10443	  unsigned int value = inst.reloc.exp.X_add_number;
10444	  unsigned int shift = inst.operands[2].shift_kind;
10445
10446	  Rn = inst.operands[2].reg;
10447	  /* See if we can do this with a 16-bit instruction.  */
10448	  if (!inst.operands[2].shifted && inst.size_req != 4)
10449	    {
10450	      if (Rd > 7 || Rs > 7 || Rn > 7)
10451		narrow = FALSE;
10452
10453	      if (narrow)
10454		{
10455		  inst.instruction = ((inst.instruction == T_MNEM_adds
10456				       || inst.instruction == T_MNEM_add)
10457				      ? T_OPCODE_ADD_R3
10458				      : T_OPCODE_SUB_R3);
10459		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
10460		  return;
10461		}
10462
10463	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
10464		{
10465		  /* Thumb-1 cores (except v6-M) require at least one high
10466		     register in a narrow, non-flag-setting add.  */
10467		  if (Rd > 7 || Rn > 7
10468		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
10469		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
10470		    {
10471		      if (Rd == Rn)
10472			{
10473			  Rn = Rs;
10474			  Rs = Rd;
10475			}
10476		      inst.instruction = T_OPCODE_ADD_HI;
10477		      inst.instruction |= (Rd & 8) << 4;
10478		      inst.instruction |= (Rd & 7);
10479		      inst.instruction |= Rn << 3;
10480		      return;
10481		    }
10482		}
10483	    }
10484
10485	  constraint (Rd == REG_PC, BAD_PC);
10486	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
10487	  constraint (Rs == REG_PC, BAD_PC);
10488	  reject_bad_reg (Rn);
10489
10490	  /* If we get here, it can't be done in 16 bits.  */
10491	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
10492		      _("shift must be constant"));
10493	  inst.instruction = THUMB_OP32 (inst.instruction);
10494	  inst.instruction |= Rd << 8;
10495	  inst.instruction |= Rs << 16;
10496	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
10497		      _("shift value over 3 not allowed in thumb mode"));
10498	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
10499		      _("only LSL shift allowed in thumb mode"));
10500	  encode_thumb32_shifted_operand (2);
10501	}
10502    }
10503  else
10504    {
10505      constraint (inst.instruction == T_MNEM_adds
10506		  || inst.instruction == T_MNEM_subs,
10507		  BAD_THUMB32);
10508
10509      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
10510	{
10511	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
10512		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
10513		      BAD_HIREG);
10514
10515	  inst.instruction = (inst.instruction == T_MNEM_add
10516			      ? 0x0000 : 0x8000);
10517	  inst.instruction |= (Rd << 4) | Rs;
10518	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
10519	  return;
10520	}
10521
10522      Rn = inst.operands[2].reg;
10523      constraint (inst.operands[2].shifted, _("unshifted register required"));
10524
10525      /* We now have Rd, Rs, and Rn set to registers.  */
10526      if (Rd > 7 || Rs > 7 || Rn > 7)
10527	{
10528	  /* Can't do this for SUB.	 */
10529	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
10530	  inst.instruction = T_OPCODE_ADD_HI;
10531	  inst.instruction |= (Rd & 8) << 4;
10532	  inst.instruction |= (Rd & 7);
10533	  if (Rs == Rd)
10534	    inst.instruction |= Rn << 3;
10535	  else if (Rn == Rd)
10536	    inst.instruction |= Rs << 3;
10537	  else
10538	    constraint (1, _("dest must overlap one source register"));
10539	}
10540      else
10541	{
10542	  inst.instruction = (inst.instruction == T_MNEM_add
10543			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
10544	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
10545	}
10546    }
10547}
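/* Illustrative summary of the selection above (not exhaustive, unverified):
   with unified syntax outside an IT block, "adds r1, r2, #3" takes the
   16-bit immediate form (relaxed to 32 bits later if the constant does not
   fit), "add sp, sp, #16" maps to the 16-bit SP-increment encoding, and a
   destination of PC is only accepted as "SUBS PC, LR, #const" with a
   constant in the range 0-255.  */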
10548
10549static void
10550do_t_adr (void)
10551{
10552  unsigned Rd;
10553
10554  Rd = inst.operands[0].reg;
10555  reject_bad_reg (Rd);
10556
10557  if (unified_syntax && inst.size_req == 0 && Rd <= 7)
10558    {
10559      /* Defer to section relaxation.  */
10560      inst.relax = inst.instruction;
10561      inst.instruction = THUMB_OP16 (inst.instruction);
10562      inst.instruction |= Rd << 4;
10563    }
10564  else if (unified_syntax && inst.size_req != 2)
10565    {
10566      /* Generate a 32-bit opcode.  */
10567      inst.instruction = THUMB_OP32 (inst.instruction);
10568      inst.instruction |= Rd << 8;
10569      inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
10570      inst.reloc.pc_rel = 1;
10571    }
10572  else
10573    {
10574      /* Generate a 16-bit opcode.  */
10575      inst.instruction = THUMB_OP16 (inst.instruction);
10576      inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
10577      inst.reloc.exp.X_add_number -= 4; /* PC relative adjust.  */
10578      inst.reloc.pc_rel = 1;
10579
10580      inst.instruction |= Rd << 4;
10581    }
10582}
10583
10584/* Arithmetic instructions for which there is just one 16-bit
10585   instruction encoding, and it allows only two low registers.
10586   For maximal compatibility with ARM syntax, we allow three register
10587   operands even when Thumb-32 instructions are not available, as long
10588   as the first two are identical.  For instance, both "sbc r0,r1" and
10589   "sbc r0,r0,r1" are allowed.  */
10590static void
10591do_t_arit3 (void)
10592{
10593  int Rd, Rs, Rn;
10594
10595  Rd = inst.operands[0].reg;
10596  Rs = (inst.operands[1].present
10597	? inst.operands[1].reg    /* Rd, Rs, foo */
10598	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
10599  Rn = inst.operands[2].reg;
10600
10601  reject_bad_reg (Rd);
10602  reject_bad_reg (Rs);
10603  if (inst.operands[2].isreg)
10604    reject_bad_reg (Rn);
10605
10606  if (unified_syntax)
10607    {
10608      if (!inst.operands[2].isreg)
10609	{
10610	  /* For an immediate, we always generate a 32-bit opcode;
10611	     section relaxation will shrink it later if possible.  */
10612	  inst.instruction = THUMB_OP32 (inst.instruction);
10613	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
10614	  inst.instruction |= Rd << 8;
10615	  inst.instruction |= Rs << 16;
10616	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10617	}
10618      else
10619	{
10620	  bfd_boolean narrow;
10621
10622	  /* See if we can do this with a 16-bit instruction.  */
10623	  if (THUMB_SETS_FLAGS (inst.instruction))
10624	    narrow = !in_it_block ();
10625	  else
10626	    narrow = in_it_block ();
10627
10628	  if (Rd > 7 || Rn > 7 || Rs > 7)
10629	    narrow = FALSE;
10630	  if (inst.operands[2].shifted)
10631	    narrow = FALSE;
10632	  if (inst.size_req == 4)
10633	    narrow = FALSE;
10634
10635	  if (narrow
10636	      && Rd == Rs)
10637	    {
10638	      inst.instruction = THUMB_OP16 (inst.instruction);
10639	      inst.instruction |= Rd;
10640	      inst.instruction |= Rn << 3;
10641	      return;
10642	    }
10643
10644	  /* If we get here, it can't be done in 16 bits.  */
10645	  constraint (inst.operands[2].shifted
10646		      && inst.operands[2].immisreg,
10647		      _("shift must be constant"));
10648	  inst.instruction = THUMB_OP32 (inst.instruction);
10649	  inst.instruction |= Rd << 8;
10650	  inst.instruction |= Rs << 16;
10651	  encode_thumb32_shifted_operand (2);
10652	}
10653    }
10654  else
10655    {
10656      /* On its face this is a lie - the instruction does set the
10657	 flags.  However, the only supported mnemonic in this mode
10658	 says it doesn't.  */
10659      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
10660
10661      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
10662		  _("unshifted register required"));
10663      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
10664      constraint (Rd != Rs,
10665		  _("dest and source1 must be the same register"));
10666
10667      inst.instruction = THUMB_OP16 (inst.instruction);
10668      inst.instruction |= Rd;
10669      inst.instruction |= Rn << 3;
10670    }
10671}
10672
10673/* Similarly, but for instructions where the arithmetic operation is
10674   commutative, so we can allow either of them to be different from
10675   the destination operand in a 16-bit instruction.  For instance, all
10676   three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
10677   accepted.  */
10678static void
10679do_t_arit3c (void)
10680{
10681  int Rd, Rs, Rn;
10682
10683  Rd = inst.operands[0].reg;
10684  Rs = (inst.operands[1].present
10685	? inst.operands[1].reg    /* Rd, Rs, foo */
10686	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
10687  Rn = inst.operands[2].reg;
10688
10689  reject_bad_reg (Rd);
10690  reject_bad_reg (Rs);
10691  if (inst.operands[2].isreg)
10692    reject_bad_reg (Rn);
10693
10694  if (unified_syntax)
10695    {
10696      if (!inst.operands[2].isreg)
10697	{
10698	  /* For an immediate, we always generate a 32-bit opcode;
10699	     section relaxation will shrink it later if possible.  */
10700	  inst.instruction = THUMB_OP32 (inst.instruction);
10701	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
10702	  inst.instruction |= Rd << 8;
10703	  inst.instruction |= Rs << 16;
10704	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10705	}
10706      else
10707	{
10708	  bfd_boolean narrow;
10709
10710	  /* See if we can do this with a 16-bit instruction.  */
10711	  if (THUMB_SETS_FLAGS (inst.instruction))
10712	    narrow = !in_it_block ();
10713	  else
10714	    narrow = in_it_block ();
10715
10716	  if (Rd > 7 || Rn > 7 || Rs > 7)
10717	    narrow = FALSE;
10718	  if (inst.operands[2].shifted)
10719	    narrow = FALSE;
10720	  if (inst.size_req == 4)
10721	    narrow = FALSE;
10722
10723	  if (narrow)
10724	    {
10725	      if (Rd == Rs)
10726		{
10727		  inst.instruction = THUMB_OP16 (inst.instruction);
10728		  inst.instruction |= Rd;
10729		  inst.instruction |= Rn << 3;
10730		  return;
10731		}
10732	      if (Rd == Rn)
10733		{
10734		  inst.instruction = THUMB_OP16 (inst.instruction);
10735		  inst.instruction |= Rd;
10736		  inst.instruction |= Rs << 3;
10737		  return;
10738		}
10739	    }
10740
10741	  /* If we get here, it can't be done in 16 bits.  */
10742	  constraint (inst.operands[2].shifted
10743		      && inst.operands[2].immisreg,
10744		      _("shift must be constant"));
10745	  inst.instruction = THUMB_OP32 (inst.instruction);
10746	  inst.instruction |= Rd << 8;
10747	  inst.instruction |= Rs << 16;
10748	  encode_thumb32_shifted_operand (2);
10749	}
10750    }
10751  else
10752    {
10753      /* On its face this is a lie - the instruction does set the
10754	 flags.  However, the only supported mnemonic in this mode
10755	 says it doesn't.  */
10756      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
10757
10758      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
10759		  _("unshifted register required"));
10760      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
10761
10762      inst.instruction = THUMB_OP16 (inst.instruction);
10763      inst.instruction |= Rd;
10764
10765      if (Rd == Rs)
10766	inst.instruction |= Rn << 3;
10767      else if (Rd == Rn)
10768	inst.instruction |= Rs << 3;
10769      else
10770	constraint (1, _("dest must overlap one source register"));
10771    }
10772}
10773
10774static void
10775do_t_bfc (void)
10776{
10777  unsigned Rd;
10778  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
10779  constraint (msb > 32, _("bit-field extends past end of register"));
10780  /* The instruction encoding stores the LSB and MSB,
10781     not the LSB and width.  */
10782  Rd = inst.operands[0].reg;
10783  reject_bad_reg (Rd);
10784  inst.instruction |= Rd << 8;
10785  inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
10786  inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
10787  inst.instruction |= msb - 1;
10788}
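/* Worked example for the encoding above (illustrative only): "bfc r0, #8, #4"
   has lsb = 8 and width = 4, so msb = 12 and the encoding stores
   msb - 1 = 11 in the low bits, with the lsb split into bits [14:12]
   (8 >> 2 == 2) and bits [7:6] (8 & 3 == 0).  */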
10789
10790static void
10791do_t_bfi (void)
10792{
10793  int Rd, Rn;
10794  unsigned int msb;
10795
10796  Rd = inst.operands[0].reg;
10797  reject_bad_reg (Rd);
10798
10799  /* #0 in second position is alternative syntax for bfc, which is
10800     the same instruction but with REG_PC in the Rm field.  */
10801  if (!inst.operands[1].isreg)
10802    Rn = REG_PC;
10803  else
10804    {
10805      Rn = inst.operands[1].reg;
10806      reject_bad_reg (Rn);
10807    }
10808
10809  msb = inst.operands[2].imm + inst.operands[3].imm;
10810  constraint (msb > 32, _("bit-field extends past end of register"));
10811  /* The instruction encoding stores the LSB and MSB,
10812     not the LSB and width.  */
10813  inst.instruction |= Rd << 8;
10814  inst.instruction |= Rn << 16;
10815  inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10816  inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10817  inst.instruction |= msb - 1;
10818}
10819
10820static void
10821do_t_bfx (void)
10822{
10823  unsigned Rd, Rn;
10824
10825  Rd = inst.operands[0].reg;
10826  Rn = inst.operands[1].reg;
10827
10828  reject_bad_reg (Rd);
10829  reject_bad_reg (Rn);
10830
10831  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
10832	      _("bit-field extends past end of register"));
10833  inst.instruction |= Rd << 8;
10834  inst.instruction |= Rn << 16;
10835  inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10836  inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10837  inst.instruction |= inst.operands[3].imm - 1;
10838}
10839
10840/* ARM V5 Thumb BLX (argument parse)
10841	BLX <target_addr>	which is BLX(1)
10842	BLX <Rm>		which is BLX(2)
10843   Unfortunately, there are two different opcodes for this mnemonic.
10844   So, the insns[].value is not used, and the code here zaps values
10845   into inst.instruction.
10846
10847   ??? How to take advantage of the additional two bits of displacement
10848   available in Thumb32 mode?  Need new relocation?  */
10849
10850static void
10851do_t_blx (void)
10852{
10853  set_it_insn_type_last ();
10854
10855  if (inst.operands[0].isreg)
10856    {
10857      constraint (inst.operands[0].reg == REG_PC, BAD_PC);
10858      /* We have a register, so this is BLX(2).  */
10859      inst.instruction |= inst.operands[0].reg << 3;
10860    }
10861  else
10862    {
10863      /* No register.  This must be BLX(1).  */
10864      inst.instruction = 0xf000e800;
10865      encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
10866    }
10867}
10868
10869static void
10870do_t_branch (void)
10871{
10872  int opcode;
10873  int cond;
10874  int reloc;
10875
10876  cond = inst.cond;
10877  set_it_insn_type (IF_INSIDE_IT_LAST_INSN);
10878
10879  if (in_it_block ())
10880    {
10881      /* Conditional branches inside IT blocks are encoded as unconditional
10882	 branches.  */
10883      cond = COND_ALWAYS;
10884    }
10885  else
10886    cond = inst.cond;
10887
10888  if (cond != COND_ALWAYS)
10889    opcode = T_MNEM_bcond;
10890  else
10891    opcode = inst.instruction;
10892
10893  if (unified_syntax
10894      && (inst.size_req == 4
10895	  || (inst.size_req != 2
10896	      && (inst.operands[0].hasreloc
10897		  || inst.reloc.exp.X_op == O_constant))))
10898    {
10899      inst.instruction = THUMB_OP32(opcode);
10900      if (cond == COND_ALWAYS)
10901	reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
10902      else
10903	{
10904	  gas_assert (cond != 0xF);
10905	  inst.instruction |= cond << 22;
10906	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
10907	}
10908    }
10909  else
10910    {
10911      inst.instruction = THUMB_OP16(opcode);
10912      if (cond == COND_ALWAYS)
10913	reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
10914      else
10915	{
10916	  inst.instruction |= cond << 8;
10917	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
10918	}
10919      /* Allow section relaxation.  */
10920      if (unified_syntax && inst.size_req != 2)
10921	inst.relax = opcode;
10922    }
10923  inst.reloc.type = reloc;
10924  inst.reloc.pc_rel = 1;
10925}
10926
10927/* Actually do the work for Thumb state bkpt and hlt.  The only difference
10928   between the two is the maximum immediate allowed - which is passed in
10929   RANGE.  */
10930static void
10931do_t_bkpt_hlt1 (int range)
10932{
10933  constraint (inst.cond != COND_ALWAYS,
10934	      _("instruction is always unconditional"));
10935  if (inst.operands[0].present)
10936    {
10937      constraint (inst.operands[0].imm > range,
10938		  _("immediate value out of range"));
10939      inst.instruction |= inst.operands[0].imm;
10940    }
10941
10942  set_it_insn_type (NEUTRAL_IT_INSN);
10943}
10944
10945static void
10946do_t_hlt (void)
10947{
10948  do_t_bkpt_hlt1 (63);
10949}
10950
10951static void
10952do_t_bkpt (void)
10953{
10954  do_t_bkpt_hlt1 (255);
10955}
10956
10957static void
10958do_t_branch23 (void)
10959{
10960  set_it_insn_type_last ();
10961  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);
10962
10963  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
10964     this file.  We used to simply ignore the PLT reloc type here --
10965     the branch encoding is now needed to deal with TLSCALL relocs.
10966     So if we see a PLT reloc now, put it back to how it used to be to
10967     keep the preexisting behaviour.  */
10968  if (inst.reloc.type == BFD_RELOC_ARM_PLT32)
10969    inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
10970
10971#if defined(OBJ_COFF)
10972  /* If the destination of the branch is a defined symbol which does not have
10973     the THUMB_FUNC attribute, then we must be calling a function which has
10974     the (interfacearm) attribute.  We look for the Thumb entry point to that
10975     function and change the branch to refer to that function instead.	*/
10976  if (	 inst.reloc.exp.X_op == O_symbol
10977      && inst.reloc.exp.X_add_symbol != NULL
10978      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
10979      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
10980    inst.reloc.exp.X_add_symbol =
10981      find_real_start (inst.reloc.exp.X_add_symbol);
10982#endif
10983}
10984
10985static void
10986do_t_bx (void)
10987{
10988  set_it_insn_type_last ();
10989  inst.instruction |= inst.operands[0].reg << 3;
10990  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.	 The reloc
10991     should cause the alignment to be checked once it is known.	 This is
10992     because BX PC only works if the instruction is word aligned.  */
10993}
10994
10995static void
10996do_t_bxj (void)
10997{
10998  int Rm;
10999
11000  set_it_insn_type_last ();
11001  Rm = inst.operands[0].reg;
11002  reject_bad_reg (Rm);
11003  inst.instruction |= Rm << 16;
11004}
11005
11006static void
11007do_t_clz (void)
11008{
11009  unsigned Rd;
11010  unsigned Rm;
11011
11012  Rd = inst.operands[0].reg;
11013  Rm = inst.operands[1].reg;
11014
11015  reject_bad_reg (Rd);
11016  reject_bad_reg (Rm);
11017
11018  inst.instruction |= Rd << 8;
11019  inst.instruction |= Rm << 16;
11020  inst.instruction |= Rm;
11021}
11022
11023static void
11024do_t_cps (void)
11025{
11026  set_it_insn_type (OUTSIDE_IT_INSN);
11027  inst.instruction |= inst.operands[0].imm;
11028}
11029
11030static void
11031do_t_cpsi (void)
11032{
11033  set_it_insn_type (OUTSIDE_IT_INSN);
11034  if (unified_syntax
11035      && (inst.operands[1].present || inst.size_req == 4)
11036      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
11037    {
11038      unsigned int imod = (inst.instruction & 0x0030) >> 4;
11039      inst.instruction = 0xf3af8000;
11040      inst.instruction |= imod << 9;
11041      inst.instruction |= inst.operands[0].imm << 5;
11042      if (inst.operands[1].present)
11043	inst.instruction |= 0x100 | inst.operands[1].imm;
11044    }
11045  else
11046    {
11047      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
11048		  && (inst.operands[0].imm & 4),
11049		  _("selected processor does not support 'A' form "
11050		    "of this instruction"));
11051      constraint (inst.operands[1].present || inst.size_req == 4,
11052		  _("Thumb does not support the 2-argument "
11053		    "form of this instruction"));
11054      inst.instruction |= inst.operands[0].imm;
11055    }
11056}
11057
11058/* THUMB CPY instruction (argument parse).  */
11059
11060static void
11061do_t_cpy (void)
11062{
11063  if (inst.size_req == 4)
11064    {
11065      inst.instruction = THUMB_OP32 (T_MNEM_mov);
11066      inst.instruction |= inst.operands[0].reg << 8;
11067      inst.instruction |= inst.operands[1].reg;
11068    }
11069  else
11070    {
11071      inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
11072      inst.instruction |= (inst.operands[0].reg & 0x7);
11073      inst.instruction |= inst.operands[1].reg << 3;
11074    }
11075}
11076
11077static void
11078do_t_cbz (void)
11079{
11080  set_it_insn_type (OUTSIDE_IT_INSN);
11081  constraint (inst.operands[0].reg > 7, BAD_HIREG);
11082  inst.instruction |= inst.operands[0].reg;
11083  inst.reloc.pc_rel = 1;
11084  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
11085}
11086
11087static void
11088do_t_dbg (void)
11089{
11090  inst.instruction |= inst.operands[0].imm;
11091}
11092
11093static void
11094do_t_div (void)
11095{
11096  unsigned Rd, Rn, Rm;
11097
11098  Rd = inst.operands[0].reg;
11099  Rn = (inst.operands[1].present
11100	? inst.operands[1].reg : Rd);
11101  Rm = inst.operands[2].reg;
11102
11103  reject_bad_reg (Rd);
11104  reject_bad_reg (Rn);
11105  reject_bad_reg (Rm);
11106
11107  inst.instruction |= Rd << 8;
11108  inst.instruction |= Rn << 16;
11109  inst.instruction |= Rm;
11110}
11111
11112static void
11113do_t_hint (void)
11114{
11115  if (unified_syntax && inst.size_req == 4)
11116    inst.instruction = THUMB_OP32 (inst.instruction);
11117  else
11118    inst.instruction = THUMB_OP16 (inst.instruction);
11119}
11120
11121static void
11122do_t_it (void)
11123{
11124  unsigned int cond = inst.operands[0].imm;
11125
11126  set_it_insn_type (IT_INSN);
11127  now_it.mask = (inst.instruction & 0xf) | 0x10;
11128  now_it.cc = cond;
11129  now_it.warn_deprecated = FALSE;
11130
11131  /* If the condition is a negative condition, invert the mask.  */
11132  if ((cond & 0x1) == 0x0)
11133    {
11134      unsigned int mask = inst.instruction & 0x000f;
11135
11136      if ((mask & 0x7) == 0)
11137	{
11138	  /* No conversion needed.  */
11139	  now_it.block_length = 1;
11140	}
11141      else if ((mask & 0x3) == 0)
11142	{
11143	  mask ^= 0x8;
11144	  now_it.block_length = 2;
11145	}
11146      else if ((mask & 0x1) == 0)
11147	{
11148	  mask ^= 0xC;
11149	  now_it.block_length = 3;
11150	}
11151      else
11152	{
11153	  mask ^= 0xE;
11154	  now_it.block_length = 4;
11155	}
11156
11157      inst.instruction &= 0xfff0;
11158      inst.instruction |= mask;
11159    }
11160
11161  inst.instruction |= cond << 4;
11162}
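/* Example of the mask rewrite above (illustrative only): if the incoming
   mask is 0b1100, then (mask & 0x3) == 0, so the block length is 2 and
   bit 3 is toggled when the condition has a clear least-significant bit,
   giving 0b0100; conditions with the LSB set keep the mask unchanged.  */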
11163
11164/* Helper function used for both push/pop and ldm/stm.  */
11165static void
11166encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
11167{
11168  bfd_boolean load;
11169
11170  load = (inst.instruction & (1 << 20)) != 0;
11171
11172  if (mask & (1 << 13))
11173    inst.error =  _("SP not allowed in register list");
11174
11175  if ((mask & (1 << base)) != 0
11176      && writeback)
11177    inst.error = _("having the base register in the register list when "
11178		   "using write back is UNPREDICTABLE");
11179
11180  if (load)
11181    {
11182      if (mask & (1 << 15))
11183	{
11184	  if (mask & (1 << 14))
11185	    inst.error = _("LR and PC should not both be in register list");
11186	  else
11187	    set_it_insn_type_last ();
11188	}
11189    }
11190  else
11191    {
11192      if (mask & (1 << 15))
11193	inst.error = _("PC not allowed in register list");
11194    }
11195
11196  if ((mask & (mask - 1)) == 0)
11197    {
11198      /* Single register transfers implemented as str/ldr.  */
11199      if (writeback)
11200	{
11201	  if (inst.instruction & (1 << 23))
11202	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
11203	  else
11204	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
11205	}
11206      else
11207	{
11208	  if (inst.instruction & (1 << 23))
11209	    inst.instruction = 0x00800000; /* ia -> [base] */
11210	  else
11211	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
11212	}
11213
11214      inst.instruction |= 0xf8400000;
11215      if (load)
11216	inst.instruction |= 0x00100000;
11217
11218      mask = ffs (mask) - 1;
11219      mask <<= 12;
11220    }
11221  else if (writeback)
11222    inst.instruction |= WRITE_BACK;
11223
11224  inst.instruction |= mask;
11225  inst.instruction |= base << 16;
11226}
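/* Illustrative example (field-level only, not verified against the
   assembler): "ldmia sp!, {r3}" reaches the single-register path above,
   so the instruction is rewritten as a post-indexed load, with base r13
   in bits [19:16], r3 (ffs (mask) - 1) in bits [15:12], and the
   0xf8400000 load/store template with the load bit set.  */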
11227
11228static void
11229do_t_ldmstm (void)
11230{
11231  /* This really doesn't seem worth it.  */
11232  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
11233	      _("expression too complex"));
11234  constraint (inst.operands[1].writeback,
11235	      _("Thumb load/store multiple does not support {reglist}^"));
11236
11237  if (unified_syntax)
11238    {
11239      bfd_boolean narrow;
11240      unsigned mask;
11241
11242      narrow = FALSE;
11243      /* See if we can use a 16-bit instruction.  */
11244      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
11245	  && inst.size_req != 4
11246	  && !(inst.operands[1].imm & ~0xff))
11247	{
11248	  mask = 1 << inst.operands[0].reg;
11249
11250	  if (inst.operands[0].reg <= 7)
11251	    {
11252	      if (inst.instruction == T_MNEM_stmia
11253		  ? inst.operands[0].writeback
11254		  : (inst.operands[0].writeback
11255		     == !(inst.operands[1].imm & mask)))
11256		{
11257		  if (inst.instruction == T_MNEM_stmia
11258		      && (inst.operands[1].imm & mask)
11259		      && (inst.operands[1].imm & (mask - 1)))
11260		    as_warn (_("value stored for r%d is UNKNOWN"),
11261			     inst.operands[0].reg);
11262
11263		  inst.instruction = THUMB_OP16 (inst.instruction);
11264		  inst.instruction |= inst.operands[0].reg << 8;
11265		  inst.instruction |= inst.operands[1].imm;
11266		  narrow = TRUE;
11267		}
11268	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
11269		{
11270		  /* This means there is 1 register in the reg list, in one
11271		     of 3 situations:
11272		     1. Instruction is stmia, but without writeback.
11273		     2. ldmia without writeback, but with Rn not in the reglist.
11274		     3. ldmia with writeback, but with Rn in the reglist.
11275		     Case 3 is UNPREDICTABLE behaviour, so we handle cases 1
11276		     and 2, which can be converted into a 16-bit str or ldr.
11277		     The SP cases are handled below.  */
11278		  unsigned long opcode;
11279		  /* First, record an error for Case 3.  */
11280		  if (inst.operands[1].imm & mask
11281		      && inst.operands[0].writeback)
11282		    inst.error =
11283			_("having the base register in the register list when "
11284			  "using write back is UNPREDICTABLE");
11285
11286		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
11287							     : T_MNEM_ldr);
11288		  inst.instruction = THUMB_OP16 (opcode);
11289		  inst.instruction |= inst.operands[0].reg << 3;
11290		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
11291		  narrow = TRUE;
11292		}
11293	    }
11294	  else if (inst.operands[0].reg == REG_SP)
11295	    {
11296	      if (inst.operands[0].writeback)
11297		{
11298		  inst.instruction =
11299			THUMB_OP16 (inst.instruction == T_MNEM_stmia
11300				    ? T_MNEM_push : T_MNEM_pop);
11301		  inst.instruction |= inst.operands[1].imm;
11302		  narrow = TRUE;
11303		}
11304	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
11305		{
11306		  inst.instruction =
11307			THUMB_OP16 (inst.instruction == T_MNEM_stmia
11308				    ? T_MNEM_str_sp : T_MNEM_ldr_sp);
11309		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
11310		  narrow = TRUE;
11311		}
11312	    }
11313	}
11314
11315      if (!narrow)
11316	{
11317	  if (inst.instruction < 0xffff)
11318	    inst.instruction = THUMB_OP32 (inst.instruction);
11319
11320	  encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
11321				inst.operands[0].writeback);
11322	}
11323    }
11324  else
11325    {
11326      constraint (inst.operands[0].reg > 7
11327		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
11328      constraint (inst.instruction != T_MNEM_ldmia
11329		  && inst.instruction != T_MNEM_stmia,
11330		  _("Thumb-2 instruction only valid in unified syntax"));
11331      if (inst.instruction == T_MNEM_stmia)
11332	{
11333	  if (!inst.operands[0].writeback)
11334	    as_warn (_("this instruction will write back the base register"));
11335	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
11336	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
11337	    as_warn (_("value stored for r%d is UNKNOWN"),
11338		     inst.operands[0].reg);
11339	}
11340      else
11341	{
11342	  if (!inst.operands[0].writeback
11343	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
11344	    as_warn (_("this instruction will write back the base register"));
11345	  else if (inst.operands[0].writeback
11346		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
11347	    as_warn (_("this instruction will not write back the base register"));
11348	}
11349
11350      inst.instruction = THUMB_OP16 (inst.instruction);
11351      inst.instruction |= inst.operands[0].reg << 8;
11352      inst.instruction |= inst.operands[1].imm;
11353    }
11354}
11355
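/* Encode Thumb-2 LDREX.  Only the [Rn {, #imm}] addressing form is
   accepted; the optional immediate offset is resolved later through a
   BFD_RELOC_ARM_T32_OFFSET_U8 fixup.  */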
11356static void
11357do_t_ldrex (void)
11358{
11359  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
11360	      || inst.operands[1].postind || inst.operands[1].writeback
11361	      || inst.operands[1].immisreg || inst.operands[1].shifted
11362	      || inst.operands[1].negative,
11363	      BAD_ADDR_MODE);
11364
11365  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
11366
11367  inst.instruction |= inst.operands[0].reg << 12;
11368  inst.instruction |= inst.operands[1].reg << 16;
11369  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
11370}
11371
11372static void
11373do_t_ldrexd (void)
11374{
11375  if (!inst.operands[1].present)
11376    {
11377      constraint (inst.operands[0].reg == REG_LR,
11378		  _("r14 not allowed as first register "
11379		    "when second register is omitted"));
11380      inst.operands[1].reg = inst.operands[0].reg + 1;
11381    }
11382  constraint (inst.operands[0].reg == inst.operands[1].reg,
11383	      BAD_OVERLAP);
11384
11385  inst.instruction |= inst.operands[0].reg << 12;
11386  inst.instruction |= inst.operands[1].reg << 8;
11387  inst.instruction |= inst.operands[2].reg << 16;
11388}
11389
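/* Encode a Thumb single-register load or store (LDR/STR and the byte,
   halfword and signed variants).  Under unified syntax the narrowest
   available encoding is chosen: a literal-pool load, a 16-bit form for
   low registers or SP/PC-relative addressing, or otherwise the 32-bit
   Thumb-2 form.  Classic (pre-UAL) syntax is limited to the 16-bit
   encodings handled at the end of this function.  */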
11390static void
11391do_t_ldst (void)
11392{
11393  unsigned long opcode;
11394  int Rn;
11395
11396  if (inst.operands[0].isreg
11397      && !inst.operands[0].preind
11398      && inst.operands[0].reg == REG_PC)
11399    set_it_insn_type_last ();
11400
11401  opcode = inst.instruction;
11402  if (unified_syntax)
11403    {
11404      if (!inst.operands[1].isreg)
11405	{
11406	  if (opcode <= 0xffff)
11407	    inst.instruction = THUMB_OP32 (opcode);
11408	  if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
11409	    return;
11410	}
11411      if (inst.operands[1].isreg
11412	  && !inst.operands[1].writeback
11413	  && !inst.operands[1].shifted && !inst.operands[1].postind
11414	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
11415	  && opcode <= 0xffff
11416	  && inst.size_req != 4)
11417	{
11418	  /* Insn may have a 16-bit form.  */
11419	  Rn = inst.operands[1].reg;
11420	  if (inst.operands[1].immisreg)
11421	    {
11422	      inst.instruction = THUMB_OP16 (opcode);
11423	      /* [Rn, Rm] */
11424	      if (Rn <= 7 && inst.operands[1].imm <= 7)
11425		goto op16;
11426	      else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
11427		reject_bad_reg (inst.operands[1].imm);
11428	    }
11429	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
11430		    && opcode != T_MNEM_ldrsb)
11431		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
11432		   || (Rn == REG_SP && opcode == T_MNEM_str))
11433	    {
11434	      /* [Rn, #const] */
11435	      if (Rn > 7)
11436		{
11437		  if (Rn == REG_PC)
11438		    {
11439		      if (inst.reloc.pc_rel)
11440			opcode = T_MNEM_ldr_pc2;
11441		      else
11442			opcode = T_MNEM_ldr_pc;
11443		    }
11444		  else
11445		    {
11446		      if (opcode == T_MNEM_ldr)
11447			opcode = T_MNEM_ldr_sp;
11448		      else
11449			opcode = T_MNEM_str_sp;
11450		    }
11451		  inst.instruction = inst.operands[0].reg << 8;
11452		}
11453	      else
11454		{
11455		  inst.instruction = inst.operands[0].reg;
11456		  inst.instruction |= inst.operands[1].reg << 3;
11457		}
11458	      inst.instruction |= THUMB_OP16 (opcode);
11459	      if (inst.size_req == 2)
11460		inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
11461	      else
11462		inst.relax = opcode;
11463	      return;
11464	    }
11465	}
11466      /* Definitely a 32-bit variant.  */
11467
11468      /* Warning for Erratum 752419.  */
11469      if (opcode == T_MNEM_ldr
11470	  && inst.operands[0].reg == REG_SP
11471	  && inst.operands[1].writeback == 1
11472	  && !inst.operands[1].immisreg)
11473	{
11474	  if (no_cpu_selected ()
11475	      || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
11476		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
11477		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
11478	    as_warn (_("This instruction may be unpredictable "
11479		       "if executed on M-profile cores "
11480		       "with interrupts enabled."));
11481	}
11482
11483      /* Do some validations regarding addressing modes.  */
11484      if (inst.operands[1].immisreg)
11485	reject_bad_reg (inst.operands[1].imm);
11486
11487      constraint (inst.operands[1].writeback == 1
11488		  && inst.operands[0].reg == inst.operands[1].reg,
11489		  BAD_OVERLAP);
11490
11491      inst.instruction = THUMB_OP32 (opcode);
11492      inst.instruction |= inst.operands[0].reg << 12;
11493      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
11494      check_ldr_r15_aligned ();
11495      return;
11496    }
11497
11498  constraint (inst.operands[0].reg > 7, BAD_HIREG);
11499
11500  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
11501    {
11502      /* Only [Rn,Rm] is acceptable.  */
11503      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
11504      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
11505		  || inst.operands[1].postind || inst.operands[1].shifted
11506		  || inst.operands[1].negative,
11507		  _("Thumb does not support this addressing mode"));
11508      inst.instruction = THUMB_OP16 (inst.instruction);
11509      goto op16;
11510    }
11511
11512  inst.instruction = THUMB_OP16 (inst.instruction);
11513  if (!inst.operands[1].isreg)
11514    if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
11515      return;
11516
11517  constraint (!inst.operands[1].preind
11518	      || inst.operands[1].shifted
11519	      || inst.operands[1].writeback,
11520	      _("Thumb does not support this addressing mode"));
11521  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
11522    {
11523      constraint (inst.instruction & 0x0600,
11524		  _("byte or halfword not valid for base register"));
11525      constraint (inst.operands[1].reg == REG_PC
11526		  && !(inst.instruction & THUMB_LOAD_BIT),
11527		  _("r15 based store not allowed"));
11528      constraint (inst.operands[1].immisreg,
11529		  _("invalid base register for register offset"));
11530
11531      if (inst.operands[1].reg == REG_PC)
11532	inst.instruction = T_OPCODE_LDR_PC;
11533      else if (inst.instruction & THUMB_LOAD_BIT)
11534	inst.instruction = T_OPCODE_LDR_SP;
11535      else
11536	inst.instruction = T_OPCODE_STR_SP;
11537
11538      inst.instruction |= inst.operands[0].reg << 8;
11539      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
11540      return;
11541    }
11542
11543  constraint (inst.operands[1].reg > 7, BAD_HIREG);
11544  if (!inst.operands[1].immisreg)
11545    {
11546      /* Immediate offset.  */
11547      inst.instruction |= inst.operands[0].reg;
11548      inst.instruction |= inst.operands[1].reg << 3;
11549      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
11550      return;
11551    }
11552
11553  /* Register offset.  */
11554  constraint (inst.operands[1].imm > 7, BAD_HIREG);
11555  constraint (inst.operands[1].negative,
11556	      _("Thumb does not support this addressing mode"));
11557
11558 op16:
11559  switch (inst.instruction)
11560    {
11561    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
11562    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
11563    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
11564    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
11565    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
11566    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
11567    case 0x5600 /* ldrsb */:
11568    case 0x5e00 /* ldrsh */: break;
11569    default: abort ();
11570    }
11571
11572  inst.instruction |= inst.operands[0].reg;
11573  inst.instruction |= inst.operands[1].reg << 3;
11574  inst.instruction |= inst.operands[1].imm << 6;
11575}
11576
11577static void
11578do_t_ldstd (void)
11579{
11580  if (!inst.operands[1].present)
11581    {
11582      inst.operands[1].reg = inst.operands[0].reg + 1;
11583      constraint (inst.operands[0].reg == REG_LR,
11584		  _("r14 not allowed here"));
11585      constraint (inst.operands[0].reg == REG_R12,
11586		  _("r12 not allowed here"));
11587    }
11588
11589  if (inst.operands[2].writeback
11590      && (inst.operands[0].reg == inst.operands[2].reg
11591      || inst.operands[1].reg == inst.operands[2].reg))
11592    as_warn (_("base register written back, and overlaps "
11593	       "one of transfer registers"));
11594
11595  inst.instruction |= inst.operands[0].reg << 12;
11596  inst.instruction |= inst.operands[1].reg << 8;
11597  encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
11598}
11599
11600static void
11601do_t_ldstt (void)
11602{
11603  inst.instruction |= inst.operands[0].reg << 12;
11604  encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
11605}
11606
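/* Encode Thumb-2 MLA/MLS-style multiply-accumulate: Rd goes in bits
   <11:8>, Rn in <19:16>, Rm in <3:0> and the accumulator Ra in
   <15:12>.  SP and PC are rejected for every operand.  */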
11607static void
11608do_t_mla (void)
11609{
11610  unsigned Rd, Rn, Rm, Ra;
11611
11612  Rd = inst.operands[0].reg;
11613  Rn = inst.operands[1].reg;
11614  Rm = inst.operands[2].reg;
11615  Ra = inst.operands[3].reg;
11616
11617  reject_bad_reg (Rd);
11618  reject_bad_reg (Rn);
11619  reject_bad_reg (Rm);
11620  reject_bad_reg (Ra);
11621
11622  inst.instruction |= Rd << 8;
11623  inst.instruction |= Rn << 16;
11624  inst.instruction |= Rm;
11625  inst.instruction |= Ra << 12;
11626}
11627
11628static void
11629do_t_mlal (void)
11630{
11631  unsigned RdLo, RdHi, Rn, Rm;
11632
11633  RdLo = inst.operands[0].reg;
11634  RdHi = inst.operands[1].reg;
11635  Rn = inst.operands[2].reg;
11636  Rm = inst.operands[3].reg;
11637
11638  reject_bad_reg (RdLo);
11639  reject_bad_reg (RdHi);
11640  reject_bad_reg (Rn);
11641  reject_bad_reg (Rm);
11642
11643  inst.instruction |= RdLo << 12;
11644  inst.instruction |= RdHi << 8;
11645  inst.instruction |= Rn << 16;
11646  inst.instruction |= Rm;
11647}
11648
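/* Encode MOV, MOVS and CMP.  Under unified syntax this selects between
   the 16-bit and 32-bit encodings, rewrites register-shifted moves as
   the corresponding shift instructions, and encodes MOVS PC, LR as
   SUBS PC, LR, #0.  Classic syntax only gets the 16-bit forms.  */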
11649static void
11650do_t_mov_cmp (void)
11651{
11652  unsigned Rn, Rm;
11653
11654  Rn = inst.operands[0].reg;
11655  Rm = inst.operands[1].reg;
11656
11657  if (Rn == REG_PC)
11658    set_it_insn_type_last ();
11659
11660  if (unified_syntax)
11661    {
11662      int r0off = (inst.instruction == T_MNEM_mov
11663		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
11664      unsigned long opcode;
11665      bfd_boolean narrow;
11666      bfd_boolean low_regs;
11667
11668      low_regs = (Rn <= 7 && Rm <= 7);
11669      opcode = inst.instruction;
11670      if (in_it_block ())
11671	narrow = opcode != T_MNEM_movs;
11672      else
11673	narrow = opcode != T_MNEM_movs || low_regs;
11674      if (inst.size_req == 4
11675	  || inst.operands[1].shifted)
11676	narrow = FALSE;
11677
11678      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
11679      if (opcode == T_MNEM_movs && inst.operands[1].isreg
11680	  && !inst.operands[1].shifted
11681	  && Rn == REG_PC
11682	  && Rm == REG_LR)
11683	{
11684	  inst.instruction = T2_SUBS_PC_LR;
11685	  return;
11686	}
11687
11688      if (opcode == T_MNEM_cmp)
11689	{
11690	  constraint (Rn == REG_PC, BAD_PC);
11691	  if (narrow)
11692	    {
11693	      /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
11694		 but valid.  */
11695	      warn_deprecated_sp (Rm);
11696	      /* R15 was documented as a valid choice for Rm in ARMv6,
11697		 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
11698		 tools reject R15, so we do too.  */
11699	      constraint (Rm == REG_PC, BAD_PC);
11700	    }
11701	  else
11702	    reject_bad_reg (Rm);
11703	}
11704      else if (opcode == T_MNEM_mov
11705	       || opcode == T_MNEM_movs)
11706	{
11707	  if (inst.operands[1].isreg)
11708	    {
11709	      if (opcode == T_MNEM_movs)
11710		{
11711		  reject_bad_reg (Rn);
11712		  reject_bad_reg (Rm);
11713		}
11714	      else if (narrow)
11715		{
11716		  /* This is mov.n.  */
11717		  if ((Rn == REG_SP || Rn == REG_PC)
11718		      && (Rm == REG_SP || Rm == REG_PC))
11719		    {
11720		      as_tsktsk (_("Use of r%u as a source register is "
11721				 "deprecated when r%u is the destination "
11722				 "register."), Rm, Rn);
11723		    }
11724		}
11725	      else
11726		{
11727		  /* This is mov.w.  */
11728		  constraint (Rn == REG_PC, BAD_PC);
11729		  constraint (Rm == REG_PC, BAD_PC);
11730		  constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
11731		}
11732	    }
11733	  else
11734	    reject_bad_reg (Rn);
11735	}
11736
11737      if (!inst.operands[1].isreg)
11738	{
11739	  /* Immediate operand.  */
11740	  if (!in_it_block () && opcode == T_MNEM_mov)
11741	    narrow = 0;
11742	  if (low_regs && narrow)
11743	    {
11744	      inst.instruction = THUMB_OP16 (opcode);
11745	      inst.instruction |= Rn << 8;
11746	      if (inst.size_req == 2)
11747		inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
11748	      else
11749		inst.relax = opcode;
11750	    }
11751	  else
11752	    {
11753	      inst.instruction = THUMB_OP32 (inst.instruction);
11754	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
11755	      inst.instruction |= Rn << r0off;
11756	      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
11757	    }
11758	}
11759      else if (inst.operands[1].shifted && inst.operands[1].immisreg
11760	       && (inst.instruction == T_MNEM_mov
11761		   || inst.instruction == T_MNEM_movs))
11762	{
11763	  /* Register shifts are encoded as separate shift instructions.  */
11764	  bfd_boolean flags = (inst.instruction == T_MNEM_movs);
11765
11766	  if (in_it_block ())
11767	    narrow = !flags;
11768	  else
11769	    narrow = flags;
11770
11771	  if (inst.size_req == 4)
11772	    narrow = FALSE;
11773
11774	  if (!low_regs || inst.operands[1].imm > 7)
11775	    narrow = FALSE;
11776
11777	  if (Rn != Rm)
11778	    narrow = FALSE;
11779
11780	  switch (inst.operands[1].shift_kind)
11781	    {
11782	    case SHIFT_LSL:
11783	      opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
11784	      break;
11785	    case SHIFT_ASR:
11786	      opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
11787	      break;
11788	    case SHIFT_LSR:
11789	      opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
11790	      break;
11791	    case SHIFT_ROR:
11792	      opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
11793	      break;
11794	    default:
11795	      abort ();
11796	    }
11797
11798	  inst.instruction = opcode;
11799	  if (narrow)
11800	    {
11801	      inst.instruction |= Rn;
11802	      inst.instruction |= inst.operands[1].imm << 3;
11803	    }
11804	  else
11805	    {
11806	      if (flags)
11807		inst.instruction |= CONDS_BIT;
11808
11809	      inst.instruction |= Rn << 8;
11810	      inst.instruction |= Rm << 16;
11811	      inst.instruction |= inst.operands[1].imm;
11812	    }
11813	}
11814      else if (!narrow)
11815	{
11816	  /* Some mov with immediate shift have narrow variants.
11817	     Register shifts are handled above.  */
11818	  if (low_regs && inst.operands[1].shifted
11819	      && (inst.instruction == T_MNEM_mov
11820		  || inst.instruction == T_MNEM_movs))
11821	    {
11822	      if (in_it_block ())
11823		narrow = (inst.instruction == T_MNEM_mov);
11824	      else
11825		narrow = (inst.instruction == T_MNEM_movs);
11826	    }
11827
11828	  if (narrow)
11829	    {
11830	      switch (inst.operands[1].shift_kind)
11831		{
11832		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
11833		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
11834		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
11835		default: narrow = FALSE; break;
11836		}
11837	    }
11838
11839	  if (narrow)
11840	    {
11841	      inst.instruction |= Rn;
11842	      inst.instruction |= Rm << 3;
11843	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
11844	    }
11845	  else
11846	    {
11847	      inst.instruction = THUMB_OP32 (inst.instruction);
11848	      inst.instruction |= Rn << r0off;
11849	      encode_thumb32_shifted_operand (1);
11850	    }
11851	}
11852      else
11853	switch (inst.instruction)
11854	  {
11855	  case T_MNEM_mov:
11856	    /* In v4t or v5t a move of two lowregs produces unpredictable
11857	       results. Don't allow this.  */
11858	    if (low_regs)
11859	      {
11860		constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
11861			    "MOV Rd, Rs with two low registers is not "
11862			    "permitted on this architecture");
11863		ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
11864					arm_ext_v6);
11865	      }
11866
11867	    inst.instruction = T_OPCODE_MOV_HR;
11868	    inst.instruction |= (Rn & 0x8) << 4;
11869	    inst.instruction |= (Rn & 0x7);
11870	    inst.instruction |= Rm << 3;
11871	    break;
11872
11873	  case T_MNEM_movs:
11874	    /* We know we have low registers at this point.
11875	       Generate LSLS Rd, Rs, #0.  */
11876	    inst.instruction = T_OPCODE_LSL_I;
11877	    inst.instruction |= Rn;
11878	    inst.instruction |= Rm << 3;
11879	    break;
11880
11881	  case T_MNEM_cmp:
11882	    if (low_regs)
11883	      {
11884		inst.instruction = T_OPCODE_CMP_LR;
11885		inst.instruction |= Rn;
11886		inst.instruction |= Rm << 3;
11887	      }
11888	    else
11889	      {
11890		inst.instruction = T_OPCODE_CMP_HR;
11891		inst.instruction |= (Rn & 0x8) << 4;
11892		inst.instruction |= (Rn & 0x7);
11893		inst.instruction |= Rm << 3;
11894	      }
11895	    break;
11896	  }
11897      return;
11898    }
11899
11900  inst.instruction = THUMB_OP16 (inst.instruction);
11901
11902  /* PR 10443: Do not silently ignore shifted operands.  */
11903  constraint (inst.operands[1].shifted,
11904	      _("shifts in CMP/MOV instructions are only supported in unified syntax"));
11905
11906  if (inst.operands[1].isreg)
11907    {
11908      if (Rn < 8 && Rm < 8)
11909	{
11910	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
11911	     since a MOV instruction produces unpredictable results.  */
11912	  if (inst.instruction == T_OPCODE_MOV_I8)
11913	    inst.instruction = T_OPCODE_ADD_I3;
11914	  else
11915	    inst.instruction = T_OPCODE_CMP_LR;
11916
11917	  inst.instruction |= Rn;
11918	  inst.instruction |= Rm << 3;
11919	}
11920      else
11921	{
11922	  if (inst.instruction == T_OPCODE_MOV_I8)
11923	    inst.instruction = T_OPCODE_MOV_HR;
11924	  else
11925	    inst.instruction = T_OPCODE_CMP_HR;
11926	  do_t_cpy ();
11927	}
11928    }
11929  else
11930    {
11931      constraint (Rn > 7,
11932		  _("only lo regs allowed with immediate"));
11933      inst.instruction |= Rn << 8;
11934      inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
11935    }
11936}
11937
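/* Encode Thumb-2 MOVW/MOVT.  A constant operand is split into the
   imm4:i:imm3:imm8 fields; :lower16: and :upper16: expressions are
   deferred to the Thumb MOVW/MOVT relocations instead.  */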
11938static void
11939do_t_mov16 (void)
11940{
11941  unsigned Rd;
11942  bfd_vma imm;
11943  bfd_boolean top;
11944
11945  top = (inst.instruction & 0x00800000) != 0;
11946  if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
11947    {
11948      constraint (top, _(":lower16: not allowed in this instruction"));
11949      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
11950    }
11951  else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
11952    {
11953      constraint (!top, _(":upper16: not allowed in this instruction"));
11954      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
11955    }
11956
11957  Rd = inst.operands[0].reg;
11958  reject_bad_reg (Rd);
11959
11960  inst.instruction |= Rd << 8;
11961  if (inst.reloc.type == BFD_RELOC_UNUSED)
11962    {
11963      imm = inst.reloc.exp.X_add_number;
11964      inst.instruction |= (imm & 0xf000) << 4;
11965      inst.instruction |= (imm & 0x0800) << 15;
11966      inst.instruction |= (imm & 0x0700) << 4;
11967      inst.instruction |= (imm & 0x00ff);
11968    }
11969}
11970
11971static void
11972do_t_mvn_tst (void)
11973{
11974  unsigned Rn, Rm;
11975
11976  Rn = inst.operands[0].reg;
11977  Rm = inst.operands[1].reg;
11978
11979  if (inst.instruction == T_MNEM_cmp
11980      || inst.instruction == T_MNEM_cmn)
11981    constraint (Rn == REG_PC, BAD_PC);
11982  else
11983    reject_bad_reg (Rn);
11984  reject_bad_reg (Rm);
11985
11986  if (unified_syntax)
11987    {
11988      int r0off = (inst.instruction == T_MNEM_mvn
11989		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
11990      bfd_boolean narrow;
11991
11992      if (inst.size_req == 4
11993	  || inst.instruction > 0xffff
11994	  || inst.operands[1].shifted
11995	  || Rn > 7 || Rm > 7)
11996	narrow = FALSE;
11997      else if (inst.instruction == T_MNEM_cmn
11998	       || inst.instruction == T_MNEM_tst)
11999	narrow = TRUE;
12000      else if (THUMB_SETS_FLAGS (inst.instruction))
12001	narrow = !in_it_block ();
12002      else
12003	narrow = in_it_block ();
12004
12005      if (!inst.operands[1].isreg)
12006	{
12007	  /* For an immediate, we always generate a 32-bit opcode;
12008	     section relaxation will shrink it later if possible.  */
12009	  if (inst.instruction < 0xffff)
12010	    inst.instruction = THUMB_OP32 (inst.instruction);
12011	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
12012	  inst.instruction |= Rn << r0off;
12013	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
12014	}
12015      else
12016	{
12017	  /* See if we can do this with a 16-bit instruction.  */
12018	  if (narrow)
12019	    {
12020	      inst.instruction = THUMB_OP16 (inst.instruction);
12021	      inst.instruction |= Rn;
12022	      inst.instruction |= Rm << 3;
12023	    }
12024	  else
12025	    {
12026	      constraint (inst.operands[1].shifted
12027			  && inst.operands[1].immisreg,
12028			  _("shift must be constant"));
12029	      if (inst.instruction < 0xffff)
12030		inst.instruction = THUMB_OP32 (inst.instruction);
12031	      inst.instruction |= Rn << r0off;
12032	      encode_thumb32_shifted_operand (1);
12033	    }
12034	}
12035    }
12036  else
12037    {
12038      constraint (inst.instruction > 0xffff
12039		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
12040      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
12041		  _("unshifted register required"));
12042      constraint (Rn > 7 || Rm > 7,
12043		  BAD_HIREG);
12044
12045      inst.instruction = THUMB_OP16 (inst.instruction);
12046      inst.instruction |= Rn;
12047      inst.instruction |= Rm << 3;
12048    }
12049}
12050
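/* Encode Thumb-2 MRS, covering the banked-register form as well as
   APSR/CPSR/SPSR and the M-profile special purpose registers.  */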
12051static void
12052do_t_mrs (void)
12053{
12054  unsigned Rd;
12055
12056  if (do_vfp_nsyn_mrs () == SUCCESS)
12057    return;
12058
12059  Rd = inst.operands[0].reg;
12060  reject_bad_reg (Rd);
12061  inst.instruction |= Rd << 8;
12062
12063  if (inst.operands[1].isreg)
12064    {
12065      unsigned br = inst.operands[1].reg;
12066      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
12067	as_bad (_("bad register for mrs"));
12068
12069      inst.instruction |= br & (0xf << 16);
12070      inst.instruction |= (br & 0x300) >> 4;
12071      inst.instruction |= (br & SPSR_BIT) >> 2;
12072    }
12073  else
12074    {
12075      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
12076
12077      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
12078	{
12079	  /* PR gas/12698:  The constraint is only applied for m_profile.
12080	     If the user has specified -march=all, we want to ignore it as
12081	     we are building for any CPU type, including non-m variants.  */
12082	  bfd_boolean m_profile =
12083	    !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
12084	  constraint ((flags != 0) && m_profile, _("selected processor does "
12085						   "not support requested special purpose register"));
12086	}
12087      else
12088	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
12089	   devices).  */
12090	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
12091		    _("'APSR', 'CPSR' or 'SPSR' expected"));
12092
12093      inst.instruction |= (flags & SPSR_BIT) >> 2;
12094      inst.instruction |= inst.operands[1].imm & 0xff;
12095      inst.instruction |= 0xf0000;
12096    }
12097}
12098
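/* Encode Thumb-2 MSR.  Only the register source form exists in Thumb;
   the accepted flag fields depend on whether an M-profile core has
   been selected.  */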
12099static void
12100do_t_msr (void)
12101{
12102  int flags;
12103  unsigned Rn;
12104
12105  if (do_vfp_nsyn_msr () == SUCCESS)
12106    return;
12107
12108  constraint (!inst.operands[1].isreg,
12109	      _("Thumb encoding does not support an immediate here"));
12110
12111  if (inst.operands[0].isreg)
12112    flags = (int)(inst.operands[0].reg);
12113  else
12114    flags = inst.operands[0].imm;
12115
12116  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
12117    {
12118      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
12119
12120      /* PR gas/12698:  The constraint is only applied for m_profile.
12121	 If the user has specified -march=all, we want to ignore it as
12122	 we are building for any CPU type, including non-m variants.  */
12123      bfd_boolean m_profile =
12124	!ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
12125      constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
12126	   && (bits & ~(PSR_s | PSR_f)) != 0)
12127	  || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
12128	      && bits != PSR_f)) && m_profile,
12129	  _("selected processor does not support requested special "
12130	    "purpose register"));
12131    }
12132  else
12133    constraint ((flags & 0xff) != 0, _("selected processor does not support "
12134		 "requested special purpose register"));
12135
12136  Rn = inst.operands[1].reg;
12137  reject_bad_reg (Rn);
12138
12139  inst.instruction |= (flags & SPSR_BIT) >> 2;
12140  inst.instruction |= (flags & 0xf0000) >> 8;
12141  inst.instruction |= (flags & 0x300) >> 4;
12142  inst.instruction |= (flags & 0xff);
12143  inst.instruction |= Rn << 16;
12144}
12145
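/* Encode MUL/MULS.  The 16-bit form is two-operand, so it is only used
   when the destination overlaps one of the sources and all registers
   are low; otherwise the 32-bit Thumb-2 MUL, which must not set the
   flags, is generated.  */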
12146static void
12147do_t_mul (void)
12148{
12149  bfd_boolean narrow;
12150  unsigned Rd, Rn, Rm;
12151
12152  if (!inst.operands[2].present)
12153    inst.operands[2].reg = inst.operands[0].reg;
12154
12155  Rd = inst.operands[0].reg;
12156  Rn = inst.operands[1].reg;
12157  Rm = inst.operands[2].reg;
12158
12159  if (unified_syntax)
12160    {
12161      if (inst.size_req == 4
12162	  || (Rd != Rn
12163	      && Rd != Rm)
12164	  || Rn > 7
12165	  || Rm > 7)
12166	narrow = FALSE;
12167      else if (inst.instruction == T_MNEM_muls)
12168	narrow = !in_it_block ();
12169      else
12170	narrow = in_it_block ();
12171    }
12172  else
12173    {
12174      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
12175      constraint (Rn > 7 || Rm > 7,
12176		  BAD_HIREG);
12177      narrow = TRUE;
12178    }
12179
12180  if (narrow)
12181    {
12182      /* 16-bit MULS/Conditional MUL.  */
12183      inst.instruction = THUMB_OP16 (inst.instruction);
12184      inst.instruction |= Rd;
12185
12186      if (Rd == Rn)
12187	inst.instruction |= Rm << 3;
12188      else if (Rd == Rm)
12189	inst.instruction |= Rn << 3;
12190      else
12191	constraint (1, _("dest must overlap one source register"));
12192    }
12193  else
12194    {
12195      constraint (inst.instruction != T_MNEM_mul,
12196		  _("Thumb-2 MUL must not set flags"));
12197      /* 32-bit MUL.  */
12198      inst.instruction = THUMB_OP32 (inst.instruction);
12199      inst.instruction |= Rd << 8;
12200      inst.instruction |= Rn << 16;
12201      inst.instruction |= Rm << 0;
12202
12203      reject_bad_reg (Rd);
12204      reject_bad_reg (Rn);
12205      reject_bad_reg (Rm);
12206    }
12207}
12208
12209static void
12210do_t_mull (void)
12211{
12212  unsigned RdLo, RdHi, Rn, Rm;
12213
12214  RdLo = inst.operands[0].reg;
12215  RdHi = inst.operands[1].reg;
12216  Rn = inst.operands[2].reg;
12217  Rm = inst.operands[3].reg;
12218
12219  reject_bad_reg (RdLo);
12220  reject_bad_reg (RdHi);
12221  reject_bad_reg (Rn);
12222  reject_bad_reg (Rm);
12223
12224  inst.instruction |= RdLo << 12;
12225  inst.instruction |= RdHi << 8;
12226  inst.instruction |= Rn << 16;
12227  inst.instruction |= Rm;
12228
12229  if (RdLo == RdHi)
12230    as_tsktsk (_("rdhi and rdlo must be different"));
12231}
12232
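/* Encode NOP and the other hint instructions.  Hints that need more
   than four bits, or an explicit .w suffix, use the 32-bit encoding;
   on cores without Thumb-2 a plain NOP falls back to MOV r8, r8
   (0x46c0).  */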
12233static void
12234do_t_nop (void)
12235{
12236  set_it_insn_type (NEUTRAL_IT_INSN);
12237
12238  if (unified_syntax)
12239    {
12240      if (inst.size_req == 4 || inst.operands[0].imm > 15)
12241	{
12242	  inst.instruction = THUMB_OP32 (inst.instruction);
12243	  inst.instruction |= inst.operands[0].imm;
12244	}
12245      else
12246	{
12247	  /* PR9722: Check for Thumb2 availability before
12248	     generating a thumb2 nop instruction.  */
12249	  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
12250	    {
12251	      inst.instruction = THUMB_OP16 (inst.instruction);
12252	      inst.instruction |= inst.operands[0].imm << 4;
12253	    }
12254	  else
12255	    inst.instruction = 0x46c0;
12256	}
12257    }
12258  else
12259    {
12260      constraint (inst.operands[0].present,
12261		  _("Thumb does not support NOP with hints"));
12262      inst.instruction = 0x46c0;
12263    }
12264}
12265
12266static void
12267do_t_neg (void)
12268{
12269  if (unified_syntax)
12270    {
12271      bfd_boolean narrow;
12272
12273      if (THUMB_SETS_FLAGS (inst.instruction))
12274	narrow = !in_it_block ();
12275      else
12276	narrow = in_it_block ();
12277      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
12278	narrow = FALSE;
12279      if (inst.size_req == 4)
12280	narrow = FALSE;
12281
12282      if (!narrow)
12283	{
12284	  inst.instruction = THUMB_OP32 (inst.instruction);
12285	  inst.instruction |= inst.operands[0].reg << 8;
12286	  inst.instruction |= inst.operands[1].reg << 16;
12287	}
12288      else
12289	{
12290	  inst.instruction = THUMB_OP16 (inst.instruction);
12291	  inst.instruction |= inst.operands[0].reg;
12292	  inst.instruction |= inst.operands[1].reg << 3;
12293	}
12294    }
12295  else
12296    {
12297      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
12298		  BAD_HIREG);
12299      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
12300
12301      inst.instruction = THUMB_OP16 (inst.instruction);
12302      inst.instruction |= inst.operands[0].reg;
12303      inst.instruction |= inst.operands[1].reg << 3;
12304    }
12305}
12306
12307static void
12308do_t_orn (void)
12309{
12310  unsigned Rd, Rn;
12311
12312  Rd = inst.operands[0].reg;
12313  Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
12314
12315  reject_bad_reg (Rd);
12316  /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN.  */
12317  reject_bad_reg (Rn);
12318
12319  inst.instruction |= Rd << 8;
12320  inst.instruction |= Rn << 16;
12321
12322  if (!inst.operands[2].isreg)
12323    {
12324      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
12325      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
12326    }
12327  else
12328    {
12329      unsigned Rm;
12330
12331      Rm = inst.operands[2].reg;
12332      reject_bad_reg (Rm);
12333
12334      constraint (inst.operands[2].shifted
12335		  && inst.operands[2].immisreg,
12336		  _("shift must be constant"));
12337      encode_thumb32_shifted_operand (2);
12338    }
12339}
12340
12341static void
12342do_t_pkhbt (void)
12343{
12344  unsigned Rd, Rn, Rm;
12345
12346  Rd = inst.operands[0].reg;
12347  Rn = inst.operands[1].reg;
12348  Rm = inst.operands[2].reg;
12349
12350  reject_bad_reg (Rd);
12351  reject_bad_reg (Rn);
12352  reject_bad_reg (Rm);
12353
12354  inst.instruction |= Rd << 8;
12355  inst.instruction |= Rn << 16;
12356  inst.instruction |= Rm;
12357  if (inst.operands[3].present)
12358    {
12359      unsigned int val = inst.reloc.exp.X_add_number;
12360      constraint (inst.reloc.exp.X_op != O_constant,
12361		  _("expression too complex"));
12362      inst.instruction |= (val & 0x1c) << 10;
12363      inst.instruction |= (val & 0x03) << 6;
12364    }
12365}
12366
12367static void
12368do_t_pkhtb (void)
12369{
12370  if (!inst.operands[3].present)
12371    {
12372      unsigned Rtmp;
12373
12374      inst.instruction &= ~0x00000020;
12375
12376      /* PR 10168.  Swap the Rm and Rn registers.  */
12377      Rtmp = inst.operands[1].reg;
12378      inst.operands[1].reg = inst.operands[2].reg;
12379      inst.operands[2].reg = Rtmp;
12380    }
12381  do_t_pkhbt ();
12382}
12383
12384static void
12385do_t_pld (void)
12386{
12387  if (inst.operands[0].immisreg)
12388    reject_bad_reg (inst.operands[0].imm);
12389
12390  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
12391}
12392
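/* Encode PUSH/POP.  Register lists limited to r0-r7, optionally plus
   LR for PUSH or PC for POP, use the 16-bit encoding; anything else
   needs the 32-bit LDM/STM form, which is only available in unified
   syntax.  */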
12393static void
12394do_t_push_pop (void)
12395{
12396  unsigned mask;
12397
12398  constraint (inst.operands[0].writeback,
12399	      _("push/pop do not support {reglist}^"));
12400  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
12401	      _("expression too complex"));
12402
12403  mask = inst.operands[0].imm;
12404  if (inst.size_req != 4 && (mask & ~0xff) == 0)
12405    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
12406  else if (inst.size_req != 4
12407	   && (mask & ~0xff) == (1 << (inst.instruction == T_MNEM_push
12408				       ? REG_LR : REG_PC)))
12409    {
12410      inst.instruction = THUMB_OP16 (inst.instruction);
12411      inst.instruction |= THUMB_PP_PC_LR;
12412      inst.instruction |= mask & 0xff;
12413    }
12414  else if (unified_syntax)
12415    {
12416      inst.instruction = THUMB_OP32 (inst.instruction);
12417      encode_thumb2_ldmstm (13, mask, TRUE);
12418    }
12419  else
12420    {
12421      inst.error = _("invalid register list to push/pop instruction");
12422      return;
12423    }
12424}
12425
12426static void
12427do_t_rbit (void)
12428{
12429  unsigned Rd, Rm;
12430
12431  Rd = inst.operands[0].reg;
12432  Rm = inst.operands[1].reg;
12433
12434  reject_bad_reg (Rd);
12435  reject_bad_reg (Rm);
12436
12437  inst.instruction |= Rd << 8;
12438  inst.instruction |= Rm << 16;
12439  inst.instruction |= Rm;
12440}
12441
12442static void
12443do_t_rev (void)
12444{
12445  unsigned Rd, Rm;
12446
12447  Rd = inst.operands[0].reg;
12448  Rm = inst.operands[1].reg;
12449
12450  reject_bad_reg (Rd);
12451  reject_bad_reg (Rm);
12452
12453  if (Rd <= 7 && Rm <= 7
12454      && inst.size_req != 4)
12455    {
12456      inst.instruction = THUMB_OP16 (inst.instruction);
12457      inst.instruction |= Rd;
12458      inst.instruction |= Rm << 3;
12459    }
12460  else if (unified_syntax)
12461    {
12462      inst.instruction = THUMB_OP32 (inst.instruction);
12463      inst.instruction |= Rd << 8;
12464      inst.instruction |= Rm << 16;
12465      inst.instruction |= Rm;
12466    }
12467  else
12468    inst.error = BAD_HIREG;
12469}
12470
12471static void
12472do_t_rrx (void)
12473{
12474  unsigned Rd, Rm;
12475
12476  Rd = inst.operands[0].reg;
12477  Rm = inst.operands[1].reg;
12478
12479  reject_bad_reg (Rd);
12480  reject_bad_reg (Rm);
12481
12482  inst.instruction |= Rd << 8;
12483  inst.instruction |= Rm;
12484}
12485
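/* Encode RSB/RSBS.  The only narrow form is RSBS Rd, Rn, #0, which is
   emitted as a 16-bit NEGS; everything else uses the 32-bit Thumb-2
   encoding with a modified immediate or shifted register operand.  */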
12486static void
12487do_t_rsb (void)
12488{
12489  unsigned Rd, Rs;
12490
12491  Rd = inst.operands[0].reg;
12492  Rs = (inst.operands[1].present
12493	? inst.operands[1].reg    /* Rd, Rs, foo */
12494	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
12495
12496  reject_bad_reg (Rd);
12497  reject_bad_reg (Rs);
12498  if (inst.operands[2].isreg)
12499    reject_bad_reg (inst.operands[2].reg);
12500
12501  inst.instruction |= Rd << 8;
12502  inst.instruction |= Rs << 16;
12503  if (!inst.operands[2].isreg)
12504    {
12505      bfd_boolean narrow;
12506
12507      if ((inst.instruction & 0x00100000) != 0)
12508	narrow = !in_it_block ();
12509      else
12510	narrow = in_it_block ();
12511
12512      if (Rd > 7 || Rs > 7)
12513	narrow = FALSE;
12514
12515      if (inst.size_req == 4 || !unified_syntax)
12516	narrow = FALSE;
12517
12518      if (inst.reloc.exp.X_op != O_constant
12519	  || inst.reloc.exp.X_add_number != 0)
12520	narrow = FALSE;
12521
12522      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
12523	 relaxation, but it doesn't seem worth the hassle.  */
12524      if (narrow)
12525	{
12526	  inst.reloc.type = BFD_RELOC_UNUSED;
12527	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
12528	  inst.instruction |= Rs << 3;
12529	  inst.instruction |= Rd;
12530	}
12531      else
12532	{
12533	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
12534	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
12535	}
12536    }
12537  else
12538    encode_thumb32_shifted_operand (2);
12539}
12540
12541static void
12542do_t_setend (void)
12543{
12544  if (warn_on_deprecated
12545      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
12546    as_tsktsk (_("setend use is deprecated for ARMv8"));
12547
12548  set_it_insn_type (OUTSIDE_IT_INSN);
12549  if (inst.operands[0].imm)
12550    inst.instruction |= 0x8;
12551}
12552
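/* Encode the shift instructions (ASR, LSL, LSR, ROR).  Narrow
   encodings are used when the registers and IT-block context permit;
   wide register shifts are encoded directly, while wide immediate
   shifts are emitted as MOV/MOVS with a shifted operand.  */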
12553static void
12554do_t_shift (void)
12555{
12556  if (!inst.operands[1].present)
12557    inst.operands[1].reg = inst.operands[0].reg;
12558
12559  if (unified_syntax)
12560    {
12561      bfd_boolean narrow;
12562      int shift_kind;
12563
12564      switch (inst.instruction)
12565	{
12566	case T_MNEM_asr:
12567	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
12568	case T_MNEM_lsl:
12569	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
12570	case T_MNEM_lsr:
12571	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
12572	case T_MNEM_ror:
12573	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
12574	default: abort ();
12575	}
12576
12577      if (THUMB_SETS_FLAGS (inst.instruction))
12578	narrow = !in_it_block ();
12579      else
12580	narrow = in_it_block ();
12581      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
12582	narrow = FALSE;
12583      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
12584	narrow = FALSE;
12585      if (inst.operands[2].isreg
12586	  && (inst.operands[1].reg != inst.operands[0].reg
12587	      || inst.operands[2].reg > 7))
12588	narrow = FALSE;
12589      if (inst.size_req == 4)
12590	narrow = FALSE;
12591
12592      reject_bad_reg (inst.operands[0].reg);
12593      reject_bad_reg (inst.operands[1].reg);
12594
12595      if (!narrow)
12596	{
12597	  if (inst.operands[2].isreg)
12598	    {
12599	      reject_bad_reg (inst.operands[2].reg);
12600	      inst.instruction = THUMB_OP32 (inst.instruction);
12601	      inst.instruction |= inst.operands[0].reg << 8;
12602	      inst.instruction |= inst.operands[1].reg << 16;
12603	      inst.instruction |= inst.operands[2].reg;
12604
12605	      /* PR 12854: Error on extraneous shifts.  */
12606	      constraint (inst.operands[2].shifted,
12607			  _("extraneous shift as part of operand to shift insn"));
12608	    }
12609	  else
12610	    {
12611	      inst.operands[1].shifted = 1;
12612	      inst.operands[1].shift_kind = shift_kind;
12613	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
12614					     ? T_MNEM_movs : T_MNEM_mov);
12615	      inst.instruction |= inst.operands[0].reg << 8;
12616	      encode_thumb32_shifted_operand (1);
12617	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
12618	      inst.reloc.type = BFD_RELOC_UNUSED;
12619	    }
12620	}
12621      else
12622	{
12623	  if (inst.operands[2].isreg)
12624	    {
12625	      switch (shift_kind)
12626		{
12627		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
12628		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
12629		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
12630		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
12631		default: abort ();
12632		}
12633
12634	      inst.instruction |= inst.operands[0].reg;
12635	      inst.instruction |= inst.operands[2].reg << 3;
12636
12637	      /* PR 12854: Error on extraneous shifts.  */
12638	      constraint (inst.operands[2].shifted,
12639			  _("extraneous shift as part of operand to shift insn"));
12640	    }
12641	  else
12642	    {
12643	      switch (shift_kind)
12644		{
12645		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
12646		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
12647		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
12648		default: abort ();
12649		}
12650	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
12651	      inst.instruction |= inst.operands[0].reg;
12652	      inst.instruction |= inst.operands[1].reg << 3;
12653	    }
12654	}
12655    }
12656  else
12657    {
12658      constraint (inst.operands[0].reg > 7
12659		  || inst.operands[1].reg > 7, BAD_HIREG);
12660      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
12661
12662      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
12663	{
12664	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
12665	  constraint (inst.operands[0].reg != inst.operands[1].reg,
12666		      _("source1 and dest must be same register"));
12667
12668	  switch (inst.instruction)
12669	    {
12670	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
12671	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
12672	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
12673	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
12674	    default: abort ();
12675	    }
12676
12677	  inst.instruction |= inst.operands[0].reg;
12678	  inst.instruction |= inst.operands[2].reg << 3;
12679
12680	  /* PR 12854: Error on extraneous shifts.  */
12681	  constraint (inst.operands[2].shifted,
12682		      _("extraneous shift as part of operand to shift insn"));
12683	}
12684      else
12685	{
12686	  switch (inst.instruction)
12687	    {
12688	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
12689	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
12690	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
12691	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
12692	    default: abort ();
12693	    }
12694	  inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
12695	  inst.instruction |= inst.operands[0].reg;
12696	  inst.instruction |= inst.operands[1].reg << 3;
12697	}
12698    }
12699}
12700
12701static void
12702do_t_simd (void)
12703{
12704  unsigned Rd, Rn, Rm;
12705
12706  Rd = inst.operands[0].reg;
12707  Rn = inst.operands[1].reg;
12708  Rm = inst.operands[2].reg;
12709
12710  reject_bad_reg (Rd);
12711  reject_bad_reg (Rn);
12712  reject_bad_reg (Rm);
12713
12714  inst.instruction |= Rd << 8;
12715  inst.instruction |= Rn << 16;
12716  inst.instruction |= Rm;
12717}
12718
12719static void
12720do_t_simd2 (void)
12721{
12722  unsigned Rd, Rn, Rm;
12723
12724  Rd = inst.operands[0].reg;
12725  Rm = inst.operands[1].reg;
12726  Rn = inst.operands[2].reg;
12727
12728  reject_bad_reg (Rd);
12729  reject_bad_reg (Rn);
12730  reject_bad_reg (Rm);
12731
12732  inst.instruction |= Rd << 8;
12733  inst.instruction |= Rn << 16;
12734  inst.instruction |= Rm;
12735}
12736
12737static void
12738do_t_smc (void)
12739{
12740  unsigned int value = inst.reloc.exp.X_add_number;
12741  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
12742	      _("SMC is not permitted on this architecture"));
12743  constraint (inst.reloc.exp.X_op != O_constant,
12744	      _("expression too complex"));
12745  inst.reloc.type = BFD_RELOC_UNUSED;
12746  inst.instruction |= (value & 0xf000) >> 12;
12747  inst.instruction |= (value & 0x0ff0);
12748  inst.instruction |= (value & 0x000f) << 16;
12749  /* PR gas/15623: SMC instructions must be last in an IT block.  */
12750  set_it_insn_type_last ();
12751}
12752
12753static void
12754do_t_hvc (void)
12755{
12756  unsigned int value = inst.reloc.exp.X_add_number;
12757
12758  inst.reloc.type = BFD_RELOC_UNUSED;
12759  inst.instruction |= (value & 0x0fff);
12760  inst.instruction |= (value & 0xf000) << 4;
12761}
12762
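/* Common worker for SSAT (BIAS == 1) and USAT (BIAS == 0).  The SSAT
   saturate position is encoded as the written value minus one, and an
   optional LSL or ASR of up to 31 may be applied to Rn.  */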
12763static void
12764do_t_ssat_usat (int bias)
12765{
12766  unsigned Rd, Rn;
12767
12768  Rd = inst.operands[0].reg;
12769  Rn = inst.operands[2].reg;
12770
12771  reject_bad_reg (Rd);
12772  reject_bad_reg (Rn);
12773
12774  inst.instruction |= Rd << 8;
12775  inst.instruction |= inst.operands[1].imm - bias;
12776  inst.instruction |= Rn << 16;
12777
12778  if (inst.operands[3].present)
12779    {
12780      offsetT shift_amount = inst.reloc.exp.X_add_number;
12781
12782      inst.reloc.type = BFD_RELOC_UNUSED;
12783
12784      constraint (inst.reloc.exp.X_op != O_constant,
12785		  _("expression too complex"));
12786
12787      if (shift_amount != 0)
12788	{
12789	  constraint (shift_amount > 31,
12790		      _("shift expression is too large"));
12791
12792	  if (inst.operands[3].shift_kind == SHIFT_ASR)
12793	    inst.instruction |= 0x00200000;  /* sh bit.  */
12794
12795	  inst.instruction |= (shift_amount & 0x1c) << 10;
12796	  inst.instruction |= (shift_amount & 0x03) << 6;
12797	}
12798    }
12799}
12800
12801static void
12802do_t_ssat (void)
12803{
12804  do_t_ssat_usat (1);
12805}
12806
12807static void
12808do_t_ssat16 (void)
12809{
12810  unsigned Rd, Rn;
12811
12812  Rd = inst.operands[0].reg;
12813  Rn = inst.operands[2].reg;
12814
12815  reject_bad_reg (Rd);
12816  reject_bad_reg (Rn);
12817
12818  inst.instruction |= Rd << 8;
12819  inst.instruction |= inst.operands[1].imm - 1;
12820  inst.instruction |= Rn << 16;
12821}
12822
12823static void
12824do_t_strex (void)
12825{
12826  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
12827	      || inst.operands[2].postind || inst.operands[2].writeback
12828	      || inst.operands[2].immisreg || inst.operands[2].shifted
12829	      || inst.operands[2].negative,
12830	      BAD_ADDR_MODE);
12831
12832  constraint (inst.operands[2].reg == REG_PC, BAD_PC);
12833
12834  inst.instruction |= inst.operands[0].reg << 8;
12835  inst.instruction |= inst.operands[1].reg << 12;
12836  inst.instruction |= inst.operands[2].reg << 16;
12837  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
12838}
12839
12840static void
12841do_t_strexd (void)
12842{
12843  if (!inst.operands[2].present)
12844    inst.operands[2].reg = inst.operands[1].reg + 1;
12845
12846  constraint (inst.operands[0].reg == inst.operands[1].reg
12847	      || inst.operands[0].reg == inst.operands[2].reg
12848	      || inst.operands[0].reg == inst.operands[3].reg,
12849	      BAD_OVERLAP);
12850
12851  inst.instruction |= inst.operands[0].reg;
12852  inst.instruction |= inst.operands[1].reg << 12;
12853  inst.instruction |= inst.operands[2].reg << 8;
12854  inst.instruction |= inst.operands[3].reg << 16;
12855}
12856
12857static void
12858do_t_sxtah (void)
12859{
12860  unsigned Rd, Rn, Rm;
12861
12862  Rd = inst.operands[0].reg;
12863  Rn = inst.operands[1].reg;
12864  Rm = inst.operands[2].reg;
12865
12866  reject_bad_reg (Rd);
12867  reject_bad_reg (Rn);
12868  reject_bad_reg (Rm);
12869
12870  inst.instruction |= Rd << 8;
12871  inst.instruction |= Rn << 16;
12872  inst.instruction |= Rm;
12873  inst.instruction |= inst.operands[3].imm << 4;
12874}
12875
12876static void
12877do_t_sxth (void)
12878{
12879  unsigned Rd, Rm;
12880
12881  Rd = inst.operands[0].reg;
12882  Rm = inst.operands[1].reg;
12883
12884  reject_bad_reg (Rd);
12885  reject_bad_reg (Rm);
12886
12887  if (inst.instruction <= 0xffff
12888      && inst.size_req != 4
12889      && Rd <= 7 && Rm <= 7
12890      && (!inst.operands[2].present || inst.operands[2].imm == 0))
12891    {
12892      inst.instruction = THUMB_OP16 (inst.instruction);
12893      inst.instruction |= Rd;
12894      inst.instruction |= Rm << 3;
12895    }
12896  else if (unified_syntax)
12897    {
12898      if (inst.instruction <= 0xffff)
12899	inst.instruction = THUMB_OP32 (inst.instruction);
12900      inst.instruction |= Rd << 8;
12901      inst.instruction |= Rm;
12902      inst.instruction |= inst.operands[2].imm << 4;
12903    }
12904  else
12905    {
12906      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
12907		  _("Thumb encoding does not support rotation"));
12908      constraint (1, BAD_HIREG);
12909    }
12910}
12911
12912static void
12913do_t_swi (void)
12914{
12915  /* We have to do the following check manually as ARM_EXT_OS only applies
12916     to ARM_EXT_V6M.  */
12917  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6m))
12918    {
12919      if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_os)
12920	  /* This only applies to the v6m however, not later architectures.  */
12921	  && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7))
12922	as_bad (_("SVC is not permitted on this architecture"));
12923      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, arm_ext_os);
12924    }
12925
12926  inst.reloc.type = BFD_RELOC_ARM_SWI;
12927}
12928
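/* Encode TBB/TBH.  The index must be a register (no shifted index is
   accepted for TBB), and the instruction must be the last one in an
   IT block.  */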
12929static void
12930do_t_tb (void)
12931{
12932  unsigned Rn, Rm;
12933  int half;
12934
12935  half = (inst.instruction & 0x10) != 0;
12936  set_it_insn_type_last ();
12937  constraint (inst.operands[0].immisreg,
12938	      _("instruction requires register index"));
12939
12940  Rn = inst.operands[0].reg;
12941  Rm = inst.operands[0].imm;
12942
12943  constraint (Rn == REG_SP, BAD_SP);
12944  reject_bad_reg (Rm);
12945
12946  constraint (!half && inst.operands[0].shifted,
12947	      _("instruction does not allow shifted index"));
12948  inst.instruction |= (Rn << 16) | Rm;
12949}
12950
12951static void
12952do_t_udf (void)
12953{
12954  if (!inst.operands[0].present)
12955    inst.operands[0].imm = 0;
12956
12957  if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
12958    {
12959      constraint (inst.size_req == 2,
12960                  _("immediate value out of range"));
12961      inst.instruction = THUMB_OP32 (inst.instruction);
12962      inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
12963      inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
12964    }
12965  else
12966    {
12967      inst.instruction = THUMB_OP16 (inst.instruction);
12968      inst.instruction |= inst.operands[0].imm;
12969    }
12970
12971  set_it_insn_type (NEUTRAL_IT_INSN);
12972}
12973
12974
12975static void
12976do_t_usat (void)
12977{
12978  do_t_ssat_usat (0);
12979}
12980
12981static void
12982do_t_usat16 (void)
12983{
12984  unsigned Rd, Rn;
12985
12986  Rd = inst.operands[0].reg;
12987  Rn = inst.operands[2].reg;
12988
12989  reject_bad_reg (Rd);
12990  reject_bad_reg (Rn);
12991
12992  inst.instruction |= Rd << 8;
12993  inst.instruction |= inst.operands[1].imm;
12994  inst.instruction |= Rn << 16;
12995}
12996
12997/* Neon instruction encoder helpers.  */
12998
12999/* Encodings for the different types for various Neon opcodes.  */
13000
13001/* An "invalid" code for the following tables.  */
13002#define N_INV -1u
13003
13004struct neon_tab_entry
13005{
13006  unsigned integer;
13007  unsigned float_or_poly;
13008  unsigned scalar_or_imm;
13009};
13010
13011/* Map overloaded Neon opcodes to their respective encodings.  */
13012#define NEON_ENC_TAB					\
13013  X(vabd,	0x0000700, 0x1200d00, N_INV),		\
13014  X(vmax,	0x0000600, 0x0000f00, N_INV),		\
13015  X(vmin,	0x0000610, 0x0200f00, N_INV),		\
13016  X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
13017  X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
13018  X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
13019  X(vadd,	0x0000800, 0x0000d00, N_INV),		\
13020  X(vsub,	0x1000800, 0x0200d00, N_INV),		\
13021  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
13022  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
13023  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
13024  /* Register variants of the following two instructions are encoded as
13025     vcge / vcgt with the operands reversed.  */  	\
13026  X(vclt,	0x0000300, 0x1200e00, 0x1b10200),	\
13027  X(vcle,	0x0000310, 0x1000e00, 0x1b10180),	\
13028  X(vfma,	N_INV, 0x0000c10, N_INV),		\
13029  X(vfms,	N_INV, 0x0200c10, N_INV),		\
13030  X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
13031  X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
13032  X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
13033  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
13034  X(vmlal,	0x0800800, N_INV,     0x0800240),	\
13035  X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
13036  X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
13037  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
13038  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
13039  X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),	\
13040  X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),	\
13041  X(vqrdmlah,   0x3000b10, N_INV,     0x0800e40),	\
13042  X(vqrdmlsh,   0x3000c10, N_INV,     0x0800f40),	\
13043  X(vshl,	0x0000400, N_INV,     0x0800510),	\
13044  X(vqshl,	0x0000410, N_INV,     0x0800710),	\
13045  X(vand,	0x0000110, N_INV,     0x0800030),	\
13046  X(vbic,	0x0100110, N_INV,     0x0800030),	\
13047  X(veor,	0x1000110, N_INV,     N_INV),		\
13048  X(vorn,	0x0300110, N_INV,     0x0800010),	\
13049  X(vorr,	0x0200110, N_INV,     0x0800010),	\
13050  X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
13051  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
13052  X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
13053  X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
13054  X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
13055  X(vst1,	0x0000000, 0x0800000, N_INV),		\
13056  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
13057  X(vst2,	0x0000100, 0x0800100, N_INV),		\
13058  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
13059  X(vst3,	0x0000200, 0x0800200, N_INV),		\
13060  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
13061  X(vst4,	0x0000300, 0x0800300, N_INV),		\
13062  X(vmovn,	0x1b20200, N_INV,     N_INV),		\
13063  X(vtrn,	0x1b20080, N_INV,     N_INV),		\
13064  X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
13065  X(vqmovun,	0x1b20240, N_INV,     N_INV),		\
13066  X(vnmul,      0xe200a40, 0xe200b40, N_INV),		\
13067  X(vnmla,      0xe100a40, 0xe100b40, N_INV),		\
13068  X(vnmls,      0xe100a00, 0xe100b00, N_INV),		\
13069  X(vfnma,      0xe900a40, 0xe900b40, N_INV),		\
13070  X(vfnms,      0xe900a00, 0xe900b00, N_INV),		\
13071  X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),		\
13072  X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),		\
13073  X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),		\
13074  X(vcmpez,     0xeb50ac0, 0xeb50bc0, N_INV),		\
13075  X(vseleq,	0xe000a00, N_INV,     N_INV),		\
13076  X(vselvs,	0xe100a00, N_INV,     N_INV),		\
13077  X(vselge,	0xe200a00, N_INV,     N_INV),		\
13078  X(vselgt,	0xe300a00, N_INV,     N_INV),		\
13079  X(vmaxnm,	0xe800a00, 0x3000f10, N_INV),		\
13080  X(vminnm,	0xe800a40, 0x3200f10, N_INV),		\
13081  X(vcvta,	0xebc0a40, 0x3bb0000, N_INV),		\
13082  X(vrintr,	0xeb60a40, 0x3ba0400, N_INV),		\
13083  X(vrinta,	0xeb80a40, 0x3ba0400, N_INV),		\
13084  X(aes,	0x3b00300, N_INV,     N_INV),		\
13085  X(sha3op,	0x2000c00, N_INV,     N_INV),		\
13086  X(sha1h,	0x3b902c0, N_INV,     N_INV),           \
13087  X(sha2op,     0x3ba0380, N_INV,     N_INV)
13088
13089enum neon_opc
13090{
13091#define X(OPC,I,F,S) N_MNEM_##OPC
13092NEON_ENC_TAB
13093#undef X
13094};
13095
13096static const struct neon_tab_entry neon_enc_tab[] =
13097{
13098#define X(OPC,I,F,S) { (I), (F), (S) }
13099NEON_ENC_TAB
13100#undef X
13101};
13102
13103/* Do not use these macros; instead, use NEON_ENCODE defined below.  */
13104#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13105#define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
13106#define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13107#define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13108#define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13109#define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13110#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13111#define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13112#define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13113#define NEON_ENC_SINGLE_(X) \
13114  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
13115#define NEON_ENC_DOUBLE_(X) \
13116  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
13117#define NEON_ENC_FPV8_(X) \
13118  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))
13119
13120#define NEON_ENCODE(type, inst)					\
13121  do								\
13122    {								\
13123      inst.instruction = NEON_ENC_##type##_ (inst.instruction);	\
13124      inst.is_neon = 1;						\
13125    }								\
13126  while (0)
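
/* For illustration, an encoder that has settled on the integer form of an
   instruction does:

     NEON_ENCODE (INTEGER, inst);

   This replaces the N_MNEM_xxx index held in inst.instruction with the
   corresponding bits from the "integer" column of neon_enc_tab, and sets
   inst.is_neon so that the suffix check below does not reject the
   instruction.  */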
13127
13128#define check_neon_suffixes						\
13129  do									\
13130    {									\
13131      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon)	\
13132	{								\
13133	  as_bad (_("invalid neon suffix for non neon instruction"));	\
13134	  return;							\
13135	}								\
13136    }									\
13137  while (0)
13138
13139/* Define shapes for instruction operands. The following mnemonic characters
13140   are used in this table:
13141
13142     F - VFP S<n> register
13143     D - Neon D<n> register
13144     Q - Neon Q<n> register
13145     I - Immediate
13146     S - Scalar
13147     R - ARM register
13148     L - D<n> register list
13149
13150   This table is used to generate various data:
13151     - enumerations of the form NS_DDR to be used as arguments to
13152       neon_select_shape.
13153     - a table classifying shapes into single, double, quad, mixed.
13154     - a table used to drive neon_select_shape.  */
13155
13156#define NEON_SHAPE_DEF			\
13157  X(3, (D, D, D), DOUBLE),		\
13158  X(3, (Q, Q, Q), QUAD),		\
13159  X(3, (D, D, I), DOUBLE),		\
13160  X(3, (Q, Q, I), QUAD),		\
13161  X(3, (D, D, S), DOUBLE),		\
13162  X(3, (Q, Q, S), QUAD),		\
13163  X(2, (D, D), DOUBLE),			\
13164  X(2, (Q, Q), QUAD),			\
13165  X(2, (D, S), DOUBLE),			\
13166  X(2, (Q, S), QUAD),			\
13167  X(2, (D, R), DOUBLE),			\
13168  X(2, (Q, R), QUAD),			\
13169  X(2, (D, I), DOUBLE),			\
13170  X(2, (Q, I), QUAD),			\
13171  X(3, (D, L, D), DOUBLE),		\
13172  X(2, (D, Q), MIXED),			\
13173  X(2, (Q, D), MIXED),			\
13174  X(3, (D, Q, I), MIXED),		\
13175  X(3, (Q, D, I), MIXED),		\
13176  X(3, (Q, D, D), MIXED),		\
13177  X(3, (D, Q, Q), MIXED),		\
13178  X(3, (Q, Q, D), MIXED),		\
13179  X(3, (Q, D, S), MIXED),		\
13180  X(3, (D, Q, S), MIXED),		\
13181  X(4, (D, D, D, I), DOUBLE),		\
13182  X(4, (Q, Q, Q, I), QUAD),		\
13183  X(2, (F, F), SINGLE),			\
13184  X(3, (F, F, F), SINGLE),		\
13185  X(2, (F, I), SINGLE),			\
13186  X(2, (F, D), MIXED),			\
13187  X(2, (D, F), MIXED),			\
13188  X(3, (F, F, I), MIXED),		\
13189  X(4, (R, R, F, F), SINGLE),		\
13190  X(4, (F, F, R, R), SINGLE),		\
13191  X(3, (D, R, R), DOUBLE),		\
13192  X(3, (R, R, D), DOUBLE),		\
13193  X(2, (S, R), SINGLE),			\
13194  X(2, (R, S), SINGLE),			\
13195  X(2, (F, R), SINGLE),			\
13196  X(2, (R, F), SINGLE)
13197
13198#define S2(A,B)		NS_##A##B
13199#define S3(A,B,C)	NS_##A##B##C
13200#define S4(A,B,C,D)	NS_##A##B##C##D
13201
13202#define X(N, L, C) S##N L
13203
13204enum neon_shape
13205{
13206  NEON_SHAPE_DEF,
13207  NS_NULL
13208};
13209
13210#undef X
13211#undef S2
13212#undef S3
13213#undef S4
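
/* For illustration, with the definitions above an entry such as
   X(3, (D, D, D), DOUBLE) expands to S3 (D, D, D), i.e. NS_DDD, so the
   enum effectively reads:

     enum neon_shape { NS_DDD, NS_QQQ, NS_DDI, ..., NS_RF, NS_NULL };

   Redefining X below reuses the same NEON_SHAPE_DEF list to build the
   matching classification and element tables, keeping them in sync.  */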
13214
13215enum neon_shape_class
13216{
13217  SC_SINGLE,
13218  SC_DOUBLE,
13219  SC_QUAD,
13220  SC_MIXED
13221};
13222
13223#define X(N, L, C) SC_##C
13224
13225static enum neon_shape_class neon_shape_class[] =
13226{
13227  NEON_SHAPE_DEF
13228};
13229
13230#undef X
13231
13232enum neon_shape_el
13233{
13234  SE_F,
13235  SE_D,
13236  SE_Q,
13237  SE_I,
13238  SE_S,
13239  SE_R,
13240  SE_L
13241};
13242
13243/* Register widths of above.  */
13244static unsigned neon_shape_el_size[] =
13245{
13246  32,
13247  64,
13248  128,
13249  0,
13250  32,
13251  32,
13252  0
13253};
13254
13255struct neon_shape_info
13256{
13257  unsigned els;
13258  enum neon_shape_el el[NEON_MAX_TYPE_ELS];
13259};
13260
13261#define S2(A,B)		{ SE_##A, SE_##B }
13262#define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
13263#define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }
13264
13265#define X(N, L, C) { N, S##N L }
13266
13267static struct neon_shape_info neon_shape_tab[] =
13268{
13269  NEON_SHAPE_DEF
13270};
13271
13272#undef X
13273#undef S2
13274#undef S3
13275#undef S4
13276
13277/* Bit masks used in type checking given instructions.
13278  'N_EQK' means the type must be the same as (or based on in some way) the key
13279   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
13280   set, various other bits can be set as well in order to modify the meaning of
13281   the type constraint.  */
13282
13283enum neon_type_mask
13284{
13285  N_S8   = 0x0000001,
13286  N_S16  = 0x0000002,
13287  N_S32  = 0x0000004,
13288  N_S64  = 0x0000008,
13289  N_U8   = 0x0000010,
13290  N_U16  = 0x0000020,
13291  N_U32  = 0x0000040,
13292  N_U64  = 0x0000080,
13293  N_I8   = 0x0000100,
13294  N_I16  = 0x0000200,
13295  N_I32  = 0x0000400,
13296  N_I64  = 0x0000800,
13297  N_8    = 0x0001000,
13298  N_16   = 0x0002000,
13299  N_32   = 0x0004000,
13300  N_64   = 0x0008000,
13301  N_P8   = 0x0010000,
13302  N_P16  = 0x0020000,
13303  N_F16  = 0x0040000,
13304  N_F32  = 0x0080000,
13305  N_F64  = 0x0100000,
13306  N_P64	 = 0x0200000,
13307  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
13308  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
13309  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
13310  N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
13311  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
13312  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
13313  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
13314  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
13315  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
13316  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
13317  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
13318  N_UTYP = 0,
13319  N_MAX_NONSPECIAL = N_P64
13320};
13321
13322#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
13323
13324#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
13325#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
13326#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
13327#define N_SUF_32   (N_SU_32 | N_F32)
13328#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
13329#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F32)
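
/* For illustration, a typical constraint such as

     neon_check_type (3, rs, N_EQK, N_EQK, N_SU_32 | N_KEY);

   marks the third operand as the key and requires it to be one of
   s8/s16/s32/u8/u16/u32; the other two operands must then have the same
   type and size as the key (N_EQK with no modifier bits set).  */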
13330
13331/* Pass this as the first type argument to neon_check_type to ignore types
13332   altogether.  */
13333#define N_IGNORE_TYPE (N_KEY | N_EQK)
13334
13335/* Select a "shape" for the current instruction (describing register types or
13336   sizes) from a list of alternatives. Return NS_NULL if the current instruction
13337   doesn't fit.  For non-polymorphic shapes, checking is usually done as part
13338   of operand parsing, so this function doesn't need to be called.
13339   Shapes should be listed in order of decreasing length.  */
13340
13341static enum neon_shape
13342neon_select_shape (enum neon_shape shape, ...)
13343{
13344  va_list ap;
13345  enum neon_shape first_shape = shape;
13346
13347  /* Fix missing optional operands. FIXME: we don't know at this point how
13348     many arguments we should have, so this makes the assumption that we have
13349     > 1. This is true of all current Neon opcodes, I think, but may not be
13350     true in the future.  */
13351  if (!inst.operands[1].present)
13352    inst.operands[1] = inst.operands[0];
13353
13354  va_start (ap, shape);
13355
13356  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
13357    {
13358      unsigned j;
13359      int matches = 1;
13360
13361      for (j = 0; j < neon_shape_tab[shape].els; j++)
13362	{
13363	  if (!inst.operands[j].present)
13364	    {
13365	      matches = 0;
13366	      break;
13367	    }
13368
13369	  switch (neon_shape_tab[shape].el[j])
13370	    {
13371	    case SE_F:
13372	      if (!(inst.operands[j].isreg
13373		    && inst.operands[j].isvec
13374		    && inst.operands[j].issingle
13375		    && !inst.operands[j].isquad))
13376		matches = 0;
13377	      break;
13378
13379	    case SE_D:
13380	      if (!(inst.operands[j].isreg
13381		    && inst.operands[j].isvec
13382		    && !inst.operands[j].isquad
13383		    && !inst.operands[j].issingle))
13384		matches = 0;
13385	      break;
13386
13387	    case SE_R:
13388	      if (!(inst.operands[j].isreg
13389		    && !inst.operands[j].isvec))
13390		matches = 0;
13391	      break;
13392
13393	    case SE_Q:
13394	      if (!(inst.operands[j].isreg
13395		    && inst.operands[j].isvec
13396		    && inst.operands[j].isquad
13397		    && !inst.operands[j].issingle))
13398		matches = 0;
13399	      break;
13400
13401	    case SE_I:
13402	      if (!(!inst.operands[j].isreg
13403		    && !inst.operands[j].isscalar))
13404		matches = 0;
13405	      break;
13406
13407	    case SE_S:
13408	      if (!(!inst.operands[j].isreg
13409		    && inst.operands[j].isscalar))
13410		matches = 0;
13411	      break;
13412
13413	    case SE_L:
13414	      break;
13415	    }
13416	  if (!matches)
13417	    break;
13418	}
13419      if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
13420	/* We've matched all the entries in the shape table, and we don't
13421	   have any left over operands which have not been matched.  */
13422	break;
13423    }
13424
13425  va_end (ap);
13426
13427  if (shape == NS_NULL && first_shape != NS_NULL)
13428    first_error (_("invalid instruction shape"));
13429
13430  return shape;
13431}
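
/* For illustration, a three-operand Neon encoder typically calls

     enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);

   For "vadd.i32 d0, d1, d2" the D/D/D alternative matches and NS_DDD is
   returned; for the same mnemonic written with Q registers, NS_QQQ is
   returned instead.  If no alternative matches the parsed operands,
   NS_NULL is returned and "invalid instruction shape" is reported.  */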
13432
13433/* True if SHAPE is predominantly a quadword operation (most of the time, this
13434   means the Q bit should be set).  */
13435
13436static int
13437neon_quad (enum neon_shape shape)
13438{
13439  return neon_shape_class[shape] == SC_QUAD;
13440}
13441
13442static void
13443neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
13444		       unsigned *g_size)
13445{
13446  /* Allow modification to be made to types which are constrained to be
13447     based on the key element, based on bits set alongside N_EQK.  */
13448  if ((typebits & N_EQK) != 0)
13449    {
13450      if ((typebits & N_HLF) != 0)
13451	*g_size /= 2;
13452      else if ((typebits & N_DBL) != 0)
13453	*g_size *= 2;
13454      if ((typebits & N_SGN) != 0)
13455	*g_type = NT_signed;
13456      else if ((typebits & N_UNS) != 0)
13457	*g_type = NT_unsigned;
13458      else if ((typebits & N_INT) != 0)
13459	*g_type = NT_integer;
13460      else if ((typebits & N_FLT) != 0)
13461	*g_type = NT_float;
13462      else if ((typebits & N_SIZ) != 0)
13463	*g_type = NT_untyped;
13464    }
13465}
13466
13467/* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
13468   operand type, i.e. the single type specified in a Neon instruction when it
13469   is the only one given.  */
13470
13471static struct neon_type_el
13472neon_type_promote (struct neon_type_el *key, unsigned thisarg)
13473{
13474  struct neon_type_el dest = *key;
13475
13476  gas_assert ((thisarg & N_EQK) != 0);
13477
13478  neon_modify_type_size (thisarg, &dest.type, &dest.size);
13479
13480  return dest;
13481}
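
/* For illustration, with a key type of .s16 and THISARG set to
   N_EQK | N_DBL (as used for the destination of VSHLL), the promoted
   operand type is a 32-bit signed element; N_EQK | N_HLF would halve the
   size instead, and N_EQK | N_UNS would force the type to unsigned.  */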
13482
13483/* Convert Neon type and size into compact bitmask representation.  */
13484
13485static enum neon_type_mask
13486type_chk_of_el_type (enum neon_el_type type, unsigned size)
13487{
13488  switch (type)
13489    {
13490    case NT_untyped:
13491      switch (size)
13492	{
13493	case 8:  return N_8;
13494	case 16: return N_16;
13495	case 32: return N_32;
13496	case 64: return N_64;
13497	default: ;
13498	}
13499      break;
13500
13501    case NT_integer:
13502      switch (size)
13503	{
13504	case 8:  return N_I8;
13505	case 16: return N_I16;
13506	case 32: return N_I32;
13507	case 64: return N_I64;
13508	default: ;
13509	}
13510      break;
13511
13512    case NT_float:
13513      switch (size)
13514	{
13515	case 16: return N_F16;
13516	case 32: return N_F32;
13517	case 64: return N_F64;
13518	default: ;
13519	}
13520      break;
13521
13522    case NT_poly:
13523      switch (size)
13524	{
13525	case 8:  return N_P8;
13526	case 16: return N_P16;
13527	case 64: return N_P64;
13528	default: ;
13529	}
13530      break;
13531
13532    case NT_signed:
13533      switch (size)
13534	{
13535	case 8:  return N_S8;
13536	case 16: return N_S16;
13537	case 32: return N_S32;
13538	case 64: return N_S64;
13539	default: ;
13540	}
13541      break;
13542
13543    case NT_unsigned:
13544      switch (size)
13545	{
13546	case 8:  return N_U8;
13547	case 16: return N_U16;
13548	case 32: return N_U32;
13549	case 64: return N_U64;
13550	default: ;
13551	}
13552      break;
13553
13554    default: ;
13555    }
13556
13557  return N_UTYP;
13558}
13559
13560/* Convert compact Neon bitmask type representation to a type and size. Only
13561   handles the case where a single bit is set in the mask.  */
13562
13563static int
13564el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
13565		     enum neon_type_mask mask)
13566{
13567  if ((mask & N_EQK) != 0)
13568    return FAIL;
13569
13570  if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
13571    *size = 8;
13572  else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16)) != 0)
13573    *size = 16;
13574  else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
13575    *size = 32;
13576  else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
13577    *size = 64;
13578  else
13579    return FAIL;
13580
13581  if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
13582    *type = NT_signed;
13583  else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
13584    *type = NT_unsigned;
13585  else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
13586    *type = NT_integer;
13587  else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
13588    *type = NT_untyped;
13589  else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
13590    *type = NT_poly;
13591  else if ((mask & (N_F16 | N_F32 | N_F64)) != 0)
13592    *type = NT_float;
13593  else
13594    return FAIL;
13595
13596  return SUCCESS;
13597}
13598
13599/* Modify a bitmask of allowed types. This is only needed for type
13600   relaxation.  */
13601
13602static unsigned
13603modify_types_allowed (unsigned allowed, unsigned mods)
13604{
13605  unsigned size;
13606  enum neon_el_type type;
13607  unsigned destmask;
13608  int i;
13609
13610  destmask = 0;
13611
13612  for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
13613    {
13614      if (el_type_of_type_chk (&type, &size,
13615			       (enum neon_type_mask) (allowed & i)) == SUCCESS)
13616	{
13617	  neon_modify_type_size (mods, &type, &size);
13618	  destmask |= type_chk_of_el_type (type, size);
13619	}
13620    }
13621
13622  return destmask;
13623}
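
/* For illustration, modify_types_allowed (N_SU_32, N_EQK | N_DBL) doubles
   every element size in the mask, giving
   N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64 (i.e. N_SU_16_64).  */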
13624
13625/* Check type and return type classification.
13626   The manual states (paraphrase): If one datatype is given, it indicates the
13627   type given in:
13628    - the second operand, if there is one
13629    - the operand, if there is no second operand
13630    - the result, if there are no operands.
13631   This isn't quite good enough though, so we use a concept of a "key" datatype
13632   which is set on a per-instruction basis, which is the one which matters when
13633   only one data type is written.
13634   Note: this function has side-effects (e.g. filling in missing operands). All
13635   Neon instructions should call it before performing bit encoding.  */
13636
13637static struct neon_type_el
13638neon_check_type (unsigned els, enum neon_shape ns, ...)
13639{
13640  va_list ap;
13641  unsigned i, pass, key_el = 0;
13642  unsigned types[NEON_MAX_TYPE_ELS];
13643  enum neon_el_type k_type = NT_invtype;
13644  unsigned k_size = -1u;
13645  struct neon_type_el badtype = {NT_invtype, -1};
13646  unsigned key_allowed = 0;
13647
13648  /* The operand that may be omitted in Neon instructions is always operand 1.
13649     Fill in the missing operand here, if it was omitted.  */
13650  if (els > 1 && !inst.operands[1].present)
13651    inst.operands[1] = inst.operands[0];
13652
13653  /* Suck up all the varargs.  */
13654  va_start (ap, ns);
13655  for (i = 0; i < els; i++)
13656    {
13657      unsigned thisarg = va_arg (ap, unsigned);
13658      if (thisarg == N_IGNORE_TYPE)
13659	{
13660	  va_end (ap);
13661	  return badtype;
13662	}
13663      types[i] = thisarg;
13664      if ((thisarg & N_KEY) != 0)
13665	key_el = i;
13666    }
13667  va_end (ap);
13668
13669  if (inst.vectype.elems > 0)
13670    for (i = 0; i < els; i++)
13671      if (inst.operands[i].vectype.type != NT_invtype)
13672	{
13673	  first_error (_("types specified in both the mnemonic and operands"));
13674	  return badtype;
13675	}
13676
13677  /* Duplicate inst.vectype elements here as necessary.
13678     FIXME: No idea if this is exactly the same as the ARM assembler,
13679     particularly when an insn takes one register and one non-register
13680     operand. */
13681  if (inst.vectype.elems == 1 && els > 1)
13682    {
13683      unsigned j;
13684      inst.vectype.elems = els;
13685      inst.vectype.el[key_el] = inst.vectype.el[0];
13686      for (j = 0; j < els; j++)
13687	if (j != key_el)
13688	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
13689						  types[j]);
13690    }
13691  else if (inst.vectype.elems == 0 && els > 0)
13692    {
13693      unsigned j;
13694      /* No types were given after the mnemonic, so look for types specified
13695	 after each operand. We allow some flexibility here; as long as the
13696	 "key" operand has a type, we can infer the others.  */
13697      for (j = 0; j < els; j++)
13698	if (inst.operands[j].vectype.type != NT_invtype)
13699	  inst.vectype.el[j] = inst.operands[j].vectype;
13700
13701      if (inst.operands[key_el].vectype.type != NT_invtype)
13702	{
13703	  for (j = 0; j < els; j++)
13704	    if (inst.operands[j].vectype.type == NT_invtype)
13705	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
13706						      types[j]);
13707	}
13708      else
13709	{
13710	  first_error (_("operand types can't be inferred"));
13711	  return badtype;
13712	}
13713    }
13714  else if (inst.vectype.elems != els)
13715    {
13716      first_error (_("type specifier has the wrong number of parts"));
13717      return badtype;
13718    }
13719
13720  for (pass = 0; pass < 2; pass++)
13721    {
13722      for (i = 0; i < els; i++)
13723	{
13724	  unsigned thisarg = types[i];
13725	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
13726	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
13727	  enum neon_el_type g_type = inst.vectype.el[i].type;
13728	  unsigned g_size = inst.vectype.el[i].size;
13729
13730	  /* Decay more-specific signed & unsigned types to sign-insensitive
13731	     integer types if sign-specific variants are unavailable.  */
13732	  if ((g_type == NT_signed || g_type == NT_unsigned)
13733	      && (types_allowed & N_SU_ALL) == 0)
13734	    g_type = NT_integer;
13735
13736	  /* If only untyped args are allowed, decay any more specific types to
13737	     them. Some instructions only care about signs for some element
13738	     sizes, so handle that properly.  */
13739	  if (((types_allowed & N_UNT) == 0)
13740	      && ((g_size == 8 && (types_allowed & N_8) != 0)
13741		  || (g_size == 16 && (types_allowed & N_16) != 0)
13742		  || (g_size == 32 && (types_allowed & N_32) != 0)
13743		  || (g_size == 64 && (types_allowed & N_64) != 0)))
13744	    g_type = NT_untyped;
13745
13746	  if (pass == 0)
13747	    {
13748	      if ((thisarg & N_KEY) != 0)
13749		{
13750		  k_type = g_type;
13751		  k_size = g_size;
13752		  key_allowed = thisarg & ~N_KEY;
13753		}
13754	    }
13755	  else
13756	    {
13757	      if ((thisarg & N_VFP) != 0)
13758		{
13759		  enum neon_shape_el regshape;
13760		  unsigned regwidth, match;
13761
13762		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
13763		  if (ns == NS_NULL)
13764		    {
13765		      first_error (_("invalid instruction shape"));
13766		      return badtype;
13767		    }
13768		  regshape = neon_shape_tab[ns].el[i];
13769		  regwidth = neon_shape_el_size[regshape];
13770
13771		  /* In VFP mode, operands must match register widths. If we
13772		     have a key operand, use its width, else use the width of
13773		     the current operand.  */
13774		  if (k_size != -1u)
13775		    match = k_size;
13776		  else
13777		    match = g_size;
13778
13779		  if (regwidth != match)
13780		    {
13781		      first_error (_("operand size must match register width"));
13782		      return badtype;
13783		    }
13784		}
13785
13786	      if ((thisarg & N_EQK) == 0)
13787		{
13788		  unsigned given_type = type_chk_of_el_type (g_type, g_size);
13789
13790		  if ((given_type & types_allowed) == 0)
13791		    {
13792		      first_error (_("bad type in Neon instruction"));
13793		      return badtype;
13794		    }
13795		}
13796	      else
13797		{
13798		  enum neon_el_type mod_k_type = k_type;
13799		  unsigned mod_k_size = k_size;
13800		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
13801		  if (g_type != mod_k_type || g_size != mod_k_size)
13802		    {
13803		      first_error (_("inconsistent types in Neon instruction"));
13804		      return badtype;
13805		    }
13806		}
13807	    }
13808	}
13809    }
13810
13811  return inst.vectype.el[key_el];
13812}
13813
13814/* Neon-style VFP instruction forwarding.  */
13815
13816/* Thumb VFP instructions have 0xE in the condition field.  */
13817
13818static void
13819do_vfp_cond_or_thumb (void)
13820{
13821  inst.is_neon = 1;
13822
13823  if (thumb_mode)
13824    inst.instruction |= 0xe0000000;
13825  else
13826    inst.instruction |= inst.cond << 28;
13827}
13828
13829/* Look up and encode a simple mnemonic, for use as a helper function for the
13830   Neon-style VFP syntax.  This avoids duplication of bits of the insns table,
13831   etc.  It is assumed that operand parsing has already been done, and that the
13832   operands are in the form expected by the given opcode (this isn't necessarily
13833   the same as the form in which they were parsed, hence some massaging must
13834   take place before this function is called).
13835   Checks current arch version against that in the looked-up opcode.  */
13836
13837static void
13838do_vfp_nsyn_opcode (const char *opname)
13839{
13840  const struct asm_opcode *opcode;
13841
13842  opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
13843
13844  if (!opcode)
13845    abort ();
13846
13847  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
13848		thumb_mode ? *opcode->tvariant : *opcode->avariant),
13849	      _(BAD_FPU));
13850
13851  inst.is_neon = 1;
13852
13853  if (thumb_mode)
13854    {
13855      inst.instruction = opcode->tvalue;
13856      opcode->tencode ();
13857    }
13858  else
13859    {
13860      inst.instruction = (inst.cond << 28) | opcode->avalue;
13861      opcode->aencode ();
13862    }
13863}
13864
13865static void
13866do_vfp_nsyn_add_sub (enum neon_shape rs)
13867{
13868  int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
13869
13870  if (rs == NS_FFF)
13871    {
13872      if (is_add)
13873	do_vfp_nsyn_opcode ("fadds");
13874      else
13875	do_vfp_nsyn_opcode ("fsubs");
13876    }
13877  else
13878    {
13879      if (is_add)
13880	do_vfp_nsyn_opcode ("faddd");
13881      else
13882	do_vfp_nsyn_opcode ("fsubd");
13883    }
13884}
13885
13886/* Check operand types to see if this is a VFP instruction, and if so call
13887   PFN ().  */
13888
13889static int
13890try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
13891{
13892  enum neon_shape rs;
13893  struct neon_type_el et;
13894
13895  switch (args)
13896    {
13897    case 2:
13898      rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
13899      et = neon_check_type (2, rs,
13900	N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13901      break;
13902
13903    case 3:
13904      rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13905      et = neon_check_type (3, rs,
13906	N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13907      break;
13908
13909    default:
13910      abort ();
13911    }
13912
13913  if (et.type != NT_invtype)
13914    {
13915      pfn (rs);
13916      return SUCCESS;
13917    }
13918
13919  inst.error = NULL;
13920  return FAIL;
13921}
13922
13923static void
13924do_vfp_nsyn_mla_mls (enum neon_shape rs)
13925{
13926  int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
13927
13928  if (rs == NS_FFF)
13929    {
13930      if (is_mla)
13931	do_vfp_nsyn_opcode ("fmacs");
13932      else
13933	do_vfp_nsyn_opcode ("fnmacs");
13934    }
13935  else
13936    {
13937      if (is_mla)
13938	do_vfp_nsyn_opcode ("fmacd");
13939      else
13940	do_vfp_nsyn_opcode ("fnmacd");
13941    }
13942}
13943
13944static void
13945do_vfp_nsyn_fma_fms (enum neon_shape rs)
13946{
13947  int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
13948
13949  if (rs == NS_FFF)
13950    {
13951      if (is_fma)
13952	do_vfp_nsyn_opcode ("ffmas");
13953      else
13954	do_vfp_nsyn_opcode ("ffnmas");
13955    }
13956  else
13957    {
13958      if (is_fma)
13959	do_vfp_nsyn_opcode ("ffmad");
13960      else
13961	do_vfp_nsyn_opcode ("ffnmad");
13962    }
13963}
13964
13965static void
13966do_vfp_nsyn_mul (enum neon_shape rs)
13967{
13968  if (rs == NS_FFF)
13969    do_vfp_nsyn_opcode ("fmuls");
13970  else
13971    do_vfp_nsyn_opcode ("fmuld");
13972}
13973
13974static void
13975do_vfp_nsyn_abs_neg (enum neon_shape rs)
13976{
13977  int is_neg = (inst.instruction & 0x80) != 0;
13978  neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);
13979
13980  if (rs == NS_FF)
13981    {
13982      if (is_neg)
13983	do_vfp_nsyn_opcode ("fnegs");
13984      else
13985	do_vfp_nsyn_opcode ("fabss");
13986    }
13987  else
13988    {
13989      if (is_neg)
13990	do_vfp_nsyn_opcode ("fnegd");
13991      else
13992	do_vfp_nsyn_opcode ("fabsd");
13993    }
13994}
13995
13996/* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
13997   insns belong to Neon, and are handled elsewhere.  */
13998
13999static void
14000do_vfp_nsyn_ldm_stm (int is_dbmode)
14001{
14002  int is_ldm = (inst.instruction & (1 << 20)) != 0;
14003  if (is_ldm)
14004    {
14005      if (is_dbmode)
14006	do_vfp_nsyn_opcode ("fldmdbs");
14007      else
14008	do_vfp_nsyn_opcode ("fldmias");
14009    }
14010  else
14011    {
14012      if (is_dbmode)
14013	do_vfp_nsyn_opcode ("fstmdbs");
14014      else
14015	do_vfp_nsyn_opcode ("fstmias");
14016    }
14017}
14018
14019static void
14020do_vfp_nsyn_sqrt (void)
14021{
14022  enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
14023  neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
14024
14025  if (rs == NS_FF)
14026    do_vfp_nsyn_opcode ("fsqrts");
14027  else
14028    do_vfp_nsyn_opcode ("fsqrtd");
14029}
14030
14031static void
14032do_vfp_nsyn_div (void)
14033{
14034  enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
14035  neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14036    N_F32 | N_F64 | N_KEY | N_VFP);
14037
14038  if (rs == NS_FFF)
14039    do_vfp_nsyn_opcode ("fdivs");
14040  else
14041    do_vfp_nsyn_opcode ("fdivd");
14042}
14043
14044static void
14045do_vfp_nsyn_nmul (void)
14046{
14047  enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
14048  neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14049    N_F32 | N_F64 | N_KEY | N_VFP);
14050
14051  if (rs == NS_FFF)
14052    {
14053      NEON_ENCODE (SINGLE, inst);
14054      do_vfp_sp_dyadic ();
14055    }
14056  else
14057    {
14058      NEON_ENCODE (DOUBLE, inst);
14059      do_vfp_dp_rd_rn_rm ();
14060    }
14061  do_vfp_cond_or_thumb ();
14062}
14063
14064static void
14065do_vfp_nsyn_cmp (void)
14066{
14067  if (inst.operands[1].isreg)
14068    {
14069      enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
14070      neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
14071
14072      if (rs == NS_FF)
14073	{
14074	  NEON_ENCODE (SINGLE, inst);
14075	  do_vfp_sp_monadic ();
14076	}
14077      else
14078	{
14079	  NEON_ENCODE (DOUBLE, inst);
14080	  do_vfp_dp_rd_rm ();
14081	}
14082    }
14083  else
14084    {
14085      enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
14086      neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);
14087
14088      switch (inst.instruction & 0x0fffffff)
14089	{
14090	case N_MNEM_vcmp:
14091	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
14092	  break;
14093	case N_MNEM_vcmpe:
14094	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
14095	  break;
14096	default:
14097	  abort ();
14098	}
14099
14100      if (rs == NS_FI)
14101	{
14102	  NEON_ENCODE (SINGLE, inst);
14103	  do_vfp_sp_compare_z ();
14104	}
14105      else
14106	{
14107	  NEON_ENCODE (DOUBLE, inst);
14108	  do_vfp_dp_rd ();
14109	}
14110    }
14111  do_vfp_cond_or_thumb ();
14112}
14113
14114static void
14115nsyn_insert_sp (void)
14116{
14117  inst.operands[1] = inst.operands[0];
14118  memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
14119  inst.operands[0].reg = REG_SP;
14120  inst.operands[0].isreg = 1;
14121  inst.operands[0].writeback = 1;
14122  inst.operands[0].present = 1;
14123}
14124
14125static void
14126do_vfp_nsyn_push (void)
14127{
14128  nsyn_insert_sp ();
14129  if (inst.operands[1].issingle)
14130    do_vfp_nsyn_opcode ("fstmdbs");
14131  else
14132    do_vfp_nsyn_opcode ("fstmdbd");
14133}
14134
14135static void
14136do_vfp_nsyn_pop (void)
14137{
14138  nsyn_insert_sp ();
14139  if (inst.operands[1].issingle)
14140    do_vfp_nsyn_opcode ("fldmias");
14141  else
14142    do_vfp_nsyn_opcode ("fldmiad");
14143}
14144
14145/* Fix up Neon data-processing instructions, ORing in the correct bits for ARM
14146   or Thumb mode; in Thumb mode this also moves the encoded bit 24 to bit 28.  */
14147
14148static void
14149neon_dp_fixup (struct arm_it* insn)
14150{
14151  unsigned int i = insn->instruction;
14152  insn->is_neon = 1;
14153
14154  if (thumb_mode)
14155    {
14156      /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode.  */
14157      if (i & (1 << 24))
14158	i |= 1 << 28;
14159
14160      i &= ~(1 << 24);
14161
14162      i |= 0xef000000;
14163    }
14164  else
14165    i |= 0xf2000000;
14166
14167  insn->instruction = i;
14168}
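
/* For illustration, if an encoder leaves 0x01000810 in inst.instruction
   (U bit set at bit 24), neon_dp_fixup produces 0xf3000810 in ARM mode and
   0xff000810 in Thumb mode, where the U bit has been moved to bit 28.  */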
14169
14170/* Turn a size (8, 16, 32, 64) into the log2 of that size minus 3
14171   (0, 1, 2, 3), as used in Neon size fields.  */
14172
14173static unsigned
14174neon_logbits (unsigned x)
14175{
14176  return ffs (x) - 4;
14177}
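
/* For example, neon_logbits (32) == 2, since ffs (32) returns 6 and
   6 - 4 == 2.  */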
14178
14179#define LOW4(R) ((R) & 0xf)
14180#define HI1(R) (((R) >> 4) & 1)
14181
14182/* Encode insns with bit pattern:
14183
14184  |28/24|23|22 |21 20|19 16|15 12|11    8|7|6|5|4|3  0|
14185  |  U  |x |D  |size | Rn  | Rd  |x x x x|N|Q|M|x| Rm |
14186
14187  SIZE is passed in bits. -1 means size field isn't changed, in case it has a
14188  different meaning for some instruction.  */
14189
14190static void
14191neon_three_same (int isquad, int ubit, int size)
14192{
14193  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14194  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14195  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14196  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14197  inst.instruction |= LOW4 (inst.operands[2].reg);
14198  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
14199  inst.instruction |= (isquad != 0) << 6;
14200  inst.instruction |= (ubit != 0) << 24;
14201  if (size != -1)
14202    inst.instruction |= neon_logbits (size) << 20;
14203
14204  neon_dp_fixup (&inst);
14205}
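
/* For illustration, for "vadd.i32 d0, d1, d2" the call
   neon_three_same (0, 0, 32) places Rd=0 (bits 15-12), Rn=1 (bits 19-16)
   and Rm=2 (bits 3-0), leaves the Q and U bits clear, and sets the size
   field (bits 21-20) to 2 before handing the result to neon_dp_fixup.  */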
14206
14207/* Encode instructions of the form:
14208
14209  |28/24|23|22|21 20|19 18|17 16|15 12|11      7|6|5|4|3  0|
14210  |  U  |x |D |x  x |size |x  x | Rd  |x x x x x|Q|M|x| Rm |
14211
14212  Don't write size if SIZE == -1.  */
14213
14214static void
14215neon_two_same (int qbit, int ubit, int size)
14216{
14217  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14218  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14219  inst.instruction |= LOW4 (inst.operands[1].reg);
14220  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14221  inst.instruction |= (qbit != 0) << 6;
14222  inst.instruction |= (ubit != 0) << 24;
14223
14224  if (size != -1)
14225    inst.instruction |= neon_logbits (size) << 18;
14226
14227  neon_dp_fixup (&inst);
14228}
14229
14230/* Neon instruction encoders, in approximate order of appearance.  */
14231
14232static void
14233do_neon_dyadic_i_su (void)
14234{
14235  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14236  struct neon_type_el et = neon_check_type (3, rs,
14237    N_EQK, N_EQK, N_SU_32 | N_KEY);
14238  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14239}
14240
14241static void
14242do_neon_dyadic_i64_su (void)
14243{
14244  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14245  struct neon_type_el et = neon_check_type (3, rs,
14246    N_EQK, N_EQK, N_SU_ALL | N_KEY);
14247  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14248}
14249
14250static void
14251neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
14252		unsigned immbits)
14253{
14254  unsigned size = et.size >> 3;
14255  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14256  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14257  inst.instruction |= LOW4 (inst.operands[1].reg);
14258  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14259  inst.instruction |= (isquad != 0) << 6;
14260  inst.instruction |= immbits << 16;
14261  inst.instruction |= (size >> 3) << 7;
14262  inst.instruction |= (size & 0x7) << 19;
14263  if (write_ubit)
14264    inst.instruction |= (uval != 0) << 24;
14265
14266  neon_dp_fixup (&inst);
14267}
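
/* For illustration, for a 32-bit element and IMMBITS == 5 the code above
   sets the imm6 field (bits 21-16) to 32 + 5 and leaves the L bit (bit 7)
   clear; for 64-bit elements SIZE becomes 8, so the L bit is set and imm6
   holds IMMBITS alone.  The element size is thus folded into imm6, as the
   Neon immediate-shift encodings require.  */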
14268
14269static void
14270do_neon_shl_imm (void)
14271{
14272  if (!inst.operands[2].isreg)
14273    {
14274      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14275      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
14276      int imm = inst.operands[2].imm;
14277
14278      constraint (imm < 0 || (unsigned)imm >= et.size,
14279		  _("immediate out of range for shift"));
14280      NEON_ENCODE (IMMED, inst);
14281      neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
14282    }
14283  else
14284    {
14285      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14286      struct neon_type_el et = neon_check_type (3, rs,
14287	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
14288      unsigned int tmp;
14289
14290      /* VSHL/VQSHL 3-register variants have syntax such as:
14291	   vshl.xx Dd, Dm, Dn
14292	 whereas other 3-register operations encoded by neon_three_same have
14293	 syntax like:
14294	   vadd.xx Dd, Dn, Dm
14295	 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
14296	 here.  */
14297      tmp = inst.operands[2].reg;
14298      inst.operands[2].reg = inst.operands[1].reg;
14299      inst.operands[1].reg = tmp;
14300      NEON_ENCODE (INTEGER, inst);
14301      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14302    }
14303}
14304
14305static void
14306do_neon_qshl_imm (void)
14307{
14308  if (!inst.operands[2].isreg)
14309    {
14310      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14311      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
14312      int imm = inst.operands[2].imm;
14313
14314      constraint (imm < 0 || (unsigned)imm >= et.size,
14315		  _("immediate out of range for shift"));
14316      NEON_ENCODE (IMMED, inst);
14317      neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, imm);
14318    }
14319  else
14320    {
14321      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14322      struct neon_type_el et = neon_check_type (3, rs,
14323	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
14324      unsigned int tmp;
14325
14326      /* See note in do_neon_shl_imm.  */
14327      tmp = inst.operands[2].reg;
14328      inst.operands[2].reg = inst.operands[1].reg;
14329      inst.operands[1].reg = tmp;
14330      NEON_ENCODE (INTEGER, inst);
14331      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14332    }
14333}
14334
14335static void
14336do_neon_rshl (void)
14337{
14338  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14339  struct neon_type_el et = neon_check_type (3, rs,
14340    N_EQK, N_EQK, N_SU_ALL | N_KEY);
14341  unsigned int tmp;
14342
14343  tmp = inst.operands[2].reg;
14344  inst.operands[2].reg = inst.operands[1].reg;
14345  inst.operands[1].reg = tmp;
14346  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14347}
14348
14349static int
14350neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
14351{
14352  /* Handle .I8 pseudo-instructions.  */
14353  if (size == 8)
14354    {
14355      /* Unfortunately, this will make everything apart from zero out-of-range.
14356	 FIXME: is this the intended semantics?  There doesn't seem to be much
14357	 point in accepting .I8 if so.  */
14358      immediate |= immediate << 8;
14359      size = 16;
14360    }
14361
14362  if (size >= 32)
14363    {
14364      if (immediate == (immediate & 0x000000ff))
14365	{
14366	  *immbits = immediate;
14367	  return 0x1;
14368	}
14369      else if (immediate == (immediate & 0x0000ff00))
14370	{
14371	  *immbits = immediate >> 8;
14372	  return 0x3;
14373	}
14374      else if (immediate == (immediate & 0x00ff0000))
14375	{
14376	  *immbits = immediate >> 16;
14377	  return 0x5;
14378	}
14379      else if (immediate == (immediate & 0xff000000))
14380	{
14381	  *immbits = immediate >> 24;
14382	  return 0x7;
14383	}
14384      if ((immediate & 0xffff) != (immediate >> 16))
14385	goto bad_immediate;
14386      immediate &= 0xffff;
14387    }
14388
14389  if (immediate == (immediate & 0x000000ff))
14390    {
14391      *immbits = immediate;
14392      return 0x9;
14393    }
14394  else if (immediate == (immediate & 0x0000ff00))
14395    {
14396      *immbits = immediate >> 8;
14397      return 0xb;
14398    }
14399
14400  bad_immediate:
14401  first_error (_("immediate value out of range"));
14402  return FAIL;
14403}
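
/* For illustration, a 32-bit logic immediate of 0x00ab0000 is accepted
   with *IMMBITS set to 0xab and cmode 0x5, whereas 0x0000ab00 with a
   16-bit element size yields *IMMBITS == 0xab and cmode 0xb; values that
   do not fit one of these single-byte forms are rejected with "immediate
   value out of range".  */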
14404
14405static void
14406do_neon_logic (void)
14407{
14408  if (inst.operands[2].present && inst.operands[2].isreg)
14409    {
14410      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14411      neon_check_type (3, rs, N_IGNORE_TYPE);
14412      /* U bit and size field were set as part of the bitmask.  */
14413      NEON_ENCODE (INTEGER, inst);
14414      neon_three_same (neon_quad (rs), 0, -1);
14415    }
14416  else
14417    {
14418      const int three_ops_form = (inst.operands[2].present
14419				  && !inst.operands[2].isreg);
14420      const int immoperand = (three_ops_form ? 2 : 1);
14421      enum neon_shape rs = (three_ops_form
14422			    ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
14423			    : neon_select_shape (NS_DI, NS_QI, NS_NULL));
14424      struct neon_type_el et = neon_check_type (2, rs,
14425	N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
14426      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
14427      unsigned immbits;
14428      int cmode;
14429
14430      if (et.type == NT_invtype)
14431	return;
14432
14433      if (three_ops_form)
14434	constraint (inst.operands[0].reg != inst.operands[1].reg,
14435		    _("first and second operands shall be the same register"));
14436
14437      NEON_ENCODE (IMMED, inst);
14438
14439      immbits = inst.operands[immoperand].imm;
14440      if (et.size == 64)
14441	{
14442	  /* .i64 is a pseudo-op, so the immediate must be a repeating
14443	     pattern.  */
14444	  if (immbits != (inst.operands[immoperand].regisimm ?
14445			  inst.operands[immoperand].reg : 0))
14446	    {
14447	      /* Set immbits to an invalid constant.  */
14448	      immbits = 0xdeadbeef;
14449	    }
14450	}
14451
14452      switch (opcode)
14453	{
14454	case N_MNEM_vbic:
14455	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
14456	  break;
14457
14458	case N_MNEM_vorr:
14459	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
14460	  break;
14461
14462	case N_MNEM_vand:
14463	  /* Pseudo-instruction for VBIC.  */
14464	  neon_invert_size (&immbits, 0, et.size);
14465	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
14466	  break;
14467
14468	case N_MNEM_vorn:
14469	  /* Pseudo-instruction for VORR.  */
14470	  neon_invert_size (&immbits, 0, et.size);
14471	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
14472	  break;
14473
14474	default:
14475	  abort ();
14476	}
14477
14478      if (cmode == FAIL)
14479	return;
14480
14481      inst.instruction |= neon_quad (rs) << 6;
14482      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14483      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14484      inst.instruction |= cmode << 8;
14485      neon_write_immbits (immbits);
14486
14487      neon_dp_fixup (&inst);
14488    }
14489}
14490
14491static void
14492do_neon_bitfield (void)
14493{
14494  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14495  neon_check_type (3, rs, N_IGNORE_TYPE);
14496  neon_three_same (neon_quad (rs), 0, -1);
14497}
14498
14499static void
14500neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
14501		  unsigned destbits)
14502{
14503  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14504  struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
14505					    types | N_KEY);
14506  if (et.type == NT_float)
14507    {
14508      NEON_ENCODE (FLOAT, inst);
14509      neon_three_same (neon_quad (rs), 0, -1);
14510    }
14511  else
14512    {
14513      NEON_ENCODE (INTEGER, inst);
14514      neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
14515    }
14516}
14517
14518static void
14519do_neon_dyadic_if_su (void)
14520{
14521  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
14522}
14523
14524static void
14525do_neon_dyadic_if_su_d (void)
14526{
14527  /* This version only allows D registers, but that constraint is enforced
14528     during operand parsing so we don't need to do anything extra here.  */
14529  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
14530}
14531
14532static void
14533do_neon_dyadic_if_i_d (void)
14534{
14535  /* The "untyped" case can't happen. Do this to stop the "U" bit being
14536     affected if we specify unsigned args.  */
14537  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
14538}
14539
14540enum vfp_or_neon_is_neon_bits
14541{
14542  NEON_CHECK_CC = 1,
14543  NEON_CHECK_ARCH = 2,
14544  NEON_CHECK_ARCH8 = 4
14545};
14546
14547/* Call this function for an instruction which may have belonged to either the
14548   VFP or Neon instruction sets, but turned out to be a Neon instruction (due to
14549   the operand types involved, etc.).  We have to check and/or fix up a couple of
14550   things:
14551
14552     - Make sure the user hasn't attempted to make a Neon instruction
14553       conditional.
14554     - Alter the value in the condition code field if necessary.
14555     - Make sure that the arch supports Neon instructions.
14556
14557   Which of these operations take place depends on bits from enum
14558   vfp_or_neon_is_neon_bits.
14559
14560   WARNING: This function has side effects! If NEON_CHECK_CC is used and the
14561   current instruction's condition is COND_ALWAYS, the condition field is
14562   changed to inst.uncond_value. This is necessary because instructions shared
14563   between VFP and Neon may be conditional for the VFP variants only, and the
14564   unconditional Neon version must have, e.g., 0xF in the condition field.  */
14565
14566static int
14567vfp_or_neon_is_neon (unsigned check)
14568{
14569  /* Conditions are always legal in Thumb mode (IT blocks).  */
14570  if (!thumb_mode && (check & NEON_CHECK_CC))
14571    {
14572      if (inst.cond != COND_ALWAYS)
14573	{
14574	  first_error (_(BAD_COND));
14575	  return FAIL;
14576	}
14577      if (inst.uncond_value != -1)
14578	inst.instruction |= inst.uncond_value << 28;
14579    }
14580
14581  if ((check & NEON_CHECK_ARCH)
14582      && !mark_feature_used (&fpu_neon_ext_v1))
14583    {
14584      first_error (_(BAD_FPU));
14585      return FAIL;
14586    }
14587
14588  if ((check & NEON_CHECK_ARCH8)
14589      && !mark_feature_used (&fpu_neon_ext_armv8))
14590    {
14591      first_error (_(BAD_FPU));
14592      return FAIL;
14593    }
14594
14595  return SUCCESS;
14596}
14597
14598static void
14599do_neon_addsub_if_i (void)
14600{
14601  if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
14602    return;
14603
14604  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14605    return;
14606
14607  /* The "untyped" case can't happen. Do this to stop the "U" bit being
14608     affected if we specify unsigned args.  */
14609  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
14610}
14611
14612/* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
14613   result to be:
14614     V<op> A,B     (A is operand 0, B is operand 2)
14615   to mean:
14616     V<op> A,B,A
14617   not:
14618     V<op> A,B,B
14619   so handle that case specially.  */
14620
14621static void
14622neon_exchange_operands (void)
14623{
14624  void *scratch = alloca (sizeof (inst.operands[0]));
14625  if (inst.operands[1].present)
14626    {
14627      /* Swap operands[1] and operands[2].  */
14628      memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
14629      inst.operands[1] = inst.operands[2];
14630      memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
14631    }
14632  else
14633    {
14634      inst.operands[1] = inst.operands[2];
14635      inst.operands[2] = inst.operands[0];
14636    }
14637}
14638
14639static void
14640neon_compare (unsigned regtypes, unsigned immtypes, int invert)
14641{
14642  if (inst.operands[2].isreg)
14643    {
14644      if (invert)
14645	neon_exchange_operands ();
14646      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
14647    }
14648  else
14649    {
14650      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14651      struct neon_type_el et = neon_check_type (2, rs,
14652	N_EQK | N_SIZ, immtypes | N_KEY);
14653
14654      NEON_ENCODE (IMMED, inst);
14655      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14656      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14657      inst.instruction |= LOW4 (inst.operands[1].reg);
14658      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14659      inst.instruction |= neon_quad (rs) << 6;
14660      inst.instruction |= (et.type == NT_float) << 10;
14661      inst.instruction |= neon_logbits (et.size) << 18;
14662
14663      neon_dp_fixup (&inst);
14664    }
14665}
14666
14667static void
14668do_neon_cmp (void)
14669{
14670  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
14671}
14672
14673static void
14674do_neon_cmp_inv (void)
14675{
14676  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
14677}
14678
14679static void
14680do_neon_ceq (void)
14681{
14682  neon_compare (N_IF_32, N_IF_32, FALSE);
14683}
14684
14685/* For multiply instructions, we have the possibility of 16-bit or 32-bit
14686   scalars, which are encoded in 5 bits, M : Rm.
14687   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
14688   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
14689   index in M.  */
14690
14691static unsigned
14692neon_scalar_for_mul (unsigned scalar, unsigned elsize)
14693{
14694  unsigned regno = NEON_SCALAR_REG (scalar);
14695  unsigned elno = NEON_SCALAR_INDEX (scalar);
14696
14697  switch (elsize)
14698    {
14699    case 16:
14700      if (regno > 7 || elno > 3)
14701	goto bad_scalar;
14702      return regno | (elno << 3);
14703
14704    case 32:
14705      if (regno > 15 || elno > 1)
14706	goto bad_scalar;
14707      return regno | (elno << 4);
14708
14709    default:
14710    bad_scalar:
14711      first_error (_("scalar out of range for multiply instruction"));
14712    }
14713
14714  return 0;
14715}
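
/* For illustration, the scalar d3[1] used with a 32-bit element size is
   encoded as 3 | (1 << 4) == 0x13, i.e. Rm == 3 with the index in M,
   while a 16-bit scalar d3[1] becomes 3 | (1 << 3) == 0x0b, with the
   two-bit index split across M:Rm[3].  */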
14716
14717/* Encode multiply / multiply-accumulate scalar instructions.  */
14718
14719static void
14720neon_mul_mac (struct neon_type_el et, int ubit)
14721{
14722  unsigned scalar;
14723
14724  /* Give a more helpful error message if we have an invalid type.  */
14725  if (et.type == NT_invtype)
14726    return;
14727
14728  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
14729  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14730  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14731  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14732  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14733  inst.instruction |= LOW4 (scalar);
14734  inst.instruction |= HI1 (scalar) << 5;
14735  inst.instruction |= (et.type == NT_float) << 8;
14736  inst.instruction |= neon_logbits (et.size) << 20;
14737  inst.instruction |= (ubit != 0) << 24;
14738
14739  neon_dp_fixup (&inst);
14740}
14741
14742static void
14743do_neon_mac_maybe_scalar (void)
14744{
14745  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
14746    return;
14747
14748  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14749    return;
14750
14751  if (inst.operands[2].isscalar)
14752    {
14753      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
14754      struct neon_type_el et = neon_check_type (3, rs,
14755	N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
14756      NEON_ENCODE (SCALAR, inst);
14757      neon_mul_mac (et, neon_quad (rs));
14758    }
14759  else
14760    {
14761      /* The "untyped" case can't happen.  Do this to stop the "U" bit being
14762	 affected if we specify unsigned args.  */
14763      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
14764    }
14765}
14766
14767static void
14768do_neon_fmac (void)
14769{
14770  if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
14771    return;
14772
14773  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14774    return;
14775
14776  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
14777}
14778
14779static void
14780do_neon_tst (void)
14781{
14782  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14783  struct neon_type_el et = neon_check_type (3, rs,
14784    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
14785  neon_three_same (neon_quad (rs), 0, et.size);
14786}
14787
14788/* VMUL with 3 registers allows the P8 type. The scalar version supports the
14789   same types as the MAC equivalents. The polynomial type for this instruction
14790   is encoded the same as the integer type.  */
14791
14792static void
14793do_neon_mul (void)
14794{
14795  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
14796    return;
14797
14798  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14799    return;
14800
14801  if (inst.operands[2].isscalar)
14802    do_neon_mac_maybe_scalar ();
14803  else
14804    neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
14805}
14806
14807static void
14808do_neon_qdmulh (void)
14809{
14810  if (inst.operands[2].isscalar)
14811    {
14812      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
14813      struct neon_type_el et = neon_check_type (3, rs,
14814	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
14815      NEON_ENCODE (SCALAR, inst);
14816      neon_mul_mac (et, neon_quad (rs));
14817    }
14818  else
14819    {
14820      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14821      struct neon_type_el et = neon_check_type (3, rs,
14822	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
14823      NEON_ENCODE (INTEGER, inst);
14824      /* The U bit (rounding) comes from bit mask.  */
14825      neon_three_same (neon_quad (rs), 0, et.size);
14826    }
14827}
14828
14829static void
14830do_neon_fcmp_absolute (void)
14831{
14832  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14833  neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
14834  /* Size field comes from bit mask.  */
14835  neon_three_same (neon_quad (rs), 1, -1);
14836}
14837
14838static void
14839do_neon_fcmp_absolute_inv (void)
14840{
14841  neon_exchange_operands ();
14842  do_neon_fcmp_absolute ();
14843}
14844
14845static void
14846do_neon_step (void)
14847{
14848  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14849  neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
14850  neon_three_same (neon_quad (rs), 0, -1);
14851}
14852
14853static void
14854do_neon_abs_neg (void)
14855{
14856  enum neon_shape rs;
14857  struct neon_type_el et;
14858
14859  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
14860    return;
14861
14862  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14863    return;
14864
14865  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14866  et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);
14867
14868  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14869  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14870  inst.instruction |= LOW4 (inst.operands[1].reg);
14871  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14872  inst.instruction |= neon_quad (rs) << 6;
14873  inst.instruction |= (et.type == NT_float) << 10;
14874  inst.instruction |= neon_logbits (et.size) << 18;
14875
14876  neon_dp_fixup (&inst);
14877}
14878
14879static void
14880do_neon_sli (void)
14881{
14882  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14883  struct neon_type_el et = neon_check_type (2, rs,
14884    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
14885  int imm = inst.operands[2].imm;
14886  constraint (imm < 0 || (unsigned)imm >= et.size,
14887	      _("immediate out of range for insert"));
14888  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
14889}
14890
14891static void
14892do_neon_sri (void)
14893{
14894  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14895  struct neon_type_el et = neon_check_type (2, rs,
14896    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
14897  int imm = inst.operands[2].imm;
14898  constraint (imm < 1 || (unsigned)imm > et.size,
14899	      _("immediate out of range for insert"));
14900  neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
14901}
14902
14903static void
14904do_neon_qshlu_imm (void)
14905{
14906  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14907  struct neon_type_el et = neon_check_type (2, rs,
14908    N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
14909  int imm = inst.operands[2].imm;
14910  constraint (imm < 0 || (unsigned)imm >= et.size,
14911	      _("immediate out of range for shift"));
14912  /* Only encodes the 'U present' variant of the instruction.
14913     In this case, signed types have OP (bit 8) set to 0.
14914     Unsigned types have OP set to 1.  */
14915  inst.instruction |= (et.type == NT_unsigned) << 8;
14916  /* The rest of the bits are the same as other immediate shifts.  */
14917  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
14918}
14919
14920static void
14921do_neon_qmovn (void)
14922{
14923  struct neon_type_el et = neon_check_type (2, NS_DQ,
14924    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
14925  /* Saturating move where operands can be signed or unsigned, and the
14926     destination has the same signedness.  */
14927  NEON_ENCODE (INTEGER, inst);
14928  if (et.type == NT_unsigned)
14929    inst.instruction |= 0xc0;
14930  else
14931    inst.instruction |= 0x80;
14932  neon_two_same (0, 1, et.size / 2);
14933}
14934
14935static void
14936do_neon_qmovun (void)
14937{
14938  struct neon_type_el et = neon_check_type (2, NS_DQ,
14939    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
14940  /* Saturating move with unsigned results. Operands must be signed.  */
14941  NEON_ENCODE (INTEGER, inst);
14942  neon_two_same (0, 1, et.size / 2);
14943}
14944
14945static void
14946do_neon_rshift_sat_narrow (void)
14947{
14948  /* FIXME: Types for narrowing. If operands are signed, results can be signed
14949     or unsigned. If operands are unsigned, results must also be unsigned.  */
14950  struct neon_type_el et = neon_check_type (2, NS_DQI,
14951    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
14952  int imm = inst.operands[2].imm;
14953  /* This gets the bounds check, size encoding and immediate bits calculation
14954     right.  */
14955  et.size /= 2;
14956
14957  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
14958     VQMOVN.I<size> <Dd>, <Qm>.  */
14959  if (imm == 0)
14960    {
14961      inst.operands[2].present = 0;
14962      inst.instruction = N_MNEM_vqmovn;
14963      do_neon_qmovn ();
14964      return;
14965    }
14966
14967  constraint (imm < 1 || (unsigned)imm > et.size,
14968	      _("immediate out of range"));
14969  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
14970}
14971
14972static void
14973do_neon_rshift_sat_narrow_u (void)
14974{
14975  /* FIXME: Types for narrowing. If operands are signed, results can be signed
14976     or unsigned. If operands are unsigned, results must also be unsigned.  */
14977  struct neon_type_el et = neon_check_type (2, NS_DQI,
14978    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
14979  int imm = inst.operands[2].imm;
14980  /* This gets the bounds check, size encoding and immediate bits calculation
14981     right.  */
14982  et.size /= 2;
14983
14984  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
14985     VQMOVUN.I<size> <Dd>, <Qm>.  */
14986  if (imm == 0)
14987    {
14988      inst.operands[2].present = 0;
14989      inst.instruction = N_MNEM_vqmovun;
14990      do_neon_qmovun ();
14991      return;
14992    }
14993
14994  constraint (imm < 1 || (unsigned)imm > et.size,
14995	      _("immediate out of range"));
14996  /* FIXME: The manual is kind of unclear about what value U should have in
14997     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
14998     must be 1.  */
14999  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
15000}
15001
15002static void
15003do_neon_movn (void)
15004{
15005  struct neon_type_el et = neon_check_type (2, NS_DQ,
15006    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
15007  NEON_ENCODE (INTEGER, inst);
15008  neon_two_same (0, 1, et.size / 2);
15009}
15010
15011static void
15012do_neon_rshift_narrow (void)
15013{
15014  struct neon_type_el et = neon_check_type (2, NS_DQI,
15015    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
15016  int imm = inst.operands[2].imm;
15017  /* This gets the bounds check, size encoding and immediate bits calculation
15018     right.  */
15019  et.size /= 2;
15020
15021  /* If the immediate is zero then this is a pseudo-instruction for
15022     VMOVN.I<size> <Dd>, <Qm>.  */
15023  if (imm == 0)
15024    {
15025      inst.operands[2].present = 0;
15026      inst.instruction = N_MNEM_vmovn;
15027      do_neon_movn ();
15028      return;
15029    }
15030
15031  constraint (imm < 1 || (unsigned)imm > et.size,
15032	      _("immediate out of range for narrowing operation"));
15033  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
15034}
15035
15036static void
15037do_neon_shll (void)
15038{
15039  /* FIXME: Type checking when lengthening.  */
15040  struct neon_type_el et = neon_check_type (2, NS_QDI,
15041    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
15042  unsigned imm = inst.operands[2].imm;
15043
15044  if (imm == et.size)
15045    {
15046      /* Maximum shift variant.  */
15047      NEON_ENCODE (INTEGER, inst);
15048      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15049      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15050      inst.instruction |= LOW4 (inst.operands[1].reg);
15051      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15052      inst.instruction |= neon_logbits (et.size) << 18;
15053
15054      neon_dp_fixup (&inst);
15055    }
15056  else
15057    {
15058      /* A more-specific type check for non-max versions.  */
15059      et = neon_check_type (2, NS_QDI,
15060	N_EQK | N_DBL, N_SU_32 | N_KEY);
15061      NEON_ENCODE (IMMED, inst);
15062      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
15063    }
15064}
15065
15066/* Check the various types for the VCVT instruction, and return which version
15067   the current instruction is.  */
15068
15069#define CVT_FLAVOUR_VAR							      \
15070  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg,   "ftosls", "ftosis", "ftosizs") \
15071  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg,   "ftouls", "ftouis", "ftouizs") \
15072  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg,   "fsltos", "fsitos", NULL)      \
15073  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg,   "fultos", "fuitos", NULL)      \
15074  /* Half-precision conversions.  */					      \
15075  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg,   NULL,     NULL,     NULL)      \
15076  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg,   NULL,     NULL,     NULL)      \
15077  /* VFP instructions.  */						      \
15078  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP,       NULL,     "fcvtsd", NULL)      \
15079  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP,       NULL,     "fcvtds", NULL)      \
15080  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
15081  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
15082  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL)      \
15083  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL)      \
15084  /* VFP instructions with bitshift.  */				      \
15085  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL,     NULL)      \
15086  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL,     NULL)      \
15087  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL,     NULL)      \
15088  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL,     NULL)      \
15089  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL,     NULL)      \
15090  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL,     NULL)      \
15091  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL,     NULL)      \
15092  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL,     NULL)
15093
15094#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
15095  neon_cvt_flavour_##C,
15096
15097/* The different types of conversions we can do.  */
15098enum neon_cvt_flavour
15099{
15100  CVT_FLAVOUR_VAR
15101  neon_cvt_flavour_invalid,
15102  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
15103};
15104
15105#undef CVT_VAR
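
/* Illustrative note: with the CVT_VAR definition above, expanding
   CVT_FLAVOUR_VAR turns each CVT_VAR line into one enumerator, e.g. the
   first line becomes neon_cvt_flavour_s32_f32.  */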
15106
15107static enum neon_cvt_flavour
15108get_neon_cvt_flavour (enum neon_shape rs)
15109{
15110#define CVT_VAR(C,X,Y,R,BSN,CN,ZN)			\
15111  et = neon_check_type (2, rs, (R) | (X), (R) | (Y));	\
15112  if (et.type != NT_invtype)				\
15113    {							\
15114      inst.error = NULL;				\
15115      return (neon_cvt_flavour_##C);			\
15116    }
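  /* CVT_FLAVOUR_VAR below expands the macro above once per flavour, trying
     each (destination, source) type pair in turn and returning the first
     flavour whose types match the current operands.  */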
15117
15118  struct neon_type_el et;
15119  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
15120			|| rs == NS_FF) ? N_VFP : 0;
15121  /* The instruction versions which take an immediate take one register
15122     argument, which is extended to the width of the full register. Thus the
15123     "source" and "destination" registers must have the same width.  Hack that
15124     here by making the size equal to the key (wider, in this case) operand.  */
15125  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;
15126
15127  CVT_FLAVOUR_VAR;
15128
15129  return neon_cvt_flavour_invalid;
15130#undef CVT_VAR
15131}
15132
15133enum neon_cvt_mode
15134{
15135  neon_cvt_mode_a,
15136  neon_cvt_mode_n,
15137  neon_cvt_mode_p,
15138  neon_cvt_mode_m,
15139  neon_cvt_mode_z,
15140  neon_cvt_mode_x,
15141  neon_cvt_mode_r
15142};
15143
15144/* Neon-syntax VFP conversions.  */
15145
15146static void
15147do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
15148{
15149  const char *opname = 0;
15150
15151  if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI)
15152    {
15153      /* Conversions with immediate bitshift.  */
15154      const char *enc[] =
15155	{
15156#define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
15157	  CVT_FLAVOUR_VAR
15158	  NULL
15159#undef CVT_VAR
15160	};
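      /* enc[] is indexed by enum neon_cvt_flavour and selects the BSN column
         of CVT_FLAVOUR_VAR, e.g. "fshtos" for an f32 <- s16 fixed-point
         conversion.  */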
15161
15162      if (flavour < (int) ARRAY_SIZE (enc))
15163	{
15164	  opname = enc[flavour];
15165	  constraint (inst.operands[0].reg != inst.operands[1].reg,
15166		      _("operands 0 and 1 must be the same register"));
15167	  inst.operands[1] = inst.operands[2];
15168	  memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
15169	}
15170    }
15171  else
15172    {
15173      /* Conversions without bitshift.  */
15174      const char *enc[] =
15175	{
15176#define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
15177	  CVT_FLAVOUR_VAR
15178	  NULL
15179#undef CVT_VAR
15180	};
15181
15182      if (flavour < (int) ARRAY_SIZE (enc))
15183	opname = enc[flavour];
15184    }
15185
15186  if (opname)
15187    do_vfp_nsyn_opcode (opname);
15188}
15189
15190static void
15191do_vfp_nsyn_cvtz (void)
15192{
15193  enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL);
15194  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
15195  const char *enc[] =
15196    {
15197#define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
15198      CVT_FLAVOUR_VAR
15199      NULL
15200#undef CVT_VAR
15201    };
15202
15203  if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
15204    do_vfp_nsyn_opcode (enc[flavour]);
15205}
15206
15207static void
15208do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
15209		      enum neon_cvt_mode mode)
15210{
15211  int sz, op;
15212  int rm;
15213
15214  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
15215     D register operands.  */
15216  if (flavour == neon_cvt_flavour_s32_f64
15217      || flavour == neon_cvt_flavour_u32_f64)
15218    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
15219		_(BAD_FPU));
15220
15221  set_it_insn_type (OUTSIDE_IT_INSN);
15222
15223  switch (flavour)
15224    {
15225    case neon_cvt_flavour_s32_f64:
15226      sz = 1;
15227      op = 1;
15228      break;
15229    case neon_cvt_flavour_s32_f32:
15230      sz = 0;
15231      op = 1;
15232      break;
15233    case neon_cvt_flavour_u32_f64:
15234      sz = 1;
15235      op = 0;
15236      break;
15237    case neon_cvt_flavour_u32_f32:
15238      sz = 0;
15239      op = 0;
15240      break;
15241    default:
15242      first_error (_("invalid instruction shape"));
15243      return;
15244    }
15245
15246  switch (mode)
15247    {
15248    case neon_cvt_mode_a: rm = 0; break;
15249    case neon_cvt_mode_n: rm = 1; break;
15250    case neon_cvt_mode_p: rm = 2; break;
15251    case neon_cvt_mode_m: rm = 3; break;
15252    default: first_error (_("invalid rounding mode")); return;
15253    }
15254
15255  NEON_ENCODE (FPV8, inst);
15256  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
15257  encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
15258  inst.instruction |= sz << 8;
15259  inst.instruction |= op << 7;
15260  inst.instruction |= rm << 16;
15261  inst.instruction |= 0xf0000000;
15262  inst.is_neon = TRUE;
15263}
15264
15265static void
15266do_neon_cvt_1 (enum neon_cvt_mode mode)
15267{
15268  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
15269    NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ, NS_NULL);
15270  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
15271
15272  /* PR11109: Handle round-to-zero for VCVT conversions.  */
15273  if (mode == neon_cvt_mode_z
15274      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
15275      && (flavour == neon_cvt_flavour_s32_f32
15276	  || flavour == neon_cvt_flavour_u32_f32
15277	  || flavour == neon_cvt_flavour_s32_f64
15278	  || flavour == neon_cvt_flavour_u32_f64)
15279      && (rs == NS_FD || rs == NS_FF))
15280    {
15281      do_vfp_nsyn_cvtz ();
15282      return;
15283    }
15284
15285  /* VFP rather than Neon conversions.  */
15286  if (flavour >= neon_cvt_flavour_first_fp)
15287    {
15288      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
15289	do_vfp_nsyn_cvt (rs, flavour);
15290      else
15291	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
15292
15293      return;
15294    }
15295
15296  switch (rs)
15297    {
15298    case NS_DDI:
15299    case NS_QQI:
15300      {
15301	unsigned immbits;
15302	unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
15303
15304	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15305	  return;
15306
15307	/* Fixed-point conversion with #0 immediate is encoded as an
15308	   integer conversion.  */
15309	if (inst.operands[2].present && inst.operands[2].imm == 0)
15310	  goto int_encode;
15311	immbits = 32 - inst.operands[2].imm;
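	/* Combined with the 1 << 21 below, bits [21:16] end up holding 64
	   minus the fixed-point immediate (immbits supplies 32 - imm and
	   bit 21 the other 32).  */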
15312	NEON_ENCODE (IMMED, inst);
15313	if (flavour != neon_cvt_flavour_invalid)
15314	  inst.instruction |= enctab[flavour];
15315	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15316	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15317	inst.instruction |= LOW4 (inst.operands[1].reg);
15318	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15319	inst.instruction |= neon_quad (rs) << 6;
15320	inst.instruction |= 1 << 21;
15321	inst.instruction |= immbits << 16;
15322
15323	neon_dp_fixup (&inst);
15324      }
15325      break;
15326
15327    case NS_DD:
15328    case NS_QQ:
15329      if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
15330	{
15331	  NEON_ENCODE (FLOAT, inst);
15332	  set_it_insn_type (OUTSIDE_IT_INSN);
15333
15334	  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
15335	    return;
15336
15337	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15338	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15339	  inst.instruction |= LOW4 (inst.operands[1].reg);
15340	  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15341	  inst.instruction |= neon_quad (rs) << 6;
15342	  inst.instruction |= (flavour == neon_cvt_flavour_u32_f32) << 7;
15343	  inst.instruction |= mode << 8;
15344	  if (thumb_mode)
15345	    inst.instruction |= 0xfc000000;
15346	  else
15347	    inst.instruction |= 0xf0000000;
15348	}
15349      else
15350	{
15351    int_encode:
15352	  {
15353	    unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };
15354
15355	    NEON_ENCODE (INTEGER, inst);
15356
15357	    if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15358	      return;
15359
15360	    if (flavour != neon_cvt_flavour_invalid)
15361	      inst.instruction |= enctab[flavour];
15362
15363	    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15364	    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15365	    inst.instruction |= LOW4 (inst.operands[1].reg);
15366	    inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15367	    inst.instruction |= neon_quad (rs) << 6;
15368	    inst.instruction |= 2 << 18;
15369
15370	    neon_dp_fixup (&inst);
15371	  }
15372	}
15373      break;
15374
15375    /* Half-precision conversions for Advanced SIMD (Neon).  */
15376    case NS_QD:
15377    case NS_DQ:
15378
15379      if ((rs == NS_DQ)
15380	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
15381	  {
15382	    as_bad (_("operand size must match register width"));
15383	    break;
15384	  }
15385
15386      if ((rs == NS_QD)
15387	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
15388	  {
15389	    as_bad (_("operand size must match register width"));
15390	    break;
15391	  }
15392
15393      if (rs == NS_DQ)
15394	inst.instruction = 0x3b60600;
15395      else
15396	inst.instruction = 0x3b60700;
15397
15398      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15399      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15400      inst.instruction |= LOW4 (inst.operands[1].reg);
15401      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15402      neon_dp_fixup (&inst);
15403      break;
15404
15405    default:
15406      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
15407      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
15408	do_vfp_nsyn_cvt (rs, flavour);
15409      else
15410	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
15411    }
15412}
15413
15414static void
15415do_neon_cvtr (void)
15416{
15417  do_neon_cvt_1 (neon_cvt_mode_x);
15418}
15419
15420static void
15421do_neon_cvt (void)
15422{
15423  do_neon_cvt_1 (neon_cvt_mode_z);
15424}
15425
15426static void
15427do_neon_cvta (void)
15428{
15429  do_neon_cvt_1 (neon_cvt_mode_a);
15430}
15431
15432static void
15433do_neon_cvtn (void)
15434{
15435  do_neon_cvt_1 (neon_cvt_mode_n);
15436}
15437
15438static void
15439do_neon_cvtp (void)
15440{
15441  do_neon_cvt_1 (neon_cvt_mode_p);
15442}
15443
15444static void
15445do_neon_cvtm (void)
15446{
15447  do_neon_cvt_1 (neon_cvt_mode_m);
15448}
15449
15450static void
15451do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
15452{
15453  if (is_double)
15454    mark_feature_used (&fpu_vfp_ext_armv8);
15455
15456  encode_arm_vfp_reg (inst.operands[0].reg,
15457		      (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
15458  encode_arm_vfp_reg (inst.operands[1].reg,
15459		      (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
15460  inst.instruction |= to ? 0x10000 : 0;
15461  inst.instruction |= t ? 0x80 : 0;
15462  inst.instruction |= is_double ? 0x100 : 0;
15463  do_vfp_cond_or_thumb ();
15464}
15465
15466static void
15467do_neon_cvttb_1 (bfd_boolean t)
15468{
15469  enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_DF, NS_NULL);
15470
15471  if (rs == NS_NULL)
15472    return;
15473  else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
15474    {
15475      inst.error = NULL;
15476      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
15477    }
15478  else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
15479    {
15480      inst.error = NULL;
15481      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
15482    }
15483  else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
15484    {
15485      /* The VCVTB and VCVTT instructions with D-register operands
15486         don't work for SP-only targets.  */
15487      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
15488		  _(BAD_FPU));
15489
15490      inst.error = NULL;
15491      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
15492    }
15493  else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
15494    {
15495      /* The VCVTB and VCVTT instructions with D-register operands
15496         don't work for SP-only targets.  */
15497      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
15498		  _(BAD_FPU));
15499
15500      inst.error = NULL;
15501      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
15502    }
15503  else
15504    return;
15505}
15506
15507static void
15508do_neon_cvtb (void)
15509{
15510  do_neon_cvttb_1 (FALSE);
15511}
15512
15513
15514static void
15515do_neon_cvtt (void)
15516{
15517  do_neon_cvttb_1 (TRUE);
15518}
15519
15520static void
15521neon_move_immediate (void)
15522{
15523  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
15524  struct neon_type_el et = neon_check_type (2, rs,
15525    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
15526  unsigned immlo, immhi = 0, immbits;
15527  int op, cmode, float_p;
15528
15529  constraint (et.type == NT_invtype,
15530	      _("operand size must be specified for immediate VMOV"));
15531
15532  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
15533  op = (inst.instruction & (1 << 5)) != 0;
15534
15535  immlo = inst.operands[1].imm;
15536  if (inst.operands[1].regisimm)
15537    immhi = inst.operands[1].reg;
15538
15539  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
15540	      _("immediate has bits set outside the operand size"));
15541
15542  float_p = inst.operands[1].immisfloat;
15543
15544  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
15545					et.size, et.type)) == FAIL)
15546    {
15547      /* Invert relevant bits only.  */
15548      neon_invert_size (&immlo, &immhi, et.size);
15549      /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
15550	 with one or the other; those cases are caught by
15551	 neon_cmode_for_move_imm.  */
15552      op = !op;
15553      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
15554					    &op, et.size, et.type)) == FAIL)
15555	{
15556	  first_error (_("immediate out of range"));
15557	  return;
15558	}
15559    }
15560
15561  inst.instruction &= ~(1 << 5);
15562  inst.instruction |= op << 5;
15563
15564  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15565  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15566  inst.instruction |= neon_quad (rs) << 6;
15567  inst.instruction |= cmode << 8;
15568
15569  neon_write_immbits (immbits);
15570}
15571
15572static void
15573do_neon_mvn (void)
15574{
15575  if (inst.operands[1].isreg)
15576    {
15577      enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15578
15579      NEON_ENCODE (INTEGER, inst);
15580      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15581      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15582      inst.instruction |= LOW4 (inst.operands[1].reg);
15583      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15584      inst.instruction |= neon_quad (rs) << 6;
15585    }
15586  else
15587    {
15588      NEON_ENCODE (IMMED, inst);
15589      neon_move_immediate ();
15590    }
15591
15592  neon_dp_fixup (&inst);
15593}
15594
15595/* Encode instructions of form:
15596
15597  |28/24|23|22|21 20|19 16|15 12|11    8|7|6|5|4|3  0|
15598  |  U  |x |D |size | Rn  | Rd  |x x x x|N|x|M|x| Rm |  */
15599
15600static void
15601neon_mixed_length (struct neon_type_el et, unsigned size)
15602{
15603  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15604  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15605  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
15606  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
15607  inst.instruction |= LOW4 (inst.operands[2].reg);
15608  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
15609  inst.instruction |= (et.type == NT_unsigned) << 24;
15610  inst.instruction |= neon_logbits (size) << 20;
15611
15612  neon_dp_fixup (&inst);
15613}
15614
15615static void
15616do_neon_dyadic_long (void)
15617{
15618  /* FIXME: Type checking for lengthening op.  */
15619  struct neon_type_el et = neon_check_type (3, NS_QDD,
15620    N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
15621  neon_mixed_length (et, et.size);
15622}
15623
15624static void
15625do_neon_abal (void)
15626{
15627  struct neon_type_el et = neon_check_type (3, NS_QDD,
15628    N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
15629  neon_mixed_length (et, et.size);
15630}
15631
15632static void
15633neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
15634{
15635  if (inst.operands[2].isscalar)
15636    {
15637      struct neon_type_el et = neon_check_type (3, NS_QDS,
15638	N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
15639      NEON_ENCODE (SCALAR, inst);
15640      neon_mul_mac (et, et.type == NT_unsigned);
15641    }
15642  else
15643    {
15644      struct neon_type_el et = neon_check_type (3, NS_QDD,
15645	N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
15646      NEON_ENCODE (INTEGER, inst);
15647      neon_mixed_length (et, et.size);
15648    }
15649}
15650
15651static void
15652do_neon_mac_maybe_scalar_long (void)
15653{
15654  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
15655}
15656
15657static void
15658do_neon_dyadic_wide (void)
15659{
15660  struct neon_type_el et = neon_check_type (3, NS_QQD,
15661    N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
15662  neon_mixed_length (et, et.size);
15663}
15664
15665static void
15666do_neon_dyadic_narrow (void)
15667{
15668  struct neon_type_el et = neon_check_type (3, NS_QDD,
15669    N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
15670  /* Operand sign is unimportant, and the U bit is part of the opcode,
15671     so force the operand type to integer.  */
15672  et.type = NT_integer;
15673  neon_mixed_length (et, et.size / 2);
15674}
15675
15676static void
15677do_neon_mul_sat_scalar_long (void)
15678{
15679  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
15680}
15681
15682static void
15683do_neon_vmull (void)
15684{
15685  if (inst.operands[2].isscalar)
15686    do_neon_mac_maybe_scalar_long ();
15687  else
15688    {
15689      struct neon_type_el et = neon_check_type (3, NS_QDD,
15690	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);
15691
15692      if (et.type == NT_poly)
15693	NEON_ENCODE (POLY, inst);
15694      else
15695	NEON_ENCODE (INTEGER, inst);
15696
15697      /* For polynomial encoding the U bit must be zero, and the size must
15698	 be 8 (encoded as 0b00) or, on ARMv8 or later, 64 (encoded,
15699	 non-obviously, as 0b10).  */
15700      if (et.size == 64)
15701	{
15702	  /* Check we're on the correct architecture.  */
15703	  if (!mark_feature_used (&fpu_crypto_ext_armv8))
15704	    inst.error =
15705	      _("Instruction form not available on this architecture.");
15706
15707	  et.size = 32;
15708	}
15709
15710      neon_mixed_length (et, et.size);
15711    }
15712}
15713
15714static void
15715do_neon_ext (void)
15716{
15717  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
15718  struct neon_type_el et = neon_check_type (3, rs,
15719    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
15720  unsigned imm = (inst.operands[3].imm * et.size) / 8;
15721
15722  constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
15723	      _("shift out of range"));
15724  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15725  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15726  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
15727  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
15728  inst.instruction |= LOW4 (inst.operands[2].reg);
15729  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
15730  inst.instruction |= neon_quad (rs) << 6;
15731  inst.instruction |= imm << 8;
15732
15733  neon_dp_fixup (&inst);
15734}
15735
15736static void
15737do_neon_rev (void)
15738{
15739  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15740  struct neon_type_el et = neon_check_type (2, rs,
15741    N_EQK, N_8 | N_16 | N_32 | N_KEY);
15742  unsigned op = (inst.instruction >> 7) & 3;
15743  /* N (width of reversed regions) is encoded as part of the bitmask. We
15744     extract it here to check the elements to be reversed are smaller.
15745     Otherwise we'd get a reserved instruction.  */
15746  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
15747  gas_assert (elsize != 0);
15748  constraint (et.size >= elsize,
15749	      _("elements must be smaller than reversal region"));
15750  neon_two_same (neon_quad (rs), 1, et.size);
15751}
15752
15753static void
15754do_neon_dup (void)
15755{
15756  if (inst.operands[1].isscalar)
15757    {
15758      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
15759      struct neon_type_el et = neon_check_type (2, rs,
15760	N_EQK, N_8 | N_16 | N_32 | N_KEY);
15761      unsigned sizebits = et.size >> 3;
15762      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
15763      int logsize = neon_logbits (et.size);
15764      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;
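      /* Illustrative example: VDUP.16 <Qd>, <Dm>[2] gives sizebits = 0b010
	 and x = 2 << 1, so bits [19:16] below end up as 0b1010.  */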
15765
15766      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
15767	return;
15768
15769      NEON_ENCODE (SCALAR, inst);
15770      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15771      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15772      inst.instruction |= LOW4 (dm);
15773      inst.instruction |= HI1 (dm) << 5;
15774      inst.instruction |= neon_quad (rs) << 6;
15775      inst.instruction |= x << 17;
15776      inst.instruction |= sizebits << 16;
15777
15778      neon_dp_fixup (&inst);
15779    }
15780  else
15781    {
15782      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
15783      struct neon_type_el et = neon_check_type (2, rs,
15784	N_8 | N_16 | N_32 | N_KEY, N_EQK);
15785      /* Duplicate ARM register to lanes of vector.  */
15786      NEON_ENCODE (ARMREG, inst);
15787      switch (et.size)
15788	{
15789	case 8:  inst.instruction |= 0x400000; break;
15790	case 16: inst.instruction |= 0x000020; break;
15791	case 32: inst.instruction |= 0x000000; break;
15792	default: break;
15793	}
15794      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
15795      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
15796      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
15797      inst.instruction |= neon_quad (rs) << 21;
15798      /* The encoding for this instruction is identical for the ARM and Thumb
15799	 variants, except for the condition field.  */
15800      do_vfp_cond_or_thumb ();
15801    }
15802}
15803
15804/* VMOV has particularly many variations. It can be one of:
15805     0. VMOV<c><q> <Qd>, <Qm>
15806     1. VMOV<c><q> <Dd>, <Dm>
15807   (Register operations, which are VORR with Rm = Rn.)
15808     2. VMOV<c><q>.<dt> <Qd>, #<imm>
15809     3. VMOV<c><q>.<dt> <Dd>, #<imm>
15810   (Immediate loads.)
15811     4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
15812   (ARM register to scalar.)
15813     5. VMOV<c><q> <Dm>, <Rd>, <Rn>
15814   (Two ARM registers to vector.)
15815     6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
15816   (Scalar to ARM register.)
15817     7. VMOV<c><q> <Rd>, <Rn>, <Dm>
15818   (Vector to two ARM registers.)
15819     8. VMOV.F32 <Sd>, <Sm>
15820     9. VMOV.F64 <Dd>, <Dm>
15821   (VFP register moves.)
15822    10. VMOV.F32 <Sd>, #imm
15823    11. VMOV.F64 <Dd>, #imm
15824   (VFP float immediate load.)
15825    12. VMOV <Rd>, <Sm>
15826   (VFP single to ARM reg.)
15827    13. VMOV <Sd>, <Rm>
15828   (ARM reg to VFP single.)
15829    14. VMOV <Rd>, <Re>, <Sn>, <Sm>
15830   (Two ARM regs to two VFP singles.)
15831    15. VMOV <Sd>, <Se>, <Rn>, <Rm>
15832   (Two VFP singles to two ARM regs.)
15833
15834   These cases can be disambiguated using neon_select_shape, except cases 1/9
15835   and 3/11 which depend on the operand type too.
15836
15837   All the encoded bits are hardcoded by this function.
15838
15839   Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
15840   Cases 5, 7 may be used with VFPv2 and above.
15841
15842   FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
15843   can specify a type where it doesn't make sense to, and is ignored).  */
15844
15845static void
15846do_neon_mov (void)
15847{
15848  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
15849    NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
15850    NS_NULL);
15851  struct neon_type_el et;
15852  const char *ldconst = 0;
15853
15854  switch (rs)
15855    {
15856    case NS_DD:  /* case 1/9.  */
15857      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
15858      /* It is not an error here if no type is given.  */
15859      inst.error = NULL;
15860      if (et.type == NT_float && et.size == 64)
15861	{
15862	  do_vfp_nsyn_opcode ("fcpyd");
15863	  break;
15864	}
15865      /* fall through.  */
15866
15867    case NS_QQ:  /* case 0/1.  */
15868      {
15869	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15870	  return;
15871	/* The architecture manual I have doesn't explicitly state which
15872	   value the U bit should have for register->register moves, but
15873	   the equivalent VORR instruction has U = 0, so do that.  */
15874	inst.instruction = 0x0200110;
15875	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15876	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15877	inst.instruction |= LOW4 (inst.operands[1].reg);
15878	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15879	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
15880	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
15881	inst.instruction |= neon_quad (rs) << 6;
15882
15883	neon_dp_fixup (&inst);
15884      }
15885      break;
15886
15887    case NS_DI:  /* case 3/11.  */
15888      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
15889      inst.error = NULL;
15890      if (et.type == NT_float && et.size == 64)
15891	{
15892	  /* case 11 (fconstd).  */
15893	  ldconst = "fconstd";
15894	  goto encode_fconstd;
15895	}
15896      /* fall through.  */
15897
15898    case NS_QI:  /* case 2/3.  */
15899      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15900	return;
15901      inst.instruction = 0x0800010;
15902      neon_move_immediate ();
15903      neon_dp_fixup (&inst);
15904      break;
15905
15906    case NS_SR:  /* case 4.  */
15907      {
15908	unsigned bcdebits = 0;
15909	int logsize;
15910	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
15911	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);
15912
15913	/* .<size> is optional here, defaulting to .32. */
15914	if (inst.vectype.elems == 0
15915	    && inst.operands[0].vectype.type == NT_invtype
15916	    && inst.operands[1].vectype.type == NT_invtype)
15917	  {
15918	    inst.vectype.el[0].type = NT_untyped;
15919	    inst.vectype.el[0].size = 32;
15920	    inst.vectype.elems = 1;
15921	  }
15922
15923	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
15924	logsize = neon_logbits (et.size);
15925
15926	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
15927		    _(BAD_FPU));
15928	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
15929		    && et.size != 32, _(BAD_FPU));
15930	constraint (et.type == NT_invtype, _("bad type for scalar"));
15931	constraint (x >= 64 / et.size, _("scalar index out of range"));
15932
15933	switch (et.size)
15934	  {
15935	  case 8:  bcdebits = 0x8; break;
15936	  case 16: bcdebits = 0x1; break;
15937	  case 32: bcdebits = 0x0; break;
15938	  default: ;
15939	  }
15940
15941	bcdebits |= x << logsize;
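	/* e.g. an 8-bit scalar at index 3 gives bcdebits = 0b1011; the low
	   two bits land in [6:5] and the high two in [22:21] below.  */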
15942
15943	inst.instruction = 0xe000b10;
15944	do_vfp_cond_or_thumb ();
15945	inst.instruction |= LOW4 (dn) << 16;
15946	inst.instruction |= HI1 (dn) << 7;
15947	inst.instruction |= inst.operands[1].reg << 12;
15948	inst.instruction |= (bcdebits & 3) << 5;
15949	inst.instruction |= (bcdebits >> 2) << 21;
15950      }
15951      break;
15952
15953    case NS_DRR:  /* case 5 (fmdrr).  */
15954      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
15955		  _(BAD_FPU));
15956
15957      inst.instruction = 0xc400b10;
15958      do_vfp_cond_or_thumb ();
15959      inst.instruction |= LOW4 (inst.operands[0].reg);
15960      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
15961      inst.instruction |= inst.operands[1].reg << 12;
15962      inst.instruction |= inst.operands[2].reg << 16;
15963      break;
15964
15965    case NS_RS:  /* case 6.  */
15966      {
15967	unsigned logsize;
15968	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
15969	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
15970	unsigned abcdebits = 0;
15971
15972	/* .<dt> is optional here, defaulting to .32. */
15973	if (inst.vectype.elems == 0
15974	    && inst.operands[0].vectype.type == NT_invtype
15975	    && inst.operands[1].vectype.type == NT_invtype)
15976	  {
15977	    inst.vectype.el[0].type = NT_untyped;
15978	    inst.vectype.el[0].size = 32;
15979	    inst.vectype.elems = 1;
15980	  }
15981
15982	et = neon_check_type (2, NS_NULL,
15983			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
15984	logsize = neon_logbits (et.size);
15985
15986	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
15987		    _(BAD_FPU));
15988	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
15989		    && et.size != 32, _(BAD_FPU));
15990	constraint (et.type == NT_invtype, _("bad type for scalar"));
15991	constraint (x >= 64 / et.size, _("scalar index out of range"));
15992
15993	switch (et.size)
15994	  {
15995	  case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
15996	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
15997	  case 32: abcdebits = 0x00; break;
15998	  default: ;
15999	  }
16000
16001	abcdebits |= x << logsize;
16002	inst.instruction = 0xe100b10;
16003	do_vfp_cond_or_thumb ();
16004	inst.instruction |= LOW4 (dn) << 16;
16005	inst.instruction |= HI1 (dn) << 7;
16006	inst.instruction |= inst.operands[0].reg << 12;
16007	inst.instruction |= (abcdebits & 3) << 5;
16008	inst.instruction |= (abcdebits >> 2) << 21;
16009      }
16010      break;
16011
16012    case NS_RRD:  /* case 7 (fmrrd).  */
16013      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
16014		  _(BAD_FPU));
16015
16016      inst.instruction = 0xc500b10;
16017      do_vfp_cond_or_thumb ();
16018      inst.instruction |= inst.operands[0].reg << 12;
16019      inst.instruction |= inst.operands[1].reg << 16;
16020      inst.instruction |= LOW4 (inst.operands[2].reg);
16021      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16022      break;
16023
16024    case NS_FF:  /* case 8 (fcpys).  */
16025      do_vfp_nsyn_opcode ("fcpys");
16026      break;
16027
16028    case NS_FI:  /* case 10 (fconsts).  */
16029      ldconst = "fconsts";
16030      encode_fconstd:
16031      if (is_quarter_float (inst.operands[1].imm))
16032	{
16033	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
16034	  do_vfp_nsyn_opcode (ldconst);
16035	}
16036      else
16037	first_error (_("immediate out of range"));
16038      break;
16039
16040    case NS_RF:  /* case 12 (fmrs).  */
16041      do_vfp_nsyn_opcode ("fmrs");
16042      break;
16043
16044    case NS_FR:  /* case 13 (fmsr).  */
16045      do_vfp_nsyn_opcode ("fmsr");
16046      break;
16047
16048    /* The encoders for the fmrrs and fmsrr instructions expect three operands
16049       (one of which is a list), but we have parsed four.  Do some fiddling to
16050       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
16051       expect.  */
16052    case NS_RRFF:  /* case 14 (fmrrs).  */
16053      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
16054		  _("VFP registers must be adjacent"));
16055      inst.operands[2].imm = 2;
16056      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
16057      do_vfp_nsyn_opcode ("fmrrs");
16058      break;
16059
16060    case NS_FFRR:  /* case 15 (fmsrr).  */
16061      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
16062		  _("VFP registers must be adjacent"));
16063      inst.operands[1] = inst.operands[2];
16064      inst.operands[2] = inst.operands[3];
16065      inst.operands[0].imm = 2;
16066      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
16067      do_vfp_nsyn_opcode ("fmsrr");
16068      break;
16069
16070    case NS_NULL:
16071      /* neon_select_shape has determined that the instruction
16072	 shape is wrong and has already set the error message.  */
16073      break;
16074
16075    default:
16076      abort ();
16077    }
16078}
16079
16080static void
16081do_neon_rshift_round_imm (void)
16082{
16083  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
16084  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
16085  int imm = inst.operands[2].imm;
16086
16087  /* imm == 0 case is encoded as VMOV for V{R}SHR.  */
16088  if (imm == 0)
16089    {
16090      inst.operands[2].present = 0;
16091      do_neon_mov ();
16092      return;
16093    }
16094
16095  constraint (imm < 1 || (unsigned)imm > et.size,
16096	      _("immediate out of range for shift"));
16097  neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
16098		  et.size - imm);
16099}
16100
16101static void
16102do_neon_movl (void)
16103{
16104  struct neon_type_el et = neon_check_type (2, NS_QD,
16105    N_EQK | N_DBL, N_SU_32 | N_KEY);
16106  unsigned sizebits = et.size >> 3;
16107  inst.instruction |= sizebits << 19;
16108  neon_two_same (0, et.type == NT_unsigned, -1);
16109}
16110
16111static void
16112do_neon_trn (void)
16113{
16114  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16115  struct neon_type_el et = neon_check_type (2, rs,
16116    N_EQK, N_8 | N_16 | N_32 | N_KEY);
16117  NEON_ENCODE (INTEGER, inst);
16118  neon_two_same (neon_quad (rs), 1, et.size);
16119}
16120
16121static void
16122do_neon_zip_uzp (void)
16123{
16124  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16125  struct neon_type_el et = neon_check_type (2, rs,
16126    N_EQK, N_8 | N_16 | N_32 | N_KEY);
16127  if (rs == NS_DD && et.size == 32)
16128    {
16129      /* Special case: encode as VTRN.32 <Dd>, <Dm>.  */
16130      inst.instruction = N_MNEM_vtrn;
16131      do_neon_trn ();
16132      return;
16133    }
16134  neon_two_same (neon_quad (rs), 1, et.size);
16135}
16136
16137static void
16138do_neon_sat_abs_neg (void)
16139{
16140  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16141  struct neon_type_el et = neon_check_type (2, rs,
16142    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
16143  neon_two_same (neon_quad (rs), 1, et.size);
16144}
16145
16146static void
16147do_neon_pair_long (void)
16148{
16149  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16150  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
16151  /* Unsigned is encoded in the OP field (bit 7) for these instructions.  */
16152  inst.instruction |= (et.type == NT_unsigned) << 7;
16153  neon_two_same (neon_quad (rs), 1, et.size);
16154}
16155
16156static void
16157do_neon_recip_est (void)
16158{
16159  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16160  struct neon_type_el et = neon_check_type (2, rs,
16161    N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
16162  inst.instruction |= (et.type == NT_float) << 8;
16163  neon_two_same (neon_quad (rs), 1, et.size);
16164}
16165
16166static void
16167do_neon_cls (void)
16168{
16169  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16170  struct neon_type_el et = neon_check_type (2, rs,
16171    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
16172  neon_two_same (neon_quad (rs), 1, et.size);
16173}
16174
16175static void
16176do_neon_clz (void)
16177{
16178  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16179  struct neon_type_el et = neon_check_type (2, rs,
16180    N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
16181  neon_two_same (neon_quad (rs), 1, et.size);
16182}
16183
16184static void
16185do_neon_cnt (void)
16186{
16187  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16188  struct neon_type_el et = neon_check_type (2, rs,
16189    N_EQK | N_INT, N_8 | N_KEY);
16190  neon_two_same (neon_quad (rs), 1, et.size);
16191}
16192
16193static void
16194do_neon_swp (void)
16195{
16196  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16197  neon_two_same (neon_quad (rs), 1, -1);
16198}
16199
16200static void
16201do_neon_tbl_tbx (void)
16202{
16203  unsigned listlenbits;
16204  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
16205
16206  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
16207    {
16208      first_error (_("bad list length for table lookup"));
16209      return;
16210    }
16211
16212  listlenbits = inst.operands[1].imm - 1;
16213  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16214  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16215  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16216  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16217  inst.instruction |= LOW4 (inst.operands[2].reg);
16218  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16219  inst.instruction |= listlenbits << 8;
16220
16221  neon_dp_fixup (&inst);
16222}
16223
16224static void
16225do_neon_ldm_stm (void)
16226{
16227  /* P, U and L bits are part of the bitmask.  */
16228  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
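  /* Each D register transferred occupies two words, hence the factor of
     two below.  */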
16229  unsigned offsetbits = inst.operands[1].imm * 2;
16230
16231  if (inst.operands[1].issingle)
16232    {
16233      do_vfp_nsyn_ldm_stm (is_dbmode);
16234      return;
16235    }
16236
16237  constraint (is_dbmode && !inst.operands[0].writeback,
16238	      _("writeback (!) must be used for VLDMDB and VSTMDB"));
16239
16240  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
16241	      _("register list must contain at least 1 and at most 16 "
16242		"registers"));
16243
16244  inst.instruction |= inst.operands[0].reg << 16;
16245  inst.instruction |= inst.operands[0].writeback << 21;
16246  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
16247  inst.instruction |= HI1 (inst.operands[1].reg) << 22;
16248
16249  inst.instruction |= offsetbits;
16250
16251  do_vfp_cond_or_thumb ();
16252}
16253
16254static void
16255do_neon_ldr_str (void)
16256{
16257  int is_ldr = (inst.instruction & (1 << 20)) != 0;
16258
16259  /* Use of PC in vstr in ARM mode is deprecated in ARMv7, and is
16260     UNPREDICTABLE in Thumb mode.  */
16261  if (!is_ldr
16262      && inst.operands[1].reg == REG_PC
16263      && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
16264    {
16265      if (thumb_mode)
16266	inst.error = _("Use of PC here is UNPREDICTABLE");
16267      else if (warn_on_deprecated)
16268	as_tsktsk (_("Use of PC here is deprecated"));
16269    }
16270
16271  if (inst.operands[0].issingle)
16272    {
16273      if (is_ldr)
16274	do_vfp_nsyn_opcode ("flds");
16275      else
16276	do_vfp_nsyn_opcode ("fsts");
16277    }
16278  else
16279    {
16280      if (is_ldr)
16281	do_vfp_nsyn_opcode ("fldd");
16282      else
16283	do_vfp_nsyn_opcode ("fstd");
16284    }
16285}
16286
16287/* "interleave" version also handles non-interleaving register VLD1/VST1
16288   instructions.  */
16289
16290static void
16291do_neon_ld_st_interleave (void)
16292{
16293  struct neon_type_el et = neon_check_type (1, NS_NULL,
16294					    N_8 | N_16 | N_32 | N_64);
16295  unsigned alignbits = 0;
16296  unsigned idx;
16297  /* The bits in this table go:
16298     0: register stride of one (0) or two (1)
16299     1,2: register list length minus one (i.e. lengths 1, 2, 3, 4).
16300     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
16301     We use -1 for invalid entries.  */
16302  const int typetable[] =
16303    {
16304      0x7,  -1, 0xa,  -1, 0x6,  -1, 0x2,  -1, /* VLD1 / VST1.  */
16305       -1,  -1, 0x8, 0x9,  -1,  -1, 0x3,  -1, /* VLD2 / VST2.  */
16306       -1,  -1,  -1,  -1, 0x4, 0x5,  -1,  -1, /* VLD3 / VST3.  */
16307       -1,  -1,  -1,  -1,  -1,  -1, 0x0, 0x1  /* VLD4 / VST4.  */
16308    };
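  /* Reading the table: in the VLD2/VST2 row, for example, a two-register
     stride-one list selects type 0x8, a double-spaced (stride-two) pair
     selects 0x9, and the four-register form selects 0x3.  */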
16309  int typebits;
16310
16311  if (et.type == NT_invtype)
16312    return;
16313
16314  if (inst.operands[1].immisalign)
16315    switch (inst.operands[1].imm >> 8)
16316      {
16317      case 64: alignbits = 1; break;
16318      case 128:
16319	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
16320	    && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
16321	  goto bad_alignment;
16322	alignbits = 2;
16323	break;
16324      case 256:
16325	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
16326	  goto bad_alignment;
16327	alignbits = 3;
16328	break;
16329      default:
16330      bad_alignment:
16331	first_error (_("bad alignment"));
16332	return;
16333      }
16334
16335  inst.instruction |= alignbits << 4;
16336  inst.instruction |= neon_logbits (et.size) << 6;
16337
16338  /* Bits [4:6] of the immediate in a list specifier encode register stride
16339     (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
16340     VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
16341     up the right value for "type" in a table based on this value and the given
16342     list style, then stick it back.  */
16343  idx = ((inst.operands[0].imm >> 4) & 7)
16344	| (((inst.instruction >> 8) & 3) << 3);
16345
16346  typebits = typetable[idx];
16347
16348  constraint (typebits == -1, _("bad list type for instruction"));
16349  constraint (((inst.instruction >> 8) & 3) && et.size == 64,
16350	      _("bad element type for instruction"));
16351
16352  inst.instruction &= ~0xf00;
16353  inst.instruction |= typebits << 8;
16354}
16355
16356/* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
16357   *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
16358   otherwise. The variable arguments are a list of pairs of legal (size, align)
16359   values, terminated with -1.  */
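/* For instance, the VLD1/VST1 lane form below calls
   neon_alignment_bit (et.size, align, &do_align, 16, 16, 32, 32, -1),
   which accepts the (size, align) pairs (16, 16) and (32, 32) and rejects
   any other explicit alignment.  */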
16360
16361static int
16362neon_alignment_bit (int size, int align, int *do_align, ...)
16363{
16364  va_list ap;
16365  int result = FAIL, thissize, thisalign;
16366
16367  if (!inst.operands[1].immisalign)
16368    {
16369      *do_align = 0;
16370      return SUCCESS;
16371    }
16372
16373  va_start (ap, do_align);
16374
16375  do
16376    {
16377      thissize = va_arg (ap, int);
16378      if (thissize == -1)
16379	break;
16380      thisalign = va_arg (ap, int);
16381
16382      if (size == thissize && align == thisalign)
16383	result = SUCCESS;
16384    }
16385  while (result != SUCCESS);
16386
16387  va_end (ap);
16388
16389  if (result == SUCCESS)
16390    *do_align = 1;
16391  else
16392    first_error (_("unsupported alignment for instruction"));
16393
16394  return result;
16395}
16396
16397static void
16398do_neon_ld_st_lane (void)
16399{
16400  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
16401  int align_good, do_align = 0;
16402  int logsize = neon_logbits (et.size);
16403  int align = inst.operands[1].imm >> 8;
16404  int n = (inst.instruction >> 8) & 3;
16405  int max_el = 64 / et.size;
16406
16407  if (et.type == NT_invtype)
16408    return;
16409
16410  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
16411	      _("bad list length"));
16412  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
16413	      _("scalar index out of range"));
16414  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
16415	      && et.size == 8,
16416	      _("stride of 2 unavailable when element size is 8"));
16417
16418  switch (n)
16419    {
16420    case 0:  /* VLD1 / VST1.  */
16421      align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
16422				       32, 32, -1);
16423      if (align_good == FAIL)
16424	return;
16425      if (do_align)
16426	{
16427	  unsigned alignbits = 0;
16428	  switch (et.size)
16429	    {
16430	    case 16: alignbits = 0x1; break;
16431	    case 32: alignbits = 0x3; break;
16432	    default: ;
16433	    }
16434	  inst.instruction |= alignbits << 4;
16435	}
16436      break;
16437
16438    case 1:  /* VLD2 / VST2.  */
16439      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
16440				       32, 64, -1);
16441      if (align_good == FAIL)
16442	return;
16443      if (do_align)
16444	inst.instruction |= 1 << 4;
16445      break;
16446
16447    case 2:  /* VLD3 / VST3.  */
16448      constraint (inst.operands[1].immisalign,
16449		  _("can't use alignment with this instruction"));
16450      break;
16451
16452    case 3:  /* VLD4 / VST4.  */
16453      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
16454				       16, 64, 32, 64, 32, 128, -1);
16455      if (align_good == FAIL)
16456	return;
16457      if (do_align)
16458	{
16459	  unsigned alignbits = 0;
16460	  switch (et.size)
16461	    {
16462	    case 8:  alignbits = 0x1; break;
16463	    case 16: alignbits = 0x1; break;
16464	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
16465	    default: ;
16466	    }
16467	  inst.instruction |= alignbits << 4;
16468	}
16469      break;
16470
16471    default: ;
16472    }
16473
16474  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
16475  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
16476    inst.instruction |= 1 << (4 + logsize);
16477
16478  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
16479  inst.instruction |= logsize << 10;
16480}
16481
16482/* Encode single n-element structure to all lanes VLD<n> instructions.  */
16483
16484static void
16485do_neon_ld_dup (void)
16486{
16487  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
16488  int align_good, do_align = 0;
16489
16490  if (et.type == NT_invtype)
16491    return;
16492
16493  switch ((inst.instruction >> 8) & 3)
16494    {
16495    case 0:  /* VLD1.  */
16496      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
16497      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
16498				       &do_align, 16, 16, 32, 32, -1);
16499      if (align_good == FAIL)
16500	return;
16501      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
16502	{
16503	case 1: break;
16504	case 2: inst.instruction |= 1 << 5; break;
16505	default: first_error (_("bad list length")); return;
16506	}
16507      inst.instruction |= neon_logbits (et.size) << 6;
16508      break;
16509
16510    case 1:  /* VLD2.  */
16511      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
16512				       &do_align, 8, 16, 16, 32, 32, 64, -1);
16513      if (align_good == FAIL)
16514	return;
16515      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
16516		  _("bad list length"));
16517      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
16518	inst.instruction |= 1 << 5;
16519      inst.instruction |= neon_logbits (et.size) << 6;
16520      break;
16521
16522    case 2:  /* VLD3.  */
16523      constraint (inst.operands[1].immisalign,
16524		  _("can't use alignment with this instruction"));
16525      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
16526		  _("bad list length"));
16527      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
16528	inst.instruction |= 1 << 5;
16529      inst.instruction |= neon_logbits (et.size) << 6;
16530      break;
16531
16532    case 3:  /* VLD4.  */
16533      {
16534	int align = inst.operands[1].imm >> 8;
16535	align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
16536					 16, 64, 32, 64, 32, 128, -1);
16537	if (align_good == FAIL)
16538	  return;
16539	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
16540		    _("bad list length"));
16541	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
16542	  inst.instruction |= 1 << 5;
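	/* 32-bit elements with 128-bit alignment use size = 0b11 rather than
	   0b10 in the VLD4 all-lanes encoding (architectural special case).  */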
16543	if (et.size == 32 && align == 128)
16544	  inst.instruction |= 0x3 << 6;
16545	else
16546	  inst.instruction |= neon_logbits (et.size) << 6;
16547      }
16548      break;
16549
16550    default: ;
16551    }
16552
16553  inst.instruction |= do_align << 4;
16554}
16555
16556/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
16557   apart from bits [11:4]).  */
16558
16559static void
16560do_neon_ldx_stx (void)
16561{
16562  if (inst.operands[1].isreg)
16563    constraint (inst.operands[1].reg == REG_PC, BAD_PC);
16564
16565  switch (NEON_LANE (inst.operands[0].imm))
16566    {
16567    case NEON_INTERLEAVE_LANES:
16568      NEON_ENCODE (INTERLV, inst);
16569      do_neon_ld_st_interleave ();
16570      break;
16571
16572    case NEON_ALL_LANES:
16573      NEON_ENCODE (DUP, inst);
16574      if (inst.instruction == N_INV)
16575	{
16576	  first_error (_("only loads support such operands"));
16577	  break;
16578	}
16579      do_neon_ld_dup ();
16580      break;
16581
16582    default:
16583      NEON_ENCODE (LANE, inst);
16584      do_neon_ld_st_lane ();
16585    }
16586
16587  /* L bit comes from bit mask.  */
16588  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16589  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16590  inst.instruction |= inst.operands[1].reg << 16;
16591
16592  if (inst.operands[1].postind)
16593    {
16594      int postreg = inst.operands[1].imm & 0xf;
16595      constraint (!inst.operands[1].immisreg,
16596		  _("post-index must be a register"));
16597      constraint (postreg == 0xd || postreg == 0xf,
16598		  _("bad register for post-index"));
16599      inst.instruction |= postreg;
16600    }
16601  else
16602    {
16603      constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
16604      constraint (inst.reloc.exp.X_op != O_constant
16605		  || inst.reloc.exp.X_add_number != 0,
16606		  BAD_ADDR_MODE);
16607
16608      if (inst.operands[1].writeback)
16609	{
16610	  inst.instruction |= 0xd;
16611	}
16612      else
16613	inst.instruction |= 0xf;
16614    }
16615
16616  if (thumb_mode)
16617    inst.instruction |= 0xf9000000;
16618  else
16619    inst.instruction |= 0xf4000000;
16620}
16621
16622/* FP v8.  */
16623static void
16624do_vfp_nsyn_fpv8 (enum neon_shape rs)
16625{
16626  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
16627     D register operands.  */
16628  if (neon_shape_class[rs] == SC_DOUBLE)
16629    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
16630		_(BAD_FPU));
16631
16632  NEON_ENCODE (FPV8, inst);
16633
16634  if (rs == NS_FFF)
16635    do_vfp_sp_dyadic ();
16636  else
16637    do_vfp_dp_rd_rn_rm ();
16638
16639  if (rs == NS_DDD)
16640    inst.instruction |= 0x100;
16641
16642  inst.instruction |= 0xf0000000;
16643}
16644
16645static void
16646do_vsel (void)
16647{
16648  set_it_insn_type (OUTSIDE_IT_INSN);
16649
16650  if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
16651    first_error (_("invalid instruction shape"));
16652}
16653
16654static void
16655do_vmaxnm (void)
16656{
16657  set_it_insn_type (OUTSIDE_IT_INSN);
16658
16659  if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
16660    return;
16661
16662  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
16663    return;
16664
16665  neon_dyadic_misc (NT_untyped, N_F32, 0);
16666}
16667
16668static void
16669do_vrint_1 (enum neon_cvt_mode mode)
16670{
16671  enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_QQ, NS_NULL);
16672  struct neon_type_el et;
16673
16674  if (rs == NS_NULL)
16675    return;
16676
16677  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
16678     D register operands.  */
16679  if (neon_shape_class[rs] == SC_DOUBLE)
16680    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
16681		_(BAD_FPU));
16682
16683  et = neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
16684  if (et.type != NT_invtype)
16685    {
16686      /* VFP encodings.  */
16687      if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
16688	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
16689	set_it_insn_type (OUTSIDE_IT_INSN);
16690
16691      NEON_ENCODE (FPV8, inst);
16692      if (rs == NS_FF)
16693	do_vfp_sp_monadic ();
16694      else
16695	do_vfp_dp_rd_rm ();
16696
16697      switch (mode)
16698	{
16699	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
16700	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
16701	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
16702	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
16703	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
16704	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
16705	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
16706	default: abort ();
16707	}
16708
16709      inst.instruction |= (rs == NS_DD) << 8;
16710      do_vfp_cond_or_thumb ();
16711    }
16712  else
16713    {
16714      /* Neon encodings (or something broken...).  */
16715      inst.error = NULL;
16716      et = neon_check_type (2, rs, N_EQK, N_F32 | N_KEY);
16717
16718      if (et.type == NT_invtype)
16719	return;
16720
16721      set_it_insn_type (OUTSIDE_IT_INSN);
16722      NEON_ENCODE (FLOAT, inst);
16723
16724      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
16725	return;
16726
16727      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16728      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16729      inst.instruction |= LOW4 (inst.operands[1].reg);
16730      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16731      inst.instruction |= neon_quad (rs) << 6;
16732      switch (mode)
16733	{
16734	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
16735	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
16736	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
16737	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
16738	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
16739	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
16740	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
16741	default: abort ();
16742	}
16743
16744      if (thumb_mode)
16745	inst.instruction |= 0xfc000000;
16746      else
16747	inst.instruction |= 0xf0000000;
16748    }
16749}
16750
16751static void
16752do_vrintx (void)
16753{
16754  do_vrint_1 (neon_cvt_mode_x);
16755}
16756
16757static void
16758do_vrintz (void)
16759{
16760  do_vrint_1 (neon_cvt_mode_z);
16761}
16762
16763static void
16764do_vrintr (void)
16765{
16766  do_vrint_1 (neon_cvt_mode_r);
16767}
16768
16769static void
16770do_vrinta (void)
16771{
16772  do_vrint_1 (neon_cvt_mode_a);
16773}
16774
16775static void
16776do_vrintn (void)
16777{
16778  do_vrint_1 (neon_cvt_mode_n);
16779}
16780
16781static void
16782do_vrintp (void)
16783{
16784  do_vrint_1 (neon_cvt_mode_p);
16785}
16786
16787static void
16788do_vrintm (void)
16789{
16790  do_vrint_1 (neon_cvt_mode_m);
16791}
16792
16793/* Crypto v1 instructions.  */
16794static void
16795do_crypto_2op_1 (unsigned elttype, int op)
16796{
16797  set_it_insn_type (OUTSIDE_IT_INSN);
16798
16799  if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
16800      == NT_invtype)
16801    return;
16802
16803  inst.error = NULL;
16804
16805  NEON_ENCODE (INTEGER, inst);
16806  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16807  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16808  inst.instruction |= LOW4 (inst.operands[1].reg);
16809  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16810  if (op != -1)
16811    inst.instruction |= op << 6;
16812
16813  if (thumb_mode)
16814    inst.instruction |= 0xfc000000;
16815  else
16816    inst.instruction |= 0xf0000000;
16817}
16818
16819static void
16820do_crypto_3op_1 (int u, int op)
16821{
16822  set_it_insn_type (OUTSIDE_IT_INSN);
16823
16824  if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
16825		       N_32 | N_UNT | N_KEY).type == NT_invtype)
16826    return;
16827
16828  inst.error = NULL;
16829
16830  NEON_ENCODE (INTEGER, inst);
16831  neon_three_same (1, u, 8 << op);
16832}
16833
16834static void
16835do_aese (void)
16836{
16837  do_crypto_2op_1 (N_8, 0);
16838}
16839
16840static void
16841do_aesd (void)
16842{
16843  do_crypto_2op_1 (N_8, 1);
16844}
16845
16846static void
16847do_aesmc (void)
16848{
16849  do_crypto_2op_1 (N_8, 2);
16850}
16851
16852static void
16853do_aesimc (void)
16854{
16855  do_crypto_2op_1 (N_8, 3);
16856}
16857
16858static void
16859do_sha1c (void)
16860{
16861  do_crypto_3op_1 (0, 0);
16862}
16863
16864static void
16865do_sha1p (void)
16866{
16867  do_crypto_3op_1 (0, 1);
16868}
16869
16870static void
16871do_sha1m (void)
16872{
16873  do_crypto_3op_1 (0, 2);
16874}
16875
16876static void
16877do_sha1su0 (void)
16878{
16879  do_crypto_3op_1 (0, 3);
16880}
16881
16882static void
16883do_sha256h (void)
16884{
16885  do_crypto_3op_1 (1, 0);
16886}
16887
16888static void
16889do_sha256h2 (void)
16890{
16891  do_crypto_3op_1 (1, 1);
16892}
16893
16894static void
16895do_sha256su1 (void)
16896{
16897  do_crypto_3op_1 (1, 2);
16898}
16899
16900static void
16901do_sha1h (void)
16902{
16903  do_crypto_2op_1 (N_32, -1);
16904}
16905
16906static void
16907do_sha1su1 (void)
16908{
16909  do_crypto_2op_1 (N_32, 0);
16910}
16911
16912static void
16913do_sha256su0 (void)
16914{
16915  do_crypto_2op_1 (N_32, 1);
16916}
16917
16918static void
16919do_crc32_1 (unsigned int poly, unsigned int sz)
16920{
16921  unsigned int Rd = inst.operands[0].reg;
16922  unsigned int Rn = inst.operands[1].reg;
16923  unsigned int Rm = inst.operands[2].reg;
16924
16925  set_it_insn_type (OUTSIDE_IT_INSN);
16926  inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
16927  inst.instruction |= LOW4 (Rn) << 16;
16928  inst.instruction |= LOW4 (Rm);
16929  inst.instruction |= sz << (thumb_mode ? 4 : 21);
16930  inst.instruction |= poly << (thumb_mode ? 20 : 9);
16931
16932  if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
16933    as_warn (UNPRED_REG ("r15"));
16934  if (thumb_mode && (Rd == REG_SP || Rn == REG_SP || Rm == REG_SP))
16935    as_warn (UNPRED_REG ("r13"));
16936}
16937
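/* The wrappers below select the arguments for do_crc32_1 (): poly 0 is the
   CRC32 polynomial and poly 1 the CRC32C (Castagnoli) polynomial; sz 0, 1
   and 2 select byte, halfword and word data respectively (see the ARMv8
   ARM for the exact polynomial definitions).  */
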
16938static void
16939do_crc32b (void)
16940{
16941  do_crc32_1 (0, 0);
16942}
16943
16944static void
16945do_crc32h (void)
16946{
16947  do_crc32_1 (0, 1);
16948}
16949
16950static void
16951do_crc32w (void)
16952{
16953  do_crc32_1 (0, 2);
16954}
16955
16956static void
16957do_crc32cb (void)
16958{
16959  do_crc32_1 (1, 0);
16960}
16961
16962static void
16963do_crc32ch (void)
16964{
16965  do_crc32_1 (1, 1);
16966}
16967
16968static void
16969do_crc32cw (void)
16970{
16971  do_crc32_1 (1, 2);
16972}
16973
16974
16975/* Overall per-instruction processing.	*/
16976
16977/* We need to be able to fix up arbitrary expressions in some statements.
16978   This is so that we can handle symbols that are an arbitrary distance from
16979   the pc.  The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
16980   which returns part of an address in a form which will be valid for
16981   a data instruction.	We do this by pushing the expression into a symbol
16982   in the expr_section, and creating a fix for that.  */
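
/* A purely illustrative example of such an expression (hypothetical symbol):

	add	r0, pc, #((foo - . - 8) & 0xff)

   The immediate is an arbitrary expression involving a symbol; it is pushed
   into an expression symbol in the expr_section and a fix is created
   against that symbol.  */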
16983
16984static void
16985fix_new_arm (fragS *	   frag,
16986	     int	   where,
16987	     short int	   size,
16988	     expressionS * exp,
16989	     int	   pc_rel,
16990	     int	   reloc)
16991{
16992  fixS *	   new_fix;
16993
16994  switch (exp->X_op)
16995    {
16996    case O_constant:
16997      if (pc_rel)
16998	{
16999	  /* Create an absolute valued symbol, so we have something to
17000	     refer to in the object file.  Unfortunately for us, gas's
17001	     generic expression parsing will already have folded out
17002	     any use of .set foo/.type foo %function that may have
17003	     been used to set type information of the target location
17004	     that is being specified symbolically.  We have to presume
17005	     the user knows what they are doing.  */
17006	  char name[16 + 8];
17007	  symbolS *symbol;
17008
17009	  sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);
17010
17011	  symbol = symbol_find_or_make (name);
17012	  S_SET_SEGMENT (symbol, absolute_section);
17013	  symbol_set_frag (symbol, &zero_address_frag);
17014	  S_SET_VALUE (symbol, exp->X_add_number);
17015	  exp->X_op = O_symbol;
17016	  exp->X_add_symbol = symbol;
17017	  exp->X_add_number = 0;
17018	}
17019      /* FALLTHROUGH */
17020    case O_symbol:
17021    case O_add:
17022    case O_subtract:
17023      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
17024			     (enum bfd_reloc_code_real) reloc);
17025      break;
17026
17027    default:
17028      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
17029				  pc_rel, (enum bfd_reloc_code_real) reloc);
17030      break;
17031    }
17032
17033  /* Mark whether the fix is to a THUMB instruction, or an ARM
17034     instruction.  */
17035  new_fix->tc_fix_data = thumb_mode;
17036}
17037
17038/* Create a frag for an instruction requiring relaxation.  */
17039static void
17040output_relax_insn (void)
17041{
17042  char * to;
17043  symbolS *sym;
17044  int offset;
17045
17046  /* The size of the instruction is unknown, so tie the debug info to the
17047     start of the instruction.  */
17048  dwarf2_emit_insn (0);
17049
17050  switch (inst.reloc.exp.X_op)
17051    {
17052    case O_symbol:
17053      sym = inst.reloc.exp.X_add_symbol;
17054      offset = inst.reloc.exp.X_add_number;
17055      break;
17056    case O_constant:
17057      sym = NULL;
17058      offset = inst.reloc.exp.X_add_number;
17059      break;
17060    default:
17061      sym = make_expr_symbol (&inst.reloc.exp);
17062      offset = 0;
17063      break;
17064    }
17065  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
17066		 inst.relax, sym, offset, NULL/*offset, opcode*/);
17067  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
17068}
17069
17070/* Write a 32-bit Thumb instruction to buf, most significant halfword first.  */
17071static void
17072put_thumb32_insn (char * buf, unsigned long insn)
17073{
17074  md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
17075  md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
17076}
17077
17078static void
17079output_inst (const char * str)
17080{
17081  char * to = NULL;
17082
17083  if (inst.error)
17084    {
17085      as_bad ("%s -- `%s'", inst.error, str);
17086      return;
17087    }
17088  if (inst.relax)
17089    {
17090      output_relax_insn ();
17091      return;
17092    }
17093  if (inst.size == 0)
17094    return;
17095
17096  to = frag_more (inst.size);
17097  /* PR 9814: Record the thumb mode into the current frag so that we know
17098     what type of NOP padding to use, if necessary.  We override any previous
17099     setting so that if the mode has changed then the NOPS that we use will
17100     match the encoding of the last instruction in the frag.  */
17101  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
17102
17103  if (thumb_mode && (inst.size > THUMB_SIZE))
17104    {
17105      gas_assert (inst.size == (2 * THUMB_SIZE));
17106      put_thumb32_insn (to, inst.instruction);
17107    }
17108  else if (inst.size > INSN_SIZE)
17109    {
17110      gas_assert (inst.size == (2 * INSN_SIZE));
17111      md_number_to_chars (to, inst.instruction, INSN_SIZE);
17112      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
17113    }
17114  else
17115    md_number_to_chars (to, inst.instruction, inst.size);
17116
17117  if (inst.reloc.type != BFD_RELOC_UNUSED)
17118    fix_new_arm (frag_now, to - frag_now->fr_literal,
17119		 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
17120		 inst.reloc.type);
17121
17122  dwarf2_emit_insn (inst.size);
17123}
17124
17125static char *
17126output_it_inst (int cond, int mask, char * to)
17127{
17128  unsigned long instruction = 0xbf00;
17129
17130  mask &= 0xf;
17131  instruction |= mask;
17132  instruction |= cond << 4;
17133
17134  if (to == NULL)
17135    {
17136      to = frag_more (2);
17137#ifdef OBJ_ELF
17138      dwarf2_emit_insn (2);
17139#endif
17140    }
17141
17142  md_number_to_chars (to, instruction, 2);
17143
17144  return to;
17145}
17146
17147/* Tag values used in struct asm_opcode's tag field.  */
17148enum opcode_tag
17149{
17150  OT_unconditional,	/* Instruction cannot be conditionalized.
17151			   The ARM condition field is still 0xE.  */
17152  OT_unconditionalF,	/* Instruction cannot be conditionalized
17153			   and carries 0xF in its ARM condition field.  */
17154  OT_csuffix,		/* Instruction takes a conditional suffix.  */
17155  OT_csuffixF,		/* Some forms of the instruction take a conditional
17156			   suffix, others place 0xF where the condition field
17157			   would be.  */
17158  OT_cinfix3,		/* Instruction takes a conditional infix,
17159			   beginning at character index 3.  (In
17160			   unified mode, it becomes a suffix.)  */
17161  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
17162			    tsts, cmps, cmns, and teqs. */
17163  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
17164			   character index 3, even in unified mode.  Used for
17165			   legacy instructions where suffix and infix forms
17166			   may be ambiguous.  */
17167  OT_csuf_or_in3,	/* Instruction takes either a conditional
17168			   suffix or an infix at character index 3.  */
17169  OT_odd_infix_unc,	/* This is the unconditional variant of an
17170			   instruction that takes a conditional infix
17171			   at an unusual position.  In unified mode,
17172			   this variant will accept a suffix.  */
17173  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
17174			   are the conditional variants of instructions that
17175			   take conditional infixes in unusual positions.
17176			   The infix appears at character index
17177			   (tag - OT_odd_infix_0).  These are not accepted
17178			   in unified mode.  */
17179};
17180
17181/* Subroutine of md_assemble, responsible for looking up the primary
17182   opcode from the mnemonic the user wrote.  STR points to the
17183   beginning of the mnemonic.
17184
17185   This is not simply a hash table lookup, because of conditional
17186   variants.  Most instructions have conditional variants, which are
17187   expressed with a _conditional affix_ to the mnemonic.  If we were
17188   to encode each conditional variant as a literal string in the opcode
17189   table, it would have approximately 20,000 entries.
17190
17191   Most mnemonics take this affix as a suffix, and in unified syntax,
17192   'most' is upgraded to 'all'.  However, in the divided syntax, some
17193   instructions take the affix as an infix, notably the s-variants of
17194   the arithmetic instructions.  Of those instructions, all but six
17195   have the infix appear after the third character of the mnemonic.
17196
17197   Accordingly, the algorithm for looking up primary opcodes given
17198   an identifier is:
17199
17200   1. Look up the identifier in the opcode table.
17201      If we find a match, go to step U.
17202
17203   2. Look up the last two characters of the identifier in the
17204      conditions table.  If we find a match, look up the first N-2
17205      characters of the identifier in the opcode table.  If we
17206      find a match, go to step CE.
17207
17208   3. Look up the fourth and fifth characters of the identifier in
17209      the conditions table.  If we find a match, extract those
17210      characters from the identifier, and look up the remaining
17211      characters in the opcode table.  If we find a match, go
17212      to step CM.
17213
17214   4. Fail.
17215
17216   U. Examine the tag field of the opcode structure, in case this is
17217      one of the six instructions with its conditional infix in an
17218      unusual place.  If it is, the tag tells us where to find the
17219      infix; look it up in the conditions table and set inst.cond
17220      accordingly.  Otherwise, this is an unconditional instruction.
17221      Again set inst.cond accordingly.  Return the opcode structure.
17222
17223  CE. Examine the tag field to make sure this is an instruction that
17224      should receive a conditional suffix.  If it is not, fail.
17225      Otherwise, set inst.cond from the suffix we already looked up,
17226      and return the opcode structure.
17227
17228  CM. Examine the tag field to make sure this is an instruction that
17229      should receive a conditional infix after the third character.
17230      If it is not, fail.  Otherwise, undo the edits to the current
17231      line of input and proceed as for case CE.  */
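
/* A worked example (mnemonics chosen purely for illustration): for "addeq",
   step 1 above fails, but step 2 finds the condition "eq" in the last two
   characters and the opcode "add" in the remainder, so case CE sets
   inst.cond to EQ.  For divided-syntax "addeqs", steps 1 and 2 both fail
   ("qs" is not a condition); step 3 finds "eq" at characters four and five,
   the remaining characters spell "adds", and case CM applies.  */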
17232
17233static const struct asm_opcode *
17234opcode_lookup (char **str)
17235{
17236  char *end, *base;
17237  char *affix;
17238  const struct asm_opcode *opcode;
17239  const struct asm_cond *cond;
17240  char save[2];
17241
17242  /* Scan up to the end of the mnemonic, which must end in white space,
17243     '.' (in unified mode, or for Neon/VFP instructions), or end of string.  */
17244  for (base = end = *str; *end != '\0'; end++)
17245    if (*end == ' ' || *end == '.')
17246      break;
17247
17248  if (end == base)
17249    return NULL;
17250
17251  /* Handle a possible width suffix and/or Neon type suffix.  */
17252  if (end[0] == '.')
17253    {
17254      int offset = 2;
17255
17256      /* The .w and .n suffixes are only valid if the unified syntax is in
17257	 use.  */
17258      if (unified_syntax && end[1] == 'w')
17259	inst.size_req = 4;
17260      else if (unified_syntax && end[1] == 'n')
17261	inst.size_req = 2;
17262      else
17263	offset = 0;
17264
17265      inst.vectype.elems = 0;
17266
17267      *str = end + offset;
17268
17269      if (end[offset] == '.')
17270	{
17271	  /* See if we have a Neon type suffix (possible in either unified or
17272	     non-unified ARM syntax mode).  */
17273	  if (parse_neon_type (&inst.vectype, str) == FAIL)
17274	    return NULL;
17275	}
17276      else if (end[offset] != '\0' && end[offset] != ' ')
17277	return NULL;
17278    }
17279  else
17280    *str = end;
17281
17282  /* Look for unaffixed or special-case affixed mnemonic.  */
17283  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17284						    end - base);
17285  if (opcode)
17286    {
17287      /* step U */
17288      if (opcode->tag < OT_odd_infix_0)
17289	{
17290	  inst.cond = COND_ALWAYS;
17291	  return opcode;
17292	}
17293
17294      if (warn_on_deprecated && unified_syntax)
17295	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17296      affix = base + (opcode->tag - OT_odd_infix_0);
17297      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17298      gas_assert (cond);
17299
17300      inst.cond = cond->value;
17301      return opcode;
17302    }
17303
17304  /* Cannot have a conditional suffix on an identifier of fewer than three
17305     characters (one for the base mnemonic plus two for the suffix).  */
17306  if (end - base < 3)
17307    return NULL;
17308
17309  /* Look for suffixed mnemonic.  */
17310  affix = end - 2;
17311  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17312  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17313						    affix - base);
17314  if (opcode && cond)
17315    {
17316      /* step CE */
17317      switch (opcode->tag)
17318	{
17319	case OT_cinfix3_legacy:
17320	  /* Ignore conditional suffixes matched on infix only mnemonics.  */
17321	  break;
17322
17323	case OT_cinfix3:
17324	case OT_cinfix3_deprecated:
17325	case OT_odd_infix_unc:
17326	  if (!unified_syntax)
17327	    return 0;
17328	  /* else fall through */
17329
17330	case OT_csuffix:
17331	case OT_csuffixF:
17332	case OT_csuf_or_in3:
17333	  inst.cond = cond->value;
17334	  return opcode;
17335
17336	case OT_unconditional:
17337	case OT_unconditionalF:
17338	  if (thumb_mode)
17339	    inst.cond = cond->value;
17340	  else
17341	    {
17342	      /* Delayed diagnostic.  */
17343	      inst.error = BAD_COND;
17344	      inst.cond = COND_ALWAYS;
17345	    }
17346	  return opcode;
17347
17348	default:
17349	  return NULL;
17350	}
17351    }
17352
17353  /* Cannot have a usual-position infix on a mnemonic of less than
17354     six characters (five would be a suffix).  */
17355  if (end - base < 6)
17356    return NULL;
17357
17358  /* Look for infixed mnemonic in the usual position.  */
17359  affix = base + 3;
17360  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17361  if (!cond)
17362    return NULL;
17363
17364  memcpy (save, affix, 2);
17365  memmove (affix, affix + 2, (end - affix) - 2);
17366  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17367						    (end - base) - 2);
17368  memmove (affix + 2, affix, (end - affix) - 2);
17369  memcpy (affix, save, 2);
17370
17371  if (opcode
17372      && (opcode->tag == OT_cinfix3
17373	  || opcode->tag == OT_cinfix3_deprecated
17374	  || opcode->tag == OT_csuf_or_in3
17375	  || opcode->tag == OT_cinfix3_legacy))
17376    {
17377      /* Step CM.  */
17378      if (warn_on_deprecated && unified_syntax
17379	  && (opcode->tag == OT_cinfix3
17380	      || opcode->tag == OT_cinfix3_deprecated))
17381	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17382
17383      inst.cond = cond->value;
17384      return opcode;
17385    }
17386
17387  return NULL;
17388}
17389
17390/* This function generates an initial IT instruction, leaving its block
17391   virtually open for the new instructions. Eventually,
17392   the mask will be updated by now_it_add_mask () each time
17393   a new instruction needs to be included in the IT block.
17394   Finally, the block is closed with close_automatic_it_block ().
17395   The block closure can be requested either from md_assemble (),
17396   a tencode (), or due to a label hook.  */
17397
17398static void
17399new_automatic_it_block (int cond)
17400{
17401  now_it.state = AUTOMATIC_IT_BLOCK;
17402  now_it.mask = 0x18;
17403  now_it.cc = cond;
17404  now_it.block_length = 1;
17405  mapping_state (MAP_THUMB);
17406  now_it.insn = output_it_inst (cond, now_it.mask, NULL);
17407  now_it.warn_deprecated = FALSE;
17408  now_it.insn_cond = TRUE;
17409}
17410
17411/* Close an automatic IT block.
17412   See comments in new_automatic_it_block ().  */
17413
17414static void
17415close_automatic_it_block (void)
17416{
17417  now_it.mask = 0x10;
17418  now_it.block_length = 0;
17419}
17420
17421/* Update the mask of the current automatically-generated IT
17422   instruction. See comments in new_automatic_it_block ().  */
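
/* For illustration: suppose an automatic IT block was just opened for EQ,
   so now_it.mask is 0x18.  When handle_it_state () admits a second EQ
   instruction, it bumps block_length to 2 and calls now_it_add_mask () with
   the EQ condition (0).  The mask is truncated to 0x8, bit 3 is set to
   cond & 1 (0 for EQ) and bit 2 is set to 1, giving 0x4 -- the mask
   encoding of "ITT EQ".  */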
17423
17424static void
17425now_it_add_mask (int cond)
17426{
17427#define CLEAR_BIT(value, nbit)  ((value) & ~(1 << (nbit)))
17428#define SET_BIT_VALUE(value, bitvalue, nbit)  (CLEAR_BIT (value, nbit) \
17429					      | ((bitvalue) << (nbit)))
17430  const int resulting_bit = (cond & 1);
17431
17432  now_it.mask &= 0xf;
17433  now_it.mask = SET_BIT_VALUE (now_it.mask,
17434				   resulting_bit,
17435				  (5 - now_it.block_length));
17436  now_it.mask = SET_BIT_VALUE (now_it.mask,
17437				   1,
17438				   ((5 - now_it.block_length) - 1) );
17439  output_it_inst (now_it.cc, now_it.mask, now_it.insn);
17440
17441#undef CLEAR_BIT
17442#undef SET_BIT_VALUE
17443}
17444
17445/* The IT blocks handling machinery is accessed through these functions:
17446     it_fsm_pre_encode ()               from md_assemble ()
17447     set_it_insn_type ()                optional, from the tencode functions
17448     set_it_insn_type_last ()           ditto
17449     in_it_block ()                     ditto
17450     it_fsm_post_encode ()              from md_assemble ()
17451     force_automatic_it_block_close ()  from label handling functions
17452
17453   Rationale:
17454     1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
17455	initializing the IT insn type with a generic initial value depending
17456	on inst.cond.
17457     2) During the tencode function, two things may happen:
17458	a) The tencode function overrides the IT insn type by
17459	   calling either set_it_insn_type (type) or set_it_insn_type_last ().
17460	b) The tencode function queries the IT block state by
17461	   calling in_it_block () (i.e. to determine narrow/not narrow mode).
17462
17463	Both set_it_insn_type and in_it_block run the internal FSM state
17464	handling function (handle_it_state), because: a) setting the IT insn
17465	type may result in an invalid state (exiting the function),
17466	and b) querying the state requires the FSM to be updated.
17467	Specifically we want to avoid creating an IT block for conditional
17468	branches, so it_fsm_pre_encode is actually a guess and we can't
17469	determine whether an IT block is required until the tencode () routine
17470	has decided what type of instruction this actually is.
17471	Because of this, if set_it_insn_type and in_it_block have to be used,
17472	set_it_insn_type has to be called first.
17473
17474	set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
17475	determines the insn IT type depending on the inst.cond code.
17476	When a tencode () routine encodes an instruction that can be
17477	either outside an IT block, or, in the case of being inside, has to be
17478	the last one, set_it_insn_type_last () will determine the proper
17479	IT instruction type based on the inst.cond code. Otherwise,
17480	set_it_insn_type can be called for overriding that logic or
17481	for covering other cases.
17482
17483	Calling handle_it_state () may not transition the IT block state to
17484	OUTSIDE_IT_BLOCK immediately, since the (current) state could be
17485	still queried. Instead, if the FSM determines that the state should
17486	be transitioned to OUTSIDE_IT_BLOCK, a flag is set so the block is closed
17487	after the tencode () function: that's what it_fsm_post_encode () does.
17488
17489	Since in_it_block () calls the state handling function to get an
17490	updated state, an error may occur (due to invalid insns combination).
17491	In that case, inst.error is set.
17492	Therefore, inst.error has to be checked after the execution of
17493	the tencode () routine.
17494
17495     3) Back in md_assemble(), it_fsm_post_encode () is called to commit
17496	any pending state change (if any) that didn't take place in
17497	handle_it_state () as explained above.  */
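
/* For example, with unified Thumb syntax and -mimplicit-it=thumb (operands
   chosen purely for illustration):

	addeq	r0, r1		@ opens an automatic IT block for EQ
	subeq	r2, r3		@ extends it via now_it_add_mask ()
	add	r4, r5		@ unconditional: the block is forced closed

   The two conditional instructions end up preceded by a single "ITT EQ"
   emitted by output_it_inst ().  */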
17498
17499static void
17500it_fsm_pre_encode (void)
17501{
17502  if (inst.cond != COND_ALWAYS)
17503    inst.it_insn_type = INSIDE_IT_INSN;
17504  else
17505    inst.it_insn_type = OUTSIDE_IT_INSN;
17506
17507  now_it.state_handled = 0;
17508}
17509
17510/* IT state FSM handling function.  */
17511
17512static int
17513handle_it_state (void)
17514{
17515  now_it.state_handled = 1;
17516  now_it.insn_cond = FALSE;
17517
17518  switch (now_it.state)
17519    {
17520    case OUTSIDE_IT_BLOCK:
17521      switch (inst.it_insn_type)
17522	{
17523	case OUTSIDE_IT_INSN:
17524	  break;
17525
17526	case INSIDE_IT_INSN:
17527	case INSIDE_IT_LAST_INSN:
17528	  if (thumb_mode == 0)
17529	    {
17530	      if (unified_syntax
17531		  && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
17532		as_tsktsk (_("Warning: conditional outside an IT block"\
17533			     " for Thumb."));
17534	    }
17535	  else
17536	    {
17537	      if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
17538		  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2))
17539		{
17540		  /* Automatically generate the IT instruction.  */
17541		  new_automatic_it_block (inst.cond);
17542		  if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
17543		    close_automatic_it_block ();
17544		}
17545	      else
17546		{
17547		  inst.error = BAD_OUT_IT;
17548		  return FAIL;
17549		}
17550	    }
17551	  break;
17552
17553	case IF_INSIDE_IT_LAST_INSN:
17554	case NEUTRAL_IT_INSN:
17555	  break;
17556
17557	case IT_INSN:
17558	  now_it.state = MANUAL_IT_BLOCK;
17559	  now_it.block_length = 0;
17560	  break;
17561	}
17562      break;
17563
17564    case AUTOMATIC_IT_BLOCK:
17565      /* Three things may happen now:
17566	 a) We should increment the size of the current IT block;
17567	 b) We should close the current IT block (closing insn or 4 insns);
17568	 c) We should close the current IT block and start a new one (due
17569	 to incompatible conditions, or because a 4-insn block has been
17570	 reached).  */
17571
17572      switch (inst.it_insn_type)
17573	{
17574	case OUTSIDE_IT_INSN:
17575	  /* The closure of the block shall happen immediately,
17576	     so any in_it_block () call reports the block as closed.  */
17577	  force_automatic_it_block_close ();
17578	  break;
17579
17580	case INSIDE_IT_INSN:
17581	case INSIDE_IT_LAST_INSN:
17582	case IF_INSIDE_IT_LAST_INSN:
17583	  now_it.block_length++;
17584
17585	  if (now_it.block_length > 4
17586	      || !now_it_compatible (inst.cond))
17587	    {
17588	      force_automatic_it_block_close ();
17589	      if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
17590		new_automatic_it_block (inst.cond);
17591	    }
17592	  else
17593	    {
17594	      now_it.insn_cond = TRUE;
17595	      now_it_add_mask (inst.cond);
17596	    }
17597
17598	  if (now_it.state == AUTOMATIC_IT_BLOCK
17599	      && (inst.it_insn_type == INSIDE_IT_LAST_INSN
17600		  || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
17601	    close_automatic_it_block ();
17602	  break;
17603
17604	case NEUTRAL_IT_INSN:
17605	  now_it.block_length++;
17606	  now_it.insn_cond = TRUE;
17607
17608	  if (now_it.block_length > 4)
17609	    force_automatic_it_block_close ();
17610	  else
17611	    now_it_add_mask (now_it.cc & 1);
17612	  break;
17613
17614	case IT_INSN:
17615	  close_automatic_it_block ();
17616	  now_it.state = MANUAL_IT_BLOCK;
17617	  break;
17618	}
17619      break;
17620
17621    case MANUAL_IT_BLOCK:
17622      {
17623	/* Check conditional suffixes.  */
17624	const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
17625	int is_last;
17626	now_it.mask <<= 1;
17627	now_it.mask &= 0x1f;
17628	is_last = (now_it.mask == 0x10);
17629	now_it.insn_cond = TRUE;
17630
17631	switch (inst.it_insn_type)
17632	  {
17633	  case OUTSIDE_IT_INSN:
17634	    inst.error = BAD_NOT_IT;
17635	    return FAIL;
17636
17637	  case INSIDE_IT_INSN:
17638	    if (cond != inst.cond)
17639	      {
17640		inst.error = BAD_IT_COND;
17641		return FAIL;
17642	      }
17643	    break;
17644
17645	  case INSIDE_IT_LAST_INSN:
17646	  case IF_INSIDE_IT_LAST_INSN:
17647	    if (cond != inst.cond)
17648	      {
17649		inst.error = BAD_IT_COND;
17650		return FAIL;
17651	      }
17652	    if (!is_last)
17653	      {
17654		inst.error = BAD_BRANCH;
17655		return FAIL;
17656	      }
17657	    break;
17658
17659	  case NEUTRAL_IT_INSN:
17660	    /* The BKPT instruction is unconditional even in an IT block.  */
17661	    break;
17662
17663	  case IT_INSN:
17664	    inst.error = BAD_IT_IT;
17665	    return FAIL;
17666	  }
17667      }
17668      break;
17669    }
17670
17671  return SUCCESS;
17672}
17673
17674struct depr_insn_mask
17675{
17676  unsigned long pattern;
17677  unsigned long mask;
17678  const char* description;
17679};
17680
17681/* List of 16-bit instruction patterns deprecated in an IT block in
17682   ARMv8.  */
17683static const struct depr_insn_mask depr_it_insns[] = {
17684  { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
17685  { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
17686  { 0xa000, 0xb800, N_("ADR") },
17687  { 0x4800, 0xf800, N_("Literal loads") },
17688  { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
17689  { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
17690  /* NOTE: 0x00dd is not the real encoding; instead, it is the 'tvalue'
17691     field in asm_opcode.  'tvalue' is what is used at the stage this check happens.  */
17692  { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
17693  { 0, 0, NULL }
17694};
17695
17696static void
17697it_fsm_post_encode (void)
17698{
17699  int is_last;
17700
17701  if (!now_it.state_handled)
17702    handle_it_state ();
17703
17704  if (now_it.insn_cond
17705      && !now_it.warn_deprecated
17706      && warn_on_deprecated
17707      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
17708    {
17709      if (inst.instruction >= 0x10000)
17710	{
17711	  as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
17712		     "deprecated in ARMv8"));
17713	  now_it.warn_deprecated = TRUE;
17714	}
17715      else
17716	{
17717	  const struct depr_insn_mask *p = depr_it_insns;
17718
17719	  while (p->mask != 0)
17720	    {
17721	      if ((inst.instruction & p->mask) == p->pattern)
17722		{
17723		  as_tsktsk (_("IT blocks containing 16-bit Thumb instructions "
17724			     "of the following class are deprecated in ARMv8: "
17725			     "%s"), p->description);
17726		  now_it.warn_deprecated = TRUE;
17727		  break;
17728		}
17729
17730	      ++p;
17731	    }
17732	}
17733
17734      if (now_it.block_length > 1)
17735	{
17736	  as_tsktsk (_("IT blocks containing more than one conditional "
17737		     "instruction are deprecated in ARMv8"));
17738	  now_it.warn_deprecated = TRUE;
17739	}
17740    }
17741
17742  is_last = (now_it.mask == 0x10);
17743  if (is_last)
17744    {
17745      now_it.state = OUTSIDE_IT_BLOCK;
17746      now_it.mask = 0;
17747    }
17748}
17749
17750static void
17751force_automatic_it_block_close (void)
17752{
17753  if (now_it.state == AUTOMATIC_IT_BLOCK)
17754    {
17755      close_automatic_it_block ();
17756      now_it.state = OUTSIDE_IT_BLOCK;
17757      now_it.mask = 0;
17758    }
17759}
17760
17761static int
17762in_it_block (void)
17763{
17764  if (!now_it.state_handled)
17765    handle_it_state ();
17766
17767  return now_it.state != OUTSIDE_IT_BLOCK;
17768}
17769
17770void
17771md_assemble (char *str)
17772{
17773  char *p = str;
17774  const struct asm_opcode * opcode;
17775
17776  /* Align the previous label if needed.  */
17777  if (last_label_seen != NULL)
17778    {
17779      symbol_set_frag (last_label_seen, frag_now);
17780      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
17781      S_SET_SEGMENT (last_label_seen, now_seg);
17782    }
17783
17784  memset (&inst, '\0', sizeof (inst));
17785  inst.reloc.type = BFD_RELOC_UNUSED;
17786
17787  opcode = opcode_lookup (&p);
17788  if (!opcode)
17789    {
17790      /* It wasn't an instruction, but it might be a register alias of
17791	 the form alias .req reg, or a Neon .dn/.qn directive.  */
17792      if (! create_register_alias (str, p)
17793	  && ! create_neon_reg_alias (str, p))
17794	as_bad (_("bad instruction `%s'"), str);
17795
17796      return;
17797    }
17798
17799  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
17800    as_tsktsk (_("s suffix on comparison instruction is deprecated"));
17801
17802  /* The value which unconditional instructions should have in place of the
17803     condition field.  */
17804  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;
17805
17806  if (thumb_mode)
17807    {
17808      arm_feature_set variant;
17809
17810      variant = cpu_variant;
17811      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
17812      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
17813	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
17814      /* Check that this instruction is supported for this CPU.  */
17815      if (!opcode->tvariant
17816	  || (thumb_mode == 1
17817	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
17818	{
17819	  as_bad (_("selected processor does not support `%s' in Thumb mode"), str);
17820	  return;
17821	}
17822      if (inst.cond != COND_ALWAYS && !unified_syntax
17823	  && opcode->tencode != do_t_branch)
17824	{
17825	  as_bad (_("Thumb does not support conditional execution"));
17826	  return;
17827	}
17828
17829      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2))
17830	{
17831	  if (opcode->tencode != do_t_blx && opcode->tencode != do_t_branch23
17832	      && !(ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_msr)
17833		   || ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_barrier)))
17834	    {
17835	      /* Two things are addressed here.
17836		 1) Implicitly require narrow instructions on Thumb-1.
17837		    This avoids relaxation accidentally introducing Thumb-2
17838		    instructions.
17839		 2) Reject wide instructions in non-Thumb-2 cores.  */
17840	      if (inst.size_req == 0)
17841		inst.size_req = 2;
17842	      else if (inst.size_req == 4)
17843		{
17844		  as_bad (_("selected processor does not support `%s' in Thumb-2 mode"), str);
17845		  return;
17846		}
17847	    }
17848	}
17849
17850      inst.instruction = opcode->tvalue;
17851
17852      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
17853	{
17854	  /* Prepare the it_insn_type for those encodings that don't set
17855	     it.  */
17856	  it_fsm_pre_encode ();
17857
17858	  opcode->tencode ();
17859
17860	  it_fsm_post_encode ();
17861	}
17862
17863      if (!(inst.error || inst.relax))
17864	{
17865	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
17866	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
17867	  if (inst.size_req && inst.size_req != inst.size)
17868	    {
17869	      as_bad (_("cannot honor width suffix -- `%s'"), str);
17870	      return;
17871	    }
17872	}
17873
17874      /* Something has gone badly wrong if we try to relax a fixed size
17875	 instruction.  */
17876      gas_assert (inst.size_req == 0 || !inst.relax);
17877
17878      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
17879			      *opcode->tvariant);
17880      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
17881	 set those bits when Thumb-2 32-bit instructions are seen, i.e.
17882	 anything other than bl/blx and v6-M instructions.
17883	 The impact of relaxable instructions will be considered later after we
17884	 finish all relaxation.  */
17885      if ((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
17886	  && !(ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
17887	       || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier)))
17888	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
17889				arm_ext_v6t2);
17890
17891      check_neon_suffixes;
17892
17893      if (!inst.error)
17894	{
17895	  mapping_state (MAP_THUMB);
17896	}
17897    }
17898  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
17899    {
17900      bfd_boolean is_bx;
17901
17902      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
17903      is_bx = (opcode->aencode == do_bx);
17904
17905      /* Check that this instruction is supported for this CPU.  */
17906      if (!(is_bx && fix_v4bx)
17907	  && !(opcode->avariant &&
17908	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
17909	{
17910	  as_bad (_("selected processor does not support `%s' in ARM mode"), str);
17911	  return;
17912	}
17913      if (inst.size_req)
17914	{
17915	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
17916	  return;
17917	}
17918
17919      inst.instruction = opcode->avalue;
17920      if (opcode->tag == OT_unconditionalF)
17921	inst.instruction |= 0xFU << 28;
17922      else
17923	inst.instruction |= inst.cond << 28;
17924      inst.size = INSN_SIZE;
17925      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
17926	{
17927	  it_fsm_pre_encode ();
17928	  opcode->aencode ();
17929	  it_fsm_post_encode ();
17930	}
17931      /* Arm mode bx is marked as both v4T and v5 because it's still required
17932	 on a hypothetical non-thumb v5 core.  */
17933      if (is_bx)
17934	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
17935      else
17936	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
17937				*opcode->avariant);
17938
17939      check_neon_suffixes;
17940
17941      if (!inst.error)
17942	{
17943	  mapping_state (MAP_ARM);
17944	}
17945    }
17946  else
17947    {
17948      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
17949		"-- `%s'"), str);
17950      return;
17951    }
17952  output_inst (str);
17953}
17954
17955static void
17956check_it_blocks_finished (void)
17957{
17958#ifdef OBJ_ELF
17959  asection *sect;
17960
17961  for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
17962    {
17963      segment_info_type *seginfo = seg_info (sect);
17964
17965      if (seginfo && seginfo->tc_segment_info_data.current_it.state
17966	  == MANUAL_IT_BLOCK)
17967        {
17968	  as_warn (_("section '%s' finished with an open IT block."),
17969		   sect->name);
17970        }
17971    }
17972#else
17973  if (now_it.state == MANUAL_IT_BLOCK)
17974    as_warn (_("file finished with an open IT block."));
17975#endif
17976}
17977
17978/* Various frobbings of labels and their addresses.  */
17979
17980void
17981arm_start_line_hook (void)
17982{
17983  last_label_seen = NULL;
17984}
17985
17986void
17987arm_frob_label (symbolS * sym)
17988{
17989  last_label_seen = sym;
17990
17991  ARM_SET_THUMB (sym, thumb_mode);
17992
17993#if defined OBJ_COFF || defined OBJ_ELF
17994  ARM_SET_INTERWORK (sym, support_interwork);
17995#endif
17996
17997  force_automatic_it_block_close ();
17998
17999  /* Note - do not allow local symbols (.Lxxx) to be labelled
18000     as Thumb functions.  This is because these labels, whilst
18001     they exist inside Thumb code, are not the entry points for
18002     possible ARM->Thumb calls.	 Also, these labels can be used
18003     as part of a computed goto or switch statement.  E.g. gcc
18004     can generate code that looks like this:
18005
18006		ldr  r2, [pc, .Laaa]
18007		lsl  r3, r3, #2
18008		ldr  r2, [r3, r2]
18009		mov  pc, r2
18010
18011       .Lbbb:  .word .Lxxx
18012       .Lccc:  .word .Lyyy
18013       ..etc...
18014       .Laaa:	.word .Lbbb
18015
18016     The first instruction loads the address of the jump table.
18017     The second instruction converts a table index into a byte offset.
18018     The third instruction gets the jump address out of the table.
18019     The fourth instruction performs the jump.
18020
18021     If the address stored at .Laaa is that of a symbol which has the
18022     Thumb_Func bit set, then the linker will arrange for this address
18023     to have the bottom bit set, which in turn would mean that the
18024     address computation performed by the third instruction would end
18025     up with the bottom bit set.  Since the ARM is capable of unaligned
18026     word loads, the instruction would then load the incorrect address
18027     out of the jump table, and chaos would ensue.  */
18028  if (label_is_thumb_function_name
18029      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
18030      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
18031    {
18032      /* When the address of a Thumb function is taken the bottom
18033	 bit of that address should be set.  This will allow
18034	 interworking between Arm and Thumb functions to work
18035	 correctly.  */
18036
18037      THUMB_SET_FUNC (sym, 1);
18038
18039      label_is_thumb_function_name = FALSE;
18040    }
18041
18042  dwarf2_emit_label (sym);
18043}
18044
18045bfd_boolean
18046arm_data_in_code (void)
18047{
18048  if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
18049    {
18050      *input_line_pointer = '/';
18051      input_line_pointer += 5;
18052      *input_line_pointer = 0;
18053      return TRUE;
18054    }
18055
18056  return FALSE;
18057}
18058
18059char *
18060arm_canonicalize_symbol_name (char * name)
18061{
18062  int len;
18063
18064  if (thumb_mode && (len = strlen (name)) > 5
18065      && streq (name + len - 5, "/data"))
18066    *(name + len - 5) = 0;
18067
18068  return name;
18069}
18070
18071/* Table of all register names defined by default.  The user can
18072   define additional names with .req.  Note that all register names
18073   should appear in both upper and lowercase variants.	Some registers
18074   also have mixed-case names.	*/
18075
18076#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
18077#define REGNUM(p,n,t) REGDEF(p##n, n, t)
18078#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
18079#define REGSET(p,t) \
18080  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
18081  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
18082  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
18083  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
18084#define REGSETH(p,t) \
18085  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
18086  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
18087  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
18088  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
18089#define REGSET2(p,t) \
18090  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
18091  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
18092  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
18093  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
18094#define SPLRBANK(base,bank,t) \
18095  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
18096  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
18097  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
18098  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
18099  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
18100  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
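
/* For instance REGSET (r, RN) expands to the sixteen entries
   { "r0", 0, REG_TYPE_RN, TRUE, 0 } ... { "r15", 15, REG_TYPE_RN, TRUE, 0 },
   giving the lowercase spellings; REGSET (R, RN) supplies the uppercase
   ones.  */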
18101
18102static const struct reg_entry reg_names[] =
18103{
18104  /* ARM integer registers.  */
18105  REGSET(r, RN), REGSET(R, RN),
18106
18107  /* ATPCS synonyms.  */
18108  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
18109  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
18110  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
18111
18112  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
18113  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
18114  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
18115
18116  /* Well-known aliases.  */
18117  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
18118  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
18119
18120  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
18121  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
18122
18123  /* Coprocessor numbers.  */
18124  REGSET(p, CP), REGSET(P, CP),
18125
18126  /* Coprocessor register numbers.  The "cr" variants are for backward
18127     compatibility.  */
18128  REGSET(c,  CN), REGSET(C, CN),
18129  REGSET(cr, CN), REGSET(CR, CN),
18130
18131  /* ARM banked registers.  */
18132  REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
18133  REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
18134  REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
18135  REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
18136  REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
18137  REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
18138  REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),
18139
18140  REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
18141  REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
18142  REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
18143  REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
18144  REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
18145  REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
18146  REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
18147  REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),
18148
18149  SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
18150  SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
18151  SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
18152  SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
18153  SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
18154  REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
18155  REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
18156  REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
18157  REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),
18158
18159  /* FPA registers.  */
18160  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
18161  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
18162
18163  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
18164  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
18165
18166  /* VFP SP registers.	*/
18167  REGSET(s,VFS),  REGSET(S,VFS),
18168  REGSETH(s,VFS), REGSETH(S,VFS),
18169
18170  /* VFP DP Registers.	*/
18171  REGSET(d,VFD),  REGSET(D,VFD),
18172  /* Extra Neon DP registers.  */
18173  REGSETH(d,VFD), REGSETH(D,VFD),
18174
18175  /* Neon QP registers.  */
18176  REGSET2(q,NQ),  REGSET2(Q,NQ),
18177
18178  /* VFP control registers.  */
18179  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
18180  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
18181  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
18182  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
18183  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
18184  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
18185
18186  /* Maverick DSP coprocessor registers.  */
18187  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
18188  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),
18189
18190  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
18191  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
18192  REGDEF(dspsc,0,DSPSC),
18193
18194  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
18195  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
18196  REGDEF(DSPSC,0,DSPSC),
18197
18198  /* iWMMXt data registers - p0, c0-15.	 */
18199  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
18200
18201  /* iWMMXt control registers - p1, c0-3.  */
18202  REGDEF(wcid,	0,MMXWC),  REGDEF(wCID,	 0,MMXWC),  REGDEF(WCID,  0,MMXWC),
18203  REGDEF(wcon,	1,MMXWC),  REGDEF(wCon,	 1,MMXWC),  REGDEF(WCON,  1,MMXWC),
18204  REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
18205  REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),
18206
18207  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
18208  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
18209  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
18210  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
18211  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),
18212
18213  /* XScale accumulator registers.  */
18214  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
18215};
18216#undef REGDEF
18217#undef REGNUM
18218#undef REGSET
18219
18220/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
18221   within psr_required_here.  */
18222static const struct asm_psr psrs[] =
18223{
18224  /* Backward compatibility notation.  Note that "all" is no longer
18225     truly all possible PSR bits.  */
18226  {"all",  PSR_c | PSR_f},
18227  {"flg",  PSR_f},
18228  {"ctl",  PSR_c},
18229
18230  /* Individual flags.	*/
18231  {"f",	   PSR_f},
18232  {"c",	   PSR_c},
18233  {"x",	   PSR_x},
18234  {"s",	   PSR_s},
18235
18236  /* Combinations of flags.  */
18237  {"fs",   PSR_f | PSR_s},
18238  {"fx",   PSR_f | PSR_x},
18239  {"fc",   PSR_f | PSR_c},
18240  {"sf",   PSR_s | PSR_f},
18241  {"sx",   PSR_s | PSR_x},
18242  {"sc",   PSR_s | PSR_c},
18243  {"xf",   PSR_x | PSR_f},
18244  {"xs",   PSR_x | PSR_s},
18245  {"xc",   PSR_x | PSR_c},
18246  {"cf",   PSR_c | PSR_f},
18247  {"cs",   PSR_c | PSR_s},
18248  {"cx",   PSR_c | PSR_x},
18249  {"fsx",  PSR_f | PSR_s | PSR_x},
18250  {"fsc",  PSR_f | PSR_s | PSR_c},
18251  {"fxs",  PSR_f | PSR_x | PSR_s},
18252  {"fxc",  PSR_f | PSR_x | PSR_c},
18253  {"fcs",  PSR_f | PSR_c | PSR_s},
18254  {"fcx",  PSR_f | PSR_c | PSR_x},
18255  {"sfx",  PSR_s | PSR_f | PSR_x},
18256  {"sfc",  PSR_s | PSR_f | PSR_c},
18257  {"sxf",  PSR_s | PSR_x | PSR_f},
18258  {"sxc",  PSR_s | PSR_x | PSR_c},
18259  {"scf",  PSR_s | PSR_c | PSR_f},
18260  {"scx",  PSR_s | PSR_c | PSR_x},
18261  {"xfs",  PSR_x | PSR_f | PSR_s},
18262  {"xfc",  PSR_x | PSR_f | PSR_c},
18263  {"xsf",  PSR_x | PSR_s | PSR_f},
18264  {"xsc",  PSR_x | PSR_s | PSR_c},
18265  {"xcf",  PSR_x | PSR_c | PSR_f},
18266  {"xcs",  PSR_x | PSR_c | PSR_s},
18267  {"cfs",  PSR_c | PSR_f | PSR_s},
18268  {"cfx",  PSR_c | PSR_f | PSR_x},
18269  {"csf",  PSR_c | PSR_s | PSR_f},
18270  {"csx",  PSR_c | PSR_s | PSR_x},
18271  {"cxf",  PSR_c | PSR_x | PSR_f},
18272  {"cxs",  PSR_c | PSR_x | PSR_s},
18273  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
18274  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
18275  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
18276  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
18277  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
18278  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
18279  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
18280  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
18281  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
18282  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
18283  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
18284  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
18285  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
18286  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
18287  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
18288  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
18289  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
18290  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
18291  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
18292  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
18293  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
18294  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
18295  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
18296  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
18297};
18298
18299/* Table of V7M psr names.  */
18300static const struct asm_psr v7m_psrs[] =
18301{
18302  {"apsr",	  0 }, {"APSR",		0 },
18303  {"iapsr",	  1 }, {"IAPSR",	1 },
18304  {"eapsr",	  2 }, {"EAPSR",	2 },
18305  {"psr",	  3 }, {"PSR",		3 },
18306  {"xpsr",	  3 }, {"XPSR",		3 }, {"xPSR",	  3 },
18307  {"ipsr",	  5 }, {"IPSR",		5 },
18308  {"epsr",	  6 }, {"EPSR",		6 },
18309  {"iepsr",	  7 }, {"IEPSR",	7 },
18310  {"msp",	  8 }, {"MSP",		8 },
18311  {"psp",	  9 }, {"PSP",		9 },
18312  {"primask",	  16}, {"PRIMASK",	16},
18313  {"basepri",	  17}, {"BASEPRI",	17},
18314  {"basepri_max", 18}, {"BASEPRI_MAX",	18},
18315  {"basepri_max", 18}, {"BASEPRI_MASK",	18}, /* Typo, preserved for backwards compatibility.  */
18316  {"faultmask",	  19}, {"FAULTMASK",	19},
18317  {"control",	  20}, {"CONTROL",	20}
18318};
18319
18320/* Table of all shift-in-operand names.	 */
18321static const struct asm_shift_name shift_names [] =
18322{
18323  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
18324  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
18325  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
18326  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
18327  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
18328  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX }
18329};
18330
18331/* Table of all explicit relocation names.  */
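/* These names are accepted, in parentheses, as a suffix on an expression,
   e.g. ".word sym(GOTOFF)" (illustrative only).  */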
18332#ifdef OBJ_ELF
18333static struct reloc_entry reloc_names[] =
18334{
18335  { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
18336  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
18337  { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
18338  { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
18339  { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
18340  { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
18341  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},  { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
18342  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
18343  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
18344  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},  { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
18345  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},  { "TPOFF",   BFD_RELOC_ARM_TLS_LE32},
18346  { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
18347  { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
18348	{ "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
18349  { "tlscall", BFD_RELOC_ARM_TLS_CALL},
18350	{ "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
18351  { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
18352	{ "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ}
18353};
18354#endif
18355
18356/* Table of all conditional affixes.  0xF is not defined as a condition code.  */
18357static const struct asm_cond conds[] =
18358{
18359  {"eq", 0x0},
18360  {"ne", 0x1},
18361  {"cs", 0x2}, {"hs", 0x2},
18362  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
18363  {"mi", 0x4},
18364  {"pl", 0x5},
18365  {"vs", 0x6},
18366  {"vc", 0x7},
18367  {"hi", 0x8},
18368  {"ls", 0x9},
18369  {"ge", 0xa},
18370  {"lt", 0xb},
18371  {"gt", 0xc},
18372  {"le", 0xd},
18373  {"al", 0xe}
18374};
18375
18376#define UL_BARRIER(L,U,CODE,FEAT) \
18377  { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
18378  { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }
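/* As a rough illustration, UL_BARRIER ("sy", "SY", 0xf, ARM_EXT_BARRIER)
   expands to the two table entries
     { "sy", 0xf, ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER) },
     { "SY", 0xf, ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER) }
   so each barrier option is accepted in both lower and upper case.  */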
18379
18380static struct asm_barrier_opt barrier_opt_names[] =
18381{
18382  UL_BARRIER ("sy",	"SY",	 0xf, ARM_EXT_BARRIER),
18383  UL_BARRIER ("st",	"ST",	 0xe, ARM_EXT_BARRIER),
18384  UL_BARRIER ("ld",	"LD",	 0xd, ARM_EXT_V8),
18385  UL_BARRIER ("ish",	"ISH",	 0xb, ARM_EXT_BARRIER),
18386  UL_BARRIER ("sh",	"SH",	 0xb, ARM_EXT_BARRIER),
18387  UL_BARRIER ("ishst",	"ISHST", 0xa, ARM_EXT_BARRIER),
18388  UL_BARRIER ("shst",	"SHST",	 0xa, ARM_EXT_BARRIER),
18389  UL_BARRIER ("ishld",	"ISHLD", 0x9, ARM_EXT_V8),
18390  UL_BARRIER ("un",	"UN",	 0x7, ARM_EXT_BARRIER),
18391  UL_BARRIER ("nsh",	"NSH",	 0x7, ARM_EXT_BARRIER),
18392  UL_BARRIER ("unst",	"UNST",	 0x6, ARM_EXT_BARRIER),
18393  UL_BARRIER ("nshst",	"NSHST", 0x6, ARM_EXT_BARRIER),
18394  UL_BARRIER ("nshld",	"NSHLD", 0x5, ARM_EXT_V8),
18395  UL_BARRIER ("osh",	"OSH",	 0x3, ARM_EXT_BARRIER),
18396  UL_BARRIER ("oshst",	"OSHST", 0x2, ARM_EXT_BARRIER),
18397  UL_BARRIER ("oshld",	"OSHLD", 0x1, ARM_EXT_V8)
18398};
18399
18400#undef UL_BARRIER
18401
18402/* Table of ARM-format instructions.	*/
18403
18404/* Macros for gluing together operand strings.  N.B. In all cases
18405   other than OPS0, the trailing OP_stop comes from default
18406   zero-initialization of the unspecified elements of the array.  */
18407#define OPS0()		  { OP_stop, }
18408#define OPS1(a)		  { OP_##a, }
18409#define OPS2(a,b)	  { OP_##a,OP_##b, }
18410#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
18411#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
18412#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
18413#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
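/* For example, OPS2 (RR, SH) expands to { OP_RR,OP_SH, }; the elements not
   named default to zero, i.e. OP_stop.  */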
18414
18415/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
18416   This is useful when mixing operands for ARM and THUMB, i.e. using the
18417   MIX_ARM_THUMB_OPERANDS macro.
18418   In order to use these macros, prefix the number of operands with _
18419   e.g. _3.  */
18420#define OPS_1(a)	   { a, }
18421#define OPS_2(a,b)	   { a,b, }
18422#define OPS_3(a,b,c)	   { a,b,c, }
18423#define OPS_4(a,b,c,d)	   { a,b,c,d, }
18424#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
18425#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
18426
18427/* These macros abstract out the exact format of the mnemonic table and
18428   save some repeated characters.  */
18429
18430/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
18431#define TxCE(mnem, op, top, nops, ops, ae, te) \
18432  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
18433    THUMB_VARIANT, do_##ae, do_##te }
18434
18435/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
18436   a T_MNEM_xyz enumerator.  */
18437#define TCE(mnem, aop, top, nops, ops, ae, te) \
18438      TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
18439#define tCE(mnem, aop, top, nops, ops, ae, te) \
18440      TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)
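/* For example, the entry TCE ("swi", f000000, df00, 1, (EXPi), swi, t_swi)
   below expands (via TxCE) to roughly
     { "swi", { OP_EXPi, }, OT_csuffix, 0xf000000, 0xdf00,
       ARM_VARIANT, THUMB_VARIANT, do_swi, do_t_swi }
   whereas tCE substitutes a T_MNEM_xyz value for the numeric Thumb opcode.  */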
18441
18442/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
18443   infix after the third character.  */
18444#define TxC3(mnem, op, top, nops, ops, ae, te) \
18445  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
18446    THUMB_VARIANT, do_##ae, do_##te }
18447#define TxC3w(mnem, op, top, nops, ops, ae, te) \
18448  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
18449    THUMB_VARIANT, do_##ae, do_##te }
18450#define TC3(mnem, aop, top, nops, ops, ae, te) \
18451      TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
18452#define TC3w(mnem, aop, top, nops, ops, ae, te) \
18453      TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
18454#define tC3(mnem, aop, top, nops, ops, ae, te) \
18455      TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
18456#define tC3w(mnem, aop, top, nops, ops, ae, te) \
18457      TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)
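/* With OT_cinfix3 the condition is spliced in after the third character,
   so (for example) the tC3 ("ldmia", ...) entry below is also intended to
   match the pre-UAL spelling "ldmeqia".  */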
18458
18459/* Mnemonic that cannot be conditionalized.  The ARM condition-code
18460   field is still 0xE.  Many of the Thumb variants can be executed
18461   conditionally, so this is checked separately.  */
18462#define TUE(mnem, op, top, nops, ops, ae, te)				\
18463  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
18464    THUMB_VARIANT, do_##ae, do_##te }
18465
18466/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
18467   Used by mnemonics that have very minimal differences in the encoding for
18468   ARM and Thumb variants and can be handled in a common function.  */
18469#define TUEc(mnem, op, top, nops, ops, en) \
18470  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
18471    THUMB_VARIANT, do_##en, do_##en }
18472
18473/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
18474   condition code field.  */
18475#define TUF(mnem, op, top, nops, ops, ae, te)				\
18476  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
18477    THUMB_VARIANT, do_##ae, do_##te }
18478
18479/* ARM-only variants of all the above.  */
18480#define CE(mnem,  op, nops, ops, ae)	\
18481  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
18482
18483#define C3(mnem, op, nops, ops, ae)	\
18484  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
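/* Note that C3 stringizes its first argument while CE expects a string
   literal, which is why table entries appear both as C3 (adrl, ...) and as
   CE ("rsc", ...) further down.  */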
18485
18486/* Legacy mnemonics that always have conditional infix after the third
18487   character.  */
18488#define CL(mnem, op, nops, ops, ae)	\
18489  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
18490    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
18491
18492/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
18493#define cCE(mnem,  op, nops, ops, ae)	\
18494  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
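/* For example, cCE ("wfs", e200110, 1, (RR), rd) expands to roughly
     { "wfs", { OP_RR, }, OT_csuffix, 0xe200110, 0xee200110,
       ARM_VARIANT, ARM_VARIANT, do_rd, do_rd }
   i.e. the Thumb-2 opcode is the ARM opcode prefixed with a fixed 0xE
   condition nibble.  */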
18495
18496/* Legacy coprocessor instructions where conditional infix and conditional
18497   suffix are ambiguous.  For consistency this includes all FPA instructions,
18498   not just the potentially ambiguous ones.  */
18499#define cCL(mnem, op, nops, ops, ae)	\
18500  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
18501    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
18502
18503/* Coprocessor, takes either a suffix or a position-3 infix
18504   (for an FPA corner case). */
18505#define C3E(mnem, op, nops, ops, ae) \
18506  { mnem, OPS##nops ops, OT_csuf_or_in3, \
18507    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
18508
18509#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
18510  { m1 #m2 m3, OPS##nops ops, \
18511    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
18512    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
18513
18514#define CM(m1, m2, op, nops, ops, ae)	\
18515  xCM_ (m1,   , m2, op, nops, ops, ae),	\
18516  xCM_ (m1, eq, m2, op, nops, ops, ae),	\
18517  xCM_ (m1, ne, m2, op, nops, ops, ae),	\
18518  xCM_ (m1, cs, m2, op, nops, ops, ae),	\
18519  xCM_ (m1, hs, m2, op, nops, ops, ae),	\
18520  xCM_ (m1, cc, m2, op, nops, ops, ae),	\
18521  xCM_ (m1, ul, m2, op, nops, ops, ae),	\
18522  xCM_ (m1, lo, m2, op, nops, ops, ae),	\
18523  xCM_ (m1, mi, m2, op, nops, ops, ae),	\
18524  xCM_ (m1, pl, m2, op, nops, ops, ae),	\
18525  xCM_ (m1, vs, m2, op, nops, ops, ae),	\
18526  xCM_ (m1, vc, m2, op, nops, ops, ae),	\
18527  xCM_ (m1, hi, m2, op, nops, ops, ae),	\
18528  xCM_ (m1, ls, m2, op, nops, ops, ae),	\
18529  xCM_ (m1, ge, m2, op, nops, ops, ae),	\
18530  xCM_ (m1, lt, m2, op, nops, ops, ae),	\
18531  xCM_ (m1, gt, m2, op, nops, ops, ae),	\
18532  xCM_ (m1, le, m2, op, nops, ops, ae),	\
18533  xCM_ (m1, al, m2, op, nops, ops, ae)
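/* For example, CM ("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc),
   mull) emits a "smulls" entry plus one per condition: "smulleqs",
   "smullnes", ... "smullals".  The unconditional variant gets
   OT_odd_infix_unc (since sizeof ("") == 1); the others record the infix
   position as OT_odd_infix_0 + sizeof ("smull") - 1.  */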
18534
18535#define UE(mnem, op, nops, ops, ae)	\
18536  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
18537
18538#define UF(mnem, op, nops, ops, ae)	\
18539  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
18540
18541/* Neon data-processing. ARM versions are unconditional with cond=0xf.
18542   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
18543   use the same encoding function for each.  */
18544#define NUF(mnem, op, nops, ops, enc)					\
18545  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
18546    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
18547
18548/* Neon data processing, version which indirects through neon_enc_tab for
18549   the various overloaded versions of opcodes.  */
18550#define nUF(mnem, op, nops, ops, enc)					\
18551  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
18552    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
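/* For example, nUF (aese, _aes, 2, (RNQ, RNQ), aese) expands to roughly
     { "aese", { OP_RNQ,OP_RNQ, }, OT_unconditionalF, N_MNEM_aes, N_MNEM_aes,
       ARM_VARIANT, THUMB_VARIANT, do_aese, do_aese }
   with the concrete opcode chosen later via neon_enc_tab.  */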
18553
18554/* Neon insn with conditional suffix for the ARM version, non-overloaded
18555   version.  */
18556#define NCE_tag(mnem, op, nops, ops, enc, tag)				\
18557  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,		\
18558    THUMB_VARIANT, do_##enc, do_##enc }
18559
18560#define NCE(mnem, op, nops, ops, enc)					\
18561   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
18562
18563#define NCEF(mnem, op, nops, ops, enc)					\
18564    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
18565
18566/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
18567#define nCE_tag(mnem, op, nops, ops, enc, tag)				\
18568  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,		\
18569    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
18570
18571#define nCE(mnem, op, nops, ops, enc)					\
18572   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
18573
18574#define nCEF(mnem, op, nops, ops, enc)					\
18575    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
18576
18577#define do_0 0
18578
18579static const struct asm_opcode insns[] =
18580{
18581#define ARM_VARIANT    & arm_ext_v1 /* Core ARM Instructions.  */
18582#define THUMB_VARIANT  & arm_ext_v4t
18583 tCE("and",	0000000, _and,     3, (RR, oRR, SH), arit, t_arit3c),
18584 tC3("ands",	0100000, _ands,	   3, (RR, oRR, SH), arit, t_arit3c),
18585 tCE("eor",	0200000, _eor,	   3, (RR, oRR, SH), arit, t_arit3c),
18586 tC3("eors",	0300000, _eors,	   3, (RR, oRR, SH), arit, t_arit3c),
18587 tCE("sub",	0400000, _sub,	   3, (RR, oRR, SH), arit, t_add_sub),
18588 tC3("subs",	0500000, _subs,	   3, (RR, oRR, SH), arit, t_add_sub),
18589 tCE("add",	0800000, _add,	   3, (RR, oRR, SHG), arit, t_add_sub),
18590 tC3("adds",	0900000, _adds,	   3, (RR, oRR, SHG), arit, t_add_sub),
18591 tCE("adc",	0a00000, _adc,	   3, (RR, oRR, SH), arit, t_arit3c),
18592 tC3("adcs",	0b00000, _adcs,	   3, (RR, oRR, SH), arit, t_arit3c),
18593 tCE("sbc",	0c00000, _sbc,	   3, (RR, oRR, SH), arit, t_arit3),
18594 tC3("sbcs",	0d00000, _sbcs,	   3, (RR, oRR, SH), arit, t_arit3),
18595 tCE("orr",	1800000, _orr,	   3, (RR, oRR, SH), arit, t_arit3c),
18596 tC3("orrs",	1900000, _orrs,	   3, (RR, oRR, SH), arit, t_arit3c),
18597 tCE("bic",	1c00000, _bic,	   3, (RR, oRR, SH), arit, t_arit3),
18598 tC3("bics",	1d00000, _bics,	   3, (RR, oRR, SH), arit, t_arit3),
18599
18600 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
18601    for setting PSR flag bits.  They are obsolete in V6 and do not
18602    have Thumb equivalents. */
18603 tCE("tst",	1100000, _tst,	   2, (RR, SH),      cmp,  t_mvn_tst),
18604 tC3w("tsts",	1100000, _tst,	   2, (RR, SH),      cmp,  t_mvn_tst),
18605  CL("tstp",	110f000,     	   2, (RR, SH),      cmp),
18606 tCE("cmp",	1500000, _cmp,	   2, (RR, SH),      cmp,  t_mov_cmp),
18607 tC3w("cmps",	1500000, _cmp,	   2, (RR, SH),      cmp,  t_mov_cmp),
18608  CL("cmpp",	150f000,     	   2, (RR, SH),      cmp),
18609 tCE("cmn",	1700000, _cmn,	   2, (RR, SH),      cmp,  t_mvn_tst),
18610 tC3w("cmns",	1700000, _cmn,	   2, (RR, SH),      cmp,  t_mvn_tst),
18611  CL("cmnp",	170f000,     	   2, (RR, SH),      cmp),
18612
18613 tCE("mov",	1a00000, _mov,	   2, (RR, SH),      mov,  t_mov_cmp),
18614 tC3("movs",	1b00000, _movs,	   2, (RR, SH),      mov,  t_mov_cmp),
18615 tCE("mvn",	1e00000, _mvn,	   2, (RR, SH),      mov,  t_mvn_tst),
18616 tC3("mvns",	1f00000, _mvns,	   2, (RR, SH),      mov,  t_mvn_tst),
18617
18618 tCE("ldr",	4100000, _ldr,	   2, (RR, ADDRGLDR),ldst, t_ldst),
18619 tC3("ldrb",	4500000, _ldrb,	   2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
18620 tCE("str",	4000000, _str,	   _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
18621								OP_RRnpc),
18622					OP_ADDRGLDR),ldst, t_ldst),
18623 tC3("strb",	4400000, _strb,	   2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
18624
18625 tCE("stm",	8800000, _stmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
18626 tC3("stmia",	8800000, _stmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
18627 tC3("stmea",	8800000, _stmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
18628 tCE("ldm",	8900000, _ldmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
18629 tC3("ldmia",	8900000, _ldmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
18630 tC3("ldmfd",	8900000, _ldmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
18631
18632 TCE("swi",	f000000, df00,     1, (EXPi),        swi, t_swi),
18633 TCE("svc",	f000000, df00,     1, (EXPi),        swi, t_swi),
18634 tCE("b",	a000000, _b,	   1, (EXPr),	     branch, t_branch),
18635 TCE("bl",	b000000, f000f800, 1, (EXPr),	     bl, t_branch23),
18636
18637  /* Pseudo ops.  */
18638 tCE("adr",	28f0000, _adr,	   2, (RR, EXP),     adr,  t_adr),
18639  C3(adrl,	28f0000,           2, (RR, EXP),     adrl),
18640 tCE("nop",	1a00000, _nop,	   1, (oI255c),	     nop,  t_nop),
18641 tCE("udf",	7f000f0, _udf,     1, (oIffffb),     bkpt, t_udf),
18642
18643  /* Thumb-compatibility pseudo ops.  */
18644 tCE("lsl",	1a00000, _lsl,	   3, (RR, oRR, SH), shift, t_shift),
18645 tC3("lsls",	1b00000, _lsls,	   3, (RR, oRR, SH), shift, t_shift),
18646 tCE("lsr",	1a00020, _lsr,	   3, (RR, oRR, SH), shift, t_shift),
18647 tC3("lsrs",	1b00020, _lsrs,	   3, (RR, oRR, SH), shift, t_shift),
18648 tCE("asr",	1a00040, _asr,	   3, (RR, oRR, SH), shift, t_shift),
18649 tC3("asrs",      1b00040, _asrs,     3, (RR, oRR, SH), shift, t_shift),
18650 tCE("ror",	1a00060, _ror,	   3, (RR, oRR, SH), shift, t_shift),
18651 tC3("rors",	1b00060, _rors,	   3, (RR, oRR, SH), shift, t_shift),
18652 tCE("neg",	2600000, _neg,	   2, (RR, RR),      rd_rn, t_neg),
18653 tC3("negs",	2700000, _negs,	   2, (RR, RR),      rd_rn, t_neg),
18654 tCE("push",	92d0000, _push,     1, (REGLST),	     push_pop, t_push_pop),
18655 tCE("pop",	8bd0000, _pop,	   1, (REGLST),	     push_pop, t_push_pop),
18656
18657 /* These may simplify to neg.  */
18658 TCE("rsb",	0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
18659 TC3("rsbs",	0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
18660
18661#undef  THUMB_VARIANT
18662#define THUMB_VARIANT  & arm_ext_v6
18663
18664 TCE("cpy",       1a00000, 4600,     2, (RR, RR),      rd_rm, t_cpy),
18665
18666 /* V1 instructions with no Thumb analogue prior to V6T2.  */
18667#undef  THUMB_VARIANT
18668#define THUMB_VARIANT  & arm_ext_v6t2
18669
18670 TCE("teq",	1300000, ea900f00, 2, (RR, SH),      cmp,  t_mvn_tst),
18671 TC3w("teqs",	1300000, ea900f00, 2, (RR, SH),      cmp,  t_mvn_tst),
18672  CL("teqp",	130f000,           2, (RR, SH),      cmp),
18673
18674 TC3("ldrt",	4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
18675 TC3("ldrbt",	4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
18676 TC3("strt",	4200000, f8400e00, 2, (RR_npcsp, ADDR),   ldstt, t_ldstt),
18677 TC3("strbt",	4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
18678
18679 TC3("stmdb",	9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18680 TC3("stmfd",     9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18681
18682 TC3("ldmdb",	9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18683 TC3("ldmea",	9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18684
18685 /* V1 instructions with no Thumb analogue at all.  */
18686  CE("rsc",	0e00000,	   3, (RR, oRR, SH), arit),
18687  C3(rscs,	0f00000,	   3, (RR, oRR, SH), arit),
18688
18689  C3(stmib,	9800000,	   2, (RRw, REGLST), ldmstm),
18690  C3(stmfa,	9800000,	   2, (RRw, REGLST), ldmstm),
18691  C3(stmda,	8000000,	   2, (RRw, REGLST), ldmstm),
18692  C3(stmed,	8000000,	   2, (RRw, REGLST), ldmstm),
18693  C3(ldmib,	9900000,	   2, (RRw, REGLST), ldmstm),
18694  C3(ldmed,	9900000,	   2, (RRw, REGLST), ldmstm),
18695  C3(ldmda,	8100000,	   2, (RRw, REGLST), ldmstm),
18696  C3(ldmfa,	8100000,	   2, (RRw, REGLST), ldmstm),
18697
18698#undef  ARM_VARIANT
18699#define ARM_VARIANT    & arm_ext_v2	/* ARM 2 - multiplies.	*/
18700#undef  THUMB_VARIANT
18701#define THUMB_VARIANT  & arm_ext_v4t
18702
18703 tCE("mul",	0000090, _mul,	   3, (RRnpc, RRnpc, oRR), mul, t_mul),
18704 tC3("muls",	0100090, _muls,	   3, (RRnpc, RRnpc, oRR), mul, t_mul),
18705
18706#undef  THUMB_VARIANT
18707#define THUMB_VARIANT  & arm_ext_v6t2
18708
18709 TCE("mla",	0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
18710  C3(mlas,	0300090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
18711
18712  /* Generic coprocessor instructions.	*/
18713 TCE("cdp",	e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp,    cdp),
18714 TCE("ldc",	c100000, ec100000, 3, (RCP, RCN, ADDRGLDC),	        lstc,   lstc),
18715 TC3("ldcl",	c500000, ec500000, 3, (RCP, RCN, ADDRGLDC),	        lstc,   lstc),
18716 TCE("stc",	c000000, ec000000, 3, (RCP, RCN, ADDRGLDC),	        lstc,   lstc),
18717 TC3("stcl",	c400000, ec400000, 3, (RCP, RCN, ADDRGLDC),	        lstc,   lstc),
18718 TCE("mcr",	e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b),   co_reg, co_reg),
18719 TCE("mrc",	e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b),   co_reg, co_reg),
18720
18721#undef  ARM_VARIANT
18722#define ARM_VARIANT  & arm_ext_v2s /* ARM 3 - swp instructions.  */
18723
18724  CE("swp",	1000090,           3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
18725  C3(swpb,	1400090,           3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
18726
18727#undef  ARM_VARIANT
18728#define ARM_VARIANT    & arm_ext_v3	/* ARM 6 Status register instructions.	*/
18729#undef  THUMB_VARIANT
18730#define THUMB_VARIANT  & arm_ext_msr
18731
18732 TCE("mrs",	1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
18733 TCE("msr",	120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
18734
18735#undef  ARM_VARIANT
18736#define ARM_VARIANT    & arm_ext_v3m	 /* ARM 7M long multiplies.  */
18737#undef  THUMB_VARIANT
18738#define THUMB_VARIANT  & arm_ext_v6t2
18739
18740 TCE("smull",	0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18741  CM("smull","s",	0d00090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18742 TCE("umull",	0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18743  CM("umull","s",	0900090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18744 TCE("smlal",	0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18745  CM("smlal","s",	0f00090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18746 TCE("umlal",	0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18747  CM("umlal","s",	0b00090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18748
18749#undef  ARM_VARIANT
18750#define ARM_VARIANT    & arm_ext_v4	/* ARM Architecture 4.	*/
18751#undef  THUMB_VARIANT
18752#define THUMB_VARIANT  & arm_ext_v4t
18753
18754 tC3("ldrh",	01000b0, _ldrh,     2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18755 tC3("strh",	00000b0, _strh,     2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18756 tC3("ldrsh",	01000f0, _ldrsh,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18757 tC3("ldrsb",	01000d0, _ldrsb,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18758 tC3("ldsh",	01000f0, _ldrsh,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18759 tC3("ldsb",	01000d0, _ldrsb,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18760
18761#undef  ARM_VARIANT
18762#define ARM_VARIANT  & arm_ext_v4t_5
18763
18764  /* ARM Architecture 4T.  */
18765  /* Note: bx (and blx) are required on V5, even if the processor does
18766     not support Thumb.	 */
18767 TCE("bx",	12fff10, 4700, 1, (RR),	bx, t_bx),
18768
18769#undef  ARM_VARIANT
18770#define ARM_VARIANT    & arm_ext_v5 /*  ARM Architecture 5T.	 */
18771#undef  THUMB_VARIANT
18772#define THUMB_VARIANT  & arm_ext_v5t
18773
18774  /* Note: blx has 2 variants; the .value coded here is for
18775     BLX(2).  Only this variant has conditional execution.  */
18776 TCE("blx",	12fff30, 4780, 1, (RR_EXr),			    blx,  t_blx),
18777 TUE("bkpt",	1200070, be00, 1, (oIffffb),			    bkpt, t_bkpt),
18778
18779#undef  THUMB_VARIANT
18780#define THUMB_VARIANT  & arm_ext_v6t2
18781
18782 TCE("clz",	16f0f10, fab0f080, 2, (RRnpc, RRnpc),		        rd_rm,  t_clz),
18783 TUF("ldc2",	c100000, fc100000, 3, (RCP, RCN, ADDRGLDC),	        lstc,	lstc),
18784 TUF("ldc2l",	c500000, fc500000, 3, (RCP, RCN, ADDRGLDC),		        lstc,	lstc),
18785 TUF("stc2",	c000000, fc000000, 3, (RCP, RCN, ADDRGLDC),	        lstc,	lstc),
18786 TUF("stc2l",	c400000, fc400000, 3, (RCP, RCN, ADDRGLDC),		        lstc,	lstc),
18787 TUF("cdp2",	e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp,    cdp),
18788 TUF("mcr2",	e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b),   co_reg, co_reg),
18789 TUF("mrc2",	e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b),   co_reg, co_reg),
18790
18791#undef  ARM_VARIANT
18792#define ARM_VARIANT    & arm_ext_v5exp /*  ARM Architecture 5TExP.  */
18793#undef  THUMB_VARIANT
18794#define THUMB_VARIANT  & arm_ext_v5exp
18795
18796 TCE("smlabb",	1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
18797 TCE("smlatb",	10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
18798 TCE("smlabt",	10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
18799 TCE("smlatt",	10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
18800
18801 TCE("smlawb",	1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
18802 TCE("smlawt",	12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
18803
18804 TCE("smlalbb",	1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
18805 TCE("smlaltb",	14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
18806 TCE("smlalbt",	14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
18807 TCE("smlaltt",	14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
18808
18809 TCE("smulbb",	1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
18810 TCE("smultb",	16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
18811 TCE("smulbt",	16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
18812 TCE("smultt",	16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
18813
18814 TCE("smulwb",	12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
18815 TCE("smulwt",	12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
18816
18817 TCE("qadd",	1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc),	    rd_rm_rn, t_simd2),
18818 TCE("qdadd",	1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc),	    rd_rm_rn, t_simd2),
18819 TCE("qsub",	1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc),	    rd_rm_rn, t_simd2),
18820 TCE("qdsub",	1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc),	    rd_rm_rn, t_simd2),
18821
18822#undef  ARM_VARIANT
18823#define ARM_VARIANT    & arm_ext_v5e /*  ARM Architecture 5TE.  */
18824#undef  THUMB_VARIANT
18825#define THUMB_VARIANT  & arm_ext_v6t2
18826
18827 TUF("pld",	450f000, f810f000, 1, (ADDR),		     pld,  t_pld),
18828 TC3("ldrd",	00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
18829     ldrd, t_ldstd),
18830 TC3("strd",	00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
18831				       ADDRGLDRS), ldrd, t_ldstd),
18832
18833 TCE("mcrr",	c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18834 TCE("mrrc",	c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18835
18836#undef  ARM_VARIANT
18837#define ARM_VARIANT  & arm_ext_v5j /*  ARM Architecture 5TEJ.  */
18838
18839 TCE("bxj",	12fff20, f3c08f00, 1, (RR),			  bxj, t_bxj),
18840
18841#undef  ARM_VARIANT
18842#define ARM_VARIANT    & arm_ext_v6 /*  ARM V6.  */
18843#undef  THUMB_VARIANT
18844#define THUMB_VARIANT  & arm_ext_v6
18845
18846 TUF("cpsie",     1080000, b660,     2, (CPSF, oI31b),              cpsi,   t_cpsi),
18847 TUF("cpsid",     10c0000, b670,     2, (CPSF, oI31b),              cpsi,   t_cpsi),
18848 tCE("rev",       6bf0f30, _rev,      2, (RRnpc, RRnpc),             rd_rm,  t_rev),
18849 tCE("rev16",     6bf0fb0, _rev16,    2, (RRnpc, RRnpc),             rd_rm,  t_rev),
18850 tCE("revsh",     6ff0fb0, _revsh,    2, (RRnpc, RRnpc),             rd_rm,  t_rev),
18851 tCE("sxth",      6bf0070, _sxth,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
18852 tCE("uxth",      6ff0070, _uxth,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
18853 tCE("sxtb",      6af0070, _sxtb,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
18854 tCE("uxtb",      6ef0070, _uxtb,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
18855 TUF("setend",    1010000, b650,     1, (ENDI),                     setend, t_setend),
18856
18857#undef  THUMB_VARIANT
18858#define THUMB_VARIANT  & arm_ext_v6t2
18859
18860 TCE("ldrex",	1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR),	  ldrex, t_ldrex),
18861 TCE("strex",	1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
18862				      strex,  t_strex),
18863 TUF("mcrr2",	c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18864 TUF("mrrc2",	c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18865
18866 TCE("ssat",	6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat,   t_ssat),
18867 TCE("usat",	6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat,   t_usat),
18868
18869/*  ARM V6 not included in V7M.  */
18870#undef  THUMB_VARIANT
18871#define THUMB_VARIANT  & arm_ext_v6_notm
18872 TUF("rfeia",	8900a00, e990c000, 1, (RRw),			   rfe, rfe),
18873 TUF("rfe",	8900a00, e990c000, 1, (RRw),			   rfe, rfe),
18874  UF(rfeib,	9900a00,           1, (RRw),			   rfe),
18875  UF(rfeda,	8100a00,           1, (RRw),			   rfe),
18876 TUF("rfedb",	9100a00, e810c000, 1, (RRw),			   rfe, rfe),
18877 TUF("rfefd",	8900a00, e990c000, 1, (RRw),			   rfe, rfe),
18878  UF(rfefa,	8100a00,           1, (RRw),			   rfe),
18879 TUF("rfeea",	9100a00, e810c000, 1, (RRw),			   rfe, rfe),
18880  UF(rfeed,	9900a00,           1, (RRw),			   rfe),
18881 TUF("srsia",	8c00500, e980c000, 2, (oRRw, I31w),		   srs,  srs),
18882 TUF("srs",	8c00500, e980c000, 2, (oRRw, I31w),		   srs,  srs),
18883 TUF("srsea",	8c00500, e980c000, 2, (oRRw, I31w),		   srs,  srs),
18884  UF(srsib,	9c00500,           2, (oRRw, I31w),		   srs),
18885  UF(srsfa,	9c00500,           2, (oRRw, I31w),		   srs),
18886  UF(srsda,	8400500,	   2, (oRRw, I31w),		   srs),
18887  UF(srsed,	8400500,	   2, (oRRw, I31w),		   srs),
18888 TUF("srsdb",	9400500, e800c000, 2, (oRRw, I31w),		   srs,  srs),
18889 TUF("srsfd",	9400500, e800c000, 2, (oRRw, I31w),		   srs,  srs),
18890 TUF("cps",	1020000, f3af8100, 1, (I31b),			  imm0, t_cps),
18891
18892/*  ARM V6 not included in V7M (e.g. integer SIMD).  */
18893#undef  THUMB_VARIANT
18894#define THUMB_VARIANT  & arm_ext_v6_dsp
18895 TCE("pkhbt",	6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll),   pkhbt, t_pkhbt),
18896 TCE("pkhtb",	6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar),   pkhtb, t_pkhtb),
18897 TCE("qadd16",	6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18898 TCE("qadd8",	6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18899 TCE("qasx",	6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18900 /* Old name for QASX.  */
18901 TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18902 TCE("qsax",	6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18903 /* Old name for QSAX.  */
18904 TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18905 TCE("qsub16",	6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18906 TCE("qsub8",	6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18907 TCE("sadd16",	6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18908 TCE("sadd8",	6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18909 TCE("sasx",	6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18910 /* Old name for SASX.  */
18911 TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18912 TCE("shadd16",	6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18913 TCE("shadd8",	6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18914 TCE("shasx",   6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18915 /* Old name for SHASX.  */
18916 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18917 TCE("shsax",     6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18918 /* Old name for SHSAX.  */
18919 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18920 TCE("shsub16",	6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18921 TCE("shsub8",	6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18922 TCE("ssax",	6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18923 /* Old name for SSAX.  */
18924 TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18925 TCE("ssub16",	6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18926 TCE("ssub8",	6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18927 TCE("uadd16",	6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18928 TCE("uadd8",	6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18929 TCE("uasx",	6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18930 /* Old name for UASX.  */
18931 TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18932 TCE("uhadd16",	6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18933 TCE("uhadd8",	6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18934 TCE("uhasx",   6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18935 /* Old name for UHASX.  */
18936 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18937 TCE("uhsax",     6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18938 /* Old name for UHSAX.  */
18939 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18940 TCE("uhsub16",	6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18941 TCE("uhsub8",	6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18942 TCE("uqadd16",	6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18943 TCE("uqadd8",	6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18944 TCE("uqasx",   6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18945 /* Old name for UQASX.  */
18946 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18947 TCE("uqsax",     6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18948 /* Old name for UQSAX.  */
18949 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18950 TCE("uqsub16",	6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18951 TCE("uqsub8",	6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18952 TCE("usub16",	6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18953 TCE("usax",	6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18954 /* Old name for USAX.  */
18955 TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18956 TCE("usub8",	6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18957 TCE("sxtah",	6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18958 TCE("sxtab16",	6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18959 TCE("sxtab",	6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18960 TCE("sxtb16",	68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR),	   sxth,  t_sxth),
18961 TCE("uxtah",	6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18962 TCE("uxtab16",	6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18963 TCE("uxtab",	6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18964 TCE("uxtb16",	6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR),	   sxth,  t_sxth),
18965 TCE("sel",	6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
18966 TCE("smlad",	7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18967 TCE("smladx",	7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18968 TCE("smlald",	7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18969 TCE("smlaldx",	7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18970 TCE("smlsd",	7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18971 TCE("smlsdx",	7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18972 TCE("smlsld",	7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18973 TCE("smlsldx",	7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18974 TCE("smmla",	7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18975 TCE("smmlar",	7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18976 TCE("smmls",	75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18977 TCE("smmlsr",	75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18978 TCE("smmul",	750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
18979 TCE("smmulr",	750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
18980 TCE("smuad",	700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
18981 TCE("smuadx",	700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
18982 TCE("smusd",	700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
18983 TCE("smusdx",	700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
18984 TCE("ssat16",	6a00f30, f3200000, 3, (RRnpc, I16, RRnpc),	   ssat16, t_ssat16),
18985 TCE("umaal",	0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,  t_mlal),
18986 TCE("usad8",	780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc),	   smul,   t_simd),
18987 TCE("usada8",	7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla,   t_mla),
18988 TCE("usat16",	6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc),	   usat16, t_usat16),
18989
18990#undef  ARM_VARIANT
18991#define ARM_VARIANT   & arm_ext_v6k
18992#undef  THUMB_VARIANT
18993#define THUMB_VARIANT & arm_ext_v6k
18994
18995 tCE("yield",	320f001, _yield,    0, (), noargs, t_hint),
18996 tCE("wfe",	320f002, _wfe,      0, (), noargs, t_hint),
18997 tCE("wfi",	320f003, _wfi,      0, (), noargs, t_hint),
18998 tCE("sev",	320f004, _sev,      0, (), noargs, t_hint),
18999
19000#undef  THUMB_VARIANT
19001#define THUMB_VARIANT  & arm_ext_v6_notm
19002 TCE("ldrexd",	1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
19003				      ldrexd, t_ldrexd),
19004 TCE("strexd",	1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
19005				       RRnpcb), strexd, t_strexd),
19006
19007#undef  THUMB_VARIANT
19008#define THUMB_VARIANT  & arm_ext_v6t2
19009 TCE("ldrexb",	1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
19010     rd_rn,  rd_rn),
19011 TCE("ldrexh",	1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
19012     rd_rn,  rd_rn),
19013 TCE("strexb",	1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19014     strex, t_strexbh),
19015 TCE("strexh",	1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19016     strex, t_strexbh),
19017 TUF("clrex",	57ff01f, f3bf8f2f, 0, (),			      noargs, noargs),
19018
19019#undef  ARM_VARIANT
19020#define ARM_VARIANT    & arm_ext_sec
19021#undef  THUMB_VARIANT
19022#define THUMB_VARIANT  & arm_ext_sec
19023
19024 TCE("smc",	1600070, f7f08000, 1, (EXPi), smc, t_smc),
19025
19026#undef	ARM_VARIANT
19027#define	ARM_VARIANT    & arm_ext_virt
19028#undef	THUMB_VARIANT
19029#define	THUMB_VARIANT    & arm_ext_virt
19030
19031 TCE("hvc",	1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
19032 TCE("eret",	160006e, f3de8f00, 0, (), noargs, noargs),
19033
19034#undef	ARM_VARIANT
19035#define	ARM_VARIANT    & arm_ext_pan
19036#undef	THUMB_VARIANT
19037#define	THUMB_VARIANT  & arm_ext_pan
19038
19039 TUF("setpan",	1100000, b610, 1, (I7), setpan, t_setpan),
19040
19041#undef  ARM_VARIANT
19042#define ARM_VARIANT    & arm_ext_v6t2
19043#undef  THUMB_VARIANT
19044#define THUMB_VARIANT  & arm_ext_v6t2
19045
19046 TCE("bfc",	7c0001f, f36f0000, 3, (RRnpc, I31, I32),	   bfc, t_bfc),
19047 TCE("bfi",	7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
19048 TCE("sbfx",	7a00050, f3400000, 4, (RR, RR, I31, I32),	   bfx, t_bfx),
19049 TCE("ubfx",	7e00050, f3c00000, 4, (RR, RR, I31, I32),	   bfx, t_bfx),
19050
19051 TCE("mls",	0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
19052 TCE("movw",	3000000, f2400000, 2, (RRnpc, HALF),		    mov16, t_mov16),
19053 TCE("movt",	3400000, f2c00000, 2, (RRnpc, HALF),		    mov16, t_mov16),
19054 TCE("rbit",	6ff0f30, fa90f0a0, 2, (RR, RR),			    rd_rm, t_rbit),
19055
19056 TC3("ldrht",	03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19057 TC3("ldrsht",	03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19058 TC3("ldrsbt",	03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19059 TC3("strht",	02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19060
19061 /* Thumb-only instructions.  */
19062#undef  ARM_VARIANT
19063#define ARM_VARIANT NULL
19064  TUE("cbnz",     0,           b900,     2, (RR, EXP), 0, t_cbz),
19065  TUE("cbz",      0,           b100,     2, (RR, EXP), 0, t_cbz),
19066
19067 /* ARM does not really have an IT instruction, so always allow it.
19068    The opcode is copied from Thumb in order to allow warnings in
19069    -mimplicit-it=[never | arm] modes.  */
19070#undef  ARM_VARIANT
19071#define ARM_VARIANT  & arm_ext_v1
19072
19073 TUE("it",        bf08,        bf08,     1, (COND),   it,    t_it),
19074 TUE("itt",       bf0c,        bf0c,     1, (COND),   it,    t_it),
19075 TUE("ite",       bf04,        bf04,     1, (COND),   it,    t_it),
19076 TUE("ittt",      bf0e,        bf0e,     1, (COND),   it,    t_it),
19077 TUE("itet",      bf06,        bf06,     1, (COND),   it,    t_it),
19078 TUE("itte",      bf0a,        bf0a,     1, (COND),   it,    t_it),
19079 TUE("itee",      bf02,        bf02,     1, (COND),   it,    t_it),
19080 TUE("itttt",     bf0f,        bf0f,     1, (COND),   it,    t_it),
19081 TUE("itett",     bf07,        bf07,     1, (COND),   it,    t_it),
19082 TUE("ittet",     bf0b,        bf0b,     1, (COND),   it,    t_it),
19083 TUE("iteet",     bf03,        bf03,     1, (COND),   it,    t_it),
19084 TUE("ittte",     bf0d,        bf0d,     1, (COND),   it,    t_it),
19085 TUE("itete",     bf05,        bf05,     1, (COND),   it,    t_it),
19086 TUE("ittee",     bf09,        bf09,     1, (COND),   it,    t_it),
19087 TUE("iteee",     bf01,        bf01,     1, (COND),   it,    t_it),
19088 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent.  */
19089 TC3("rrx",       01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
19090 TC3("rrxs",      01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
19091
19092 /* Thumb2 only instructions.  */
19093#undef  ARM_VARIANT
19094#define ARM_VARIANT  NULL
19095
19096 TCE("addw",	0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
19097 TCE("subw",	0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
19098 TCE("orn",       0, ea600000, 3, (RR, oRR, SH),  0, t_orn),
19099 TCE("orns",      0, ea700000, 3, (RR, oRR, SH),  0, t_orn),
19100 TCE("tbb",       0, e8d0f000, 1, (TB), 0, t_tb),
19101 TCE("tbh",       0, e8d0f010, 1, (TB), 0, t_tb),
19102
19103 /* Hardware division instructions.  */
19104#undef  ARM_VARIANT
19105#define ARM_VARIANT    & arm_ext_adiv
19106#undef  THUMB_VARIANT
19107#define THUMB_VARIANT  & arm_ext_div
19108
19109 TCE("sdiv",	710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
19110 TCE("udiv",	730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
19111
19112 /* ARM V6M/V7 instructions.  */
19113#undef  ARM_VARIANT
19114#define ARM_VARIANT    & arm_ext_barrier
19115#undef  THUMB_VARIANT
19116#define THUMB_VARIANT  & arm_ext_barrier
19117
19118 TUF("dmb",	57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
19119 TUF("dsb",	57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
19120 TUF("isb",	57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
19121
19122 /* ARM V7 instructions.  */
19123#undef  ARM_VARIANT
19124#define ARM_VARIANT    & arm_ext_v7
19125#undef  THUMB_VARIANT
19126#define THUMB_VARIANT  & arm_ext_v7
19127
19128 TUF("pli",	450f000, f910f000, 1, (ADDR),	  pli,	    t_pld),
19129 TCE("dbg",	320f0f0, f3af80f0, 1, (I15),	  dbg,	    t_dbg),
19130
19131#undef  ARM_VARIANT
19132#define ARM_VARIANT    & arm_ext_mp
19133#undef  THUMB_VARIANT
19134#define THUMB_VARIANT  & arm_ext_mp
19135
19136 TUF("pldw",	410f000, f830f000, 1, (ADDR),	pld,	t_pld),
19137
19138 /* ARMv8 instructions.  */
19139#undef  ARM_VARIANT
19140#define ARM_VARIANT   & arm_ext_v8
19141#undef  THUMB_VARIANT
19142#define THUMB_VARIANT & arm_ext_v8
19143
19144 tCE("sevl",	320f005, _sevl,    0, (),		noargs,	t_hint),
19145 TUE("hlt",	1000070, ba80,     1, (oIffffb),	bkpt,	t_hlt),
19146 TCE("ldaex",	1900e9f, e8d00fef, 2, (RRnpc, RRnpcb),	rd_rn,	rd_rn),
19147 TCE("ldaexd",	1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
19148							ldrexd, t_ldrexd),
19149 TCE("ldaexb",	1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb),	rd_rn,  rd_rn),
19150 TCE("ldaexh",	1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb),	rd_rn,  rd_rn),
19151 TCE("stlex",	1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
19152							stlex,  t_stlex),
19153 TCE("stlexd",	1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
19154							strexd, t_strexd),
19155 TCE("stlexb",	1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
19156							stlex, t_stlex),
19157 TCE("stlexh",	1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
19158							stlex, t_stlex),
19159 TCE("lda",	1900c9f, e8d00faf, 2, (RRnpc, RRnpcb),	rd_rn,	rd_rn),
19160 TCE("ldab",	1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb),	rd_rn,  rd_rn),
19161 TCE("ldah",	1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb),	rd_rn,  rd_rn),
19162 TCE("stl",	180fc90, e8c00faf, 2, (RRnpc, RRnpcb),	rm_rn,  rd_rn),
19163 TCE("stlb",	1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb),	rm_rn,  rd_rn),
19164 TCE("stlh",	1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb),	rm_rn,  rd_rn),
19165
19166 /* ARMv8 T32 only.  */
19167#undef  ARM_VARIANT
19168#define ARM_VARIANT  NULL
19169 TUF("dcps1",	0,	 f78f8001, 0, (),	noargs, noargs),
19170 TUF("dcps2",	0,	 f78f8002, 0, (),	noargs, noargs),
19171 TUF("dcps3",	0,	 f78f8003, 0, (),	noargs, noargs),
19172
19173  /* FP for ARMv8.  */
19174#undef  ARM_VARIANT
19175#define ARM_VARIANT   & fpu_vfp_ext_armv8xd
19176#undef  THUMB_VARIANT
19177#define THUMB_VARIANT & fpu_vfp_ext_armv8xd
19178
19179  nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD),		vsel),
19180  nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD),		vsel),
19181  nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD),		vsel),
19182  nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD),		vsel),
19183  nUF(vmaxnm, _vmaxnm, 3, (RNSDQ, oRNSDQ, RNSDQ),	vmaxnm),
19184  nUF(vminnm, _vminnm, 3, (RNSDQ, oRNSDQ, RNSDQ),	vmaxnm),
19185  nUF(vcvta,  _vcvta,  2, (RNSDQ, oRNSDQ),		neon_cvta),
19186  nUF(vcvtn,  _vcvta,  2, (RNSDQ, oRNSDQ),		neon_cvtn),
19187  nUF(vcvtp,  _vcvta,  2, (RNSDQ, oRNSDQ),		neon_cvtp),
19188  nUF(vcvtm,  _vcvta,  2, (RNSDQ, oRNSDQ),		neon_cvtm),
19189  nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ),		vrintr),
19190  nCE(vrintz, _vrintr, 2, (RNSDQ, oRNSDQ),		vrintz),
19191  nCE(vrintx, _vrintr, 2, (RNSDQ, oRNSDQ),		vrintx),
19192  nUF(vrinta, _vrinta, 2, (RNSDQ, oRNSDQ),		vrinta),
19193  nUF(vrintn, _vrinta, 2, (RNSDQ, oRNSDQ),		vrintn),
19194  nUF(vrintp, _vrinta, 2, (RNSDQ, oRNSDQ),		vrintp),
19195  nUF(vrintm, _vrinta, 2, (RNSDQ, oRNSDQ),		vrintm),
19196
19197  /* Crypto v1 extensions.  */
19198#undef  ARM_VARIANT
19199#define ARM_VARIANT & fpu_crypto_ext_armv8
19200#undef  THUMB_VARIANT
19201#define THUMB_VARIANT & fpu_crypto_ext_armv8
19202
19203  nUF(aese, _aes, 2, (RNQ, RNQ), aese),
19204  nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
19205  nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
19206  nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
19207  nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
19208  nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
19209  nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
19210  nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
19211  nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
19212  nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
19213  nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
19214  nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
19215  nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
19216  nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
19217
19218#undef  ARM_VARIANT
19219#define ARM_VARIANT   & crc_ext_armv8
19220#undef  THUMB_VARIANT
19221#define THUMB_VARIANT & crc_ext_armv8
19222  TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
19223  TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
19224  TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
19225  TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
19226  TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
19227  TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
19228
19229#undef  ARM_VARIANT
19230#define ARM_VARIANT  & fpu_fpa_ext_v1  /* Core FPA instruction set (V1).  */
19231#undef  THUMB_VARIANT
19232#define THUMB_VARIANT NULL
19233
19234 cCE("wfs",	e200110, 1, (RR),	     rd),
19235 cCE("rfs",	e300110, 1, (RR),	     rd),
19236 cCE("wfc",	e400110, 1, (RR),	     rd),
19237 cCE("rfc",	e500110, 1, (RR),	     rd),
19238
19239 cCL("ldfs",	c100100, 2, (RF, ADDRGLDC),  rd_cpaddr),
19240 cCL("ldfd",	c108100, 2, (RF, ADDRGLDC),  rd_cpaddr),
19241 cCL("ldfe",	c500100, 2, (RF, ADDRGLDC),  rd_cpaddr),
19242 cCL("ldfp",	c508100, 2, (RF, ADDRGLDC),  rd_cpaddr),
19243
19244 cCL("stfs",	c000100, 2, (RF, ADDRGLDC),  rd_cpaddr),
19245 cCL("stfd",	c008100, 2, (RF, ADDRGLDC),  rd_cpaddr),
19246 cCL("stfe",	c400100, 2, (RF, ADDRGLDC),  rd_cpaddr),
19247 cCL("stfp",	c408100, 2, (RF, ADDRGLDC),  rd_cpaddr),
19248
19249 cCL("mvfs",	e008100, 2, (RF, RF_IF),     rd_rm),
19250 cCL("mvfsp",	e008120, 2, (RF, RF_IF),     rd_rm),
19251 cCL("mvfsm",	e008140, 2, (RF, RF_IF),     rd_rm),
19252 cCL("mvfsz",	e008160, 2, (RF, RF_IF),     rd_rm),
19253 cCL("mvfd",	e008180, 2, (RF, RF_IF),     rd_rm),
19254 cCL("mvfdp",	e0081a0, 2, (RF, RF_IF),     rd_rm),
19255 cCL("mvfdm",	e0081c0, 2, (RF, RF_IF),     rd_rm),
19256 cCL("mvfdz",	e0081e0, 2, (RF, RF_IF),     rd_rm),
19257 cCL("mvfe",	e088100, 2, (RF, RF_IF),     rd_rm),
19258 cCL("mvfep",	e088120, 2, (RF, RF_IF),     rd_rm),
19259 cCL("mvfem",	e088140, 2, (RF, RF_IF),     rd_rm),
19260 cCL("mvfez",	e088160, 2, (RF, RF_IF),     rd_rm),
19261
19262 cCL("mnfs",	e108100, 2, (RF, RF_IF),     rd_rm),
19263 cCL("mnfsp",	e108120, 2, (RF, RF_IF),     rd_rm),
19264 cCL("mnfsm",	e108140, 2, (RF, RF_IF),     rd_rm),
19265 cCL("mnfsz",	e108160, 2, (RF, RF_IF),     rd_rm),
19266 cCL("mnfd",	e108180, 2, (RF, RF_IF),     rd_rm),
19267 cCL("mnfdp",	e1081a0, 2, (RF, RF_IF),     rd_rm),
19268 cCL("mnfdm",	e1081c0, 2, (RF, RF_IF),     rd_rm),
19269 cCL("mnfdz",	e1081e0, 2, (RF, RF_IF),     rd_rm),
19270 cCL("mnfe",	e188100, 2, (RF, RF_IF),     rd_rm),
19271 cCL("mnfep",	e188120, 2, (RF, RF_IF),     rd_rm),
19272 cCL("mnfem",	e188140, 2, (RF, RF_IF),     rd_rm),
19273 cCL("mnfez",	e188160, 2, (RF, RF_IF),     rd_rm),
19274
19275 cCL("abss",	e208100, 2, (RF, RF_IF),     rd_rm),
19276 cCL("abssp",	e208120, 2, (RF, RF_IF),     rd_rm),
19277 cCL("abssm",	e208140, 2, (RF, RF_IF),     rd_rm),
19278 cCL("abssz",	e208160, 2, (RF, RF_IF),     rd_rm),
19279 cCL("absd",	e208180, 2, (RF, RF_IF),     rd_rm),
19280 cCL("absdp",	e2081a0, 2, (RF, RF_IF),     rd_rm),
19281 cCL("absdm",	e2081c0, 2, (RF, RF_IF),     rd_rm),
19282 cCL("absdz",	e2081e0, 2, (RF, RF_IF),     rd_rm),
19283 cCL("abse",	e288100, 2, (RF, RF_IF),     rd_rm),
19284 cCL("absep",	e288120, 2, (RF, RF_IF),     rd_rm),
19285 cCL("absem",	e288140, 2, (RF, RF_IF),     rd_rm),
19286 cCL("absez",	e288160, 2, (RF, RF_IF),     rd_rm),
19287
19288 cCL("rnds",	e308100, 2, (RF, RF_IF),     rd_rm),
19289 cCL("rndsp",	e308120, 2, (RF, RF_IF),     rd_rm),
19290 cCL("rndsm",	e308140, 2, (RF, RF_IF),     rd_rm),
19291 cCL("rndsz",	e308160, 2, (RF, RF_IF),     rd_rm),
19292 cCL("rndd",	e308180, 2, (RF, RF_IF),     rd_rm),
19293 cCL("rnddp",	e3081a0, 2, (RF, RF_IF),     rd_rm),
19294 cCL("rnddm",	e3081c0, 2, (RF, RF_IF),     rd_rm),
19295 cCL("rnddz",	e3081e0, 2, (RF, RF_IF),     rd_rm),
19296 cCL("rnde",	e388100, 2, (RF, RF_IF),     rd_rm),
19297 cCL("rndep",	e388120, 2, (RF, RF_IF),     rd_rm),
19298 cCL("rndem",	e388140, 2, (RF, RF_IF),     rd_rm),
19299 cCL("rndez",	e388160, 2, (RF, RF_IF),     rd_rm),
19300
19301 cCL("sqts",	e408100, 2, (RF, RF_IF),     rd_rm),
19302 cCL("sqtsp",	e408120, 2, (RF, RF_IF),     rd_rm),
19303 cCL("sqtsm",	e408140, 2, (RF, RF_IF),     rd_rm),
19304 cCL("sqtsz",	e408160, 2, (RF, RF_IF),     rd_rm),
19305 cCL("sqtd",	e408180, 2, (RF, RF_IF),     rd_rm),
19306 cCL("sqtdp",	e4081a0, 2, (RF, RF_IF),     rd_rm),
19307 cCL("sqtdm",	e4081c0, 2, (RF, RF_IF),     rd_rm),
19308 cCL("sqtdz",	e4081e0, 2, (RF, RF_IF),     rd_rm),
19309 cCL("sqte",	e488100, 2, (RF, RF_IF),     rd_rm),
19310 cCL("sqtep",	e488120, 2, (RF, RF_IF),     rd_rm),
19311 cCL("sqtem",	e488140, 2, (RF, RF_IF),     rd_rm),
19312 cCL("sqtez",	e488160, 2, (RF, RF_IF),     rd_rm),
19313
19314 cCL("logs",	e508100, 2, (RF, RF_IF),     rd_rm),
19315 cCL("logsp",	e508120, 2, (RF, RF_IF),     rd_rm),
19316 cCL("logsm",	e508140, 2, (RF, RF_IF),     rd_rm),
19317 cCL("logsz",	e508160, 2, (RF, RF_IF),     rd_rm),
19318 cCL("logd",	e508180, 2, (RF, RF_IF),     rd_rm),
19319 cCL("logdp",	e5081a0, 2, (RF, RF_IF),     rd_rm),
19320 cCL("logdm",	e5081c0, 2, (RF, RF_IF),     rd_rm),
19321 cCL("logdz",	e5081e0, 2, (RF, RF_IF),     rd_rm),
19322 cCL("loge",	e588100, 2, (RF, RF_IF),     rd_rm),
19323 cCL("logep",	e588120, 2, (RF, RF_IF),     rd_rm),
19324 cCL("logem",	e588140, 2, (RF, RF_IF),     rd_rm),
19325 cCL("logez",	e588160, 2, (RF, RF_IF),     rd_rm),
19326
19327 cCL("lgns",	e608100, 2, (RF, RF_IF),     rd_rm),
19328 cCL("lgnsp",	e608120, 2, (RF, RF_IF),     rd_rm),
19329 cCL("lgnsm",	e608140, 2, (RF, RF_IF),     rd_rm),
19330 cCL("lgnsz",	e608160, 2, (RF, RF_IF),     rd_rm),
19331 cCL("lgnd",	e608180, 2, (RF, RF_IF),     rd_rm),
19332 cCL("lgndp",	e6081a0, 2, (RF, RF_IF),     rd_rm),
19333 cCL("lgndm",	e6081c0, 2, (RF, RF_IF),     rd_rm),
19334 cCL("lgndz",	e6081e0, 2, (RF, RF_IF),     rd_rm),
19335 cCL("lgne",	e688100, 2, (RF, RF_IF),     rd_rm),
19336 cCL("lgnep",	e688120, 2, (RF, RF_IF),     rd_rm),
19337 cCL("lgnem",	e688140, 2, (RF, RF_IF),     rd_rm),
19338 cCL("lgnez",	e688160, 2, (RF, RF_IF),     rd_rm),
19339
19340 cCL("exps",	e708100, 2, (RF, RF_IF),     rd_rm),
19341 cCL("expsp",	e708120, 2, (RF, RF_IF),     rd_rm),
19342 cCL("expsm",	e708140, 2, (RF, RF_IF),     rd_rm),
19343 cCL("expsz",	e708160, 2, (RF, RF_IF),     rd_rm),
19344 cCL("expd",	e708180, 2, (RF, RF_IF),     rd_rm),
19345 cCL("expdp",	e7081a0, 2, (RF, RF_IF),     rd_rm),
19346 cCL("expdm",	e7081c0, 2, (RF, RF_IF),     rd_rm),
19347 cCL("expdz",	e7081e0, 2, (RF, RF_IF),     rd_rm),
19348 cCL("expe",	e788100, 2, (RF, RF_IF),     rd_rm),
19349 cCL("expep",	e788120, 2, (RF, RF_IF),     rd_rm),
19350 cCL("expem",	e788140, 2, (RF, RF_IF),     rd_rm),
19351 cCL("expez",	e788160, 2, (RF, RF_IF),     rd_rm),
19352
19353 cCL("sins",	e808100, 2, (RF, RF_IF),     rd_rm),
19354 cCL("sinsp",	e808120, 2, (RF, RF_IF),     rd_rm),
19355 cCL("sinsm",	e808140, 2, (RF, RF_IF),     rd_rm),
19356 cCL("sinsz",	e808160, 2, (RF, RF_IF),     rd_rm),
19357 cCL("sind",	e808180, 2, (RF, RF_IF),     rd_rm),
19358 cCL("sindp",	e8081a0, 2, (RF, RF_IF),     rd_rm),
19359 cCL("sindm",	e8081c0, 2, (RF, RF_IF),     rd_rm),
19360 cCL("sindz",	e8081e0, 2, (RF, RF_IF),     rd_rm),
19361 cCL("sine",	e888100, 2, (RF, RF_IF),     rd_rm),
19362 cCL("sinep",	e888120, 2, (RF, RF_IF),     rd_rm),
19363 cCL("sinem",	e888140, 2, (RF, RF_IF),     rd_rm),
19364 cCL("sinez",	e888160, 2, (RF, RF_IF),     rd_rm),
19365
19366 cCL("coss",	e908100, 2, (RF, RF_IF),     rd_rm),
19367 cCL("cossp",	e908120, 2, (RF, RF_IF),     rd_rm),
19368 cCL("cossm",	e908140, 2, (RF, RF_IF),     rd_rm),
19369 cCL("cossz",	e908160, 2, (RF, RF_IF),     rd_rm),
19370 cCL("cosd",	e908180, 2, (RF, RF_IF),     rd_rm),
19371 cCL("cosdp",	e9081a0, 2, (RF, RF_IF),     rd_rm),
19372 cCL("cosdm",	e9081c0, 2, (RF, RF_IF),     rd_rm),
19373 cCL("cosdz",	e9081e0, 2, (RF, RF_IF),     rd_rm),
19374 cCL("cose",	e988100, 2, (RF, RF_IF),     rd_rm),
19375 cCL("cosep",	e988120, 2, (RF, RF_IF),     rd_rm),
19376 cCL("cosem",	e988140, 2, (RF, RF_IF),     rd_rm),
19377 cCL("cosez",	e988160, 2, (RF, RF_IF),     rd_rm),
19378
19379 cCL("tans",	ea08100, 2, (RF, RF_IF),     rd_rm),
19380 cCL("tansp",	ea08120, 2, (RF, RF_IF),     rd_rm),
19381 cCL("tansm",	ea08140, 2, (RF, RF_IF),     rd_rm),
19382 cCL("tansz",	ea08160, 2, (RF, RF_IF),     rd_rm),
19383 cCL("tand",	ea08180, 2, (RF, RF_IF),     rd_rm),
19384 cCL("tandp",	ea081a0, 2, (RF, RF_IF),     rd_rm),
19385 cCL("tandm",	ea081c0, 2, (RF, RF_IF),     rd_rm),
19386 cCL("tandz",	ea081e0, 2, (RF, RF_IF),     rd_rm),
19387 cCL("tane",	ea88100, 2, (RF, RF_IF),     rd_rm),
19388 cCL("tanep",	ea88120, 2, (RF, RF_IF),     rd_rm),
19389 cCL("tanem",	ea88140, 2, (RF, RF_IF),     rd_rm),
19390 cCL("tanez",	ea88160, 2, (RF, RF_IF),     rd_rm),
19391
19392 cCL("asns",	eb08100, 2, (RF, RF_IF),     rd_rm),
19393 cCL("asnsp",	eb08120, 2, (RF, RF_IF),     rd_rm),
19394 cCL("asnsm",	eb08140, 2, (RF, RF_IF),     rd_rm),
19395 cCL("asnsz",	eb08160, 2, (RF, RF_IF),     rd_rm),
19396 cCL("asnd",	eb08180, 2, (RF, RF_IF),     rd_rm),
19397 cCL("asndp",	eb081a0, 2, (RF, RF_IF),     rd_rm),
19398 cCL("asndm",	eb081c0, 2, (RF, RF_IF),     rd_rm),
19399 cCL("asndz",	eb081e0, 2, (RF, RF_IF),     rd_rm),
19400 cCL("asne",	eb88100, 2, (RF, RF_IF),     rd_rm),
19401 cCL("asnep",	eb88120, 2, (RF, RF_IF),     rd_rm),
19402 cCL("asnem",	eb88140, 2, (RF, RF_IF),     rd_rm),
19403 cCL("asnez",	eb88160, 2, (RF, RF_IF),     rd_rm),
19404
19405 cCL("acss",	ec08100, 2, (RF, RF_IF),     rd_rm),
19406 cCL("acssp",	ec08120, 2, (RF, RF_IF),     rd_rm),
19407 cCL("acssm",	ec08140, 2, (RF, RF_IF),     rd_rm),
19408 cCL("acssz",	ec08160, 2, (RF, RF_IF),     rd_rm),
19409 cCL("acsd",	ec08180, 2, (RF, RF_IF),     rd_rm),
19410 cCL("acsdp",	ec081a0, 2, (RF, RF_IF),     rd_rm),
19411 cCL("acsdm",	ec081c0, 2, (RF, RF_IF),     rd_rm),
19412 cCL("acsdz",	ec081e0, 2, (RF, RF_IF),     rd_rm),
19413 cCL("acse",	ec88100, 2, (RF, RF_IF),     rd_rm),
19414 cCL("acsep",	ec88120, 2, (RF, RF_IF),     rd_rm),
19415 cCL("acsem",	ec88140, 2, (RF, RF_IF),     rd_rm),
19416 cCL("acsez",	ec88160, 2, (RF, RF_IF),     rd_rm),
19417
19418 cCL("atns",	ed08100, 2, (RF, RF_IF),     rd_rm),
19419 cCL("atnsp",	ed08120, 2, (RF, RF_IF),     rd_rm),
19420 cCL("atnsm",	ed08140, 2, (RF, RF_IF),     rd_rm),
19421 cCL("atnsz",	ed08160, 2, (RF, RF_IF),     rd_rm),
19422 cCL("atnd",	ed08180, 2, (RF, RF_IF),     rd_rm),
19423 cCL("atndp",	ed081a0, 2, (RF, RF_IF),     rd_rm),
19424 cCL("atndm",	ed081c0, 2, (RF, RF_IF),     rd_rm),
19425 cCL("atndz",	ed081e0, 2, (RF, RF_IF),     rd_rm),
19426 cCL("atne",	ed88100, 2, (RF, RF_IF),     rd_rm),
19427 cCL("atnep",	ed88120, 2, (RF, RF_IF),     rd_rm),
19428 cCL("atnem",	ed88140, 2, (RF, RF_IF),     rd_rm),
19429 cCL("atnez",	ed88160, 2, (RF, RF_IF),     rd_rm),
19430
19431 cCL("urds",	ee08100, 2, (RF, RF_IF),     rd_rm),
19432 cCL("urdsp",	ee08120, 2, (RF, RF_IF),     rd_rm),
19433 cCL("urdsm",	ee08140, 2, (RF, RF_IF),     rd_rm),
19434 cCL("urdsz",	ee08160, 2, (RF, RF_IF),     rd_rm),
19435 cCL("urdd",	ee08180, 2, (RF, RF_IF),     rd_rm),
19436 cCL("urddp",	ee081a0, 2, (RF, RF_IF),     rd_rm),
19437 cCL("urddm",	ee081c0, 2, (RF, RF_IF),     rd_rm),
19438 cCL("urddz",	ee081e0, 2, (RF, RF_IF),     rd_rm),
19439 cCL("urde",	ee88100, 2, (RF, RF_IF),     rd_rm),
19440 cCL("urdep",	ee88120, 2, (RF, RF_IF),     rd_rm),
19441 cCL("urdem",	ee88140, 2, (RF, RF_IF),     rd_rm),
19442 cCL("urdez",	ee88160, 2, (RF, RF_IF),     rd_rm),
19443
19444 cCL("nrms",	ef08100, 2, (RF, RF_IF),     rd_rm),
19445 cCL("nrmsp",	ef08120, 2, (RF, RF_IF),     rd_rm),
19446 cCL("nrmsm",	ef08140, 2, (RF, RF_IF),     rd_rm),
19447 cCL("nrmsz",	ef08160, 2, (RF, RF_IF),     rd_rm),
19448 cCL("nrmd",	ef08180, 2, (RF, RF_IF),     rd_rm),
19449 cCL("nrmdp",	ef081a0, 2, (RF, RF_IF),     rd_rm),
19450 cCL("nrmdm",	ef081c0, 2, (RF, RF_IF),     rd_rm),
19451 cCL("nrmdz",	ef081e0, 2, (RF, RF_IF),     rd_rm),
19452 cCL("nrme",	ef88100, 2, (RF, RF_IF),     rd_rm),
19453 cCL("nrmep",	ef88120, 2, (RF, RF_IF),     rd_rm),
19454 cCL("nrmem",	ef88140, 2, (RF, RF_IF),     rd_rm),
19455 cCL("nrmez",	ef88160, 2, (RF, RF_IF),     rd_rm),
19456
19457 cCL("adfs",	e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
19458 cCL("adfsp",	e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
19459 cCL("adfsm",	e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
19460 cCL("adfsz",	e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
19461 cCL("adfd",	e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
19462 cCL("adfdp",	e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19463 cCL("adfdm",	e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19464 cCL("adfdz",	e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19465 cCL("adfe",	e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
19466 cCL("adfep",	e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
19467 cCL("adfem",	e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
19468 cCL("adfez",	e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
19469
19470 cCL("sufs",	e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
19471 cCL("sufsp",	e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
19472 cCL("sufsm",	e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
19473 cCL("sufsz",	e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
19474 cCL("sufd",	e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
19475 cCL("sufdp",	e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19476 cCL("sufdm",	e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19477 cCL("sufdz",	e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19478 cCL("sufe",	e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
19479 cCL("sufep",	e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
19480 cCL("sufem",	e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
19481 cCL("sufez",	e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
19482
19483 cCL("rsfs",	e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
19484 cCL("rsfsp",	e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
19485 cCL("rsfsm",	e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
19486 cCL("rsfsz",	e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
19487 cCL("rsfd",	e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
19488 cCL("rsfdp",	e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19489 cCL("rsfdm",	e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19490 cCL("rsfdz",	e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19491 cCL("rsfe",	e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
19492 cCL("rsfep",	e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
19493 cCL("rsfem",	e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
19494 cCL("rsfez",	e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
19495
19496 cCL("mufs",	e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
19497 cCL("mufsp",	e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
19498 cCL("mufsm",	e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
19499 cCL("mufsz",	e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
19500 cCL("mufd",	e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
19501 cCL("mufdp",	e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19502 cCL("mufdm",	e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19503 cCL("mufdz",	e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19504 cCL("mufe",	e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
19505 cCL("mufep",	e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
19506 cCL("mufem",	e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
19507 cCL("mufez",	e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
19508
19509 cCL("dvfs",	e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
19510 cCL("dvfsp",	e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
19511 cCL("dvfsm",	e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
19512 cCL("dvfsz",	e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
19513 cCL("dvfd",	e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
19514 cCL("dvfdp",	e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19515 cCL("dvfdm",	e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19516 cCL("dvfdz",	e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19517 cCL("dvfe",	e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
19518 cCL("dvfep",	e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
19519 cCL("dvfem",	e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
19520 cCL("dvfez",	e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
19521
19522 cCL("rdfs",	e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
19523 cCL("rdfsp",	e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
19524 cCL("rdfsm",	e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
19525 cCL("rdfsz",	e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
19526 cCL("rdfd",	e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
19527 cCL("rdfdp",	e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19528 cCL("rdfdm",	e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19529 cCL("rdfdz",	e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19530 cCL("rdfe",	e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
19531 cCL("rdfep",	e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
19532 cCL("rdfem",	e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
19533 cCL("rdfez",	e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
19534
19535 cCL("pows",	e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
19536 cCL("powsp",	e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
19537 cCL("powsm",	e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
19538 cCL("powsz",	e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
19539 cCL("powd",	e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
19540 cCL("powdp",	e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19541 cCL("powdm",	e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19542 cCL("powdz",	e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19543 cCL("powe",	e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
19544 cCL("powep",	e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
19545 cCL("powem",	e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
19546 cCL("powez",	e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
19547
19548 cCL("rpws",	e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
19549 cCL("rpwsp",	e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
19550 cCL("rpwsm",	e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
19551 cCL("rpwsz",	e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
19552 cCL("rpwd",	e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
19553 cCL("rpwdp",	e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19554 cCL("rpwdm",	e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19555 cCL("rpwdz",	e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19556 cCL("rpwe",	e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
19557 cCL("rpwep",	e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
19558 cCL("rpwem",	e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
19559 cCL("rpwez",	e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
19560
19561 cCL("rmfs",	e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
19562 cCL("rmfsp",	e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
19563 cCL("rmfsm",	e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
19564 cCL("rmfsz",	e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
19565 cCL("rmfd",	e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
19566 cCL("rmfdp",	e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19567 cCL("rmfdm",	e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19568 cCL("rmfdz",	e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19569 cCL("rmfe",	e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
19570 cCL("rmfep",	e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
19571 cCL("rmfem",	e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
19572 cCL("rmfez",	e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
19573
19574 cCL("fmls",	e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
19575 cCL("fmlsp",	e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
19576 cCL("fmlsm",	e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
19577 cCL("fmlsz",	e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
19578 cCL("fmld",	e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
19579 cCL("fmldp",	e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19580 cCL("fmldm",	e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19581 cCL("fmldz",	e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19582 cCL("fmle",	e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
19583 cCL("fmlep",	e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
19584 cCL("fmlem",	e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
19585 cCL("fmlez",	e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
19586
19587 cCL("fdvs",	ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
19588 cCL("fdvsp",	ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
19589 cCL("fdvsm",	ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
19590 cCL("fdvsz",	ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
19591 cCL("fdvd",	ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
19592 cCL("fdvdp",	ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19593 cCL("fdvdm",	ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19594 cCL("fdvdz",	ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19595 cCL("fdve",	ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
19596 cCL("fdvep",	ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
19597 cCL("fdvem",	ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
19598 cCL("fdvez",	ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
19599
19600 cCL("frds",	eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
19601 cCL("frdsp",	eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
19602 cCL("frdsm",	eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
19603 cCL("frdsz",	eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
19604 cCL("frdd",	eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
19605 cCL("frddp",	eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19606 cCL("frddm",	eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19607 cCL("frddz",	eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19608 cCL("frde",	eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
19609 cCL("frdep",	eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
19610 cCL("frdem",	eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
19611 cCL("frdez",	eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
19612
19613 cCL("pols",	ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
19614 cCL("polsp",	ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
19615 cCL("polsm",	ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
19616 cCL("polsz",	ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
19617 cCL("pold",	ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
19618 cCL("poldp",	ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19619 cCL("poldm",	ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19620 cCL("poldz",	ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19621 cCL("pole",	ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
19622 cCL("polep",	ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
19623 cCL("polem",	ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
19624 cCL("polez",	ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
19625
19626 cCE("cmf",	e90f110, 2, (RF, RF_IF),     fpa_cmp),
19627 C3E("cmfe",	ed0f110, 2, (RF, RF_IF),     fpa_cmp),
19628 cCE("cnf",	eb0f110, 2, (RF, RF_IF),     fpa_cmp),
19629 C3E("cnfe",	ef0f110, 2, (RF, RF_IF),     fpa_cmp),
19630
19631 cCL("flts",	e000110, 2, (RF, RR),	     rn_rd),
19632 cCL("fltsp",	e000130, 2, (RF, RR),	     rn_rd),
19633 cCL("fltsm",	e000150, 2, (RF, RR),	     rn_rd),
19634 cCL("fltsz",	e000170, 2, (RF, RR),	     rn_rd),
19635 cCL("fltd",	e000190, 2, (RF, RR),	     rn_rd),
19636 cCL("fltdp",	e0001b0, 2, (RF, RR),	     rn_rd),
19637 cCL("fltdm",	e0001d0, 2, (RF, RR),	     rn_rd),
19638 cCL("fltdz",	e0001f0, 2, (RF, RR),	     rn_rd),
19639 cCL("flte",	e080110, 2, (RF, RR),	     rn_rd),
19640 cCL("fltep",	e080130, 2, (RF, RR),	     rn_rd),
19641 cCL("fltem",	e080150, 2, (RF, RR),	     rn_rd),
19642 cCL("fltez",	e080170, 2, (RF, RR),	     rn_rd),
19643
19644  /* The implementation of the FIX instruction is broken on some
19645     assemblers, in that it accepts a precision specifier as well as a
19646     rounding specifier, even though this is meaningless.  For
19647     compatibility with those assemblers we accept it too, though of
19648     course it does not set any bits.  */
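  /* For example, "fixsz r0, f1" is accepted here and assembles to exactly
     the same encoding as "fixz r0, f1"; note the identical opcode values in
     the entries below.  */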
19649 cCE("fix",	e100110, 2, (RR, RF),	     rd_rm),
19650 cCL("fixp",	e100130, 2, (RR, RF),	     rd_rm),
19651 cCL("fixm",	e100150, 2, (RR, RF),	     rd_rm),
19652 cCL("fixz",	e100170, 2, (RR, RF),	     rd_rm),
19653 cCL("fixsp",	e100130, 2, (RR, RF),	     rd_rm),
19654 cCL("fixsm",	e100150, 2, (RR, RF),	     rd_rm),
19655 cCL("fixsz",	e100170, 2, (RR, RF),	     rd_rm),
19656 cCL("fixdp",	e100130, 2, (RR, RF),	     rd_rm),
19657 cCL("fixdm",	e100150, 2, (RR, RF),	     rd_rm),
19658 cCL("fixdz",	e100170, 2, (RR, RF),	     rd_rm),
19659 cCL("fixep",	e100130, 2, (RR, RF),	     rd_rm),
19660 cCL("fixem",	e100150, 2, (RR, RF),	     rd_rm),
19661 cCL("fixez",	e100170, 2, (RR, RF),	     rd_rm),
19662
19663  /* Instructions that were new with the real FPA; call them V2.  */
19664#undef  ARM_VARIANT
19665#define ARM_VARIANT  & fpu_fpa_ext_v2
19666
19667 cCE("lfm",	c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19668 cCL("lfmfd",	c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19669 cCL("lfmea",	d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19670 cCE("sfm",	c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19671 cCL("sfmfd",	d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19672 cCL("sfmea",	c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19673
19674#undef  ARM_VARIANT
19675#define ARM_VARIANT  & fpu_vfp_ext_v1xd  /* VFP V1xD (single precision).  */
19676
19677  /* Moves and type conversions.  */
19678 cCE("fcpys",	eb00a40, 2, (RVS, RVS),	      vfp_sp_monadic),
19679 cCE("fmrs",	e100a10, 2, (RR, RVS),	      vfp_reg_from_sp),
19680 cCE("fmsr",	e000a10, 2, (RVS, RR),	      vfp_sp_from_reg),
19681 cCE("fmstat",	ef1fa10, 0, (),		      noargs),
19682 cCE("vmrs",	ef00a10, 2, (APSR_RR, RVC),   vmrs),
19683 cCE("vmsr",	ee00a10, 2, (RVC, RR),        vmsr),
19684 cCE("fsitos",	eb80ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
19685 cCE("fuitos",	eb80a40, 2, (RVS, RVS),	      vfp_sp_monadic),
19686 cCE("ftosis",	ebd0a40, 2, (RVS, RVS),	      vfp_sp_monadic),
19687 cCE("ftosizs",	ebd0ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
19688 cCE("ftouis",	ebc0a40, 2, (RVS, RVS),	      vfp_sp_monadic),
19689 cCE("ftouizs",	ebc0ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
19690 cCE("fmrx",	ef00a10, 2, (RR, RVC),	      rd_rn),
19691 cCE("fmxr",	ee00a10, 2, (RVC, RR),	      rn_rd),
19692
19693  /* Memory operations.	 */
19694 cCE("flds",	d100a00, 2, (RVS, ADDRGLDC),  vfp_sp_ldst),
19695 cCE("fsts",	d000a00, 2, (RVS, ADDRGLDC),  vfp_sp_ldst),
19696 cCE("fldmias",	c900a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
19697 cCE("fldmfds",	c900a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
19698 cCE("fldmdbs",	d300a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
19699 cCE("fldmeas",	d300a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
19700 cCE("fldmiax",	c900b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
19701 cCE("fldmfdx",	c900b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
19702 cCE("fldmdbx",	d300b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
19703 cCE("fldmeax",	d300b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
19704 cCE("fstmias",	c800a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
19705 cCE("fstmeas",	c800a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
19706 cCE("fstmdbs",	d200a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
19707 cCE("fstmfds",	d200a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
19708 cCE("fstmiax",	c800b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
19709 cCE("fstmeax",	c800b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
19710 cCE("fstmdbx",	d200b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
19711 cCE("fstmfdx",	d200b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
19712
19713  /* Monadic operations.  */
19714 cCE("fabss",	eb00ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
19715 cCE("fnegs",	eb10a40, 2, (RVS, RVS),	      vfp_sp_monadic),
19716 cCE("fsqrts",	eb10ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
19717
19718  /* Dyadic operations.	 */
19719 cCE("fadds",	e300a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
19720 cCE("fsubs",	e300a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
19721 cCE("fmuls",	e200a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
19722 cCE("fdivs",	e800a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
19723 cCE("fmacs",	e000a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
19724 cCE("fmscs",	e100a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
19725 cCE("fnmuls",	e200a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
19726 cCE("fnmacs",	e000a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
19727 cCE("fnmscs",	e100a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
19728
19729  /* Comparisons.  */
19730 cCE("fcmps",	eb40a40, 2, (RVS, RVS),	      vfp_sp_monadic),
19731 cCE("fcmpzs",	eb50a40, 1, (RVS),	      vfp_sp_compare_z),
19732 cCE("fcmpes",	eb40ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
19733 cCE("fcmpezs",	eb50ac0, 1, (RVS),	      vfp_sp_compare_z),
19734
19735 /* Double-precision load/store instructions are still present on
19736    single-precision implementations.  */
19737 cCE("fldd",	d100b00, 2, (RVD, ADDRGLDC),  vfp_dp_ldst),
19738 cCE("fstd",	d000b00, 2, (RVD, ADDRGLDC),  vfp_dp_ldst),
19739 cCE("fldmiad",	c900b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
19740 cCE("fldmfdd",	c900b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
19741 cCE("fldmdbd",	d300b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
19742 cCE("fldmead",	d300b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
19743 cCE("fstmiad",	c800b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
19744 cCE("fstmead",	c800b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
19745 cCE("fstmdbd",	d200b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
19746 cCE("fstmfdd",	d200b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
19747
19748#undef  ARM_VARIANT
19749#define ARM_VARIANT  & fpu_vfp_ext_v1 /* VFP V1 (Double precision).  */
19750
19751  /* Moves and type conversions.  */
19752 cCE("fcpyd",	eb00b40, 2, (RVD, RVD),	      vfp_dp_rd_rm),
19753 cCE("fcvtds",	eb70ac0, 2, (RVD, RVS),	      vfp_dp_sp_cvt),
19754 cCE("fcvtsd",	eb70bc0, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
19755 cCE("fmdhr",	e200b10, 2, (RVD, RR),	      vfp_dp_rn_rd),
19756 cCE("fmdlr",	e000b10, 2, (RVD, RR),	      vfp_dp_rn_rd),
19757 cCE("fmrdh",	e300b10, 2, (RR, RVD),	      vfp_dp_rd_rn),
19758 cCE("fmrdl",	e100b10, 2, (RR, RVD),	      vfp_dp_rd_rn),
19759 cCE("fsitod",	eb80bc0, 2, (RVD, RVS),	      vfp_dp_sp_cvt),
19760 cCE("fuitod",	eb80b40, 2, (RVD, RVS),	      vfp_dp_sp_cvt),
19761 cCE("ftosid",	ebd0b40, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
19762 cCE("ftosizd",	ebd0bc0, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
19763 cCE("ftouid",	ebc0b40, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
19764 cCE("ftouizd",	ebc0bc0, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
19765
19766  /* Monadic operations.  */
19767 cCE("fabsd",	eb00bc0, 2, (RVD, RVD),	      vfp_dp_rd_rm),
19768 cCE("fnegd",	eb10b40, 2, (RVD, RVD),	      vfp_dp_rd_rm),
19769 cCE("fsqrtd",	eb10bc0, 2, (RVD, RVD),	      vfp_dp_rd_rm),
19770
19771  /* Dyadic operations.	 */
19772 cCE("faddd",	e300b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
19773 cCE("fsubd",	e300b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
19774 cCE("fmuld",	e200b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
19775 cCE("fdivd",	e800b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
19776 cCE("fmacd",	e000b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
19777 cCE("fmscd",	e100b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
19778 cCE("fnmuld",	e200b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
19779 cCE("fnmacd",	e000b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
19780 cCE("fnmscd",	e100b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
19781
19782  /* Comparisons.  */
19783 cCE("fcmpd",	eb40b40, 2, (RVD, RVD),	      vfp_dp_rd_rm),
19784 cCE("fcmpzd",	eb50b40, 1, (RVD),	      vfp_dp_rd),
19785 cCE("fcmped",	eb40bc0, 2, (RVD, RVD),	      vfp_dp_rd_rm),
19786 cCE("fcmpezd",	eb50bc0, 1, (RVD),	      vfp_dp_rd),
19787
19788#undef  ARM_VARIANT
19789#define ARM_VARIANT  & fpu_vfp_ext_v2
19790
19791 cCE("fmsrr",	c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
19792 cCE("fmrrs",	c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
19793 cCE("fmdrr",	c400b10, 3, (RVD, RR, RR),    vfp_dp_rm_rd_rn),
19794 cCE("fmrrd",	c500b10, 3, (RR, RR, RVD),    vfp_dp_rd_rn_rm),
19795
19796/* Instructions which may belong to either the Neon or VFP instruction sets.
19797   Individual encoder functions perform additional architecture checks.  */
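  /* For example, "vadd.f32 s0, s1, s2" selects a VFP encoding, while
     "vadd.f32 q0, q1, q2" needs the Neon encoding; the encoder functions
     named below decide this from the register operands and verify that the
     required extension is enabled.  */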
19798#undef  ARM_VARIANT
19799#define ARM_VARIANT    & fpu_vfp_ext_v1xd
19800#undef  THUMB_VARIANT
19801#define THUMB_VARIANT  & fpu_vfp_ext_v1xd
19802
19803  /* These mnemonics are unique to VFP.  */
19804 NCE(vsqrt,     0,       2, (RVSD, RVSD),       vfp_nsyn_sqrt),
19805 NCE(vdiv,      0,       3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
19806 nCE(vnmul,     _vnmul,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19807 nCE(vnmla,     _vnmla,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19808 nCE(vnmls,     _vnmls,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19809 nCE(vcmp,      _vcmp,    2, (RVSD, RSVD_FI0),    vfp_nsyn_cmp),
19810 nCE(vcmpe,     _vcmpe,   2, (RVSD, RSVD_FI0),    vfp_nsyn_cmp),
19811 NCE(vpush,     0,       1, (VRSDLST),          vfp_nsyn_push),
19812 NCE(vpop,      0,       1, (VRSDLST),          vfp_nsyn_pop),
19813 NCE(vcvtz,     0,       2, (RVSD, RVSD),       vfp_nsyn_cvtz),
19814
19815  /* Mnemonics shared by Neon and VFP.  */
19816 nCEF(vmul,     _vmul,    3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
19817 nCEF(vmla,     _vmla,    3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
19818 nCEF(vmls,     _vmls,    3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
19819
19820 nCEF(vadd,     _vadd,    3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
19821 nCEF(vsub,     _vsub,    3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
19822
19823 NCEF(vabs,     1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
19824 NCEF(vneg,     1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
19825
19826 NCE(vldm,      c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19827 NCE(vldmia,    c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19828 NCE(vldmdb,    d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19829 NCE(vstm,      c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19830 NCE(vstmia,    c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19831 NCE(vstmdb,    d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19832 NCE(vldr,      d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
19833 NCE(vstr,      d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
19834
19835 nCEF(vcvt,     _vcvt,   3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
19836 nCEF(vcvtr,    _vcvt,   2, (RNSDQ, RNSDQ), neon_cvtr),
19837 NCEF(vcvtb,	eb20a40, 2, (RVSD, RVSD), neon_cvtb),
19838 NCEF(vcvtt,	eb20a40, 2, (RVSD, RVSD), neon_cvtt),
19839
19840
19841  /* NOTE: All VMOV encoding is special-cased!  */
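  /* For example, "vmov s0, r0", "vmov d0, r0, r1", "vmov.f32 s0, s1" and
     "vmov.i32 q0, #0" are all parsed through the single VMOV operand class
     and disambiguated inside neon_mov.  */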
19842 NCE(vmov,      0,       1, (VMOV), neon_mov),
19843 NCE(vmovq,     0,       1, (VMOV), neon_mov),
19844
19845#undef  THUMB_VARIANT
19846#define THUMB_VARIANT  & fpu_neon_ext_v1
19847#undef  ARM_VARIANT
19848#define ARM_VARIANT    & fpu_neon_ext_v1
19849
19850  /* Data processing with three registers of the same length.  */
19851  /* Integer ops, valid types S8 S16 S32 U8 U16 U32.  */
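  /* For example, "vhadd.u8 d0, d1, d2" performs a halving add on eight
     unsigned byte lanes.  */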
19852 NUF(vaba,      0000710, 3, (RNDQ, RNDQ,  RNDQ), neon_dyadic_i_su),
19853 NUF(vabaq,     0000710, 3, (RNQ,  RNQ,   RNQ),  neon_dyadic_i_su),
19854 NUF(vhadd,     0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
19855 NUF(vhaddq,    0000000, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i_su),
19856 NUF(vrhadd,    0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
19857 NUF(vrhaddq,   0000100, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i_su),
19858 NUF(vhsub,     0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
19859 NUF(vhsubq,    0000200, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i_su),
19860  /* Integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64.  */
19861 NUF(vqadd,     0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
19862 NUF(vqaddq,    0000010, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i64_su),
19863 NUF(vqsub,     0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
19864 NUF(vqsubq,    0000210, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i64_su),
19865 NUF(vrshl,     0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
19866 NUF(vrshlq,    0000500, 3, (RNQ,  oRNQ,  RNQ),  neon_rshl),
19867 NUF(vqrshl,    0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
19868 NUF(vqrshlq,   0000510, 3, (RNQ,  oRNQ,  RNQ),  neon_rshl),
19869  /* If not immediate, fall back to neon_dyadic_i64_su.
19870     shl_imm should accept I8 I16 I32 I64;
19871     qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64.  */
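  /* For example, "vshl.i32 d0, d1, #3" uses the immediate encoding, while
     "vshl.s32 d0, d1, d2" (per-lane register shift counts) falls back to
     the neon_dyadic_i64_su handling.  */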
19872 nUF(vshl,      _vshl,    3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
19873 nUF(vshlq,     _vshl,    3, (RNQ,  oRNQ,  RNDQ_I63b), neon_shl_imm),
19874 nUF(vqshl,     _vqshl,   3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
19875 nUF(vqshlq,    _vqshl,   3, (RNQ,  oRNQ,  RNDQ_I63b), neon_qshl_imm),
19876  /* Logic ops, types optional & ignored.  */
19877 nUF(vand,      _vand,    3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19878 nUF(vandq,     _vand,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
19879 nUF(vbic,      _vbic,    3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19880 nUF(vbicq,     _vbic,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
19881 nUF(vorr,      _vorr,    3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19882 nUF(vorrq,     _vorr,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
19883 nUF(vorn,      _vorn,    3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19884 nUF(vornq,     _vorn,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
19885 nUF(veor,      _veor,    3, (RNDQ, oRNDQ, RNDQ),      neon_logic),
19886 nUF(veorq,     _veor,    3, (RNQ,  oRNQ,  RNQ),       neon_logic),
19887  /* Bitfield ops, untyped.  */
19888 NUF(vbsl,      1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
19889 NUF(vbslq,     1100110, 3, (RNQ,  RNQ,  RNQ),  neon_bitfield),
19890 NUF(vbit,      1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
19891 NUF(vbitq,     1200110, 3, (RNQ,  RNQ,  RNQ),  neon_bitfield),
19892 NUF(vbif,      1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
19893 NUF(vbifq,     1300110, 3, (RNQ,  RNQ,  RNQ),  neon_bitfield),
19894  /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32.  */
19895 nUF(vabd,      _vabd,    3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
19896 nUF(vabdq,     _vabd,    3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_if_su),
19897 nUF(vmax,      _vmax,    3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
19898 nUF(vmaxq,     _vmax,    3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_if_su),
19899 nUF(vmin,      _vmin,    3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
19900 nUF(vminq,     _vmin,    3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_if_su),
19901  /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
19902     back to neon_dyadic_if_su.  */
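  /* For example, "vcge.s32 d0, d1, #0" uses the compare-against-zero form,
     while "vcge.s32 d0, d1, d2" is an ordinary register comparison.  */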
19903 nUF(vcge,      _vcge,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
19904 nUF(vcgeq,     _vcge,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp),
19905 nUF(vcgt,      _vcgt,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
19906 nUF(vcgtq,     _vcgt,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp),
19907 nUF(vclt,      _vclt,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
19908 nUF(vcltq,     _vclt,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp_inv),
19909 nUF(vcle,      _vcle,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
19910 nUF(vcleq,     _vcle,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp_inv),
19911  /* Comparison. Type I8 I16 I32 F32.  */
19912 nUF(vceq,      _vceq,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
19913 nUF(vceqq,     _vceq,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_ceq),
19914  /* As above, D registers only.  */
19915 nUF(vpmax,     _vpmax,   3, (RND, oRND, RND), neon_dyadic_if_su_d),
19916 nUF(vpmin,     _vpmin,   3, (RND, oRND, RND), neon_dyadic_if_su_d),
19917  /* Int and float variants, signedness unimportant.  */
19918 nUF(vmlaq,     _vmla,    3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_mac_maybe_scalar),
19919 nUF(vmlsq,     _vmls,    3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_mac_maybe_scalar),
19920 nUF(vpadd,     _vpadd,   3, (RND,  oRND,  RND),       neon_dyadic_if_i_d),
19921  /* Add/sub take types I8 I16 I32 I64 F32.  */
19922 nUF(vaddq,     _vadd,    3, (RNQ,  oRNQ,  RNQ),  neon_addsub_if_i),
19923 nUF(vsubq,     _vsub,    3, (RNQ,  oRNQ,  RNQ),  neon_addsub_if_i),
19924  /* vtst takes sizes 8, 16, 32.  */
19925 NUF(vtst,      0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
19926 NUF(vtstq,     0000810, 3, (RNQ,  oRNQ,  RNQ),  neon_tst),
19927  /* VMUL takes I8 I16 I32 F32 P8.  */
19928 nUF(vmulq,     _vmul,     3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_mul),
19929  /* VQD{R}MULH takes S16 S32.  */
19930 nUF(vqdmulh,   _vqdmulh,  3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
19931 nUF(vqdmulhq,  _vqdmulh,  3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_qdmulh),
19932 nUF(vqrdmulh,  _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
19933 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_qdmulh),
19934 NUF(vacge,     0000e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
19935 NUF(vacgeq,    0000e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute),
19936 NUF(vacgt,     0200e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
19937 NUF(vacgtq,    0200e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute),
19938 NUF(vaclt,     0200e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
19939 NUF(vacltq,    0200e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute_inv),
19940 NUF(vacle,     0000e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
19941 NUF(vacleq,    0000e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute_inv),
19942 NUF(vrecps,    0000f10,  3, (RNDQ, oRNDQ, RNDQ), neon_step),
19943 NUF(vrecpsq,   0000f10,  3, (RNQ,  oRNQ,  RNQ),  neon_step),
19944 NUF(vrsqrts,   0200f10,  3, (RNDQ, oRNDQ, RNDQ), neon_step),
19945 NUF(vrsqrtsq,  0200f10,  3, (RNQ,  oRNQ,  RNQ),  neon_step),
19946 /* ARM v8.1 extension.  */
19947 nUF(vqrdmlah,  _vqrdmlah, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
19948 nUF(vqrdmlahq, _vqrdmlah, 3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_qdmulh),
19949 nUF(vqrdmlsh,  _vqrdmlsh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
19950 nUF(vqrdmlshq, _vqrdmlsh, 3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_qdmulh),
19951
19952  /* Two address, int/float. Types S8 S16 S32 F32.  */
19953 NUF(vabsq,     1b10300, 2, (RNQ,  RNQ),      neon_abs_neg),
19954 NUF(vnegq,     1b10380, 2, (RNQ,  RNQ),      neon_abs_neg),
19955
19956  /* Data processing with two registers and a shift amount.  */
19957  /* Right shifts, and variants with rounding.
19958     Types accepted S8 S16 S32 S64 U8 U16 U32 U64.  */
19959 NUF(vshr,      0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
19960 NUF(vshrq,     0800010, 3, (RNQ,  oRNQ,  I64z), neon_rshift_round_imm),
19961 NUF(vrshr,     0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
19962 NUF(vrshrq,    0800210, 3, (RNQ,  oRNQ,  I64z), neon_rshift_round_imm),
19963 NUF(vsra,      0800110, 3, (RNDQ, oRNDQ, I64),  neon_rshift_round_imm),
19964 NUF(vsraq,     0800110, 3, (RNQ,  oRNQ,  I64),  neon_rshift_round_imm),
19965 NUF(vrsra,     0800310, 3, (RNDQ, oRNDQ, I64),  neon_rshift_round_imm),
19966 NUF(vrsraq,    0800310, 3, (RNQ,  oRNQ,  I64),  neon_rshift_round_imm),
19967  /* Shift and insert. Sizes accepted 8 16 32 64.  */
19968 NUF(vsli,      1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
19969 NUF(vsliq,     1800510, 3, (RNQ,  oRNQ,  I63), neon_sli),
19970 NUF(vsri,      1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
19971 NUF(vsriq,     1800410, 3, (RNQ,  oRNQ,  I64), neon_sri),
19972  /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64.  */
19973 NUF(vqshlu,    1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
19974 NUF(vqshluq,   1800610, 3, (RNQ,  oRNQ,  I63), neon_qshlu_imm),
19975  /* Right shift immediate, saturating & narrowing, with rounding variants.
19976     Types accepted S16 S32 S64 U16 U32 U64.  */
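  /* For example, "vqshrn.s32 d0, q1, #8" shifts each 32-bit lane of q1
     right by 8 and narrows the result to 16 bits with saturation.  */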
19977 NUF(vqshrn,    0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
19978 NUF(vqrshrn,   0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
19979  /* As above, unsigned. Types accepted S16 S32 S64.  */
19980 NUF(vqshrun,   0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
19981 NUF(vqrshrun,  0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
19982  /* Right shift narrowing. Types accepted I16 I32 I64.  */
19983 NUF(vshrn,     0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
19984 NUF(vrshrn,    0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
19985  /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant.  */
19986 nUF(vshll,     _vshll,   3, (RNQ, RND, I32),  neon_shll),
19987  /* CVT with optional immediate for fixed-point variant.  */
19988 nUF(vcvtq,     _vcvt,    3, (RNQ, RNQ, oI32b), neon_cvt),
19989
19990 nUF(vmvn,      _vmvn,    2, (RNDQ, RNDQ_Ibig), neon_mvn),
19991 nUF(vmvnq,     _vmvn,    2, (RNQ,  RNDQ_Ibig), neon_mvn),
19992
19993  /* Data processing, three registers of different lengths.  */
19994  /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32.  */
19995 NUF(vabal,     0800500, 3, (RNQ, RND, RND),  neon_abal),
19996 NUF(vabdl,     0800700, 3, (RNQ, RND, RND),  neon_dyadic_long),
19997 NUF(vaddl,     0800000, 3, (RNQ, RND, RND),  neon_dyadic_long),
19998 NUF(vsubl,     0800200, 3, (RNQ, RND, RND),  neon_dyadic_long),
19999  /* If not scalar, fall back to neon_dyadic_long.
20000     Vector types as above, scalar types S16 S32 U16 U32.  */
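  /* For example, "vmlal.s16 q0, d1, d2[0]" multiplies by a single scalar
     lane, while "vmlal.s16 q0, d1, d2" is the plain long multiply-accumulate
     and falls back to neon_dyadic_long.  */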
20001 nUF(vmlal,     _vmlal,   3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
20002 nUF(vmlsl,     _vmlsl,   3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
20003  /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32.  */
20004 NUF(vaddw,     0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
20005 NUF(vsubw,     0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
20006  /* Dyadic, narrowing insns. Types I16 I32 I64.  */
20007 NUF(vaddhn,    0800400, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
20008 NUF(vraddhn,   1800400, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
20009 NUF(vsubhn,    0800600, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
20010 NUF(vrsubhn,   1800600, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
20011  /* Saturating doubling multiplies. Types S16 S32.  */
20012 nUF(vqdmlal,   _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20013 nUF(vqdmlsl,   _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20014 nUF(vqdmull,   _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20015  /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
20016     S16 S32 U16 U32.  */
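  /* For example, "vmull.p8 q0, d1, d2" is the polynomial variant, which has
     no scalar form.  */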
20017 nUF(vmull,     _vmull,   3, (RNQ, RND, RND_RNSC), neon_vmull),
20018
20019  /* Extract. Size 8.  */
20020 NUF(vext,      0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
20021 NUF(vextq,     0b00000, 4, (RNQ,  oRNQ,  RNQ,  I15), neon_ext),
20022
20023  /* Two registers, miscellaneous.  */
20024  /* Reverse. Sizes 8 16 32 (must be < size in opcode).  */
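  /* For example, "vrev32.8 d0, d1" reverses the byte order within each
     32-bit word; the element size (8) must be smaller than the size in the
     mnemonic (32).  */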
20025 NUF(vrev64,    1b00000, 2, (RNDQ, RNDQ),     neon_rev),
20026 NUF(vrev64q,   1b00000, 2, (RNQ,  RNQ),      neon_rev),
20027 NUF(vrev32,    1b00080, 2, (RNDQ, RNDQ),     neon_rev),
20028 NUF(vrev32q,   1b00080, 2, (RNQ,  RNQ),      neon_rev),
20029 NUF(vrev16,    1b00100, 2, (RNDQ, RNDQ),     neon_rev),
20030 NUF(vrev16q,   1b00100, 2, (RNQ,  RNQ),      neon_rev),
20031  /* Vector replicate. Sizes 8 16 32.  */
20032 nCE(vdup,      _vdup,    2, (RNDQ, RR_RNSC),  neon_dup),
20033 nCE(vdupq,     _vdup,    2, (RNQ,  RR_RNSC),  neon_dup),
20034  /* VMOVL. Types S8 S16 S32 U8 U16 U32.  */
20035 NUF(vmovl,     0800a10, 2, (RNQ, RND),       neon_movl),
20036  /* VMOVN. Types I16 I32 I64.  */
20037 nUF(vmovn,     _vmovn,   2, (RND, RNQ),       neon_movn),
20038  /* VQMOVN. Types S16 S32 S64 U16 U32 U64.  */
20039 nUF(vqmovn,    _vqmovn,  2, (RND, RNQ),       neon_qmovn),
20040  /* VQMOVUN. Types S16 S32 S64.  */
20041 nUF(vqmovun,   _vqmovun, 2, (RND, RNQ),       neon_qmovun),
20042  /* VZIP / VUZP. Sizes 8 16 32.  */
20043 NUF(vzip,      1b20180, 2, (RNDQ, RNDQ),     neon_zip_uzp),
20044 NUF(vzipq,     1b20180, 2, (RNQ,  RNQ),      neon_zip_uzp),
20045 NUF(vuzp,      1b20100, 2, (RNDQ, RNDQ),     neon_zip_uzp),
20046 NUF(vuzpq,     1b20100, 2, (RNQ,  RNQ),      neon_zip_uzp),
20047  /* VQABS / VQNEG. Types S8 S16 S32.  */
20048 NUF(vqabs,     1b00700, 2, (RNDQ, RNDQ),     neon_sat_abs_neg),
20049 NUF(vqabsq,    1b00700, 2, (RNQ,  RNQ),      neon_sat_abs_neg),
20050 NUF(vqneg,     1b00780, 2, (RNDQ, RNDQ),     neon_sat_abs_neg),
20051 NUF(vqnegq,    1b00780, 2, (RNQ,  RNQ),      neon_sat_abs_neg),
20052  /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32.  */
20053 NUF(vpadal,    1b00600, 2, (RNDQ, RNDQ),     neon_pair_long),
20054 NUF(vpadalq,   1b00600, 2, (RNQ,  RNQ),      neon_pair_long),
20055 NUF(vpaddl,    1b00200, 2, (RNDQ, RNDQ),     neon_pair_long),
20056 NUF(vpaddlq,   1b00200, 2, (RNQ,  RNQ),      neon_pair_long),
20057  /* Reciprocal estimates. Types U32 F32.  */
20058 NUF(vrecpe,    1b30400, 2, (RNDQ, RNDQ),     neon_recip_est),
20059 NUF(vrecpeq,   1b30400, 2, (RNQ,  RNQ),      neon_recip_est),
20060 NUF(vrsqrte,   1b30480, 2, (RNDQ, RNDQ),     neon_recip_est),
20061 NUF(vrsqrteq,  1b30480, 2, (RNQ,  RNQ),      neon_recip_est),
20062  /* VCLS. Types S8 S16 S32.  */
20063 NUF(vcls,      1b00400, 2, (RNDQ, RNDQ),     neon_cls),
20064 NUF(vclsq,     1b00400, 2, (RNQ,  RNQ),      neon_cls),
20065  /* VCLZ. Types I8 I16 I32.  */
20066 NUF(vclz,      1b00480, 2, (RNDQ, RNDQ),     neon_clz),
20067 NUF(vclzq,     1b00480, 2, (RNQ,  RNQ),      neon_clz),
20068  /* VCNT. Size 8.  */
20069 NUF(vcnt,      1b00500, 2, (RNDQ, RNDQ),     neon_cnt),
20070 NUF(vcntq,     1b00500, 2, (RNQ,  RNQ),      neon_cnt),
20071  /* Two address, untyped.  */
20072 NUF(vswp,      1b20000, 2, (RNDQ, RNDQ),     neon_swp),
20073 NUF(vswpq,     1b20000, 2, (RNQ,  RNQ),      neon_swp),
20074  /* VTRN. Sizes 8 16 32.  */
20075 nUF(vtrn,      _vtrn,    2, (RNDQ, RNDQ),     neon_trn),
20076 nUF(vtrnq,     _vtrn,    2, (RNQ,  RNQ),      neon_trn),
20077
20078  /* Table lookup. Size 8.  */
20079 NUF(vtbl,      1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
20080 NUF(vtbx,      1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
20081
20082#undef  THUMB_VARIANT
20083#define THUMB_VARIANT  & fpu_vfp_v3_or_neon_ext
20084#undef  ARM_VARIANT
20085#define ARM_VARIANT    & fpu_vfp_v3_or_neon_ext
20086
20087  /* Neon element/structure load/store.  */
20088 nUF(vld1,      _vld1,    2, (NSTRLST, ADDR),  neon_ldx_stx),
20089 nUF(vst1,      _vst1,    2, (NSTRLST, ADDR),  neon_ldx_stx),
20090 nUF(vld2,      _vld2,    2, (NSTRLST, ADDR),  neon_ldx_stx),
20091 nUF(vst2,      _vst2,    2, (NSTRLST, ADDR),  neon_ldx_stx),
20092 nUF(vld3,      _vld3,    2, (NSTRLST, ADDR),  neon_ldx_stx),
20093 nUF(vst3,      _vst3,    2, (NSTRLST, ADDR),  neon_ldx_stx),
20094 nUF(vld4,      _vld4,    2, (NSTRLST, ADDR),  neon_ldx_stx),
20095 nUF(vst4,      _vst4,    2, (NSTRLST, ADDR),  neon_ldx_stx),
20096
20097#undef  THUMB_VARIANT
20098#define THUMB_VARIANT & fpu_vfp_ext_v3xd
20099#undef  ARM_VARIANT
20100#define ARM_VARIANT   & fpu_vfp_ext_v3xd
20101 cCE("fconsts",   eb00a00, 2, (RVS, I255),      vfp_sp_const),
20102 cCE("fshtos",    eba0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
20103 cCE("fsltos",    eba0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
20104 cCE("fuhtos",    ebb0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
20105 cCE("fultos",    ebb0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
20106 cCE("ftoshs",    ebe0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
20107 cCE("ftosls",    ebe0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
20108 cCE("ftouhs",    ebf0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
20109 cCE("ftouls",    ebf0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
20110
20111#undef  THUMB_VARIANT
20112#define THUMB_VARIANT  & fpu_vfp_ext_v3
20113#undef  ARM_VARIANT
20114#define ARM_VARIANT    & fpu_vfp_ext_v3
20115
20116 cCE("fconstd",   eb00b00, 2, (RVD, I255),      vfp_dp_const),
20117 cCE("fshtod",    eba0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
20118 cCE("fsltod",    eba0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
20119 cCE("fuhtod",    ebb0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
20120 cCE("fultod",    ebb0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
20121 cCE("ftoshd",    ebe0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
20122 cCE("ftosld",    ebe0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
20123 cCE("ftouhd",    ebf0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
20124 cCE("ftould",    ebf0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
20125
20126#undef  ARM_VARIANT
20127#define ARM_VARIANT    & fpu_vfp_ext_fma
20128#undef  THUMB_VARIANT
20129#define THUMB_VARIANT  & fpu_vfp_ext_fma
20130 /* Mnemonics shared by Neon and VFP.  These are included in the
20131    VFP FMA variant; NEON and VFP FMA always include the NEON
20132    FMA instructions.  */
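 /* For example, "vfma.f32 s0, s1, s2" uses the scalar VFP encoding while
    "vfma.f32 q0, q1, q2" uses the vector Neon encoding; neon_fmac picks the
    form from the register operands.  */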
20133 nCEF(vfma,     _vfma,    3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
20134 nCEF(vfms,     _vfms,    3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
20135 /* ffmas/ffnmas/ffmad/ffnmad are dummy mnemonics to satisfy gas;
20136    the v form should always be used.  */
20137 cCE("ffmas",	ea00a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
20138 cCE("ffnmas",	ea00a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
20139 cCE("ffmad",	ea00b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
20140 cCE("ffnmad",	ea00b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
20141 nCE(vfnma,     _vfnma,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20142 nCE(vfnms,     _vfnms,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20143
20144#undef THUMB_VARIANT
20145#undef  ARM_VARIANT
20146#define ARM_VARIANT  & arm_cext_xscale /* Intel XScale extensions.  */
20147
20148 cCE("mia",	e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20149 cCE("miaph",	e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20150 cCE("miabb",	e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20151 cCE("miabt",	e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20152 cCE("miatb",	e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20153 cCE("miatt",	e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20154 cCE("mar",	c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
20155 cCE("mra",	c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
20156
20157#undef  ARM_VARIANT
20158#define ARM_VARIANT  & arm_cext_iwmmxt /* Intel Wireless MMX technology.  */
20159
20160 cCE("tandcb",	e13f130, 1, (RR),		    iwmmxt_tandorc),
20161 cCE("tandch",	e53f130, 1, (RR),		    iwmmxt_tandorc),
20162 cCE("tandcw",	e93f130, 1, (RR),		    iwmmxt_tandorc),
20163 cCE("tbcstb",	e400010, 2, (RIWR, RR),		    rn_rd),
20164 cCE("tbcsth",	e400050, 2, (RIWR, RR),		    rn_rd),
20165 cCE("tbcstw",	e400090, 2, (RIWR, RR),		    rn_rd),
20166 cCE("textrcb",	e130170, 2, (RR, I7),		    iwmmxt_textrc),
20167 cCE("textrch",	e530170, 2, (RR, I7),		    iwmmxt_textrc),
20168 cCE("textrcw",	e930170, 2, (RR, I7),		    iwmmxt_textrc),
20169 cCE("textrmub",e100070, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
20170 cCE("textrmuh",e500070, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
20171 cCE("textrmuw",e900070, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
20172 cCE("textrmsb",e100078, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
20173 cCE("textrmsh",e500078, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
20174 cCE("textrmsw",e900078, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
20175 cCE("tinsrb",	e600010, 3, (RIWR, RR, I7),	    iwmmxt_tinsr),
20176 cCE("tinsrh",	e600050, 3, (RIWR, RR, I7),	    iwmmxt_tinsr),
20177 cCE("tinsrw",	e600090, 3, (RIWR, RR, I7),	    iwmmxt_tinsr),
20178 cCE("tmcr",	e000110, 2, (RIWC_RIWG, RR),	    rn_rd),
20179 cCE("tmcrr",	c400000, 3, (RIWR, RR, RR),	    rm_rd_rn),
20180 cCE("tmia",	e200010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
20181 cCE("tmiaph",	e280010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
20182 cCE("tmiabb",	e2c0010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
20183 cCE("tmiabt",	e2d0010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
20184 cCE("tmiatb",	e2e0010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
20185 cCE("tmiatt",	e2f0010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
20186 cCE("tmovmskb",e100030, 2, (RR, RIWR),		    rd_rn),
20187 cCE("tmovmskh",e500030, 2, (RR, RIWR),		    rd_rn),
20188 cCE("tmovmskw",e900030, 2, (RR, RIWR),		    rd_rn),
20189 cCE("tmrc",	e100110, 2, (RR, RIWC_RIWG),	    rd_rn),
20190 cCE("tmrrc",	c500000, 3, (RR, RR, RIWR),	    rd_rn_rm),
20191 cCE("torcb",	e13f150, 1, (RR),		    iwmmxt_tandorc),
20192 cCE("torch",	e53f150, 1, (RR),		    iwmmxt_tandorc),
20193 cCE("torcw",	e93f150, 1, (RR),		    iwmmxt_tandorc),
20194 cCE("waccb",	e0001c0, 2, (RIWR, RIWR),	    rd_rn),
20195 cCE("wacch",	e4001c0, 2, (RIWR, RIWR),	    rd_rn),
20196 cCE("waccw",	e8001c0, 2, (RIWR, RIWR),	    rd_rn),
20197 cCE("waddbss",	e300180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20198 cCE("waddb",	e000180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20199 cCE("waddbus",	e100180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20200 cCE("waddhss",	e700180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20201 cCE("waddh",	e400180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20202 cCE("waddhus",	e500180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20203 cCE("waddwss",	eb00180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20204 cCE("waddw",	e800180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20205 cCE("waddwus",	e900180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20206 cCE("waligni",	e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
20207 cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20208 cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20209 cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20210 cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20211 cCE("wand",	e200000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20212 cCE("wandn",	e300000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20213 cCE("wavg2b",	e800000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20214 cCE("wavg2br",	e900000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20215 cCE("wavg2h",	ec00000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20216 cCE("wavg2hr",	ed00000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20217 cCE("wcmpeqb",	e000060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20218 cCE("wcmpeqh",	e400060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20219 cCE("wcmpeqw",	e800060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20220 cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20221 cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20222 cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20223 cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20224 cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20225 cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20226 cCE("wldrb",	c100000, 2, (RIWR, ADDR),	    iwmmxt_wldstbh),
20227 cCE("wldrh",	c500000, 2, (RIWR, ADDR),	    iwmmxt_wldstbh),
20228 cCE("wldrw",	c100100, 2, (RIWR_RIWC, ADDR),	    iwmmxt_wldstw),
20229 cCE("wldrd",	c500100, 2, (RIWR, ADDR),	    iwmmxt_wldstd),
20230 cCE("wmacs",	e600100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20231 cCE("wmacsz",	e700100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20232 cCE("wmacu",	e400100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20233 cCE("wmacuz",	e500100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20234 cCE("wmadds",	ea00100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20235 cCE("wmaddu",	e800100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20236 cCE("wmaxsb",	e200160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20237 cCE("wmaxsh",	e600160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20238 cCE("wmaxsw",	ea00160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20239 cCE("wmaxub",	e000160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20240 cCE("wmaxuh",	e400160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20241 cCE("wmaxuw",	e800160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20242 cCE("wminsb",	e300160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20243 cCE("wminsh",	e700160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20244 cCE("wminsw",	eb00160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20245 cCE("wminub",	e100160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20246 cCE("wminuh",	e500160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20247 cCE("wminuw",	e900160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20248 cCE("wmov",	e000000, 2, (RIWR, RIWR),	    iwmmxt_wmov),
20249 cCE("wmulsm",	e300100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20250 cCE("wmulsl",	e200100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20251 cCE("wmulum",	e100100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20252 cCE("wmulul",	e000100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20253 cCE("wor",	e000000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20254 cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20255 cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20256 cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20257 cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20258 cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20259 cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20260 cCE("wrorh",	e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20261 cCE("wrorhg",	e700148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
20262 cCE("wrorw",	eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20263 cCE("wrorwg",	eb00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
20264 cCE("wrord",	ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20265 cCE("wrordg",	ef00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
20266 cCE("wsadb",	e000120, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20267 cCE("wsadbz",	e100120, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20268 cCE("wsadh",	e400120, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20269 cCE("wsadhz",	e500120, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20270 cCE("wshufh",	e0001e0, 3, (RIWR, RIWR, I255),	    iwmmxt_wshufh),
20271 cCE("wsllh",	e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20272 cCE("wsllhg",	e500148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
20273 cCE("wsllw",	e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20274 cCE("wsllwg",	e900148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
20275 cCE("wslld",	ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20276 cCE("wslldg",	ed00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
20277 cCE("wsrah",	e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20278 cCE("wsrahg",	e400148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
20279 cCE("wsraw",	e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20280 cCE("wsrawg",	e800148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
20281 cCE("wsrad",	ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20282 cCE("wsradg",	ec00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
20283 cCE("wsrlh",	e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20284 cCE("wsrlhg",	e600148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
20285 cCE("wsrlw",	ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20286 cCE("wsrlwg",	ea00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
20287 cCE("wsrld",	ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20288 cCE("wsrldg",	ee00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
20289 cCE("wstrb",	c000000, 2, (RIWR, ADDR),	    iwmmxt_wldstbh),
20290 cCE("wstrh",	c400000, 2, (RIWR, ADDR),	    iwmmxt_wldstbh),
20291 cCE("wstrw",	c000100, 2, (RIWR_RIWC, ADDR),	    iwmmxt_wldstw),
20292 cCE("wstrd",	c400100, 2, (RIWR, ADDR),	    iwmmxt_wldstd),
20293 cCE("wsubbss",	e3001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20294 cCE("wsubb",	e0001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20295 cCE("wsubbus",	e1001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20296 cCE("wsubhss",	e7001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20297 cCE("wsubh",	e4001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20298 cCE("wsubhus",	e5001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20299 cCE("wsubwss",	eb001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20300 cCE("wsubw",	e8001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20301 cCE("wsubwus",	e9001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20302 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR),	    rd_rn),
20303 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR),	    rd_rn),
20304 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR),	    rd_rn),
20305 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR),	    rd_rn),
20306 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR),	    rd_rn),
20307 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR),	    rd_rn),
20308 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20309 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20310 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20311 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR),	    rd_rn),
20312 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR),	    rd_rn),
20313 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR),	    rd_rn),
20314 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR),	    rd_rn),
20315 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR),	    rd_rn),
20316 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR),	    rd_rn),
20317 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20318 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20319 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20320 cCE("wxor",	e100000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20321 cCE("wzero",	e300000, 1, (RIWR),		    iwmmxt_wzero),
20322
20323#undef  ARM_VARIANT
20324#define ARM_VARIANT  & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2.  */
20325
20326 cCE("torvscb",   e12f190, 1, (RR),		    iwmmxt_tandorc),
20327 cCE("torvsch",   e52f190, 1, (RR),		    iwmmxt_tandorc),
20328 cCE("torvscw",   e92f190, 1, (RR),		    iwmmxt_tandorc),
20329 cCE("wabsb",     e2001c0, 2, (RIWR, RIWR),           rd_rn),
20330 cCE("wabsh",     e6001c0, 2, (RIWR, RIWR),           rd_rn),
20331 cCE("wabsw",     ea001c0, 2, (RIWR, RIWR),           rd_rn),
20332 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20333 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20334 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20335 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20336 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20337 cCE("waddhc",    e600180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20338 cCE("waddwc",    ea00180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20339 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20340 cCE("wavg4",	e400000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20341 cCE("wavg4r",    e500000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20342 cCE("wmaddsn",   ee00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20343 cCE("wmaddsx",   eb00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20344 cCE("wmaddun",   ec00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20345 cCE("wmaddux",   e900100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20346 cCE("wmerge",    e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
20347 cCE("wmiabb",    e0000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20348 cCE("wmiabt",    e1000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20349 cCE("wmiatb",    e2000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20350 cCE("wmiatt",    e3000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20351 cCE("wmiabbn",   e4000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20352 cCE("wmiabtn",   e5000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20353 cCE("wmiatbn",   e6000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20354 cCE("wmiattn",   e7000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20355 cCE("wmiawbb",   e800120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20356 cCE("wmiawbt",   e900120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20357 cCE("wmiawtb",   ea00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20358 cCE("wmiawtt",   eb00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20359 cCE("wmiawbbn",  ec00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20360 cCE("wmiawbtn",  ed00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20361 cCE("wmiawtbn",  ee00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20362 cCE("wmiawttn",  ef00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20363 cCE("wmulsmr",   ef00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20364 cCE("wmulumr",   ed00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20365 cCE("wmulwumr",  ec000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20366 cCE("wmulwsmr",  ee000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20367 cCE("wmulwum",   ed000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20368 cCE("wmulwsm",   ef000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20369 cCE("wmulwl",    eb000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20370 cCE("wqmiabb",   e8000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20371 cCE("wqmiabt",   e9000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20372 cCE("wqmiatb",   ea000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20373 cCE("wqmiatt",   eb000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20374 cCE("wqmiabbn",  ec000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20375 cCE("wqmiabtn",  ed000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20376 cCE("wqmiatbn",  ee000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20377 cCE("wqmiattn",  ef000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20378 cCE("wqmulm",    e100080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20379 cCE("wqmulmr",   e300080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20380 cCE("wqmulwm",   ec000e0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20381 cCE("wqmulwmr",  ee000e0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20382 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20383
20384#undef  ARM_VARIANT
20385#define ARM_VARIANT  & arm_cext_maverick /* Cirrus Maverick instructions.  */
20386
20387 cCE("cfldrs",	c100400, 2, (RMF, ADDRGLDC),	      rd_cpaddr),
20388 cCE("cfldrd",	c500400, 2, (RMD, ADDRGLDC),	      rd_cpaddr),
20389 cCE("cfldr32",	c100500, 2, (RMFX, ADDRGLDC),	      rd_cpaddr),
20390 cCE("cfldr64",	c500500, 2, (RMDX, ADDRGLDC),	      rd_cpaddr),
20391 cCE("cfstrs",	c000400, 2, (RMF, ADDRGLDC),	      rd_cpaddr),
20392 cCE("cfstrd",	c400400, 2, (RMD, ADDRGLDC),	      rd_cpaddr),
20393 cCE("cfstr32",	c000500, 2, (RMFX, ADDRGLDC),	      rd_cpaddr),
20394 cCE("cfstr64",	c400500, 2, (RMDX, ADDRGLDC),	      rd_cpaddr),
20395 cCE("cfmvsr",	e000450, 2, (RMF, RR),		      rn_rd),
20396 cCE("cfmvrs",	e100450, 2, (RR, RMF),		      rd_rn),
20397 cCE("cfmvdlr",	e000410, 2, (RMD, RR),		      rn_rd),
20398 cCE("cfmvrdl",	e100410, 2, (RR, RMD),		      rd_rn),
20399 cCE("cfmvdhr",	e000430, 2, (RMD, RR),		      rn_rd),
20400 cCE("cfmvrdh",	e100430, 2, (RR, RMD),		      rd_rn),
20401 cCE("cfmv64lr",e000510, 2, (RMDX, RR),		      rn_rd),
20402 cCE("cfmvr64l",e100510, 2, (RR, RMDX),		      rd_rn),
20403 cCE("cfmv64hr",e000530, 2, (RMDX, RR),		      rn_rd),
20404 cCE("cfmvr64h",e100530, 2, (RR, RMDX),		      rd_rn),
20405 cCE("cfmval32",e200440, 2, (RMAX, RMFX),	      rd_rn),
20406 cCE("cfmv32al",e100440, 2, (RMFX, RMAX),	      rd_rn),
20407 cCE("cfmvam32",e200460, 2, (RMAX, RMFX),	      rd_rn),
20408 cCE("cfmv32am",e100460, 2, (RMFX, RMAX),	      rd_rn),
20409 cCE("cfmvah32",e200480, 2, (RMAX, RMFX),	      rd_rn),
20410 cCE("cfmv32ah",e100480, 2, (RMFX, RMAX),	      rd_rn),
20411 cCE("cfmva32",	e2004a0, 2, (RMAX, RMFX),	      rd_rn),
20412 cCE("cfmv32a",	e1004a0, 2, (RMFX, RMAX),	      rd_rn),
20413 cCE("cfmva64",	e2004c0, 2, (RMAX, RMDX),	      rd_rn),
20414 cCE("cfmv64a",	e1004c0, 2, (RMDX, RMAX),	      rd_rn),
20415 cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX),	      mav_dspsc),
20416 cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS),	      rd),
20417 cCE("cfcpys",	e000400, 2, (RMF, RMF),		      rd_rn),
20418 cCE("cfcpyd",	e000420, 2, (RMD, RMD),		      rd_rn),
20419 cCE("cfcvtsd",	e000460, 2, (RMD, RMF),		      rd_rn),
20420 cCE("cfcvtds",	e000440, 2, (RMF, RMD),		      rd_rn),
20421 cCE("cfcvt32s",e000480, 2, (RMF, RMFX),	      rd_rn),
20422 cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX),	      rd_rn),
20423 cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX),	      rd_rn),
20424 cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX),	      rd_rn),
20425 cCE("cfcvts32",e100580, 2, (RMFX, RMF),	      rd_rn),
20426 cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD),	      rd_rn),
20427 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF),	      rd_rn),
20428 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD),	      rd_rn),
20429 cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR),	      mav_triple),
20430 cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR),	      mav_triple),
20431 cCE("cfsh32",	e000500, 3, (RMFX, RMFX, I63s),	      mav_shift),
20432 cCE("cfsh64",	e200500, 3, (RMDX, RMDX, I63s),	      mav_shift),
20433 cCE("cfcmps",	e100490, 3, (RR, RMF, RMF),	      rd_rn_rm),
20434 cCE("cfcmpd",	e1004b0, 3, (RR, RMD, RMD),	      rd_rn_rm),
20435 cCE("cfcmp32",	e100590, 3, (RR, RMFX, RMFX),	      rd_rn_rm),
20436 cCE("cfcmp64",	e1005b0, 3, (RR, RMDX, RMDX),	      rd_rn_rm),
20437 cCE("cfabss",	e300400, 2, (RMF, RMF),		      rd_rn),
20438 cCE("cfabsd",	e300420, 2, (RMD, RMD),		      rd_rn),
20439 cCE("cfnegs",	e300440, 2, (RMF, RMF),		      rd_rn),
20440 cCE("cfnegd",	e300460, 2, (RMD, RMD),		      rd_rn),
20441 cCE("cfadds",	e300480, 3, (RMF, RMF, RMF),	      rd_rn_rm),
20442 cCE("cfaddd",	e3004a0, 3, (RMD, RMD, RMD),	      rd_rn_rm),
20443 cCE("cfsubs",	e3004c0, 3, (RMF, RMF, RMF),	      rd_rn_rm),
20444 cCE("cfsubd",	e3004e0, 3, (RMD, RMD, RMD),	      rd_rn_rm),
20445 cCE("cfmuls",	e100400, 3, (RMF, RMF, RMF),	      rd_rn_rm),
20446 cCE("cfmuld",	e100420, 3, (RMD, RMD, RMD),	      rd_rn_rm),
20447 cCE("cfabs32",	e300500, 2, (RMFX, RMFX),	      rd_rn),
20448 cCE("cfabs64",	e300520, 2, (RMDX, RMDX),	      rd_rn),
20449 cCE("cfneg32",	e300540, 2, (RMFX, RMFX),	      rd_rn),
20450 cCE("cfneg64",	e300560, 2, (RMDX, RMDX),	      rd_rn),
20451 cCE("cfadd32",	e300580, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
20452 cCE("cfadd64",	e3005a0, 3, (RMDX, RMDX, RMDX),	      rd_rn_rm),
20453 cCE("cfsub32",	e3005c0, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
20454 cCE("cfsub64",	e3005e0, 3, (RMDX, RMDX, RMDX),	      rd_rn_rm),
20455 cCE("cfmul32",	e100500, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
20456 cCE("cfmul64",	e100520, 3, (RMDX, RMDX, RMDX),	      rd_rn_rm),
20457 cCE("cfmac32",	e100540, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
20458 cCE("cfmsc32",	e100560, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
20459 cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
20460 cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
20461 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
20462 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
20463};
20464#undef ARM_VARIANT
20465#undef THUMB_VARIANT
20466#undef TCE
20467#undef TUE
20468#undef TUF
20469#undef TCC
20470#undef cCE
20471#undef cCL
20472#undef C3E
20473#undef CE
20474#undef CM
20475#undef UE
20476#undef UF
20477#undef UT
20478#undef NUF
20479#undef nUF
20480#undef NCE
20481#undef nCE
20482#undef OPS0
20483#undef OPS1
20484#undef OPS2
20485#undef OPS3
20486#undef OPS4
20487#undef OPS5
20488#undef OPS6
20489#undef do_0
20490
20491/* MD interface: bits in the object file.  */
20492
20493/* Turn an integer of n bytes (in val) into a stream of bytes appropriate
20494   for use in the object file, and store them in the array pointed to by buf.
20495   This knows about the endian-ness of the target machine and does
20496   THE RIGHT THING, whatever it is.  Possible values for n are 1 (byte),
20497   2 (short) and 4 (long).  Floating numbers are put out as a series of
20498   LITTLENUMS (shorts, here at least).  */
20499
20500void
20501md_number_to_chars (char * buf, valueT val, int n)
20502{
20503  if (target_big_endian)
20504    number_to_chars_bigendian (buf, val, n);
20505  else
20506    number_to_chars_littleendian (buf, val, n);
20507}
20508
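/* Read an N-byte value back out of BUF, honouring the target's endianness;
   the inverse of md_number_to_chars above.  */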
20509static valueT
20510md_chars_to_number (char * buf, int n)
20511{
20512  valueT result = 0;
20513  unsigned char * where = (unsigned char *) buf;
20514
20515  if (target_big_endian)
20516    {
20517      while (n--)
20518	{
20519	  result <<= 8;
20520	  result |= (*where++ & 255);
20521	}
20522    }
20523  else
20524    {
20525      while (n--)
20526	{
20527	  result <<= 8;
20528	  result |= (where[n] & 255);
20529	}
20530    }
20531
20532  return result;
20533}
20534
20535/* MD interface: Sections.  */
20536
20537/* Calculate the maximum variable size (i.e., excluding fr_fix)
20538   that an rs_machine_dependent frag may reach.  */
20539
20540unsigned int
20541arm_frag_max_var (fragS *fragp)
20542{
20543  /* We only use rs_machine_dependent for variable-size Thumb instructions,
20544     which are either THUMB_SIZE (2) or INSN_SIZE (4).
20545
20546     Note that we generate relaxable instructions even for cases that don't
20547     really need it, like an immediate that's a trivial constant.  So we're
20548     overestimating the instruction size for some of those cases.  Rather
20549     than putting more intelligence here, it would probably be better to
20550     avoid generating a relaxation frag in the first place when it can be
20551     determined up front that a short instruction will suffice.  */
20552
20553  gas_assert (fragp->fr_type == rs_machine_dependent);
20554  return INSN_SIZE;
20555}
20556
20557/* Estimate the size of a frag before relaxing.  Assume everything fits in
20558   2 bytes.  */
20559
20560int
20561md_estimate_size_before_relax (fragS * fragp,
20562			       segT    segtype ATTRIBUTE_UNUSED)
20563{
20564  fragp->fr_var = 2;
20565  return 2;
20566}
20567
20568/* Convert a machine dependent frag.  */
20569
20570void
20571md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
20572{
20573  unsigned long insn;
20574  unsigned long old_op;
20575  char *buf;
20576  expressionS exp;
20577  fixS *fixp;
20578  int reloc_type;
20579  int pc_rel;
20580  int opcode;
20581
20582  buf = fragp->fr_literal + fragp->fr_fix;
20583
20584  old_op = bfd_get_16(abfd, buf);
20585  if (fragp->fr_symbol)
20586    {
20587      exp.X_op = O_symbol;
20588      exp.X_add_symbol = fragp->fr_symbol;
20589    }
20590  else
20591    {
20592      exp.X_op = O_constant;
20593    }
20594  exp.X_add_number = fragp->fr_offset;
20595  opcode = fragp->fr_subtype;
20596  switch (opcode)
20597    {
20598    case T_MNEM_ldr_pc:
20599    case T_MNEM_ldr_pc2:
20600    case T_MNEM_ldr_sp:
20601    case T_MNEM_str_sp:
20602    case T_MNEM_ldr:
20603    case T_MNEM_ldrb:
20604    case T_MNEM_ldrh:
20605    case T_MNEM_str:
20606    case T_MNEM_strb:
20607    case T_MNEM_strh:
20608      if (fragp->fr_var == 4)
20609	{
20610	  insn = THUMB_OP32 (opcode);
20611	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
20612	    {
20613	      insn |= (old_op & 0x700) << 4;
20614	    }
20615	  else
20616	    {
20617	      insn |= (old_op & 7) << 12;
20618	      insn |= (old_op & 0x38) << 13;
20619	    }
20620	  insn |= 0x00000c00;
20621	  put_thumb32_insn (buf, insn);
20622	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
20623	}
20624      else
20625	{
20626	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
20627	}
20628      pc_rel = (opcode == T_MNEM_ldr_pc2);
20629      break;
20630    case T_MNEM_adr:
20631      if (fragp->fr_var == 4)
20632	{
20633	  insn = THUMB_OP32 (opcode);
20634	  insn |= (old_op & 0xf0) << 4;
20635	  put_thumb32_insn (buf, insn);
20636	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
20637	}
20638      else
20639	{
20640	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
20641	  exp.X_add_number -= 4;
20642	}
20643      pc_rel = 1;
20644      break;
20645    case T_MNEM_mov:
20646    case T_MNEM_movs:
20647    case T_MNEM_cmp:
20648    case T_MNEM_cmn:
20649      if (fragp->fr_var == 4)
20650	{
20651	  int r0off = (opcode == T_MNEM_mov
20652		       || opcode == T_MNEM_movs) ? 0 : 8;
20653	  insn = THUMB_OP32 (opcode);
20654	  insn = (insn & 0xe1ffffff) | 0x10000000;
20655	  insn |= (old_op & 0x700) << r0off;
20656	  put_thumb32_insn (buf, insn);
20657	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
20658	}
20659      else
20660	{
20661	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
20662	}
20663      pc_rel = 0;
20664      break;
20665    case T_MNEM_b:
20666      if (fragp->fr_var == 4)
20667	{
20668	  insn = THUMB_OP32(opcode);
20669	  put_thumb32_insn (buf, insn);
20670	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
20671	}
20672      else
20673	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
20674      pc_rel = 1;
20675      break;
20676    case T_MNEM_bcond:
20677      if (fragp->fr_var == 4)
20678	{
20679	  insn = THUMB_OP32(opcode);
20680	  insn |= (old_op & 0xf00) << 14;
20681	  put_thumb32_insn (buf, insn);
20682	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
20683	}
20684      else
20685	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
20686      pc_rel = 1;
20687      break;
20688    case T_MNEM_add_sp:
20689    case T_MNEM_add_pc:
20690    case T_MNEM_inc_sp:
20691    case T_MNEM_dec_sp:
20692      if (fragp->fr_var == 4)
20693	{
20694	  /* ??? Choose between add and addw.  */
20695	  insn = THUMB_OP32 (opcode);
20696	  insn |= (old_op & 0xf0) << 4;
20697	  put_thumb32_insn (buf, insn);
20698	  if (opcode == T_MNEM_add_pc)
20699	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
20700	  else
20701	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
20702	}
20703      else
20704	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
20705      pc_rel = 0;
20706      break;
20707
20708    case T_MNEM_addi:
20709    case T_MNEM_addis:
20710    case T_MNEM_subi:
20711    case T_MNEM_subis:
20712      if (fragp->fr_var == 4)
20713	{
20714	  insn = THUMB_OP32 (opcode);
20715	  insn |= (old_op & 0xf0) << 4;
20716	  insn |= (old_op & 0xf) << 16;
20717	  put_thumb32_insn (buf, insn);
20718	  if (insn & (1 << 20))
20719	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
20720	  else
20721	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
20722	}
20723      else
20724	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
20725      pc_rel = 0;
20726      break;
20727    default:
20728      abort ();
20729    }
20730  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
20731		      (enum bfd_reloc_code_real) reloc_type);
20732  fixp->fx_file = fragp->fr_file;
20733  fixp->fx_line = fragp->fr_line;
20734  fragp->fr_fix += fragp->fr_var;
20735
20736  /* Set whether we use thumb-2 ISA based on final relaxation results.  */
20737  if (thumb_mode && fragp->fr_var == 4 && no_cpu_selected ()
20738      && !ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2))
20739    ARM_MERGE_FEATURE_SETS (arm_arch_used, thumb_arch_used, arm_ext_v6t2);
20740}
20741
20742/* Return the size of a relaxable immediate operand instruction.
20743   SHIFT and SIZE specify the form of the allowable immediate.  */
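/* For example (illustrative): Thumb LDR/STR of a word is relaxed with
   SIZE == 5 and SHIFT == 2, so only word-aligned offsets no larger than
   124 bytes keep the 16-bit encoding; anything else forces the 32-bit form.  */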
20744static int
20745relax_immediate (fragS *fragp, int size, int shift)
20746{
20747  offsetT offset;
20748  offsetT mask;
20749  offsetT low;
20750
20751  /* ??? Should be able to do better than this.  */
20752  if (fragp->fr_symbol)
20753    return 4;
20754
20755  low = (1 << shift) - 1;
20756  mask = (1 << (shift + size)) - (1 << shift);
20757  offset = fragp->fr_offset;
20758  /* Force misaligned offsets to 32-bit variant.  */
20759  if (offset & low)
20760    return 4;
20761  if (offset & ~mask)
20762    return 4;
20763  return 2;
20764}
20765
20766/* Get the address of a symbol during relaxation.  */
20767static addressT
20768relaxed_symbol_addr (fragS *fragp, long stretch)
20769{
20770  fragS *sym_frag;
20771  addressT addr;
20772  symbolS *sym;
20773
20774  sym = fragp->fr_symbol;
20775  sym_frag = symbol_get_frag (sym);
20776  know (S_GET_SEGMENT (sym) != absolute_section
20777	|| sym_frag == &zero_address_frag);
20778  addr = S_GET_VALUE (sym) + fragp->fr_offset;
20779
20780  /* If frag has yet to be reached on this pass, assume it will
20781     move by STRETCH just as we did.  If this is not so, it will
20782     be because some frag in between grows, and that will force
20783     another pass.  */
20784
20785  if (stretch != 0
20786      && sym_frag->relax_marker != fragp->relax_marker)
20787    {
20788      fragS *f;
20789
20790      /* Adjust stretch for any alignment frag.  Note that if we have
20791	 been expanding the earlier code, the symbol may be
20792	 defined in what appears to be an earlier frag.  FIXME:
20793	 This doesn't handle the fr_subtype field, which specifies
20794	 a maximum number of bytes to skip when doing an
20795	 alignment.  */
20796      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
20797	{
20798	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
20799	    {
20800	      if (stretch < 0)
20801		stretch = - ((- stretch)
20802			     & ~ ((1 << (int) f->fr_offset) - 1));
20803	      else
20804		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
20805	      if (stretch == 0)
20806		break;
20807	    }
20808	}
20809      if (f != NULL)
20810	addr += stretch;
20811    }
20812
20813  return addr;
20814}
20815
20816/* Return the size of a relaxable adr pseudo-instruction or PC-relative
20817   load.  */
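/* Illustrative note: the narrow forms only reach word-aligned targets lying
   from 0 to 1020 bytes after Align(PC, 4); anything else stays 32-bit.  */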
20818static int
20819relax_adr (fragS *fragp, asection *sec, long stretch)
20820{
20821  addressT addr;
20822  offsetT val;
20823
20824  /* Assume worst case for symbols not known to be in the same section.  */
20825  if (fragp->fr_symbol == NULL
20826      || !S_IS_DEFINED (fragp->fr_symbol)
20827      || sec != S_GET_SEGMENT (fragp->fr_symbol)
20828      || S_IS_WEAK (fragp->fr_symbol))
20829    return 4;
20830
20831  val = relaxed_symbol_addr (fragp, stretch);
20832  addr = fragp->fr_address + fragp->fr_fix;
20833  addr = (addr + 4) & ~3;
20834  /* Force misaligned targets to 32-bit variant.  */
20835  if (val & 3)
20836    return 4;
20837  val -= addr;
20838  if (val < 0 || val > 1020)
20839    return 4;
20840  return 2;
20841}
20842
20843/* Return the size of a relaxable add/sub immediate instruction.  */
20844static int
20845relax_addsub (fragS *fragp, asection *sec)
20846{
20847  char *buf;
20848  int op;
20849
20850  buf = fragp->fr_literal + fragp->fr_fix;
20851  op = bfd_get_16(sec->owner, buf);
20852  if ((op & 0xf) == ((op >> 4) & 0xf))
20853    return relax_immediate (fragp, 8, 0);
20854  else
20855    return relax_immediate (fragp, 3, 0);
20856}
20857
20858/* Return TRUE iff the definition of symbol S could be pre-empted
20859   (overridden) at link or load time.  */
20860static bfd_boolean
20861symbol_preemptible (symbolS *s)
20862{
20863  /* Weak symbols can always be pre-empted.  */
20864  if (S_IS_WEAK (s))
20865    return TRUE;
20866
20867  /* Non-global symbols cannot be pre-empted. */
20868  if (! S_IS_EXTERNAL (s))
20869    return FALSE;
20870
20871#ifdef OBJ_ELF
20872  /* In ELF, a global symbol can be marked protected, or private.  In that
20873     case it can't be pre-empted (other definitions in the same link unit
20874     would violate the ODR).  */
20875  if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
20876    return FALSE;
20877#endif
20878
20879  /* Other global symbols might be pre-empted.  */
20880  return TRUE;
20881}
20882
20883/* Return the size of a relaxable branch instruction.  BITS is the
20884   size of the offset field in the narrow instruction.  */
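/* Illustrative note: with BITS == 11 (unconditional Thumb B) the narrow form
   reaches roughly +/-2 KB from the instruction address plus 4; with BITS == 8
   (conditional branches) roughly +/-256 bytes.  Preemptible or interworking
   targets always keep the 32-bit encoding.  */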
20885
20886static int
20887relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
20888{
20889  addressT addr;
20890  offsetT val;
20891  offsetT limit;
20892
20893  /* Assume worst case for symbols not known to be in the same section.  */
20894  if (!S_IS_DEFINED (fragp->fr_symbol)
20895      || sec != S_GET_SEGMENT (fragp->fr_symbol)
20896      || S_IS_WEAK (fragp->fr_symbol))
20897    return 4;
20898
20899#ifdef OBJ_ELF
20900  /* A branch to a function in ARM state will require interworking.  */
20901  if (S_IS_DEFINED (fragp->fr_symbol)
20902      && ARM_IS_FUNC (fragp->fr_symbol))
20903      return 4;
20904#endif
20905
20906  if (symbol_preemptible (fragp->fr_symbol))
20907    return 4;
20908
20909  val = relaxed_symbol_addr (fragp, stretch);
20910  addr = fragp->fr_address + fragp->fr_fix + 4;
20911  val -= addr;
20912
20913  /* The offset is encoded as a signed halfword count, i.e. the byte offset / 2.  */
20914  limit = 1 << bits;
20915  if (val >= limit || val < -limit)
20916    return 4;
20917  return 2;
20918}
20919
20920
20921/* Relax a machine dependent frag.  This returns the amount by which
20922   the current size of the frag should change.  */
20923
20924int
20925arm_relax_frag (asection *sec, fragS *fragp, long stretch)
20926{
20927  int oldsize;
20928  int newsize;
20929
20930  oldsize = fragp->fr_var;
20931  switch (fragp->fr_subtype)
20932    {
20933    case T_MNEM_ldr_pc2:
20934      newsize = relax_adr (fragp, sec, stretch);
20935      break;
20936    case T_MNEM_ldr_pc:
20937    case T_MNEM_ldr_sp:
20938    case T_MNEM_str_sp:
20939      newsize = relax_immediate (fragp, 8, 2);
20940      break;
20941    case T_MNEM_ldr:
20942    case T_MNEM_str:
20943      newsize = relax_immediate (fragp, 5, 2);
20944      break;
20945    case T_MNEM_ldrh:
20946    case T_MNEM_strh:
20947      newsize = relax_immediate (fragp, 5, 1);
20948      break;
20949    case T_MNEM_ldrb:
20950    case T_MNEM_strb:
20951      newsize = relax_immediate (fragp, 5, 0);
20952      break;
20953    case T_MNEM_adr:
20954      newsize = relax_adr (fragp, sec, stretch);
20955      break;
20956    case T_MNEM_mov:
20957    case T_MNEM_movs:
20958    case T_MNEM_cmp:
20959    case T_MNEM_cmn:
20960      newsize = relax_immediate (fragp, 8, 0);
20961      break;
20962    case T_MNEM_b:
20963      newsize = relax_branch (fragp, sec, 11, stretch);
20964      break;
20965    case T_MNEM_bcond:
20966      newsize = relax_branch (fragp, sec, 8, stretch);
20967      break;
20968    case T_MNEM_add_sp:
20969    case T_MNEM_add_pc:
20970      newsize = relax_immediate (fragp, 8, 2);
20971      break;
20972    case T_MNEM_inc_sp:
20973    case T_MNEM_dec_sp:
20974      newsize = relax_immediate (fragp, 7, 2);
20975      break;
20976    case T_MNEM_addi:
20977    case T_MNEM_addis:
20978    case T_MNEM_subi:
20979    case T_MNEM_subis:
20980      newsize = relax_addsub (fragp, sec);
20981      break;
20982    default:
20983      abort ();
20984    }
20985
20986  fragp->fr_var = newsize;
20987  /* Freeze wide instructions that are at or before the same location as
20988     in the previous pass.  This avoids infinite loops.
20989     Don't freeze them unconditionally because targets may be artificially
20990     misaligned by the expansion of preceding frags.  */
20991  if (stretch <= 0 && newsize > 2)
20992    {
20993      md_convert_frag (sec->owner, sec, fragp);
20994      frag_wane (fragp);
20995    }
20996
20997  return newsize - oldsize;
20998}
20999
21000/* Round up a section size to the appropriate boundary.	 */
21001
21002valueT
21003md_section_align (segT	 segment ATTRIBUTE_UNUSED,
21004		  valueT size)
21005{
21006#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
21007  if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
21008    {
21009      /* For a.out, force the section size to be aligned.  If we don't do
21010	 this, BFD will align it for us, but it will not write out the
21011	 final bytes of the section.  This may be a bug in BFD, but it is
21012	 easier to fix it here since that is how the other a.out targets
21013	 work.  */
21014      int align;
21015
21016      align = bfd_get_section_alignment (stdoutput, segment);
21017      size = ((size + (1 << align) - 1) & (-((valueT) 1 << align)));
21018    }
21019#endif
21020
21021  return size;
21022}
21023
21024/* This is called from HANDLE_ALIGN in write.c.	 Fill in the contents
21025   of an rs_align_code fragment.  */
21026
21027void
21028arm_handle_align (fragS * fragP)
21029{
21030  static char const arm_noop[2][2][4] =
21031    {
21032      {  /* ARMv1 */
21033	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
21034	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
21035      },
21036      {  /* ARMv6k */
21037	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
21038	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
21039      },
21040    };
21041  static char const thumb_noop[2][2][2] =
21042    {
21043      {  /* Thumb-1 */
21044	{0xc0, 0x46},  /* LE */
21045	{0x46, 0xc0},  /* BE */
21046      },
21047      {  /* Thumb-2 */
21048	{0x00, 0xbf},  /* LE */
21049	{0xbf, 0x00}   /* BE */
21050      }
21051    };
21052  static char const wide_thumb_noop[2][4] =
21053    {  /* Wide Thumb-2 */
21054      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
21055      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
21056    };
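  /* For reference: the ARM patterns above are "mov r0, r0" (0xe1a00000) and
     the ARMv6K+ NOP hint (0xe320f000); the Thumb patterns are "mov r8, r8"
     (0x46c0), the Thumb-2 NOP (0xbf00) and the wide NOP.W (0xf3af 0x8000).  */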
21057
21058  unsigned bytes, fix, noop_size;
21059  char * p;
21060  const char * noop;
21061  const char *narrow_noop = NULL;
21062#ifdef OBJ_ELF
21063  enum mstate state;
21064#endif
21065
21066  if (fragP->fr_type != rs_align_code)
21067    return;
21068
21069  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
21070  p = fragP->fr_literal + fragP->fr_fix;
21071  fix = 0;
21072
21073  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
21074    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
21075
21076  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);
21077
21078  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
21079    {
21080      if (ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
21081			       ? selected_cpu : arm_arch_none, arm_ext_v6t2))
21082	{
21083	  narrow_noop = thumb_noop[1][target_big_endian];
21084	  noop = wide_thumb_noop[target_big_endian];
21085	}
21086      else
21087	noop = thumb_noop[0][target_big_endian];
21088      noop_size = 2;
21089#ifdef OBJ_ELF
21090      state = MAP_THUMB;
21091#endif
21092    }
21093  else
21094    {
21095      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
21096					   ? selected_cpu : arm_arch_none,
21097					   arm_ext_v6k) != 0]
21098		     [target_big_endian];
21099      noop_size = 4;
21100#ifdef OBJ_ELF
21101      state = MAP_ARM;
21102#endif
21103    }
21104
21105  fragP->fr_var = noop_size;
21106
21107  if (bytes & (noop_size - 1))
21108    {
21109      fix = bytes & (noop_size - 1);
21110#ifdef OBJ_ELF
21111      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
21112#endif
21113      memset (p, 0, fix);
21114      p += fix;
21115      bytes -= fix;
21116    }
21117
21118  if (narrow_noop)
21119    {
21120      if (bytes & noop_size)
21121	{
21122	  /* Insert a narrow noop.  */
21123	  memcpy (p, narrow_noop, noop_size);
21124	  p += noop_size;
21125	  bytes -= noop_size;
21126	  fix += noop_size;
21127	}
21128
21129      /* Use wide noops for the remainder.  */
21130      noop_size = 4;
21131    }
21132
21133  while (bytes >= noop_size)
21134    {
21135      memcpy (p, noop, noop_size);
21136      p += noop_size;
21137      bytes -= noop_size;
21138      fix += noop_size;
21139    }
21140
21141  fragP->fr_fix += fix;
21142}
21143
21144/* Called from md_do_align.  Used to create an alignment
21145   frag in a code section.  */
21146
21147void
21148arm_frag_align_code (int n, int max)
21149{
21150  char * p;
21151
21152  /* We assume that there will never be a requirement
21153     to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes.  */
21154  if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
21155    {
21156      char err_msg[128];
21157
21158      sprintf (err_msg,
21159	_("alignments greater than %d bytes not supported in .text sections."),
21160	MAX_MEM_FOR_RS_ALIGN_CODE + 1);
21161      as_fatal ("%s", err_msg);
21162    }
21163
21164  p = frag_var (rs_align_code,
21165		MAX_MEM_FOR_RS_ALIGN_CODE,
21166		1,
21167		(relax_substateT) max,
21168		(symbolS *) NULL,
21169		(offsetT) n,
21170		(char *) NULL);
21171  *p = 0;
21172}
21173
21174/* Perform target specific initialisation of a frag.
21175   Note - despite the name this initialisation is not done when the frag
21176   is created, but only when its type is assigned.  A frag can be created
21177   and used a long time before its type is set, so beware of assuming that
21178   this initialisation is performed first.  */
21179
21180#ifndef OBJ_ELF
21181void
21182arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
21183{
21184  /* Record whether this frag is in an ARM or a THUMB area.  */
21185  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
21186}
21187
21188#else /* OBJ_ELF is defined.  */
21189void
21190arm_init_frag (fragS * fragP, int max_chars)
21191{
21192  int frag_thumb_mode;
21193
21194  /* If the current ARM vs THUMB mode has not already
21195     been recorded into this frag then do so now.  */
21196  if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
21197    fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
21198
21199  frag_thumb_mode = fragP->tc_frag_data.thumb_mode ^ MODE_RECORDED;
21200
21201  /* Record a mapping symbol for alignment frags.  We will delete this
21202     later if the alignment ends up empty.  */
21203  switch (fragP->fr_type)
21204    {
21205    case rs_align:
21206    case rs_align_test:
21207    case rs_fill:
21208      mapping_state_2 (MAP_DATA, max_chars);
21209      break;
21210    case rs_align_code:
21211      mapping_state_2 (frag_thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
21212      break;
21213    default:
21214      break;
21215    }
21216}
21217
21218/* When we change sections we need to issue a new mapping symbol.  */
21219
21220void
21221arm_elf_change_section (void)
21222{
21223  /* Link an unlinked unwind index table section to the .text section.	*/
21224  if (elf_section_type (now_seg) == SHT_ARM_EXIDX
21225      && elf_linked_to_section (now_seg) == NULL)
21226    elf_linked_to_section (now_seg) = text_section;
21227}
21228
21229int
21230arm_elf_section_type (const char * str, size_t len)
21231{
21232  if (len == 5 && strncmp (str, "exidx", 5) == 0)
21233    return SHT_ARM_EXIDX;
21234
21235  return -1;
21236}
21237
21238/* Code to deal with unwinding tables.	*/
21239
21240static void add_unwind_adjustsp (offsetT);
21241
21242/* Generate any deferred unwind frame offset.  */
21243
21244static void
21245flush_pending_unwind (void)
21246{
21247  offsetT offset;
21248
21249  offset = unwind.pending_offset;
21250  unwind.pending_offset = 0;
21251  if (offset != 0)
21252    add_unwind_adjustsp (offset);
21253}
21254
21255/* Add an opcode to this list for this function.  Two-byte opcodes should
21256   be passed as op[0] << 8 | op[1].  The list of opcodes is built in reverse
21257   order.  */
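/* For example (illustrative): the byte pair 0x84, 0x0f is passed as 0x840f
   with LENGTH == 2; the loop below stores 0x0f first and 0x84 second, so the
   bytes come out as 0x84 0x0f once the list is reversed into the table.  */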
21258
21259static void
21260add_unwind_opcode (valueT op, int length)
21261{
21262  /* Add any deferred stack adjustment.	 */
21263  if (unwind.pending_offset)
21264    flush_pending_unwind ();
21265
21266  unwind.sp_restored = 0;
21267
21268  if (unwind.opcode_count + length > unwind.opcode_alloc)
21269    {
21270      unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
21271      if (unwind.opcodes)
21272	unwind.opcodes = (unsigned char *) xrealloc (unwind.opcodes,
21273						     unwind.opcode_alloc);
21274      else
21275	unwind.opcodes = (unsigned char *) xmalloc (unwind.opcode_alloc);
21276    }
21277  while (length > 0)
21278    {
21279      length--;
21280      unwind.opcodes[unwind.opcode_count] = op & 0xff;
21281      op >>= 8;
21282      unwind.opcode_count++;
21283    }
21284}
21285
21286/* Add unwind opcodes to adjust the stack pointer.  */
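/* Worked example (illustrative): an adjustment of 0x2204 bytes takes the long
   form: (0x2204 - 0x204) >> 2 == 0x800, whose uleb128 encoding is 0x80 0x10,
   so the bytes that reach the table are 0xb2 0x80 0x10 (the opcode list is
   built in reverse order).  */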
21287
21288static void
21289add_unwind_adjustsp (offsetT offset)
21290{
21291  valueT op;
21292
21293  if (offset > 0x200)
21294    {
21295      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
21296      char bytes[5];
21297      int n;
21298      valueT o;
21299
21300      /* Long form: 0xb2, uleb128.  */
21301      /* This might not fit in a word so add the individual bytes,
21302	 remembering the list is built in reverse order.  */
21303      o = (valueT) ((offset - 0x204) >> 2);
21304      if (o == 0)
21305	add_unwind_opcode (0, 1);
21306
21307      /* Calculate the uleb128 encoding of the offset.	*/
21308      n = 0;
21309      while (o)
21310	{
21311	  bytes[n] = o & 0x7f;
21312	  o >>= 7;
21313	  if (o)
21314	    bytes[n] |= 0x80;
21315	  n++;
21316	}
21317      /* Add the insn.	*/
21318      for (; n; n--)
21319	add_unwind_opcode (bytes[n - 1], 1);
21320      add_unwind_opcode (0xb2, 1);
21321    }
21322  else if (offset > 0x100)
21323    {
21324      /* Two short opcodes.  */
21325      add_unwind_opcode (0x3f, 1);
21326      op = (offset - 0x104) >> 2;
21327      add_unwind_opcode (op, 1);
21328    }
21329  else if (offset > 0)
21330    {
21331      /* Short opcode.	*/
21332      op = (offset - 4) >> 2;
21333      add_unwind_opcode (op, 1);
21334    }
21335  else if (offset < 0)
21336    {
21337      offset = -offset;
21338      while (offset > 0x100)
21339	{
21340	  add_unwind_opcode (0x7f, 1);
21341	  offset -= 0x100;
21342	}
21343      op = ((offset - 4) >> 2) | 0x40;
21344      add_unwind_opcode (op, 1);
21345    }
21346}
21347
21348/* Finish the list of unwind opcodes for this function.	 */
21349static void
21350finish_unwind_opcodes (void)
21351{
21352  valueT op;
21353
21354  if (unwind.fp_used)
21355    {
21356      /* Adjust sp as necessary.  */
21357      unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
21358      flush_pending_unwind ();
21359
21360      /* Emit the opcode that restores sp from the frame pointer register.  */
21361      op = 0x90 | unwind.fp_reg;
21362      add_unwind_opcode (op, 1);
21363    }
21364  else
21365    flush_pending_unwind ();
21366}
21367
21368
21369/* Start an exception table entry.  If idx is nonzero this is an index table
21370   entry.  */
21371
21372static void
21373start_unwind_section (const segT text_seg, int idx)
21374{
21375  const char * text_name;
21376  const char * prefix;
21377  const char * prefix_once;
21378  const char * group_name;
21379  size_t prefix_len;
21380  size_t text_len;
21381  char * sec_name;
21382  size_t sec_name_len;
21383  int type;
21384  int flags;
21385  int linkonce;
21386
21387  if (idx)
21388    {
21389      prefix = ELF_STRING_ARM_unwind;
21390      prefix_once = ELF_STRING_ARM_unwind_once;
21391      type = SHT_ARM_EXIDX;
21392    }
21393  else
21394    {
21395      prefix = ELF_STRING_ARM_unwind_info;
21396      prefix_once = ELF_STRING_ARM_unwind_info_once;
21397      type = SHT_PROGBITS;
21398    }
21399
21400  text_name = segment_name (text_seg);
21401  if (streq (text_name, ".text"))
21402    text_name = "";
21403
21404  if (strncmp (text_name, ".gnu.linkonce.t.",
21405	       strlen (".gnu.linkonce.t.")) == 0)
21406    {
21407      prefix = prefix_once;
21408      text_name += strlen (".gnu.linkonce.t.");
21409    }
21410
21411  prefix_len = strlen (prefix);
21412  text_len = strlen (text_name);
21413  sec_name_len = prefix_len + text_len;
21414  sec_name = (char *) xmalloc (sec_name_len + 1);
21415  memcpy (sec_name, prefix, prefix_len);
21416  memcpy (sec_name + prefix_len, text_name, text_len);
21417  sec_name[prefix_len + text_len] = '\0';
21418
21419  flags = SHF_ALLOC;
21420  linkonce = 0;
21421  group_name = 0;
21422
21423  /* Handle COMDAT group.  */
21424  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
21425    {
21426      group_name = elf_group_name (text_seg);
21427      if (group_name == NULL)
21428	{
21429	  as_bad (_("Group section `%s' has no group signature"),
21430		  segment_name (text_seg));
21431	  ignore_rest_of_line ();
21432	  return;
21433	}
21434      flags |= SHF_GROUP;
21435      linkonce = 1;
21436    }
21437
21438  obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
21439
21440  /* Set the section link for index tables.  */
21441  if (idx)
21442    elf_linked_to_section (now_seg) = text_seg;
21443}
21444
21445
21446/* Start an unwind table entry.	 HAVE_DATA is nonzero if we have additional
21447   personality routine data.  Returns zero, or the index table value for
21448   an inline entry.  */
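/* For example (illustrative): a frame with no handler data, the default
   personality routine 0 and a single "finish" (0xb0) opcode folds entirely
   into the index table as the inline word 0x80b0b0b0.  */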
21449
21450static valueT
21451create_unwind_entry (int have_data)
21452{
21453  int size;
21454  addressT where;
21455  char *ptr;
21456  /* The current word of data.	*/
21457  valueT data;
21458  /* The number of bytes left in this word.  */
21459  int n;
21460
21461  finish_unwind_opcodes ();
21462
21463  /* Remember the current text section.	 */
21464  unwind.saved_seg = now_seg;
21465  unwind.saved_subseg = now_subseg;
21466
21467  start_unwind_section (now_seg, 0);
21468
21469  if (unwind.personality_routine == NULL)
21470    {
21471      if (unwind.personality_index == -2)
21472	{
21473	  if (have_data)
21474	    as_bad (_("handlerdata in cantunwind frame"));
21475	  return 1; /* EXIDX_CANTUNWIND.  */
21476	}
21477
21478      /* Use a default personality routine if none is specified.  */
21479      if (unwind.personality_index == -1)
21480	{
21481	  if (unwind.opcode_count > 3)
21482	    unwind.personality_index = 1;
21483	  else
21484	    unwind.personality_index = 0;
21485	}
21486
21487      /* Space for the personality routine entry.  */
21488      if (unwind.personality_index == 0)
21489	{
21490	  if (unwind.opcode_count > 3)
21491	    as_bad (_("too many unwind opcodes for personality routine 0"));
21492
21493	  if (!have_data)
21494	    {
21495	      /* All the data is inline in the index table.  */
21496	      data = 0x80;
21497	      n = 3;
21498	      while (unwind.opcode_count > 0)
21499		{
21500		  unwind.opcode_count--;
21501		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
21502		  n--;
21503		}
21504
21505	      /* Pad with "finish" opcodes.  */
21506	      while (n--)
21507		data = (data << 8) | 0xb0;
21508
21509	      return data;
21510	    }
21511	  size = 0;
21512	}
21513      else
21514	/* We get two opcodes "free" in the first word.	 */
21515	size = unwind.opcode_count - 2;
21516    }
21517  else
21518    {
21519      /* PR 16765: Missing or misplaced unwind directives can trigger this.  */
21520      if (unwind.personality_index != -1)
21521	{
21522	  as_bad (_("attempt to recreate an unwind entry"));
21523	  return 1;
21524	}
21525
21526      /* An extra byte is required for the opcode count.	*/
21527      size = unwind.opcode_count + 1;
21528    }
21529
21530  size = (size + 3) >> 2;
21531  if (size > 0xff)
21532    as_bad (_("too many unwind opcodes"));
21533
21534  frag_align (2, 0, 0);
21535  record_alignment (now_seg, 2);
21536  unwind.table_entry = expr_build_dot ();
21537
21538  /* Allocate the table entry.	*/
21539  ptr = frag_more ((size << 2) + 4);
21540  /* PR 13449: Zero the table entries in case some of them are not used.  */
21541  memset (ptr, 0, (size << 2) + 4);
21542  where = frag_now_fix () - ((size << 2) + 4);
21543
21544  switch (unwind.personality_index)
21545    {
21546    case -1:
21547      /* ??? Should this be a PLT generating relocation?  */
21548      /* Custom personality routine.  */
21549      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
21550	       BFD_RELOC_ARM_PREL31);
21551
21552      where += 4;
21553      ptr += 4;
21554
21555      /* Set the first byte to the number of additional words.	*/
21556      data = size > 0 ? size - 1 : 0;
21557      n = 3;
21558      break;
21559
21560    /* ABI defined personality routines.  */
21561    case 0:
21562      /* Three opcode bytes are packed into the first word.  */
21563      data = 0x80;
21564      n = 3;
21565      break;
21566
21567    case 1:
21568    case 2:
21569      /* The size and first two opcode bytes go in the first word.  */
21570      data = ((0x80 + unwind.personality_index) << 8) | size;
21571      n = 2;
21572      break;
21573
21574    default:
21575      /* Should never happen.  */
21576      abort ();
21577    }
21578
21579  /* Pack the opcodes into words (MSB first), reversing the list at the same
21580     time.  */
21581  while (unwind.opcode_count > 0)
21582    {
21583      if (n == 0)
21584	{
21585	  md_number_to_chars (ptr, data, 4);
21586	  ptr += 4;
21587	  n = 4;
21588	  data = 0;
21589	}
21590      unwind.opcode_count--;
21591      n--;
21592      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
21593    }
21594
21595  /* Finish off the last word.	*/
21596  if (n < 4)
21597    {
21598      /* Pad with "finish" opcodes.  */
21599      while (n--)
21600	data = (data << 8) | 0xb0;
21601
21602      md_number_to_chars (ptr, data, 4);
21603    }
21604
21605  if (!have_data)
21606    {
21607      /* Add an empty descriptor if there is no user-specified data.   */
21608      ptr = frag_more (4);
21609      md_number_to_chars (ptr, 0, 4);
21610    }
21611
21612  return 0;
21613}
21614
21615
21616/* Initialize the DWARF-2 unwind information for this procedure.  */
21617
21618void
21619tc_arm_frame_initial_instructions (void)
21620{
21621  cfi_add_CFA_def_cfa (REG_SP, 0);
21622}
21623#endif /* OBJ_ELF */
21624
21625/* Convert REGNAME to a DWARF-2 register number.  */
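/* Summary of the mapping used below: core registers map to 0-15,
   single-precision VFP registers to 64 upwards and double-precision VFP
   registers to 256 upwards, following the DWARF register numbering for ARM.  */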
21626
21627int
21628tc_arm_regname_to_dw2regnum (char *regname)
21629{
21630  int reg = arm_reg_parse (&regname, REG_TYPE_RN);
21631  if (reg != FAIL)
21632    return reg;
21633
21634  /* PR 16694: Allow VFP registers as well.  */
21635  reg = arm_reg_parse (&regname, REG_TYPE_VFS);
21636  if (reg != FAIL)
21637    return 64 + reg;
21638
21639  reg = arm_reg_parse (&regname, REG_TYPE_VFD);
21640  if (reg != FAIL)
21641    return reg + 256;
21642
21643  return -1;
21644}
21645
21646#ifdef TE_PE
21647void
21648tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
21649{
21650  expressionS exp;
21651
21652  exp.X_op = O_secrel;
21653  exp.X_add_symbol = symbol;
21654  exp.X_add_number = 0;
21655  emit_expr (&exp, size);
21656}
21657#endif
21658
21659/* MD interface: Symbol and relocation handling.  */
21660
21661/* Return the address within the segment that a PC-relative fixup is
21662   relative to.  For ARM, PC-relative fixups applied to instructions
21663   are generally relative to the location of the fixup plus 8 bytes.
21664   Thumb branches are offset by 4, and Thumb loads relative to PC
21665   require special handling.  */
21666
21667long
21668md_pcrel_from_section (fixS * fixP, segT seg)
21669{
21670  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
21671
21672  /* If this is pc-relative and we are going to emit a relocation
21673     then we just want to put out any pipeline compensation that the linker
21674     will need.  Otherwise we want to use the calculated base.
21675     For WinCE we skip the bias for externals as well, since this
21676     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
21677  if (fixP->fx_pcrel
21678      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
21679	  || (arm_force_relocation (fixP)
21680#ifdef TE_WINCE
21681	      && !S_IS_EXTERNAL (fixP->fx_addsy)
21682#endif
21683	      )))
21684    base = 0;
21685
21686
21687  switch (fixP->fx_r_type)
21688    {
21689      /* PC relative addressing on the Thumb is slightly odd as the
21690	 bottom two bits of the PC are forced to zero for the
21691	 calculation.  This happens *after* application of the
21692	 pipeline offset.  However, Thumb adrl already adjusts for
21693	 this, so we need not do it again.  */
21694    case BFD_RELOC_ARM_THUMB_ADD:
21695      return base & ~3;
21696
21697    case BFD_RELOC_ARM_THUMB_OFFSET:
21698    case BFD_RELOC_ARM_T32_OFFSET_IMM:
21699    case BFD_RELOC_ARM_T32_ADD_PC12:
21700    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
21701      return (base + 4) & ~3;
21702
21703      /* Thumb branches are simply offset by +4.  */
21704    case BFD_RELOC_THUMB_PCREL_BRANCH7:
21705    case BFD_RELOC_THUMB_PCREL_BRANCH9:
21706    case BFD_RELOC_THUMB_PCREL_BRANCH12:
21707    case BFD_RELOC_THUMB_PCREL_BRANCH20:
21708    case BFD_RELOC_THUMB_PCREL_BRANCH25:
21709      return base + 4;
21710
21711    case BFD_RELOC_THUMB_PCREL_BRANCH23:
21712      if (fixP->fx_addsy
21713	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21714	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21715	  && ARM_IS_FUNC (fixP->fx_addsy)
21716	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
21717	base = fixP->fx_where + fixP->fx_frag->fr_address;
21718       return base + 4;
21719
21720      /* BLX is like branches above, but forces the low two bits of PC to
21721	 zero.  */
21722    case BFD_RELOC_THUMB_PCREL_BLX:
21723      if (fixP->fx_addsy
21724	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21725	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21726	  && THUMB_IS_FUNC (fixP->fx_addsy)
21727	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
21728	base = fixP->fx_where + fixP->fx_frag->fr_address;
21729      return (base + 4) & ~3;
21730
21731      /* ARM mode branches are offset by +8.  However, the Windows CE
21732	 loader expects the relocation not to take this into account.  */
21733    case BFD_RELOC_ARM_PCREL_BLX:
21734      if (fixP->fx_addsy
21735	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21736	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21737	  && ARM_IS_FUNC (fixP->fx_addsy)
21738	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
21739	base = fixP->fx_where + fixP->fx_frag->fr_address;
21740      return base + 8;
21741
21742    case BFD_RELOC_ARM_PCREL_CALL:
21743      if (fixP->fx_addsy
21744	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21745	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21746	  && THUMB_IS_FUNC (fixP->fx_addsy)
21747	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
21748	base = fixP->fx_where + fixP->fx_frag->fr_address;
21749      return base + 8;
21750
21751    case BFD_RELOC_ARM_PCREL_BRANCH:
21752    case BFD_RELOC_ARM_PCREL_JUMP:
21753    case BFD_RELOC_ARM_PLT32:
21754#ifdef TE_WINCE
21755      /* When handling a fixup immediately (because we have already
21756	 discovered the value of the symbol, or the address of the frag
21757	 involved), we must account for the +8 offset ourselves, as the OS
21758	 loader will never see the reloc; see fixup_segment() in write.c.
21759	 The S_IS_EXTERNAL test handles the case of global symbols: those
21760	 need the calculated base, not just the pipeline compensation that the linker applies.  */
21761      if (fixP->fx_pcrel
21762	  && fixP->fx_addsy != NULL
21763	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21764	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
21765	return base + 8;
21766      return base;
21767#else
21768      return base + 8;
21769#endif
21770
21771
21772      /* ARM mode loads relative to PC are also offset by +8.  Unlike
21773	 branches, the Windows CE loader *does* expect the relocation
21774	 to take this into account.  */
21775    case BFD_RELOC_ARM_OFFSET_IMM:
21776    case BFD_RELOC_ARM_OFFSET_IMM8:
21777    case BFD_RELOC_ARM_HWLITERAL:
21778    case BFD_RELOC_ARM_LITERAL:
21779    case BFD_RELOC_ARM_CP_OFF_IMM:
21780      return base + 8;
21781
21782
21783      /* Other PC-relative relocations are un-offset.  */
21784    default:
21785      return base;
21786    }
21787}
21788
21789static bfd_boolean flag_warn_syms = TRUE;
21790
21791bfd_boolean
21792arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED, char * name)
21793{
21794  /* PR 18347 - Warn if the user attempts to create a symbol with the same
21795     name as an ARM instruction.  Whilst strictly speaking it is allowed, it
21796     does mean that the resulting code might be very confusing to the reader.
21797     Also this warning can be triggered if the user omits an operand before
21798     an immediate address, e.g.:
21799
21800       LDR =foo
21801
21802     GAS treats this as an assignment of the value of the symbol foo to a
21803     symbol LDR, and so (without this code) it will not issue any kind of
21804     warning or error message.
21805
21806     Note - ARM instructions are case-insensitive but the strings in the hash
21807     table are all stored in lower case, so we must first ensure that name is
21808     lower case too.  */
21809  if (flag_warn_syms && arm_ops_hsh)
21810    {
21811      char * nbuf = strdup (name);
21812      char * p;
21813
21814      for (p = nbuf; *p; p++)
21815	*p = TOLOWER (*p);
21816      if (hash_find (arm_ops_hsh, nbuf) != NULL)
21817	{
21818	  static struct hash_control * already_warned = NULL;
21819
21820	  if (already_warned == NULL)
21821	    already_warned = hash_new ();
21822	  /* Only warn about the symbol once.  To keep the code
21823	     simple we let hash_insert do the lookup for us.  */
21824	  if (hash_insert (already_warned, name, NULL) == NULL)
21825	    as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name);
21826	}
21827      else
21828	free (nbuf);
21829    }
21830
21831  return FALSE;
21832}
21833
21834/* Under ELF we need to provide a default value for the symbol
21835   _GLOBAL_OFFSET_TABLE_.  Otherwise we have no need to give symbols default values.  */
21836
21837symbolS *
21838md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
21839{
21840#ifdef OBJ_ELF
21841  if (name[0] == '_' && name[1] == 'G'
21842      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
21843    {
21844      if (!GOT_symbol)
21845	{
21846	  if (symbol_find (name))
21847	    as_bad (_("GOT already in the symbol table"));
21848
21849	  GOT_symbol = symbol_new (name, undefined_section,
21850				   (valueT) 0, & zero_address_frag);
21851	}
21852
21853      return GOT_symbol;
21854    }
21855#endif
21856
21857  return NULL;
21858}
21859
21860/* Subroutine of md_apply_fix.	 Check to see if an immediate can be
21861   computed as two separate immediate values, added together.  We
21862   already know that this value cannot be computed by just one ARM
21863   instruction.	 */
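/* For example (illustrative): 0xfff is not a valid ARM immediate, but it can
   be split into 0xff + 0xf00, both of which are; the low part's encoding is
   returned and the high part's encoding is stored in *HIGHPART.  */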
21864
21865static unsigned int
21866validate_immediate_twopart (unsigned int   val,
21867			    unsigned int * highpart)
21868{
21869  unsigned int a;
21870  unsigned int i;
21871
21872  for (i = 0; i < 32; i += 2)
21873    if (((a = rotate_left (val, i)) & 0xff) != 0)
21874      {
21875	if (a & 0xff00)
21876	  {
21877	    if (a & ~ 0xffff)
21878	      continue;
21879	    * highpart = (a  >> 8) | ((i + 24) << 7);
21880	  }
21881	else if (a & 0xff0000)
21882	  {
21883	    if (a & 0xff000000)
21884	      continue;
21885	    * highpart = (a >> 16) | ((i + 16) << 7);
21886	  }
21887	else
21888	  {
21889	    gas_assert (a & 0xff000000);
21890	    * highpart = (a >> 24) | ((i + 8) << 7);
21891	  }
21892
21893	return (a & 0xff) | (i << 7);
21894      }
21895
21896  return FAIL;
21897}
21898
21899static int
21900validate_offset_imm (unsigned int val, int hwse)
21901{
21902  if ((hwse && val > 255) || val > 4095)
21903    return FAIL;
21904  return val;
21905}
21906
21907/* Subroutine of md_apply_fix.	 Do those data_ops which can take a
21908   negative immediate constant by altering the instruction.  A bit of
21909   a hack really.
21910	MOV <-> MVN
21911	AND <-> BIC
21912	ADC <-> SBC
21913	by inverting the second operand, and
21914	ADD <-> SUB
21915	CMP <-> CMN
21916	by negating the second operand.	 */
21917
21918static int
21919negate_data_op (unsigned long * instruction,
21920		unsigned long	value)
21921{
21922  int op, new_inst;
21923  unsigned long negated, inverted;
21924
21925  negated = encode_arm_immediate (-value);
21926  inverted = encode_arm_immediate (~value);
21927
21928  op = (*instruction >> DATA_OP_SHIFT) & 0xf;
21929  switch (op)
21930    {
21931      /* First negates.	 */
21932    case OPCODE_SUB:		 /* ADD <-> SUB	 */
21933      new_inst = OPCODE_ADD;
21934      value = negated;
21935      break;
21936
21937    case OPCODE_ADD:
21938      new_inst = OPCODE_SUB;
21939      value = negated;
21940      break;
21941
21942    case OPCODE_CMP:		 /* CMP <-> CMN	 */
21943      new_inst = OPCODE_CMN;
21944      value = negated;
21945      break;
21946
21947    case OPCODE_CMN:
21948      new_inst = OPCODE_CMP;
21949      value = negated;
21950      break;
21951
21952      /* Now Inverted ops.  */
21953    case OPCODE_MOV:		 /* MOV <-> MVN	 */
21954      new_inst = OPCODE_MVN;
21955      value = inverted;
21956      break;
21957
21958    case OPCODE_MVN:
21959      new_inst = OPCODE_MOV;
21960      value = inverted;
21961      break;
21962
21963    case OPCODE_AND:		 /* AND <-> BIC	 */
21964      new_inst = OPCODE_BIC;
21965      value = inverted;
21966      break;
21967
21968    case OPCODE_BIC:
21969      new_inst = OPCODE_AND;
21970      value = inverted;
21971      break;
21972
21973    case OPCODE_ADC:		  /* ADC <-> SBC  */
21974      new_inst = OPCODE_SBC;
21975      value = inverted;
21976      break;
21977
21978    case OPCODE_SBC:
21979      new_inst = OPCODE_ADC;
21980      value = inverted;
21981      break;
21982
21983      /* We cannot do anything.	 */
21984    default:
21985      return FAIL;
21986    }
21987
21988  if (value == (unsigned) FAIL)
21989    return FAIL;
21990
21991  *instruction &= OPCODE_MASK;
21992  *instruction |= new_inst << DATA_OP_SHIFT;
21993  return value;
21994}
21995
21996/* Like negate_data_op, but for Thumb-2.   */
21997
21998static unsigned int
21999thumb32_negate_data_op (offsetT *instruction, unsigned int value)
22000{
22001  int op, new_inst;
22002  int rd;
22003  unsigned int negated, inverted;
22004
22005  negated = encode_thumb32_immediate (-value);
22006  inverted = encode_thumb32_immediate (~value);
22007
22008  rd = (*instruction >> 8) & 0xf;
22009  op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
22010  switch (op)
22011    {
22012      /* ADD <-> SUB.  Includes CMP <-> CMN.  */
22013    case T2_OPCODE_SUB:
22014      new_inst = T2_OPCODE_ADD;
22015      value = negated;
22016      break;
22017
22018    case T2_OPCODE_ADD:
22019      new_inst = T2_OPCODE_SUB;
22020      value = negated;
22021      break;
22022
22023      /* ORR <-> ORN.  Includes MOV <-> MVN.  */
22024    case T2_OPCODE_ORR:
22025      new_inst = T2_OPCODE_ORN;
22026      value = inverted;
22027      break;
22028
22029    case T2_OPCODE_ORN:
22030      new_inst = T2_OPCODE_ORR;
22031      value = inverted;
22032      break;
22033
22034      /* AND <-> BIC.  TST has no inverted equivalent.  */
22035    case T2_OPCODE_AND:
22036      new_inst = T2_OPCODE_BIC;
22037      if (rd == 15)
22038	value = FAIL;
22039      else
22040	value = inverted;
22041      break;
22042
22043    case T2_OPCODE_BIC:
22044      new_inst = T2_OPCODE_AND;
22045      value = inverted;
22046      break;
22047
22048      /* ADC <-> SBC  */
22049    case T2_OPCODE_ADC:
22050      new_inst = T2_OPCODE_SBC;
22051      value = inverted;
22052      break;
22053
22054    case T2_OPCODE_SBC:
22055      new_inst = T2_OPCODE_ADC;
22056      value = inverted;
22057      break;
22058
22059      /* We cannot do anything.	 */
22060    default:
22061      return FAIL;
22062    }
22063
22064  if (value == (unsigned int)FAIL)
22065    return FAIL;
22066
22067  *instruction &= T2_OPCODE_MASK;
22068  *instruction |= new_inst << T2_DATA_OP_SHIFT;
22069  return value;
22070}
22071
22072/* Read a 32-bit thumb instruction from buf.  */
22073static unsigned long
22074get_thumb32_insn (char * buf)
22075{
22076  unsigned long insn;
22077  insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
22078  insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22079
22080  return insn;
22081}
22082
22083
22084/* We usually want to set the low bit on the address of thumb function
22085   symbols.  In particular .word foo - . should have the low bit set.
22086   Generic code tries to fold the difference of two symbols to
22087   a constant.  Prevent this and force a relocation when the first symbol
22088   is a thumb function.  */
22089
22090bfd_boolean
22091arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
22092{
22093  if (op == O_subtract
22094      && l->X_op == O_symbol
22095      && r->X_op == O_symbol
22096      && THUMB_IS_FUNC (l->X_add_symbol))
22097    {
22098      l->X_op = O_subtract;
22099      l->X_op_symbol = r->X_add_symbol;
22100      l->X_add_number -= r->X_add_number;
22101      return TRUE;
22102    }
22103
22104  /* Process as normal.  */
22105  return FALSE;
22106}
22107
22108/* Encode Thumb2 unconditional branches and calls.  The encoding of the
22109   immediate values is identical for the two.  */
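/* The 25-bit signed byte offset is split into S:I1:I2:imm10:imm11:'0'; the I1
   and I2 bits are stored as J1 = !(I1 ^ S) and J2 = !(I2 ^ S), which is what
   the XOR with T2I1I2MASK below implements.  */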
22110
22111static void
22112encode_thumb2_b_bl_offset (char * buf, offsetT value)
22113{
22114#define T2I1I2MASK  ((1 << 13) | (1 << 11))
22115  offsetT newval;
22116  offsetT newval2;
22117  addressT S, I1, I2, lo, hi;
22118
22119  S = (value >> 24) & 0x01;
22120  I1 = (value >> 23) & 0x01;
22121  I2 = (value >> 22) & 0x01;
22122  hi = (value >> 12) & 0x3ff;
22123  lo = (value >> 1) & 0x7ff;
22124  newval   = md_chars_to_number (buf, THUMB_SIZE);
22125  newval2  = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22126  newval  |= (S << 10) | hi;
22127  newval2 &=  ~T2I1I2MASK;
22128  newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
22129  md_number_to_chars (buf, newval, THUMB_SIZE);
22130  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
22131}
22132
22133void
22134md_apply_fix (fixS *	fixP,
22135	       valueT * valP,
22136	       segT	seg)
22137{
22138  offsetT	 value = * valP;
22139  offsetT	 newval;
22140  unsigned int	 newimm;
22141  unsigned long	 temp;
22142  int		 sign;
22143  char *	 buf = fixP->fx_where + fixP->fx_frag->fr_literal;
22144
22145  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
22146
22147  /* Note whether this will delete the relocation.  */
22148
22149  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
22150    fixP->fx_done = 1;
22151
22152  /* On a 64-bit host, silently truncate 'value' to 32 bits for
22153     consistency with the behaviour on 32-bit hosts.  Remember value
22154     for emit_reloc.  */
22155  value &= 0xffffffff;
22156  value ^= 0x80000000;
22157  value -= 0x80000000;
22158
22159  *valP = value;
22160  fixP->fx_addnumber = value;
22161
22162  /* Same treatment for fixP->fx_offset.  */
22163  fixP->fx_offset &= 0xffffffff;
22164  fixP->fx_offset ^= 0x80000000;
22165  fixP->fx_offset -= 0x80000000;
22166
22167  switch (fixP->fx_r_type)
22168    {
22169    case BFD_RELOC_NONE:
22170      /* This will need to go in the object file.  */
22171      fixP->fx_done = 0;
22172      break;
22173
22174    case BFD_RELOC_ARM_IMMEDIATE:
22175      /* We claim that this fixup has been processed here,
22176	 even if in fact we generate an error because we do
22177	 not have a reloc for it, so tc_gen_reloc will reject it.  */
22178      fixP->fx_done = 1;
22179
22180      if (fixP->fx_addsy)
22181	{
22182	  const char *msg = 0;
22183
22184	  if (! S_IS_DEFINED (fixP->fx_addsy))
22185	    msg = _("undefined symbol %s used as an immediate value");
22186	  else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
22187	    msg = _("symbol %s is in a different section");
22188	  else if (S_IS_WEAK (fixP->fx_addsy))
22189	    msg = _("symbol %s is weak and may be overridden later");
22190
22191	  if (msg)
22192	    {
22193	      as_bad_where (fixP->fx_file, fixP->fx_line,
22194			    msg, S_GET_NAME (fixP->fx_addsy));
22195	      break;
22196	    }
22197	}
22198
22199      temp = md_chars_to_number (buf, INSN_SIZE);
22200
22201      /* If the offset is negative, we should use encoding A2 for ADR.  */
22202      if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
22203	newimm = negate_data_op (&temp, value);
22204      else
22205	{
22206	  newimm = encode_arm_immediate (value);
22207
22208	  /* If the instruction will fail, see if we can fix things up by
22209	     changing the opcode.  */
22210	  if (newimm == (unsigned int) FAIL)
22211	    newimm = negate_data_op (&temp, value);
22212	}
22213
22214      if (newimm == (unsigned int) FAIL)
22215	{
22216	  as_bad_where (fixP->fx_file, fixP->fx_line,
22217			_("invalid constant (%lx) after fixup"),
22218			(unsigned long) value);
22219	  break;
22220	}
22221
22222      newimm |= (temp & 0xfffff000);
22223      md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
22224      break;
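      /* For illustration: encode_arm_immediate only succeeds for values that
	 fit the ARM data-processing immediate form, i.e. an 8-bit constant
	 rotated right by an even amount.  For example 0x0003fc00 is 0xff
	 rotated right by 22 and so is representable, while 0x00000101 is not
	 and the code above falls back to negate_data_op.  */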
22225
22226    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
22227      {
22228	unsigned int highpart = 0;
22229	unsigned int newinsn  = 0xe1a00000; /* nop.  */
22230
22231	if (fixP->fx_addsy)
22232	  {
22233	    const char *msg = 0;
22234
22235	    if (! S_IS_DEFINED (fixP->fx_addsy))
22236	      msg = _("undefined symbol %s used as an immediate value");
22237	    else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
22238	      msg = _("symbol %s is in a different section");
22239	    else if (S_IS_WEAK (fixP->fx_addsy))
22240	      msg = _("symbol %s is weak and may be overridden later");
22241
22242	    if (msg)
22243	      {
22244		as_bad_where (fixP->fx_file, fixP->fx_line,
22245			      msg, S_GET_NAME (fixP->fx_addsy));
22246		break;
22247	      }
22248	  }
22249
22250	newimm = encode_arm_immediate (value);
22251	temp = md_chars_to_number (buf, INSN_SIZE);
22252
22253	/* If the instruction will fail, see if we can fix things up by
22254	   changing the opcode.	 */
22255	if (newimm == (unsigned int) FAIL
22256	    && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
22257	  {
22258	    /* No ?  OK - try using two ADD instructions to generate
22259	       the value.  */
22260	    newimm = validate_immediate_twopart (value, & highpart);
22261
22262	    /* Yes - then make sure that the second instruction is
22263	       also an add.  */
22264	    if (newimm != (unsigned int) FAIL)
22265	      newinsn = temp;
22266	    /* Still No ?  Try using a negated value.  */
22267	    else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
22268	      temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
22269	    /* Otherwise - give up.  */
22270	    else
22271	      {
22272		as_bad_where (fixP->fx_file, fixP->fx_line,
22273			      _("unable to compute ADRL instructions for PC offset of 0x%lx"),
22274			      (long) value);
22275		break;
22276	      }
22277
22278	    /* Replace the first operand in the 2nd instruction (which
22279	       is the PC) with the destination register.  We have
22280	       already added in the PC in the first instruction and we
22281	       do not want to do it again.  */
22282	    newinsn &= ~ 0xf0000;
22283	    newinsn |= ((newinsn & 0x0f000) << 4);
22284	  }
22285
22286	newimm |= (temp & 0xfffff000);
22287	md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
22288
22289	highpart |= (newinsn & 0xfffff000);
22290	md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
22291      }
22292      break;
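      /* For the ADRL case above, validate_immediate_twopart splits the
	 offset into two immediates that are each representable; e.g. (one
	 possible split, for illustration only) an offset of 0xaabb00 could
	 become "add rd, pc, #0xaa0000" followed by "add rd, rd, #0xbb00",
	 with the second instruction's PC operand rewritten to rd as done
	 above.  */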
22293
22294    case BFD_RELOC_ARM_OFFSET_IMM:
22295      if (!fixP->fx_done && seg->use_rela_p)
22296	value = 0;
22297
22298    case BFD_RELOC_ARM_LITERAL:
22299      sign = value > 0;
22300
22301      if (value < 0)
22302	value = - value;
22303
22304      if (validate_offset_imm (value, 0) == FAIL)
22305	{
22306	  if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
22307	    as_bad_where (fixP->fx_file, fixP->fx_line,
22308			  _("invalid literal constant: pool needs to be closer"));
22309	  else
22310	    as_bad_where (fixP->fx_file, fixP->fx_line,
22311			  _("bad immediate value for offset (%ld)"),
22312			  (long) value);
22313	  break;
22314	}
22315
22316      newval = md_chars_to_number (buf, INSN_SIZE);
22317      if (value == 0)
22318	newval &= 0xfffff000;
22319      else
22320	{
22321	  newval &= 0xff7ff000;
22322	  newval |= value | (sign ? INDEX_UP : 0);
22323	}
22324      md_number_to_chars (buf, newval, INSN_SIZE);
22325      break;
22326
22327    case BFD_RELOC_ARM_OFFSET_IMM8:
22328    case BFD_RELOC_ARM_HWLITERAL:
22329      sign = value > 0;
22330
22331      if (value < 0)
22332	value = - value;
22333
22334      if (validate_offset_imm (value, 1) == FAIL)
22335	{
22336	  if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
22337	    as_bad_where (fixP->fx_file, fixP->fx_line,
22338			  _("invalid literal constant: pool needs to be closer"));
22339	  else
22340	    as_bad_where (fixP->fx_file, fixP->fx_line,
22341			  _("bad immediate value for 8-bit offset (%ld)"),
22342			  (long) value);
22343	  break;
22344	}
22345
22346      newval = md_chars_to_number (buf, INSN_SIZE);
22347      if (value == 0)
22348	newval &= 0xfffff0f0;
22349      else
22350	{
22351	  newval &= 0xff7ff0f0;
22352	  newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
22353	}
22354      md_number_to_chars (buf, newval, INSN_SIZE);
22355      break;
22356
22357    case BFD_RELOC_ARM_T32_OFFSET_U8:
22358      if (value < 0 || value > 1020 || value % 4 != 0)
22359	as_bad_where (fixP->fx_file, fixP->fx_line,
22360		      _("bad immediate value for offset (%ld)"), (long) value);
22361      value /= 4;
22362
22363      newval = md_chars_to_number (buf+2, THUMB_SIZE);
22364      newval |= value;
22365      md_number_to_chars (buf+2, newval, THUMB_SIZE);
22366      break;
22367
22368    case BFD_RELOC_ARM_T32_OFFSET_IMM:
22369      /* This is a complicated relocation used for all varieties of Thumb32
22370	 load/store instruction with immediate offset:
22371
22372	 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
22373						   *4, optional writeback(W)
22374						   (doubleword load/store)
22375
22376	 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
22377	 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
22378	 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
22379	 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
22380	 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
22381
22382	 Uppercase letters indicate bits that are already encoded at
22383	 this point.  Lowercase letters are our problem.  For the
22384	 second block of instructions, the secondary opcode nybble
22385	 (bits 8..11) is present, and bit 23 is zero, even if this is
22386	 a PC-relative operation.  */
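      /* For example (illustrative only): an LDRD with a -24 byte offset
	 takes the first (doubleword) form: the offset is scaled by 4 to an
	 imm8 of 6 and the U bit (bit 23) is left clear because the value is
	 negative.  */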
22387      newval = md_chars_to_number (buf, THUMB_SIZE);
22388      newval <<= 16;
22389      newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
22390
22391      if ((newval & 0xf0000000) == 0xe0000000)
22392	{
22393	  /* Doubleword load/store: 8-bit offset, scaled by 4.  */
22394	  if (value >= 0)
22395	    newval |= (1 << 23);
22396	  else
22397	    value = -value;
22398	  if (value % 4 != 0)
22399	    {
22400	      as_bad_where (fixP->fx_file, fixP->fx_line,
22401			    _("offset not a multiple of 4"));
22402	      break;
22403	    }
22404	  value /= 4;
22405	  if (value > 0xff)
22406	    {
22407	      as_bad_where (fixP->fx_file, fixP->fx_line,
22408			    _("offset out of range"));
22409	      break;
22410	    }
22411	  newval &= ~0xff;
22412	}
22413      else if ((newval & 0x000f0000) == 0x000f0000)
22414	{
22415	  /* PC-relative, 12-bit offset.  */
22416	  if (value >= 0)
22417	    newval |= (1 << 23);
22418	  else
22419	    value = -value;
22420	  if (value > 0xfff)
22421	    {
22422	      as_bad_where (fixP->fx_file, fixP->fx_line,
22423			    _("offset out of range"));
22424	      break;
22425	    }
22426	  newval &= ~0xfff;
22427	}
22428      else if ((newval & 0x00000100) == 0x00000100)
22429	{
22430	  /* Writeback: 8-bit, +/- offset.  */
22431	  if (value >= 0)
22432	    newval |= (1 << 9);
22433	  else
22434	    value = -value;
22435	  if (value > 0xff)
22436	    {
22437	      as_bad_where (fixP->fx_file, fixP->fx_line,
22438			    _("offset out of range"));
22439	      break;
22440	    }
22441	  newval &= ~0xff;
22442	}
22443      else if ((newval & 0x00000f00) == 0x00000e00)
22444	{
22445	  /* T-instruction: positive 8-bit offset.  */
22446	  if (value < 0 || value > 0xff)
22447	    {
22448	      as_bad_where (fixP->fx_file, fixP->fx_line,
22449			    _("offset out of range"));
22450	      break;
22451	    }
22452	  newval &= ~0xff;
22453	  newval |= value;
22454	}
22455      else
22456	{
22457	  /* Positive 12-bit or negative 8-bit offset.  */
22458	  int limit;
22459	  if (value >= 0)
22460	    {
22461	      newval |= (1 << 23);
22462	      limit = 0xfff;
22463	    }
22464	  else
22465	    {
22466	      value = -value;
22467	      limit = 0xff;
22468	    }
22469	  if (value > limit)
22470	    {
22471	      as_bad_where (fixP->fx_file, fixP->fx_line,
22472			    _("offset out of range"));
22473	      break;
22474	    }
22475	  newval &= ~limit;
22476	}
22477
22478      newval |= value;
22479      md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
22480      md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
22481      break;
22482
22483    case BFD_RELOC_ARM_SHIFT_IMM:
22484      newval = md_chars_to_number (buf, INSN_SIZE);
22485      if (((unsigned long) value) > 32
22486	  || (value == 32
22487	      && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
22488	{
22489	  as_bad_where (fixP->fx_file, fixP->fx_line,
22490			_("shift expression is too large"));
22491	  break;
22492	}
22493
22494      if (value == 0)
22495	/* Shifts of zero must be done as lsl.	*/
22496	newval &= ~0x60;
22497      else if (value == 32)
22498	value = 0;
22499      newval &= 0xfffff07f;
22500      newval |= (value & 0x1f) << 7;
22501      md_number_to_chars (buf, newval, INSN_SIZE);
22502      break;
22503
22504    case BFD_RELOC_ARM_T32_IMMEDIATE:
22505    case BFD_RELOC_ARM_T32_ADD_IMM:
22506    case BFD_RELOC_ARM_T32_IMM12:
22507    case BFD_RELOC_ARM_T32_ADD_PC12:
22508      /* We claim that this fixup has been processed here,
22509	 even if in fact we generate an error because we do
22510	 not have a reloc for it, so tc_gen_reloc will reject it.  */
22511      fixP->fx_done = 1;
22512
22513      if (fixP->fx_addsy
22514	  && ! S_IS_DEFINED (fixP->fx_addsy))
22515	{
22516	  as_bad_where (fixP->fx_file, fixP->fx_line,
22517			_("undefined symbol %s used as an immediate value"),
22518			S_GET_NAME (fixP->fx_addsy));
22519	  break;
22520	}
22521
22522      newval = md_chars_to_number (buf, THUMB_SIZE);
22523      newval <<= 16;
22524      newval |= md_chars_to_number (buf+2, THUMB_SIZE);
22525
22526      newimm = FAIL;
22527      if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
22528	  || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
22529	{
22530	  newimm = encode_thumb32_immediate (value);
22531	  if (newimm == (unsigned int) FAIL)
22532	    newimm = thumb32_negate_data_op (&newval, value);
22533	}
22534      if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
22535	  && newimm == (unsigned int) FAIL)
22536	{
22537	  /* Turn add/sub into addw/subw.  */
22538	  if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
22539	    newval = (newval & 0xfeffffff) | 0x02000000;
22540	  /* No flat 12-bit imm encoding for flag-setting (S bit) add/sub.  */
22541	  if ((newval & 0x00100000) == 0)
22542	    {
22543	      /* 12 bit immediate for addw/subw.  */
22544	      if (value < 0)
22545		{
22546		  value = -value;
22547		  newval ^= 0x00a00000;
22548		}
22549	      if (value > 0xfff)
22550		newimm = (unsigned int) FAIL;
22551	      else
22552		newimm = value;
22553	    }
22554	}
22555
22556      if (newimm == (unsigned int)FAIL)
22557	{
22558	  as_bad_where (fixP->fx_file, fixP->fx_line,
22559			_("invalid constant (%lx) after fixup"),
22560			(unsigned long) value);
22561	  break;
22562	}
22563
22564      newval |= (newimm & 0x800) << 15;
22565      newval |= (newimm & 0x700) << 4;
22566      newval |= (newimm & 0x0ff);
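      /* Derived from the shifts above: newimm bit 11 lands at bit 26 of the
	 combined word (the 'i' bit), bits 10..8 at bits 14..12 (imm3) and
	 bits 7..0 at bits 7..0 (imm8) of the second halfword.  */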
22567
22568      md_number_to_chars (buf,   (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
22569      md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
22570      break;
22571
22572    case BFD_RELOC_ARM_SMC:
22573      if (((unsigned long) value) > 0xffff)
22574	as_bad_where (fixP->fx_file, fixP->fx_line,
22575		      _("invalid smc expression"));
22576      newval = md_chars_to_number (buf, INSN_SIZE);
22577      newval |= (value & 0xf) | ((value & 0xfff0) << 4);
22578      md_number_to_chars (buf, newval, INSN_SIZE);
22579      break;
22580
22581    case BFD_RELOC_ARM_HVC:
22582      if (((unsigned long) value) > 0xffff)
22583	as_bad_where (fixP->fx_file, fixP->fx_line,
22584		      _("invalid hvc expression"));
22585      newval = md_chars_to_number (buf, INSN_SIZE);
22586      newval |= (value & 0xf) | ((value & 0xfff0) << 4);
22587      md_number_to_chars (buf, newval, INSN_SIZE);
22588      break;
22589
22590    case BFD_RELOC_ARM_SWI:
22591      if (fixP->tc_fix_data != 0)
22592	{
22593	  if (((unsigned long) value) > 0xff)
22594	    as_bad_where (fixP->fx_file, fixP->fx_line,
22595			  _("invalid swi expression"));
22596	  newval = md_chars_to_number (buf, THUMB_SIZE);
22597	  newval |= value;
22598	  md_number_to_chars (buf, newval, THUMB_SIZE);
22599	}
22600      else
22601	{
22602	  if (((unsigned long) value) > 0x00ffffff)
22603	    as_bad_where (fixP->fx_file, fixP->fx_line,
22604			  _("invalid swi expression"));
22605	  newval = md_chars_to_number (buf, INSN_SIZE);
22606	  newval |= value;
22607	  md_number_to_chars (buf, newval, INSN_SIZE);
22608	}
22609      break;
22610
22611    case BFD_RELOC_ARM_MULTI:
22612      if (((unsigned long) value) > 0xffff)
22613	as_bad_where (fixP->fx_file, fixP->fx_line,
22614		      _("invalid expression in load/store multiple"));
22615      newval = value | md_chars_to_number (buf, INSN_SIZE);
22616      md_number_to_chars (buf, newval, INSN_SIZE);
22617      break;
22618
22619#ifdef OBJ_ELF
22620    case BFD_RELOC_ARM_PCREL_CALL:
22621
22622      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
22623	  && fixP->fx_addsy
22624	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22625	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22626	  && THUMB_IS_FUNC (fixP->fx_addsy))
22627	/* Flip the bl to blx.  This is a simple bit flip
22628	   here because we generate PCREL_CALL for
22629	   unconditional bls.  */
22630	{
22631	  newval = md_chars_to_number (buf, INSN_SIZE);
22632	  newval = newval | 0x10000000;
22633	  md_number_to_chars (buf, newval, INSN_SIZE);
22634	  temp = 1;
22635	  fixP->fx_done = 1;
22636	}
22637      else
22638	temp = 3;
22639      goto arm_branch_common;
22640
22641    case BFD_RELOC_ARM_PCREL_JUMP:
22642      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
22643	  && fixP->fx_addsy
22644	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22645	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22646	  && THUMB_IS_FUNC (fixP->fx_addsy))
22647	{
22648	  /* This would be a bl<cond>, b<cond> or
22649	     b<always> to a Thumb function.  We
22650	     need to force a relocation for this particular
22651	     case.  */
22652	  newval = md_chars_to_number (buf, INSN_SIZE);
22653	  fixP->fx_done = 0;
22654	}
22655
22656    case BFD_RELOC_ARM_PLT32:
22657#endif
22658    case BFD_RELOC_ARM_PCREL_BRANCH:
22659      temp = 3;
22660      goto arm_branch_common;
22661
22662    case BFD_RELOC_ARM_PCREL_BLX:
22663
22664      temp = 1;
22665      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
22666	  && fixP->fx_addsy
22667	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22668	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22669	  && ARM_IS_FUNC (fixP->fx_addsy))
22670	{
22671	  /* Flip the blx to a bl and warn.  */
22672	  const char *name = S_GET_NAME (fixP->fx_addsy);
22673	  newval = 0xeb000000;
22674	  as_warn_where (fixP->fx_file, fixP->fx_line,
22675			 _("blx to '%s' an ARM ISA state function changed to bl"),
22676			  name);
22677	  md_number_to_chars (buf, newval, INSN_SIZE);
22678	  temp = 3;
22679	  fixP->fx_done = 1;
22680	}
22681
22682#ifdef OBJ_ELF
22683       if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
22684	 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
22685#endif
22686
22687    arm_branch_common:
22688      /* We are going to store value (shifted right by two) in the
22689	 instruction, in a 24-bit, signed field.  Bits 25 through 31 must be
22690	 either all clear or all set, and bit 0 must be clear.  For B/BL bit 1
22691	 must also be clear.  */
22692      if (value & temp)
22693	as_bad_where (fixP->fx_file, fixP->fx_line,
22694		      _("misaligned branch destination"));
22695      if ((value & (offsetT)0xfe000000) != (offsetT)0
22696	  && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
22697	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22698
22699      if (fixP->fx_done || !seg->use_rela_p)
22700	{
22701	  newval = md_chars_to_number (buf, INSN_SIZE);
22702	  newval |= (value >> 2) & 0x00ffffff;
22703	  /* Set the H bit on BLX instructions.  */
22704	  if (temp == 1)
22705	    {
22706	      if (value & 2)
22707		newval |= 0x01000000;
22708	      else
22709		newval &= ~0x01000000;
22710	    }
22711	  md_number_to_chars (buf, newval, INSN_SIZE);
22712	}
22713      break;
22714
22715    case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
22716      /* CBZ can only branch forward.  */
22717
22718      /* Attempts to use CBZ to branch to the next instruction
22719	 (which, strictly speaking, are prohibited) will be turned into
22720	 no-ops.
22721
22722	 FIXME: It may be better to remove the instruction completely and
22723	 perform relaxation.  */
22724      if (value == -2)
22725	{
22726	  newval = md_chars_to_number (buf, THUMB_SIZE);
22727	  newval = 0xbf00; /* NOP encoding T1 */
22728	  md_number_to_chars (buf, newval, THUMB_SIZE);
22729	}
22730      else
22731	{
22732	  if (value & ~0x7e)
22733	    as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22734
22735	  if (fixP->fx_done || !seg->use_rela_p)
22736	    {
22737	      newval = md_chars_to_number (buf, THUMB_SIZE);
22738	      newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
22739	      md_number_to_chars (buf, newval, THUMB_SIZE);
22740	    }
22741	}
22742      break;
22743
22744    case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch.	*/
22745      if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
22746	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22747
22748      if (fixP->fx_done || !seg->use_rela_p)
22749	{
22750	  newval = md_chars_to_number (buf, THUMB_SIZE);
22751	  newval |= (value & 0x1ff) >> 1;
22752	  md_number_to_chars (buf, newval, THUMB_SIZE);
22753	}
22754      break;
22755
22756    case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch.  */
22757      if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
22758	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22759
22760      if (fixP->fx_done || !seg->use_rela_p)
22761	{
22762	  newval = md_chars_to_number (buf, THUMB_SIZE);
22763	  newval |= (value & 0xfff) >> 1;
22764	  md_number_to_chars (buf, newval, THUMB_SIZE);
22765	}
22766      break;
22767
22768    case BFD_RELOC_THUMB_PCREL_BRANCH20:
22769      if (fixP->fx_addsy
22770	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22771	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22772	  && ARM_IS_FUNC (fixP->fx_addsy)
22773	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
22774	{
22775	  /* Force a relocation for a branch 20 bits wide.  */
22776	  fixP->fx_done = 0;
22777	}
22778      if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
22779	as_bad_where (fixP->fx_file, fixP->fx_line,
22780		      _("conditional branch out of range"));
22781
22782      if (fixP->fx_done || !seg->use_rela_p)
22783	{
22784	  offsetT newval2;
22785	  addressT S, J1, J2, lo, hi;
22786
22787	  S  = (value & 0x00100000) >> 20;
22788	  J2 = (value & 0x00080000) >> 19;
22789	  J1 = (value & 0x00040000) >> 18;
22790	  hi = (value & 0x0003f000) >> 12;
22791	  lo = (value & 0x00000ffe) >> 1;
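	  /* Note, for comparison with encode_thumb2_b_bl_offset above: this
	     conditional-branch encoding stores J1 and J2 directly, without
	     the XOR with S used for B/BL.  */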
22792
22793	  newval   = md_chars_to_number (buf, THUMB_SIZE);
22794	  newval2  = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22795	  newval  |= (S << 10) | hi;
22796	  newval2 |= (J1 << 13) | (J2 << 11) | lo;
22797	  md_number_to_chars (buf, newval, THUMB_SIZE);
22798	  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
22799	}
22800      break;
22801
22802    case BFD_RELOC_THUMB_PCREL_BLX:
22803      /* If there is a blx from a Thumb state function to
22804	 another Thumb function, flip this to a bl and warn
22805	 about it.  */
22806
22807      if (fixP->fx_addsy
22808	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22809	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22810	  && THUMB_IS_FUNC (fixP->fx_addsy))
22811	{
22812	  const char *name = S_GET_NAME (fixP->fx_addsy);
22813	  as_warn_where (fixP->fx_file, fixP->fx_line,
22814			 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
22815			 name);
22816	  newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22817	  newval = newval | 0x1000;
22818	  md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
22819	  fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
22820	  fixP->fx_done = 1;
22821	}
22822
22823
22824      goto thumb_bl_common;
22825
22826    case BFD_RELOC_THUMB_PCREL_BRANCH23:
22827      /* A bl from Thumb state ISA to an internal ARM state function
22828	 is converted to a blx.  */
22829      if (fixP->fx_addsy
22830	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22831	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22832	  && ARM_IS_FUNC (fixP->fx_addsy)
22833	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
22834	{
22835	  newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22836	  newval = newval & ~0x1000;
22837	  md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
22838	  fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
22839	  fixP->fx_done = 1;
22840	}
22841
22842    thumb_bl_common:
22843
22844      if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
22845	/* For a BLX instruction, make sure that the relocation is rounded up
22846	   to a word boundary.  This follows the semantics of the instruction
22847	   which specifies that bit 1 of the target address will come from bit
22848	   1 of the base address.  */
22849	value = (value + 3) & ~ 3;
22850
22851#ifdef OBJ_ELF
22852       if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
22853	   && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
22854	 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
22855#endif
22856
22857      if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
22858	{
22859	  if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2)))
22860	    as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22861	  else if ((value & ~0x1ffffff)
22862		   && ((value & ~0x1ffffff) != ~0x1ffffff))
22863	    as_bad_where (fixP->fx_file, fixP->fx_line,
22864			  _("Thumb2 branch out of range"));
22865	}
22866
22867      if (fixP->fx_done || !seg->use_rela_p)
22868	encode_thumb2_b_bl_offset (buf, value);
22869
22870      break;
22871
22872    case BFD_RELOC_THUMB_PCREL_BRANCH25:
22873      if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
22874	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22875
22876      if (fixP->fx_done || !seg->use_rela_p)
22877	  encode_thumb2_b_bl_offset (buf, value);
22878
22879      break;
22880
22881    case BFD_RELOC_8:
22882      if (fixP->fx_done || !seg->use_rela_p)
22883	*buf = value;
22884      break;
22885
22886    case BFD_RELOC_16:
22887      if (fixP->fx_done || !seg->use_rela_p)
22888	md_number_to_chars (buf, value, 2);
22889      break;
22890
22891#ifdef OBJ_ELF
22892    case BFD_RELOC_ARM_TLS_CALL:
22893    case BFD_RELOC_ARM_THM_TLS_CALL:
22894    case BFD_RELOC_ARM_TLS_DESCSEQ:
22895    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
22896    case BFD_RELOC_ARM_TLS_GOTDESC:
22897    case BFD_RELOC_ARM_TLS_GD32:
22898    case BFD_RELOC_ARM_TLS_LE32:
22899    case BFD_RELOC_ARM_TLS_IE32:
22900    case BFD_RELOC_ARM_TLS_LDM32:
22901    case BFD_RELOC_ARM_TLS_LDO32:
22902      S_SET_THREAD_LOCAL (fixP->fx_addsy);
22903      break;
22904
22905    case BFD_RELOC_ARM_GOT32:
22906    case BFD_RELOC_ARM_GOTOFF:
22907      break;
22908
22909    case BFD_RELOC_ARM_GOT_PREL:
22910      if (fixP->fx_done || !seg->use_rela_p)
22911	md_number_to_chars (buf, value, 4);
22912      break;
22913
22914    case BFD_RELOC_ARM_TARGET2:
22915      /* TARGET2 is not partial-inplace, so we need to write the
22916	 addend here for REL targets, because it won't be written out
22917	 during reloc processing later.  */
22918      if (fixP->fx_done || !seg->use_rela_p)
22919	md_number_to_chars (buf, fixP->fx_offset, 4);
22920      break;
22921#endif
22922
22923    case BFD_RELOC_RVA:
22924    case BFD_RELOC_32:
22925    case BFD_RELOC_ARM_TARGET1:
22926    case BFD_RELOC_ARM_ROSEGREL32:
22927    case BFD_RELOC_ARM_SBREL32:
22928    case BFD_RELOC_32_PCREL:
22929#ifdef TE_PE
22930    case BFD_RELOC_32_SECREL:
22931#endif
22932      if (fixP->fx_done || !seg->use_rela_p)
22933#ifdef TE_WINCE
22934	/* For WinCE we only do this for pcrel fixups.  */
22935	if (fixP->fx_done || fixP->fx_pcrel)
22936#endif
22937	  md_number_to_chars (buf, value, 4);
22938      break;
22939
22940#ifdef OBJ_ELF
22941    case BFD_RELOC_ARM_PREL31:
22942      if (fixP->fx_done || !seg->use_rela_p)
22943	{
22944	  newval = md_chars_to_number (buf, 4) & 0x80000000;
22945	  if ((value ^ (value >> 1)) & 0x40000000)
22946	    {
22947	      as_bad_where (fixP->fx_file, fixP->fx_line,
22948			    _("rel31 relocation overflow"));
22949	    }
22950	  newval |= value & 0x7fffffff;
22951	  md_number_to_chars (buf, newval, 4);
22952	}
22953      break;
22954#endif
22955
22956    case BFD_RELOC_ARM_CP_OFF_IMM:
22957    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
22958      if (value < -1023 || value > 1023 || (value & 3))
22959	as_bad_where (fixP->fx_file, fixP->fx_line,
22960		      _("co-processor offset out of range"));
22961    cp_off_common:
22962      sign = value > 0;
22963      if (value < 0)
22964	value = -value;
22965      if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
22966	  || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
22967	newval = md_chars_to_number (buf, INSN_SIZE);
22968      else
22969	newval = get_thumb32_insn (buf);
22970      if (value == 0)
22971	newval &= 0xffffff00;
22972      else
22973	{
22974	  newval &= 0xff7fff00;
22975	  newval |= (value >> 2) | (sign ? INDEX_UP : 0);
22976	}
22977      if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
22978	  || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
22979	md_number_to_chars (buf, newval, INSN_SIZE);
22980      else
22981	put_thumb32_insn (buf, newval);
22982      break;
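      /* For example (illustrative only): a co-processor offset of -40 in
	 the case above leaves the U bit (INDEX_UP) clear and stores
	 40 >> 2 == 10 in the low eight bits of the instruction.  */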
22983
22984    case BFD_RELOC_ARM_CP_OFF_IMM_S2:
22985    case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
22986      if (value < -255 || value > 255)
22987	as_bad_where (fixP->fx_file, fixP->fx_line,
22988		      _("co-processor offset out of range"));
22989      value *= 4;
22990      goto cp_off_common;
22991
22992    case BFD_RELOC_ARM_THUMB_OFFSET:
22993      newval = md_chars_to_number (buf, THUMB_SIZE);
22994      /* Exactly what the valid range is, and where the offset is
22995	 inserted, depends on the type of instruction; we can establish
22996	 this from the top 4 bits.  */
22997      switch (newval >> 12)
22998	{
22999	case 4: /* PC load.  */
23000	  /* Thumb PC loads are somewhat odd: bit 1 of the PC is
23001	     forced to zero for these loads; md_pcrel_from has already
23002	     compensated for this.  */
23003	  if (value & 3)
23004	    as_bad_where (fixP->fx_file, fixP->fx_line,
23005			  _("invalid offset, target not word aligned (0x%08lX)"),
23006			  (((unsigned long) fixP->fx_frag->fr_address
23007			    + (unsigned long) fixP->fx_where) & ~3)
23008			  + (unsigned long) value);
23009
23010	  if (value & ~0x3fc)
23011	    as_bad_where (fixP->fx_file, fixP->fx_line,
23012			  _("invalid offset, value too big (0x%08lX)"),
23013			  (long) value);
23014
23015	  newval |= value >> 2;
23016	  break;
23017
23018	case 9: /* SP load/store.  */
23019	  if (value & ~0x3fc)
23020	    as_bad_where (fixP->fx_file, fixP->fx_line,
23021			  _("invalid offset, value too big (0x%08lX)"),
23022			  (long) value);
23023	  newval |= value >> 2;
23024	  break;
23025
23026	case 6: /* Word load/store.  */
23027	  if (value & ~0x7c)
23028	    as_bad_where (fixP->fx_file, fixP->fx_line,
23029			  _("invalid offset, value too big (0x%08lX)"),
23030			  (long) value);
23031	  newval |= value << 4; /* 6 - 2.  */
23032	  break;
23033
23034	case 7: /* Byte load/store.  */
23035	  if (value & ~0x1f)
23036	    as_bad_where (fixP->fx_file, fixP->fx_line,
23037			  _("invalid offset, value too big (0x%08lX)"),
23038			  (long) value);
23039	  newval |= value << 6;
23040	  break;
23041
23042	case 8: /* Halfword load/store.	 */
23043	  if (value & ~0x3e)
23044	    as_bad_where (fixP->fx_file, fixP->fx_line,
23045			  _("invalid offset, value too big (0x%08lX)"),
23046			  (long) value);
23047	  newval |= value << 5; /* 6 - 1.  */
23048	  break;
23049
23050	default:
23051	  as_bad_where (fixP->fx_file, fixP->fx_line,
23052			_("Unable to process relocation for thumb opcode: %lx"),
23053			(unsigned long) newval);
23054	  break;
23055	}
23056      md_number_to_chars (buf, newval, THUMB_SIZE);
23057      break;
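      /* For example (illustrative only): in the PC-load case above,
	 "ldr r0, [pc, #40]" stores 40 >> 2 == 10 in the low eight bits of
	 the instruction.  */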
23058
23059    case BFD_RELOC_ARM_THUMB_ADD:
23060      /* This is a complicated relocation, since we use it for all of
23061	 the following immediate relocations:
23062
23063	    3bit ADD/SUB
23064	    8bit ADD/SUB
23065	    9bit ADD/SUB SP word-aligned
23066	   10bit ADD PC/SP word-aligned
23067
23068	 The type of instruction being processed is encoded in the
23069	 instruction field:
23070
23071	   0x8000  SUB
23072	   0x00F0  Rd
23073	   0x000F  Rs
23074      */
23075      newval = md_chars_to_number (buf, THUMB_SIZE);
23076      {
23077	int rd = (newval >> 4) & 0xf;
23078	int rs = newval & 0xf;
23079	int subtract = !!(newval & 0x8000);
23080
23081	/* Check for HI regs, only very restricted cases allowed:
23082	   Adjusting SP, and using PC or SP to get an address.	*/
23083	if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
23084	    || (rs > 7 && rs != REG_SP && rs != REG_PC))
23085	  as_bad_where (fixP->fx_file, fixP->fx_line,
23086			_("invalid Hi register with immediate"));
23087
23088	/* If value is negative, choose the opposite instruction.  */
23089	if (value < 0)
23090	  {
23091	    value = -value;
23092	    subtract = !subtract;
23093	    if (value < 0)
23094	      as_bad_where (fixP->fx_file, fixP->fx_line,
23095			    _("immediate value out of range"));
23096	  }
23097
23098	if (rd == REG_SP)
23099	  {
23100 	    if (value & ~0x1fc)
23101	      as_bad_where (fixP->fx_file, fixP->fx_line,
23102			    _("invalid immediate for stack address calculation"));
23103	    newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
23104	    newval |= value >> 2;
23105	  }
23106	else if (rs == REG_PC || rs == REG_SP)
23107	  {
23108	    /* PR gas/18541.  If the addition is for a defined symbol
23109	       within range of an ADR instruction then accept it.  */
23110	    if (subtract
23111		&& value == 4
23112		&& fixP->fx_addsy != NULL)
23113	      {
23114		subtract = 0;
23115
23116		if (! S_IS_DEFINED (fixP->fx_addsy)
23117		    || S_GET_SEGMENT (fixP->fx_addsy) != seg
23118		    || S_IS_WEAK (fixP->fx_addsy))
23119		  {
23120		    as_bad_where (fixP->fx_file, fixP->fx_line,
23121				  _("address calculation needs a strongly defined nearby symbol"));
23122		  }
23123		else
23124		  {
23125		    offsetT v = fixP->fx_where + fixP->fx_frag->fr_address;
23126
23127		    /* Round up to the next 4-byte boundary.  */
23128		    if (v & 3)
23129		      v = (v + 3) & ~ 3;
23130		    else
23131		      v += 4;
23132		    v = S_GET_VALUE (fixP->fx_addsy) - v;
23133
23134		    if (v & ~0x3fc)
23135		      {
23136			as_bad_where (fixP->fx_file, fixP->fx_line,
23137				      _("symbol too far away"));
23138		      }
23139		    else
23140		      {
23141			fixP->fx_done = 1;
23142			value = v;
23143		      }
23144		  }
23145	      }
23146
23147	    if (subtract || value & ~0x3fc)
23148	      as_bad_where (fixP->fx_file, fixP->fx_line,
23149			    _("invalid immediate for address calculation (value = 0x%08lX)"),
23150			    (unsigned long) (subtract ? - value : value));
23151	    newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
23152	    newval |= rd << 8;
23153	    newval |= value >> 2;
23154	  }
23155	else if (rs == rd)
23156	  {
23157	    if (value & ~0xff)
23158	      as_bad_where (fixP->fx_file, fixP->fx_line,
23159			    _("immediate value out of range"));
23160	    newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
23161	    newval |= (rd << 8) | value;
23162	  }
23163	else
23164	  {
23165	    if (value & ~0x7)
23166	      as_bad_where (fixP->fx_file, fixP->fx_line,
23167			    _("immediate value out of range"));
23168	    newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
23169	    newval |= rd | (rs << 3) | (value << 6);
23170	  }
23171      }
23172      md_number_to_chars (buf, newval, THUMB_SIZE);
23173      break;
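      /* For example (illustrative only): for the case above,
	 "add r1, pc, #40" uses the T_OPCODE_ADD_PC opcode with rd == 1
	 placed in bits 10..8 and 40 >> 2 == 10 in the low eight bits.  */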
23174
23175    case BFD_RELOC_ARM_THUMB_IMM:
23176      newval = md_chars_to_number (buf, THUMB_SIZE);
23177      if (value < 0 || value > 255)
23178	as_bad_where (fixP->fx_file, fixP->fx_line,
23179		      _("invalid immediate: %ld is out of range"),
23180		      (long) value);
23181      newval |= value;
23182      md_number_to_chars (buf, newval, THUMB_SIZE);
23183      break;
23184
23185    case BFD_RELOC_ARM_THUMB_SHIFT:
23186      /* 5bit shift value (0..32).  LSL cannot take 32.	 */
23187      newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
23188      temp = newval & 0xf800;
23189      if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
23190	as_bad_where (fixP->fx_file, fixP->fx_line,
23191		      _("invalid shift value: %ld"), (long) value);
23192      /* Shifts of zero must be encoded as LSL.	 */
23193      if (value == 0)
23194	newval = (newval & 0x003f) | T_OPCODE_LSL_I;
23195      /* Shifts of 32 are encoded as zero.  */
23196      else if (value == 32)
23197	value = 0;
23198      newval |= value << 6;
23199      md_number_to_chars (buf, newval, THUMB_SIZE);
23200      break;
23201
23202    case BFD_RELOC_VTABLE_INHERIT:
23203    case BFD_RELOC_VTABLE_ENTRY:
23204      fixP->fx_done = 0;
23205      return;
23206
23207    case BFD_RELOC_ARM_MOVW:
23208    case BFD_RELOC_ARM_MOVT:
23209    case BFD_RELOC_ARM_THUMB_MOVW:
23210    case BFD_RELOC_ARM_THUMB_MOVT:
23211      if (fixP->fx_done || !seg->use_rela_p)
23212	{
23213	  /* REL format relocations are limited to a 16-bit addend.  */
23214	  if (!fixP->fx_done)
23215	    {
23216	      if (value < -0x8000 || value > 0x7fff)
23217		  as_bad_where (fixP->fx_file, fixP->fx_line,
23218				_("offset out of range"));
23219	    }
23220	  else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
23221		   || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
23222	    {
23223	      value >>= 16;
23224	    }
23225
23226	  if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
23227	      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
23228	    {
23229	      newval = get_thumb32_insn (buf);
23230	      newval &= 0xfbf08f00;
23231	      newval |= (value & 0xf000) << 4;
23232	      newval |= (value & 0x0800) << 15;
23233	      newval |= (value & 0x0700) << 4;
23234	      newval |= (value & 0x00ff);
23235	      put_thumb32_insn (buf, newval);
23236	    }
23237	  else
23238	    {
23239	      newval = md_chars_to_number (buf, 4);
23240	      newval &= 0xfff0f000;
23241	      newval |= value & 0x0fff;
23242	      newval |= (value & 0xf000) << 4;
23243	      md_number_to_chars (buf, newval, 4);
23244	    }
23245	}
23246      return;
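      /* For example (illustrative only): encoding 0x1234 with the Thumb
	 MOVW form above places 0x1 in imm4 (bits 19..16), 0 in the 'i' bit
	 (bit 26), 2 in imm3 (bits 14..12) and 0x34 in imm8 (bits 7..0); the
	 ARM form simply uses imm4 (bits 19..16) and imm12 (bits 11..0).  */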
23247
23248   case BFD_RELOC_ARM_ALU_PC_G0_NC:
23249   case BFD_RELOC_ARM_ALU_PC_G0:
23250   case BFD_RELOC_ARM_ALU_PC_G1_NC:
23251   case BFD_RELOC_ARM_ALU_PC_G1:
23252   case BFD_RELOC_ARM_ALU_PC_G2:
23253   case BFD_RELOC_ARM_ALU_SB_G0_NC:
23254   case BFD_RELOC_ARM_ALU_SB_G0:
23255   case BFD_RELOC_ARM_ALU_SB_G1_NC:
23256   case BFD_RELOC_ARM_ALU_SB_G1:
23257   case BFD_RELOC_ARM_ALU_SB_G2:
23258     gas_assert (!fixP->fx_done);
23259     if (!seg->use_rela_p)
23260       {
23261	 bfd_vma insn;
23262	 bfd_vma encoded_addend;
23263	 bfd_vma addend_abs = abs (value);
23264
23265	 /* Check that the absolute value of the addend can be
23266	    expressed as an 8-bit constant plus a rotation.  */
23267	 encoded_addend = encode_arm_immediate (addend_abs);
23268	 if (encoded_addend == (unsigned int) FAIL)
23269	   as_bad_where (fixP->fx_file, fixP->fx_line,
23270			 _("the offset 0x%08lX is not representable"),
23271			 (unsigned long) addend_abs);
23272
23273	 /* Extract the instruction.  */
23274	 insn = md_chars_to_number (buf, INSN_SIZE);
23275
23276	 /* If the addend is positive, use an ADD instruction.
23277	    Otherwise use a SUB.  Take care not to destroy the S bit.  */
23278	 insn &= 0xff1fffff;
23279	 if (value < 0)
23280	   insn |= 1 << 22;
23281	 else
23282	   insn |= 1 << 23;
23283
23284	 /* Place the encoded addend into the first 12 bits of the
23285	    instruction.  */
23286	 insn &= 0xfffff000;
23287	 insn |= encoded_addend;
23288
23289	 /* Update the instruction.  */
23290	 md_number_to_chars (buf, insn, INSN_SIZE);
23291       }
23292     break;
23293
23294    case BFD_RELOC_ARM_LDR_PC_G0:
23295    case BFD_RELOC_ARM_LDR_PC_G1:
23296    case BFD_RELOC_ARM_LDR_PC_G2:
23297    case BFD_RELOC_ARM_LDR_SB_G0:
23298    case BFD_RELOC_ARM_LDR_SB_G1:
23299    case BFD_RELOC_ARM_LDR_SB_G2:
23300      gas_assert (!fixP->fx_done);
23301      if (!seg->use_rela_p)
23302	{
23303	  bfd_vma insn;
23304	  bfd_vma addend_abs = abs (value);
23305
23306	  /* Check that the absolute value of the addend can be
23307	     encoded in 12 bits.  */
23308	  if (addend_abs >= 0x1000)
23309	    as_bad_where (fixP->fx_file, fixP->fx_line,
23310			  _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
23311			  (unsigned long) addend_abs);
23312
23313	  /* Extract the instruction.  */
23314	  insn = md_chars_to_number (buf, INSN_SIZE);
23315
23316	  /* If the addend is negative, clear bit 23 of the instruction.
23317	     Otherwise set it.  */
23318	  if (value < 0)
23319	    insn &= ~(1 << 23);
23320	  else
23321	    insn |= 1 << 23;
23322
23323	  /* Place the absolute value of the addend into the first 12 bits
23324	     of the instruction.  */
23325	  insn &= 0xfffff000;
23326	  insn |= addend_abs;
23327
23328	  /* Update the instruction.  */
23329	  md_number_to_chars (buf, insn, INSN_SIZE);
23330	}
23331      break;
23332
23333    case BFD_RELOC_ARM_LDRS_PC_G0:
23334    case BFD_RELOC_ARM_LDRS_PC_G1:
23335    case BFD_RELOC_ARM_LDRS_PC_G2:
23336    case BFD_RELOC_ARM_LDRS_SB_G0:
23337    case BFD_RELOC_ARM_LDRS_SB_G1:
23338    case BFD_RELOC_ARM_LDRS_SB_G2:
23339      gas_assert (!fixP->fx_done);
23340      if (!seg->use_rela_p)
23341	{
23342	  bfd_vma insn;
23343	  bfd_vma addend_abs = abs (value);
23344
23345	  /* Check that the absolute value of the addend can be
23346	     encoded in 8 bits.  */
23347	  if (addend_abs >= 0x100)
23348	    as_bad_where (fixP->fx_file, fixP->fx_line,
23349			  _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
23350			  (unsigned long) addend_abs);
23351
23352	  /* Extract the instruction.  */
23353	  insn = md_chars_to_number (buf, INSN_SIZE);
23354
23355	  /* If the addend is negative, clear bit 23 of the instruction.
23356	     Otherwise set it.  */
23357	  if (value < 0)
23358	    insn &= ~(1 << 23);
23359	  else
23360	    insn |= 1 << 23;
23361
23362	  /* Place the first four bits of the absolute value of the addend
23363	     into the first 4 bits of the instruction, and the remaining
23364	     four into bits 8 .. 11.  */
23365	  insn &= 0xfffff0f0;
23366	  insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
23367
23368	  /* Update the instruction.  */
23369	  md_number_to_chars (buf, insn, INSN_SIZE);
23370	}
23371      break;
23372
23373    case BFD_RELOC_ARM_LDC_PC_G0:
23374    case BFD_RELOC_ARM_LDC_PC_G1:
23375    case BFD_RELOC_ARM_LDC_PC_G2:
23376    case BFD_RELOC_ARM_LDC_SB_G0:
23377    case BFD_RELOC_ARM_LDC_SB_G1:
23378    case BFD_RELOC_ARM_LDC_SB_G2:
23379      gas_assert (!fixP->fx_done);
23380      if (!seg->use_rela_p)
23381	{
23382	  bfd_vma insn;
23383	  bfd_vma addend_abs = abs (value);
23384
23385	  /* Check that the absolute value of the addend is a multiple of
23386	     four and, when divided by four, fits in 8 bits.  */
23387	  if (addend_abs & 0x3)
23388	    as_bad_where (fixP->fx_file, fixP->fx_line,
23389			  _("bad offset 0x%08lX (must be word-aligned)"),
23390			  (unsigned long) addend_abs);
23391
23392	  if ((addend_abs >> 2) > 0xff)
23393	    as_bad_where (fixP->fx_file, fixP->fx_line,
23394			  _("bad offset 0x%08lX (must be an 8-bit number of words)"),
23395			  (unsigned long) addend_abs);
23396
23397	  /* Extract the instruction.  */
23398	  insn = md_chars_to_number (buf, INSN_SIZE);
23399
23400	  /* If the addend is negative, clear bit 23 of the instruction.
23401	     Otherwise set it.  */
23402	  if (value < 0)
23403	    insn &= ~(1 << 23);
23404	  else
23405	    insn |= 1 << 23;
23406
23407	  /* Place the addend (divided by four) into the first eight
23408	     bits of the instruction.  */
23409	  insn &= 0xfffffff0;
23410	  insn |= addend_abs >> 2;
23411
23412	  /* Update the instruction.  */
23413	  md_number_to_chars (buf, insn, INSN_SIZE);
23414	}
23415      break;
23416
23417    case BFD_RELOC_ARM_V4BX:
23418      /* This will need to go in the object file.  */
23419      fixP->fx_done = 0;
23420      break;
23421
23422    case BFD_RELOC_UNUSED:
23423    default:
23424      as_bad_where (fixP->fx_file, fixP->fx_line,
23425		    _("bad relocation fixup type (%d)"), fixP->fx_r_type);
23426    }
23427}
23428
23429/* Translate internal representation of relocation info to BFD target
23430   format.  */
23431
23432arelent *
23433tc_gen_reloc (asection *section, fixS *fixp)
23434{
23435  arelent * reloc;
23436  bfd_reloc_code_real_type code;
23437
23438  reloc = (arelent *) xmalloc (sizeof (arelent));
23439
23440  reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
23441  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
23442  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
23443
23444  if (fixp->fx_pcrel)
23445    {
23446      if (section->use_rela_p)
23447	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
23448      else
23449	fixp->fx_offset = reloc->address;
23450    }
23451  reloc->addend = fixp->fx_offset;
23452
23453  switch (fixp->fx_r_type)
23454    {
23455    case BFD_RELOC_8:
23456      if (fixp->fx_pcrel)
23457	{
23458	  code = BFD_RELOC_8_PCREL;
23459	  break;
23460	}
23461
23462    case BFD_RELOC_16:
23463      if (fixp->fx_pcrel)
23464	{
23465	  code = BFD_RELOC_16_PCREL;
23466	  break;
23467	}
23468
23469    case BFD_RELOC_32:
23470      if (fixp->fx_pcrel)
23471	{
23472	  code = BFD_RELOC_32_PCREL;
23473	  break;
23474	}
23475
23476    case BFD_RELOC_ARM_MOVW:
23477      if (fixp->fx_pcrel)
23478	{
23479	  code = BFD_RELOC_ARM_MOVW_PCREL;
23480	  break;
23481	}
23482
23483    case BFD_RELOC_ARM_MOVT:
23484      if (fixp->fx_pcrel)
23485	{
23486	  code = BFD_RELOC_ARM_MOVT_PCREL;
23487	  break;
23488	}
23489
23490    case BFD_RELOC_ARM_THUMB_MOVW:
23491      if (fixp->fx_pcrel)
23492	{
23493	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
23494	  break;
23495	}
23496
23497    case BFD_RELOC_ARM_THUMB_MOVT:
23498      if (fixp->fx_pcrel)
23499	{
23500	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
23501	  break;
23502	}
23503
23504    case BFD_RELOC_NONE:
23505    case BFD_RELOC_ARM_PCREL_BRANCH:
23506    case BFD_RELOC_ARM_PCREL_BLX:
23507    case BFD_RELOC_RVA:
23508    case BFD_RELOC_THUMB_PCREL_BRANCH7:
23509    case BFD_RELOC_THUMB_PCREL_BRANCH9:
23510    case BFD_RELOC_THUMB_PCREL_BRANCH12:
23511    case BFD_RELOC_THUMB_PCREL_BRANCH20:
23512    case BFD_RELOC_THUMB_PCREL_BRANCH23:
23513    case BFD_RELOC_THUMB_PCREL_BRANCH25:
23514    case BFD_RELOC_VTABLE_ENTRY:
23515    case BFD_RELOC_VTABLE_INHERIT:
23516#ifdef TE_PE
23517    case BFD_RELOC_32_SECREL:
23518#endif
23519      code = fixp->fx_r_type;
23520      break;
23521
23522    case BFD_RELOC_THUMB_PCREL_BLX:
23523#ifdef OBJ_ELF
23524      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
23525	code = BFD_RELOC_THUMB_PCREL_BRANCH23;
23526      else
23527#endif
23528	code = BFD_RELOC_THUMB_PCREL_BLX;
23529      break;
23530
23531    case BFD_RELOC_ARM_LITERAL:
23532    case BFD_RELOC_ARM_HWLITERAL:
23533      /* If this is called then a literal has
23534	 been referenced across a section boundary.  */
23535      as_bad_where (fixp->fx_file, fixp->fx_line,
23536		    _("literal referenced across section boundary"));
23537      return NULL;
23538
23539#ifdef OBJ_ELF
23540    case BFD_RELOC_ARM_TLS_CALL:
23541    case BFD_RELOC_ARM_THM_TLS_CALL:
23542    case BFD_RELOC_ARM_TLS_DESCSEQ:
23543    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
23544    case BFD_RELOC_ARM_GOT32:
23545    case BFD_RELOC_ARM_GOTOFF:
23546    case BFD_RELOC_ARM_GOT_PREL:
23547    case BFD_RELOC_ARM_PLT32:
23548    case BFD_RELOC_ARM_TARGET1:
23549    case BFD_RELOC_ARM_ROSEGREL32:
23550    case BFD_RELOC_ARM_SBREL32:
23551    case BFD_RELOC_ARM_PREL31:
23552    case BFD_RELOC_ARM_TARGET2:
23553    case BFD_RELOC_ARM_TLS_LDO32:
23554    case BFD_RELOC_ARM_PCREL_CALL:
23555    case BFD_RELOC_ARM_PCREL_JUMP:
23556    case BFD_RELOC_ARM_ALU_PC_G0_NC:
23557    case BFD_RELOC_ARM_ALU_PC_G0:
23558    case BFD_RELOC_ARM_ALU_PC_G1_NC:
23559    case BFD_RELOC_ARM_ALU_PC_G1:
23560    case BFD_RELOC_ARM_ALU_PC_G2:
23561    case BFD_RELOC_ARM_LDR_PC_G0:
23562    case BFD_RELOC_ARM_LDR_PC_G1:
23563    case BFD_RELOC_ARM_LDR_PC_G2:
23564    case BFD_RELOC_ARM_LDRS_PC_G0:
23565    case BFD_RELOC_ARM_LDRS_PC_G1:
23566    case BFD_RELOC_ARM_LDRS_PC_G2:
23567    case BFD_RELOC_ARM_LDC_PC_G0:
23568    case BFD_RELOC_ARM_LDC_PC_G1:
23569    case BFD_RELOC_ARM_LDC_PC_G2:
23570    case BFD_RELOC_ARM_ALU_SB_G0_NC:
23571    case BFD_RELOC_ARM_ALU_SB_G0:
23572    case BFD_RELOC_ARM_ALU_SB_G1_NC:
23573    case BFD_RELOC_ARM_ALU_SB_G1:
23574    case BFD_RELOC_ARM_ALU_SB_G2:
23575    case BFD_RELOC_ARM_LDR_SB_G0:
23576    case BFD_RELOC_ARM_LDR_SB_G1:
23577    case BFD_RELOC_ARM_LDR_SB_G2:
23578    case BFD_RELOC_ARM_LDRS_SB_G0:
23579    case BFD_RELOC_ARM_LDRS_SB_G1:
23580    case BFD_RELOC_ARM_LDRS_SB_G2:
23581    case BFD_RELOC_ARM_LDC_SB_G0:
23582    case BFD_RELOC_ARM_LDC_SB_G1:
23583    case BFD_RELOC_ARM_LDC_SB_G2:
23584    case BFD_RELOC_ARM_V4BX:
23585      code = fixp->fx_r_type;
23586      break;
23587
23588    case BFD_RELOC_ARM_TLS_GOTDESC:
23589    case BFD_RELOC_ARM_TLS_GD32:
23590    case BFD_RELOC_ARM_TLS_LE32:
23591    case BFD_RELOC_ARM_TLS_IE32:
23592    case BFD_RELOC_ARM_TLS_LDM32:
23593      /* BFD will include the symbol's address in the addend.
23594	 But we don't want that, so subtract it out again here.  */
23595      if (!S_IS_COMMON (fixp->fx_addsy))
23596	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
23597      code = fixp->fx_r_type;
23598      break;
23599#endif
23600
23601    case BFD_RELOC_ARM_IMMEDIATE:
23602      as_bad_where (fixp->fx_file, fixp->fx_line,
23603		    _("internal relocation (type: IMMEDIATE) not fixed up"));
23604      return NULL;
23605
23606    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
23607      as_bad_where (fixp->fx_file, fixp->fx_line,
23608		    _("ADRL used for a symbol not defined in the same file"));
23609      return NULL;
23610
23611    case BFD_RELOC_ARM_OFFSET_IMM:
23612      if (section->use_rela_p)
23613	{
23614	  code = fixp->fx_r_type;
23615	  break;
23616	}
23617
23618      if (fixp->fx_addsy != NULL
23619	  && !S_IS_DEFINED (fixp->fx_addsy)
23620	  && S_IS_LOCAL (fixp->fx_addsy))
23621	{
23622	  as_bad_where (fixp->fx_file, fixp->fx_line,
23623			_("undefined local label `%s'"),
23624			S_GET_NAME (fixp->fx_addsy));
23625	  return NULL;
23626	}
23627
23628      as_bad_where (fixp->fx_file, fixp->fx_line,
23629		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
23630      return NULL;
23631
23632    default:
23633      {
23634	char * type;
23635
23636	switch (fixp->fx_r_type)
23637	  {
23638	  case BFD_RELOC_NONE:		   type = "NONE";	  break;
23639	  case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
23640	  case BFD_RELOC_ARM_SHIFT_IMM:	   type = "SHIFT_IMM";	  break;
23641	  case BFD_RELOC_ARM_SMC:	   type = "SMC";	  break;
23642	  case BFD_RELOC_ARM_SWI:	   type = "SWI";	  break;
23643	  case BFD_RELOC_ARM_MULTI:	   type = "MULTI";	  break;
23644	  case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";	  break;
23645	  case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
23646	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
23647	  case BFD_RELOC_ARM_THUMB_ADD:	   type = "THUMB_ADD";	  break;
23648	  case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
23649	  case BFD_RELOC_ARM_THUMB_IMM:	   type = "THUMB_IMM";	  break;
23650	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
23651	  default:			   type = _("<unknown>"); break;
23652	  }
23653	as_bad_where (fixp->fx_file, fixp->fx_line,
23654		      _("cannot represent %s relocation in this object file format"),
23655		      type);
23656	return NULL;
23657      }
23658    }
23659
23660#ifdef OBJ_ELF
23661  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
23662      && GOT_symbol
23663      && fixp->fx_addsy == GOT_symbol)
23664    {
23665      code = BFD_RELOC_ARM_GOTPC;
23666      reloc->addend = fixp->fx_offset = reloc->address;
23667    }
23668#endif
23669
23670  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
23671
23672  if (reloc->howto == NULL)
23673    {
23674      as_bad_where (fixp->fx_file, fixp->fx_line,
23675		    _("cannot represent %s relocation in this object file format"),
23676		    bfd_get_reloc_code_name (code));
23677      return NULL;
23678    }
23679
23680  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
23681     vtable entry to be used in the relocation's section offset.  */
23682  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
23683    reloc->address = fixp->fx_offset;
23684
23685  return reloc;
23686}
23687
23688/* This fix_new is called by cons via TC_CONS_FIX_NEW.	*/
23689
23690void
23691cons_fix_new_arm (fragS *	frag,
23692		  int		where,
23693		  int		size,
23694		  expressionS * exp,
23695		  bfd_reloc_code_real_type reloc)
23696{
23697  int pcrel = 0;
23698
23699  /* Pick a reloc.
23700     FIXME: @@ Should look at CPU word size.  */
23701  switch (size)
23702    {
23703    case 1:
23704      reloc = BFD_RELOC_8;
23705      break;
23706    case 2:
23707      reloc = BFD_RELOC_16;
23708      break;
23709    case 4:
23710    default:
23711      reloc = BFD_RELOC_32;
23712      break;
23713    case 8:
23714      reloc = BFD_RELOC_64;
23715      break;
23716    }
23717
23718#ifdef TE_PE
23719  if (exp->X_op == O_secrel)
23720  {
23721    exp->X_op = O_symbol;
23722    reloc = BFD_RELOC_32_SECREL;
23723  }
23724#endif
23725
23726  fix_new_exp (frag, where, size, exp, pcrel, reloc);
23727}
23728
23729#if defined (OBJ_COFF)
23730void
23731arm_validate_fix (fixS * fixP)
23732{
23733  /* If the destination of the branch is a defined symbol which does not have
23734     the THUMB_FUNC attribute, then we must be calling a function which has
23735     the (interfacearm) attribute.  We look for the Thumb entry point to that
23736     function and change the branch to refer to that function instead.	*/
23737  if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
23738      && fixP->fx_addsy != NULL
23739      && S_IS_DEFINED (fixP->fx_addsy)
23740      && ! THUMB_IS_FUNC (fixP->fx_addsy))
23741    {
23742      fixP->fx_addsy = find_real_start (fixP->fx_addsy);
23743    }
23744}
23745#endif
23746
23747
23748int
23749arm_force_relocation (struct fix * fixp)
23750{
23751#if defined (OBJ_COFF) && defined (TE_PE)
23752  if (fixp->fx_r_type == BFD_RELOC_RVA)
23753    return 1;
23754#endif
23755
23756  /* In case we have a call or a branch to a function in ARM ISA mode from
23757     a Thumb function, or vice versa, force the relocation.  These relocations
23758     are cleared off for some cores that might have blx, where simple
23759     transformations are possible.  */
23760
23761#ifdef OBJ_ELF
23762  switch (fixp->fx_r_type)
23763    {
23764    case BFD_RELOC_ARM_PCREL_JUMP:
23765    case BFD_RELOC_ARM_PCREL_CALL:
23766    case BFD_RELOC_THUMB_PCREL_BLX:
23767      if (THUMB_IS_FUNC (fixp->fx_addsy))
23768	return 1;
23769      break;
23770
23771    case BFD_RELOC_ARM_PCREL_BLX:
23772    case BFD_RELOC_THUMB_PCREL_BRANCH25:
23773    case BFD_RELOC_THUMB_PCREL_BRANCH20:
23774    case BFD_RELOC_THUMB_PCREL_BRANCH23:
23775      if (ARM_IS_FUNC (fixp->fx_addsy))
23776	return 1;
23777      break;
23778
23779    default:
23780      break;
23781    }
23782#endif
23783
23784  /* Resolve these relocations even if the symbol is extern or weak.
23785     Technically this is probably wrong due to symbol preemption.
23786     In practice these relocations do not have enough range to be useful
23787     at dynamic link time, and some code (e.g. in the Linux kernel)
23788     expects these references to be resolved.  */
23789  if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
23790      || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
23791      || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
23792      || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
23793      || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
23794      || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
23795      || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
23796      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
23797      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
23798      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
23799      || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
23800      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
23801      || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
23802      || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
23803    return 0;
23804
23805  /* Always leave these relocations for the linker.  */
23806  if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
23807       && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
23808      || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
23809    return 1;
23810
23811  /* Always generate relocations against function symbols.  */
23812  if (fixp->fx_r_type == BFD_RELOC_32
23813      && fixp->fx_addsy
23814      && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
23815    return 1;
23816
23817  return generic_force_reloc (fixp);
23818}
23819
23820#if defined (OBJ_ELF) || defined (OBJ_COFF)
23821/* Relocations against function names must be left unadjusted,
23822   so that the linker can use this information to generate interworking
23823   stubs.  The MIPS version of this function
23824   also prevents relocations that are mips-16 specific, but I do not
23825   know why it does this.
23826
23827   FIXME:
23828   There is one other problem that ought to be addressed here, but
23829   which currently is not:  Taking the address of a label (rather
23830   than a function) and then later jumping to that address.  Such
23831   addresses also ought to have their bottom bit set (assuming that
23832   they reside in Thumb code), but at the moment they will not.	 */
23833
23834bfd_boolean
23835arm_fix_adjustable (fixS * fixP)
23836{
23837  if (fixP->fx_addsy == NULL)
23838    return 1;
23839    return TRUE;
23840  /* Preserve relocations against symbols with function type.  */
23841  if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
23842    return FALSE;
23843
23844  if (THUMB_IS_FUNC (fixP->fx_addsy)
23845      && fixP->fx_subsy == NULL)
23846    return FALSE;
23847
23848  /* We need the symbol name for the VTABLE entries.  */
23849  if (	 fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
23850      || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
23851    return FALSE;
23852
23853  /* Don't allow symbols to be discarded on GOT related relocs.	 */
23854  if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
23855      || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
23856      || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
23857      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
23858      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
23859      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
23860      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
23861      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
23862      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
23863      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
23864      || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
23865      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
23866      || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
23867      || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
23868    return FALSE;
23869
23870  /* Similarly for group relocations.  */
23871  if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
23872       && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
23873      || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
23874    return FALSE;
23875
23876  /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols.  */
23877  if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
23878      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
23879      || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
23880      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
23881      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
23882      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
23883      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
23884      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
23885    return FALSE;
23886
23887  return TRUE;
23888}
23889#endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
23890
23891#ifdef OBJ_ELF
23892
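/* Return the name of the BFD target vector to use for the output file,
   chosen according to the target interface (Symbian, VxWorks, NaCl or
   plain ELF) and the selected endianness.  */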
23893const char *
23894elf32_arm_target_format (void)
23895{
23896#ifdef TE_SYMBIAN
23897  return (target_big_endian
23898	  ? "elf32-bigarm-symbian"
23899	  : "elf32-littlearm-symbian");
23900#elif defined (TE_VXWORKS)
23901  return (target_big_endian
23902	  ? "elf32-bigarm-vxworks"
23903	  : "elf32-littlearm-vxworks");
23904#elif defined (TE_NACL)
23905  return (target_big_endian
23906	  ? "elf32-bigarm-nacl"
23907	  : "elf32-littlearm-nacl");
23908#else
23909  if (target_big_endian)
23910    return "elf32-bigarm";
23911  else
23912    return "elf32-littlearm";
23913#endif
23914}
23915
23916void
23917armelf_frob_symbol (symbolS * symp,
23918		    int *     puntp)
23919{
23920  elf_frob_symbol (symp, puntp);
23921}
23922#endif
23923
23924/* MD interface: Finalization.	*/
23925
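/* Called at the end of assembly.  Check that every IT block was closed
   properly and dump any literal pools that have not yet been emitted
   into their sections.  */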
23926void
23927arm_cleanup (void)
23928{
23929  literal_pool * pool;
23930
23931  /* Ensure that all the IT blocks are properly closed.  */
23932  check_it_blocks_finished ();
23933
23934  for (pool = list_of_pools; pool; pool = pool->next)
23935    {
23936      /* Put it at the end of the relevant section.  */
23937      subseg_set (pool->section, pool->sub_section);
23938#ifdef OBJ_ELF
23939      arm_elf_change_section ();
23940#endif
23941      s_ltorg (0);
23942    }
23943}
23944
23945#ifdef OBJ_ELF
23946/* Remove any excess mapping symbols generated for alignment frags in
23947   SEC.  We may have created a mapping symbol before a zero byte
23948   alignment; remove it if there's a mapping symbol after the
23949   alignment.  */
23950static void
23951check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
23952		       void *dummy ATTRIBUTE_UNUSED)
23953{
23954  segment_info_type *seginfo = seg_info (sec);
23955  fragS *fragp;
23956
23957  if (seginfo == NULL || seginfo->frchainP == NULL)
23958    return;
23959
23960  for (fragp = seginfo->frchainP->frch_root;
23961       fragp != NULL;
23962       fragp = fragp->fr_next)
23963    {
23964      symbolS *sym = fragp->tc_frag_data.last_map;
23965      fragS *next = fragp->fr_next;
23966
23967      /* Variable-sized frags have been converted to fixed size by
23968	 this point.  But if this was variable-sized to start with,
23969	 there will be a fixed-size frag after it.  So don't handle
23970	 next == NULL.  */
23971      if (sym == NULL || next == NULL)
23972	continue;
23973
23974      if (S_GET_VALUE (sym) < next->fr_address)
23975	/* Not at the end of this frag.  */
23976	continue;
23977      know (S_GET_VALUE (sym) == next->fr_address);
23978
23979      do
23980	{
23981	  if (next->tc_frag_data.first_map != NULL)
23982	    {
23983	      /* Next frag starts with a mapping symbol.  Discard this
23984		 one.  */
23985	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
23986	      break;
23987	    }
23988
23989	  if (next->fr_next == NULL)
23990	    {
23991	      /* This mapping symbol is at the end of the section.  Discard
23992		 it.  */
23993	      know (next->fr_fix == 0 && next->fr_var == 0);
23994	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
23995	      break;
23996	    }
23997
23998	  /* As long as we have empty frags without any mapping symbols,
23999	     keep looking.  */
24000	  /* If the next frag is non-empty and does not start with a
24001	     mapping symbol, then this mapping symbol is required.  */
24002	  if (next->fr_address != next->fr_next->fr_address)
24003	    break;
24004
24005	  next = next->fr_next;
24006	}
24007      while (next != NULL);
24008    }
24009}
24010#endif
24011
24012/* Adjust the symbol table.  This marks Thumb symbols as distinct from
24013   ARM ones.  */
24014
24015void
24016arm_adjust_symtab (void)
24017{
24018#ifdef OBJ_COFF
24019  symbolS * sym;
24020
24021  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
24022    {
24023      if (ARM_IS_THUMB (sym))
24024	{
24025	  if (THUMB_IS_FUNC (sym))
24026	    {
24027	      /* Mark the symbol as a Thumb function.  */
24028	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
24029		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
24030		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
24031
24032	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
24033		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
24034	      else
24035		as_bad (_("%s: unexpected function type: %d"),
24036			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
24037	    }
24038	  else switch (S_GET_STORAGE_CLASS (sym))
24039	    {
24040	    case C_EXT:
24041	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
24042	      break;
24043	    case C_STAT:
24044	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
24045	      break;
24046	    case C_LABEL:
24047	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
24048	      break;
24049	    default:
24050	      /* Do nothing.  */
24051	      break;
24052	    }
24053	}
24054
24055      if (ARM_IS_INTERWORK (sym))
24056	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
24057    }
24058#endif
24059#ifdef OBJ_ELF
24060  symbolS * sym;
24061  char	    bind;
24062
24063  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
24064    {
24065      if (ARM_IS_THUMB (sym))
24066	{
24067	  elf_symbol_type * elf_sym;
24068
24069	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
24070	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);
24071
24072	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
24073		BFD_ARM_SPECIAL_SYM_TYPE_ANY))
24074	    {
24075	      /* If it's a .thumb_func, declare it as so,
24076		 otherwise tag label as .code 16.  */
24077	      if (THUMB_IS_FUNC (sym))
24078		elf_sym->internal_elf_sym.st_target_internal
24079		  = ST_BRANCH_TO_THUMB;
24080	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
24081		elf_sym->internal_elf_sym.st_info =
24082		  ELF_ST_INFO (bind, STT_ARM_16BIT);
24083	    }
24084	}
24085    }
24086
24087  /* Remove any overlapping mapping symbols generated by alignment frags.  */
24088  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
24089  /* Now do generic ELF adjustments.  */
24090  elf_adjust_symtab ();
24091#endif
24092}
24093
24094/* MD interface: Initialization.  */
24095
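/* Pre-convert the textual floating point constants in fp_const[] into
   their internal flonum form in fp_values[], aborting if any of the
   built-in constants fails to parse.  */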
24096static void
24097set_constant_flonums (void)
24098{
24099  int i;
24100
24101  for (i = 0; i < NUM_FLOAT_VALS; i++)
24102    if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
24103      abort ();
24104}
24105
24106/* Auto-select Thumb mode if it's the only available instruction set for the
24107   given architecture.  */
24108
24109static void
24110autoselect_thumb_from_cpu_variant (void)
24111{
24112  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
24113    opcode_select (16);
24114}
24115
24116void
24117md_begin (void)
24118{
24119  unsigned mach;
24120  unsigned int i;
24121
24122  if (	 (arm_ops_hsh = hash_new ()) == NULL
24123      || (arm_cond_hsh = hash_new ()) == NULL
24124      || (arm_shift_hsh = hash_new ()) == NULL
24125      || (arm_psr_hsh = hash_new ()) == NULL
24126      || (arm_v7m_psr_hsh = hash_new ()) == NULL
24127      || (arm_reg_hsh = hash_new ()) == NULL
24128      || (arm_reloc_hsh = hash_new ()) == NULL
24129      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
24130    as_fatal (_("virtual memory exhausted"));
24131
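  /* Populate the lookup tables used during assembly: opcodes,
     condition codes, shift names, PSR names, register names,
     relocation names and barrier options.  */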
24132  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
24133    hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
24134  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
24135    hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
24136  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
24137    hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
24138  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
24139    hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
24140  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
24141    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
24142		 (void *) (v7m_psrs + i));
24143  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
24144    hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
24145  for (i = 0;
24146       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
24147       i++)
24148    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
24149		 (void *) (barrier_opt_names + i));
24150#ifdef OBJ_ELF
24151  for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
24152    {
24153      struct reloc_entry * entry = reloc_names + i;
24154
24155      if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
24156	/* This makes encode_branch() use the EABI versions of this relocation.  */
24157	entry->reloc = BFD_RELOC_UNUSED;
24158
24159      hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
24160    }
24161#endif
24162
24163  set_constant_flonums ();
24164
24165  /* Set the cpu variant based on the command-line options.  We prefer
24166     -mcpu= over -march= if both are set (as for GCC); and we prefer
24167     -mfpu= over any other way of setting the floating point unit.
24168     Use of legacy options together with new options is faulted.  */
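  /* For example, if both -mcpu=arm7tdmi and -march=armv5t appear on
     the command line, the -mcpu value is used and the -march value is
     silently ignored.  */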
24169  if (legacy_cpu)
24170    {
24171      if (mcpu_cpu_opt || march_cpu_opt)
24172	as_bad (_("use of old and new-style options to set CPU type"));
24173
24174      mcpu_cpu_opt = legacy_cpu;
24175    }
24176  else if (!mcpu_cpu_opt)
24177    mcpu_cpu_opt = march_cpu_opt;
24178
24179  if (legacy_fpu)
24180    {
24181      if (mfpu_opt)
24182	as_bad (_("use of old and new-style options to set FPU type"));
24183
24184      mfpu_opt = legacy_fpu;
24185    }
24186  else if (!mfpu_opt)
24187    {
24188#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
24189	|| defined (TE_NetBSD) || defined (TE_VXWORKS))
24190      /* Some environments specify a default FPU.  If they don't, infer it
24191	 from the processor.  */
24192      if (mcpu_fpu_opt)
24193	mfpu_opt = mcpu_fpu_opt;
24194      else
24195	mfpu_opt = march_fpu_opt;
24196#else
24197      mfpu_opt = &fpu_default;
24198#endif
24199    }
24200
24201  if (!mfpu_opt)
24202    {
24203      if (mcpu_cpu_opt != NULL)
24204	mfpu_opt = &fpu_default;
24205      else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
24206	mfpu_opt = &fpu_arch_vfp_v2;
24207      else
24208	mfpu_opt = &fpu_arch_fpa;
24209    }
24210
24211#ifdef CPU_DEFAULT
24212  if (!mcpu_cpu_opt)
24213    {
24214      mcpu_cpu_opt = &cpu_default;
24215      selected_cpu = cpu_default;
24216    }
24217  else if (no_cpu_selected ())
24218    selected_cpu = cpu_default;
24219#else
24220  if (mcpu_cpu_opt)
24221    selected_cpu = *mcpu_cpu_opt;
24222  else
24223    mcpu_cpu_opt = &arm_arch_any;
24224#endif
24225
24226  ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
24227
24228  autoselect_thumb_from_cpu_variant ();
24229
24230  arm_arch_used = thumb_arch_used = arm_arch_none;
24231
24232#if defined OBJ_COFF || defined OBJ_ELF
24233  {
24234    unsigned int flags = 0;
24235
24236#if defined OBJ_ELF
24237    flags = meabi_flags;
24238
24239    switch (meabi_flags)
24240      {
24241      case EF_ARM_EABI_UNKNOWN:
24242#endif
24243	/* Set the flags in the private structure.  */
24244	if (uses_apcs_26)      flags |= F_APCS26;
24245	if (support_interwork) flags |= F_INTERWORK;
24246	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
24247	if (pic_code)	       flags |= F_PIC;
24248	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
24249	  flags |= F_SOFT_FLOAT;
24250
24251	switch (mfloat_abi_opt)
24252	  {
24253	  case ARM_FLOAT_ABI_SOFT:
24254	  case ARM_FLOAT_ABI_SOFTFP:
24255	    flags |= F_SOFT_FLOAT;
24256	    break;
24257
24258	  case ARM_FLOAT_ABI_HARD:
24259	    if (flags & F_SOFT_FLOAT)
24260	      as_bad (_("hard-float conflicts with specified fpu"));
24261	    break;
24262	  }
24263
24264	/* Using pure-endian doubles (even if soft-float).	*/
24265	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
24266	  flags |= F_VFP_FLOAT;
24267
24268#if defined OBJ_ELF
24269	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
24270	    flags |= EF_ARM_MAVERICK_FLOAT;
24271	break;
24272
24273      case EF_ARM_EABI_VER4:
24274      case EF_ARM_EABI_VER5:
24275	/* No additional flags to set.	*/
24276	break;
24277
24278      default:
24279	abort ();
24280      }
24281#endif
24282    bfd_set_private_flags (stdoutput, flags);
24283
24284    /* We have run out of flags in the COFF header to encode the
24285       status of ATPCS support, so instead we create a dummy,
24286       empty, debug section called .arm.atpcs.	*/
24287    if (atpcs)
24288      {
24289	asection * sec;
24290
24291	sec = bfd_make_section (stdoutput, ".arm.atpcs");
24292
24293	if (sec != NULL)
24294	  {
24295	    bfd_set_section_flags
24296	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
24297	    bfd_set_section_size (stdoutput, sec, 0);
24298	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
24299	  }
24300      }
24301  }
24302#endif
24303
24304  /* Record the CPU type as well.  */
24305  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
24306    mach = bfd_mach_arm_iWMMXt2;
24307  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
24308    mach = bfd_mach_arm_iWMMXt;
24309  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
24310    mach = bfd_mach_arm_XScale;
24311  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
24312    mach = bfd_mach_arm_ep9312;
24313  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
24314    mach = bfd_mach_arm_5TE;
24315  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
24316    {
24317      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
24318	mach = bfd_mach_arm_5T;
24319      else
24320	mach = bfd_mach_arm_5;
24321    }
24322  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
24323    {
24324      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
24325	mach = bfd_mach_arm_4T;
24326      else
24327	mach = bfd_mach_arm_4;
24328    }
24329  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
24330    mach = bfd_mach_arm_3M;
24331  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
24332    mach = bfd_mach_arm_3;
24333  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
24334    mach = bfd_mach_arm_2a;
24335  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
24336    mach = bfd_mach_arm_2;
24337  else
24338    mach = bfd_mach_arm_unknown;
24339
24340  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
24341}
24342
24343/* Command line processing.  */
24344
24345/* md_parse_option
24346      Invocation line includes a switch not recognized by the base assembler.
24347      See if it's a processor-specific option.
24348
24349      This routine is somewhat complicated by the need for backwards
24350      compatibility (since older releases of gcc can't be changed).
24351      The new options try to make the interface as compatible as
24352      possible with GCC.
24353
24354      New options (supported) are:
24355
24356	      -mcpu=<cpu name>		 Assemble for selected processor
24357	      -march=<architecture name> Assemble for selected architecture
24358	      -mfpu=<fpu architecture>	 Assemble for selected FPU.
24359	      -EB/-mbig-endian		 Big-endian
24360	      -EL/-mlittle-endian	 Little-endian
24361	      -k			 Generate PIC code
24362	      -mthumb			 Start in Thumb mode
24363	      -mthumb-interwork		 Code supports ARM/Thumb interworking
24364
24365	      -m[no-]warn-deprecated     Warn about deprecated features
24366	      -m[no-]warn-syms		 Warn when symbols match instructions
24367
24368      For now we will also provide support for:
24369
24370	      -mapcs-32			 32-bit Program counter
24371	      -mapcs-26			 26-bit Program counter
24372	      -mapcs-float		 Floats passed in FP registers
24373	      -mapcs-reentrant		 Reentrant code
24374	      -matpcs
24375      (sometime these will probably be replaced with -mapcs=<list of options>
24376      and -matpcs=<list of options>)
24377
24378      The remaining options are only supported for backwards compatibility.
24379      Cpu variants, the arm part is optional:
24380	      -m[arm]1		      Currently not supported.
24381	      -m[arm]2, -m[arm]250    Arm 2 and Arm 250 processor
24382	      -m[arm]3		      Arm 3 processor
24383	      -m[arm]6[xx],	      Arm 6 processors
24384	      -m[arm]7[xx][t][[d]m]   Arm 7 processors
24385	      -m[arm]8[10]	      Arm 8 processors
24386	      -m[arm]9[20][tdmi]      Arm 9 processors
24387	      -mstrongarm[110[0]]     StrongARM processors
24388	      -mxscale		      XScale processors
24389	      -m[arm]v[2345[t[e]]]    Arm architectures
24390	      -mall		      All (except the ARM1)
24391      FP variants:
24392	      -mfpa10, -mfpa11	      FPA10 and 11 co-processor instructions
24393	      -mfpe-old		      (No float load/store multiples)
24394	      -mvfpxd		      VFP Single precision
24395	      -mvfp		      All VFP
24396	      -mno-fpu		      Disable all floating point instructions
24397
24398      The following CPU names are recognized:
24399	      arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
24400	      arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
24401	      arm700i, arm710, arm710t, arm720, arm720t, arm740t, arm710c,
24402	      arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
24403	      arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
24404	      arm10t, arm10e, arm1020t, arm1020e, arm10200e,
24405	      strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
24406
24407      */
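/* For example, a typical new-style invocation is
   "as -mcpu=arm920t -mfpu=vfp -mfloat-abi=softfp", where each name
   must appear in the corresponding option table below.  */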
24408
24409const char * md_shortopts = "m:k";
24410
24411#ifdef ARM_BI_ENDIAN
24412#define OPTION_EB (OPTION_MD_BASE + 0)
24413#define OPTION_EL (OPTION_MD_BASE + 1)
24414#else
24415#if TARGET_BYTES_BIG_ENDIAN
24416#define OPTION_EB (OPTION_MD_BASE + 0)
24417#else
24418#define OPTION_EL (OPTION_MD_BASE + 1)
24419#endif
24420#endif
24421#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
24422
24423struct option md_longopts[] =
24424{
24425#ifdef OPTION_EB
24426  {"EB", no_argument, NULL, OPTION_EB},
24427#endif
24428#ifdef OPTION_EL
24429  {"EL", no_argument, NULL, OPTION_EL},
24430#endif
24431  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
24432  {NULL, no_argument, NULL, 0}
24433};
24434
24435
24436size_t md_longopts_size = sizeof (md_longopts);
24437
24438struct arm_option_table
24439{
24440  char *option;		/* Option name to match.  */
24441  char *help;		/* Help information.  */
24442  int  *var;		/* Variable to change.	*/
24443  int	value;		/* What to change it to.  */
24444  char *deprecated;	/* If non-null, print this message.  */
24445};
24446
24447struct arm_option_table arm_opts[] =
24448{
24449  {"k",	     N_("generate PIC code"),	   &pic_code,	 1, NULL},
24450  {"mthumb", N_("assemble Thumb code"),	   &thumb_mode,	 1, NULL},
24451  {"mthumb-interwork", N_("support ARM/Thumb interworking"),
24452   &support_interwork, 1, NULL},
24453  {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
24454  {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
24455  {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
24456   1, NULL},
24457  {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
24458  {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
24459  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
24460  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
24461   NULL},
24462
24463  /* These are recognized by the assembler, but have no effect on code.  */
24464  {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
24465  {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
24466
24467  {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
24468  {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
24469   &warn_on_deprecated, 0, NULL},
24470  {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms), TRUE, NULL},
24471  {"mno-warn-syms", N_("disable warnings about symbols that match instructions"), (int *) (& flag_warn_syms), FALSE, NULL},
24472  {NULL, NULL, NULL, 0, NULL}
24473};
24474
24475struct arm_legacy_option_table
24476{
24477  char *option;				/* Option name to match.  */
24478  const arm_feature_set	**var;		/* Variable to change.	*/
24479  const arm_feature_set	value;		/* What to change it to.  */
24480  char *deprecated;			/* If non-null, print this message.  */
24481};
24482
24483const struct arm_legacy_option_table arm_legacy_opts[] =
24484{
24485  /* DON'T add any new processors to this list -- we want the whole list
24486     to go away...  Add them to the processors table instead.  */
24487  {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
24488  {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
24489  {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
24490  {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
24491  {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
24492  {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
24493  {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
24494  {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
24495  {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
24496  {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
24497  {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
24498  {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
24499  {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
24500  {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
24501  {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
24502  {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
24503  {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
24504  {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
24505  {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
24506  {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
24507  {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
24508  {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
24509  {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
24510  {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
24511  {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
24512  {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
24513  {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
24514  {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
24515  {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
24516  {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
24517  {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
24518  {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
24519  {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
24520  {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
24521  {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
24522  {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
24523  {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
24524  {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
24525  {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
24526  {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
24527  {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
24528  {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
24529  {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
24530  {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
24531  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
24532  {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
24533  {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
24534  {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
24535  {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
24536  {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
24537  {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
24538  {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
24539  {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
24540  {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
24541  {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
24542  {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
24543  {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
24544  {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
24545  {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
24546  {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
24547  {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
24548  {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
24549  {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
24550  {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
24551  {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
24552  {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
24553  {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
24554  {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
24555  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
24556  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
24557   N_("use -mcpu=strongarm110")},
24558  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
24559   N_("use -mcpu=strongarm1100")},
24560  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
24561   N_("use -mcpu=strongarm1110")},
24562  {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
24563  {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
24564  {"mall",	 &legacy_cpu, ARM_ANY,	       N_("use -mcpu=all")},
24565
24566  /* Architecture variants -- don't add any more to this list either.  */
24567  {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
24568  {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
24569  {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
24570  {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
24571  {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
24572  {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
24573  {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
24574  {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
24575  {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
24576  {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
24577  {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
24578  {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
24579  {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
24580  {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
24581  {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
24582  {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
24583  {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
24584  {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
24585
24586  /* Floating point variants -- don't add any more to this list either.	 */
24587  {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
24588  {"mfpa10",   &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
24589  {"mfpa11",   &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
24590  {"mno-fpu",  &legacy_fpu, ARM_ARCH_NONE,
24591   N_("use either -mfpu=softfpa or -mfpu=softvfp")},
24592
24593  {NULL, NULL, ARM_ARCH_NONE, NULL}
24594};
24595
24596struct arm_cpu_option_table
24597{
24598  char *name;
24599  size_t name_len;
24600  const arm_feature_set	value;
24601  /* For some CPUs we assume an FPU unless the user explicitly sets
24602     -mfpu=...	*/
24603  const arm_feature_set	default_fpu;
24604  /* The canonical name of the CPU, or NULL to use NAME converted to upper
24605     case.  */
24606  const char *canonical_name;
24607};
24608
24609/* This list should, at a minimum, contain all the cpu names
24610   recognized by GCC.  */
24611#define ARM_CPU_OPT(N, V, DF, CN) { N, sizeof (N) - 1, V, DF, CN }
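/* For example, ARM_CPU_OPT ("arm920t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL)
   expands to { "arm920t", 7, ARM_ARCH_V4T, FPU_ARCH_FPA, NULL }; storing
   the name length lets the -mcpu= parser compare lengths before calling
   strncmp.  */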
24612static const struct arm_cpu_option_table arm_cpus[] =
24613{
24614  ARM_CPU_OPT ("all",		ARM_ANY,	 FPU_ARCH_FPA,    NULL),
24615  ARM_CPU_OPT ("arm1",		ARM_ARCH_V1,	 FPU_ARCH_FPA,    NULL),
24616  ARM_CPU_OPT ("arm2",		ARM_ARCH_V2,	 FPU_ARCH_FPA,    NULL),
24617  ARM_CPU_OPT ("arm250",	ARM_ARCH_V2S,	 FPU_ARCH_FPA,    NULL),
24618  ARM_CPU_OPT ("arm3",		ARM_ARCH_V2S,	 FPU_ARCH_FPA,    NULL),
24619  ARM_CPU_OPT ("arm6",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
24620  ARM_CPU_OPT ("arm60",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
24621  ARM_CPU_OPT ("arm600",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
24622  ARM_CPU_OPT ("arm610",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
24623  ARM_CPU_OPT ("arm620",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
24624  ARM_CPU_OPT ("arm7",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
24625  ARM_CPU_OPT ("arm7m",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
24626  ARM_CPU_OPT ("arm7d",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
24627  ARM_CPU_OPT ("arm7dm",	ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
24628  ARM_CPU_OPT ("arm7di",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
24629  ARM_CPU_OPT ("arm7dmi",	ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
24630  ARM_CPU_OPT ("arm70",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
24631  ARM_CPU_OPT ("arm700",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
24632  ARM_CPU_OPT ("arm700i",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
24633  ARM_CPU_OPT ("arm710",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
24634  ARM_CPU_OPT ("arm710t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
24635  ARM_CPU_OPT ("arm720",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
24636  ARM_CPU_OPT ("arm720t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
24637  ARM_CPU_OPT ("arm740t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
24638  ARM_CPU_OPT ("arm710c",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
24639  ARM_CPU_OPT ("arm7100",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
24640  ARM_CPU_OPT ("arm7500",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
24641  ARM_CPU_OPT ("arm7500fe",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
24642  ARM_CPU_OPT ("arm7t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
24643  ARM_CPU_OPT ("arm7tdmi",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
24644  ARM_CPU_OPT ("arm7tdmi-s",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
24645  ARM_CPU_OPT ("arm8",		ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
24646  ARM_CPU_OPT ("arm810",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
24647  ARM_CPU_OPT ("strongarm",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
24648  ARM_CPU_OPT ("strongarm1",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
24649  ARM_CPU_OPT ("strongarm110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
24650  ARM_CPU_OPT ("strongarm1100",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
24651  ARM_CPU_OPT ("strongarm1110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
24652  ARM_CPU_OPT ("arm9",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
24653  ARM_CPU_OPT ("arm920",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    "ARM920T"),
24654  ARM_CPU_OPT ("arm920t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
24655  ARM_CPU_OPT ("arm922t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
24656  ARM_CPU_OPT ("arm940t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
24657  ARM_CPU_OPT ("arm9tdmi",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL),
24658  ARM_CPU_OPT ("fa526",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL),
24659  ARM_CPU_OPT ("fa626",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL),
24660  /* For V5 or later processors we default to using VFP; but the user
24661     should really set the FPU type explicitly.	 */
24662  ARM_CPU_OPT ("arm9e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
24663  ARM_CPU_OPT ("arm9e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
24664  ARM_CPU_OPT ("arm926ej",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"),
24665  ARM_CPU_OPT ("arm926ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"),
24666  ARM_CPU_OPT ("arm926ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL),
24667  ARM_CPU_OPT ("arm946e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
24668  ARM_CPU_OPT ("arm946e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM946E-S"),
24669  ARM_CPU_OPT ("arm946e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
24670  ARM_CPU_OPT ("arm966e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
24671  ARM_CPU_OPT ("arm966e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM966E-S"),
24672  ARM_CPU_OPT ("arm966e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
24673  ARM_CPU_OPT ("arm968e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
24674  ARM_CPU_OPT ("arm10t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
24675  ARM_CPU_OPT ("arm10tdmi",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
24676  ARM_CPU_OPT ("arm10e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
24677  ARM_CPU_OPT ("arm1020",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM1020E"),
24678  ARM_CPU_OPT ("arm1020t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
24679  ARM_CPU_OPT ("arm1020e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
24680  ARM_CPU_OPT ("arm1022e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
24681  ARM_CPU_OPT ("arm1026ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2,
24682								 "ARM1026EJ-S"),
24683  ARM_CPU_OPT ("arm1026ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL),
24684  ARM_CPU_OPT ("fa606te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
24685  ARM_CPU_OPT ("fa616te",	ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL),
24686  ARM_CPU_OPT ("fa626te",	ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL),
24687  ARM_CPU_OPT ("fmp626",	ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL),
24688  ARM_CPU_OPT ("fa726te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
24689  ARM_CPU_OPT ("arm1136js",	ARM_ARCH_V6,	 FPU_NONE,	  "ARM1136J-S"),
24690  ARM_CPU_OPT ("arm1136j-s",	ARM_ARCH_V6,	 FPU_NONE,	  NULL),
24691  ARM_CPU_OPT ("arm1136jfs",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2,
24692								 "ARM1136JF-S"),
24693  ARM_CPU_OPT ("arm1136jf-s",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2, NULL),
24694  ARM_CPU_OPT ("mpcore",	ARM_ARCH_V6K,	 FPU_ARCH_VFP_V2, "MPCore"),
24695  ARM_CPU_OPT ("mpcorenovfp",	ARM_ARCH_V6K,	 FPU_NONE,	  "MPCore"),
24696  ARM_CPU_OPT ("arm1156t2-s",	ARM_ARCH_V6T2,	 FPU_NONE,	  NULL),
24697  ARM_CPU_OPT ("arm1156t2f-s",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP_V2, NULL),
24698  ARM_CPU_OPT ("arm1176jz-s",	ARM_ARCH_V6KZ,	 FPU_NONE,	  NULL),
24699  ARM_CPU_OPT ("arm1176jzf-s",	ARM_ARCH_V6KZ,	 FPU_ARCH_VFP_V2, NULL),
24700  ARM_CPU_OPT ("cortex-a5",	ARM_ARCH_V7A_MP_SEC,
24701						 FPU_NONE,	  "Cortex-A5"),
24702  ARM_CPU_OPT ("cortex-a7",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
24703								  "Cortex-A7"),
24704  ARM_CPU_OPT ("cortex-a8",	ARM_ARCH_V7A_SEC,
24705						 ARM_FEATURE_COPROC (FPU_VFP_V3
24706							| FPU_NEON_EXT_V1),
24707								  "Cortex-A8"),
24708  ARM_CPU_OPT ("cortex-a9",	ARM_ARCH_V7A_MP_SEC,
24709						 ARM_FEATURE_COPROC (FPU_VFP_V3
24710							| FPU_NEON_EXT_V1),
24711								  "Cortex-A9"),
24712  ARM_CPU_OPT ("cortex-a12",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
24713								  "Cortex-A12"),
24714  ARM_CPU_OPT ("cortex-a15",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
24715								  "Cortex-A15"),
24716  ARM_CPU_OPT ("cortex-a17",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
24717								  "Cortex-A17"),
24718  ARM_CPU_OPT ("cortex-a35",    ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
24719								  "Cortex-A35"),
24720  ARM_CPU_OPT ("cortex-a53",    ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
24721								  "Cortex-A53"),
24722  ARM_CPU_OPT ("cortex-a57",    ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
24723								  "Cortex-A57"),
24724  ARM_CPU_OPT ("cortex-a72",    ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
24725								  "Cortex-A72"),
24726  ARM_CPU_OPT ("cortex-r4",	ARM_ARCH_V7R,	 FPU_NONE,	  "Cortex-R4"),
24727  ARM_CPU_OPT ("cortex-r4f",	ARM_ARCH_V7R,	 FPU_ARCH_VFP_V3D16,
24728								  "Cortex-R4F"),
24729  ARM_CPU_OPT ("cortex-r5",	ARM_ARCH_V7R_IDIV,
24730						 FPU_NONE,	  "Cortex-R5"),
24731  ARM_CPU_OPT ("cortex-r7",	ARM_ARCH_V7R_IDIV,
24732						 FPU_ARCH_VFP_V3D16,
24733								  "Cortex-R7"),
24734  ARM_CPU_OPT ("cortex-m7",	ARM_ARCH_V7EM,	 FPU_NONE,	  "Cortex-M7"),
24735  ARM_CPU_OPT ("cortex-m4",	ARM_ARCH_V7EM,	 FPU_NONE,	  "Cortex-M4"),
24736  ARM_CPU_OPT ("cortex-m3",	ARM_ARCH_V7M,	 FPU_NONE,	  "Cortex-M3"),
24737  ARM_CPU_OPT ("cortex-m1",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M1"),
24738  ARM_CPU_OPT ("cortex-m0",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M0"),
24739  ARM_CPU_OPT ("cortex-m0plus",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M0+"),
24740  ARM_CPU_OPT ("exynos-m1",	ARM_ARCH_V8A,	 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
24741								  "Samsung " \
24742								  "Exynos M1"),
24743  ARM_CPU_OPT ("qdf24xx",	ARM_ARCH_V8A,	 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
24744								  "Qualcomm "
24745								  "QDF24XX"),
24746
24747  /* ??? XSCALE is really an architecture.  */
24748  ARM_CPU_OPT ("xscale",	ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
24749  /* ??? iwmmxt is not a processor.  */
24750  ARM_CPU_OPT ("iwmmxt",	ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL),
24751  ARM_CPU_OPT ("iwmmxt2",	ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL),
24752  ARM_CPU_OPT ("i80200",	ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
24753  /* Maverick */
24754  ARM_CPU_OPT ("ep9312",	ARM_FEATURE_LOW (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
24755						 FPU_ARCH_MAVERICK, "ARM920T"),
24756  /* Marvell processors.  */
24757  ARM_CPU_OPT ("marvell-pj4",   ARM_FEATURE_CORE_LOW (ARM_AEXT_V7A | ARM_EXT_MP
24758						      | ARM_EXT_SEC),
24759						FPU_ARCH_VFP_V3D16, NULL),
24760  ARM_CPU_OPT ("marvell-whitney", ARM_FEATURE_CORE_LOW (ARM_AEXT_V7A | ARM_EXT_MP
24761							| ARM_EXT_SEC),
24762					       FPU_ARCH_NEON_VFP_V4, NULL),
24763  /* APM X-Gene family.  */
24764  ARM_CPU_OPT ("xgene1",        ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
24765	                                                          "APM X-Gene 1"),
24766  ARM_CPU_OPT ("xgene2",        ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
24767	                                                          "APM X-Gene 2"),
24768
24769  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
24770};
24771#undef ARM_CPU_OPT
24772
24773struct arm_arch_option_table
24774{
24775  char *name;
24776  size_t name_len;
24777  const arm_feature_set	value;
24778  const arm_feature_set	default_fpu;
24779};
24780
24781/* This list should, at a minimum, contain all the architecture names
24782   recognized by GCC.  */
24783#define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
24784static const struct arm_arch_option_table arm_archs[] =
24785{
24786  ARM_ARCH_OPT ("all",		ARM_ANY,	 FPU_ARCH_FPA),
24787  ARM_ARCH_OPT ("armv1",	ARM_ARCH_V1,	 FPU_ARCH_FPA),
24788  ARM_ARCH_OPT ("armv2",	ARM_ARCH_V2,	 FPU_ARCH_FPA),
24789  ARM_ARCH_OPT ("armv2a",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
24790  ARM_ARCH_OPT ("armv2s",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
24791  ARM_ARCH_OPT ("armv3",	ARM_ARCH_V3,	 FPU_ARCH_FPA),
24792  ARM_ARCH_OPT ("armv3m",	ARM_ARCH_V3M,	 FPU_ARCH_FPA),
24793  ARM_ARCH_OPT ("armv4",	ARM_ARCH_V4,	 FPU_ARCH_FPA),
24794  ARM_ARCH_OPT ("armv4xm",	ARM_ARCH_V4xM,	 FPU_ARCH_FPA),
24795  ARM_ARCH_OPT ("armv4t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA),
24796  ARM_ARCH_OPT ("armv4txm",	ARM_ARCH_V4TxM,	 FPU_ARCH_FPA),
24797  ARM_ARCH_OPT ("armv5",	ARM_ARCH_V5,	 FPU_ARCH_VFP),
24798  ARM_ARCH_OPT ("armv5t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP),
24799  ARM_ARCH_OPT ("armv5txm",	ARM_ARCH_V5TxM,	 FPU_ARCH_VFP),
24800  ARM_ARCH_OPT ("armv5te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP),
24801  ARM_ARCH_OPT ("armv5texp",	ARM_ARCH_V5TExP, FPU_ARCH_VFP),
24802  ARM_ARCH_OPT ("armv5tej",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP),
24803  ARM_ARCH_OPT ("armv6",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
24804  ARM_ARCH_OPT ("armv6j",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
24805  ARM_ARCH_OPT ("armv6k",	ARM_ARCH_V6K,	 FPU_ARCH_VFP),
24806  ARM_ARCH_OPT ("armv6z",	ARM_ARCH_V6Z,	 FPU_ARCH_VFP),
24807  /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
24808     kept to preserve existing behaviour.  */
24809  ARM_ARCH_OPT ("armv6kz",	ARM_ARCH_V6KZ,	 FPU_ARCH_VFP),
24810  ARM_ARCH_OPT ("armv6zk",	ARM_ARCH_V6KZ,	 FPU_ARCH_VFP),
24811  ARM_ARCH_OPT ("armv6t2",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP),
24812  ARM_ARCH_OPT ("armv6kt2",	ARM_ARCH_V6KT2,	 FPU_ARCH_VFP),
24813  ARM_ARCH_OPT ("armv6zt2",	ARM_ARCH_V6ZT2,	 FPU_ARCH_VFP),
24814  /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
24815     kept to preserve existing behaviour.  */
24816  ARM_ARCH_OPT ("armv6kzt2",	ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
24817  ARM_ARCH_OPT ("armv6zkt2",	ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
24818  ARM_ARCH_OPT ("armv6-m",	ARM_ARCH_V6M,	 FPU_ARCH_VFP),
24819  ARM_ARCH_OPT ("armv6s-m",	ARM_ARCH_V6SM,	 FPU_ARCH_VFP),
24820  ARM_ARCH_OPT ("armv7",	ARM_ARCH_V7,	 FPU_ARCH_VFP),
24821  /* The official spelling of the ARMv7 profile variants is the dashed form.
24822     Accept the non-dashed form for compatibility with old toolchains.  */
24823  ARM_ARCH_OPT ("armv7a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
24824  ARM_ARCH_OPT ("armv7ve",	ARM_ARCH_V7VE,	 FPU_ARCH_VFP),
24825  ARM_ARCH_OPT ("armv7r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
24826  ARM_ARCH_OPT ("armv7m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
24827  ARM_ARCH_OPT ("armv7-a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
24828  ARM_ARCH_OPT ("armv7-r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
24829  ARM_ARCH_OPT ("armv7-m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
24830  ARM_ARCH_OPT ("armv7e-m",	ARM_ARCH_V7EM,	 FPU_ARCH_VFP),
24831  ARM_ARCH_OPT ("armv8-a",	ARM_ARCH_V8A,	 FPU_ARCH_VFP),
24832  ARM_ARCH_OPT ("armv8.1-a",	ARM_ARCH_V8_1A,	 FPU_ARCH_VFP),
24833  ARM_ARCH_OPT ("xscale",	ARM_ARCH_XSCALE, FPU_ARCH_VFP),
24834  ARM_ARCH_OPT ("iwmmxt",	ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
24835  ARM_ARCH_OPT ("iwmmxt2",	ARM_ARCH_IWMMXT2,FPU_ARCH_VFP),
24836  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
24837};
24838#undef ARM_ARCH_OPT
24839
24840/* ISA extensions in the co-processor and main instruction set space.  */
24841struct arm_option_extension_value_table
24842{
24843  char *name;
24844  size_t name_len;
24845  const arm_feature_set merge_value;
24846  const arm_feature_set clear_value;
24847  const arm_feature_set allowed_archs;
24848};
24849
24850/* The following table must be in alphabetical order with a NULL last entry.
24851   */
24852#define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, AA }
24853static const struct arm_option_extension_value_table arm_extensions[] =
24854{
24855  ARM_EXT_OPT ("crc",  ARCH_CRC_ARMV8, ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
24856			 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
24857  ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
24858			 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8),
24859				   ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
24860  ARM_EXT_OPT ("fp",     FPU_ARCH_VFP_ARMV8, ARM_FEATURE_COPROC (FPU_VFP_ARMV8),
24861				   ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
24862  ARM_EXT_OPT ("idiv",	ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
24863			ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
24864				   ARM_FEATURE_CORE_LOW (ARM_EXT_V7A | ARM_EXT_V7R)),
24865  ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT),
24866			ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT), ARM_ANY),
24867  ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2),
24868			ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2), ARM_ANY),
24869  ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK),
24870			ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK), ARM_ANY),
24871  ARM_EXT_OPT ("mp",	ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
24872			ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
24873				   ARM_FEATURE_CORE_LOW (ARM_EXT_V7A | ARM_EXT_V7R)),
24874  ARM_EXT_OPT ("simd",   FPU_ARCH_NEON_VFP_ARMV8,
24875			ARM_FEATURE_COPROC (FPU_NEON_ARMV8),
24876				   ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
24877  ARM_EXT_OPT ("os",	ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
24878			ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
24879				   ARM_FEATURE_CORE_LOW (ARM_EXT_V6M)),
24880  ARM_EXT_OPT ("pan",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN),
24881			ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_PAN, 0),
24882			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
24883  ARM_EXT_OPT ("sec",	ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
24884			ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
24885				   ARM_FEATURE_CORE_LOW (ARM_EXT_V6K | ARM_EXT_V7A)),
24886  ARM_EXT_OPT ("virt",	ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT | ARM_EXT_ADIV
24887				     | ARM_EXT_DIV),
24888			ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT),
24889				   ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
24890  ARM_EXT_OPT ("rdma",  FPU_ARCH_NEON_VFP_ARMV8,
24891			ARM_FEATURE_COPROC (FPU_NEON_ARMV8 | FPU_NEON_EXT_RDMA),
24892				   ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
24893  ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE),
24894			ARM_FEATURE_COPROC (ARM_CEXT_XSCALE), ARM_ANY),
24895  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, ARM_ARCH_NONE }
24896};
24897#undef ARM_EXT_OPT
24898
24899/* ISA floating-point and Advanced SIMD extensions.  */
24900struct arm_option_fpu_value_table
24901{
24902  char *name;
24903  const arm_feature_set value;
24904};
24905
24906/* This list should, at a minimum, contain all the fpu names
24907   recognized by GCC.  */
24908static const struct arm_option_fpu_value_table arm_fpus[] =
24909{
24910  {"softfpa",		FPU_NONE},
24911  {"fpe",		FPU_ARCH_FPE},
24912  {"fpe2",		FPU_ARCH_FPE},
24913  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
24914  {"fpa",		FPU_ARCH_FPA},
24915  {"fpa10",		FPU_ARCH_FPA},
24916  {"fpa11",		FPU_ARCH_FPA},
24917  {"arm7500fe",		FPU_ARCH_FPA},
24918  {"softvfp",		FPU_ARCH_VFP},
24919  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
24920  {"vfp",		FPU_ARCH_VFP_V2},
24921  {"vfp9",		FPU_ARCH_VFP_V2},
24922  {"vfp3",              FPU_ARCH_VFP_V3}, /* For backwards compatibility.  */
24923  {"vfp10",		FPU_ARCH_VFP_V2},
24924  {"vfp10-r0",		FPU_ARCH_VFP_V1},
24925  {"vfpxd",		FPU_ARCH_VFP_V1xD},
24926  {"vfpv2",		FPU_ARCH_VFP_V2},
24927  {"vfpv3",		FPU_ARCH_VFP_V3},
24928  {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
24929  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
24930  {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
24931  {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
24932  {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
24933  {"arm1020t",		FPU_ARCH_VFP_V1},
24934  {"arm1020e",		FPU_ARCH_VFP_V2},
24935  {"arm1136jfs",	FPU_ARCH_VFP_V2},
24936  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
24937  {"maverick",		FPU_ARCH_MAVERICK},
24938  {"neon",              FPU_ARCH_VFP_V3_PLUS_NEON_V1},
24939  {"neon-fp16",		FPU_ARCH_NEON_FP16},
24940  {"vfpv4",		FPU_ARCH_VFP_V4},
24941  {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
24942  {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
24943  {"fpv5-d16",		FPU_ARCH_VFP_V5D16},
24944  {"fpv5-sp-d16",	FPU_ARCH_VFP_V5_SP_D16},
24945  {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
24946  {"fp-armv8",		FPU_ARCH_VFP_ARMV8},
24947  {"neon-fp-armv8",	FPU_ARCH_NEON_VFP_ARMV8},
24948  {"crypto-neon-fp-armv8",
24949			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
24950  {"neon-fp-armv8.1",	FPU_ARCH_NEON_VFP_ARMV8_1},
24951  {"crypto-neon-fp-armv8.1",
24952			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1},
24953  {NULL,		ARM_ARCH_NONE}
24954};
24955
24956struct arm_option_value_table
24957{
24958  char *name;
24959  long value;
24960};
24961
24962static const struct arm_option_value_table arm_float_abis[] =
24963{
24964  {"hard",	ARM_FLOAT_ABI_HARD},
24965  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
24966  {"soft",	ARM_FLOAT_ABI_SOFT},
24967  {NULL,	0}
24968};
24969
24970#ifdef OBJ_ELF
24971/* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
24972static const struct arm_option_value_table arm_eabis[] =
24973{
24974  {"gnu",	EF_ARM_EABI_UNKNOWN},
24975  {"4",		EF_ARM_EABI_VER4},
24976  {"5",		EF_ARM_EABI_VER5},
24977  {NULL,	0}
24978};
24979#endif
24980
24981struct arm_long_option_table
24982{
24983  char * option;		/* Substring to match.	*/
24984  char * help;			/* Help information.  */
24985  int (* func) (char * subopt);	/* Function to decode sub-option.  */
24986  char * deprecated;		/* If non-null, print this message.  */
24987};
24988
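/* Parse a "+extension[+extension...]" suffix in STR and apply each named
   extension to a copy of the feature set that *OPT_P points to, updating
   *OPT_P to refer to the copy.  Returns FALSE and issues a diagnostic on
   any error.  */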
24989static bfd_boolean
24990arm_parse_extension (char *str, const arm_feature_set **opt_p)
24991{
24992  arm_feature_set *ext_set = (arm_feature_set *)
24993      xmalloc (sizeof (arm_feature_set));
24994
24995  /* We insist on extensions being specified in alphabetical order, and with
24996     extensions being added before being removed.  We achieve this by having
24997     the global ARM_EXTENSIONS table in alphabetical order, and using the
24998     ADDING_VALUE variable to indicate whether we are adding an extension (1)
24999     or removing it (0) and only allowing it to change in the order
25000     -1 -> 1 -> 0.  */
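  /* For example, with -march=armv8-a the suffix "+crc+nofp" is
     accepted, "+fp+crc" is rejected because the names are not in
     alphabetical order, and "+nofp+crc" is rejected because an
     addition follows a removal.  */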
25001  const struct arm_option_extension_value_table * opt = NULL;
25002  int adding_value = -1;
25003
25004  /* Copy the feature set, so that we can modify it.  */
25005  *ext_set = **opt_p;
25006  *opt_p = ext_set;
25007
25008  while (str != NULL && *str != 0)
25009    {
25010      char *ext;
25011      size_t len;
25012
25013      if (*str != '+')
25014	{
25015	  as_bad (_("invalid architectural extension"));
25016	  return FALSE;
25017	}
25018
25019      str++;
25020      ext = strchr (str, '+');
25021
25022      if (ext != NULL)
25023	len = ext - str;
25024      else
25025	len = strlen (str);
25026
25027      if (len >= 2 && strncmp (str, "no", 2) == 0)
25028	{
25029	  if (adding_value != 0)
25030	    {
25031	      adding_value = 0;
25032	      opt = arm_extensions;
25033	    }
25034
25035	  len -= 2;
25036	  str += 2;
25037	}
25038      else if (len > 0)
25039	{
25040	  if (adding_value == -1)
25041	    {
25042	      adding_value = 1;
25043	      opt = arm_extensions;
25044	    }
25045	  else if (adding_value != 1)
25046	    {
25047	      as_bad (_("must specify extensions to add before specifying "
25048			"those to remove"));
25049	      return FALSE;
25050	    }
25051	}
25052
25053      if (len == 0)
25054	{
25055	  as_bad (_("missing architectural extension"));
25056	  return FALSE;
25057	}
25058
25059      gas_assert (adding_value != -1);
25060      gas_assert (opt != NULL);
25061
25062      /* Scan over the options table trying to find an exact match. */
25063      for (; opt->name != NULL; opt++)
25064	if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
25065	  {
25066	    /* Check we can apply the extension to this architecture.  */
25067	    if (!ARM_CPU_HAS_FEATURE (*ext_set, opt->allowed_archs))
25068	      {
25069		as_bad (_("extension does not apply to the base architecture"));
25070		return FALSE;
25071	      }
25072
25073	    /* Add or remove the extension.  */
25074	    if (adding_value)
25075	      ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->merge_value);
25076	    else
25077	      ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->clear_value);
25078
25079	    break;
25080	  }
25081
25082      if (opt->name == NULL)
25083	{
25084	  /* Did we fail to find an extension because it wasn't specified in
25085	     alphabetical order, or because it does not exist?  */
25086
25087	  for (opt = arm_extensions; opt->name != NULL; opt++)
25088	    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
25089	      break;
25090
25091	  if (opt->name == NULL)
25092	    as_bad (_("unknown architectural extension `%s'"), str);
25093	  else
25094	    as_bad (_("architectural extensions must be specified in "
25095		      "alphabetical order"));
25096
25097	  return FALSE;
25098	}
25099      else
25100	{
25101	  /* We should skip the extension we've just matched the next time
25102	     round.  */
25103	  opt++;
25104	}
25105
25106      str = ext;
25107	    }
25108
25109  return TRUE;
25110}
25111
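/* Parse the argument of -mcpu=.  The CPU name may be followed by a
   "+extension" list, which is handed on to arm_parse_extension.  */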
25112static bfd_boolean
25113arm_parse_cpu (char *str)
25114{
25115  const struct arm_cpu_option_table *opt;
25116  char *ext = strchr (str, '+');
25117  size_t len;
25118
25119  if (ext != NULL)
25120    len = ext - str;
25121  else
25122    len = strlen (str);
25123
25124  if (len == 0)
25125    {
25126      as_bad (_("missing cpu name `%s'"), str);
25127      return FALSE;
25128    }
25129
25130  for (opt = arm_cpus; opt->name != NULL; opt++)
25131    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
25132      {
25133	mcpu_cpu_opt = &opt->value;
25134	mcpu_fpu_opt = &opt->default_fpu;
25135	if (opt->canonical_name)
25136	  {
25137	    gas_assert (sizeof selected_cpu_name > strlen (opt->canonical_name));
25138	    strcpy (selected_cpu_name, opt->canonical_name);
25139	  }
25140	else
25141	  {
25142	    size_t i;
25143
25144	    if (len >= sizeof selected_cpu_name)
25145	      len = (sizeof selected_cpu_name) - 1;
25146
25147	    for (i = 0; i < len; i++)
25148	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
25149	    selected_cpu_name[i] = 0;
25150	  }
25151
25152	if (ext != NULL)
25153	  return arm_parse_extension (ext, &mcpu_cpu_opt);
25154
25155	return TRUE;
25156      }
25157
25158  as_bad (_("unknown cpu `%s'"), str);
25159  return FALSE;
25160}
25161
25162static bfd_boolean
25163arm_parse_arch (char *str)
25164{
25165  const struct arm_arch_option_table *opt;
25166  char *ext = strchr (str, '+');
25167  size_t len;
25168
25169  if (ext != NULL)
25170    len = ext - str;
25171  else
25172    len = strlen (str);
25173
25174  if (len == 0)
25175    {
25176      as_bad (_("missing architecture name `%s'"), str);
25177      return FALSE;
25178    }
25179
25180  for (opt = arm_archs; opt->name != NULL; opt++)
25181    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
25182      {
25183	march_cpu_opt = &opt->value;
25184	march_fpu_opt = &opt->default_fpu;
25185	strcpy (selected_cpu_name, opt->name);
25186
25187	if (ext != NULL)
25188	  return arm_parse_extension (ext, &march_cpu_opt);
25189
25190	return TRUE;
25191      }
25192
25193  as_bad (_("unknown architecture `%s'\n"), str);
25194  return FALSE;
25195}
25196
25197static bfd_boolean
25198arm_parse_fpu (char * str)
25199{
25200  const struct arm_option_fpu_value_table * opt;
25201
25202  for (opt = arm_fpus; opt->name != NULL; opt++)
25203    if (streq (opt->name, str))
25204      {
25205	mfpu_opt = &opt->value;
25206	return TRUE;
25207      }
25208
25209  as_bad (_("unknown floating point format `%s'\n"), str);
25210  return FALSE;
25211}
25212
25213static bfd_boolean
25214arm_parse_float_abi (char * str)
25215{
25216  const struct arm_option_value_table * opt;
25217
25218  for (opt = arm_float_abis; opt->name != NULL; opt++)
25219    if (streq (opt->name, str))
25220      {
25221	mfloat_abi_opt = opt->value;
25222	return TRUE;
25223      }
25224
25225  as_bad (_("unknown floating point abi `%s'\n"), str);
25226  return FALSE;
25227}
25228
25229#ifdef OBJ_ELF
25230static bfd_boolean
25231arm_parse_eabi (char * str)
25232{
25233  const struct arm_option_value_table *opt;
25234
25235  for (opt = arm_eabis; opt->name != NULL; opt++)
25236    if (streq (opt->name, str))
25237      {
25238	meabi_flags = opt->value;
25239	return TRUE;
25240      }
25241  as_bad (_("unknown EABI `%s'\n"), str);
25242  return FALSE;
25243}
25244#endif
25245
25246static bfd_boolean
25247arm_parse_it_mode (char * str)
25248{
25249  bfd_boolean ret = TRUE;
25250
25251  if (streq ("arm", str))
25252    implicit_it_mode = IMPLICIT_IT_MODE_ARM;
25253  else if (streq ("thumb", str))
25254    implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
25255  else if (streq ("always", str))
25256    implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
25257  else if (streq ("never", str))
25258    implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
25259  else
25260    {
25261      as_bad (_("unknown implicit IT mode `%s', should be "\
25262		"arm, thumb, always, or never."), str);
25263      ret = FALSE;
25264    }
25265
25266  return ret;
25267}
25268
25269static bfd_boolean
25270arm_ccs_mode (char * unused ATTRIBUTE_UNUSED)
25271{
25272  codecomposer_syntax = TRUE;
25273  arm_comment_chars[0] = ';';
25274  arm_line_separator_chars[0] = 0;
25275  return TRUE;
25276}
25277
25278struct arm_long_option_table arm_long_opts[] =
25279{
25280  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
25281   arm_parse_cpu, NULL},
25282  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
25283   arm_parse_arch, NULL},
25284  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
25285   arm_parse_fpu, NULL},
25286  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
25287   arm_parse_float_abi, NULL},
25288#ifdef OBJ_ELF
25289  {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
25290   arm_parse_eabi, NULL},
25291#endif
25292  {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
25293   arm_parse_it_mode, NULL},
25294  {"mccs", N_("\t\t\t  TI CodeComposer Studio syntax compatibility mode"),
25295   arm_ccs_mode, NULL},
25296  {NULL, NULL, 0, NULL}
25297};
25298
25299int
25300md_parse_option (int c, char * arg)
25301{
25302  struct arm_option_table *opt;
25303  const struct arm_legacy_option_table *fopt;
25304  struct arm_long_option_table *lopt;
25305
25306  switch (c)
25307    {
25308#ifdef OPTION_EB
25309    case OPTION_EB:
25310      target_big_endian = 1;
25311      break;
25312#endif
25313
25314#ifdef OPTION_EL
25315    case OPTION_EL:
25316      target_big_endian = 0;
25317      break;
25318#endif
25319
25320    case OPTION_FIX_V4BX:
25321      fix_v4bx = TRUE;
25322      break;
25323
25324    case 'a':
25325      /* Listing option.  Just ignore these; we don't support additional
25326	 ones.	*/
25327      return 0;
25328
25329    default:
25330      for (opt = arm_opts; opt->option != NULL; opt++)
25331	{
25332	  if (c == opt->option[0]
25333	      && ((arg == NULL && opt->option[1] == 0)
25334		  || streq (arg, opt->option + 1)))
25335	    {
25336	      /* If the option is deprecated, tell the user.  */
25337	      if (warn_on_deprecated && opt->deprecated != NULL)
25338		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
25339			   arg ? arg : "", _(opt->deprecated));
25340
25341	      if (opt->var != NULL)
25342		*opt->var = opt->value;
25343
25344	      return 1;
25345	    }
25346	}
25347
25348      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
25349	{
25350	  if (c == fopt->option[0]
25351	      && ((arg == NULL && fopt->option[1] == 0)
25352		  || streq (arg, fopt->option + 1)))
25353	    {
25354	      /* If the option is deprecated, tell the user.  */
25355	      if (warn_on_deprecated && fopt->deprecated != NULL)
25356		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
25357			   arg ? arg : "", _(fopt->deprecated));
25358
25359	      if (fopt->var != NULL)
25360		*fopt->var = &fopt->value;
25361
25362	      return 1;
25363	    }
25364	}
25365
25366      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
25367	{
25368	  /* These options are expected to have an argument.  */
25369	  if (c == lopt->option[0]
25370	      && arg != NULL
25371	      && strncmp (arg, lopt->option + 1,
25372			  strlen (lopt->option + 1)) == 0)
25373	    {
25374	      /* If the option is deprecated, tell the user.  */
25375	      if (warn_on_deprecated && lopt->deprecated != NULL)
25376		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
25377			   _(lopt->deprecated));
25378
25379	      /* Call the sub-option parser.  */
25380	      return lopt->func (arg + strlen (lopt->option) - 1);
25381	    }
25382	}
25383
25384      return 0;
25385    }
25386
25387  return 1;
25388}
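
/* A rough sketch of how a long option reaches its handler, assuming gas
   delivers "-mfpu=vfpv3" to this function as c == 'm' with
   arg == "fpu=vfpv3" (the 'm' short option is declared as taking an
   argument elsewhere in this file):

     c == 'm', arg == "fpu=vfpv3"
       matches the arm_long_opts entry {"mfpu=", ..., arm_parse_fpu, NULL},
       because arg starts with lopt->option + 1, i.e. "fpu=";
       lopt->func (arg + strlen ("mfpu=") - 1) is then
       arm_parse_fpu ("vfpv3").  */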
25389
25390void
25391md_show_usage (FILE * fp)
25392{
25393  struct arm_option_table *opt;
25394  struct arm_long_option_table *lopt;
25395
25396  fprintf (fp, _(" ARM-specific assembler options:\n"));
25397
25398  for (opt = arm_opts; opt->option != NULL; opt++)
25399    if (opt->help != NULL)
25400      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));
25401
25402  for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
25403    if (lopt->help != NULL)
25404      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));
25405
25406#ifdef OPTION_EB
25407  fprintf (fp, _("\
25408  -EB                     assemble code for a big-endian cpu\n"));
25409#endif
25410
25411#ifdef OPTION_EL
25412  fprintf (fp, _("\
25413  -EL                     assemble code for a little-endian cpu\n"));
25414#endif
25415
25416  fprintf (fp, _("\
25417  --fix-v4bx              Allow BX in ARMv4 code\n"));
25418}
25419
25420
25421#ifdef OBJ_ELF
25422typedef struct
25423{
25424  int val;
25425  arm_feature_set flags;
25426} cpu_arch_ver_table;
25427
25428/* Mapping from CPU features to EABI CPU arch values.  The table must be
25429   sorted so that entries with fewer features come first.  */
25430static const cpu_arch_ver_table cpu_arch_ver[] =
25431{
25432    {1, ARM_ARCH_V4},
25433    {2, ARM_ARCH_V4T},
25434    {3, ARM_ARCH_V5},
25435    {3, ARM_ARCH_V5T},
25436    {4, ARM_ARCH_V5TE},
25437    {5, ARM_ARCH_V5TEJ},
25438    {6, ARM_ARCH_V6},
25439    {9, ARM_ARCH_V6K},
25440    {7, ARM_ARCH_V6Z},
25441    {11, ARM_ARCH_V6M},
25442    {12, ARM_ARCH_V6SM},
25443    {8, ARM_ARCH_V6T2},
25444    {10, ARM_ARCH_V7VE},
25445    {10, ARM_ARCH_V7R},
25446    {10, ARM_ARCH_V7M},
25447    {14, ARM_ARCH_V8A},
25448    {0, ARM_ARCH_NONE}
25449};
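
/* Worked example for the scan in aeabi_set_public_attributes below:
   ARM_CPU_HAS_FEATURE is an any-bits-in-common test, so each row that
   still shares a feature bit with the remaining flags records its value
   and then has its bits cleared.  For a feature set equal to
   ARM_ARCH_V5TE the successive matches end with the {4, ARM_ARCH_V5TE}
   row, leaving arch == 4; the least-features-first ordering is what makes
   "last row to contribute something new" equivalent to "most specific
   architecture".  */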
25450
25451/* Set an attribute if it has not already been set by the user.  */
25452static void
25453aeabi_set_attribute_int (int tag, int value)
25454{
25455  if (tag < 1
25456      || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
25457      || !attributes_set_explicitly[tag])
25458    bfd_elf_add_proc_attr_int (stdoutput, tag, value);
25459}
25460
25461static void
25462aeabi_set_attribute_string (int tag, const char *value)
25463{
25464  if (tag < 1
25465      || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
25466      || !attributes_set_explicitly[tag])
25467    bfd_elf_add_proc_attr_string (stdoutput, tag, value);
25468}
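
/* Usage note: these wrappers are what let an explicit .eabi_attribute
   directive in the source win over the values computed below.  For
   example, an input containing

       .eabi_attribute Tag_CPU_arch, 10

   marks attributes_set_explicitly[Tag_CPU_arch] in the directive handler,
   so aeabi_set_public_attributes leaves that tag alone.  */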
25469
25470/* Set the public EABI object attributes.  */
25471void
25472aeabi_set_public_attributes (void)
25473{
25474  int arch;
25475  char profile;
25476  int virt_sec = 0;
25477  int fp16_optional = 0;
25478  arm_feature_set flags;
25479  arm_feature_set tmp;
25480  const cpu_arch_ver_table *p;
25481
25482  /* Choose the architecture based on the capabilities of the requested cpu
25483     (if any) and/or the instructions actually used.  */
25484  ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
25485  ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
25486  ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
25487
25488  if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
25489    ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);
25490
25491  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
25492    ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);
25493
25494  selected_cpu = flags;
25495
25496  /* Allow the user to override the reported architecture.  */
25497  if (object_arch)
25498    {
25499      ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
25500      ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
25501    }
25502
25503  /* We need to make sure that the attributes do not identify us as v6S-M
25504     when the only v6S-M feature in use is the Operating System Extensions.  */
25505  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_os))
25506      if (!ARM_CPU_HAS_FEATURE (flags, arm_arch_v6m_only))
25507	ARM_CLEAR_FEATURE (flags, flags, arm_ext_os);
25508
25509  tmp = flags;
25510  arch = 0;
25511  for (p = cpu_arch_ver; p->val; p++)
25512    {
25513      if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
25514	{
25515	  arch = p->val;
25516	  ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
25517	}
25518    }
25519
25520  /* The table lookup above finds the last architecture to contribute
25521     a new feature.  Unfortunately, Tag13 (v7E-M) is a subset of the
25522     union of v6T2 and v7-M, so it is never seen as contributing a new
25523     feature.  We cannot search for the last entry which is entirely
25524     used, because if no CPU is specified we build up only those flags
25525     actually used.  Perhaps we should separate out the specified
25526     and implicit cases.  Avoid taking this path for -march=all by
25527     checking for contradictory v7-A / v7-M features.  */
25528  if (arch == 10
25529      && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
25530      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m)
25531      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v6_dsp))
25532    arch = 13;
25533
25534  /* Tag_CPU_name.  */
25535  if (selected_cpu_name[0])
25536    {
25537      char *q;
25538
25539      q = selected_cpu_name;
25540      if (strncmp (q, "armv", 4) == 0)
25541	{
25542	  int i;
25543
25544	  q += 4;
25545	  for (i = 0; q[i]; i++)
25546	    q[i] = TOUPPER (q[i]);
25547	}
25548      aeabi_set_attribute_string (Tag_CPU_name, q);
25549    }
25550
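
  /* Examples: a plain "-march=armv7-a" leaves selected_cpu_name as
     "armv7-a", so the code above strips the "armv" prefix and upper-cases
     the rest, recording Tag_CPU_name as "7-A".  A core selected by name,
     e.g. "-mcpu=cortex-a8", typically has a canonical spelling in arm_cpus
     and is recorded unchanged ("Cortex-A8").  */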
25551  /* Tag_CPU_arch.  */
25552  aeabi_set_attribute_int (Tag_CPU_arch, arch);
25553
25554  /* Tag_CPU_arch_profile.  */
25555  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
25556      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8))
25557    profile = 'A';
25558  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
25559    profile = 'R';
25560  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
25561    profile = 'M';
25562  else
25563    profile = '\0';
25564
25565  if (profile != '\0')
25566    aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);
25567
25568  /* Tag_ARM_ISA_use.  */
25569  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
25570      || arch == 0)
25571    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);
25572
25573  /* Tag_THUMB_ISA_use.  */
25574  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
25575      || arch == 0)
25576    aeabi_set_attribute_int (Tag_THUMB_ISA_use,
25577	ARM_CPU_HAS_FEATURE (flags, arm_arch_t2) ? 2 : 1);
25578
25579  /* Tag_VFP_arch.  */
25580  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8xd))
25581    aeabi_set_attribute_int (Tag_VFP_arch,
25582			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
25583			     ? 7 : 8);
25584  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
25585    aeabi_set_attribute_int (Tag_VFP_arch,
25586			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
25587			     ? 5 : 6);
25588  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
25589    {
25590      fp16_optional = 1;
25591      aeabi_set_attribute_int (Tag_VFP_arch, 3);
25592    }
25593  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
25594    {
25595      aeabi_set_attribute_int (Tag_VFP_arch, 4);
25596      fp16_optional = 1;
25597    }
25598  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
25599    aeabi_set_attribute_int (Tag_VFP_arch, 2);
25600  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
25601	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
25602    aeabi_set_attribute_int (Tag_VFP_arch, 1);
25603
25604  /* Tag_ABI_HardFP_use.  */
25605  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
25606      && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
25607    aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);
25608
25609  /* Tag_WMMX_arch.  */
25610  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
25611    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
25612  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
25613    aeabi_set_attribute_int (Tag_WMMX_arch, 1);
25614
25615  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
25616  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
25617    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
25618  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
25619    {
25620      if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
25621	{
25622	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
25623	}
25624      else
25625	{
25626	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
25627	  fp16_optional = 1;
25628	}
25629    }
25630
25631  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
25632  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
25633    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);
25634
25635  /* Tag_DIV_use.
25636
25637     We set Tag_DIV_use to two when integer divide instructions have been used
25638     in ARM state, or when Thumb integer divide instructions have been used,
25639     but we have no architecture profile set, nor have we any ARM instructions.
25640
25641     For ARMv8 we set the tag to 0 as integer divide is implied by the base
25642     architecture.
25643
25644     These tests will need revisiting as new architectures are added.  */
25645  gas_assert (arch <= TAG_CPU_ARCH_V8);
25646  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8))
25647    aeabi_set_attribute_int (Tag_DIV_use, 0);
25648  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
25649	   || (profile == '\0'
25650	       && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
25651	       && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
25652    aeabi_set_attribute_int (Tag_DIV_use, 2);
25653
25654  /* Tag_MPextension_use.  */
25655  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
25656    aeabi_set_attribute_int (Tag_MPextension_use, 1);
25657
25658  /* Tag_Virtualization_use.  */
25659  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
25660    virt_sec |= 1;
25661  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
25662    virt_sec |= 2;
25663  if (virt_sec != 0)
25664    aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
25665}
25666
25667/* Add the default contents for the .ARM.attributes section.  */
25668void
25669arm_md_end (void)
25670{
25671  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
25672    return;
25673
25674  aeabi_set_public_attributes ();
25675}
25676#endif /* OBJ_ELF */
25677
25678
25679/* Parse a .cpu directive.  */
25680
25681static void
25682s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
25683{
25684  const struct arm_cpu_option_table *opt;
25685  char *name;
25686  char saved_char;
25687
25688  name = input_line_pointer;
25689  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25690    input_line_pointer++;
25691  saved_char = *input_line_pointer;
25692  *input_line_pointer = 0;
25693
25694  /* Skip the first "all" entry.  */
25695  for (opt = arm_cpus + 1; opt->name != NULL; opt++)
25696    if (streq (opt->name, name))
25697      {
25698	mcpu_cpu_opt = &opt->value;
25699	selected_cpu = opt->value;
25700	if (opt->canonical_name)
25701	  strcpy (selected_cpu_name, opt->canonical_name);
25702	else
25703	  {
25704	    int i;
25705	    for (i = 0; opt->name[i]; i++)
25706	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
25707
25708	    selected_cpu_name[i] = 0;
25709	  }
25710	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
25711	*input_line_pointer = saved_char;
25712	demand_empty_rest_of_line ();
25713	return;
25714      }
25715  as_bad (_("unknown cpu `%s'"), name);
25716  *input_line_pointer = saved_char;
25717  ignore_rest_of_line ();
25718}
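
/* For example, a source file may contain

       .cpu cortex-m3

   (assuming "cortex-m3" is an arm_cpus entry); the handler above performs
   the same table lookup as the -mcpu= command line option and refreshes
   cpu_variant, so the permitted instruction set changes from that point
   in the input onwards.  */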
25719
25720
25721/* Parse a .arch directive.  */
25722
25723static void
25724s_arm_arch (int ignored ATTRIBUTE_UNUSED)
25725{
25726  const struct arm_arch_option_table *opt;
25727  char saved_char;
25728  char *name;
25729
25730  name = input_line_pointer;
25731  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25732    input_line_pointer++;
25733  saved_char = *input_line_pointer;
25734  *input_line_pointer = 0;
25735
25736  /* Skip the first "all" entry.  */
25737  for (opt = arm_archs + 1; opt->name != NULL; opt++)
25738    if (streq (opt->name, name))
25739      {
25740	mcpu_cpu_opt = &opt->value;
25741	selected_cpu = opt->value;
25742	strcpy (selected_cpu_name, opt->name);
25743	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
25744	*input_line_pointer = saved_char;
25745	demand_empty_rest_of_line ();
25746	return;
25747      }
25748
25749  as_bad (_("unknown architecture `%s'"), name);
25750  *input_line_pointer = saved_char;
25751  ignore_rest_of_line ();
25752}
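
/* Likewise ".arch armv5te", for example, selects a bare architecture
   rather than a named core; note that only opt->name is copied into
   selected_cpu_name here, with no canonical-name special case.  */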
25753
25754
25755/* Parse a .object_arch directive.  */
25756
25757static void
25758s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
25759{
25760  const struct arm_arch_option_table *opt;
25761  char saved_char;
25762  char *name;
25763
25764  name = input_line_pointer;
25765  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25766    input_line_pointer++;
25767  saved_char = *input_line_pointer;
25768  *input_line_pointer = 0;
25769
25770  /* Skip the first "all" entry.  */
25771  for (opt = arm_archs + 1; opt->name != NULL; opt++)
25772    if (streq (opt->name, name))
25773      {
25774	object_arch = &opt->value;
25775	*input_line_pointer = saved_char;
25776	demand_empty_rest_of_line ();
25777	return;
25778      }
25779
25780  as_bad (_("unknown architecture `%s'"), name);
25781  *input_line_pointer = saved_char;
25782  ignore_rest_of_line ();
25783}
25784
25785/* Parse a .arch_extension directive.  */
25786
25787static void
25788s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
25789{
25790  const struct arm_option_extension_value_table *opt;
25791  char saved_char;
25792  char *name;
25793  int adding_value = 1;
25794
25795  name = input_line_pointer;
25796  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25797    input_line_pointer++;
25798  saved_char = *input_line_pointer;
25799  *input_line_pointer = 0;
25800
25801  if (strlen (name) >= 2
25802      && strncmp (name, "no", 2) == 0)
25803    {
25804      adding_value = 0;
25805      name += 2;
25806    }
25807
25808  for (opt = arm_extensions; opt->name != NULL; opt++)
25809    if (streq (opt->name, name))
25810      {
25811	if (!ARM_CPU_HAS_FEATURE (*mcpu_cpu_opt, opt->allowed_archs))
25812	  {
25813	    as_bad (_("architectural extension `%s' is not allowed for the "
25814		      "current base architecture"), name);
25815	    break;
25816	  }
25817
25818	if (adding_value)
25819	  ARM_MERGE_FEATURE_SETS (selected_cpu, selected_cpu,
25820				  opt->merge_value);
25821	else
25822	  ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, opt->clear_value);
25823
25824	mcpu_cpu_opt = &selected_cpu;
25825	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
25826	*input_line_pointer = saved_char;
25827	demand_empty_rest_of_line ();
25828	return;
25829      }
25830
25831  if (opt->name == NULL)
25832    as_bad (_("unknown architecture extension `%s'"), name);
25833
25834  *input_line_pointer = saved_char;
25835  ignore_rest_of_line ();
25836}
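
/* For example, after ".arch armv7-a" an input may contain

       .arch_extension sec
       ...
       .arch_extension nosec

   (assuming "sec" is an arm_extensions entry valid for v7-A).  The "no"
   prefix selects the clear_value path above, and either spelling is
   rejected when the extension's allowed_archs mask does not cover the
   current base architecture.  */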
25837
25838/* Parse a .fpu directive.  */
25839
25840static void
25841s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
25842{
25843  const struct arm_option_fpu_value_table *opt;
25844  char saved_char;
25845  char *name;
25846
25847  name = input_line_pointer;
25848  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25849    input_line_pointer++;
25850  saved_char = *input_line_pointer;
25851  *input_line_pointer = 0;
25852
25853  for (opt = arm_fpus; opt->name != NULL; opt++)
25854    if (streq (opt->name, name))
25855      {
25856	mfpu_opt = &opt->value;
25857	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
25858	*input_line_pointer = saved_char;
25859	demand_empty_rest_of_line ();
25860	return;
25861      }
25862
25863  as_bad (_("unknown floating point format `%s'"), name);
25864  *input_line_pointer = saved_char;
25865  ignore_rest_of_line ();
25866}
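
/* For example, ".fpu vfpv3" or ".fpu neon" (assuming those names appear
   in arm_fpus) switches the floating point/SIMD instruction set accepted
   from that point onwards, mirroring the -mfpu= command line option.  */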
25867
25868/* Copy symbol information.  */
25869
25870void
25871arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
25872{
25873  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
25874}
25875
25876#ifdef OBJ_ELF
25877/* Given a symbolic attribute NAME, return the proper integer value.
25878   Returns -1 if the attribute is not known.  */
25879
25880int
25881arm_convert_symbolic_attribute (const char *name)
25882{
25883  static const struct
25884  {
25885    const char * name;
25886    const int    tag;
25887  }
25888  attribute_table[] =
25889    {
25890      /* When you modify this table you should
25891	 also modify the list in doc/c-arm.texi.  */
25892#define T(tag) {#tag, tag}
25893      T (Tag_CPU_raw_name),
25894      T (Tag_CPU_name),
25895      T (Tag_CPU_arch),
25896      T (Tag_CPU_arch_profile),
25897      T (Tag_ARM_ISA_use),
25898      T (Tag_THUMB_ISA_use),
25899      T (Tag_FP_arch),
25900      T (Tag_VFP_arch),
25901      T (Tag_WMMX_arch),
25902      T (Tag_Advanced_SIMD_arch),
25903      T (Tag_PCS_config),
25904      T (Tag_ABI_PCS_R9_use),
25905      T (Tag_ABI_PCS_RW_data),
25906      T (Tag_ABI_PCS_RO_data),
25907      T (Tag_ABI_PCS_GOT_use),
25908      T (Tag_ABI_PCS_wchar_t),
25909      T (Tag_ABI_FP_rounding),
25910      T (Tag_ABI_FP_denormal),
25911      T (Tag_ABI_FP_exceptions),
25912      T (Tag_ABI_FP_user_exceptions),
25913      T (Tag_ABI_FP_number_model),
25914      T (Tag_ABI_align_needed),
25915      T (Tag_ABI_align8_needed),
25916      T (Tag_ABI_align_preserved),
25917      T (Tag_ABI_align8_preserved),
25918      T (Tag_ABI_enum_size),
25919      T (Tag_ABI_HardFP_use),
25920      T (Tag_ABI_VFP_args),
25921      T (Tag_ABI_WMMX_args),
25922      T (Tag_ABI_optimization_goals),
25923      T (Tag_ABI_FP_optimization_goals),
25924      T (Tag_compatibility),
25925      T (Tag_CPU_unaligned_access),
25926      T (Tag_FP_HP_extension),
25927      T (Tag_VFP_HP_extension),
25928      T (Tag_ABI_FP_16bit_format),
25929      T (Tag_MPextension_use),
25930      T (Tag_DIV_use),
25931      T (Tag_nodefaults),
25932      T (Tag_also_compatible_with),
25933      T (Tag_conformance),
25934      T (Tag_T2EE_use),
25935      T (Tag_Virtualization_use),
25936      /* We deliberately do not include Tag_MPextension_use_legacy.  */
25937#undef T
25938    };
25939  unsigned int i;
25940
25941  if (name == NULL)
25942    return -1;
25943
25944  for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
25945    if (streq (name, attribute_table[i].name))
25946      return attribute_table[i].tag;
25947
25948  return -1;
25949}
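
/* This is what allows .eabi_attribute to take a symbolic tag name, for
   example

       .eabi_attribute Tag_ABI_enum_size, 2

   which is converted here to the numeric tag before being recorded,
   rather than requiring the raw tag number.  */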
25950
25951
25952/* Apply the symbol value to a relocation only when the symbol is local,
25953   lives in the same segment as the fixup, and the architecture provides
25954   BLX so that simple ARM/Thumb state switches can be resolved directly.  */
25955int
25956arm_apply_sym_value (struct fix * fixP, segT this_seg)
25957{
25958  if (fixP->fx_addsy
25959      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
25960      /* PR 17444: If the local symbol is in a different section then a reloc
25961	 will always be generated for it, so applying the symbol value now
25962	 will result in a double offset being stored in the relocation.  */
25963      && (S_GET_SEGMENT (fixP->fx_addsy) == this_seg)
25964      && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
25965    {
25966      switch (fixP->fx_r_type)
25967	{
25968	case BFD_RELOC_ARM_PCREL_BLX:
25969	case BFD_RELOC_THUMB_PCREL_BRANCH23:
25970	  if (ARM_IS_FUNC (fixP->fx_addsy))
25971	    return 1;
25972	  break;
25973
25974	case BFD_RELOC_ARM_PCREL_CALL:
25975	case BFD_RELOC_THUMB_PCREL_BLX:
25976	  if (THUMB_IS_FUNC (fixP->fx_addsy))
25977	    return 1;
25978	  break;
25979
25980	default:
25981	  break;
25982	}
25983
25984    }
25985  return 0;
25986}
25987#endif /* OBJ_ELF */
25988