1/* tc-arm.c -- Assemble for the ARM
2   Copyright (C) 1994-2016 Free Software Foundation, Inc.
3   Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4	Modified by David Taylor (dtaylor@armltd.co.uk)
5	Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6	Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7	Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
8
9   This file is part of GAS, the GNU Assembler.
10
11   GAS is free software; you can redistribute it and/or modify
12   it under the terms of the GNU General Public License as published by
13   the Free Software Foundation; either version 3, or (at your option)
14   any later version.
15
16   GAS is distributed in the hope that it will be useful,
17   but WITHOUT ANY WARRANTY; without even the implied warranty of
18   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
19   GNU General Public License for more details.
20
21   You should have received a copy of the GNU General Public License
22   along with GAS; see the file COPYING.  If not, write to the Free
23   Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
24   02110-1301, USA.  */
25
26#include "as.h"
27#include <limits.h>
28#include <stdarg.h>
29#define	 NO_RELOC 0
30#include "safe-ctype.h"
31#include "subsegs.h"
32#include "obstack.h"
33#include "libiberty.h"
34#include "opcode/arm.h"
35
36#ifdef OBJ_ELF
37#include "elf/arm.h"
38#include "dw2gencfi.h"
39#endif
40
41#include "dwarf2dbg.h"
42
43#ifdef OBJ_ELF
44/* Must be at least the size of the largest unwind opcode (currently two).  */
45#define ARM_OPCODE_CHUNK_SIZE 8
46
47/* This structure holds the unwinding state.  */
48
49static struct
50{
51  symbolS *	  proc_start;
52  symbolS *	  table_entry;
53  symbolS *	  personality_routine;
54  int		  personality_index;
55  /* The segment containing the function.  */
56  segT		  saved_seg;
57  subsegT	  saved_subseg;
58  /* Opcodes generated from this function.  */
59  unsigned char * opcodes;
60  int		  opcode_count;
61  int		  opcode_alloc;
62  /* The number of bytes pushed to the stack.  */
63  offsetT	  frame_size;
64  /* We don't add stack adjustment opcodes immediately so that we can merge
65     multiple adjustments.  We can also omit the final adjustment
66     when using a frame pointer.  */
67  offsetT	  pending_offset;
68  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
69     hold the reg+offset to use when restoring sp from a frame pointer.	 */
70  offsetT	  fp_offset;
71  int		  fp_reg;
72  /* Nonzero if an unwind_setfp directive has been seen.  */
73  unsigned	  fp_used:1;
74  /* Nonzero if the last opcode restores sp from fp_reg.  */
75  unsigned	  sp_restored:1;
76} unwind;
77
78#endif /* OBJ_ELF */
79
80/* Results from operand parsing worker functions.  */
81
82typedef enum
83{
84  PARSE_OPERAND_SUCCESS,
85  PARSE_OPERAND_FAIL,
86  PARSE_OPERAND_FAIL_NO_BACKTRACK
87} parse_operand_result;
88
89enum arm_float_abi
90{
91  ARM_FLOAT_ABI_HARD,
92  ARM_FLOAT_ABI_SOFTFP,
93  ARM_FLOAT_ABI_SOFT
94};
95
96/* Types of processor to assemble for.	*/
97#ifndef CPU_DEFAULT
98/* The code that was here used to select a default CPU depending on compiler
99   pre-defines which were only present when doing native builds, thus
100   changing gas' default behaviour depending upon the build host.
101
102   If you have a target that requires a default CPU option then you
103   should define CPU_DEFAULT here.  */
104#endif
105
106#ifndef FPU_DEFAULT
107# ifdef TE_LINUX
108#  define FPU_DEFAULT FPU_ARCH_FPA
109# elif defined (TE_NetBSD)
110#  ifdef OBJ_ELF
111#   define FPU_DEFAULT FPU_ARCH_VFP	/* Soft-float, but VFP order.  */
112#  else
113    /* Legacy a.out format.  */
114#   define FPU_DEFAULT FPU_ARCH_FPA	/* Soft-float, but FPA order.  */
115#  endif
116# elif defined (TE_VXWORKS)
117#  define FPU_DEFAULT FPU_ARCH_VFP	/* Soft-float, VFP order.  */
118# else
119   /* For backwards compatibility, default to FPA.  */
120#  define FPU_DEFAULT FPU_ARCH_FPA
121# endif
122#endif /* ifndef FPU_DEFAULT */
123
124#define streq(a, b)	      (strcmp (a, b) == 0)
125
126static arm_feature_set cpu_variant;
127static arm_feature_set arm_arch_used;
128static arm_feature_set thumb_arch_used;
129
130/* Flags stored in private area of BFD structure.  */
131static int uses_apcs_26	     = FALSE;
132static int atpcs	     = FALSE;
133static int support_interwork = FALSE;
134static int uses_apcs_float   = FALSE;
135static int pic_code	     = FALSE;
136static int fix_v4bx	     = FALSE;
137/* Warn on using deprecated features.  */
138static int warn_on_deprecated = TRUE;
139
140/* Understand CodeComposer Studio assembly syntax.  */
141bfd_boolean codecomposer_syntax = FALSE;
142
143/* Variables that we set while parsing command-line options.  Once all
144   options have been read we re-process these values to set the real
145   assembly flags.  */
146static const arm_feature_set *legacy_cpu = NULL;
147static const arm_feature_set *legacy_fpu = NULL;
148
149static const arm_feature_set *mcpu_cpu_opt = NULL;
150static const arm_feature_set *mcpu_fpu_opt = NULL;
151static const arm_feature_set *march_cpu_opt = NULL;
152static const arm_feature_set *march_fpu_opt = NULL;
153static const arm_feature_set *mfpu_opt = NULL;
154static const arm_feature_set *object_arch = NULL;
155
156/* Constants for known architecture features.  */
157static const arm_feature_set fpu_default = FPU_DEFAULT;
158static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V1;
159static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
160static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V3;
161static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED = FPU_ARCH_NEON_V1;
162static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
163static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
164#ifdef OBJ_ELF
165static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
166#endif
167static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
168
169#ifdef CPU_DEFAULT
170static const arm_feature_set cpu_default = CPU_DEFAULT;
171#endif
172
173static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
174static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V2);
175static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
176static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
177static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
178static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
179static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
180static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
181static const arm_feature_set arm_ext_v4t_5 =
182  ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
183static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
184static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
185static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
186static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
187static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
188static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
189static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
190static const arm_feature_set arm_ext_v6m = ARM_FEATURE_CORE_LOW (ARM_EXT_V6M);
191static const arm_feature_set arm_ext_v6_notm =
192  ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
193static const arm_feature_set arm_ext_v6_dsp =
194  ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
195static const arm_feature_set arm_ext_barrier =
196  ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
197static const arm_feature_set arm_ext_msr =
198  ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
199static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
200static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
201static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
202static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
203#ifdef OBJ_ELF
204static const arm_feature_set arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
205#endif
206static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
207static const arm_feature_set arm_ext_m =
208  ARM_FEATURE_CORE (ARM_EXT_V6M | ARM_EXT_OS | ARM_EXT_V7M,
209		    ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
210static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
211static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
212static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
213static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
214static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
215static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN);
216static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M);
217static const arm_feature_set arm_ext_v8m_main =
218  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN);
219/* Instructions in ARMv8-M only found in M profile architectures.  */
220static const arm_feature_set arm_ext_v8m_m_only =
221  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
222static const arm_feature_set arm_ext_v6t2_v8m =
223  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M);
224/* Instructions shared between ARMv8-A and ARMv8-M.  */
225static const arm_feature_set arm_ext_atomics =
226  ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS);
227#ifdef OBJ_ELF
228/* DSP instructions Tag_DSP_extension refers to.  */
229static const arm_feature_set arm_ext_dsp =
230  ARM_FEATURE_CORE_LOW (ARM_EXT_V5E | ARM_EXT_V5ExP | ARM_EXT_V6_DSP);
231#endif
232static const arm_feature_set arm_ext_ras =
233  ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS);
234/* FP16 instructions.  */
235static const arm_feature_set arm_ext_fp16 =
236  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST);
237
238static const arm_feature_set arm_arch_any = ARM_ANY;
239static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED = ARM_FEATURE (-1, -1, -1);
240static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
241static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
242#ifdef OBJ_ELF
243static const arm_feature_set arm_arch_v6m_only = ARM_ARCH_V6M_ONLY;
244#endif
245
246static const arm_feature_set arm_cext_iwmmxt2 =
247  ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
248static const arm_feature_set arm_cext_iwmmxt =
249  ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
250static const arm_feature_set arm_cext_xscale =
251  ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
252static const arm_feature_set arm_cext_maverick =
253  ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);
254static const arm_feature_set fpu_fpa_ext_v1 =
255  ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
256static const arm_feature_set fpu_fpa_ext_v2 =
257  ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
258static const arm_feature_set fpu_vfp_ext_v1xd =
259  ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
260static const arm_feature_set fpu_vfp_ext_v1 =
261  ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
262static const arm_feature_set fpu_vfp_ext_v2 =
263  ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
264static const arm_feature_set fpu_vfp_ext_v3xd =
265  ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
266static const arm_feature_set fpu_vfp_ext_v3 =
267  ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
268static const arm_feature_set fpu_vfp_ext_d32 =
269  ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
270static const arm_feature_set fpu_neon_ext_v1 =
271  ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
272static const arm_feature_set fpu_vfp_v3_or_neon_ext =
273  ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
274#ifdef OBJ_ELF
275static const arm_feature_set fpu_vfp_fp16 =
276  ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
277static const arm_feature_set fpu_neon_ext_fma =
278  ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
279#endif
280static const arm_feature_set fpu_vfp_ext_fma =
281  ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
282static const arm_feature_set fpu_vfp_ext_armv8 =
283  ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
284static const arm_feature_set fpu_vfp_ext_armv8xd =
285  ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
286static const arm_feature_set fpu_neon_ext_armv8 =
287  ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
288static const arm_feature_set fpu_crypto_ext_armv8 =
289  ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
290static const arm_feature_set crc_ext_armv8 =
291  ARM_FEATURE_COPROC (CRC_EXT_ARMV8);
292static const arm_feature_set fpu_neon_ext_v8_1 =
293  ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA);
294
295static int mfloat_abi_opt = -1;
296/* Record user cpu selection for object attributes.  */
297static arm_feature_set selected_cpu = ARM_ARCH_NONE;
298/* Must be long enough to hold any of the names in arm_cpus.  */
299static char selected_cpu_name[20];
300
301extern FLONUM_TYPE generic_floating_point_number;
302
303/* Return TRUE if no cpu was selected on the command line.  */
304static bfd_boolean
305no_cpu_selected (void)
306{
307  return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none);
308}
309
310#ifdef OBJ_ELF
311# ifdef EABI_DEFAULT
312static int meabi_flags = EABI_DEFAULT;
313# else
314static int meabi_flags = EF_ARM_EABI_UNKNOWN;
315# endif
316
317static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
318
319bfd_boolean
320arm_is_eabi (void)
321{
322  return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
323}
324#endif
325
326#ifdef OBJ_ELF
327/* Pre-defined "_GLOBAL_OFFSET_TABLE_"	*/
328symbolS * GOT_symbol;
329#endif
330
331/* 0: assemble for ARM,
332   1: assemble for Thumb,
333   2: assemble for Thumb even though target CPU does not support thumb
334      instructions.  */
335static int thumb_mode = 0;
336/* A value distinct from the possible values for thumb_mode that we
337   can use to record whether thumb_mode has been copied into the
338   tc_frag_data field of a frag.  */
339#define MODE_RECORDED (1 << 4)
340
341/* Specifies the implicit IT instruction behaviour mode.  */
342enum implicit_it_mode
343{
344  IMPLICIT_IT_MODE_NEVER  = 0x00,
345  IMPLICIT_IT_MODE_ARM    = 0x01,
346  IMPLICIT_IT_MODE_THUMB  = 0x02,
347  IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
348};
349static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
350
351/* If unified_syntax is true, we are processing the new unified
352   ARM/Thumb syntax.  Important differences from the old ARM mode:
353
354     - Immediate operands do not require a # prefix.
355     - Conditional affixes always appear at the end of the
356       instruction.  (For backward compatibility, those instructions
357       that formerly had them in the middle continue to accept them
358       there.)
359     - The IT instruction may appear, and if it does is validated
360       against subsequent conditional affixes.  It does not generate
361       machine code.
362
363   Important differences from the old Thumb mode:
364
365     - Immediate operands do not require a # prefix.
366     - Most of the V6T2 instructions are only available in unified mode.
367     - The .N and .W suffixes are recognized and honored (it is an error
368       if they cannot be honored).
369     - All instructions set the flags if and only if they have an 's' affix.
370     - Conditional affixes may be used.  They are validated against
371       preceding IT instructions.  Unlike ARM mode, you cannot use a
372       conditional affix except in the scope of an IT instruction.  */
373
374static bfd_boolean unified_syntax = FALSE;
375
376/* An immediate operand can start with #, and ld*, st*, pld operands
377   can contain [ and ].  We need to tell APP not to elide whitespace
378   before a [, which can appear as the first operand for pld.
379   Likewise, a { can appear as the first operand for push, pop, vld*, etc.  */
380const char arm_symbol_chars[] = "#[]{}";
381
382enum neon_el_type
383{
384  NT_invtype,
385  NT_untyped,
386  NT_integer,
387  NT_float,
388  NT_poly,
389  NT_signed,
390  NT_unsigned
391};
392
393struct neon_type_el
394{
395  enum neon_el_type type;
396  unsigned size;
397};
398
399#define NEON_MAX_TYPE_ELS 4
400
401struct neon_type
402{
403  struct neon_type_el el[NEON_MAX_TYPE_ELS];
404  unsigned elems;
405};
406
407enum it_instruction_type
408{
409   OUTSIDE_IT_INSN,
410   INSIDE_IT_INSN,
411   INSIDE_IT_LAST_INSN,
412   IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
413			      if inside, should be the last one.  */
414   NEUTRAL_IT_INSN,        /* This could be either inside or outside,
415			      i.e. BKPT and NOP.  */
416   IT_INSN                 /* The IT insn has been parsed.  */
417};
418
419/* The maximum number of operands we need.  */
420#define ARM_IT_MAX_OPERANDS 6
421
422struct arm_it
423{
424  const char *	error;
425  unsigned long instruction;
426  int		size;
427  int		size_req;
428  int		cond;
429  /* "uncond_value" is set to the value in place of the conditional field in
430     unconditional versions of the instruction, or -1 if nothing is
431     appropriate.  */
432  int		uncond_value;
433  struct neon_type vectype;
434  /* This does not indicate an actual NEON instruction, only that
435     the mnemonic accepts neon-style type suffixes.  */
436  int		is_neon;
437  /* Set to the opcode if the instruction needs relaxation.
438     Zero if the instruction is not relaxed.  */
439  unsigned long	relax;
440  struct
441  {
442    bfd_reloc_code_real_type type;
443    expressionS		     exp;
444    int			     pc_rel;
445  } reloc;
446
447  enum it_instruction_type it_insn_type;
448
449  struct
450  {
451    unsigned reg;
452    signed int imm;
453    struct neon_type_el vectype;
454    unsigned present	: 1;  /* Operand present.  */
455    unsigned isreg	: 1;  /* Operand was a register.  */
456    unsigned immisreg	: 1;  /* .imm field is a second register.  */
457    unsigned isscalar   : 1;  /* Operand is a (Neon) scalar.  */
458    unsigned immisalign : 1;  /* Immediate is an alignment specifier.  */
459    unsigned immisfloat : 1;  /* Immediate was parsed as a float.  */
460    /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
461       instructions. This allows us to disambiguate ARM <-> vector insns.  */
462    unsigned regisimm   : 1;  /* 64-bit immediate, reg forms high 32 bits.  */
463    unsigned isvec      : 1;  /* Is a single, double or quad VFP/Neon reg.  */
464    unsigned isquad     : 1;  /* Operand is Neon quad-precision register.  */
465    unsigned issingle   : 1;  /* Operand is VFP single-precision register.  */
466    unsigned hasreloc	: 1;  /* Operand has relocation suffix.  */
467    unsigned writeback	: 1;  /* Operand has trailing !  */
468    unsigned preind	: 1;  /* Preindexed address.  */
469    unsigned postind	: 1;  /* Postindexed address.  */
470    unsigned negative	: 1;  /* Index register was negated.  */
471    unsigned shifted	: 1;  /* Shift applied to operation.  */
472    unsigned shift_kind : 3;  /* Shift operation (enum shift_kind).  */
473  } operands[ARM_IT_MAX_OPERANDS];
474};
475
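/* The parsing and encoding state for the instruction currently being
   assembled; it is wiped at the start of each new instruction.  */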
476static struct arm_it inst;
477
478#define NUM_FLOAT_VALS 8
479
480const char * fp_const[] =
481{
482  "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
483};
484
485/* Number of littlenums required to hold an extended precision number.	*/
486#define MAX_LITTLENUMS 6
487
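/* fp_values holds the littlenum expansions of the constants named in
   fp_const above -- the eight values the FPA can encode as immediates.  */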
488LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
489
490#define FAIL	(-1)
491#define SUCCESS (0)
492
493#define SUFF_S 1
494#define SUFF_D 2
495#define SUFF_E 3
496#define SUFF_P 4
497
498#define CP_T_X	 0x00008000
499#define CP_T_Y	 0x00400000
500
501#define CONDS_BIT	 0x00100000
502#define LOAD_BIT	 0x00100000
503
504#define DOUBLE_LOAD_FLAG 0x00000001
505
506struct asm_cond
507{
508  const char *	 template_name;
509  unsigned long  value;
510};
511
512#define COND_ALWAYS 0xE
513
514struct asm_psr
515{
516  const char *   template_name;
517  unsigned long  field;
518};
519
520struct asm_barrier_opt
521{
522  const char *    template_name;
523  unsigned long   value;
524  const arm_feature_set arch;
525};
526
527/* The bit that distinguishes CPSR and SPSR.  */
528#define SPSR_BIT   (1 << 22)
529
530/* The individual PSR flag bits.  */
531#define PSR_c	(1 << 16)
532#define PSR_x	(1 << 17)
533#define PSR_s	(1 << 18)
534#define PSR_f	(1 << 19)
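/* For example, the field suffix in "msr CPSR_fsxc, rN" maps each letter onto
   one of the four bits above (c -> PSR_c ... f -> PSR_f), giving the 4-bit
   field mask in bits 16-19 of the instruction.  */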
535
536struct reloc_entry
537{
538  const char *                    name;
539  bfd_reloc_code_real_type  reloc;
540};
541
542enum vfp_reg_pos
543{
544  VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
545  VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
546};
547
548enum vfp_ldstm_type
549{
550  VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
551};
552
553/* Bits for DEFINED field in neon_typed_alias.  */
554#define NTA_HASTYPE  1
555#define NTA_HASINDEX 2
556
557struct neon_typed_alias
558{
559  unsigned char        defined;
560  unsigned char        index;
561  struct neon_type_el  eltype;
562};
563
564/* ARM register categories.  This includes coprocessor numbers and various
565   architecture extensions' registers.	*/
566enum arm_reg_type
567{
568  REG_TYPE_RN,
569  REG_TYPE_CP,
570  REG_TYPE_CN,
571  REG_TYPE_FN,
572  REG_TYPE_VFS,
573  REG_TYPE_VFD,
574  REG_TYPE_NQ,
575  REG_TYPE_VFSD,
576  REG_TYPE_NDQ,
577  REG_TYPE_NSDQ,
578  REG_TYPE_VFC,
579  REG_TYPE_MVF,
580  REG_TYPE_MVD,
581  REG_TYPE_MVFX,
582  REG_TYPE_MVDX,
583  REG_TYPE_MVAX,
584  REG_TYPE_DSPSC,
585  REG_TYPE_MMXWR,
586  REG_TYPE_MMXWC,
587  REG_TYPE_MMXWCG,
588  REG_TYPE_XSCALE,
589  REG_TYPE_RNB
590};
591
592/* Structure for a hash table entry for a register.
593   If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
594   information which states whether a vector type or index is specified (for a
595   register alias created with .dn or .qn). Otherwise NEON should be NULL.  */
596struct reg_entry
597{
598  const char *               name;
599  unsigned int               number;
600  unsigned char              type;
601  unsigned char              builtin;
602  struct neon_typed_alias *  neon;
603};
604
605/* Diagnostics used when we don't get a register of the expected type.	*/
606const char * const reg_expected_msgs[] =
607{
608  N_("ARM register expected"),
609  N_("bad or missing co-processor number"),
610  N_("co-processor register expected"),
611  N_("FPA register expected"),
612  N_("VFP single precision register expected"),
613  N_("VFP/Neon double precision register expected"),
614  N_("Neon quad precision register expected"),
615  N_("VFP single or double precision register expected"),
616  N_("Neon double or quad precision register expected"),
617  N_("VFP single, double or Neon quad precision register expected"),
618  N_("VFP system register expected"),
619  N_("Maverick MVF register expected"),
620  N_("Maverick MVD register expected"),
621  N_("Maverick MVFX register expected"),
622  N_("Maverick MVDX register expected"),
623  N_("Maverick MVAX register expected"),
624  N_("Maverick DSPSC register expected"),
625  N_("iWMMXt data register expected"),
626  N_("iWMMXt control register expected"),
627  N_("iWMMXt scalar register expected"),
628  N_("XScale accumulator register expected"),
629};
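/* Note: the table above is indexed by enum arm_reg_type and must be kept in
   step with it (REG_TYPE_RNB is internal only and has no message).  */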
630
631/* Some well known registers that we refer to directly elsewhere.  */
632#define REG_R12	12
633#define REG_SP	13
634#define REG_LR	14
635#define REG_PC	15
636
637/* ARM instructions take 4 bytes in the object file, Thumb instructions
638   take 2:  */
639#define INSN_SIZE	4
640
641struct asm_opcode
642{
643  /* Basic string to match.  */
644  const char * template_name;
645
646  /* Parameters to instruction.	 */
647  unsigned int operands[8];
648
649  /* Conditional tag - see opcode_lookup.  */
650  unsigned int tag : 4;
651
652  /* Basic instruction code.  */
653  unsigned int avalue : 28;
654
655  /* Thumb-format instruction code.  */
656  unsigned int tvalue;
657
658  /* Which architecture variant provides this instruction.  */
659  const arm_feature_set * avariant;
660  const arm_feature_set * tvariant;
661
662  /* Function to call to encode instruction in ARM format.  */
663  void (* aencode) (void);
664
665  /* Function to call to encode instruction in Thumb format.  */
666  void (* tencode) (void);
667};
668
669/* Defines for various bits that we will want to toggle.  */
670#define INST_IMMEDIATE	0x02000000
671#define OFFSET_REG	0x02000000
672#define HWOFFSET_IMM	0x00400000
673#define SHIFT_BY_REG	0x00000010
674#define PRE_INDEX	0x01000000
675#define INDEX_UP	0x00800000
676#define WRITE_BACK	0x00200000
677#define LDM_TYPE_2_OR_3	0x00400000
678#define CPSI_MMOD	0x00020000
679
680#define LITERAL_MASK	0xf000f000
681#define OPCODE_MASK	0xfe1fffff
682#define V4_STR_BIT	0x00000020
683#define VLDR_VMOV_SAME	0x0040f000
684
685#define T2_SUBS_PC_LR	0xf3de8f00
686
687#define DATA_OP_SHIFT	21
688
689#define T2_OPCODE_MASK	0xfe1fffff
690#define T2_DATA_OP_SHIFT 21
691
692#define A_COND_MASK         0xf0000000
693#define A_PUSH_POP_OP_MASK  0x0fff0000
694
695/* Opcodes for pushing/popping registers to/from the stack.  */
696#define A1_OPCODE_PUSH    0x092d0000
697#define A2_OPCODE_PUSH    0x052d0004
698#define A2_OPCODE_POP     0x049d0004
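/* A1_OPCODE_PUSH is "stmdb sp!, {reglist}"; A2_OPCODE_PUSH and A2_OPCODE_POP
   are the single-register forms "str Rt, [sp, #-4]!" and "ldr Rt, [sp], #4"
   (with the condition field left clear here).  */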
699
700/* Codes to distinguish the arithmetic instructions.  */
701#define OPCODE_AND	0
702#define OPCODE_EOR	1
703#define OPCODE_SUB	2
704#define OPCODE_RSB	3
705#define OPCODE_ADD	4
706#define OPCODE_ADC	5
707#define OPCODE_SBC	6
708#define OPCODE_RSC	7
709#define OPCODE_TST	8
710#define OPCODE_TEQ	9
711#define OPCODE_CMP	10
712#define OPCODE_CMN	11
713#define OPCODE_ORR	12
714#define OPCODE_MOV	13
715#define OPCODE_BIC	14
716#define OPCODE_MVN	15
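/* The OPCODE_* values above are the 4-bit opcode field of ARM data-processing
   instructions, placed at DATA_OP_SHIFT (bits 21-24).  */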
717
718#define T2_OPCODE_AND	0
719#define T2_OPCODE_BIC	1
720#define T2_OPCODE_ORR	2
721#define T2_OPCODE_ORN	3
722#define T2_OPCODE_EOR	4
723#define T2_OPCODE_ADD	8
724#define T2_OPCODE_ADC	10
725#define T2_OPCODE_SBC	11
726#define T2_OPCODE_SUB	13
727#define T2_OPCODE_RSB	14
728
729#define T_OPCODE_MUL 0x4340
730#define T_OPCODE_TST 0x4200
731#define T_OPCODE_CMN 0x42c0
732#define T_OPCODE_NEG 0x4240
733#define T_OPCODE_MVN 0x43c0
734
735#define T_OPCODE_ADD_R3	0x1800
736#define T_OPCODE_SUB_R3 0x1a00
737#define T_OPCODE_ADD_HI 0x4400
738#define T_OPCODE_ADD_ST 0xb000
739#define T_OPCODE_SUB_ST 0xb080
740#define T_OPCODE_ADD_SP 0xa800
741#define T_OPCODE_ADD_PC 0xa000
742#define T_OPCODE_ADD_I8 0x3000
743#define T_OPCODE_SUB_I8 0x3800
744#define T_OPCODE_ADD_I3 0x1c00
745#define T_OPCODE_SUB_I3 0x1e00
746
747#define T_OPCODE_ASR_R	0x4100
748#define T_OPCODE_LSL_R	0x4080
749#define T_OPCODE_LSR_R	0x40c0
750#define T_OPCODE_ROR_R	0x41c0
751#define T_OPCODE_ASR_I	0x1000
752#define T_OPCODE_LSL_I	0x0000
753#define T_OPCODE_LSR_I	0x0800
754
755#define T_OPCODE_MOV_I8	0x2000
756#define T_OPCODE_CMP_I8 0x2800
757#define T_OPCODE_CMP_LR 0x4280
758#define T_OPCODE_MOV_HR 0x4600
759#define T_OPCODE_CMP_HR 0x4500
760
761#define T_OPCODE_LDR_PC 0x4800
762#define T_OPCODE_LDR_SP 0x9800
763#define T_OPCODE_STR_SP 0x9000
764#define T_OPCODE_LDR_IW 0x6800
765#define T_OPCODE_STR_IW 0x6000
766#define T_OPCODE_LDR_IH 0x8800
767#define T_OPCODE_STR_IH 0x8000
768#define T_OPCODE_LDR_IB 0x7800
769#define T_OPCODE_STR_IB 0x7000
770#define T_OPCODE_LDR_RW 0x5800
771#define T_OPCODE_STR_RW 0x5000
772#define T_OPCODE_LDR_RH 0x5a00
773#define T_OPCODE_STR_RH 0x5200
774#define T_OPCODE_LDR_RB 0x5c00
775#define T_OPCODE_STR_RB 0x5400
776
777#define T_OPCODE_PUSH	0xb400
778#define T_OPCODE_POP	0xbc00
779
780#define T_OPCODE_BRANCH 0xe000
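/* The T_OPCODE_* values are 16-bit Thumb opcode templates with their register
   and immediate fields zeroed; the Thumb encoders OR the operands into them.  */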
781
782#define THUMB_SIZE	2	/* Size of thumb instruction.  */
783#define THUMB_PP_PC_LR 0x0100
784#define THUMB_LOAD_BIT 0x0800
785#define THUMB2_LOAD_BIT 0x00100000
786
787#define BAD_ARGS	_("bad arguments to instruction")
788#define BAD_SP          _("r13 not allowed here")
789#define BAD_PC		_("r15 not allowed here")
790#define BAD_COND	_("instruction cannot be conditional")
791#define BAD_OVERLAP	_("registers may not be the same")
792#define BAD_HIREG	_("lo register required")
793#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
794#define BAD_ADDR_MODE   _("instruction does not accept this addressing mode")
795#define BAD_BRANCH	_("branch must be last instruction in IT block")
796#define BAD_NOT_IT	_("instruction not allowed in IT block")
797#define BAD_FPU		_("selected FPU does not support instruction")
798#define BAD_OUT_IT 	_("thumb conditional instruction should be in IT block")
799#define BAD_IT_COND	_("incorrect condition in IT block")
800#define BAD_IT_IT 	_("IT falling in the range of a previous IT block")
801#define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
802#define BAD_PC_ADDRESSING \
803	_("cannot use register index with PC-relative addressing")
804#define BAD_PC_WRITEBACK \
805	_("cannot use writeback with PC-relative addressing")
806#define BAD_RANGE	_("branch out of range")
807#define BAD_FP16	_("selected processor does not support fp16 instruction")
808#define UNPRED_REG(R)	_("using " R " results in unpredictable behaviour")
809#define THUMB1_RELOC_ONLY  _("relocation valid in thumb1 code only")
810
811static struct hash_control * arm_ops_hsh;
812static struct hash_control * arm_cond_hsh;
813static struct hash_control * arm_shift_hsh;
814static struct hash_control * arm_psr_hsh;
815static struct hash_control * arm_v7m_psr_hsh;
816static struct hash_control * arm_reg_hsh;
817static struct hash_control * arm_reloc_hsh;
818static struct hash_control * arm_barrier_opt_hsh;
819
820/* Stuff needed to resolve the label ambiguity
821   As:
822     ...
823     label:   <insn>
824   may differ from:
825     ...
826     label:
827	      <insn>  */
828
829symbolS *  last_label_seen;
830static int label_is_thumb_function_name = FALSE;
831
832/* Literal pool structure.  Held on a per-section
833   and per-sub-section basis.  */
834
835#define MAX_LITERAL_POOL_SIZE 1024
836typedef struct literal_pool
837{
838  expressionS	         literals [MAX_LITERAL_POOL_SIZE];
839  unsigned int	         next_free_entry;
840  unsigned int	         id;
841  symbolS *	         symbol;
842  segT		         section;
843  subsegT	         sub_section;
844#ifdef OBJ_ELF
845  struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
846#endif
847  struct literal_pool *  next;
848  unsigned int		 alignment;
849} literal_pool;
850
851/* Pointer to a linked list of literal pools.  */
852literal_pool * list_of_pools = NULL;
853
854typedef enum asmfunc_states
855{
856  OUTSIDE_ASMFUNC,
857  WAITING_ASMFUNC_NAME,
858  WAITING_ENDASMFUNC
859} asmfunc_states;
860
861static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;
862
863#ifdef OBJ_ELF
864#  define now_it seg_info (now_seg)->tc_segment_info_data.current_it
865#else
866static struct current_it now_it;
867#endif
868
869static inline int
870now_it_compatible (int cond)
871{
872  return (cond & ~1) == (now_it.cc & ~1);
873}
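/* ARM condition codes come in complementary pairs that differ only in bit 0
   (EQ/NE, CS/CC, ...), so masking off bit 0 above accepts either the IT
   block's condition or its inverse.  */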
874
875static inline int
876conditional_insn (void)
877{
878  return inst.cond != COND_ALWAYS;
879}
880
881static int in_it_block (void);
882
883static int handle_it_state (void);
884
885static void force_automatic_it_block_close (void);
886
887static void it_fsm_post_encode (void);
888
889#define set_it_insn_type(type)			\
890  do						\
891    {						\
892      inst.it_insn_type = type;			\
893      if (handle_it_state () == FAIL)		\
894	return;					\
895    }						\
896  while (0)
897
898#define set_it_insn_type_nonvoid(type, failret) \
899  do						\
900    {                                           \
901      inst.it_insn_type = type;			\
902      if (handle_it_state () == FAIL)		\
903	return failret;				\
904    }						\
905  while(0)
906
907#define set_it_insn_type_last()				\
908  do							\
909    {							\
910      if (inst.cond == COND_ALWAYS)			\
911	set_it_insn_type (IF_INSIDE_IT_LAST_INSN);	\
912      else						\
913	set_it_insn_type (INSIDE_IT_LAST_INSN);		\
914    }							\
915  while (0)
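/* These are macros rather than functions so that a FAIL from
   handle_it_state () returns directly from the calling encoder.  */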
916
917/* Pure syntax.	 */
918
919/* This array holds the chars that always start a comment.  If the
920   pre-processor is disabled, these aren't very useful.	 */
921char arm_comment_chars[] = "@";
922
923/* This array holds the chars that only start a comment at the beginning of
924   a line.  If the line seems to have the form '# 123 filename'
925   .line and .file directives will appear in the pre-processed output.	*/
926/* Note that input_file.c hand checks for '#' at the beginning of the
927   first line of the input file.  This is because the compiler outputs
928   #NO_APP at the beginning of its output.  */
929/* Also note that comments like this one will always work.  */
930const char line_comment_chars[] = "#";
931
932char arm_line_separator_chars[] = ";";
933
934/* Chars that can be used to separate the mantissa
935   from the exponent in floating point numbers.  */
936const char EXP_CHARS[] = "eE";
937
938/* Chars that mean this number is a floating point constant.  */
939/* As in 0f12.456  */
940/* or	 0d1.2345e12  */
941
942const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
943
944/* Prefix characters that indicate the start of an immediate
945   value.  */
946#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
947
948/* Separator character handling.  */
949
950#define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)
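/* Note that skip_whitespace () steps over at most one space; it relies on
   GAS's input scrubber having already collapsed runs of whitespace.  */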
951
952static inline int
953skip_past_char (char ** str, char c)
954{
955  /* PR gas/14987: Allow for whitespace before the expected character.  */
956  skip_whitespace (*str);
957
958  if (**str == c)
959    {
960      (*str)++;
961      return SUCCESS;
962    }
963  else
964    return FAIL;
965}
966
967#define skip_past_comma(str) skip_past_char (str, ',')
968
969/* Arithmetic expressions (possibly involving symbols).	 */
970
971/* Return TRUE if anything in the expression is a bignum.  */
972
973static int
974walk_no_bignums (symbolS * sp)
975{
976  if (symbol_get_value_expression (sp)->X_op == O_big)
977    return 1;
978
979  if (symbol_get_value_expression (sp)->X_add_symbol)
980    {
981      return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
982	      || (symbol_get_value_expression (sp)->X_op_symbol
983		  && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
984    }
985
986  return 0;
987}
988
989static int in_my_get_expression = 0;
990
991/* Third argument to my_get_expression.	 */
992#define GE_NO_PREFIX 0
993#define GE_IMM_PREFIX 1
994#define GE_OPT_PREFIX 2
995/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
996   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
997#define GE_OPT_PREFIX_BIG 3
998
999static int
1000my_get_expression (expressionS * ep, char ** str, int prefix_mode)
1001{
1002  char * save_in;
1003  segT	 seg;
1004
1005  /* In unified syntax, all prefixes are optional.  */
1006  if (unified_syntax)
1007    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
1008		  : GE_OPT_PREFIX;
1009
1010  switch (prefix_mode)
1011    {
1012    case GE_NO_PREFIX: break;
1013    case GE_IMM_PREFIX:
1014      if (!is_immediate_prefix (**str))
1015	{
1016	  inst.error = _("immediate expression requires a # prefix");
1017	  return FAIL;
1018	}
1019      (*str)++;
1020      break;
1021    case GE_OPT_PREFIX:
1022    case GE_OPT_PREFIX_BIG:
1023      if (is_immediate_prefix (**str))
1024	(*str)++;
1025      break;
1026    default: abort ();
1027    }
1028
1029  memset (ep, 0, sizeof (expressionS));
1030
1031  save_in = input_line_pointer;
1032  input_line_pointer = *str;
1033  in_my_get_expression = 1;
1034  seg = expression (ep);
1035  in_my_get_expression = 0;
1036
1037  if (ep->X_op == O_illegal || ep->X_op == O_absent)
1038    {
1039      /* We found a bad or missing expression in md_operand().  */
1040      *str = input_line_pointer;
1041      input_line_pointer = save_in;
1042      if (inst.error == NULL)
1043	inst.error = (ep->X_op == O_absent
1044		      ? _("missing expression") :_("bad expression"));
1045      return 1;
1046    }
1047
1048#ifdef OBJ_AOUT
1049  if (seg != absolute_section
1050      && seg != text_section
1051      && seg != data_section
1052      && seg != bss_section
1053      && seg != undefined_section)
1054    {
1055      inst.error = _("bad segment");
1056      *str = input_line_pointer;
1057      input_line_pointer = save_in;
1058      return 1;
1059    }
1060#else
1061  (void) seg;
1062#endif
1063
1064  /* Get rid of any bignums now, so that we don't generate an error for which
1065     we can't establish a line number later on.	 Big numbers are never valid
1066     in instructions, which is where this routine is always called.  */
1067  if (prefix_mode != GE_OPT_PREFIX_BIG
1068      && (ep->X_op == O_big
1069	  || (ep->X_add_symbol
1070	      && (walk_no_bignums (ep->X_add_symbol)
1071		  || (ep->X_op_symbol
1072		      && walk_no_bignums (ep->X_op_symbol))))))
1073    {
1074      inst.error = _("invalid constant");
1075      *str = input_line_pointer;
1076      input_line_pointer = save_in;
1077      return 1;
1078    }
1079
1080  *str = input_line_pointer;
1081  input_line_pointer = save_in;
1082  return 0;
1083}
1084
1085/* Turn a string in input_line_pointer into a floating point constant
1086   of type TYPE, and store the appropriate bytes in *LITP.  The number
1087   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
1088   returned, or NULL on OK.
1089
1090   Note that fp constants aren't represented in the normal way on the ARM.
1091   In big endian mode, things are as expected.	However, in little endian
1092   mode fp constants are big-endian word-wise, and little-endian byte-wise
1093   within the words.  For example, (double) 1.1 in big endian mode is
1094   the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1095   the byte sequence 99 99 f1 3f 9a 99 99 99.
1096
1097   ??? The format of 12 byte floats is uncertain according to gcc's arm.h.  */
1098
1099const char *
1100md_atof (int type, char * litP, int * sizeP)
1101{
1102  int prec;
1103  LITTLENUM_TYPE words[MAX_LITTLENUMS];
1104  char *t;
1105  int i;
1106
1107  switch (type)
1108    {
1109    case 'f':
1110    case 'F':
1111    case 's':
1112    case 'S':
1113      prec = 2;
1114      break;
1115
1116    case 'd':
1117    case 'D':
1118    case 'r':
1119    case 'R':
1120      prec = 4;
1121      break;
1122
1123    case 'x':
1124    case 'X':
1125      prec = 5;
1126      break;
1127
1128    case 'p':
1129    case 'P':
1130      prec = 5;
1131      break;
1132
1133    default:
1134      *sizeP = 0;
1135      return _("Unrecognized or unsupported floating point constant");
1136    }
1137
1138  t = atof_ieee (input_line_pointer, type, words);
1139  if (t)
1140    input_line_pointer = t;
1141  *sizeP = prec * sizeof (LITTLENUM_TYPE);
1142
1143  if (target_big_endian)
1144    {
1145      for (i = 0; i < prec; i++)
1146	{
1147	  md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1148	  litP += sizeof (LITTLENUM_TYPE);
1149	}
1150    }
1151  else
1152    {
1153      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
1154	for (i = prec - 1; i >= 0; i--)
1155	  {
1156	    md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1157	    litP += sizeof (LITTLENUM_TYPE);
1158	  }
1159      else
1160	/* For a 4 byte float the order of elements in `words' is 1 0.
1161	   For an 8 byte float the order is 1 0 3 2.  */
1162	for (i = 0; i < prec; i += 2)
1163	  {
1164	    md_number_to_chars (litP, (valueT) words[i + 1],
1165				sizeof (LITTLENUM_TYPE));
1166	    md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
1167				(valueT) words[i], sizeof (LITTLENUM_TYPE));
1168	    litP += 2 * sizeof (LITTLENUM_TYPE);
1169	  }
1170    }
1171
1172  return NULL;
1173}
1174
1175/* We handle all bad expressions here, so that we can report the faulty
1176   instruction in the error message.  */
1177void
1178md_operand (expressionS * exp)
1179{
1180  if (in_my_get_expression)
1181    exp->X_op = O_illegal;
1182}
1183
1184/* Immediate values.  */
1185
1186/* Generic immediate-value read function for use in directives.
1187   Accepts anything that 'expression' can fold to a constant.
1188   *val receives the number.  */
1189#ifdef OBJ_ELF
1190static int
1191immediate_for_directive (int *val)
1192{
1193  expressionS exp;
1194  exp.X_op = O_illegal;
1195
1196  if (is_immediate_prefix (*input_line_pointer))
1197    {
1198      input_line_pointer++;
1199      expression (&exp);
1200    }
1201
1202  if (exp.X_op != O_constant)
1203    {
1204      as_bad (_("expected #constant"));
1205      ignore_rest_of_line ();
1206      return FAIL;
1207    }
1208  *val = exp.X_add_number;
1209  return SUCCESS;
1210}
1211#endif
1212
1213/* Register parsing.  */
1214
1215/* Generic register parser.  CCP points to what should be the
1216   beginning of a register name.  If it is indeed a valid register
1217   name, advance CCP over it and return the reg_entry structure;
1218   otherwise return NULL.  Does not issue diagnostics.	*/
1219
1220static struct reg_entry *
1221arm_reg_parse_multi (char **ccp)
1222{
1223  char *start = *ccp;
1224  char *p;
1225  struct reg_entry *reg;
1226
1227  skip_whitespace (start);
1228
1229#ifdef REGISTER_PREFIX
1230  if (*start != REGISTER_PREFIX)
1231    return NULL;
1232  start++;
1233#endif
1234#ifdef OPTIONAL_REGISTER_PREFIX
1235  if (*start == OPTIONAL_REGISTER_PREFIX)
1236    start++;
1237#endif
1238
1239  p = start;
1240  if (!ISALPHA (*p) || !is_name_beginner (*p))
1241    return NULL;
1242
1243  do
1244    p++;
1245  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1246
1247  reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1248
1249  if (!reg)
1250    return NULL;
1251
1252  *ccp = p;
1253  return reg;
1254}
1255
1256static int
1257arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1258		    enum arm_reg_type type)
1259{
1260  /* Alternative syntaxes are accepted for a few register classes.  */
1261  switch (type)
1262    {
1263    case REG_TYPE_MVF:
1264    case REG_TYPE_MVD:
1265    case REG_TYPE_MVFX:
1266    case REG_TYPE_MVDX:
1267      /* Generic coprocessor register names are allowed for these.  */
1268      if (reg && reg->type == REG_TYPE_CN)
1269	return reg->number;
1270      break;
1271
1272    case REG_TYPE_CP:
1273      /* For backward compatibility, a bare number is valid here.  */
1274      {
1275	unsigned long processor = strtoul (start, ccp, 10);
1276	if (*ccp != start && processor <= 15)
1277	  return processor;
1278      }
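      /* Fall through.  */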
1279
1280    case REG_TYPE_MMXWC:
1281      /* WC includes WCG.  ??? I'm not sure this is true for all
1282	 instructions that take WC registers.  */
1283      if (reg && reg->type == REG_TYPE_MMXWCG)
1284	return reg->number;
1285      break;
1286
1287    default:
1288      break;
1289    }
1290
1291  return FAIL;
1292}
1293
1294/* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1295   return value is the register number or FAIL.  */
1296
1297static int
1298arm_reg_parse (char **ccp, enum arm_reg_type type)
1299{
1300  char *start = *ccp;
1301  struct reg_entry *reg = arm_reg_parse_multi (ccp);
1302  int ret;
1303
1304  /* Do not allow a scalar (reg+index) to parse as a register.  */
1305  if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1306    return FAIL;
1307
1308  if (reg && reg->type == type)
1309    return reg->number;
1310
1311  if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1312    return ret;
1313
1314  *ccp = start;
1315  return FAIL;
1316}
1317
1318/* Parse a Neon type specifier. *STR should point at the leading '.'
1319   character. Does no verification at this stage that the type fits the opcode
1320   properly. E.g.,
1321
1322     .i32.i32.s16
1323     .s32.f32
1324     .u16
1325
1326   Can all be legally parsed by this function.
1327
1328   Fills in neon_type struct pointer with parsed information, and updates STR
1329   to point after the parsed type specifier. Returns SUCCESS if this was a legal
1330   type, FAIL if not.  */
1331
1332static int
1333parse_neon_type (struct neon_type *type, char **str)
1334{
1335  char *ptr = *str;
1336
1337  if (type)
1338    type->elems = 0;
1339
1340  while (type->elems < NEON_MAX_TYPE_ELS)
1341    {
1342      enum neon_el_type thistype = NT_untyped;
1343      unsigned thissize = -1u;
1344
1345      if (*ptr != '.')
1346	break;
1347
1348      ptr++;
1349
1350      /* Just a size without an explicit type.  */
1351      if (ISDIGIT (*ptr))
1352	goto parsesize;
1353
1354      switch (TOLOWER (*ptr))
1355	{
1356	case 'i': thistype = NT_integer; break;
1357	case 'f': thistype = NT_float; break;
1358	case 'p': thistype = NT_poly; break;
1359	case 's': thistype = NT_signed; break;
1360	case 'u': thistype = NT_unsigned; break;
1361	case 'd':
1362	  thistype = NT_float;
1363	  thissize = 64;
1364	  ptr++;
1365	  goto done;
1366	default:
1367	  as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1368	  return FAIL;
1369	}
1370
1371      ptr++;
1372
1373      /* .f is an abbreviation for .f32.  */
1374      if (thistype == NT_float && !ISDIGIT (*ptr))
1375	thissize = 32;
1376      else
1377	{
1378	parsesize:
1379	  thissize = strtoul (ptr, &ptr, 10);
1380
1381	  if (thissize != 8 && thissize != 16 && thissize != 32
1382	      && thissize != 64)
1383	    {
1384	      as_bad (_("bad size %d in type specifier"), thissize);
1385	      return FAIL;
1386	    }
1387	}
1388
1389      done:
1390      if (type)
1391	{
1392	  type->el[type->elems].type = thistype;
1393	  type->el[type->elems].size = thissize;
1394	  type->elems++;
1395	}
1396    }
1397
1398  /* Empty/missing type is not a successful parse.  */
1399  if (type->elems == 0)
1400    return FAIL;
1401
1402  *str = ptr;
1403
1404  return SUCCESS;
1405}
1406
1407/* Errors may be set multiple times during parsing or bit encoding
1408   (particularly in the Neon bits), but usually the earliest error which is set
1409   will be the most meaningful. Avoid overwriting it with later (cascading)
1410   errors by calling this function.  */
1411
1412static void
1413first_error (const char *err)
1414{
1415  if (!inst.error)
1416    inst.error = err;
1417}
1418
1419/* Parse a single type, e.g. ".s32", leading period included.  */
1420static int
1421parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1422{
1423  char *str = *ccp;
1424  struct neon_type optype;
1425
1426  if (*str == '.')
1427    {
1428      if (parse_neon_type (&optype, &str) == SUCCESS)
1429	{
1430	  if (optype.elems == 1)
1431	    *vectype = optype.el[0];
1432	  else
1433	    {
1434	      first_error (_("only one type should be specified for operand"));
1435	      return FAIL;
1436	    }
1437	}
1438      else
1439	{
1440	  first_error (_("vector type expected"));
1441	  return FAIL;
1442	}
1443    }
1444  else
1445    return FAIL;
1446
1447  *ccp = str;
1448
1449  return SUCCESS;
1450}
1451
1452/* Special values for scalar indices.  Ordinary indices have a range of 0-7,
1453   so these out-of-range values still fit in the 4-bit index field.  */
1454
1455#define NEON_ALL_LANES		15
1456#define NEON_INTERLEAVE_LANES	14
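/* For example, "d0[2]" parses with index 2, while "d0[]" (as used by
   "vld1.8 {d0[]}, [r0]") parses with index NEON_ALL_LANES.  */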
1457
1458/* Parse either a register or a scalar, with an optional type. Return the
1459   register number, and optionally fill in the actual type of the register
1460   when multiple alternatives were given (REG_TYPE_NDQ) in *RTYPE, and
1461   type/index information in *TYPEINFO.  */
1462
1463static int
1464parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
1465			   enum arm_reg_type *rtype,
1466			   struct neon_typed_alias *typeinfo)
1467{
1468  char *str = *ccp;
1469  struct reg_entry *reg = arm_reg_parse_multi (&str);
1470  struct neon_typed_alias atype;
1471  struct neon_type_el parsetype;
1472
1473  atype.defined = 0;
1474  atype.index = -1;
1475  atype.eltype.type = NT_invtype;
1476  atype.eltype.size = -1;
1477
1478  /* Try alternate syntax for some types of register. Note these are mutually
1479     exclusive with the Neon syntax extensions.  */
1480  if (reg == NULL)
1481    {
1482      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
1483      if (altreg != FAIL)
1484	*ccp = str;
1485      if (typeinfo)
1486	*typeinfo = atype;
1487      return altreg;
1488    }
1489
1490  /* Undo polymorphism when a set of register types may be accepted.  */
1491  if ((type == REG_TYPE_NDQ
1492       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
1493      || (type == REG_TYPE_VFSD
1494	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
1495      || (type == REG_TYPE_NSDQ
1496	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
1497	      || reg->type == REG_TYPE_NQ))
1498      || (type == REG_TYPE_MMXWC
1499	  && (reg->type == REG_TYPE_MMXWCG)))
1500    type = (enum arm_reg_type) reg->type;
1501
1502  if (type != reg->type)
1503    return FAIL;
1504
1505  if (reg->neon)
1506    atype = *reg->neon;
1507
1508  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
1509    {
1510      if ((atype.defined & NTA_HASTYPE) != 0)
1511	{
1512	  first_error (_("can't redefine type for operand"));
1513	  return FAIL;
1514	}
1515      atype.defined |= NTA_HASTYPE;
1516      atype.eltype = parsetype;
1517    }
1518
1519  if (skip_past_char (&str, '[') == SUCCESS)
1520    {
1521      if (type != REG_TYPE_VFD)
1522	{
1523	  first_error (_("only D registers may be indexed"));
1524	  return FAIL;
1525	}
1526
1527      if ((atype.defined & NTA_HASINDEX) != 0)
1528	{
1529	  first_error (_("can't change index for operand"));
1530	  return FAIL;
1531	}
1532
1533      atype.defined |= NTA_HASINDEX;
1534
1535      if (skip_past_char (&str, ']') == SUCCESS)
1536	atype.index = NEON_ALL_LANES;
1537      else
1538	{
1539	  expressionS exp;
1540
1541	  my_get_expression (&exp, &str, GE_NO_PREFIX);
1542
1543	  if (exp.X_op != O_constant)
1544	    {
1545	      first_error (_("constant expression required"));
1546	      return FAIL;
1547	    }
1548
1549	  if (skip_past_char (&str, ']') == FAIL)
1550	    return FAIL;
1551
1552	  atype.index = exp.X_add_number;
1553	}
1554    }
1555
1556  if (typeinfo)
1557    *typeinfo = atype;
1558
1559  if (rtype)
1560    *rtype = type;
1561
1562  *ccp = str;
1563
1564  return reg->number;
1565}
1566
1567/* Like arm_reg_parse, but also allow the following extra features:
1568    - If RTYPE is non-NULL, return the (possibly restricted) type of the
1569      register (e.g. Neon double or quad reg when either has been requested).
1570    - If this is a Neon vector type with additional type information, fill
1571      in the struct pointed to by VECTYPE (if non-NULL).
1572   This function will fail (setting an error) on encountering a scalar.  */
1573
1574static int
1575arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1576		     enum arm_reg_type *rtype, struct neon_type_el *vectype)
1577{
1578  struct neon_typed_alias atype;
1579  char *str = *ccp;
1580  int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1581
1582  if (reg == FAIL)
1583    return FAIL;
1584
1585  /* Do not allow regname(... to parse as a register.  */
1586  if (*str == '(')
1587    return FAIL;
1588
1589  /* Do not allow a scalar (reg+index) to parse as a register.  */
1590  if ((atype.defined & NTA_HASINDEX) != 0)
1591    {
1592      first_error (_("register operand expected, but got scalar"));
1593      return FAIL;
1594    }
1595
1596  if (vectype)
1597    *vectype = atype.eltype;
1598
1599  *ccp = str;
1600
1601  return reg;
1602}
1603
1604#define NEON_SCALAR_REG(X)	((X) >> 4)
1605#define NEON_SCALAR_INDEX(X)	((X) & 15)
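/* parse_scalar () below returns a scalar packed as (register << 4) | index,
   e.g. d5[2] becomes 0x52; these macros unpack that value.  */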
1606
1607/* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1608   have enough information to be able to do a good job bounds-checking. So, we
1609   just do easy checks here, and do further checks later.  */
1610
1611static int
1612parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1613{
1614  int reg;
1615  char *str = *ccp;
1616  struct neon_typed_alias atype;
1617
1618  reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1619
1620  if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1621    return FAIL;
1622
1623  if (atype.index == NEON_ALL_LANES)
1624    {
1625      first_error (_("scalar must have an index"));
1626      return FAIL;
1627    }
1628  else if (atype.index >= 64 / elsize)
1629    {
1630      first_error (_("scalar index out of range"));
1631      return FAIL;
1632    }
1633
1634  if (type)
1635    *type = atype.eltype;
1636
1637  *ccp = str;
1638
1639  return reg * 16 + atype.index;
1640}
1641
1642/* Parse an ARM register list.  Returns the bitmask, or FAIL.  */
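/* For example, "{r0-r3, r5, lr}" yields the mask 0x402f.  Several lists may
   also be concatenated with '+' or '|', e.g. "{r0, r1} + {r4}".  */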
1643
1644static long
1645parse_reg_list (char ** strp)
1646{
1647  char * str = * strp;
1648  long	 range = 0;
1649  int	 another_range;
1650
1651  /* We come back here if we get ranges concatenated by '+' or '|'.  */
1652  do
1653    {
1654      skip_whitespace (str);
1655
1656      another_range = 0;
1657
1658      if (*str == '{')
1659	{
1660	  int in_range = 0;
1661	  int cur_reg = -1;
1662
1663	  str++;
1664	  do
1665	    {
1666	      int reg;
1667
1668	      if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1669		{
1670		  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1671		  return FAIL;
1672		}
1673
1674	      if (in_range)
1675		{
1676		  int i;
1677
1678		  if (reg <= cur_reg)
1679		    {
1680		      first_error (_("bad range in register list"));
1681		      return FAIL;
1682		    }
1683
1684		  for (i = cur_reg + 1; i < reg; i++)
1685		    {
1686		      if (range & (1 << i))
1687			as_tsktsk
1688			  (_("Warning: duplicated register (r%d) in register list"),
1689			   i);
1690		      else
1691			range |= 1 << i;
1692		    }
1693		  in_range = 0;
1694		}
1695
1696	      if (range & (1 << reg))
1697		as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1698			   reg);
1699	      else if (reg <= cur_reg)
1700		as_tsktsk (_("Warning: register range not in ascending order"));
1701
1702	      range |= 1 << reg;
1703	      cur_reg = reg;
1704	    }
1705	  while (skip_past_comma (&str) != FAIL
1706		 || (in_range = 1, *str++ == '-'));
1707	  str--;
1708
1709	  if (skip_past_char (&str, '}') == FAIL)
1710	    {
1711	      first_error (_("missing `}'"));
1712	      return FAIL;
1713	    }
1714	}
1715      else
1716	{
1717	  expressionS exp;
1718
1719	  if (my_get_expression (&exp, &str, GE_NO_PREFIX))
1720	    return FAIL;
1721
1722	  if (exp.X_op == O_constant)
1723	    {
1724	      if (exp.X_add_number
1725		  != (exp.X_add_number & 0x0000ffff))
1726		{
1727		  inst.error = _("invalid register mask");
1728		  return FAIL;
1729		}
1730
1731	      if ((range & exp.X_add_number) != 0)
1732		{
1733		  int regno = range & exp.X_add_number;
1734
1735		  regno &= -regno;
1736		  regno = (1 << regno) - 1;
1737		  as_tsktsk
1738		    (_("Warning: duplicated register (r%d) in register list"),
1739		     regno);
1740		}
1741
1742	      range |= exp.X_add_number;
1743	    }
1744	  else
1745	    {
1746	      if (inst.reloc.type != 0)
1747		{
1748		  inst.error = _("expression too complex");
1749		  return FAIL;
1750		}
1751
1752	      memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
1753	      inst.reloc.type = BFD_RELOC_ARM_MULTI;
1754	      inst.reloc.pc_rel = 0;
1755	    }
1756	}
1757
1758      if (*str == '|' || *str == '+')
1759	{
1760	  str++;
1761	  another_range = 1;
1762	}
1763    }
1764  while (another_range);
1765
1766  *strp = str;
1767  return range;
1768}
1769
1770/* Types of registers in a list.  */
1771
1772enum reg_list_els
1773{
1774  REGLIST_VFP_S,
1775  REGLIST_VFP_D,
1776  REGLIST_NEON_D
1777};
1778
1779/* Parse a VFP register list.  If the string is invalid return FAIL.
1780   Otherwise return the number of registers, and set PBASE to the first
1781   register.  Parses registers of type ETYPE.
1782   If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1783     - Q registers can be used to specify pairs of D registers
1784     - { } can be omitted from around a singleton register list
1785	 FIXME: This is not implemented, as it would require backtracking in
1786	 some cases, e.g.:
1787	   vtbl.8 d3,d4,d5
1788	 This could be done (the meaning isn't really ambiguous), but doesn't
1789	 fit in well with the current parsing framework.
1790     - 32 D registers may be used (also true for VFPv3).
1791   FIXME: Types are ignored in these register lists, which is probably a
1792   bug.  */
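/* For example, "{s4-s7}" with REGLIST_VFP_S returns 4 and sets *PBASE to 4;
   "{q0, q1}" with REGLIST_NEON_D returns 4 with *PBASE set to 0.  */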
1793
1794static int
1795parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
1796{
1797  char *str = *ccp;
1798  int base_reg;
1799  int new_base;
1800  enum arm_reg_type regtype = (enum arm_reg_type) 0;
1801  int max_regs = 0;
1802  int count = 0;
1803  int warned = 0;
1804  unsigned long mask = 0;
1805  int i;
1806
1807  if (skip_past_char (&str, '{') == FAIL)
1808    {
1809      inst.error = _("expecting {");
1810      return FAIL;
1811    }
1812
1813  switch (etype)
1814    {
1815    case REGLIST_VFP_S:
1816      regtype = REG_TYPE_VFS;
1817      max_regs = 32;
1818      break;
1819
1820    case REGLIST_VFP_D:
1821      regtype = REG_TYPE_VFD;
1822      break;
1823
1824    case REGLIST_NEON_D:
1825      regtype = REG_TYPE_NDQ;
1826      break;
1827    }
1828
1829  if (etype != REGLIST_VFP_S)
1830    {
1831      /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
1832      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
1833	{
1834	  max_regs = 32;
1835	  if (thumb_mode)
1836	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
1837				    fpu_vfp_ext_d32);
1838	  else
1839	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
1840				    fpu_vfp_ext_d32);
1841	}
1842      else
1843	max_regs = 16;
1844    }
1845
1846  base_reg = max_regs;
1847
1848  do
1849    {
1850      int setmask = 1, addregs = 1;
1851
1852      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);
1853
1854      if (new_base == FAIL)
1855	{
1856	  first_error (_(reg_expected_msgs[regtype]));
1857	  return FAIL;
1858	}
1859
1860      if (new_base >= max_regs)
1861	{
1862	  first_error (_("register out of range in list"));
1863	  return FAIL;
1864	}
1865
1866      /* Note: a value of 2 * n is returned for the register Q<n>.  */
1867      if (regtype == REG_TYPE_NQ)
1868	{
1869	  setmask = 3;
1870	  addregs = 2;
1871	}
1872
1873      if (new_base < base_reg)
1874	base_reg = new_base;
1875
1876      if (mask & (setmask << new_base))
1877	{
1878	  first_error (_("invalid register list"));
1879	  return FAIL;
1880	}
1881
1882      if ((mask >> new_base) != 0 && ! warned)
1883	{
1884	  as_tsktsk (_("register list not in ascending order"));
1885	  warned = 1;
1886	}
1887
1888      mask |= setmask << new_base;
1889      count += addregs;
1890
1891      if (*str == '-') /* We have the start of a range expression.  */
1892	{
1893	  int high_range;
1894
1895	  str++;
1896
1897	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
1898	      == FAIL)
1899	    {
1900	      inst.error = gettext (reg_expected_msgs[regtype]);
1901	      return FAIL;
1902	    }
1903
1904	  if (high_range >= max_regs)
1905	    {
1906	      first_error (_("register out of range in list"));
1907	      return FAIL;
1908	    }
1909
1910	  if (regtype == REG_TYPE_NQ)
1911	    high_range = high_range + 1;
1912
1913	  if (high_range <= new_base)
1914	    {
1915	      inst.error = _("register range not in ascending order");
1916	      return FAIL;
1917	    }
1918
1919	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
1920	    {
1921	      if (mask & (setmask << new_base))
1922		{
1923		  inst.error = _("invalid register list");
1924		  return FAIL;
1925		}
1926
1927	      mask |= setmask << new_base;
1928	      count += addregs;
1929	    }
1930	}
1931    }
1932  while (skip_past_comma (&str) != FAIL);
1933
1934  str++;
1935
1936  /* Sanity check -- should have raised a parse error above.  */
1937  if (count == 0 || count > max_regs)
1938    abort ();
1939
1940  *pbase = base_reg;
1941
1942  /* Final test -- the registers must be consecutive.  */
1943  mask >>= base_reg;
1944  for (i = 0; i < count; i++)
1945    {
1946      if ((mask & (1u << i)) == 0)
1947	{
1948	  inst.error = _("non-contiguous register range");
1949	  return FAIL;
1950	}
1951    }
1952
1953  *ccp = str;
1954
1955  return count;
1956}
1957
1958/* True if two alias types are the same.  */
1959
1960static bfd_boolean
1961neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1962{
1963  if (!a && !b)
1964    return TRUE;
1965
1966  if (!a || !b)
1967    return FALSE;
1968
1969  if (a->defined != b->defined)
1970    return FALSE;
1971
1972  if ((a->defined & NTA_HASTYPE) != 0
1973      && (a->eltype.type != b->eltype.type
1974	  || a->eltype.size != b->eltype.size))
1975    return FALSE;
1976
1977  if ((a->defined & NTA_HASINDEX) != 0
1978      && (a->index != b->index))
1979    return FALSE;
1980
1981  return TRUE;
1982}
1983
1984/* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1985   The base register is put in *PBASE.
1986   The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1987   the return value.
1988   The register stride (minus one) is put in bit 4 of the return value.
1989   Bits [6:5] encode the list length (minus one).
1990   The type of the list elements is put in *ELTYPE, if non-NULL.  */
1991
1992#define NEON_LANE(X)		((X) & 0xf)
1993#define NEON_REG_STRIDE(X)	((((X) >> 4) & 1) + 1)
1994#define NEON_REGLIST_LENGTH(X)	((((X) >> 5) & 3) + 1)
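
/* For example, "{d0[2], d2[2]}" should produce a base register of 0 and a
   return value of 0x32: NEON_LANE is 2, NEON_REG_STRIDE is 2 (registers
   d0 and d2) and NEON_REGLIST_LENGTH is 2.  */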
1995
1996static int
1997parse_neon_el_struct_list (char **str, unsigned *pbase,
1998			   struct neon_type_el *eltype)
1999{
2000  char *ptr = *str;
2001  int base_reg = -1;
2002  int reg_incr = -1;
2003  int count = 0;
2004  int lane = -1;
2005  int leading_brace = 0;
2006  enum arm_reg_type rtype = REG_TYPE_NDQ;
2007  const char *const incr_error = _("register stride must be 1 or 2");
2008  const char *const type_error = _("mismatched element/structure types in list");
2009  struct neon_typed_alias firsttype;
2010  firsttype.defined = 0;
2011  firsttype.eltype.type = NT_invtype;
2012  firsttype.eltype.size = -1;
2013  firsttype.index = -1;
2014
2015  if (skip_past_char (&ptr, '{') == SUCCESS)
2016    leading_brace = 1;
2017
2018  do
2019    {
2020      struct neon_typed_alias atype;
2021      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);
2022
2023      if (getreg == FAIL)
2024	{
2025	  first_error (_(reg_expected_msgs[rtype]));
2026	  return FAIL;
2027	}
2028
2029      if (base_reg == -1)
2030	{
2031	  base_reg = getreg;
2032	  if (rtype == REG_TYPE_NQ)
2033	    {
2034	      reg_incr = 1;
2035	    }
2036	  firsttype = atype;
2037	}
2038      else if (reg_incr == -1)
2039	{
2040	  reg_incr = getreg - base_reg;
2041	  if (reg_incr < 1 || reg_incr > 2)
2042	    {
2043	      first_error (_(incr_error));
2044	      return FAIL;
2045	    }
2046	}
2047      else if (getreg != base_reg + reg_incr * count)
2048	{
2049	  first_error (_(incr_error));
2050	  return FAIL;
2051	}
2052
2053      if (! neon_alias_types_same (&atype, &firsttype))
2054	{
2055	  first_error (_(type_error));
2056	  return FAIL;
2057	}
2058
2059      /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
2060	 modes.  */
2061      if (ptr[0] == '-')
2062	{
2063	  struct neon_typed_alias htype;
2064	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
2065	  if (lane == -1)
2066	    lane = NEON_INTERLEAVE_LANES;
2067	  else if (lane != NEON_INTERLEAVE_LANES)
2068	    {
2069	      first_error (_(type_error));
2070	      return FAIL;
2071	    }
2072	  if (reg_incr == -1)
2073	    reg_incr = 1;
2074	  else if (reg_incr != 1)
2075	    {
2076	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
2077	      return FAIL;
2078	    }
2079	  ptr++;
2080	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
2081	  if (hireg == FAIL)
2082	    {
2083	      first_error (_(reg_expected_msgs[rtype]));
2084	      return FAIL;
2085	    }
2086	  if (! neon_alias_types_same (&htype, &firsttype))
2087	    {
2088	      first_error (_(type_error));
2089	      return FAIL;
2090	    }
2091	  count += hireg + dregs - getreg;
2092	  continue;
2093	}
2094
2095      /* If we're using Q registers, we can't use [] or [n] syntax.  */
2096      if (rtype == REG_TYPE_NQ)
2097	{
2098	  count += 2;
2099	  continue;
2100	}
2101
2102      if ((atype.defined & NTA_HASINDEX) != 0)
2103	{
2104	  if (lane == -1)
2105	    lane = atype.index;
2106	  else if (lane != atype.index)
2107	    {
2108	      first_error (_(type_error));
2109	      return FAIL;
2110	    }
2111	}
2112      else if (lane == -1)
2113	lane = NEON_INTERLEAVE_LANES;
2114      else if (lane != NEON_INTERLEAVE_LANES)
2115	{
2116	  first_error (_(type_error));
2117	  return FAIL;
2118	}
2119      count++;
2120    }
2121  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);
2122
2123  /* No lane set by [x]. We must be interleaving structures.  */
2124  if (lane == -1)
2125    lane = NEON_INTERLEAVE_LANES;
2126
2127  /* Sanity check.  */
2128  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
2129      || (count > 1 && reg_incr == -1))
2130    {
2131      first_error (_("error parsing element/structure list"));
2132      return FAIL;
2133    }
2134
2135  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
2136    {
2137      first_error (_("expected }"));
2138      return FAIL;
2139    }
2140
2141  if (reg_incr == -1)
2142    reg_incr = 1;
2143
2144  if (eltype)
2145    *eltype = firsttype.eltype;
2146
2147  *pbase = base_reg;
2148  *str = ptr;
2149
2150  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
2151}
2152
2153/* Parse an explicit relocation suffix on an expression.  This is
2154   either nothing, or a word in parentheses.  Note that if !OBJ_ELF,
2155   arm_reloc_hsh contains no entries, so this function can only
2156   succeed if there is no () after the word.  Returns -1 on error,
2157   BFD_RELOC_UNUSED if there wasn't any suffix.	 */
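
/* For example, when assembling ".word sym(GOT)" for ELF targets, this
   should consume the "(GOT)" suffix and return the corresponding value
   from arm_reloc_hsh; plain ".word sym" yields BFD_RELOC_UNUSED.  */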
2158
2159static int
2160parse_reloc (char **str)
2161{
2162  struct reloc_entry *r;
2163  char *p, *q;
2164
2165  if (**str != '(')
2166    return BFD_RELOC_UNUSED;
2167
2168  p = *str + 1;
2169  q = p;
2170
2171  while (*q && *q != ')' && *q != ',')
2172    q++;
2173  if (*q != ')')
2174    return -1;
2175
2176  if ((r = (struct reloc_entry *)
2177       hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2178    return -1;
2179
2180  *str = q + 1;
2181  return r->reloc;
2182}
2183
2184/* Directives: register aliases.  */
2185
2186static struct reg_entry *
2187insert_reg_alias (char *str, unsigned number, int type)
2188{
2189  struct reg_entry *new_reg;
2190  const char *name;
2191
2192  if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2193    {
2194      if (new_reg->builtin)
2195	as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2196
2197      /* Only warn about a redefinition if it's not defined as the
2198	 same register.	 */
2199      else if (new_reg->number != number || new_reg->type != type)
2200	as_warn (_("ignoring redefinition of register alias '%s'"), str);
2201
2202      return NULL;
2203    }
2204
2205  name = xstrdup (str);
2206  new_reg = XNEW (struct reg_entry);
2207
2208  new_reg->name = name;
2209  new_reg->number = number;
2210  new_reg->type = type;
2211  new_reg->builtin = FALSE;
2212  new_reg->neon = NULL;
2213
2214  if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2215    abort ();
2216
2217  return new_reg;
2218}
2219
2220static void
2221insert_neon_reg_alias (char *str, int number, int type,
2222		       struct neon_typed_alias *atype)
2223{
2224  struct reg_entry *reg = insert_reg_alias (str, number, type);
2225
2226  if (!reg)
2227    {
2228      first_error (_("attempt to redefine typed alias"));
2229      return;
2230    }
2231
2232  if (atype)
2233    {
2234      reg->neon = XNEW (struct neon_typed_alias);
2235      *reg->neon = *atype;
2236    }
2237}
2238
2239/* Look for the .req directive.	 This is of the form:
2240
2241	new_register_name .req existing_register_name
2242
2243   If we find one, or if it looks sufficiently like one that we want to
2244   handle any error here, return TRUE.  Otherwise return FALSE.  */
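
/* For example, after "acc .req r0" the names "acc" and "ACC" should both
   refer to r0; see the upper/lower-case handling below.  */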
2245
2246static bfd_boolean
2247create_register_alias (char * newname, char *p)
2248{
2249  struct reg_entry *old;
2250  char *oldname, *nbuf;
2251  size_t nlen;
2252
2253  /* The input scrubber ensures that whitespace after the mnemonic is
2254     collapsed to single spaces.  */
2255  oldname = p;
2256  if (strncmp (oldname, " .req ", 6) != 0)
2257    return FALSE;
2258
2259  oldname += 6;
2260  if (*oldname == '\0')
2261    return FALSE;
2262
2263  old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
2264  if (!old)
2265    {
2266      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
2267      return TRUE;
2268    }
2269
2270  /* If TC_CASE_SENSITIVE is defined, then newname already points to
2271     the desired alias name, and p points to its end.  If not, then
2272     the desired alias name is in the global original_case_string.  */
2273#ifdef TC_CASE_SENSITIVE
2274  nlen = p - newname;
2275#else
2276  newname = original_case_string;
2277  nlen = strlen (newname);
2278#endif
2279
2280  nbuf = xmemdup0 (newname, nlen);
2281
2282  /* Create aliases under the new name as stated; an all-lowercase
2283     version of the new name; and an all-uppercase version of the new
2284     name.  */
2285  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
2286    {
2287      for (p = nbuf; *p; p++)
2288	*p = TOUPPER (*p);
2289
2290      if (strncmp (nbuf, newname, nlen))
2291	{
2292	  /* If this attempt to create an additional alias fails, do not bother
2293	     trying to create the all-lower case alias.  We will fail and issue
2294	     a second, duplicate error message.  This situation arises when the
2295	     programmer does something like:
2296	       foo .req r0
2297	       Foo .req r1
2298	     The second .req creates the "Foo" alias but then fails to create
2299	     the artificial FOO alias because it has already been created by the
2300	     first .req.  */
2301	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
2302	    {
2303	      free (nbuf);
2304	      return TRUE;
2305	    }
2306	}
2307
2308      for (p = nbuf; *p; p++)
2309	*p = TOLOWER (*p);
2310
2311      if (strncmp (nbuf, newname, nlen))
2312	insert_reg_alias (nbuf, old->number, old->type);
2313    }
2314
2315  free (nbuf);
2316  return TRUE;
2317}
2318
2319/* Create a Neon typed/indexed register alias using directives, e.g.:
2320     X .dn d5.s32[1]
2321     Y .qn 6.s16
2322     Z .dn d7
2323     T .dn Z[0]
2324   These typed registers can be used instead of the types specified after the
2325   Neon mnemonic, so long as all operands given have types. Types can also be
2326   specified directly, e.g.:
2327     vadd d0.s32, d1.s32, d2.s32  */
2328
2329static bfd_boolean
2330create_neon_reg_alias (char *newname, char *p)
2331{
2332  enum arm_reg_type basetype;
2333  struct reg_entry *basereg;
2334  struct reg_entry mybasereg;
2335  struct neon_type ntype;
2336  struct neon_typed_alias typeinfo;
2337  char *namebuf, *nameend ATTRIBUTE_UNUSED;
2338  int namelen;
2339
2340  typeinfo.defined = 0;
2341  typeinfo.eltype.type = NT_invtype;
2342  typeinfo.eltype.size = -1;
2343  typeinfo.index = -1;
2344
2345  nameend = p;
2346
2347  if (strncmp (p, " .dn ", 5) == 0)
2348    basetype = REG_TYPE_VFD;
2349  else if (strncmp (p, " .qn ", 5) == 0)
2350    basetype = REG_TYPE_NQ;
2351  else
2352    return FALSE;
2353
2354  p += 5;
2355
2356  if (*p == '\0')
2357    return FALSE;
2358
2359  basereg = arm_reg_parse_multi (&p);
2360
2361  if (basereg && basereg->type != basetype)
2362    {
2363      as_bad (_("bad type for register"));
2364      return FALSE;
2365    }
2366
2367  if (basereg == NULL)
2368    {
2369      expressionS exp;
2370      /* Try parsing as an integer.  */
2371      my_get_expression (&exp, &p, GE_NO_PREFIX);
2372      if (exp.X_op != O_constant)
2373	{
2374	  as_bad (_("expression must be constant"));
2375	  return FALSE;
2376	}
2377      basereg = &mybasereg;
2378      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
2379						  : exp.X_add_number;
2380      basereg->neon = 0;
2381    }
2382
2383  if (basereg->neon)
2384    typeinfo = *basereg->neon;
2385
2386  if (parse_neon_type (&ntype, &p) == SUCCESS)
2387    {
2388      /* We got a type.  */
2389      if (typeinfo.defined & NTA_HASTYPE)
2390	{
2391	  as_bad (_("can't redefine the type of a register alias"));
2392	  return FALSE;
2393	}
2394
2395      typeinfo.defined |= NTA_HASTYPE;
2396      if (ntype.elems != 1)
2397	{
2398	  as_bad (_("you must specify a single type only"));
2399	  return FALSE;
2400	}
2401      typeinfo.eltype = ntype.el[0];
2402    }
2403
2404  if (skip_past_char (&p, '[') == SUCCESS)
2405    {
2406      expressionS exp;
2407      /* We got a scalar index.  */
2408
2409      if (typeinfo.defined & NTA_HASINDEX)
2410	{
2411	  as_bad (_("can't redefine the index of a scalar alias"));
2412	  return FALSE;
2413	}
2414
2415      my_get_expression (&exp, &p, GE_NO_PREFIX);
2416
2417      if (exp.X_op != O_constant)
2418	{
2419	  as_bad (_("scalar index must be constant"));
2420	  return FALSE;
2421	}
2422
2423      typeinfo.defined |= NTA_HASINDEX;
2424      typeinfo.index = exp.X_add_number;
2425
2426      if (skip_past_char (&p, ']') == FAIL)
2427	{
2428	  as_bad (_("expecting ]"));
2429	  return FALSE;
2430	}
2431    }
2432
2433  /* If TC_CASE_SENSITIVE is defined, then newname already points to
2434     the desired alias name, and p points to its end.  If not, then
2435     the desired alias name is in the global original_case_string.  */
2436#ifdef TC_CASE_SENSITIVE
2437  namelen = nameend - newname;
2438#else
2439  newname = original_case_string;
2440  namelen = strlen (newname);
2441#endif
2442
2443  namebuf = xmemdup0 (newname, namelen);
2444
2445  insert_neon_reg_alias (namebuf, basereg->number, basetype,
2446			 typeinfo.defined != 0 ? &typeinfo : NULL);
2447
2448  /* Insert name in all uppercase.  */
2449  for (p = namebuf; *p; p++)
2450    *p = TOUPPER (*p);
2451
2452  if (strncmp (namebuf, newname, namelen))
2453    insert_neon_reg_alias (namebuf, basereg->number, basetype,
2454			   typeinfo.defined != 0 ? &typeinfo : NULL);
2455
2456  /* Insert name in all lowercase.  */
2457  for (p = namebuf; *p; p++)
2458    *p = TOLOWER (*p);
2459
2460  if (strncmp (namebuf, newname, namelen))
2461    insert_neon_reg_alias (namebuf, basereg->number, basetype,
2462			   typeinfo.defined != 0 ? &typeinfo : NULL);
2463
2464  free (namebuf);
2465  return TRUE;
2466}
2467
2468/* Should never be called, as .req goes between the alias and the
2469   register name, not at the beginning of the line.  */
2470
2471static void
2472s_req (int a ATTRIBUTE_UNUSED)
2473{
2474  as_bad (_("invalid syntax for .req directive"));
2475}
2476
2477static void
2478s_dn (int a ATTRIBUTE_UNUSED)
2479{
2480  as_bad (_("invalid syntax for .dn directive"));
2481}
2482
2483static void
2484s_qn (int a ATTRIBUTE_UNUSED)
2485{
2486  as_bad (_("invalid syntax for .qn directive"));
2487}
2488
2489/* The .unreq directive deletes an alias which was previously defined
2490   by .req.  For example:
2491
2492       my_alias .req r11
2493       .unreq my_alias	  */
2494
2495static void
2496s_unreq (int a ATTRIBUTE_UNUSED)
2497{
2498  char * name;
2499  char saved_char;
2500
2501  name = input_line_pointer;
2502
2503  while (*input_line_pointer != 0
2504	 && *input_line_pointer != ' '
2505	 && *input_line_pointer != '\n')
2506    ++input_line_pointer;
2507
2508  saved_char = *input_line_pointer;
2509  *input_line_pointer = 0;
2510
2511  if (!*name)
2512    as_bad (_("invalid syntax for .unreq directive"));
2513  else
2514    {
2515      struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2516							      name);
2517
2518      if (!reg)
2519	as_bad (_("unknown register alias '%s'"), name);
2520      else if (reg->builtin)
2521	as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2522		 name);
2523      else
2524	{
2525	  char * p;
2526	  char * nbuf;
2527
2528	  hash_delete (arm_reg_hsh, name, FALSE);
2529	  free ((char *) reg->name);
2530	  if (reg->neon)
2531	    free (reg->neon);
2532	  free (reg);
2533
2534	  /* Also locate the all upper case and all lower case versions.
2535	     Do not complain if we cannot find one or the other as it
2536	     was probably deleted above.  */
2537
2538	  nbuf = strdup (name);
2539	  for (p = nbuf; *p; p++)
2540	    *p = TOUPPER (*p);
2541	  reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2542	  if (reg)
2543	    {
2544	      hash_delete (arm_reg_hsh, nbuf, FALSE);
2545	      free ((char *) reg->name);
2546	      if (reg->neon)
2547		free (reg->neon);
2548	      free (reg);
2549	    }
2550
2551	  for (p = nbuf; *p; p++)
2552	    *p = TOLOWER (*p);
2553	  reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2554	  if (reg)
2555	    {
2556	      hash_delete (arm_reg_hsh, nbuf, FALSE);
2557	      free ((char *) reg->name);
2558	      if (reg->neon)
2559		free (reg->neon);
2560	      free (reg);
2561	    }
2562
2563	  free (nbuf);
2564	}
2565    }
2566
2567  *input_line_pointer = saved_char;
2568  demand_empty_rest_of_line ();
2569}
2570
2571/* Directives: Instruction set selection.  */
2572
2573#ifdef OBJ_ELF
2574/* This code is to handle mapping symbols as defined in the ARM ELF spec.
2575   (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2576   Note that previously, $a and $t had type STT_FUNC (BSF_FUNCTION flag),
2577   and $d had type STT_OBJECT (BSF_OBJECT flag).  Now all three are untyped.  */
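
/* For example, a section that starts with ARM code, switches to Thumb code
   and then dumps a literal pool would typically be annotated with "$a",
   "$t" and "$d" mapping symbols at the corresponding offsets.  */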
2578
2579/* Create a new mapping symbol for the transition to STATE.  */
2580
2581static void
2582make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
2583{
2584  symbolS * symbolP;
2585  const char * symname;
2586  int type;
2587
2588  switch (state)
2589    {
2590    case MAP_DATA:
2591      symname = "$d";
2592      type = BSF_NO_FLAGS;
2593      break;
2594    case MAP_ARM:
2595      symname = "$a";
2596      type = BSF_NO_FLAGS;
2597      break;
2598    case MAP_THUMB:
2599      symname = "$t";
2600      type = BSF_NO_FLAGS;
2601      break;
2602    default:
2603      abort ();
2604    }
2605
2606  symbolP = symbol_new (symname, now_seg, value, frag);
2607  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
2608
2609  switch (state)
2610    {
2611    case MAP_ARM:
2612      THUMB_SET_FUNC (symbolP, 0);
2613      ARM_SET_THUMB (symbolP, 0);
2614      ARM_SET_INTERWORK (symbolP, support_interwork);
2615      break;
2616
2617    case MAP_THUMB:
2618      THUMB_SET_FUNC (symbolP, 1);
2619      ARM_SET_THUMB (symbolP, 1);
2620      ARM_SET_INTERWORK (symbolP, support_interwork);
2621      break;
2622
2623    case MAP_DATA:
2624    default:
2625      break;
2626    }
2627
2628  /* Save the mapping symbols for future reference.  Also check that
2629     we do not place two mapping symbols at the same offset within a
2630     frag.  We'll handle overlap between frags in
2631     check_mapping_symbols.
2632
2633     If .fill or another data-filling directive generates zero-sized data,
2634     the mapping symbol for the following code will have the same value
2635     as the one generated for the data-filling directive.  In this case,
2636     we replace the old symbol with the new one at the same address.  */
2637  if (value == 0)
2638    {
2639      if (frag->tc_frag_data.first_map != NULL)
2640	{
2641	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
2642	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
2643	}
2644      frag->tc_frag_data.first_map = symbolP;
2645    }
2646  if (frag->tc_frag_data.last_map != NULL)
2647    {
2648      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
2649      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
2650	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
2651    }
2652  frag->tc_frag_data.last_map = symbolP;
2653}
2654
2655/* We must sometimes convert a region marked as code to data during
2656   code alignment, if an odd number of bytes have to be padded.  The
2657   code mapping symbol is pushed to an aligned address.  */
2658
2659static void
2660insert_data_mapping_symbol (enum mstate state,
2661			    valueT value, fragS *frag, offsetT bytes)
2662{
2663  /* If there was already a mapping symbol, remove it.  */
2664  if (frag->tc_frag_data.last_map != NULL
2665      && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
2666    {
2667      symbolS *symp = frag->tc_frag_data.last_map;
2668
2669      if (value == 0)
2670	{
2671	  know (frag->tc_frag_data.first_map == symp);
2672	  frag->tc_frag_data.first_map = NULL;
2673	}
2674      frag->tc_frag_data.last_map = NULL;
2675      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
2676    }
2677
2678  make_mapping_symbol (MAP_DATA, value, frag);
2679  make_mapping_symbol (state, value + bytes, frag);
2680}
2681
2682static void mapping_state_2 (enum mstate state, int max_chars);
2683
2684/* Set the mapping state to STATE.  Only call this when about to
2685   emit some STATE bytes to the file.  */
2686
2687#define TRANSITION(from, to) (mapstate == (from) && state == (to))
2688void
2689mapping_state (enum mstate state)
2690{
2691  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2692
2693  if (mapstate == state)
2694    /* The mapping symbol has already been emitted.
2695       There is nothing else to do.  */
2696    return;
2697
2698  if (state == MAP_ARM || state == MAP_THUMB)
2699    /*  PR gas/12931
2700	All ARM instructions require 4-byte alignment.
2701	(Almost) all Thumb instructions require 2-byte alignment.
2702
2703	When emitting instructions into any section, mark the section
2704	appropriately.
2705
2706	Some Thumb instructions are alignment-sensitive modulo 4 bytes,
2707	but themselves require 2-byte alignment; this applies to some
2708	PC-relative forms.  However, these cases will involve implicit
2709	literal pool generation or an explicit .align >=2, both of
2710	which will cause the section to be marked with sufficient
2711	alignment.  Thus, we don't handle those cases here.  */
2712    record_alignment (now_seg, state == MAP_ARM ? 2 : 1);
2713
2714  if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
2715    /* This case will be evaluated later.  */
2716    return;
2717
2718  mapping_state_2 (state, 0);
2719}
2720
2721/* Same as mapping_state, but MAX_CHARS bytes have already been
2722   allocated.  Put the mapping symbol that far back.  */
2723
2724static void
2725mapping_state_2 (enum mstate state, int max_chars)
2726{
2727  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2728
2729  if (!SEG_NORMAL (now_seg))
2730    return;
2731
2732  if (mapstate == state)
2733    /* The mapping symbol has already been emitted.
2734       There is nothing else to do.  */
2735    return;
2736
2737  if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
2738	  || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
2739    {
2740      struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
2741      const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);
2742
2743      if (add_symbol)
2744	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
2745    }
2746
2747  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2748  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
2749}
2750#undef TRANSITION
2751#else
2752#define mapping_state(x) ((void)0)
2753#define mapping_state_2(x, y) ((void)0)
2754#endif
2755
2756/* Find the real, Thumb encoded start of a Thumb function.  */
2757
2758#ifdef OBJ_COFF
2759static symbolS *
2760find_real_start (symbolS * symbolP)
2761{
2762  char *       real_start;
2763  const char * name = S_GET_NAME (symbolP);
2764  symbolS *    new_target;
2765
2766  /* This definition must agree with the one in gcc/config/arm/thumb.c.	 */
2767#define STUB_NAME ".real_start_of"
2768
2769  if (name == NULL)
2770    abort ();
2771
2772  /* The compiler may generate BL instructions to local labels because
2773     it needs to perform a branch to a far away location. These labels
2774     do not have a corresponding ".real_start_of" label.  We check
2775     both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2776     the ".real_start_of" convention for nonlocal branches.  */
2777  if (S_IS_LOCAL (symbolP) || name[0] == '.')
2778    return symbolP;
2779
2780  real_start = concat (STUB_NAME, name, NULL);
2781  new_target = symbol_find (real_start);
2782  free (real_start);
2783
2784  if (new_target == NULL)
2785    {
2786      as_warn (_("Failed to find real start of function: %s\n"), name);
2787      new_target = symbolP;
2788    }
2789
2790  return new_target;
2791}
2792#endif
2793
2794static void
2795opcode_select (int width)
2796{
2797  switch (width)
2798    {
2799    case 16:
2800      if (! thumb_mode)
2801	{
2802	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2803	    as_bad (_("selected processor does not support THUMB opcodes"));
2804
2805	  thumb_mode = 1;
2806	  /* No need to force the alignment, since we will have been
2807	     coming from ARM mode, which is word-aligned.  */
2808	  record_alignment (now_seg, 1);
2809	}
2810      break;
2811
2812    case 32:
2813      if (thumb_mode)
2814	{
2815	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2816	    as_bad (_("selected processor does not support ARM opcodes"));
2817
2818	  thumb_mode = 0;
2819
2820	  if (!need_pass_2)
2821	    frag_align (2, 0, 0);
2822
2823	  record_alignment (now_seg, 1);
2824	}
2825      break;
2826
2827    default:
2828      as_bad (_("invalid instruction size selected (%d)"), width);
2829    }
2830}
2831
2832static void
2833s_arm (int ignore ATTRIBUTE_UNUSED)
2834{
2835  opcode_select (32);
2836  demand_empty_rest_of_line ();
2837}
2838
2839static void
2840s_thumb (int ignore ATTRIBUTE_UNUSED)
2841{
2842  opcode_select (16);
2843  demand_empty_rest_of_line ();
2844}
2845
2846static void
2847s_code (int unused ATTRIBUTE_UNUSED)
2848{
2849  int temp;
2850
2851  temp = get_absolute_expression ();
2852  switch (temp)
2853    {
2854    case 16:
2855    case 32:
2856      opcode_select (temp);
2857      break;
2858
2859    default:
2860      as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2861    }
2862}
2863
2864static void
2865s_force_thumb (int ignore ATTRIBUTE_UNUSED)
2866{
2867  /* If we are not already in thumb mode, go into it, EVEN if
2868     the target processor does not support thumb instructions.
2869     This is used by gcc/config/arm/lib1funcs.asm for example
2870     to compile interworking support functions even if the
2871     target processor should not support interworking.	*/
2872  if (! thumb_mode)
2873    {
2874      thumb_mode = 2;
2875      record_alignment (now_seg, 1);
2876    }
2877
2878  demand_empty_rest_of_line ();
2879}
2880
2881static void
2882s_thumb_func (int ignore ATTRIBUTE_UNUSED)
2883{
2884  s_thumb (0);
2885
2886  /* The following label is the name/address of the start of a Thumb function.
2887     We need to know this for the interworking support.	 */
2888  label_is_thumb_function_name = TRUE;
2889}
2890
2891/* Perform a .set directive, but also mark the alias as
2892   being a thumb function.  */
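
/* For example, ".thumb_set alias, target" should behave like
   ".set alias, target" while also marking "alias" as a Thumb function
   for interworking purposes.  */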
2893
2894static void
2895s_thumb_set (int equiv)
2896{
2897  /* XXX the following is a duplicate of the code for s_set() in read.c.
2898     We cannot just call that code as we need to get at the symbol that
2899     is created.  */
2900  char *    name;
2901  char	    delim;
2902  char *    end_name;
2903  symbolS * symbolP;
2904
2905  /* Especial apologies for the random logic:
2906     This just grew, and could be parsed much more simply!
2907     Dean - in haste.  */
2908  delim	    = get_symbol_name (& name);
2909  end_name  = input_line_pointer;
2910  (void) restore_line_pointer (delim);
2911
2912  if (*input_line_pointer != ',')
2913    {
2914      *end_name = 0;
2915      as_bad (_("expected comma after name \"%s\""), name);
2916      *end_name = delim;
2917      ignore_rest_of_line ();
2918      return;
2919    }
2920
2921  input_line_pointer++;
2922  *end_name = 0;
2923
2924  if (name[0] == '.' && name[1] == '\0')
2925    {
2926      /* XXX - this should not happen to .thumb_set.  */
2927      abort ();
2928    }
2929
2930  if ((symbolP = symbol_find (name)) == NULL
2931      && (symbolP = md_undefined_symbol (name)) == NULL)
2932    {
2933#ifndef NO_LISTING
2934      /* When doing symbol listings, play games with dummy fragments living
2935	 outside the normal fragment chain to record the file and line info
2936	 for this symbol.  */
2937      if (listing & LISTING_SYMBOLS)
2938	{
2939	  extern struct list_info_struct * listing_tail;
2940	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));
2941
2942	  memset (dummy_frag, 0, sizeof (fragS));
2943	  dummy_frag->fr_type = rs_fill;
2944	  dummy_frag->line = listing_tail;
2945	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
2946	  dummy_frag->fr_symbol = symbolP;
2947	}
2948      else
2949#endif
2950	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);
2951
2952#ifdef OBJ_COFF
2953      /* "set" symbols are local unless otherwise specified.  */
2954      SF_SET_LOCAL (symbolP);
2955#endif /* OBJ_COFF  */
2956    }				/* Make a new symbol.  */
2957
2958  symbol_table_insert (symbolP);
2959
2960  * end_name = delim;
2961
2962  if (equiv
2963      && S_IS_DEFINED (symbolP)
2964      && S_GET_SEGMENT (symbolP) != reg_section)
2965    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));
2966
2967  pseudo_set (symbolP);
2968
2969  demand_empty_rest_of_line ();
2970
2971  /* XXX Now we come to the Thumb specific bit of code.	 */
2972
2973  THUMB_SET_FUNC (symbolP, 1);
2974  ARM_SET_THUMB (symbolP, 1);
2975#if defined OBJ_ELF || defined OBJ_COFF
2976  ARM_SET_INTERWORK (symbolP, support_interwork);
2977#endif
2978}
2979
2980/* Directives: Mode selection.  */
2981
2982/* .syntax [unified|divided] - choose the new unified syntax
2983   (same for Arm and Thumb encoding, modulo slight differences in what
2984   can be represented) or the old divergent syntax for each mode.  */
2985static void
2986s_syntax (int unused ATTRIBUTE_UNUSED)
2987{
2988  char *name, delim;
2989
2990  delim = get_symbol_name (& name);
2991
2992  if (!strcasecmp (name, "unified"))
2993    unified_syntax = TRUE;
2994  else if (!strcasecmp (name, "divided"))
2995    unified_syntax = FALSE;
2996  else
2997    {
2998      as_bad (_("unrecognized syntax mode \"%s\""), name);
2999      return;
3000    }
3001  (void) restore_line_pointer (delim);
3002  demand_empty_rest_of_line ();
3003}
3004
3005/* Directives: sectioning and alignment.  */
3006
3007static void
3008s_bss (int ignore ATTRIBUTE_UNUSED)
3009{
3010  /* We don't support putting frags in the BSS segment; we fake it by
3011     marking in_bss, then looking at s_skip for clues.	*/
3012  subseg_set (bss_section, 0);
3013  demand_empty_rest_of_line ();
3014
3015#ifdef md_elf_section_change_hook
3016  md_elf_section_change_hook ();
3017#endif
3018}
3019
3020static void
3021s_even (int ignore ATTRIBUTE_UNUSED)
3022{
3023  /* Never make a frag if we expect an extra pass.  */
3024  if (!need_pass_2)
3025    frag_align (1, 0, 0);
3026
3027  record_alignment (now_seg, 1);
3028
3029  demand_empty_rest_of_line ();
3030}
3031
3032/* Directives: CodeComposer Studio.  */
3033
3034/*  .ref  (for CodeComposer Studio syntax only).  */
3035static void
3036s_ccs_ref (int unused ATTRIBUTE_UNUSED)
3037{
3038  if (codecomposer_syntax)
3039    ignore_rest_of_line ();
3040  else
3041    as_bad (_(".ref pseudo-op only available with -mccs flag."));
3042}
3043
3044/*  If name is not NULL, then it is used for marking the beginning of a
3045    function, whereas if it is NULL then it marks the end of the function.  */
3046static void
3047asmfunc_debug (const char * name)
3048{
3049  static const char * last_name = NULL;
3050
3051  if (name != NULL)
3052    {
3053      gas_assert (last_name == NULL);
3054      last_name = name;
3055
3056      if (debug_type == DEBUG_STABS)
3057         stabs_generate_asm_func (name, name);
3058    }
3059  else
3060    {
3061      gas_assert (last_name != NULL);
3062
3063      if (debug_type == DEBUG_STABS)
3064        stabs_generate_asm_endfunc (last_name, last_name);
3065
3066      last_name = NULL;
3067    }
3068}
3069
3070static void
3071s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
3072{
3073  if (codecomposer_syntax)
3074    {
3075      switch (asmfunc_state)
3076	{
3077	case OUTSIDE_ASMFUNC:
3078	  asmfunc_state = WAITING_ASMFUNC_NAME;
3079	  break;
3080
3081	case WAITING_ASMFUNC_NAME:
3082	  as_bad (_(".asmfunc repeated."));
3083	  break;
3084
3085	case WAITING_ENDASMFUNC:
3086	  as_bad (_(".asmfunc without function."));
3087	  break;
3088	}
3089      demand_empty_rest_of_line ();
3090    }
3091  else
3092    as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3093}
3094
3095static void
3096s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
3097{
3098  if (codecomposer_syntax)
3099    {
3100      switch (asmfunc_state)
3101	{
3102	case OUTSIDE_ASMFUNC:
3103	  as_bad (_(".endasmfunc without a .asmfunc."));
3104	  break;
3105
3106	case WAITING_ASMFUNC_NAME:
3107	  as_bad (_(".endasmfunc without function."));
3108	  break;
3109
3110	case WAITING_ENDASMFUNC:
3111	  asmfunc_state = OUTSIDE_ASMFUNC;
3112	  asmfunc_debug (NULL);
3113	  break;
3114	}
3115      demand_empty_rest_of_line ();
3116    }
3117  else
3118    as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3119}
3120
3121static void
3122s_ccs_def (int name)
3123{
3124  if (codecomposer_syntax)
3125    s_globl (name);
3126  else
3127    as_bad (_(".def pseudo-op only available with -mccs flag."));
3128}
3129
3130/* Directives: Literal pools.  */
3131
3132static literal_pool *
3133find_literal_pool (void)
3134{
3135  literal_pool * pool;
3136
3137  for (pool = list_of_pools; pool != NULL; pool = pool->next)
3138    {
3139      if (pool->section == now_seg
3140	  && pool->sub_section == now_subseg)
3141	break;
3142    }
3143
3144  return pool;
3145}
3146
3147static literal_pool *
3148find_or_make_literal_pool (void)
3149{
3150  /* Next literal pool ID number.  */
3151  static unsigned int latest_pool_num = 1;
3152  literal_pool *      pool;
3153
3154  pool = find_literal_pool ();
3155
3156  if (pool == NULL)
3157    {
3158      /* Create a new pool.  */
3159      pool = XNEW (literal_pool);
3160      if (! pool)
3161	return NULL;
3162
3163      pool->next_free_entry = 0;
3164      pool->section	    = now_seg;
3165      pool->sub_section	    = now_subseg;
3166      pool->next	    = list_of_pools;
3167      pool->symbol	    = NULL;
3168      pool->alignment	    = 2;
3169
3170      /* Add it to the list.  */
3171      list_of_pools = pool;
3172    }
3173
3174  /* New pools, and emptied pools, will have a NULL symbol.  */
3175  if (pool->symbol == NULL)
3176    {
3177      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3178				    (valueT) 0, &zero_address_frag);
3179      pool->id = latest_pool_num ++;
3180    }
3181
3182  /* Done.  */
3183  return pool;
3184}
3185
3186/* Add the literal in the global 'inst'
3187   structure to the relevant literal pool.  */
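
/* For example, "ldr r0, =0x12345678" typically ends up here with a 4-byte
   request, while 64-bit literal loads use the 8-byte path below and are
   stored as two consecutive 4-byte entries.  */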
3188
3189static int
3190add_to_lit_pool (unsigned int nbytes)
3191{
3192#define PADDING_SLOT 0x1
3193#define LIT_ENTRY_SIZE_MASK 0xFF
3194  literal_pool * pool;
3195  unsigned int entry, pool_size = 0;
3196  bfd_boolean padding_slot_p = FALSE;
3197  unsigned imm1 = 0;
3198  unsigned imm2 = 0;
3199
3200  if (nbytes == 8)
3201    {
3202      imm1 = inst.operands[1].imm;
3203      imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
3204	       : inst.reloc.exp.X_unsigned ? 0
3205	       : ((bfd_int64_t) inst.operands[1].imm) >> 32);
3206      if (target_big_endian)
3207	{
3208	  imm1 = imm2;
3209	  imm2 = inst.operands[1].imm;
3210	}
3211    }
3212
3213  pool = find_or_make_literal_pool ();
3214
3215  /* Check if this literal value is already in the pool.  */
3216  for (entry = 0; entry < pool->next_free_entry; entry ++)
3217    {
3218      if (nbytes == 4)
3219	{
3220	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
3221	      && (inst.reloc.exp.X_op == O_constant)
3222	      && (pool->literals[entry].X_add_number
3223		  == inst.reloc.exp.X_add_number)
3224	      && (pool->literals[entry].X_md == nbytes)
3225	      && (pool->literals[entry].X_unsigned
3226		  == inst.reloc.exp.X_unsigned))
3227	    break;
3228
3229	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
3230	      && (inst.reloc.exp.X_op == O_symbol)
3231	      && (pool->literals[entry].X_add_number
3232		  == inst.reloc.exp.X_add_number)
3233	      && (pool->literals[entry].X_add_symbol
3234		  == inst.reloc.exp.X_add_symbol)
3235	      && (pool->literals[entry].X_op_symbol
3236		  == inst.reloc.exp.X_op_symbol)
3237	      && (pool->literals[entry].X_md == nbytes))
3238	    break;
3239	}
3240      else if ((nbytes == 8)
3241	       && !(pool_size & 0x7)
3242	       && ((entry + 1) != pool->next_free_entry)
3243	       && (pool->literals[entry].X_op == O_constant)
3244	       && (pool->literals[entry].X_add_number == (offsetT) imm1)
3245	       && (pool->literals[entry].X_unsigned
3246		   == inst.reloc.exp.X_unsigned)
3247	       && (pool->literals[entry + 1].X_op == O_constant)
3248	       && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
3249	       && (pool->literals[entry + 1].X_unsigned
3250		   == inst.reloc.exp.X_unsigned))
3251	break;
3252
3253      padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
3254      if (padding_slot_p && (nbytes == 4))
3255	break;
3256
3257      pool_size += 4;
3258    }
3259
3260  /* Do we need to create a new entry?	*/
3261  if (entry == pool->next_free_entry)
3262    {
3263      if (entry >= MAX_LITERAL_POOL_SIZE)
3264	{
3265	  inst.error = _("literal pool overflow");
3266	  return FAIL;
3267	}
3268
3269      if (nbytes == 8)
3270	{
3271	  /* For 8-byte entries, we align to an 8-byte boundary and split
3272	     the value into two 4-byte entries, because on a 32-bit host
3273	     8-byte constants are treated as bignums and thus saved in
3274	     "generic_bignum", which will be overwritten by later
3275	     assignments.
3276
3277	     We also need to make sure there is enough space for
3278	     the split.
3279
3280	     We also check to make sure the literal operand is a
3281	     constant number.  */
3282	  if (!(inst.reloc.exp.X_op == O_constant
3283	        || inst.reloc.exp.X_op == O_big))
3284	    {
3285	      inst.error = _("invalid type for literal pool");
3286	      return FAIL;
3287	    }
3288	  else if (pool_size & 0x7)
3289	    {
3290	      if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
3291		{
3292		  inst.error = _("literal pool overflow");
3293		  return FAIL;
3294		}
3295
3296	      pool->literals[entry] = inst.reloc.exp;
3297	      pool->literals[entry].X_op = O_constant;
3298	      pool->literals[entry].X_add_number = 0;
3299	      pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
3300	      pool->next_free_entry += 1;
3301	      pool_size += 4;
3302	    }
3303	  else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
3304	    {
3305	      inst.error = _("literal pool overflow");
3306	      return FAIL;
3307	    }
3308
3309	  pool->literals[entry] = inst.reloc.exp;
3310	  pool->literals[entry].X_op = O_constant;
3311	  pool->literals[entry].X_add_number = imm1;
3312	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
3313	  pool->literals[entry++].X_md = 4;
3314	  pool->literals[entry] = inst.reloc.exp;
3315	  pool->literals[entry].X_op = O_constant;
3316	  pool->literals[entry].X_add_number = imm2;
3317	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
3318	  pool->literals[entry].X_md = 4;
3319	  pool->alignment = 3;
3320	  pool->next_free_entry += 1;
3321	}
3322      else
3323	{
3324	  pool->literals[entry] = inst.reloc.exp;
3325	  pool->literals[entry].X_md = 4;
3326	}
3327
3328#ifdef OBJ_ELF
3329      /* PR ld/12974: Record the location of the first source line to reference
3330	 this entry in the literal pool.  If it turns out during linking that the
3331	 symbol does not exist we will be able to give an accurate line number for
3332	 the (first use of the) missing reference.  */
3333      if (debug_type == DEBUG_DWARF2)
3334	dwarf2_where (pool->locs + entry);
3335#endif
3336      pool->next_free_entry += 1;
3337    }
3338  else if (padding_slot_p)
3339    {
3340      pool->literals[entry] = inst.reloc.exp;
3341      pool->literals[entry].X_md = nbytes;
3342    }
3343
3344  inst.reloc.exp.X_op	      = O_symbol;
3345  inst.reloc.exp.X_add_number = pool_size;
3346  inst.reloc.exp.X_add_symbol = pool->symbol;
3347
3348  return SUCCESS;
3349}
3350
3351bfd_boolean
3352tc_start_label_without_colon (void)
3353{
3354  bfd_boolean ret = TRUE;
3355
3356  if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
3357    {
3358      const char *label = input_line_pointer;
3359
3360      while (!is_end_of_line[(int) label[-1]])
3361	--label;
3362
3363      if (*label == '.')
3364	{
3365	  as_bad (_("Invalid label '%s'"), label);
3366	  ret = FALSE;
3367	}
3368
3369      asmfunc_debug (label);
3370
3371      asmfunc_state = WAITING_ENDASMFUNC;
3372    }
3373
3374  return ret;
3375}
3376
3377/* Can't use symbol_new here, so have to create a symbol and then at
3378   a later date assign it a value.  That's what these functions do.  */
3379
3380static void
3381symbol_locate (symbolS *    symbolP,
3382	       const char * name,	/* It is copied, the caller can modify.	 */
3383	       segT	    segment,	/* Segment identifier (SEG_<something>).  */
3384	       valueT	    valu,	/* Symbol value.  */
3385	       fragS *	    frag)	/* Associated fragment.	 */
3386{
3387  size_t name_length;
3388  char * preserved_copy_of_name;
3389
3390  name_length = strlen (name) + 1;   /* +1 for \0.  */
3391  obstack_grow (&notes, name, name_length);
3392  preserved_copy_of_name = (char *) obstack_finish (&notes);
3393
3394#ifdef tc_canonicalize_symbol_name
3395  preserved_copy_of_name =
3396    tc_canonicalize_symbol_name (preserved_copy_of_name);
3397#endif
3398
3399  S_SET_NAME (symbolP, preserved_copy_of_name);
3400
3401  S_SET_SEGMENT (symbolP, segment);
3402  S_SET_VALUE (symbolP, valu);
3403  symbol_clear_list_pointers (symbolP);
3404
3405  symbol_set_frag (symbolP, frag);
3406
3407  /* Link to end of symbol chain.  */
3408  {
3409    extern int symbol_table_frozen;
3410
3411    if (symbol_table_frozen)
3412      abort ();
3413  }
3414
3415  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
3416
3417  obj_symbol_new_hook (symbolP);
3418
3419#ifdef tc_symbol_new_hook
3420  tc_symbol_new_hook (symbolP);
3421#endif
3422
3423#ifdef DEBUG_SYMS
3424  verify_symbol_chain (symbol_rootP, symbol_lastP);
3425#endif /* DEBUG_SYMS  */
3426}
3427
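/* Implement the .ltorg directive: dump the contents of the current
   literal pool at this point in the output and mark the pool as empty.  */
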
3428static void
3429s_ltorg (int ignored ATTRIBUTE_UNUSED)
3430{
3431  unsigned int entry;
3432  literal_pool * pool;
3433  char sym_name[20];
3434
3435  pool = find_literal_pool ();
3436  if (pool == NULL
3437      || pool->symbol == NULL
3438      || pool->next_free_entry == 0)
3439    return;
3440
3441  /* Align the pool as we will be making word (or larger) accesses.
3442     Only make a frag if we have to.  */
3443  if (!need_pass_2)
3444    frag_align (pool->alignment, 0, 0);
3445
3446  record_alignment (now_seg, 2);
3447
3448#ifdef OBJ_ELF
3449  seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
3450  make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
3451#endif
3452  sprintf (sym_name, "$$lit_\002%x", pool->id);
3453
3454  symbol_locate (pool->symbol, sym_name, now_seg,
3455		 (valueT) frag_now_fix (), frag_now);
3456  symbol_table_insert (pool->symbol);
3457
3458  ARM_SET_THUMB (pool->symbol, thumb_mode);
3459
3460#if defined OBJ_COFF || defined OBJ_ELF
3461  ARM_SET_INTERWORK (pool->symbol, support_interwork);
3462#endif
3463
3464  for (entry = 0; entry < pool->next_free_entry; entry ++)
3465    {
3466#ifdef OBJ_ELF
3467      if (debug_type == DEBUG_DWARF2)
3468	dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
3469#endif
3470      /* First output the expression in the instruction to the pool.  */
3471      emit_expr (&(pool->literals[entry]),
3472		 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
3473    }
3474
3475  /* Mark the pool as empty.  */
3476  pool->next_free_entry = 0;
3477  pool->symbol = NULL;
3478}
3479
3480#ifdef OBJ_ELF
3481/* Forward declarations for functions below, in the MD interface
3482   section.  */
3483static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3484static valueT create_unwind_entry (int);
3485static void start_unwind_section (const segT, int);
3486static void add_unwind_opcode (valueT, int);
3487static void flush_pending_unwind (void);
3488
3489/* Directives: Data.  */
3490
3491static void
3492s_arm_elf_cons (int nbytes)
3493{
3494  expressionS exp;
3495
3496#ifdef md_flush_pending_output
3497  md_flush_pending_output ();
3498#endif
3499
3500  if (is_it_end_of_statement ())
3501    {
3502      demand_empty_rest_of_line ();
3503      return;
3504    }
3505
3506#ifdef md_cons_align
3507  md_cons_align (nbytes);
3508#endif
3509
3510  mapping_state (MAP_DATA);
3511  do
3512    {
3513      int reloc;
3514      char *base = input_line_pointer;
3515
3516      expression (& exp);
3517
3518      if (exp.X_op != O_symbol)
3519	emit_expr (&exp, (unsigned int) nbytes);
3520      else
3521	{
3522	  char *before_reloc = input_line_pointer;
3523	  reloc = parse_reloc (&input_line_pointer);
3524	  if (reloc == -1)
3525	    {
3526	      as_bad (_("unrecognized relocation suffix"));
3527	      ignore_rest_of_line ();
3528	      return;
3529	    }
3530	  else if (reloc == BFD_RELOC_UNUSED)
3531	    emit_expr (&exp, (unsigned int) nbytes);
3532	  else
3533	    {
3534	      reloc_howto_type *howto = (reloc_howto_type *)
3535		  bfd_reloc_type_lookup (stdoutput,
3536					 (bfd_reloc_code_real_type) reloc);
3537	      int size = bfd_get_reloc_size (howto);
3538
3539	      if (reloc == BFD_RELOC_ARM_PLT32)
3540		{
3541		  as_bad (_("(plt) is only valid on branch targets"));
3542		  reloc = BFD_RELOC_UNUSED;
3543		  size = 0;
3544		}
3545
3546	      if (size > nbytes)
3547		as_bad (_("%s relocations do not fit in %d bytes"),
3548			howto->name, nbytes);
3549	      else
3550		{
3551		  /* We've parsed an expression stopping at O_symbol.
3552		     But there may be more expression left now that we
3553		     have parsed the relocation marker.  Parse it again.
3554		     XXX Surely there is a cleaner way to do this.  */
3555		  char *p = input_line_pointer;
3556		  int offset;
3557		  char *save_buf = XNEWVEC (char, input_line_pointer - base);
3558
3559		  memcpy (save_buf, base, input_line_pointer - base);
3560		  memmove (base + (input_line_pointer - before_reloc),
3561			   base, before_reloc - base);
3562
3563		  input_line_pointer = base + (input_line_pointer-before_reloc);
3564		  expression (&exp);
3565		  memcpy (base, save_buf, p - base);
3566
3567		  offset = nbytes - size;
3568		  p = frag_more (nbytes);
3569		  memset (p, 0, nbytes);
3570		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
3571			       size, &exp, 0, (enum bfd_reloc_code_real) reloc);
3572		  free (save_buf);
3573		}
3574	    }
3575	}
3576    }
3577  while (*input_line_pointer++ == ',');
3578
3579  /* Put terminator back into stream.  */
3580  input_line_pointer --;
3581  demand_empty_rest_of_line ();
3582}
3583
3584/* Emit an expression containing a 32-bit thumb instruction.
3585   Implementation based on put_thumb32_insn.  */
3586
3587static void
3588emit_thumb32_expr (expressionS * exp)
3589{
3590  expressionS exp_high = *exp;
3591
3592  exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3593  emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3594  exp->X_add_number &= 0xffff;
3595  emit_expr (exp, (unsigned int) THUMB_SIZE);
3596}
3597
3598/*  Guess the instruction size based on the opcode.  */
3599
3600static int
3601thumb_insn_size (int opcode)
3602{
3603  if ((unsigned int) opcode < 0xe800u)
3604    return 2;
3605  else if ((unsigned int) opcode >= 0xe8000000u)
3606    return 4;
3607  else
3608    return 0;
3609}
3610
3611static bfd_boolean
3612emit_insn (expressionS *exp, int nbytes)
3613{
3614  int size = 0;
3615
3616  if (exp->X_op == O_constant)
3617    {
3618      size = nbytes;
3619
3620      if (size == 0)
3621	size = thumb_insn_size (exp->X_add_number);
3622
3623      if (size != 0)
3624	{
3625	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
3626	    {
3627	      as_bad (_(".inst.n operand too big. "\
3628			"Use .inst.w instead"));
3629	      size = 0;
3630	    }
3631	  else
3632	    {
3633	      if (now_it.state == AUTOMATIC_IT_BLOCK)
3634		set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
3635	      else
3636		set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);
3637
3638	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
3639		emit_thumb32_expr (exp);
3640	      else
3641		emit_expr (exp, (unsigned int) size);
3642
3643	      it_fsm_post_encode ();
3644	    }
3645	}
3646      else
3647	as_bad (_("cannot determine Thumb instruction size. "	\
3648		  "Use .inst.n/.inst.w instead"));
3649    }
3650  else
3651    as_bad (_("constant expression required"));
3652
3653  return (size != 0);
3654}
3655
3656/* Like s_arm_elf_cons but do not use md_cons_align and
3657   set the mapping state to MAP_ARM/MAP_THUMB.  */
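
/* For example, in Thumb code:
     .inst.n 0x4770		@ bx lr
     .inst.w 0xf3af8000		@ nop.w
   Each constant is emitted as an instruction of the requested width and
   the mapping state is left as code rather than data.  */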
3658
3659static void
3660s_arm_elf_inst (int nbytes)
3661{
3662  if (is_it_end_of_statement ())
3663    {
3664      demand_empty_rest_of_line ();
3665      return;
3666    }
3667
3668  /* Calling mapping_state () here will not change ARM/THUMB,
3669     but will ensure that we are not in the DATA state.  */
3670
3671  if (thumb_mode)
3672    mapping_state (MAP_THUMB);
3673  else
3674    {
3675      if (nbytes != 0)
3676	{
3677	  as_bad (_("width suffixes are invalid in ARM mode"));
3678	  ignore_rest_of_line ();
3679	  return;
3680	}
3681
3682      nbytes = 4;
3683
3684      mapping_state (MAP_ARM);
3685    }
3686
3687  do
3688    {
3689      expressionS exp;
3690
3691      expression (& exp);
3692
3693      if (! emit_insn (& exp, nbytes))
3694	{
3695	  ignore_rest_of_line ();
3696	  return;
3697	}
3698    }
3699  while (*input_line_pointer++ == ',');
3700
3701  /* Put terminator back into stream.  */
3702  input_line_pointer --;
3703  demand_empty_rest_of_line ();
3704}
3705
3706/* Parse a .rel31 directive.  */
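
/* For example, ".rel31 1, handler" should emit a 32-bit word whose low 31
   bits form a self-relative (R_ARM_PREL31) offset to "handler" and whose
   top bit is set.  */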
3707
3708static void
3709s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
3710{
3711  expressionS exp;
3712  char *p;
3713  valueT highbit;
3714
3715  highbit = 0;
3716  if (*input_line_pointer == '1')
3717    highbit = 0x80000000;
3718  else if (*input_line_pointer != '0')
3719    as_bad (_("expected 0 or 1"));
3720
3721  input_line_pointer++;
3722  if (*input_line_pointer != ',')
3723    as_bad (_("missing comma"));
3724  input_line_pointer++;
3725
3726#ifdef md_flush_pending_output
3727  md_flush_pending_output ();
3728#endif
3729
3730#ifdef md_cons_align
3731  md_cons_align (4);
3732#endif
3733
3734  mapping_state (MAP_DATA);
3735
3736  expression (&exp);
3737
3738  p = frag_more (4);
3739  md_number_to_chars (p, highbit, 4);
3740  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
3741	       BFD_RELOC_ARM_PREL31);
3742
3743  demand_empty_rest_of_line ();
3744}
3745
3746/* Directives: AEABI stack-unwind tables.  */
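
/* A typical (compiler-generated) sequence looks roughly like:

     .fnstart
     .save	{r4, lr}
     ...
     .fnend

   The directives below collect this information and emit the EHABI
   unwind opcodes and index table entries.  */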
3747
3748/* Parse an unwind_fnstart directive.  Simply records the current location.  */
3749
3750static void
3751s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3752{
3753  demand_empty_rest_of_line ();
3754  if (unwind.proc_start)
3755    {
3756      as_bad (_("duplicate .fnstart directive"));
3757      return;
3758    }
3759
3760  /* Mark the start of the function.  */
3761  unwind.proc_start = expr_build_dot ();
3762
3763  /* Reset the rest of the unwind info.	 */
3764  unwind.opcode_count = 0;
3765  unwind.table_entry = NULL;
3766  unwind.personality_routine = NULL;
3767  unwind.personality_index = -1;
3768  unwind.frame_size = 0;
3769  unwind.fp_offset = 0;
3770  unwind.fp_reg = REG_SP;
3771  unwind.fp_used = 0;
3772  unwind.sp_restored = 0;
3773}
3774
3775
3776/* Parse a handlerdata directive.  Creates the exception handling table entry
3777   for the function.  */
3778
3779static void
3780s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3781{
3782  demand_empty_rest_of_line ();
3783  if (!unwind.proc_start)
3784    as_bad (MISSING_FNSTART);
3785
3786  if (unwind.table_entry)
3787    as_bad (_("duplicate .handlerdata directive"));
3788
3789  create_unwind_entry (1);
3790}
3791
3792/* Parse an unwind_fnend directive.  Generates the index table entry.  */
3793
3794static void
3795s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
3796{
3797  long where;
3798  char *ptr;
3799  valueT val;
3800  unsigned int marked_pr_dependency;
3801
3802  demand_empty_rest_of_line ();
3803
3804  if (!unwind.proc_start)
3805    {
3806      as_bad (_(".fnend directive without .fnstart"));
3807      return;
3808    }
3809
3810  /* Add eh table entry.  */
3811  if (unwind.table_entry == NULL)
3812    val = create_unwind_entry (0);
3813  else
3814    val = 0;
3815
3816  /* Add index table entry.  This is two words.	 */
3817  start_unwind_section (unwind.saved_seg, 1);
3818  frag_align (2, 0, 0);
3819  record_alignment (now_seg, 2);
3820
3821  ptr = frag_more (8);
3822  memset (ptr, 0, 8);
3823  where = frag_now_fix () - 8;
3824
3825  /* Self relative offset of the function start.  */
3826  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
3827	   BFD_RELOC_ARM_PREL31);
3828
3829  /* Indicate dependency on EHABI-defined personality routines to the
3830     linker, if it hasn't been done already.  */
3831  marked_pr_dependency
3832    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
3833  if (unwind.personality_index >= 0 && unwind.personality_index < 3
3834      && !(marked_pr_dependency & (1 << unwind.personality_index)))
3835    {
3836      static const char *const name[] =
3837	{
3838	  "__aeabi_unwind_cpp_pr0",
3839	  "__aeabi_unwind_cpp_pr1",
3840	  "__aeabi_unwind_cpp_pr2"
3841	};
3842      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
3843      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
3844      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
3845	|= 1 << unwind.personality_index;
3846    }
3847
3848  if (val)
3849    /* Inline exception table entry.  */
3850    md_number_to_chars (ptr + 4, val, 4);
3851  else
3852    /* Self relative offset of the table entry.	 */
3853    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
3854	     BFD_RELOC_ARM_PREL31);
3855
3856  /* Restore the original section.  */
3857  subseg_set (unwind.saved_seg, unwind.saved_subseg);
3858
3859  unwind.proc_start = NULL;
3860}
3861
3862
3863/* Parse an unwind_cantunwind directive.  */
3864
3865static void
3866s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3867{
3868  demand_empty_rest_of_line ();
3869  if (!unwind.proc_start)
3870    as_bad (MISSING_FNSTART);
3871
3872  if (unwind.personality_routine || unwind.personality_index != -1)
3873    as_bad (_("personality routine specified for cantunwind frame"));
3874
3875  unwind.personality_index = -2;
3876}
3877
3878
3879/* Parse a personalityindex directive.	*/
3880
3881static void
3882s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3883{
3884  expressionS exp;
3885
3886  if (!unwind.proc_start)
3887    as_bad (MISSING_FNSTART);
3888
3889  if (unwind.personality_routine || unwind.personality_index != -1)
3890    as_bad (_("duplicate .personalityindex directive"));
3891
3892  expression (&exp);
3893
3894  if (exp.X_op != O_constant
3895      || exp.X_add_number < 0 || exp.X_add_number > 15)
3896    {
3897      as_bad (_("bad personality routine number"));
3898      ignore_rest_of_line ();
3899      return;
3900    }
3901
3902  unwind.personality_index = exp.X_add_number;
3903
3904  demand_empty_rest_of_line ();
3905}
3906
3907
3908/* Parse a personality directive.  */
3909
3910static void
3911s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
3912{
3913  char *name, *p, c;
3914
3915  if (!unwind.proc_start)
3916    as_bad (MISSING_FNSTART);
3917
3918  if (unwind.personality_routine || unwind.personality_index != -1)
3919    as_bad (_("duplicate .personality directive"));
3920
3921  c = get_symbol_name (& name);
3922  p = input_line_pointer;
3923  if (c == '"')
3924    ++ input_line_pointer;
3925  unwind.personality_routine = symbol_find_or_make (name);
3926  *p = c;
3927  demand_empty_rest_of_line ();
3928}
3929
3930
3931/* Parse a directive saving core registers.  */
3932
3933static void
3934s_arm_unwind_save_core (void)
3935{
3936  valueT op;
3937  long range;
3938  int n;
3939
3940  range = parse_reg_list (&input_line_pointer);
3941  if (range == FAIL)
3942    {
3943      as_bad (_("expected register list"));
3944      ignore_rest_of_line ();
3945      return;
3946    }
3947
3948  demand_empty_rest_of_line ();
3949
3950  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3951     into .unwind_save {..., sp, ...}.  We aren't bothered about the value of
3952     ip because it is clobbered by calls.  */
3953  if (unwind.sp_restored && unwind.fp_reg == 12
3954      && (range & 0x3000) == 0x1000)
3955    {
3956      unwind.opcode_count--;
3957      unwind.sp_restored = 0;
3958      range = (range | 0x2000) & ~0x1000;
3959      unwind.pending_offset = 0;
3960    }
3961
3962  /* Pop r4-r15.  */
3963  if (range & 0xfff0)
3964    {
3965      /* See if we can use the short opcodes.  These pop a block of up to 8
3966	 registers starting with r4, plus maybe r14.  */
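      /* For example, an illustrative ".save {r4-r7, lr}" qualifies for the
	 short form and becomes the single byte 0xa8 | 3 == 0xab; other
	 combinations fall back to the two-byte 0x8000 form below.  */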
3967      for (n = 0; n < 8; n++)
3968	{
3969	  /* Break at the first non-saved register.	 */
3970	  if ((range & (1 << (n + 4))) == 0)
3971	    break;
3972	}
3973      /* See if there are any other bits set.  */
3974      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
3975	{
3976	  /* Use the long form.  */
3977	  op = 0x8000 | ((range >> 4) & 0xfff);
3978	  add_unwind_opcode (op, 2);
3979	}
3980      else
3981	{
3982	  /* Use the short form.  */
3983	  if (range & 0x4000)
3984	    op = 0xa8; /* Pop r14.	*/
3985	  else
3986	    op = 0xa0; /* Do not pop r14.  */
3987	  op |= (n - 1);
3988	  add_unwind_opcode (op, 1);
3989	}
3990    }
3991
3992  /* Pop r0-r3.	 */
3993  if (range & 0xf)
3994    {
3995      op = 0xb100 | (range & 0xf);
3996      add_unwind_opcode (op, 2);
3997    }
3998
3999  /* Record the number of bytes pushed.	 */
4000  for (n = 0; n < 16; n++)
4001    {
4002      if (range & (1 << n))
4003	unwind.frame_size += 4;
4004    }
4005}
4006
4007
4008/* Parse a directive saving FPA registers.  */
4009
4010static void
4011s_arm_unwind_save_fpa (int reg)
4012{
4013  expressionS exp;
4014  int num_regs;
4015  valueT op;
4016
4017  /* Get the number of registers to transfer.  */
4018  if (skip_past_comma (&input_line_pointer) != FAIL)
4019    expression (&exp);
4020  else
4021    exp.X_op = O_illegal;
4022
4023  if (exp.X_op != O_constant)
4024    {
4025      as_bad (_("expected , <constant>"));
4026      ignore_rest_of_line ();
4027      return;
4028    }
4029
4030  num_regs = exp.X_add_number;
4031
4032  if (num_regs < 1 || num_regs > 4)
4033    {
4034      as_bad (_("number of registers must be in the range [1:4]"));
4035      ignore_rest_of_line ();
4036      return;
4037    }
4038
4039  demand_empty_rest_of_line ();
4040
4041  if (reg == 4)
4042    {
4043      /* Short form.  */
4044      op = 0xb4 | (num_regs - 1);
4045      add_unwind_opcode (op, 1);
4046    }
4047  else
4048    {
4049      /* Long form.  */
4050      op = 0xc800 | (reg << 4) | (num_regs - 1);
4051      add_unwind_opcode (op, 2);
4052    }
4053  unwind.frame_size += num_regs * 12;
4054}
4055
4056
4057/* Parse a directive saving VFP registers for ARMv6 and above.  */
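/* For example, an illustrative ".vsave {d8-d15}" produces the single
   two-byte opcode 0xc987 and adds 64 bytes to the recorded frame size.  */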
4058
4059static void
4060s_arm_unwind_save_vfp_armv6 (void)
4061{
4062  int count;
4063  unsigned int start;
4064  valueT op;
4065  int num_vfpv3_regs = 0;
4066  int num_regs_below_16;
4067
4068  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
4069  if (count == FAIL)
4070    {
4071      as_bad (_("expected register list"));
4072      ignore_rest_of_line ();
4073      return;
4074    }
4075
4076  demand_empty_rest_of_line ();
4077
4078  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
4079     than FSTMX/FLDMX-style ones).  */
4080
4081  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
4082  if (start >= 16)
4083    num_vfpv3_regs = count;
4084  else if (start + count > 16)
4085    num_vfpv3_regs = start + count - 16;
4086
4087  if (num_vfpv3_regs > 0)
4088    {
4089      int start_offset = start > 16 ? start - 16 : 0;
4090      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
4091      add_unwind_opcode (op, 2);
4092    }
4093
4094  /* Generate opcode for registers numbered in the range 0 .. 15.  */
4095  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
4096  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
4097  if (num_regs_below_16 > 0)
4098    {
4099      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
4100      add_unwind_opcode (op, 2);
4101    }
4102
4103  unwind.frame_size += count * 8;
4104}
4105
4106
4107/* Parse a directive saving VFP registers for pre-ARMv6.  */
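/* For example, ".save {d8-d15}" (written with .save rather than .vsave;
   the list is illustrative) emits the single byte 0xbf and adds
   8 * 8 + 4 = 68 bytes to the frame size; the extra 4 bytes correspond
   to the FSTMX format word.  */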
4108
4109static void
4110s_arm_unwind_save_vfp (void)
4111{
4112  int count;
4113  unsigned int reg;
4114  valueT op;
4115
4116  count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
4117  if (count == FAIL)
4118    {
4119      as_bad (_("expected register list"));
4120      ignore_rest_of_line ();
4121      return;
4122    }
4123
4124  demand_empty_rest_of_line ();
4125
4126  if (reg == 8)
4127    {
4128      /* Short form.  */
4129      op = 0xb8 | (count - 1);
4130      add_unwind_opcode (op, 1);
4131    }
4132  else
4133    {
4134      /* Long form.  */
4135      op = 0xb300 | (reg << 4) | (count - 1);
4136      add_unwind_opcode (op, 2);
4137    }
4138  unwind.frame_size += count * 8 + 4;
4139}
4140
4141
4142/* Parse a directive saving iWMMXt data registers.  */
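/* For example, an illustrative ".save {wr10-wr12}" (assuming there is no
   previous iWMMXt opcode to merge with) emits the single byte 0xc2 and
   adds 24 bytes to the frame size.  */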
4143
4144static void
4145s_arm_unwind_save_mmxwr (void)
4146{
4147  int reg;
4148  int hi_reg;
4149  int i;
4150  unsigned mask = 0;
4151  valueT op;
4152
4153  if (*input_line_pointer == '{')
4154    input_line_pointer++;
4155
4156  do
4157    {
4158      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4159
4160      if (reg == FAIL)
4161	{
4162	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4163	  goto error;
4164	}
4165
4166      if (mask >> reg)
4167	as_tsktsk (_("register list not in ascending order"));
4168      mask |= 1 << reg;
4169
4170      if (*input_line_pointer == '-')
4171	{
4172	  input_line_pointer++;
4173	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4174	  if (hi_reg == FAIL)
4175	    {
4176	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4177	      goto error;
4178	    }
4179	  else if (reg >= hi_reg)
4180	    {
4181	      as_bad (_("bad register range"));
4182	      goto error;
4183	    }
4184	  for (; reg < hi_reg; reg++)
4185	    mask |= 1 << reg;
4186	}
4187    }
4188  while (skip_past_comma (&input_line_pointer) != FAIL);
4189
4190  skip_past_char (&input_line_pointer, '}');
4191
4192  demand_empty_rest_of_line ();
4193
4194  /* Generate any deferred opcodes because we're going to be looking at
4195     the list.	*/
4196  flush_pending_unwind ();
4197
4198  for (i = 0; i < 16; i++)
4199    {
4200      if (mask & (1 << i))
4201	unwind.frame_size += 8;
4202    }
4203
4204  /* Attempt to combine with a previous opcode.	 We do this because gcc
4205     likes to output separate unwind directives for a single block of
4206     registers.	 */
4207  if (unwind.opcode_count > 0)
4208    {
4209      i = unwind.opcodes[unwind.opcode_count - 1];
4210      if ((i & 0xf8) == 0xc0)
4211	{
4212	  i &= 7;
4213	  /* Only merge if the blocks are contiguous.  */
4214	  if (i < 6)
4215	    {
4216	      if ((mask & 0xfe00) == (1 << 9))
4217		{
4218		  mask |= ((1 << (i + 11)) - 1) & 0xfc00;
4219		  unwind.opcode_count--;
4220		}
4221	    }
4222	  else if (i == 6 && unwind.opcode_count >= 2)
4223	    {
4224	      i = unwind.opcodes[unwind.opcode_count - 2];
4225	      reg = i >> 4;
4226	      i &= 0xf;
4227
4228	      /* REG == 0 would make the (REG - 1) shift undefined; skip it.  */
4229	      if (reg > 0
4230		  && ((mask & (0xffffu << (reg - 1))) == (1u << (reg - 1))))
4231		{
4232		  op = (1 << (reg + i + 1)) - 1;
4233		  op &= ~((1 << reg) - 1);
4234		  mask |= op;
4235		  unwind.opcode_count -= 2;
4236		}
4237	    }
4238	}
4239    }
4240
4241  hi_reg = 15;
4242  /* We want to generate opcodes in the order the registers have been
4243     saved, i.e. descending order.  */
4244  for (reg = 15; reg >= -1; reg--)
4245    {
4246      /* Save registers in blocks.  */
4247      if (reg < 0
4248	  || !(mask & (1 << reg)))
4249	{
4250	  /* We found an unsaved reg.  Generate opcodes to save the
4251	     preceding block.	*/
4252	  if (reg != hi_reg)
4253	    {
4254	      if (reg == 9)
4255		{
4256		  /* Short form.  */
4257		  op = 0xc0 | (hi_reg - 10);
4258		  add_unwind_opcode (op, 1);
4259		}
4260	      else
4261		{
4262		  /* Long form.	 */
4263		  op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
4264		  add_unwind_opcode (op, 2);
4265		}
4266	    }
4267	  hi_reg = reg - 1;
4268	}
4269    }
4270
4271  return;
4272error:
4273  ignore_rest_of_line ();
4274}
4275
4276static void
4277s_arm_unwind_save_mmxwcg (void)
4278{
4279  int reg;
4280  int hi_reg;
4281  unsigned mask = 0;
4282  valueT op;
4283
4284  if (*input_line_pointer == '{')
4285    input_line_pointer++;
4286
4287  skip_whitespace (input_line_pointer);
4288
4289  do
4290    {
4291      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4292
4293      if (reg == FAIL)
4294	{
4295	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4296	  goto error;
4297	}
4298
4299      reg -= 8;
4300      if (mask >> reg)
4301	as_tsktsk (_("register list not in ascending order"));
4302      mask |= 1 << reg;
4303
4304      if (*input_line_pointer == '-')
4305	{
4306	  input_line_pointer++;
4307	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4308	  if (hi_reg == FAIL)
4309	    {
4310	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4311	      goto error;
4312	    }
4313	  else if (reg >= hi_reg)
4314	    {
4315	      as_bad (_("bad register range"));
4316	      goto error;
4317	    }
4318	  for (; reg < hi_reg; reg++)
4319	    mask |= 1 << reg;
4320	}
4321    }
4322  while (skip_past_comma (&input_line_pointer) != FAIL);
4323
4324  skip_past_char (&input_line_pointer, '}');
4325
4326  demand_empty_rest_of_line ();
4327
4328  /* Generate any deferred opcodes because we're going to be looking at
4329     the list.	*/
4330  flush_pending_unwind ();
4331
4332  for (reg = 0; reg < 16; reg++)
4333    {
4334      if (mask & (1 << reg))
4335	unwind.frame_size += 4;
4336    }
4337  op = 0xc700 | mask;
4338  add_unwind_opcode (op, 2);
4339  return;
4340error:
4341  ignore_rest_of_line ();
4342}
4343
4344
4345/* Parse an unwind_save directive.
4346   If the argument is non-zero, this is a .vsave directive.  */
4347
4348static void
4349s_arm_unwind_save (int arch_v6)
4350{
4351  char *peek;
4352  struct reg_entry *reg;
4353  bfd_boolean had_brace = FALSE;
4354
4355  if (!unwind.proc_start)
4356    as_bad (MISSING_FNSTART);
4357
4358  /* Figure out what sort of save we have.  */
4359  peek = input_line_pointer;
4360
4361  if (*peek == '{')
4362    {
4363      had_brace = TRUE;
4364      peek++;
4365    }
4366
4367  reg = arm_reg_parse_multi (&peek);
4368
4369  if (!reg)
4370    {
4371      as_bad (_("register expected"));
4372      ignore_rest_of_line ();
4373      return;
4374    }
4375
4376  switch (reg->type)
4377    {
4378    case REG_TYPE_FN:
4379      if (had_brace)
4380	{
4381	  as_bad (_("FPA .unwind_save does not take a register list"));
4382	  ignore_rest_of_line ();
4383	  return;
4384	}
4385      input_line_pointer = peek;
4386      s_arm_unwind_save_fpa (reg->number);
4387      return;
4388
4389    case REG_TYPE_RN:
4390      s_arm_unwind_save_core ();
4391      return;
4392
4393    case REG_TYPE_VFD:
4394      if (arch_v6)
4395	s_arm_unwind_save_vfp_armv6 ();
4396      else
4397	s_arm_unwind_save_vfp ();
4398      return;
4399
4400    case REG_TYPE_MMXWR:
4401      s_arm_unwind_save_mmxwr ();
4402      return;
4403
4404    case REG_TYPE_MMXWCG:
4405      s_arm_unwind_save_mmxwcg ();
4406      return;
4407
4408    default:
4409      as_bad (_(".unwind_save does not support this kind of register"));
4410      ignore_rest_of_line ();
4411    }
4412}
4413
4414
4415/* Parse an unwind_movsp directive.  */
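/* For example, ".movsp ip" emits the byte 0x9c (0x90 | 12) and records
   ip as the register from which sp will be restored.  */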
4416
4417static void
4418s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
4419{
4420  int reg;
4421  valueT op;
4422  int offset;
4423
4424  if (!unwind.proc_start)
4425    as_bad (MISSING_FNSTART);
4426
4427  reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4428  if (reg == FAIL)
4429    {
4430      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
4431      ignore_rest_of_line ();
4432      return;
4433    }
4434
4435  /* Optional constant.	 */
4436  if (skip_past_comma (&input_line_pointer) != FAIL)
4437    {
4438      if (immediate_for_directive (&offset) == FAIL)
4439	return;
4440    }
4441  else
4442    offset = 0;
4443
4444  demand_empty_rest_of_line ();
4445
4446  if (reg == REG_SP || reg == REG_PC)
4447    {
4448      as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4449      return;
4450    }
4451
4452  if (unwind.fp_reg != REG_SP)
4453    as_bad (_("unexpected .unwind_movsp directive"));
4454
4455  /* Generate opcode to restore the value.  */
4456  op = 0x90 | reg;
4457  add_unwind_opcode (op, 1);
4458
4459  /* Record the information for later.	*/
4460  unwind.fp_reg = reg;
4461  unwind.fp_offset = unwind.frame_size - offset;
4462  unwind.sp_restored = 1;
4463}
4464
4465/* Parse an unwind_pad directive.  */
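/* For example, ".pad #12" adds 12 bytes to both the frame size and the
   pending stack adjustment; the corresponding opcode is only emitted
   later, when the pending adjustments are flushed.  */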
4466
4467static void
4468s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4469{
4470  int offset;
4471
4472  if (!unwind.proc_start)
4473    as_bad (MISSING_FNSTART);
4474
4475  if (immediate_for_directive (&offset) == FAIL)
4476    return;
4477
4478  if (offset & 3)
4479    {
4480      as_bad (_("stack increment must be multiple of 4"));
4481      ignore_rest_of_line ();
4482      return;
4483    }
4484
4485  /* Don't generate any opcodes, just record the details for later.  */
4486  unwind.frame_size += offset;
4487  unwind.pending_offset += offset;
4488
4489  demand_empty_rest_of_line ();
4490}
4491
4492/* Parse an unwind_setfp directive.  */
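/* For example, ".setfp fp, sp, #8" records that the frame pointer was
   set to sp plus 8; no opcode is emitted here, only fp_reg and fp_offset
   are updated for use by later directives.  */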
4493
4494static void
4495s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4496{
4497  int sp_reg;
4498  int fp_reg;
4499  int offset;
4500
4501  if (!unwind.proc_start)
4502    as_bad (MISSING_FNSTART);
4503
4504  fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4505  if (skip_past_comma (&input_line_pointer) == FAIL)
4506    sp_reg = FAIL;
4507  else
4508    sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4509
4510  if (fp_reg == FAIL || sp_reg == FAIL)
4511    {
4512      as_bad (_("expected <reg>, <reg>"));
4513      ignore_rest_of_line ();
4514      return;
4515    }
4516
4517  /* Optional constant.	 */
4518  if (skip_past_comma (&input_line_pointer) != FAIL)
4519    {
4520      if (immediate_for_directive (&offset) == FAIL)
4521	return;
4522    }
4523  else
4524    offset = 0;
4525
4526  demand_empty_rest_of_line ();
4527
4528  if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4529    {
4530      as_bad (_("register must be either sp or set by a previous "
4531		"unwind_movsp directive"));
4532      return;
4533    }
4534
4535  /* Don't generate any opcodes, just record the information for later.	 */
4536  unwind.fp_reg = fp_reg;
4537  unwind.fp_used = 1;
4538  if (sp_reg == REG_SP)
4539    unwind.fp_offset = unwind.frame_size - offset;
4540  else
4541    unwind.fp_offset -= offset;
4542}
4543
4544/* Parse an unwind_raw directive.  */
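/* For example, an illustrative ".unwind_raw 4, 0xb1, 0x01" adds 4 bytes
   to the recorded frame size and inserts the two literal opcode bytes
   0xb1 and 0x01 into the unwind opcode stream.  */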
4545
4546static void
4547s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4548{
4549  expressionS exp;
4550  /* This is an arbitrary limit.	 */
4551  unsigned char op[16];
4552  int count;
4553
4554  if (!unwind.proc_start)
4555    as_bad (MISSING_FNSTART);
4556
4557  expression (&exp);
4558  if (exp.X_op == O_constant
4559      && skip_past_comma (&input_line_pointer) != FAIL)
4560    {
4561      unwind.frame_size += exp.X_add_number;
4562      expression (&exp);
4563    }
4564  else
4565    exp.X_op = O_illegal;
4566
4567  if (exp.X_op != O_constant)
4568    {
4569      as_bad (_("expected <offset>, <opcode>"));
4570      ignore_rest_of_line ();
4571      return;
4572    }
4573
4574  count = 0;
4575
4576  /* Parse the opcode.	*/
4577  for (;;)
4578    {
4579      if (count >= 16)
4580	{
4581	  as_bad (_("unwind opcode too long"));
4582	  ignore_rest_of_line ();
	  return;
4583	}
4584      if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4585	{
4586	  as_bad (_("invalid unwind opcode"));
4587	  ignore_rest_of_line ();
4588	  return;
4589	}
4590      op[count++] = exp.X_add_number;
4591
4592      /* Parse the next byte.  */
4593      if (skip_past_comma (&input_line_pointer) == FAIL)
4594	break;
4595
4596      expression (&exp);
4597    }
4598
4599  /* Add the opcode bytes in reverse order.  */
4600  while (count--)
4601    add_unwind_opcode (op[count], 1);
4602
4603  demand_empty_rest_of_line ();
4604}
4605
4606
4607/* Parse a .eabi_attribute directive.  */
4608
4609static void
4610s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4611{
4612  int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
4613
4614  if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4615    attributes_set_explicitly[tag] = 1;
4616}
4617
4618/* Emit a tls fix for the symbol.  */
4619
4620static void
4621s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
4622{
4623  char *p;
4624  expressionS exp;
4625#ifdef md_flush_pending_output
4626  md_flush_pending_output ();
4627#endif
4628
4629#ifdef md_cons_align
4630  md_cons_align (4);
4631#endif
4632
4633  /* Since we're just labelling the code, there's no need to define a
4634     mapping symbol.  */
4635  expression (&exp);
4636  p = obstack_next_free (&frchain_now->frch_obstack);
4637  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
4638	       thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
4639	       : BFD_RELOC_ARM_TLS_DESCSEQ);
4640}
4641#endif /* OBJ_ELF */
4642
4643static void s_arm_arch (int);
4644static void s_arm_object_arch (int);
4645static void s_arm_cpu (int);
4646static void s_arm_fpu (int);
4647static void s_arm_arch_extension (int);
4648
4649#ifdef TE_PE
4650
4651static void
4652pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4653{
4654  expressionS exp;
4655
4656  do
4657    {
4658      expression (&exp);
4659      if (exp.X_op == O_symbol)
4660	exp.X_op = O_secrel;
4661
4662      emit_expr (&exp, 4);
4663    }
4664  while (*input_line_pointer++ == ',');
4665
4666  input_line_pointer--;
4667  demand_empty_rest_of_line ();
4668}
4669#endif /* TE_PE */
4670
4671/* This table describes all the machine specific pseudo-ops the assembler
4672   has to support.  The fields are:
4673     pseudo-op name without dot
4674     function to call to execute this pseudo-op
4675     Integer arg to pass to the function.  */
4676
4677const pseudo_typeS md_pseudo_table[] =
4678{
4679  /* Never called because '.req' does not start a line.	 */
4680  { "req",	   s_req,	  0 },
4681  /* Following two are likewise never called.  */
4682  { "dn",	   s_dn,          0 },
4683  { "qn",          s_qn,          0 },
4684  { "unreq",	   s_unreq,	  0 },
4685  { "bss",	   s_bss,	  0 },
4686  { "align",	   s_align_ptwo,  2 },
4687  { "arm",	   s_arm,	  0 },
4688  { "thumb",	   s_thumb,	  0 },
4689  { "code",	   s_code,	  0 },
4690  { "force_thumb", s_force_thumb, 0 },
4691  { "thumb_func",  s_thumb_func,  0 },
4692  { "thumb_set",   s_thumb_set,	  0 },
4693  { "even",	   s_even,	  0 },
4694  { "ltorg",	   s_ltorg,	  0 },
4695  { "pool",	   s_ltorg,	  0 },
4696  { "syntax",	   s_syntax,	  0 },
4697  { "cpu",	   s_arm_cpu,	  0 },
4698  { "arch",	   s_arm_arch,	  0 },
4699  { "object_arch", s_arm_object_arch,	0 },
4700  { "fpu",	   s_arm_fpu,	  0 },
4701  { "arch_extension", s_arm_arch_extension, 0 },
4702#ifdef OBJ_ELF
4703  { "word",	        s_arm_elf_cons, 4 },
4704  { "long",	        s_arm_elf_cons, 4 },
4705  { "inst.n",           s_arm_elf_inst, 2 },
4706  { "inst.w",           s_arm_elf_inst, 4 },
4707  { "inst",             s_arm_elf_inst, 0 },
4708  { "rel31",	        s_arm_rel31,	  0 },
4709  { "fnstart",		s_arm_unwind_fnstart,	0 },
4710  { "fnend",		s_arm_unwind_fnend,	0 },
4711  { "cantunwind",	s_arm_unwind_cantunwind, 0 },
4712  { "personality",	s_arm_unwind_personality, 0 },
4713  { "personalityindex",	s_arm_unwind_personalityindex, 0 },
4714  { "handlerdata",	s_arm_unwind_handlerdata, 0 },
4715  { "save",		s_arm_unwind_save,	0 },
4716  { "vsave",		s_arm_unwind_save,	1 },
4717  { "movsp",		s_arm_unwind_movsp,	0 },
4718  { "pad",		s_arm_unwind_pad,	0 },
4719  { "setfp",		s_arm_unwind_setfp,	0 },
4720  { "unwind_raw",	s_arm_unwind_raw,	0 },
4721  { "eabi_attribute",	s_arm_eabi_attribute,	0 },
4722  { "tlsdescseq",	s_arm_tls_descseq,      0 },
4723#else
4724  { "word",	   cons, 4},
4725
4726  /* These are used for dwarf.  */
4727  {"2byte", cons, 2},
4728  {"4byte", cons, 4},
4729  {"8byte", cons, 8},
4730  /* These are used for dwarf2.  */
4731  { "file", (void (*) (int)) dwarf2_directive_file, 0 },
4732  { "loc",  dwarf2_directive_loc,  0 },
4733  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
4734#endif
4735  { "extend",	   float_cons, 'x' },
4736  { "ldouble",	   float_cons, 'x' },
4737  { "packed",	   float_cons, 'p' },
4738#ifdef TE_PE
4739  {"secrel32", pe_directive_secrel, 0},
4740#endif
4741
4742  /* These are for compatibility with CodeComposer Studio.  */
4743  {"ref",          s_ccs_ref,        0},
4744  {"def",          s_ccs_def,        0},
4745  {"asmfunc",      s_ccs_asmfunc,    0},
4746  {"endasmfunc",   s_ccs_endasmfunc, 0},
4747
4748  { 0, 0, 0 }
4749};
4750
4751/* Parser functions used exclusively in instruction operands.  */
4752
4753/* Generic immediate-value read function for use in insn parsing.
4754   STR points to the beginning of the immediate (the leading #);
4755   VAL receives the value; if the value is outside [MIN, MAX]
4756   issue an error.  PREFIX_OPT is true if the immediate prefix is
4757   optional.  */
4758
4759static int
4760parse_immediate (char **str, int *val, int min, int max,
4761		 bfd_boolean prefix_opt)
4762{
4763  expressionS exp;
4764  my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4765  if (exp.X_op != O_constant)
4766    {
4767      inst.error = _("constant expression required");
4768      return FAIL;
4769    }
4770
4771  if (exp.X_add_number < min || exp.X_add_number > max)
4772    {
4773      inst.error = _("immediate value out of range");
4774      return FAIL;
4775    }
4776
4777  *val = exp.X_add_number;
4778  return SUCCESS;
4779}
4780
4781/* Less-generic immediate-value read function with the possibility of loading a
4782   big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4783   instructions. Puts the result directly in inst.operands[i].  */
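/* For example, a 64-bit constant such as #0xff0000ff00ff00ff (purely
   illustrative) is split here: the low 32 bits go into .imm, the high 32
   bits into .reg, and .regisimm is set; whether a particular instruction
   can actually encode the value is checked elsewhere.  */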
4784
4785static int
4786parse_big_immediate (char **str, int i, expressionS *in_exp,
4787		     bfd_boolean allow_symbol_p)
4788{
4789  expressionS exp;
4790  expressionS *exp_p = in_exp ? in_exp : &exp;
4791  char *ptr = *str;
4792
4793  my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);
4794
4795  if (exp_p->X_op == O_constant)
4796    {
4797      inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
4798      /* If we're on a 64-bit host, then a 64-bit number can be returned using
4799	 O_constant.  We have to be careful not to break compilation for
4800	 32-bit X_add_number, though.  */
4801      if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
4802	{
4803	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.  */
4804	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
4805				  & 0xffffffff);
4806	  inst.operands[i].regisimm = 1;
4807	}
4808    }
4809  else if (exp_p->X_op == O_big
4810	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
4811    {
4812      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
4813
4814      /* Bignums have their least significant bits in
4815	 generic_bignum[0]. Make sure we put 32 bits in imm and
4816	 32 bits in reg,  in a (hopefully) portable way.  */
4817      gas_assert (parts != 0);
4818
4819      /* Make sure that the number is not too big.
4820	 PR 11972: Bignums can now be sign-extended to the
4821	 size of a .octa so check that the out of range bits
4822	 are all zero or all one.  */
4823      if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
4824	{
4825	  LITTLENUM_TYPE m = -1;
4826
4827	  if (generic_bignum[parts * 2] != 0
4828	      && generic_bignum[parts * 2] != m)
4829	    return FAIL;
4830
4831	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
4832	    if (generic_bignum[j] != generic_bignum[j-1])
4833	      return FAIL;
4834	}
4835
4836      inst.operands[i].imm = 0;
4837      for (j = 0; j < parts; j++, idx++)
4838	inst.operands[i].imm |= generic_bignum[idx]
4839				<< (LITTLENUM_NUMBER_OF_BITS * j);
4840      inst.operands[i].reg = 0;
4841      for (j = 0; j < parts; j++, idx++)
4842	inst.operands[i].reg |= generic_bignum[idx]
4843				<< (LITTLENUM_NUMBER_OF_BITS * j);
4844      inst.operands[i].regisimm = 1;
4845    }
4846  else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
4847    return FAIL;
4848
4849  *str = ptr;
4850
4851  return SUCCESS;
4852}
4853
4854/* Returns the pseudo-register number of an FPA immediate constant,
4855   or FAIL if there isn't a valid constant here.  */
4856
4857static int
4858parse_fpa_immediate (char ** str)
4859{
4860  LITTLENUM_TYPE words[MAX_LITTLENUMS];
4861  char *	 save_in;
4862  expressionS	 exp;
4863  int		 i;
4864  int		 j;
4865
4866  /* First try to match exact strings; this guarantees that some
4867     formats will work even for cross assembly.  */
4868
4869  for (i = 0; fp_const[i]; i++)
4870    {
4871      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
4872	{
4873	  char *start = *str;
4874
4875	  *str += strlen (fp_const[i]);
4876	  if (is_end_of_line[(unsigned char) **str])
4877	    return i + 8;
4878	  *str = start;
4879	}
4880    }
4881
4882  /* Just because we didn't get a match doesn't mean that the constant
4883     isn't valid, just that it is in a format that we don't
4884     automatically recognize.  Try parsing it with the standard
4885     expression routines.  */
4886
4887  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));
4888
4889  /* Look for a raw floating point number.  */
4890  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
4891      && is_end_of_line[(unsigned char) *save_in])
4892    {
4893      for (i = 0; i < NUM_FLOAT_VALS; i++)
4894	{
4895	  for (j = 0; j < MAX_LITTLENUMS; j++)
4896	    {
4897	      if (words[j] != fp_values[i][j])
4898		break;
4899	    }
4900
4901	  if (j == MAX_LITTLENUMS)
4902	    {
4903	      *str = save_in;
4904	      return i + 8;
4905	    }
4906	}
4907    }
4908
4909  /* Try to parse a more complex expression; this will probably fail
4910     unless the code uses a floating point prefix (e.g. "0f").  */
4911  save_in = input_line_pointer;
4912  input_line_pointer = *str;
4913  if (expression (&exp) == absolute_section
4914      && exp.X_op == O_big
4915      && exp.X_add_number < 0)
4916    {
4917      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4918	 Ditto for 15.	*/
4919#define X_PRECISION 5
4920#define E_PRECISION 15L
4921      if (gen_to_words (words, X_PRECISION, E_PRECISION) == 0)
4922	{
4923	  for (i = 0; i < NUM_FLOAT_VALS; i++)
4924	    {
4925	      for (j = 0; j < MAX_LITTLENUMS; j++)
4926		{
4927		  if (words[j] != fp_values[i][j])
4928		    break;
4929		}
4930
4931	      if (j == MAX_LITTLENUMS)
4932		{
4933		  *str = input_line_pointer;
4934		  input_line_pointer = save_in;
4935		  return i + 8;
4936		}
4937	    }
4938	}
4939    }
4940
4941  *str = input_line_pointer;
4942  input_line_pointer = save_in;
4943  inst.error = _("invalid FPA immediate expression");
4944  return FAIL;
4945}
4946
4947/* Returns 1 if a number has "quarter-precision" float format
4948   0baBbbbbbc defgh000 00000000 00000000.  */
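/* For example, 0x3f800000 (single-precision 1.0) satisfies the check
   below, while 0x40490fdb (approximately pi) does not.  */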
4949
4950static int
4951is_quarter_float (unsigned imm)
4952{
4953  int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
4954  return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
4955}
4956
4957
4958/* Detect the presence of a floating point or integer zero constant,
4959   i.e. #0.0 or #0.  */
4960
4961static bfd_boolean
4962parse_ifimm_zero (char **in)
4963{
4964  int error_code;
4965
4966  if (!is_immediate_prefix (**in))
4967    return FALSE;
4968
4969  ++*in;
4970
4971  /* Accept #0x0 as a synonym for #0.  */
4972  if (strncmp (*in, "0x", 2) == 0)
4973    {
4974      int val;
4975      if (parse_immediate (in, &val, 0, 0, TRUE) == FAIL)
4976        return FALSE;
4977      return TRUE;
4978    }
4979
4980  error_code = atof_generic (in, ".", EXP_CHARS,
4981                             &generic_floating_point_number);
4982
4983  if (!error_code
4984      && generic_floating_point_number.sign == '+'
4985      && (generic_floating_point_number.low
4986          > generic_floating_point_number.leader))
4987    return TRUE;
4988
4989  return FALSE;
4990}
4991
4992/* Parse an 8-bit "quarter-precision" floating point number of the form:
4993   0baBbbbbbc defgh000 00000000 00000000.
4994   The zero and minus-zero cases need special handling, since they can't be
4995   encoded in the "quarter-precision" float format, but can nonetheless be
4996   loaded as integer constants.  */
4997
4998static unsigned
4999parse_qfloat_immediate (char **ccp, int *immed)
5000{
5001  char *str = *ccp;
5002  char *fpnum;
5003  LITTLENUM_TYPE words[MAX_LITTLENUMS];
5004  int found_fpchar = 0;
5005
5006  skip_past_char (&str, '#');
5007
5008  /* We must not accidentally parse an integer as a floating-point number. Make
5009     sure that the value we parse is not an integer by checking for special
5010     characters '.' or 'e'.
5011     FIXME: This is a horrible hack, but doing better is tricky because type
5012     information isn't in a very usable state at parse time.  */
5013  fpnum = str;
5014  skip_whitespace (fpnum);
5015
5016  if (strncmp (fpnum, "0x", 2) == 0)
5017    return FAIL;
5018  else
5019    {
5020      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
5021	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
5022	  {
5023	    found_fpchar = 1;
5024	    break;
5025	  }
5026
5027      if (!found_fpchar)
5028	return FAIL;
5029    }
5030
5031  if ((str = atof_ieee (str, 's', words)) != NULL)
5032    {
5033      unsigned fpword = 0;
5034      int i;
5035
5036      /* Our FP word must be 32 bits (single-precision FP).  */
5037      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
5038	{
5039	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
5040	  fpword |= words[i];
5041	}
5042
5043      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
5044	*immed = fpword;
5045      else
5046	return FAIL;
5047
5048      *ccp = str;
5049
5050      return SUCCESS;
5051    }
5052
5053  return FAIL;
5054}
5055
5056/* Shift operands.  */
5057enum shift_kind
5058{
5059  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
5060};
5061
5062struct asm_shift_name
5063{
5064  const char	  *name;
5065  enum shift_kind  kind;
5066};
5067
5068/* Third argument to parse_shift.  */
5069enum parse_shift_mode
5070{
5071  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
5072  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.	*/
5073  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.	*/
5074  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.	 */
5075  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.	 */
5076};
5077
5078/* Parse a <shift> specifier on an ARM data processing instruction.
5079   This has three forms:
5080
5081     (LSL|LSR|ASL|ASR|ROR) Rs
5082     (LSL|LSR|ASL|ASR|ROR) #imm
5083     RRX
5084
5085   Note that ASL is assimilated to LSL in the instruction encoding, and
5086   RRX to ROR #0 (which cannot be written as such).  */
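/* For example, in "add r0, r1, r2, lsl #2" this routine is handed the
   "lsl #2" part; a register-specified shift such as "lsl r3" is only
   accepted in NO_SHIFT_RESTRICT mode.  */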
5087
5088static int
5089parse_shift (char **str, int i, enum parse_shift_mode mode)
5090{
5091  const struct asm_shift_name *shift_name;
5092  enum shift_kind shift;
5093  char *s = *str;
5094  char *p = s;
5095  int reg;
5096
5097  for (p = *str; ISALPHA (*p); p++)
5098    ;
5099
5100  if (p == *str)
5101    {
5102      inst.error = _("shift expression expected");
5103      return FAIL;
5104    }
5105
5106  shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
5107							    p - *str);
5108
5109  if (shift_name == NULL)
5110    {
5111      inst.error = _("shift expression expected");
5112      return FAIL;
5113    }
5114
5115  shift = shift_name->kind;
5116
5117  switch (mode)
5118    {
5119    case NO_SHIFT_RESTRICT:
5120    case SHIFT_IMMEDIATE:   break;
5121
5122    case SHIFT_LSL_OR_ASR_IMMEDIATE:
5123      if (shift != SHIFT_LSL && shift != SHIFT_ASR)
5124	{
5125	  inst.error = _("'LSL' or 'ASR' required");
5126	  return FAIL;
5127	}
5128      break;
5129
5130    case SHIFT_LSL_IMMEDIATE:
5131      if (shift != SHIFT_LSL)
5132	{
5133	  inst.error = _("'LSL' required");
5134	  return FAIL;
5135	}
5136      break;
5137
5138    case SHIFT_ASR_IMMEDIATE:
5139      if (shift != SHIFT_ASR)
5140	{
5141	  inst.error = _("'ASR' required");
5142	  return FAIL;
5143	}
5144      break;
5145
5146    default: abort ();
5147    }
5148
5149  if (shift != SHIFT_RRX)
5150    {
5151      /* Whitespace can appear here if the next thing is a bare digit.	*/
5152      skip_whitespace (p);
5153
5154      if (mode == NO_SHIFT_RESTRICT
5155	  && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5156	{
5157	  inst.operands[i].imm = reg;
5158	  inst.operands[i].immisreg = 1;
5159	}
5160      else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5161	return FAIL;
5162    }
5163  inst.operands[i].shift_kind = shift;
5164  inst.operands[i].shifted = 1;
5165  *str = p;
5166  return SUCCESS;
5167}
5168
5169/* Parse a <shifter_operand> for an ARM data processing instruction:
5170
5171      #<immediate>
5172      #<immediate>, <rotate>
5173      <Rm>
5174      <Rm>, <shift>
5175
5176   where <shift> is defined by parse_shift above, and <rotate> is a
5177   multiple of 2 between 0 and 30.  Validation of immediate operands
5178   is deferred to md_apply_fix.  */
5179
5180static int
5181parse_shifter_operand (char **str, int i)
5182{
5183  int value;
5184  expressionS exp;
5185
5186  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
5187    {
5188      inst.operands[i].reg = value;
5189      inst.operands[i].isreg = 1;
5190
5191      /* parse_shift will override this if appropriate */
5192      inst.reloc.exp.X_op = O_constant;
5193      inst.reloc.exp.X_add_number = 0;
5194
5195      if (skip_past_comma (str) == FAIL)
5196	return SUCCESS;
5197
5198      /* Shift operation on register.  */
5199      return parse_shift (str, i, NO_SHIFT_RESTRICT);
5200    }
5201
5202  if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
5203    return FAIL;
5204
5205  if (skip_past_comma (str) == SUCCESS)
5206    {
5207      /* #x, y -- i.e. explicit rotation by Y.  */
5208      if (my_get_expression (&exp, str, GE_NO_PREFIX))
5209	return FAIL;
5210
5211      if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
5212	{
5213	  inst.error = _("constant expression expected");
5214	  return FAIL;
5215	}
5216
5217      value = exp.X_add_number;
5218      if (value < 0 || value > 30 || value % 2 != 0)
5219	{
5220	  inst.error = _("invalid rotation");
5221	  return FAIL;
5222	}
5223      if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
5224	{
5225	  inst.error = _("invalid constant");
5226	  return FAIL;
5227	}
5228
5229      /* Encode as specified.  */
5230      inst.operands[i].imm = inst.reloc.exp.X_add_number | value << 7;
5231      return SUCCESS;
5232    }
5233
5234  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
5235  inst.reloc.pc_rel = 0;
5236  return SUCCESS;
5237}
5238
5239/* Group relocation information.  Each entry in the table contains the
5240   textual name of the relocation, as it may appear in assembler source,
5241   where it must be followed by a colon.
5242   Along with this textual name are the relocation codes to be used if
5243   the corresponding instruction is an ALU instruction (ADD or SUB only),
5244   an LDR, an LDRS, or an LDC.  */
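/* For example, in "add r0, r0, #:pc_g0_nc:(sym)" the "pc_g0_nc" entry
   below supplies the ALU relocation code (the symbol name is only
   illustrative).  */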
5245
5246struct group_reloc_table_entry
5247{
5248  const char *name;
5249  int alu_code;
5250  int ldr_code;
5251  int ldrs_code;
5252  int ldc_code;
5253};
5254
5255typedef enum
5256{
5257  /* Varieties of non-ALU group relocation.  */
5258
5259  GROUP_LDR,
5260  GROUP_LDRS,
5261  GROUP_LDC
5262} group_reloc_type;
5263
5264static struct group_reloc_table_entry group_reloc_table[] =
5265  { /* Program counter relative: */
5266    { "pc_g0_nc",
5267      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
5268      0,				/* LDR */
5269      0,				/* LDRS */
5270      0 },				/* LDC */
5271    { "pc_g0",
5272      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
5273      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
5274      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
5275      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
5276    { "pc_g1_nc",
5277      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
5278      0,				/* LDR */
5279      0,				/* LDRS */
5280      0 },				/* LDC */
5281    { "pc_g1",
5282      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
5283      BFD_RELOC_ARM_LDR_PC_G1, 		/* LDR */
5284      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
5285      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
5286    { "pc_g2",
5287      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
5288      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
5289      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
5290      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
5291    /* Section base relative */
5292    { "sb_g0_nc",
5293      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
5294      0,				/* LDR */
5295      0,				/* LDRS */
5296      0 },				/* LDC */
5297    { "sb_g0",
5298      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
5299      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
5300      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
5301      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
5302    { "sb_g1_nc",
5303      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
5304      0,				/* LDR */
5305      0,				/* LDRS */
5306      0 },				/* LDC */
5307    { "sb_g1",
5308      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
5309      BFD_RELOC_ARM_LDR_SB_G1, 		/* LDR */
5310      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
5311      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
5312    { "sb_g2",
5313      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
5314      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
5315      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
5316      BFD_RELOC_ARM_LDC_SB_G2 },	/* LDC */
5317    /* Absolute thumb alu relocations.  */
5318    { "lower0_7",
5319      BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC,/* ALU.  */
5320      0,				/* LDR.  */
5321      0,				/* LDRS.  */
5322      0 },				/* LDC.  */
5323    { "lower8_15",
5324      BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC,/* ALU.  */
5325      0,				/* LDR.  */
5326      0,				/* LDRS.  */
5327      0 },				/* LDC.  */
5328    { "upper0_7",
5329      BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC,/* ALU.  */
5330      0,				/* LDR.  */
5331      0,				/* LDRS.  */
5332      0 },				/* LDC.  */
5333    { "upper8_15",
5334      BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,/* ALU.  */
5335      0,				/* LDR.  */
5336      0,				/* LDRS.  */
5337      0 } };				/* LDC.  */
5338
5339/* Given the address of a pointer pointing to the textual name of a group
5340   relocation as it may appear in assembler source, attempt to find its details
5341   in group_reloc_table.  The pointer will be updated to the character after
5342   the trailing colon.  On failure, FAIL will be returned; SUCCESS
5343   otherwise.  On success, *entry will be updated to point at the relevant
5344   group_reloc_table entry. */
5345
5346static int
5347find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5348{
5349  unsigned int i;
5350  for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5351    {
5352      int length = strlen (group_reloc_table[i].name);
5353
5354      if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5355	  && (*str)[length] == ':')
5356	{
5357	  *out = &group_reloc_table[i];
5358	  *str += (length + 1);
5359	  return SUCCESS;
5360	}
5361    }
5362
5363  return FAIL;
5364}
5365
5366/* Parse a <shifter_operand> for an ARM data processing instruction
5367   (as for parse_shifter_operand) where group relocations are allowed:
5368
5369      #<immediate>
5370      #<immediate>, <rotate>
5371      #:<group_reloc>:<expression>
5372      <Rm>
5373      <Rm>, <shift>
5374
5375   where <group_reloc> is one of the strings defined in group_reloc_table.
5376   The hashes are optional.
5377
5378   Everything else is as for parse_shifter_operand.  */
5379
5380static parse_operand_result
5381parse_shifter_operand_group_reloc (char **str, int i)
5382{
5383  /* Determine if we have the sequence of characters #: or just :
5384     coming next.  If we do, then we check for a group relocation.
5385     If we don't, punt the whole lot to parse_shifter_operand.  */
5386
5387  if (((*str)[0] == '#' && (*str)[1] == ':')
5388      || (*str)[0] == ':')
5389    {
5390      struct group_reloc_table_entry *entry;
5391
5392      if ((*str)[0] == '#')
5393	(*str) += 2;
5394      else
5395	(*str)++;
5396
5397      /* Try to parse a group relocation.  Anything else is an error.  */
5398      if (find_group_reloc_table_entry (str, &entry) == FAIL)
5399	{
5400	  inst.error = _("unknown group relocation");
5401	  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5402	}
5403
5404      /* We now have the group relocation table entry corresponding to
5405	 the name in the assembler source.  Next, we parse the expression.  */
5406      if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
5407	return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5408
5409      /* Record the relocation type (always the ALU variant here).  */
5410      inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
5411      gas_assert (inst.reloc.type != 0);
5412
5413      return PARSE_OPERAND_SUCCESS;
5414    }
5415  else
5416    return parse_shifter_operand (str, i) == SUCCESS
5417	   ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
5418
5419  /* Never reached.  */
5420}
5421
5422/* Parse a Neon alignment expression.  Information is written to
5423   inst.operands[i].  We assume the initial ':' has been skipped.
5424
5425   align	.imm = align << 8, .immisalign=1, .preind=0  */
5426static parse_operand_result
5427parse_neon_alignment (char **str, int i)
5428{
5429  char *p = *str;
5430  expressionS exp;
5431
5432  my_get_expression (&exp, &p, GE_NO_PREFIX);
5433
5434  if (exp.X_op != O_constant)
5435    {
5436      inst.error = _("alignment must be constant");
5437      return PARSE_OPERAND_FAIL;
5438    }
5439
5440  inst.operands[i].imm = exp.X_add_number << 8;
5441  inst.operands[i].immisalign = 1;
5442  /* Alignments are not pre-indexes.  */
5443  inst.operands[i].preind = 0;
5444
5445  *str = p;
5446  return PARSE_OPERAND_SUCCESS;
5447}
5448
5449/* Parse all forms of an ARM address expression.  Information is written
5450   to inst.operands[i] and/or inst.reloc.
5451
5452   Preindexed addressing (.preind=1):
5453
5454   [Rn, #offset]       .reg=Rn .reloc.exp=offset
5455   [Rn, +/-Rm]	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5456   [Rn, +/-Rm, shift]  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5457		       .shift_kind=shift .reloc.exp=shift_imm
5458
5459   These three may have a trailing ! which causes .writeback to be set also.
5460
5461   Postindexed addressing (.postind=1, .writeback=1):
5462
5463   [Rn], #offset       .reg=Rn .reloc.exp=offset
5464   [Rn], +/-Rm	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5465   [Rn], +/-Rm, shift  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5466		       .shift_kind=shift .reloc.exp=shift_imm
5467
5468   Unindexed addressing (.preind=0, .postind=0):
5469
5470   [Rn], {option}      .reg=Rn .imm=option .immisreg=0
5471
5472   Other:
5473
5474   [Rn]{!}	       shorthand for [Rn,#0]{!}
5475   =immediate	       .isreg=0 .reloc.exp=immediate
5476   label	       .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5477
5478  It is the caller's responsibility to check for addressing modes not
5479  supported by the instruction, and to set inst.reloc.type.  */
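/* For example, "[r3, -r4, lsl #2]!" sets .reg=3, .imm=4, .immisreg=1,
   .negative=1, .preind=1 and .writeback=1, with the shift amount
   recorded via .shift_kind and inst.reloc.exp.  */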
5480
5481static parse_operand_result
5482parse_address_main (char **str, int i, int group_relocations,
5483		    group_reloc_type group_type)
5484{
5485  char *p = *str;
5486  int reg;
5487
5488  if (skip_past_char (&p, '[') == FAIL)
5489    {
5490      if (skip_past_char (&p, '=') == FAIL)
5491	{
5492	  /* Bare address - translate to PC-relative offset.  */
5493	  inst.reloc.pc_rel = 1;
5494	  inst.operands[i].reg = REG_PC;
5495	  inst.operands[i].isreg = 1;
5496	  inst.operands[i].preind = 1;
5497
5498	  if (my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX_BIG))
5499	    return PARSE_OPERAND_FAIL;
5500	}
5501      else if (parse_big_immediate (&p, i, &inst.reloc.exp,
5502				    /*allow_symbol_p=*/TRUE))
5503	return PARSE_OPERAND_FAIL;
5504
5505      *str = p;
5506      return PARSE_OPERAND_SUCCESS;
5507    }
5508
5509  /* PR gas/14887: Allow for whitespace after the opening bracket.  */
5510  skip_whitespace (p);
5511
5512  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5513    {
5514      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5515      return PARSE_OPERAND_FAIL;
5516    }
5517  inst.operands[i].reg = reg;
5518  inst.operands[i].isreg = 1;
5519
5520  if (skip_past_comma (&p) == SUCCESS)
5521    {
5522      inst.operands[i].preind = 1;
5523
5524      if (*p == '+') p++;
5525      else if (*p == '-') p++, inst.operands[i].negative = 1;
5526
5527      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5528	{
5529	  inst.operands[i].imm = reg;
5530	  inst.operands[i].immisreg = 1;
5531
5532	  if (skip_past_comma (&p) == SUCCESS)
5533	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
5534	      return PARSE_OPERAND_FAIL;
5535	}
5536      else if (skip_past_char (&p, ':') == SUCCESS)
5537	{
5538	  /* FIXME: '@' should be used here, but it's filtered out by generic
5539	     code before we get to see it here. This may be subject to
5540	     change.  */
5541	  parse_operand_result result = parse_neon_alignment (&p, i);
5542
5543	  if (result != PARSE_OPERAND_SUCCESS)
5544	    return result;
5545	}
5546      else
5547	{
5548	  if (inst.operands[i].negative)
5549	    {
5550	      inst.operands[i].negative = 0;
5551	      p--;
5552	    }
5553
5554	  if (group_relocations
5555	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
5556	    {
5557	      struct group_reloc_table_entry *entry;
5558
5559	      /* Skip over the #: or : sequence.  */
5560	      if (*p == '#')
5561		p += 2;
5562	      else
5563		p++;
5564
5565	      /* Try to parse a group relocation.  Anything else is an
5566		 error.  */
5567	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
5568		{
5569		  inst.error = _("unknown group relocation");
5570		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5571		}
5572
5573	      /* We now have the group relocation table entry corresponding to
5574		 the name in the assembler source.  Next, we parse the
5575		 expression.  */
5576	      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5577		return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5578
5579	      /* Record the relocation type.  */
5580	      switch (group_type)
5581		{
5582		  case GROUP_LDR:
5583		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
5584		    break;
5585
5586		  case GROUP_LDRS:
5587		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
5588		    break;
5589
5590		  case GROUP_LDC:
5591		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
5592		    break;
5593
5594		  default:
5595		    gas_assert (0);
5596		}
5597
5598	      if (inst.reloc.type == 0)
5599		{
5600		  inst.error = _("this group relocation is not allowed on this instruction");
5601		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5602		}
5603	    }
5604	  else
5605	    {
5606	      char *q = p;
5607	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5608		return PARSE_OPERAND_FAIL;
5609	      /* If the offset is 0, find out if it's a +0 or -0.  */
5610	      if (inst.reloc.exp.X_op == O_constant
5611		  && inst.reloc.exp.X_add_number == 0)
5612		{
5613		  skip_whitespace (q);
5614		  if (*q == '#')
5615		    {
5616		      q++;
5617		      skip_whitespace (q);
5618		    }
5619		  if (*q == '-')
5620		    inst.operands[i].negative = 1;
5621		}
5622	    }
5623	}
5624    }
5625  else if (skip_past_char (&p, ':') == SUCCESS)
5626    {
5627      /* FIXME: '@' should be used here, but it's filtered out by generic code
5628	 before we get to see it here. This may be subject to change.  */
5629      parse_operand_result result = parse_neon_alignment (&p, i);
5630
5631      if (result != PARSE_OPERAND_SUCCESS)
5632	return result;
5633    }
5634
5635  if (skip_past_char (&p, ']') == FAIL)
5636    {
5637      inst.error = _("']' expected");
5638      return PARSE_OPERAND_FAIL;
5639    }
5640
5641  if (skip_past_char (&p, '!') == SUCCESS)
5642    inst.operands[i].writeback = 1;
5643
5644  else if (skip_past_comma (&p) == SUCCESS)
5645    {
5646      if (skip_past_char (&p, '{') == SUCCESS)
5647	{
5648	  /* [Rn], {expr} - unindexed, with option */
5649	  if (parse_immediate (&p, &inst.operands[i].imm,
5650			       0, 255, TRUE) == FAIL)
5651	    return PARSE_OPERAND_FAIL;
5652
5653	  if (skip_past_char (&p, '}') == FAIL)
5654	    {
5655	      inst.error = _("'}' expected at end of 'option' field");
5656	      return PARSE_OPERAND_FAIL;
5657	    }
5658	  if (inst.operands[i].preind)
5659	    {
5660	      inst.error = _("cannot combine index with option");
5661	      return PARSE_OPERAND_FAIL;
5662	    }
5663	  *str = p;
5664	  return PARSE_OPERAND_SUCCESS;
5665	}
5666      else
5667	{
5668	  inst.operands[i].postind = 1;
5669	  inst.operands[i].writeback = 1;
5670
5671	  if (inst.operands[i].preind)
5672	    {
5673	      inst.error = _("cannot combine pre- and post-indexing");
5674	      return PARSE_OPERAND_FAIL;
5675	    }
5676
5677	  if (*p == '+') p++;
5678	  else if (*p == '-') p++, inst.operands[i].negative = 1;
5679
5680	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5681	    {
5682	      /* We might be using the immediate for alignment already. If we
5683		 are, OR the register number into the low-order bits.  */
5684	      if (inst.operands[i].immisalign)
5685		inst.operands[i].imm |= reg;
5686	      else
5687		inst.operands[i].imm = reg;
5688	      inst.operands[i].immisreg = 1;
5689
5690	      if (skip_past_comma (&p) == SUCCESS)
5691		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
5692		  return PARSE_OPERAND_FAIL;
5693	    }
5694	  else
5695	    {
5696	      char *q = p;
5697	      if (inst.operands[i].negative)
5698		{
5699		  inst.operands[i].negative = 0;
5700		  p--;
5701		}
5702	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5703		return PARSE_OPERAND_FAIL;
5704	      /* If the offset is 0, find out if it's a +0 or -0.  */
5705	      if (inst.reloc.exp.X_op == O_constant
5706		  && inst.reloc.exp.X_add_number == 0)
5707		{
5708		  skip_whitespace (q);
5709		  if (*q == '#')
5710		    {
5711		      q++;
5712		      skip_whitespace (q);
5713		    }
5714		  if (*q == '-')
5715		    inst.operands[i].negative = 1;
5716		}
5717	    }
5718	}
5719    }
5720
5721  /* If at this point neither .preind nor .postind is set, we have a
5722     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
5723  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
5724    {
5725      inst.operands[i].preind = 1;
5726      inst.reloc.exp.X_op = O_constant;
5727      inst.reloc.exp.X_add_number = 0;
5728    }
5729  *str = p;
5730  return PARSE_OPERAND_SUCCESS;
5731}
5732
5733static int
5734parse_address (char **str, int i)
5735{
5736  return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
5737	 ? SUCCESS : FAIL;
5738}
5739
5740static parse_operand_result
5741parse_address_group_reloc (char **str, int i, group_reloc_type type)
5742{
5743  return parse_address_main (str, i, 1, type);
5744}
5745
5746/* Parse an operand for a MOVW or MOVT instruction.  */
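/* For example, "movw r0, #:lower16:sym" selects BFD_RELOC_ARM_MOVW and
   "movt r0, #:upper16:sym" selects BFD_RELOC_ARM_MOVT; a plain constant
   such as "#1234" must lie in the range 0 .. 0xffff.  */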
5747static int
5748parse_half (char **str)
5749{
5750  char * p;
5751
5752  p = *str;
5753  skip_past_char (&p, '#');
5754  if (strncasecmp (p, ":lower16:", 9) == 0)
5755    inst.reloc.type = BFD_RELOC_ARM_MOVW;
5756  else if (strncasecmp (p, ":upper16:", 9) == 0)
5757    inst.reloc.type = BFD_RELOC_ARM_MOVT;
5758
5759  if (inst.reloc.type != BFD_RELOC_UNUSED)
5760    {
5761      p += 9;
5762      skip_whitespace (p);
5763    }
5764
5765  if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5766    return FAIL;
5767
5768  if (inst.reloc.type == BFD_RELOC_UNUSED)
5769    {
5770      if (inst.reloc.exp.X_op != O_constant)
5771	{
5772	  inst.error = _("constant expression expected");
5773	  return FAIL;
5774	}
5775      if (inst.reloc.exp.X_add_number < 0
5776	  || inst.reloc.exp.X_add_number > 0xffff)
5777	{
5778	  inst.error = _("immediate value out of range");
5779	  return FAIL;
5780	}
5781    }
5782  *str = p;
5783  return SUCCESS;
5784}
5785
5786/* Miscellaneous. */
5787
5788/* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
5789   or a bitmask suitable to be or-ed into the ARM msr instruction.  */
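/* For example, "APSR_nzcvq" as the destination of an MSR yields a mask
   with PSR_f set; on M-profile cores the special-register names are
   looked up in arm_v7m_psr_hsh instead.  */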
5790static int
5791parse_psr (char **str, bfd_boolean lhs)
5792{
5793  char *p;
5794  unsigned long psr_field;
5795  const struct asm_psr *psr;
5796  char *start;
5797  bfd_boolean is_apsr = FALSE;
5798  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);
5799
5800  /* PR gas/12698:  If the user has specified -march=all then m_profile will
5801     be TRUE, but we want to ignore it in this case as we are building for any
5802     CPU type, including non-m variants.  */
5803  if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
5804    m_profile = FALSE;
5805
5806  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
5807     feature for ease of use and backwards compatibility.  */
5808  p = *str;
5809  if (strncasecmp (p, "SPSR", 4) == 0)
5810    {
5811      if (m_profile)
5812	goto unsupported_psr;
5813
5814      psr_field = SPSR_BIT;
5815    }
5816  else if (strncasecmp (p, "CPSR", 4) == 0)
5817    {
5818      if (m_profile)
5819	goto unsupported_psr;
5820
5821      psr_field = 0;
5822    }
5823  else if (strncasecmp (p, "APSR", 4) == 0)
5824    {
5825      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
5826	 and ARMv7-R architecture CPUs.  */
5827      is_apsr = TRUE;
5828      psr_field = 0;
5829    }
5830  else if (m_profile)
5831    {
5832      start = p;
5833      do
5834	p++;
5835      while (ISALNUM (*p) || *p == '_');
5836
5837      if (strncasecmp (start, "iapsr", 5) == 0
5838	  || strncasecmp (start, "eapsr", 5) == 0
5839	  || strncasecmp (start, "xpsr", 4) == 0
5840	  || strncasecmp (start, "psr", 3) == 0)
5841	p = start + strcspn (start, "rR") + 1;
5842
5843      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
5844						  p - start);
5845
5846      if (!psr)
5847	return FAIL;
5848
5849      /* If APSR is being written, a bitfield may be specified.  Note that
5850	 APSR itself is handled above.  */
5851      if (psr->field <= 3)
5852	{
5853	  psr_field = psr->field;
5854	  is_apsr = TRUE;
5855	  goto check_suffix;
5856	}
5857
5858      *str = p;
5859      /* M-profile MSR instructions have the mask field set to "10", except
5860	 *PSR variants which modify APSR, which may use a different mask (and
5861	 have been handled already).  Do that by setting the PSR_f field
5862	 here.  */
5863      return psr->field | (lhs ? PSR_f : 0);
5864    }
5865  else
5866    goto unsupported_psr;
5867
5868  p += 4;
5869check_suffix:
5870  if (*p == '_')
5871    {
5872      /* A suffix follows.  */
5873      p++;
5874      start = p;
5875
5876      do
5877	p++;
5878      while (ISALNUM (*p) || *p == '_');
5879
5880      if (is_apsr)
5881	{
5882	  /* APSR uses a notation for bits, rather than fields.  */
5883	  unsigned int nzcvq_bits = 0;
5884	  unsigned int g_bit = 0;
5885	  char *bit;
5886
5887	  for (bit = start; bit != p; bit++)
5888	    {
5889	      switch (TOLOWER (*bit))
5890		{
5891		case 'n':
5892		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
5893		  break;
5894
5895		case 'z':
5896		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
5897		  break;
5898
5899		case 'c':
5900		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
5901		  break;
5902
5903		case 'v':
5904		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
5905		  break;
5906
5907		case 'q':
5908		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
5909		  break;
5910
5911		case 'g':
5912		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
5913		  break;
5914
5915		default:
5916		  inst.error = _("unexpected bit specified after APSR");
5917		  return FAIL;
5918		}
5919	    }
5920
5921	  if (nzcvq_bits == 0x1f)
5922	    psr_field |= PSR_f;
5923
5924	  if (g_bit == 0x1)
5925	    {
5926	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
5927		{
5928		  inst.error = _("selected processor does not "
5929				 "support DSP extension");
5930		  return FAIL;
5931		}
5932
5933	      psr_field |= PSR_s;
5934	    }
5935
5936	  if ((nzcvq_bits & 0x20) != 0
5937	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
5938	      || (g_bit & 0x2) != 0)
5939	    {
5940	      inst.error = _("bad bitmask specified after APSR");
5941	      return FAIL;
5942	    }
5943	}
5944      else
5945	{
5946	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
5947						      p - start);
5948	  if (!psr)
5949	    goto error;
5950
5951	  psr_field |= psr->field;
5952	}
5953    }
5954  else
5955    {
5956      if (ISALNUM (*p))
5957	goto error;    /* Garbage after "[CS]PSR".  */
5958
5959      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
5960	 is deprecated, but allow it anyway.  */
5961      if (is_apsr && lhs)
5962	{
5963	  psr_field |= PSR_f;
5964	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
5965		       "deprecated"));
5966	}
5967      else if (!m_profile)
5968	/* These bits are never right for M-profile devices: don't set them
5969	   (only code paths which read/write APSR reach here).  */
5970	psr_field |= (PSR_c | PSR_f);
5971    }
5972  *str = p;
5973  return psr_field;
5974
5975 unsupported_psr:
5976  inst.error = _("selected processor does not support requested special "
5977		 "purpose register");
5978  return FAIL;
5979
5980 error:
5981  inst.error = _("flag for {c}psr instruction expected");
5982  return FAIL;
5983}
5984
5985/* Parse the flags argument to CPSI[ED].  Returns FAIL on error, or a
5986   value suitable for splatting into the AIF field of the instruction.	*/
5987
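/* For example (illustrative), the flag string in "cpsid if" parses to
   0x2 | 0x1 == 0x3, and "a" alone to 0x4; an empty flag string is rejected
   via the saw_a_flag check below.  */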
5988static int
5989parse_cps_flags (char **str)
5990{
5991  int val = 0;
5992  int saw_a_flag = 0;
5993  char *s = *str;
5994
5995  for (;;)
5996    switch (*s++)
5997      {
5998      case '\0': case ',':
5999	goto done;
6000
6001      case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
6002      case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
6003      case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
6004
6005      default:
6006	inst.error = _("unrecognized CPS flag");
6007	return FAIL;
6008      }
6009
6010 done:
6011  if (saw_a_flag == 0)
6012    {
6013      inst.error = _("missing CPS flags");
6014      return FAIL;
6015    }
6016
6017  *str = s - 1;
6018  return val;
6019}
6020
6021/* Parse an endian specifier ("BE" or "LE", case insensitive);
6022   returns 0 for big-endian, 1 for little-endian, FAIL for an error.  */
6023
6024static int
6025parse_endian_specifier (char **str)
6026{
6027  int little_endian;
6028  char *s = *str;
6029
6030	  if (strncasecmp (s, "BE", 2) == 0)
6031	    little_endian = 0;
6032	  else if (strncasecmp (s, "LE", 2) == 0)
6033	    little_endian = 1;
6034  else
6035    {
6036      inst.error = _("valid endian specifiers are be or le");
6037      return FAIL;
6038    }
6039
6040  if (ISALNUM (s[2]) || s[2] == '_')
6041    {
6042      inst.error = _("valid endian specifiers are be or le");
6043      return FAIL;
6044    }
6045
6046  *str = s + 2;
6047  return little_endian;
6048}
6049
6050/* Parse a rotation specifier: ROR #0, #8, #16, #24.  Returns a value
6051   suitable for poking into the rotate field of an sxt or sxta
6052   instruction, or FAIL on error.  */
6053
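/* E.g. (illustrative) "ROR #16" yields 0x2 here, ready to drop into the
   rotate field of an sxt/sxta encoding.  */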
6054static int
6055parse_ror (char **str)
6056{
6057  int rot;
6058  char *s = *str;
6059
6060  if (strncasecmp (s, "ROR", 3) == 0)
6061    s += 3;
6062  else
6063    {
6064      inst.error = _("missing rotation field after comma");
6065      return FAIL;
6066    }
6067
6068  if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
6069    return FAIL;
6070
6071  switch (rot)
6072    {
6073    case  0: *str = s; return 0x0;
6074    case  8: *str = s; return 0x1;
6075    case 16: *str = s; return 0x2;
6076    case 24: *str = s; return 0x3;
6077
6078    default:
6079      inst.error = _("rotation can only be 0, 8, 16, or 24");
6080      return FAIL;
6081    }
6082}
6083
6084/* Parse a conditional code (from conds[] below).  The value returned is in the
6085   range 0 .. 14, or FAIL.  */
6086static int
6087parse_cond (char **str)
6088{
6089  char *q;
6090  const struct asm_cond *c;
6091  int n;
6092  /* Condition codes are always 2 characters, so matching up to
6093     3 characters is sufficient.  */
6094  char cond[3];
6095
6096  q = *str;
6097  n = 0;
6098  while (ISALPHA (*q) && n < 3)
6099    {
6100      cond[n] = TOLOWER (*q);
6101      q++;
6102      n++;
6103    }
6104
6105  c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
6106  if (!c)
6107    {
6108      inst.error = _("condition required");
6109      return FAIL;
6110    }
6111
6112  *str = q;
6113  return c->value;
6114}
6115
6116/* Record a use of the given feature.  */
6117static void
6118record_feature_use (const arm_feature_set *feature)
6119{
6120  if (thumb_mode)
6121    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
6122  else
6123    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
6124}
6125
6126/* If the given feature is available in the selected CPU, mark it as used.
6127   Returns TRUE iff the feature is available.  */
6128static bfd_boolean
6129mark_feature_used (const arm_feature_set *feature)
6130{
6131  /* Ensure the option is valid on the current architecture.  */
6132  if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
6133    return FALSE;
6134
6135	  /* Add the appropriate architecture feature for the barrier option used.  */
6137  record_feature_use (feature);
6138
6139  return TRUE;
6140}
6141
6142/* Parse an option for a barrier instruction.  Returns the encoding for the
6143   option, or FAIL.  */
6144static int
6145parse_barrier (char **str)
6146{
6147  char *p, *q;
6148  const struct asm_barrier_opt *o;
6149
6150  p = q = *str;
6151  while (ISALPHA (*q))
6152    q++;
6153
6154  o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
6155						    q - p);
6156  if (!o)
6157    return FAIL;
6158
6159  if (!mark_feature_used (&o->arch))
6160    return FAIL;
6161
6162  *str = q;
6163  return o->value;
6164}
6165
6166/* Parse the operands of a table branch instruction.  Similar to a memory
6167   operand.  */
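/* Typical inputs (illustrative): "[r0, r1]" for TBB and "[r0, r1, lsl #1]"
   for TBH; any shift other than LSL #1 is rejected below.  */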
6168static int
6169parse_tb (char **str)
6170{
6171  char * p = *str;
6172  int reg;
6173
6174  if (skip_past_char (&p, '[') == FAIL)
6175    {
6176      inst.error = _("'[' expected");
6177      return FAIL;
6178    }
6179
6180  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6181    {
6182      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6183      return FAIL;
6184    }
6185  inst.operands[0].reg = reg;
6186
6187  if (skip_past_comma (&p) == FAIL)
6188    {
6189      inst.error = _("',' expected");
6190      return FAIL;
6191    }
6192
6193  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6194    {
6195      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6196      return FAIL;
6197    }
6198  inst.operands[0].imm = reg;
6199
6200  if (skip_past_comma (&p) == SUCCESS)
6201    {
6202      if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
6203	return FAIL;
6204      if (inst.reloc.exp.X_add_number != 1)
6205	{
6206	  inst.error = _("invalid shift");
6207	  return FAIL;
6208	}
6209      inst.operands[0].shifted = 1;
6210    }
6211
6212  if (skip_past_char (&p, ']') == FAIL)
6213    {
6214      inst.error = _("']' expected");
6215      return FAIL;
6216    }
6217  *str = p;
6218  return SUCCESS;
6219}
6220
6221/* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
6222   information on the types the operands can take and how they are encoded.
6223   Up to four operands may be read; this function handles setting the
6224   ".present" field for each read operand itself.
6225   Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
6226   else returns FAIL.  */
6227
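/* For example (illustrative), "vmov.32 d0[1], r1" is handled by the scalar
   branch below (case 4), while "vmov d0, r2, r3" follows case 5.  */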
6228static int
6229parse_neon_mov (char **str, int *which_operand)
6230{
6231  int i = *which_operand, val;
6232  enum arm_reg_type rtype;
6233  char *ptr = *str;
6234  struct neon_type_el optype;
6235
6236  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
6237    {
6238      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
6239      inst.operands[i].reg = val;
6240      inst.operands[i].isscalar = 1;
6241      inst.operands[i].vectype = optype;
6242      inst.operands[i++].present = 1;
6243
6244      if (skip_past_comma (&ptr) == FAIL)
6245	goto wanted_comma;
6246
6247      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6248	goto wanted_arm;
6249
6250      inst.operands[i].reg = val;
6251      inst.operands[i].isreg = 1;
6252      inst.operands[i].present = 1;
6253    }
6254  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
6255	   != FAIL)
6256    {
6257      /* Cases 0, 1, 2, 3, 5 (D only).  */
6258      if (skip_past_comma (&ptr) == FAIL)
6259	goto wanted_comma;
6260
6261      inst.operands[i].reg = val;
6262      inst.operands[i].isreg = 1;
6263      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
6264      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
6265      inst.operands[i].isvec = 1;
6266      inst.operands[i].vectype = optype;
6267      inst.operands[i++].present = 1;
6268
6269      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
6270	{
6271	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
6272	     Case 13: VMOV <Sd>, <Rm>  */
6273	  inst.operands[i].reg = val;
6274	  inst.operands[i].isreg = 1;
6275	  inst.operands[i].present = 1;
6276
6277	  if (rtype == REG_TYPE_NQ)
6278	    {
6279	      first_error (_("can't use Neon quad register here"));
6280	      return FAIL;
6281	    }
6282	  else if (rtype != REG_TYPE_VFS)
6283	    {
6284	      i++;
6285	      if (skip_past_comma (&ptr) == FAIL)
6286		goto wanted_comma;
6287	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6288		goto wanted_arm;
6289	      inst.operands[i].reg = val;
6290	      inst.operands[i].isreg = 1;
6291	      inst.operands[i].present = 1;
6292	    }
6293	}
6294      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
6295					   &optype)) != FAIL)
6296	{
6297	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
6298	     Case 1: VMOV<c><q> <Dd>, <Dm>
6299	     Case 8: VMOV.F32 <Sd>, <Sm>
6300	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */
6301
6302	  inst.operands[i].reg = val;
6303	  inst.operands[i].isreg = 1;
6304	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
6305	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
6306	  inst.operands[i].isvec = 1;
6307	  inst.operands[i].vectype = optype;
6308	  inst.operands[i].present = 1;
6309
6310	  if (skip_past_comma (&ptr) == SUCCESS)
6311	    {
6312	      /* Case 15.  */
6313	      i++;
6314
6315	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6316		goto wanted_arm;
6317
6318	      inst.operands[i].reg = val;
6319	      inst.operands[i].isreg = 1;
6320	      inst.operands[i++].present = 1;
6321
6322	      if (skip_past_comma (&ptr) == FAIL)
6323		goto wanted_comma;
6324
6325	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6326		goto wanted_arm;
6327
6328	      inst.operands[i].reg = val;
6329	      inst.operands[i].isreg = 1;
6330	      inst.operands[i].present = 1;
6331	    }
6332	}
6333      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
6334	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
6335	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
6336	     Case 10: VMOV.F32 <Sd>, #<imm>
6337	     Case 11: VMOV.F64 <Dd>, #<imm>  */
6338	inst.operands[i].immisfloat = 1;
6339      else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
6340	       == SUCCESS)
6341	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
6342	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
6343	;
6344      else
6345	{
6346	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
6347	  return FAIL;
6348	}
6349    }
6350  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
6351    {
6352      /* Cases 6, 7.  */
6353      inst.operands[i].reg = val;
6354      inst.operands[i].isreg = 1;
6355      inst.operands[i++].present = 1;
6356
6357      if (skip_past_comma (&ptr) == FAIL)
6358	goto wanted_comma;
6359
6360      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
6361	{
6362	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
6363	  inst.operands[i].reg = val;
6364	  inst.operands[i].isscalar = 1;
6365	  inst.operands[i].present = 1;
6366	  inst.operands[i].vectype = optype;
6367	}
6368      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
6369	{
6370	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
6371	  inst.operands[i].reg = val;
6372	  inst.operands[i].isreg = 1;
6373	  inst.operands[i++].present = 1;
6374
6375	  if (skip_past_comma (&ptr) == FAIL)
6376	    goto wanted_comma;
6377
6378	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
6379	      == FAIL)
6380	    {
6381	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
6382	      return FAIL;
6383	    }
6384
6385	  inst.operands[i].reg = val;
6386	  inst.operands[i].isreg = 1;
6387	  inst.operands[i].isvec = 1;
6388	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
6389	  inst.operands[i].vectype = optype;
6390	  inst.operands[i].present = 1;
6391
6392	  if (rtype == REG_TYPE_VFS)
6393	    {
6394	      /* Case 14.  */
6395	      i++;
6396	      if (skip_past_comma (&ptr) == FAIL)
6397		goto wanted_comma;
6398	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
6399					      &optype)) == FAIL)
6400		{
6401		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
6402		  return FAIL;
6403		}
6404	      inst.operands[i].reg = val;
6405	      inst.operands[i].isreg = 1;
6406	      inst.operands[i].isvec = 1;
6407	      inst.operands[i].issingle = 1;
6408	      inst.operands[i].vectype = optype;
6409	      inst.operands[i].present = 1;
6410	    }
6411	}
6412      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
6413	       != FAIL)
6414	{
6415	  /* Case 13.  */
6416	  inst.operands[i].reg = val;
6417	  inst.operands[i].isreg = 1;
6418	  inst.operands[i].isvec = 1;
6419	  inst.operands[i].issingle = 1;
6420	  inst.operands[i].vectype = optype;
6421	  inst.operands[i].present = 1;
6422	}
6423    }
6424  else
6425    {
6426      first_error (_("parse error"));
6427      return FAIL;
6428    }
6429
6430  /* Successfully parsed the operands. Update args.  */
6431  *which_operand = i;
6432  *str = ptr;
6433  return SUCCESS;
6434
6435 wanted_comma:
6436  first_error (_("expected comma"));
6437  return FAIL;
6438
6439 wanted_arm:
6440  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
6441  return FAIL;
6442}
6443
6444/* Use this macro when the operand constraints are different
6445   for ARM and THUMB (e.g. ldrd).  */
6446#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
6447	((arm_operand) | ((thumb_operand) << 16))
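/* For instance (illustrative), OP_RRnpc_npcsp below expands to
   OP_RRnpc | (OP_RRnpcsp << 16); parse_operands later selects the low or
   high half depending on whether ARM or Thumb code is being assembled.  */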
6448
6449/* Matcher codes for parse_operands.  */
6450enum operand_parse_code
6451{
6452  OP_stop,	/* end of line */
6453
6454  OP_RR,	/* ARM register */
6455  OP_RRnpc,	/* ARM register, not r15 */
6456  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
6457  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
6458  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
6459		   optional trailing ! */
6460  OP_RRw,	/* ARM register, not r15, optional trailing ! */
6461  OP_RCP,	/* Coprocessor number */
6462  OP_RCN,	/* Coprocessor register */
6463  OP_RF,	/* FPA register */
6464  OP_RVS,	/* VFP single precision register */
6465  OP_RVD,	/* VFP double precision register (0..15) */
6466  OP_RND,       /* Neon double precision register (0..31) */
6467  OP_RNQ,	/* Neon quad precision register */
6468  OP_RVSD,	/* VFP single or double precision register */
6469  OP_RNDQ,      /* Neon double or quad precision register */
6470  OP_RNSDQ,	/* Neon single, double or quad precision register */
6471  OP_RNSC,      /* Neon scalar D[X] */
6472  OP_RVC,	/* VFP control register */
6473  OP_RMF,	/* Maverick F register */
6474  OP_RMD,	/* Maverick D register */
6475  OP_RMFX,	/* Maverick FX register */
6476  OP_RMDX,	/* Maverick DX register */
6477  OP_RMAX,	/* Maverick AX register */
6478  OP_RMDS,	/* Maverick DSPSC register */
6479  OP_RIWR,	/* iWMMXt wR register */
6480  OP_RIWC,	/* iWMMXt wC register */
6481  OP_RIWG,	/* iWMMXt wCG register */
6482  OP_RXA,	/* XScale accumulator register */
6483
6484  OP_REGLST,	/* ARM register list */
6485  OP_VRSLST,	/* VFP single-precision register list */
6486  OP_VRDLST,	/* VFP double-precision register list */
6487  OP_VRSDLST,   /* VFP single or double-precision register list (& quad) */
6488  OP_NRDLST,    /* Neon double-precision register list (d0-d31, qN aliases) */
6489  OP_NSTRLST,   /* Neon element/structure list */
6490
6491  OP_RNDQ_I0,   /* Neon D or Q reg, or immediate zero.  */
6492  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
6493  OP_RSVD_FI0, /* VFP S or D reg, or floating point immediate zero.  */
6494  OP_RR_RNSC,   /* ARM reg or Neon scalar.  */
6495  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
6496  OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar.  */
6497  OP_RND_RNSC,  /* Neon D reg, or Neon scalar.  */
6498  OP_VMOV,      /* Neon VMOV operands.  */
6499  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
6500  OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift.  */
6501  OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */
6502
6503  OP_I0,        /* immediate zero */
6504  OP_I7,	/* immediate value 0 .. 7 */
6505  OP_I15,	/*		   0 .. 15 */
6506  OP_I16,	/*		   1 .. 16 */
6507  OP_I16z,      /*                 0 .. 16 */
6508  OP_I31,	/*		   0 .. 31 */
6509  OP_I31w,	/*		   0 .. 31, optional trailing ! */
6510  OP_I32,	/*		   1 .. 32 */
6511  OP_I32z,	/*		   0 .. 32 */
6512  OP_I63,	/*		   0 .. 63 */
6513  OP_I63s,	/*		 -64 .. 63 */
6514  OP_I64,	/*		   1 .. 64 */
6515  OP_I64z,	/*		   0 .. 64 */
6516  OP_I255,	/*		   0 .. 255 */
6517
6518  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
6519  OP_I7b,	/*			       0 .. 7 */
6520  OP_I15b,	/*			       0 .. 15 */
6521  OP_I31b,	/*			       0 .. 31 */
6522
6523  OP_SH,	/* shifter operand */
6524  OP_SHG,	/* shifter operand with possible group relocation */
6525  OP_ADDR,	/* Memory address expression (any mode) */
6526  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
6527  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
6528  OP_ADDRGLDC,  /* Mem addr expr (any mode) with possible LDC group reloc */
6529  OP_EXP,	/* arbitrary expression */
6530  OP_EXPi,	/* same, with optional immediate prefix */
6531  OP_EXPr,	/* same, with optional relocation suffix */
6532  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */
6533
6534  OP_CPSF,	/* CPS flags */
6535  OP_ENDI,	/* Endianness specifier */
6536  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
6537  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
6538  OP_COND,	/* conditional code */
6539  OP_TB,	/* Table branch.  */
6540
6541  OP_APSR_RR,   /* ARM register or "APSR_nzcv".  */
6542
6543  OP_RRnpc_I0,	/* ARM register or literal 0 */
6544  OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
6545  OP_RR_EXi,	/* ARM register or expression with imm prefix */
6546  OP_RF_IF,	/* FPA register or immediate */
6547  OP_RIWR_RIWC, /* iWMMXt R or C reg */
6548  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */
6549
6550  /* Optional operands.	 */
6551  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
6552  OP_oI31b,	 /*				0 .. 31 */
6553  OP_oI32b,      /*                             1 .. 32 */
6554  OP_oI32z,      /*                             0 .. 32 */
6555  OP_oIffffb,	 /*				0 .. 65535 */
6556  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */
6557
6558  OP_oRR,	 /* ARM register */
6559  OP_oRRnpc,	 /* ARM register, not the PC */
6560  OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
6561  OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
6562  OP_oRND,       /* Optional Neon double precision register */
6563  OP_oRNQ,       /* Optional Neon quad precision register */
6564  OP_oRNDQ,      /* Optional Neon double or quad precision register */
6565  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
6566  OP_oSHll,	 /* LSL immediate */
6567  OP_oSHar,	 /* ASR immediate */
6568  OP_oSHllar,	 /* LSL or ASR immediate */
6569  OP_oROR,	 /* ROR 0/8/16/24 */
6570  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */
6571
6572  /* Some pre-defined mixed (ARM/THUMB) operands.  */
6573  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
6574  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
6575  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),
6576
6577  OP_FIRST_OPTIONAL = OP_oI7b
6578};
6579
6580/* Generic instruction operand parser.	This does no encoding and no
6581   semantic validation; it merely squirrels values away in the inst
6582   structure.  Returns SUCCESS or FAIL depending on whether the
6583   specified grammar matched.  */
6584static int
6585parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
6586{
6587  unsigned const int *upat = pattern;
6588  char *backtrack_pos = 0;
6589  const char *backtrack_error = 0;
6590  int i, val = 0, backtrack_index = 0;
6591  enum arm_reg_type rtype;
6592  parse_operand_result result;
6593  unsigned int op_parse_code;
6594
6595#define po_char_or_fail(chr)			\
6596  do						\
6597    {						\
6598      if (skip_past_char (&str, chr) == FAIL)	\
6599	goto bad_args;				\
6600    }						\
6601  while (0)
6602
6603#define po_reg_or_fail(regtype)					\
6604  do								\
6605    {								\
6606      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
6607				 & inst.operands[i].vectype);	\
6608      if (val == FAIL)						\
6609	{							\
6610	  first_error (_(reg_expected_msgs[regtype]));		\
6611	  goto failure;						\
6612	}							\
6613      inst.operands[i].reg = val;				\
6614      inst.operands[i].isreg = 1;				\
6615      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
6616      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
6617      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
6618			     || rtype == REG_TYPE_VFD		\
6619			     || rtype == REG_TYPE_NQ);		\
6620    }								\
6621  while (0)
6622
6623#define po_reg_or_goto(regtype, label)				\
6624  do								\
6625    {								\
6626      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
6627				 & inst.operands[i].vectype);	\
6628      if (val == FAIL)						\
6629	goto label;						\
6630								\
6631      inst.operands[i].reg = val;				\
6632      inst.operands[i].isreg = 1;				\
6633      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
6634      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
6635      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
6636			     || rtype == REG_TYPE_VFD		\
6637			     || rtype == REG_TYPE_NQ);		\
6638    }								\
6639  while (0)
6640
6641#define po_imm_or_fail(min, max, popt)				\
6642  do								\
6643    {								\
6644      if (parse_immediate (&str, &val, min, max, popt) == FAIL)	\
6645	goto failure;						\
6646      inst.operands[i].imm = val;				\
6647    }								\
6648  while (0)
6649
6650#define po_scalar_or_goto(elsz, label)					\
6651  do									\
6652    {									\
6653      val = parse_scalar (& str, elsz, & inst.operands[i].vectype);	\
6654      if (val == FAIL)							\
6655	goto label;							\
6656      inst.operands[i].reg = val;					\
6657      inst.operands[i].isscalar = 1;					\
6658    }									\
6659  while (0)
6660
6661#define po_misc_or_fail(expr)			\
6662  do						\
6663    {						\
6664      if (expr)					\
6665	goto failure;				\
6666    }						\
6667  while (0)
6668
6669#define po_misc_or_fail_no_backtrack(expr)		\
6670  do							\
6671    {							\
6672      result = expr;					\
6673      if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)	\
6674	backtrack_pos = 0;				\
6675      if (result != PARSE_OPERAND_SUCCESS)		\
6676	goto failure;					\
6677    }							\
6678  while (0)
6679
6680#define po_barrier_or_imm(str)				   \
6681  do							   \
6682    {						 	   \
6683      val = parse_barrier (&str);			   \
6684      if (val == FAIL && ! ISALPHA (*str))		   \
6685	goto immediate;					   \
6686      if (val == FAIL					   \
6687	  /* ISB can only take SY as an option.  */	   \
6688	  || ((inst.instruction & 0xf0) == 0x60		   \
6689	       && val != 0xf))				   \
6690	{						   \
6691	   inst.error = _("invalid barrier type");	   \
6692	   backtrack_pos = 0;				   \
6693	   goto failure;				   \
6694	}						   \
6695    }							   \
6696  while (0)
6697
6698  skip_whitespace (str);
6699
6700  for (i = 0; upat[i] != OP_stop; i++)
6701    {
6702      op_parse_code = upat[i];
6703      if (op_parse_code >= 1<<16)
6704	op_parse_code = thumb ? (op_parse_code >> 16)
6705				: (op_parse_code & ((1<<16)-1));
6706
6707      if (op_parse_code >= OP_FIRST_OPTIONAL)
6708	{
6709	  /* Remember where we are in case we need to backtrack.  */
6710	  gas_assert (!backtrack_pos);
6711	  backtrack_pos = str;
6712	  backtrack_error = inst.error;
6713	  backtrack_index = i;
6714	}
6715
6716      if (i > 0 && (i > 1 || inst.operands[0].present))
6717	po_char_or_fail (',');
6718
6719      switch (op_parse_code)
6720	{
6721	  /* Registers */
6722	case OP_oRRnpc:
6723	case OP_oRRnpcsp:
6724	case OP_RRnpc:
6725	case OP_RRnpcsp:
6726	case OP_oRR:
6727	case OP_RR:    po_reg_or_fail (REG_TYPE_RN);	  break;
6728	case OP_RCP:   po_reg_or_fail (REG_TYPE_CP);	  break;
6729	case OP_RCN:   po_reg_or_fail (REG_TYPE_CN);	  break;
6730	case OP_RF:    po_reg_or_fail (REG_TYPE_FN);	  break;
6731	case OP_RVS:   po_reg_or_fail (REG_TYPE_VFS);	  break;
6732	case OP_RVD:   po_reg_or_fail (REG_TYPE_VFD);	  break;
6733	case OP_oRND:
6734	case OP_RND:   po_reg_or_fail (REG_TYPE_VFD);	  break;
6735	case OP_RVC:
6736	  po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
6737	  break;
6738	  /* Also accept generic coprocessor regs for unknown registers.  */
6739	  coproc_reg:
6740	  po_reg_or_fail (REG_TYPE_CN);
6741	  break;
6742	case OP_RMF:   po_reg_or_fail (REG_TYPE_MVF);	  break;
6743	case OP_RMD:   po_reg_or_fail (REG_TYPE_MVD);	  break;
6744	case OP_RMFX:  po_reg_or_fail (REG_TYPE_MVFX);	  break;
6745	case OP_RMDX:  po_reg_or_fail (REG_TYPE_MVDX);	  break;
6746	case OP_RMAX:  po_reg_or_fail (REG_TYPE_MVAX);	  break;
6747	case OP_RMDS:  po_reg_or_fail (REG_TYPE_DSPSC);	  break;
6748	case OP_RIWR:  po_reg_or_fail (REG_TYPE_MMXWR);	  break;
6749	case OP_RIWC:  po_reg_or_fail (REG_TYPE_MMXWC);	  break;
6750	case OP_RIWG:  po_reg_or_fail (REG_TYPE_MMXWCG);  break;
6751	case OP_RXA:   po_reg_or_fail (REG_TYPE_XSCALE);  break;
6752	case OP_oRNQ:
6753	case OP_RNQ:   po_reg_or_fail (REG_TYPE_NQ);      break;
6754	case OP_oRNDQ:
6755	case OP_RNDQ:  po_reg_or_fail (REG_TYPE_NDQ);     break;
6756	case OP_RVSD:  po_reg_or_fail (REG_TYPE_VFSD);    break;
6757	case OP_oRNSDQ:
6758	case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ);    break;
6759
6760	/* Neon scalar. Using an element size of 8 means that some invalid
6761	   scalars are accepted here, so deal with those in later code.  */
6762	case OP_RNSC:  po_scalar_or_goto (8, failure);    break;
6763
6764	case OP_RNDQ_I0:
6765	  {
6766	    po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
6767	    break;
6768	    try_imm0:
6769	    po_imm_or_fail (0, 0, TRUE);
6770	  }
6771	  break;
6772
6773	case OP_RVSD_I0:
6774	  po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
6775	  break;
6776
6777	case OP_RSVD_FI0:
6778	  {
6779	    po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
6780	    break;
6781	    try_ifimm0:
6782	    if (parse_ifimm_zero (&str))
6783	      inst.operands[i].imm = 0;
6784	    else
6785	    {
6786	      inst.error
6787	        = _("only floating point zero is allowed as immediate value");
6788	      goto failure;
6789	    }
6790	  }
6791	  break;
6792
6793	case OP_RR_RNSC:
6794	  {
6795	    po_scalar_or_goto (8, try_rr);
6796	    break;
6797	    try_rr:
6798	    po_reg_or_fail (REG_TYPE_RN);
6799	  }
6800	  break;
6801
6802	case OP_RNSDQ_RNSC:
6803	  {
6804	    po_scalar_or_goto (8, try_nsdq);
6805	    break;
6806	    try_nsdq:
6807	    po_reg_or_fail (REG_TYPE_NSDQ);
6808	  }
6809	  break;
6810
6811	case OP_RNDQ_RNSC:
6812	  {
6813	    po_scalar_or_goto (8, try_ndq);
6814	    break;
6815	    try_ndq:
6816	    po_reg_or_fail (REG_TYPE_NDQ);
6817	  }
6818	  break;
6819
6820	case OP_RND_RNSC:
6821	  {
6822	    po_scalar_or_goto (8, try_vfd);
6823	    break;
6824	    try_vfd:
6825	    po_reg_or_fail (REG_TYPE_VFD);
6826	  }
6827	  break;
6828
6829	case OP_VMOV:
6830	  /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6831	     not careful then bad things might happen.  */
6832	  po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
6833	  break;
6834
6835	case OP_RNDQ_Ibig:
6836	  {
6837	    po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
6838	    break;
6839	    try_immbig:
6840	    /* There's a possibility of getting a 64-bit immediate here, so
6841	       we need special handling.  */
6842	    if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
6843		== FAIL)
6844	      {
6845		inst.error = _("immediate value is out of range");
6846		goto failure;
6847	      }
6848	  }
6849	  break;
6850
6851	case OP_RNDQ_I63b:
6852	  {
6853	    po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
6854	    break;
6855	    try_shimm:
6856	    po_imm_or_fail (0, 63, TRUE);
6857	  }
6858	  break;
6859
6860	case OP_RRnpcb:
6861	  po_char_or_fail ('[');
6862	  po_reg_or_fail  (REG_TYPE_RN);
6863	  po_char_or_fail (']');
6864	  break;
6865
6866	case OP_RRnpctw:
6867	case OP_RRw:
6868	case OP_oRRw:
6869	  po_reg_or_fail (REG_TYPE_RN);
6870	  if (skip_past_char (&str, '!') == SUCCESS)
6871	    inst.operands[i].writeback = 1;
6872	  break;
6873
6874	  /* Immediates */
6875	case OP_I7:	 po_imm_or_fail (  0,	   7, FALSE);	break;
6876	case OP_I15:	 po_imm_or_fail (  0,	  15, FALSE);	break;
6877	case OP_I16:	 po_imm_or_fail (  1,	  16, FALSE);	break;
6878	case OP_I16z:	 po_imm_or_fail (  0,     16, FALSE);   break;
6879	case OP_I31:	 po_imm_or_fail (  0,	  31, FALSE);	break;
6880	case OP_I32:	 po_imm_or_fail (  1,	  32, FALSE);	break;
6881	case OP_I32z:	 po_imm_or_fail (  0,     32, FALSE);   break;
6882	case OP_I63s:	 po_imm_or_fail (-64,	  63, FALSE);	break;
6883	case OP_I63:	 po_imm_or_fail (  0,     63, FALSE);   break;
6884	case OP_I64:	 po_imm_or_fail (  1,     64, FALSE);   break;
6885	case OP_I64z:	 po_imm_or_fail (  0,     64, FALSE);   break;
6886	case OP_I255:	 po_imm_or_fail (  0,	 255, FALSE);	break;
6887
6888	case OP_I4b:	 po_imm_or_fail (  1,	   4, TRUE);	break;
6889	case OP_oI7b:
6890	case OP_I7b:	 po_imm_or_fail (  0,	   7, TRUE);	break;
6891	case OP_I15b:	 po_imm_or_fail (  0,	  15, TRUE);	break;
6892	case OP_oI31b:
6893	case OP_I31b:	 po_imm_or_fail (  0,	  31, TRUE);	break;
6894	case OP_oI32b:   po_imm_or_fail (  1,     32, TRUE);    break;
6895	case OP_oI32z:   po_imm_or_fail (  0,     32, TRUE);    break;
6896	case OP_oIffffb: po_imm_or_fail (  0, 0xffff, TRUE);	break;
6897
6898	  /* Immediate variants */
6899	case OP_oI255c:
6900	  po_char_or_fail ('{');
6901	  po_imm_or_fail (0, 255, TRUE);
6902	  po_char_or_fail ('}');
6903	  break;
6904
6905	case OP_I31w:
6906	  /* The expression parser chokes on a trailing !, so we have
6907	     to find it first and zap it.  */
6908	  {
6909	    char *s = str;
6910	    while (*s && *s != ',')
6911	      s++;
6912	    if (s[-1] == '!')
6913	      {
6914		s[-1] = '\0';
6915		inst.operands[i].writeback = 1;
6916	      }
6917	    po_imm_or_fail (0, 31, TRUE);
6918	    if (str == s - 1)
6919	      str = s;
6920	  }
6921	  break;
6922
6923	  /* Expressions */
6924	case OP_EXPi:	EXPi:
6925	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6926					      GE_OPT_PREFIX));
6927	  break;
6928
6929	case OP_EXP:
6930	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6931					      GE_NO_PREFIX));
6932	  break;
6933
6934	case OP_EXPr:	EXPr:
6935	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6936					      GE_NO_PREFIX));
6937	  if (inst.reloc.exp.X_op == O_symbol)
6938	    {
6939	      val = parse_reloc (&str);
6940	      if (val == -1)
6941		{
6942		  inst.error = _("unrecognized relocation suffix");
6943		  goto failure;
6944		}
6945	      else if (val != BFD_RELOC_UNUSED)
6946		{
6947		  inst.operands[i].imm = val;
6948		  inst.operands[i].hasreloc = 1;
6949		}
6950	    }
6951	  break;
6952
6953	  /* Operand for MOVW or MOVT.  */
6954	case OP_HALF:
6955	  po_misc_or_fail (parse_half (&str));
6956	  break;
6957
6958	  /* Register or expression.  */
6959	case OP_RR_EXr:	  po_reg_or_goto (REG_TYPE_RN, EXPr); break;
6960	case OP_RR_EXi:	  po_reg_or_goto (REG_TYPE_RN, EXPi); break;
6961
6962	  /* Register or immediate.  */
6963	case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0);   break;
6964	I0:		  po_imm_or_fail (0, 0, FALSE);	      break;
6965
6966	case OP_RF_IF:    po_reg_or_goto (REG_TYPE_FN, IF);   break;
6967	IF:
6968	  if (!is_immediate_prefix (*str))
6969	    goto bad_args;
6970	  str++;
6971	  val = parse_fpa_immediate (&str);
6972	  if (val == FAIL)
6973	    goto failure;
6974	  /* FPA immediates are encoded as registers 8-15.
6975	     parse_fpa_immediate has already applied the offset.  */
6976	  inst.operands[i].reg = val;
6977	  inst.operands[i].isreg = 1;
6978	  break;
6979
6980	case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
6981	I32z:		  po_imm_or_fail (0, 32, FALSE);	  break;
6982
6983	  /* Two kinds of register.  */
6984	case OP_RIWR_RIWC:
6985	  {
6986	    struct reg_entry *rege = arm_reg_parse_multi (&str);
6987	    if (!rege
6988		|| (rege->type != REG_TYPE_MMXWR
6989		    && rege->type != REG_TYPE_MMXWC
6990		    && rege->type != REG_TYPE_MMXWCG))
6991	      {
6992		inst.error = _("iWMMXt data or control register expected");
6993		goto failure;
6994	      }
6995	    inst.operands[i].reg = rege->number;
6996	    inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
6997	  }
6998	  break;
6999
7000	case OP_RIWC_RIWG:
7001	  {
7002	    struct reg_entry *rege = arm_reg_parse_multi (&str);
7003	    if (!rege
7004		|| (rege->type != REG_TYPE_MMXWC
7005		    && rege->type != REG_TYPE_MMXWCG))
7006	      {
7007		inst.error = _("iWMMXt control register expected");
7008		goto failure;
7009	      }
7010	    inst.operands[i].reg = rege->number;
7011	    inst.operands[i].isreg = 1;
7012	  }
7013	  break;
7014
7015	  /* Misc */
7016	case OP_CPSF:	 val = parse_cps_flags (&str);		break;
7017	case OP_ENDI:	 val = parse_endian_specifier (&str);	break;
7018	case OP_oROR:	 val = parse_ror (&str);		break;
7019	case OP_COND:	 val = parse_cond (&str);		break;
7020	case OP_oBARRIER_I15:
7021	  po_barrier_or_imm (str); break;
7022	  immediate:
7023	  if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
7024	    goto failure;
7025	  break;
7026
7027	case OP_wPSR:
7028	case OP_rPSR:
7029	  po_reg_or_goto (REG_TYPE_RNB, try_psr);
7030	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
7031	    {
7032	      inst.error = _("Banked registers are not available with this "
7033			     "architecture.");
7034	      goto failure;
7035	    }
7036	  break;
7037	  try_psr:
7038	  val = parse_psr (&str, op_parse_code == OP_wPSR);
7039	  break;
7040
7041	case OP_APSR_RR:
7042	  po_reg_or_goto (REG_TYPE_RN, try_apsr);
7043	  break;
7044	  try_apsr:
7045	  /* Parse "APSR_nzcv" operand (for FMSTAT-equivalent MRS
7046	     instruction).  */
7047	  if (strncasecmp (str, "APSR_", 5) == 0)
7048	    {
7049	      unsigned found = 0;
7050	      str += 5;
7051	      while (found < 15)
7052		switch (*str++)
7053		  {
7054		  case 'c': found = (found & 1) ? 16 : found | 1; break;
7055		  case 'n': found = (found & 2) ? 16 : found | 2; break;
7056		  case 'z': found = (found & 4) ? 16 : found | 4; break;
7057		  case 'v': found = (found & 8) ? 16 : found | 8; break;
7058		  default: found = 16;
7059		  }
7060	      if (found != 15)
7061		goto failure;
7062	      inst.operands[i].isvec = 1;
7063	      /* APSR_nzcv is encoded in instructions as if it were the REG_PC.  */
7064	      inst.operands[i].reg = REG_PC;
7065	    }
7066	  else
7067	    goto failure;
7068	  break;
7069
7070	case OP_TB:
7071	  po_misc_or_fail (parse_tb (&str));
7072	  break;
7073
7074	  /* Register lists.  */
7075	case OP_REGLST:
7076	  val = parse_reg_list (&str);
7077	  if (*str == '^')
7078	    {
7079	      inst.operands[i].writeback = 1;
7080	      str++;
7081	    }
7082	  break;
7083
7084	case OP_VRSLST:
7085	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
7086	  break;
7087
7088	case OP_VRDLST:
7089	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
7090	  break;
7091
7092	case OP_VRSDLST:
7093	  /* Allow Q registers too.  */
7094	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7095				    REGLIST_NEON_D);
7096	  if (val == FAIL)
7097	    {
7098	      inst.error = NULL;
7099	      val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7100					REGLIST_VFP_S);
7101	      inst.operands[i].issingle = 1;
7102	    }
7103	  break;
7104
7105	case OP_NRDLST:
7106	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7107				    REGLIST_NEON_D);
7108	  break;
7109
7110	case OP_NSTRLST:
7111	  val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7112					   &inst.operands[i].vectype);
7113	  break;
7114
7115	  /* Addressing modes */
7116	case OP_ADDR:
7117	  po_misc_or_fail (parse_address (&str, i));
7118	  break;
7119
7120	case OP_ADDRGLDR:
7121	  po_misc_or_fail_no_backtrack (
7122	    parse_address_group_reloc (&str, i, GROUP_LDR));
7123	  break;
7124
7125	case OP_ADDRGLDRS:
7126	  po_misc_or_fail_no_backtrack (
7127	    parse_address_group_reloc (&str, i, GROUP_LDRS));
7128	  break;
7129
7130	case OP_ADDRGLDC:
7131	  po_misc_or_fail_no_backtrack (
7132	    parse_address_group_reloc (&str, i, GROUP_LDC));
7133	  break;
7134
7135	case OP_SH:
7136	  po_misc_or_fail (parse_shifter_operand (&str, i));
7137	  break;
7138
7139	case OP_SHG:
7140	  po_misc_or_fail_no_backtrack (
7141	    parse_shifter_operand_group_reloc (&str, i));
7142	  break;
7143
7144	case OP_oSHll:
7145	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
7146	  break;
7147
7148	case OP_oSHar:
7149	  po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
7150	  break;
7151
7152	case OP_oSHllar:
7153	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
7154	  break;
7155
7156	default:
7157	  as_fatal (_("unhandled operand code %d"), op_parse_code);
7158	}
7159
7160      /* Various value-based sanity checks and shared operations.  We
7161	 do not signal immediate failures for the register constraints;
7162	 this allows a syntax error to take precedence.	 */
7163      switch (op_parse_code)
7164	{
7165	case OP_oRRnpc:
7166	case OP_RRnpc:
7167	case OP_RRnpcb:
7168	case OP_RRw:
7169	case OP_oRRw:
7170	case OP_RRnpc_I0:
7171	  if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
7172	    inst.error = BAD_PC;
7173	  break;
7174
7175	case OP_oRRnpcsp:
7176	case OP_RRnpcsp:
7177	  if (inst.operands[i].isreg)
7178	    {
7179	      if (inst.operands[i].reg == REG_PC)
7180		inst.error = BAD_PC;
7181	      else if (inst.operands[i].reg == REG_SP)
7182		inst.error = BAD_SP;
7183	    }
7184	  break;
7185
7186	case OP_RRnpctw:
7187	  if (inst.operands[i].isreg
7188	      && inst.operands[i].reg == REG_PC
7189	      && (inst.operands[i].writeback || thumb))
7190	    inst.error = BAD_PC;
7191	  break;
7192
7193	case OP_CPSF:
7194	case OP_ENDI:
7195	case OP_oROR:
7196	case OP_wPSR:
7197	case OP_rPSR:
7198	case OP_COND:
7199	case OP_oBARRIER_I15:
7200	case OP_REGLST:
7201	case OP_VRSLST:
7202	case OP_VRDLST:
7203	case OP_VRSDLST:
7204	case OP_NRDLST:
7205	case OP_NSTRLST:
7206	  if (val == FAIL)
7207	    goto failure;
7208	  inst.operands[i].imm = val;
7209	  break;
7210
7211	default:
7212	  break;
7213	}
7214
7215      /* If we get here, this operand was successfully parsed.	*/
7216      inst.operands[i].present = 1;
7217      continue;
7218
7219    bad_args:
7220      inst.error = BAD_ARGS;
7221
7222    failure:
7223      if (!backtrack_pos)
7224	{
7225	  /* The parse routine should already have set inst.error, but set a
7226	     default here just in case.  */
7227	  if (!inst.error)
7228	    inst.error = _("syntax error");
7229	  return FAIL;
7230	}
7231
7232      /* Do not backtrack over a trailing optional argument that
7233	 absorbed some text.  We will only fail again, with the
7234	 'garbage following instruction' error message, which is
7235	 probably less helpful than the current one.  */
7236      if (backtrack_index == i && backtrack_pos != str
7237	  && upat[i+1] == OP_stop)
7238	{
7239	  if (!inst.error)
7240	    inst.error = _("syntax error");
7241	  return FAIL;
7242	}
7243
7244      /* Try again, skipping the optional argument at backtrack_pos.  */
7245      str = backtrack_pos;
7246      inst.error = backtrack_error;
7247      inst.operands[backtrack_index].present = 0;
7248      i = backtrack_index;
7249      backtrack_pos = 0;
7250    }
7251
7252  /* Check that we have parsed all the arguments.  */
7253  if (*str != '\0' && !inst.error)
7254    inst.error = _("garbage following instruction");
7255
7256  return inst.error ? FAIL : SUCCESS;
7257}
7258
7259#undef po_char_or_fail
7260#undef po_reg_or_fail
7261#undef po_reg_or_goto
7262#undef po_imm_or_fail
7263#undef po_scalar_or_goto
7264#undef po_barrier_or_imm
7265
7266/* Shorthand macro for instruction encoding functions issuing errors.  */
7267#define constraint(expr, err)			\
7268  do						\
7269    {						\
7270      if (expr)					\
7271	{					\
7272	  inst.error = err;			\
7273	  return;				\
7274	}					\
7275    }						\
7276  while (0)
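/* Typical use (illustrative):

     constraint (inst.operands[0].reg == REG_PC, BAD_PC);

   i.e. record the error and return from the encoding function early.  */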
7277
7278/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
7279   instructions are unpredictable if these registers are used.  This
7280   is the BadReg predicate in ARM's Thumb-2 documentation.  */
7281#define reject_bad_reg(reg)				\
7282  do							\
7283   if (reg == REG_SP || reg == REG_PC)			\
7284     {							\
7285       inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC;	\
7286       return;						\
7287     }							\
7288  while (0)
7289
7290/* If REG is R13 (the stack pointer), warn that its use is
7291   deprecated.  */
7292#define warn_deprecated_sp(reg)			\
7293  do						\
7294    if (warn_on_deprecated && reg == REG_SP)	\
7295       as_tsktsk (_("use of r13 is deprecated"));	\
7296  while (0)
7297
7298/* Functions for operand encoding.  ARM, then Thumb.  */
7299
7300#define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
7301
7302/* If the current inst is a scalar ARMv8.2 fp16 instruction, do special encoding.
7303
7304   The only binary encoding difference is the Coprocessor number.  Coprocessor
7305   9 is used for half-precision calculations or conversions.  The format of the
7306   instruction is the same as the equivalent Coprocessor 10 instruction that
7307   exists for Single-Precision operation.  */
7308
7309static void
7310do_scalar_fp16_v82_encode (void)
7311{
7312  if (inst.cond != COND_ALWAYS)
7313    as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
7314	       " the behaviour is UNPREDICTABLE"));
7315  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
7316	      _(BAD_FP16));
7317
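  /* Bits [11:8] of the instruction hold the coprocessor number; the masking
     below rewrites the coprocessor 10 form to coprocessor 9
     (0x900 == 9 << 8).  */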
7318  inst.instruction = (inst.instruction & 0xfffff0ff) | 0x900;
7319  mark_feature_used (&arm_ext_fp16);
7320}
7321
7322/* If VAL can be encoded in the immediate field of an ARM instruction,
7323   return the encoded form.  Otherwise, return FAIL.  */
7324
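/* Worked example (illustrative): VAL == 0x0000ff00 is 0xff rotated right by
   24, so the loop below succeeds at i == 24 and returns 0xff | (24 << 7),
   i.e. 0xcff: rotate field 0xc, constant 0xff.  */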
7325static unsigned int
7326encode_arm_immediate (unsigned int val)
7327{
7328  unsigned int a, i;
7329
7330  if (val <= 0xff)
7331    return val;
7332
7333  for (i = 2; i < 32; i += 2)
7334    if ((a = rotate_left (val, i)) <= 0xff)
7335      return a | (i << 7); /* 12-bit pack: [shift-cnt,const].  */
7336
7337  return FAIL;
7338}
7339
7340/* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7341   return the encoded form.  Otherwise, return FAIL.  */
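/* Worked examples (illustrative): 0x00ab00ab matches the "00XY00XY"
   replication pattern and encodes as 0x100 | 0xab == 0x1ab, while
   0xab000000 hits the shifted 8-bit case at i == 24 and encodes as
   0x2b | (8 << 7) == 0x42b.  */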
7342static unsigned int
7343encode_thumb32_immediate (unsigned int val)
7344{
7345  unsigned int a, i;
7346
7347  if (val <= 0xff)
7348    return val;
7349
7350  for (i = 1; i <= 24; i++)
7351    {
7352      a = val >> i;
7353      if ((val & ~(0xff << i)) == 0)
7354	return ((val >> i) & 0x7f) | ((32 - i) << 7);
7355    }
7356
7357  a = val & 0xff;
7358  if (val == ((a << 16) | a))
7359    return 0x100 | a;
7360  if (val == ((a << 24) | (a << 16) | (a << 8) | a))
7361    return 0x300 | a;
7362
7363  a = val & 0xff00;
7364  if (val == ((a << 16) | a))
7365    return 0x200 | (a >> 8);
7366
7367  return FAIL;
7368}
7369/* Encode a VFP SP or DP register number into inst.instruction.  */
7370
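/* For instance (illustrative), d17 in the Dd position sets bits [15:12]
   to 1 and the D bit (bit 22), and is only accepted when the selected FPU
   provides 32 D registers (fpu_vfp_ext_d32).  */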
7371static void
7372encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
7373{
7374  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
7375      && reg > 15)
7376    {
7377      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
7378	{
7379	  if (thumb_mode)
7380	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
7381				    fpu_vfp_ext_d32);
7382	  else
7383	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
7384				    fpu_vfp_ext_d32);
7385	}
7386      else
7387	{
7388	  first_error (_("D register out of range for selected VFP version"));
7389	  return;
7390	}
7391    }
7392
7393  switch (pos)
7394    {
7395    case VFP_REG_Sd:
7396      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
7397      break;
7398
7399    case VFP_REG_Sn:
7400      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
7401      break;
7402
7403    case VFP_REG_Sm:
7404      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
7405      break;
7406
7407    case VFP_REG_Dd:
7408      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
7409      break;
7410
7411    case VFP_REG_Dn:
7412      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
7413      break;
7414
7415    case VFP_REG_Dm:
7416      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
7417      break;
7418
7419    default:
7420      abort ();
7421    }
7422}
7423
7424/* Encode a <shift> in an ARM-format instruction.  The immediate,
7425   if any, is handled by md_apply_fix.	 */
7426static void
7427encode_arm_shift (int i)
7428{
7429  if (inst.operands[i].shift_kind == SHIFT_RRX)
7430    inst.instruction |= SHIFT_ROR << 5;
7431  else
7432    {
7433      inst.instruction |= inst.operands[i].shift_kind << 5;
7434      if (inst.operands[i].immisreg)
7435	{
7436	  inst.instruction |= SHIFT_BY_REG;
7437	  inst.instruction |= inst.operands[i].imm << 8;
7438	}
7439      else
7440	inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7441    }
7442}
7443
7444static void
7445encode_arm_shifter_operand (int i)
7446{
7447  if (inst.operands[i].isreg)
7448    {
7449      inst.instruction |= inst.operands[i].reg;
7450      encode_arm_shift (i);
7451    }
7452  else
7453    {
7454      inst.instruction |= INST_IMMEDIATE;
7455      if (inst.reloc.type != BFD_RELOC_ARM_IMMEDIATE)
7456	inst.instruction |= inst.operands[i].imm;
7457    }
7458}
7459
7460/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.  */
7461static void
7462encode_arm_addr_mode_common (int i, bfd_boolean is_t)
7463{
7464  /* PR 14260:
7465     Generate an error if the operand is not a register.  */
7466  constraint (!inst.operands[i].isreg,
7467	      _("Instruction does not support =N addresses"));
7468
7469  inst.instruction |= inst.operands[i].reg << 16;
7470
7471  if (inst.operands[i].preind)
7472    {
7473      if (is_t)
7474	{
7475	  inst.error = _("instruction does not accept preindexed addressing");
7476	  return;
7477	}
7478      inst.instruction |= PRE_INDEX;
7479      if (inst.operands[i].writeback)
7480	inst.instruction |= WRITE_BACK;
7481
7482    }
7483  else if (inst.operands[i].postind)
7484    {
7485      gas_assert (inst.operands[i].writeback);
7486      if (is_t)
7487	inst.instruction |= WRITE_BACK;
7488    }
7489  else /* unindexed - only for coprocessor */
7490    {
7491      inst.error = _("instruction does not accept unindexed addressing");
7492      return;
7493    }
7494
7495  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
7496      && (((inst.instruction & 0x000f0000) >> 16)
7497	  == ((inst.instruction & 0x0000f000) >> 12)))
7498    as_warn ((inst.instruction & LOAD_BIT)
7499	     ? _("destination register same as write-back base")
7500	     : _("source register same as write-back base"));
7501}
7502
7503/* inst.operands[i] was set up by parse_address.  Encode it into an
7504   ARM-format mode 2 load or store instruction.	 If is_t is true,
7505   reject forms that cannot be used with a T instruction (i.e. not
7506   post-indexed).  */
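/* Forms typically reaching this encoder (illustrative): "[r1, #4]",
   "[r1, r2, lsl #2]", "[r1], #4" (post-indexed) and "[r1, #-8]!"
   (pre-indexed with writeback).  */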
7507static void
7508encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
7509{
7510  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
7511
7512  encode_arm_addr_mode_common (i, is_t);
7513
7514  if (inst.operands[i].immisreg)
7515    {
7516      constraint ((inst.operands[i].imm == REG_PC
7517		   || (is_pc && inst.operands[i].writeback)),
7518		  BAD_PC_ADDRESSING);
7519      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
7520      inst.instruction |= inst.operands[i].imm;
7521      if (!inst.operands[i].negative)
7522	inst.instruction |= INDEX_UP;
7523      if (inst.operands[i].shifted)
7524	{
7525	  if (inst.operands[i].shift_kind == SHIFT_RRX)
7526	    inst.instruction |= SHIFT_ROR << 5;
7527	  else
7528	    {
7529	      inst.instruction |= inst.operands[i].shift_kind << 5;
7530	      inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7531	    }
7532	}
7533    }
7534  else /* immediate offset in inst.reloc */
7535    {
7536      if (is_pc && !inst.reloc.pc_rel)
7537	{
7538	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);
7539
7540	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
7541	     cannot use PC in addressing.
7542	     PC cannot be used in writeback addressing, either.  */
7543	  constraint ((is_t || inst.operands[i].writeback),
7544		      BAD_PC_ADDRESSING);
7545
7546	  /* Use of PC in str is deprecated for ARMv7.  */
7547	  if (warn_on_deprecated
7548	      && !is_load
7549	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
7550	    as_tsktsk (_("use of PC in this instruction is deprecated"));
7551	}
7552
7553      if (inst.reloc.type == BFD_RELOC_UNUSED)
7554	{
7555	  /* Prefer + for zero encoded value.  */
7556	  if (!inst.operands[i].negative)
7557	    inst.instruction |= INDEX_UP;
7558	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
7559	}
7560    }
7561}
7562
7563/* inst.operands[i] was set up by parse_address.  Encode it into an
7564   ARM-format mode 3 load or store instruction.	 Reject forms that
7565   cannot be used with such instructions.  If is_t is true, reject
7566   forms that cannot be used with a T instruction (i.e. not
7567   post-indexed).  */
7568static void
7569encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
7570{
7571  if (inst.operands[i].immisreg && inst.operands[i].shifted)
7572    {
7573      inst.error = _("instruction does not accept scaled register index");
7574      return;
7575    }
7576
7577  encode_arm_addr_mode_common (i, is_t);
7578
7579  if (inst.operands[i].immisreg)
7580    {
7581      constraint ((inst.operands[i].imm == REG_PC
7582		   || (is_t && inst.operands[i].reg == REG_PC)),
7583		  BAD_PC_ADDRESSING);
7584      constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
7585		  BAD_PC_WRITEBACK);
7586      inst.instruction |= inst.operands[i].imm;
7587      if (!inst.operands[i].negative)
7588	inst.instruction |= INDEX_UP;
7589    }
7590  else /* immediate offset in inst.reloc */
7591    {
7592      constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
7593		   && inst.operands[i].writeback),
7594		  BAD_PC_WRITEBACK);
7595      inst.instruction |= HWOFFSET_IMM;
7596      if (inst.reloc.type == BFD_RELOC_UNUSED)
7597	{
7598	  /* Prefer + for zero encoded value.  */
7599	  if (!inst.operands[i].negative)
7600	    inst.instruction |= INDEX_UP;
7601
7602	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
7603	}
7604    }
7605}
7606
7607/* Write immediate bits [7:0] to the following locations:
7608
7609  |28/24|23     19|18 16|15                    4|3     0|
7610  |  a  |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7611
7612  This function is used by VMOV/VMVN/VORR/VBIC.  */
7613
7614static void
7615neon_write_immbits (unsigned immbits)
7616{
7617  inst.instruction |= immbits & 0xf;
7618  inst.instruction |= ((immbits >> 4) & 0x7) << 16;
7619  inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
7620}
7621
7622/* Invert low-order SIZE bits of XHI:XLO.  */
7623
7624static void
7625neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
7626{
7627  unsigned immlo = xlo ? *xlo : 0;
7628  unsigned immhi = xhi ? *xhi : 0;
7629
7630  switch (size)
7631    {
7632    case 8:
7633      immlo = (~immlo) & 0xff;
7634      break;
7635
7636    case 16:
7637      immlo = (~immlo) & 0xffff;
7638      break;
7639
7640    case 64:
7641      immhi = (~immhi) & 0xffffffff;
7642      /* fall through.  */
7643
7644    case 32:
7645      immlo = (~immlo) & 0xffffffff;
7646      break;
7647
7648    default:
7649      abort ();
7650    }
7651
7652  if (xlo)
7653    *xlo = immlo;
7654
7655  if (xhi)
7656    *xhi = immhi;
7657}
7658
7659/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
7660   A, B, C, D.  */
7661
7662static int
7663neon_bits_same_in_bytes (unsigned imm)
7664{
7665  return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
7666	 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
7667	 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
7668	 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
7669}
7670
7671/* For immediate of above form, return 0bABCD.  */
7672
7673static unsigned
7674neon_squash_bits (unsigned imm)
7675{
7676  return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
7677	 | ((imm & 0x01000000) >> 21);
7678}
7679
7680/* Compress quarter-float representation to 0b...000 abcdefgh.  */
7681
7682static unsigned
7683neon_qfloat_bits (unsigned imm)
7684{
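      /* Bit 7 of the result is the sign (bit 31 of the single-precision
         value); bits 6:0 come from bits 25:19, i.e. the low exponent
         bits and the top four mantissa bits.  */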
7685  return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
7686}
7687
7688/* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
7689   the instruction. *OP is passed as the initial value of the op field, and
7690   may be set to a different value depending on the constant (i.e.
7691   "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
7692   MVN).  If the immediate looks like a repeated pattern then also
7693   try smaller element sizes.  */
7694
7695static int
7696neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
7697			 unsigned *immbits, int *op, int size,
7698			 enum neon_el_type type)
7699{
7700  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
7701     float.  */
7702  if (type == NT_float && !float_p)
7703    return FAIL;
7704
7705  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
7706    {
7707      if (size != 32 || *op == 1)
7708	return FAIL;
7709      *immbits = neon_qfloat_bits (immlo);
7710      return 0xf;
7711    }
7712
7713  if (size == 64)
7714    {
7715      if (neon_bits_same_in_bytes (immhi)
7716	  && neon_bits_same_in_bytes (immlo))
7717	{
7718	  if (*op == 1)
7719	    return FAIL;
7720	  *immbits = (neon_squash_bits (immhi) << 4)
7721		     | neon_squash_bits (immlo);
7722	  *op = 1;
7723	  return 0xe;
7724	}
7725
7726      if (immhi != immlo)
7727	return FAIL;
7728    }
7729
7730  if (size >= 32)
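      /* 32-bit element forms: cmode 0/2/4/6 place the byte in one of the
         four byte lanes; cmode 0xc/0xd are the "byte followed by ones"
         forms.  */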
7731    {
7732      if (immlo == (immlo & 0x000000ff))
7733	{
7734	  *immbits = immlo;
7735	  return 0x0;
7736	}
7737      else if (immlo == (immlo & 0x0000ff00))
7738	{
7739	  *immbits = immlo >> 8;
7740	  return 0x2;
7741	}
7742      else if (immlo == (immlo & 0x00ff0000))
7743	{
7744	  *immbits = immlo >> 16;
7745	  return 0x4;
7746	}
7747      else if (immlo == (immlo & 0xff000000))
7748	{
7749	  *immbits = immlo >> 24;
7750	  return 0x6;
7751	}
7752      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
7753	{
7754	  *immbits = (immlo >> 8) & 0xff;
7755	  return 0xc;
7756	}
7757      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
7758	{
7759	  *immbits = (immlo >> 16) & 0xff;
7760	  return 0xd;
7761	}
7762
7763      if ((immlo & 0xffff) != (immlo >> 16))
7764	return FAIL;
7765      immlo &= 0xffff;
7766    }
7767
7768  if (size >= 16)
7769    {
7770      if (immlo == (immlo & 0x000000ff))
7771	{
7772	  *immbits = immlo;
7773	  return 0x8;
7774	}
7775      else if (immlo == (immlo & 0x0000ff00))
7776	{
7777	  *immbits = immlo >> 8;
7778	  return 0xa;
7779	}
7780
7781      if ((immlo & 0xff) != (immlo >> 8))
7782	return FAIL;
7783      immlo &= 0xff;
7784    }
7785
7786  if (immlo == (immlo & 0x000000ff))
7787    {
7788      /* Don't allow MVN with 8-bit immediate.  */
7789      if (*op == 1)
7790	return FAIL;
7791      *immbits = immlo;
7792      return 0xe;
7793    }
7794
7795  return FAIL;
7796}
7797
7798#if defined BFD_HOST_64_BIT
7799/* Returns TRUE if double precision value V may be cast
7800   to single precision without loss of accuracy.  */
7801
7802static bfd_boolean
7803is_double_a_single (bfd_int64_t v)
7804{
7805  int exp = (int)((v >> 52) & 0x7FF);
7806  bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
7807
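      /* Accept an exponent of zero or all-ones (zero/denormal, Inf/NaN),
         or one that fits the single-precision range after re-biasing; in
         every case the 29 low-order mantissa bits that the conversion
         drops must be zero.  */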
7808  return (exp == 0 || exp == 0x7FF
7809	  || (exp >= 1023 - 126 && exp <= 1023 + 127))
7810    && (mantissa & 0x1FFFFFFFl) == 0;
7811}
7812
7813/* Returns a double precision value cast to single precision
7814   (ignoring the least significant bits in exponent and mantissa).  */
7815
7816static int
7817double_to_single (bfd_int64_t v)
7818{
7819  int sign = (int) ((v >> 63) & 1l);
7820  int exp = (int) ((v >> 52) & 0x7FF);
7821  bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
7822
7823  if (exp == 0x7FF)
7824    exp = 0xFF;
7825  else
7826    {
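          /* Re-bias the exponent from double precision (1023) to single
             precision (127).  */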
7827      exp = exp - 1023 + 127;
7828      if (exp >= 0xFF)
7829	{
7830	  /* Infinity.  */
7831	  exp = 0x7F;
7832	  mantissa = 0;
7833	}
7834      else if (exp < 0)
7835	{
7836	  /* No denormalized numbers.  */
7837	  exp = 0;
7838	  mantissa = 0;
7839	}
7840    }
7841  mantissa >>= 29;
7842  return (sign << 31) | (exp << 23) | mantissa;
7843}
7844#endif /* BFD_HOST_64_BIT */
7845
7846enum lit_type
7847{
7848  CONST_THUMB,
7849  CONST_ARM,
7850  CONST_VEC
7851};
7852
7853static void do_vfp_nsyn_opcode (const char *);
7854
7855/* inst.reloc.exp describes an "=expr" load pseudo-operation.
7856   Determine whether it can be performed with a move instruction; if
7857   it can, convert inst.instruction to that move instruction and
7858   return TRUE; if it can't, convert inst.instruction to a literal-pool
7859   load and return FALSE.  If this is not a valid thing to do in the
7860   current context, set inst.error and return TRUE.
7861
7862   inst.operands[i] describes the destination register.	 */
7863
7864static bfd_boolean
7865move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
7866{
7867  unsigned long tbit;
7868  bfd_boolean thumb_p = (t == CONST_THUMB);
7869  bfd_boolean arm_p   = (t == CONST_ARM);
7870
7871  if (thumb_p)
7872    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
7873  else
7874    tbit = LOAD_BIT;
7875
7876  if ((inst.instruction & tbit) == 0)
7877    {
7878      inst.error = _("invalid pseudo operation");
7879      return TRUE;
7880    }
7881
7882  if (inst.reloc.exp.X_op != O_constant
7883      && inst.reloc.exp.X_op != O_symbol
7884      && inst.reloc.exp.X_op != O_big)
7885    {
7886      inst.error = _("constant expression expected");
7887      return TRUE;
7888    }
7889
7890  if (inst.reloc.exp.X_op == O_constant
7891      || inst.reloc.exp.X_op == O_big)
7892    {
7893#if defined BFD_HOST_64_BIT
7894      bfd_int64_t v;
7895#else
7896      offsetT v;
7897#endif
7898      if (inst.reloc.exp.X_op == O_big)
7899	{
7900	  LITTLENUM_TYPE w[X_PRECISION];
7901	  LITTLENUM_TYPE * l;
7902
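    	  /* An X_add_number of -1 marks a floating-point bignum; convert
    	     it from generic_floating_point_number with gen_to_words.
    	     Integer bignums are already in generic_bignum.  */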
7903	  if (inst.reloc.exp.X_add_number == -1)
7904	    {
7905	      gen_to_words (w, X_PRECISION, E_PRECISION);
7906	      l = w;
7907	      /* FIXME: Should we check words w[2..5] ?  */
7908	    }
7909	  else
7910	    l = generic_bignum;
7911
7912#if defined BFD_HOST_64_BIT
7913	  v =
7914	    ((((((((bfd_int64_t) l[3] & LITTLENUM_MASK)
7915		  << LITTLENUM_NUMBER_OF_BITS)
7916		 | ((bfd_int64_t) l[2] & LITTLENUM_MASK))
7917		<< LITTLENUM_NUMBER_OF_BITS)
7918	       | ((bfd_int64_t) l[1] & LITTLENUM_MASK))
7919	      << LITTLENUM_NUMBER_OF_BITS)
7920	     | ((bfd_int64_t) l[0] & LITTLENUM_MASK));
7921#else
7922	  v = ((l[1] & LITTLENUM_MASK) << LITTLENUM_NUMBER_OF_BITS)
7923	    |  (l[0] & LITTLENUM_MASK);
7924#endif
7925	}
7926      else
7927	v = inst.reloc.exp.X_add_number;
7928
7929      if (!inst.operands[i].issingle)
7930	{
7931	  if (thumb_p)
7932	    {
7933	      /* This can be encoded only for a low register.  */
7934	      if ((v & ~0xFF) == 0 && (inst.operands[i].reg < 8))
7935		{
7936		  /* This can be done with a mov(1) instruction.  */
7937		  inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
7938		  inst.instruction |= v;
7939		  return TRUE;
7940		}
7941
7942	      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
7943		  || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
7944		{
7945		  /* Check if on thumb2 it can be done with a mov.w, mvn or
7946		     movw instruction.  */
7947		  unsigned int newimm;
7948		  bfd_boolean isNegated;
7949
7950		  newimm = encode_thumb32_immediate (v);
7951		  if (newimm != (unsigned int) FAIL)
7952		    isNegated = FALSE;
7953		  else
7954		    {
7955		      newimm = encode_thumb32_immediate (~v);
7956		      if (newimm != (unsigned int) FAIL)
7957			isNegated = TRUE;
7958		    }
7959
7960		  /* The number can be loaded with a mov.w or mvn
7961		     instruction.  */
7962		  if (newimm != (unsigned int) FAIL
7963		      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
7964		    {
7965		      inst.instruction = (0xf04f0000  /*  MOV.W.  */
7966					  | (inst.operands[i].reg << 8));
7967		      /* Change to MOVN.  */
7968		      inst.instruction |= (isNegated ? 0x200000 : 0);
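    		      /* Scatter the 12-bit modified immediate into the
    			 i:imm3:imm8 fields: bit 11 -> bit 26, bits 10:8 ->
    			 bits 14:12, bits 7:0 unchanged.  */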
7969		      inst.instruction |= (newimm & 0x800) << 15;
7970		      inst.instruction |= (newimm & 0x700) << 4;
7971		      inst.instruction |= (newimm & 0x0ff);
7972		      return TRUE;
7973		    }
7974		  /* The number can be loaded with a movw instruction.  */
7975		  else if ((v & ~0xFFFF) == 0
7976			   && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
7977		    {
7978		      int imm = v & 0xFFFF;
7979
7980		      inst.instruction = 0xf2400000;  /* MOVW.  */
7981		      inst.instruction |= (inst.operands[i].reg << 8);
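    		      /* Scatter the 16-bit immediate into the
    			 imm4:i:imm3:imm8 fields.  */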
7982		      inst.instruction |= (imm & 0xf000) << 4;
7983		      inst.instruction |= (imm & 0x0800) << 15;
7984		      inst.instruction |= (imm & 0x0700) << 4;
7985		      inst.instruction |= (imm & 0x00ff);
7986		      return TRUE;
7987		    }
7988		}
7989	    }
7990	  else if (arm_p)
7991	    {
7992	      int value = encode_arm_immediate (v);
7993
7994	      if (value != FAIL)
7995		{
7996		  /* This can be done with a mov instruction.  */
7997		  inst.instruction &= LITERAL_MASK;
7998		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
7999		  inst.instruction |= value & 0xfff;
8000		  return TRUE;
8001		}
8002
8003	      value = encode_arm_immediate (~ v);
8004	      if (value != FAIL)
8005		{
8006		  /* This can be done with a mvn instruction.  */
8007		  inst.instruction &= LITERAL_MASK;
8008		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
8009		  inst.instruction |= value & 0xfff;
8010		  return TRUE;
8011		}
8012	    }
8013	  else if (t == CONST_VEC && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
8014	    {
8015	      int op = 0;
8016	      unsigned immbits = 0;
8017	      unsigned immlo = inst.operands[1].imm;
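    	      /* The high word of a 64-bit immediate is either supplied
    		 explicitly (regisimm) or derived by zero- or sign-extending
    		 the low word.  */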
8018	      unsigned immhi = inst.operands[1].regisimm
8019		? inst.operands[1].reg
8020		: inst.reloc.exp.X_unsigned
8021		? 0
8022		: ((bfd_int64_t)((int) immlo)) >> 32;
8023	      int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
8024						   &op, 64, NT_invtype);
8025
8026	      if (cmode == FAIL)
8027		{
8028		  neon_invert_size (&immlo, &immhi, 64);
8029		  op = !op;
8030		  cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
8031						   &op, 64, NT_invtype);
8032		}
8033
8034	      if (cmode != FAIL)
8035		{
8036		  inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
8037		    | (1 << 23)
8038		    | (cmode << 8)
8039		    | (op << 5)
8040		    | (1 << 4);
8041
8042		  /* Fill other bits in vmov encoding for both thumb and arm.  */
8043		  if (thumb_mode)
8044		    inst.instruction |= (0x7U << 29) | (0xF << 24);
8045		  else
8046		    inst.instruction |= (0xFU << 28) | (0x1 << 25);
8047		  neon_write_immbits (immbits);
8048		  return TRUE;
8049		}
8050	    }
8051	}
8052
8053      if (t == CONST_VEC)
8054	{
8055	  /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant.  */
8056	  if (inst.operands[i].issingle
8057	      && is_quarter_float (inst.operands[1].imm)
8058	      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3xd))
8059	    {
8060	      inst.operands[1].imm =
8061		neon_qfloat_bits (v);
8062	      do_vfp_nsyn_opcode ("fconsts");
8063	      return TRUE;
8064	    }
8065
8066	  /* If our host does not support a 64-bit type then we cannot perform
8067	     the following optimization.  This means that there will be a
8068	     discrepancy between the output produced by an assembler built for
8069	     a 32-bit-only host and the output produced from a 64-bit host, but
8070	     this cannot be helped.  */
8071#if defined BFD_HOST_64_BIT
8072	  else if (!inst.operands[1].issingle
8073		   && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
8074	    {
8075	      if (is_double_a_single (v)
8076		  && is_quarter_float (double_to_single (v)))
8077		{
8078		  inst.operands[1].imm =
8079		    neon_qfloat_bits (double_to_single (v));
8080		  do_vfp_nsyn_opcode ("fconstd");
8081		  return TRUE;
8082		}
8083	    }
8084#endif
8085	}
8086    }
8087
8088  if (add_to_lit_pool ((!inst.operands[i].isvec
8089			|| inst.operands[i].issingle) ? 4 : 8) == FAIL)
8090    return TRUE;
8091
8092  inst.operands[1].reg = REG_PC;
8093  inst.operands[1].isreg = 1;
8094  inst.operands[1].preind = 1;
8095  inst.reloc.pc_rel = 1;
8096  inst.reloc.type = (thumb_p
8097		     ? BFD_RELOC_ARM_THUMB_OFFSET
8098		     : (mode_3
8099			? BFD_RELOC_ARM_HWLITERAL
8100			: BFD_RELOC_ARM_LITERAL));
8101  return FALSE;
8102}
8103
8104/* inst.operands[i] was set up by parse_address.  Encode it into an
8105   ARM-format instruction.  Reject all forms which cannot be encoded
8106   into a coprocessor load/store instruction.  If wb_ok is false,
8107   reject use of writeback; if unind_ok is false, reject use of
8108   unindexed addressing.  If reloc_override is not 0, use it instead
8109   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
8110   (in which case it is preserved).  */
8111
8112static int
8113encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
8114{
8115  if (!inst.operands[i].isreg)
8116    {
8117      /* PR 18256 */
8118      if (! inst.operands[0].isvec)
8119	{
8120	  inst.error = _("invalid co-processor operand");
8121	  return FAIL;
8122	}
8123      if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
8124	return SUCCESS;
8125    }
8126
8127  inst.instruction |= inst.operands[i].reg << 16;
8128
8129  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));
8130
8131  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
8132    {
8133      gas_assert (!inst.operands[i].writeback);
8134      if (!unind_ok)
8135	{
8136	  inst.error = _("instruction does not support unindexed addressing");
8137	  return FAIL;
8138	}
8139      inst.instruction |= inst.operands[i].imm;
8140      inst.instruction |= INDEX_UP;
8141      return SUCCESS;
8142    }
8143
8144  if (inst.operands[i].preind)
8145    inst.instruction |= PRE_INDEX;
8146
8147  if (inst.operands[i].writeback)
8148    {
8149      if (inst.operands[i].reg == REG_PC)
8150	{
8151	  inst.error = _("pc may not be used with write-back");
8152	  return FAIL;
8153	}
8154      if (!wb_ok)
8155	{
8156	  inst.error = _("instruction does not support writeback");
8157	  return FAIL;
8158	}
8159      inst.instruction |= WRITE_BACK;
8160    }
8161
8162  if (reloc_override)
8163    inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
8164  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
8165	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
8166	   && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
8167    {
8168      if (thumb_mode)
8169	inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
8170      else
8171	inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
8172    }
8173
8174  /* Prefer + for zero encoded value.  */
8175  if (!inst.operands[i].negative)
8176    inst.instruction |= INDEX_UP;
8177
8178  return SUCCESS;
8179}
8180
8181/* Functions for instruction encoding, sorted by sub-architecture.
8182   First some generics; their names are taken from the conventional
8183   bit positions for register arguments in ARM format instructions.  */
8184
8185static void
8186do_noargs (void)
8187{
8188}
8189
8190static void
8191do_rd (void)
8192{
8193  inst.instruction |= inst.operands[0].reg << 12;
8194}
8195
8196static void
8197do_rn (void)
8198{
8199  inst.instruction |= inst.operands[0].reg << 16;
8200}
8201
8202static void
8203do_rd_rm (void)
8204{
8205  inst.instruction |= inst.operands[0].reg << 12;
8206  inst.instruction |= inst.operands[1].reg;
8207}
8208
8209static void
8210do_rm_rn (void)
8211{
8212  inst.instruction |= inst.operands[0].reg;
8213  inst.instruction |= inst.operands[1].reg << 16;
8214}
8215
8216static void
8217do_rd_rn (void)
8218{
8219  inst.instruction |= inst.operands[0].reg << 12;
8220  inst.instruction |= inst.operands[1].reg << 16;
8221}
8222
8223static void
8224do_rn_rd (void)
8225{
8226  inst.instruction |= inst.operands[0].reg << 16;
8227  inst.instruction |= inst.operands[1].reg << 12;
8228}
8229
8230static void
8231do_tt (void)
8232{
8233  inst.instruction |= inst.operands[0].reg << 8;
8234  inst.instruction |= inst.operands[1].reg << 16;
8235}
8236
8237static bfd_boolean
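    /* If the active CPU is "any", warn with MSG and return TRUE.  If the
       CPU has FEATURE, report MSG as an error and return TRUE.  Otherwise
       return FALSE.  */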
8238check_obsolete (const arm_feature_set *feature, const char *msg)
8239{
8240  if (ARM_CPU_IS_ANY (cpu_variant))
8241    {
8242      as_tsktsk ("%s", msg);
8243      return TRUE;
8244    }
8245  else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
8246    {
8247      as_bad ("%s", msg);
8248      return TRUE;
8249    }
8250
8251  return FALSE;
8252}
8253
8254static void
8255do_rd_rm_rn (void)
8256{
8257  unsigned Rn = inst.operands[2].reg;
8258  /* Enforce restrictions on SWP instruction.  */
8259  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
8260    {
8261      constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
8262		  _("Rn must not overlap other operands"));
8263
8264      /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
8265       */
8266      if (!check_obsolete (&arm_ext_v8,
8267			   _("swp{b} use is obsolete for ARMv8 and later"))
8268	  && warn_on_deprecated
8269	  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
8270	as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
8271    }
8272
8273  inst.instruction |= inst.operands[0].reg << 12;
8274  inst.instruction |= inst.operands[1].reg;
8275  inst.instruction |= Rn << 16;
8276}
8277
8278static void
8279do_rd_rn_rm (void)
8280{
8281  inst.instruction |= inst.operands[0].reg << 12;
8282  inst.instruction |= inst.operands[1].reg << 16;
8283  inst.instruction |= inst.operands[2].reg;
8284}
8285
8286static void
8287do_rm_rd_rn (void)
8288{
8289  constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
8290  constraint (((inst.reloc.exp.X_op != O_constant
8291		&& inst.reloc.exp.X_op != O_illegal)
8292	       || inst.reloc.exp.X_add_number != 0),
8293	      BAD_ADDR_MODE);
8294  inst.instruction |= inst.operands[0].reg;
8295  inst.instruction |= inst.operands[1].reg << 12;
8296  inst.instruction |= inst.operands[2].reg << 16;
8297}
8298
8299static void
8300do_imm0 (void)
8301{
8302  inst.instruction |= inst.operands[0].imm;
8303}
8304
8305static void
8306do_rd_cpaddr (void)
8307{
8308  inst.instruction |= inst.operands[0].reg << 12;
8309  encode_arm_cp_address (1, TRUE, TRUE, 0);
8310}
8311
8312/* ARM instructions, in alphabetical order by function name (except
8313   that wrapper functions appear immediately after the function they
8314   wrap).  */
8315
8316/* This is a pseudo-op of the form "adr rd, label" to be converted
8317   into a relative address of the form "add rd, pc, #label-.-8".  */
8318
8319static void
8320do_adr (void)
8321{
8322  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */
8323
8324  /* Frag hacking will turn this into a sub instruction if the offset turns
8325     out to be negative.  */
8326  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
8327  inst.reloc.pc_rel = 1;
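      /* PC reads as the address of this instruction plus 8 in ARM state,
         hence the -8 adjustment below.  */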
8328  inst.reloc.exp.X_add_number -= 8;
8329}
8330
8331/* This is a pseudo-op of the form "adrl rd, label" to be converted
8332   into a relative address of the form:
8333   add rd, pc, #low(label-.-8)
8334   add rd, rd, #high(label-.-8)  */
8335
8336static void
8337do_adrl (void)
8338{
8339  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */
8340
8341  /* Frag hacking will turn this into a sub instruction if the offset turns
8342     out to be negative.  */
8343  inst.reloc.type	       = BFD_RELOC_ARM_ADRL_IMMEDIATE;
8344  inst.reloc.pc_rel	       = 1;
8345  inst.size		       = INSN_SIZE * 2;
8346  inst.reloc.exp.X_add_number -= 8;
8347}
8348
8349static void
8350do_arit (void)
8351{
8352  constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
8353	      && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
8354	      THUMB1_RELOC_ONLY);
8355  if (!inst.operands[1].present)
8356    inst.operands[1].reg = inst.operands[0].reg;
8357  inst.instruction |= inst.operands[0].reg << 12;
8358  inst.instruction |= inst.operands[1].reg << 16;
8359  encode_arm_shifter_operand (2);
8360}
8361
8362static void
8363do_barrier (void)
8364{
8365  if (inst.operands[0].present)
8366    inst.instruction |= inst.operands[0].imm;
8367  else
8368    inst.instruction |= 0xf;
8369}
8370
8371static void
8372do_bfc (void)
8373{
8374  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8375  constraint (msb > 32, _("bit-field extends past end of register"));
8376  /* The instruction encoding stores the LSB and MSB,
8377     not the LSB and width.  */
8378  inst.instruction |= inst.operands[0].reg << 12;
8379  inst.instruction |= inst.operands[1].imm << 7;
8380  inst.instruction |= (msb - 1) << 16;
8381}
8382
8383static void
8384do_bfi (void)
8385{
8386  unsigned int msb;
8387
8388  /* #0 in second position is alternative syntax for bfc, which is
8389     the same instruction but with REG_PC in the Rm field.  */
8390  if (!inst.operands[1].isreg)
8391    inst.operands[1].reg = REG_PC;
8392
8393  msb = inst.operands[2].imm + inst.operands[3].imm;
8394  constraint (msb > 32, _("bit-field extends past end of register"));
8395  /* The instruction encoding stores the LSB and MSB,
8396     not the LSB and width.  */
8397  inst.instruction |= inst.operands[0].reg << 12;
8398  inst.instruction |= inst.operands[1].reg;
8399  inst.instruction |= inst.operands[2].imm << 7;
8400  inst.instruction |= (msb - 1) << 16;
8401}
8402
8403static void
8404do_bfx (void)
8405{
8406  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8407	      _("bit-field extends past end of register"));
8408  inst.instruction |= inst.operands[0].reg << 12;
8409  inst.instruction |= inst.operands[1].reg;
8410  inst.instruction |= inst.operands[2].imm << 7;
8411  inst.instruction |= (inst.operands[3].imm - 1) << 16;
8412}
8413
8414/* ARM V5 breakpoint instruction (argument parse)
8415     BKPT <16 bit unsigned immediate>
8416     Instruction is not conditional.
8417	The bit pattern given in insns[] has the COND_ALWAYS condition,
8418	and it is an error if the caller tried to override that.  */
8419
8420static void
8421do_bkpt (void)
8422{
8423  /* Top 12 of 16 bits to bits 19:8.  */
8424  inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
8425
8426  /* Bottom 4 of 16 bits to bits 3:0.  */
8427  inst.instruction |= inst.operands[0].imm & 0xf;
8428}
8429
8430static void
8431encode_branch (int default_reloc)
8432{
8433  if (inst.operands[0].hasreloc)
8434    {
8435      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
8436		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
8437		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
8438      inst.reloc.type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
8439	? BFD_RELOC_ARM_PLT32
8440	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
8441    }
8442  else
8443    inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
8444  inst.reloc.pc_rel = 1;
8445}
8446
8447static void
8448do_branch (void)
8449{
8450#ifdef OBJ_ELF
8451  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
8452    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
8453  else
8454#endif
8455    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
8456}
8457
8458static void
8459do_bl (void)
8460{
8461#ifdef OBJ_ELF
8462  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
8463    {
8464      if (inst.cond == COND_ALWAYS)
8465	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
8466      else
8467	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
8468    }
8469  else
8470#endif
8471    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
8472}
8473
8474/* ARM V5 branch-link-exchange instruction (argument parse)
8475     BLX <target_addr>		ie BLX(1)
8476     BLX{<condition>} <Rm>	ie BLX(2)
8477   Unfortunately, there are two different opcodes for this mnemonic.
8478   So, the insns[].value is not used, and the code here zaps values
8479	into inst.instruction.
8480   Also, the <target_addr> can be 25 bits, hence has its own reloc.  */
8481
8482static void
8483do_blx (void)
8484{
8485  if (inst.operands[0].isreg)
8486    {
8487      /* Arg is a register; the opcode provided by insns[] is correct.
8488	 It is not illegal to do "blx pc", just useless.  */
8489      if (inst.operands[0].reg == REG_PC)
8490	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
8491
8492      inst.instruction |= inst.operands[0].reg;
8493    }
8494  else
8495    {
8496      /* Arg is an address; this instruction cannot be executed
8497	 conditionally, and the opcode must be adjusted.
8498	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
8499	 where we generate a BFD_RELOC_ARM_PCREL_CALL instead.  */
8500      constraint (inst.cond != COND_ALWAYS, BAD_COND);
8501      inst.instruction = 0xfa000000;
8502      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
8503    }
8504}
8505
8506static void
8507do_bx (void)
8508{
8509  bfd_boolean want_reloc;
8510
8511  if (inst.operands[0].reg == REG_PC)
8512    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
8513
8514  inst.instruction |= inst.operands[0].reg;
8515  /* Output R_ARM_V4BX relocations if it is an EABI object that looks like
8516     it is for ARMv4t or earlier.  */
8517  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
8518  if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
8519    want_reloc = TRUE;
8520
8521#ifdef OBJ_ELF
8522  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
8523#endif
8524    want_reloc = FALSE;
8525
8526  if (want_reloc)
8527    inst.reloc.type = BFD_RELOC_ARM_V4BX;
8528}
8529
8530
8531/* ARM v5TEJ.  Jump to Jazelle code.  */
8532
8533static void
8534do_bxj (void)
8535{
8536  if (inst.operands[0].reg == REG_PC)
8537    as_tsktsk (_("use of r15 in bxj is not really useful"));
8538
8539  inst.instruction |= inst.operands[0].reg;
8540}
8541
8542/* Co-processor data operation:
8543      CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8544      CDP2	<coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}	 */
8545static void
8546do_cdp (void)
8547{
8548  inst.instruction |= inst.operands[0].reg << 8;
8549  inst.instruction |= inst.operands[1].imm << 20;
8550  inst.instruction |= inst.operands[2].reg << 12;
8551  inst.instruction |= inst.operands[3].reg << 16;
8552  inst.instruction |= inst.operands[4].reg;
8553  inst.instruction |= inst.operands[5].imm << 5;
8554}
8555
8556static void
8557do_cmp (void)
8558{
8559  inst.instruction |= inst.operands[0].reg << 16;
8560  encode_arm_shifter_operand (1);
8561}
8562
8563/* Transfer between coprocessor and ARM registers.
8564   MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8565   MRC2
8566   MCR{cond}
8567   MCR2
8568
8569   No special properties.  */
8570
8571struct deprecated_coproc_regs_s
8572{
8573  unsigned cp;
8574  int opc1;
8575  unsigned crn;
8576  unsigned crm;
8577  int opc2;
8578  arm_feature_set deprecated;
8579  arm_feature_set obsoleted;
8580  const char *dep_msg;
8581  const char *obs_msg;
8582};
8583
8584#define DEPR_ACCESS_V8 \
8585  N_("This coprocessor register access is deprecated in ARMv8")
8586
8587/* Table of all deprecated coprocessor registers.  */
8588static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
8589{
8590    {15, 0, 7, 10, 5,					/* CP15DMB.  */
8591     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
8592     DEPR_ACCESS_V8, NULL},
8593    {15, 0, 7, 10, 4,					/* CP15DSB.  */
8594     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
8595     DEPR_ACCESS_V8, NULL},
8596    {15, 0, 7,  5, 4,					/* CP15ISB.  */
8597     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
8598     DEPR_ACCESS_V8, NULL},
8599    {14, 6, 1,  0, 0,					/* TEEHBR.  */
8600     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
8601     DEPR_ACCESS_V8, NULL},
8602    {14, 6, 0,  0, 0,					/* TEECR.  */
8603     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
8604     DEPR_ACCESS_V8, NULL},
8605};
8606
8607#undef DEPR_ACCESS_V8
8608
8609static const size_t deprecated_coproc_reg_count =
8610  sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
8611
8612static void
8613do_co_reg (void)
8614{
8615  unsigned Rd;
8616  size_t i;
8617
8618  Rd = inst.operands[2].reg;
8619  if (thumb_mode)
8620    {
8621      if (inst.instruction == 0xee000010
8622	  || inst.instruction == 0xfe000010)
8623	/* MCR, MCR2  */
8624	reject_bad_reg (Rd);
8625      else
8626	/* MRC, MRC2  */
8627	constraint (Rd == REG_SP, BAD_SP);
8628    }
8629  else
8630    {
8631      /* MCR */
8632      if (inst.instruction == 0xe000010)
8633	constraint (Rd == REG_PC, BAD_PC);
8634    }
8635
8636    for (i = 0; i < deprecated_coproc_reg_count; ++i)
8637      {
8638	const struct deprecated_coproc_regs_s *r =
8639	  deprecated_coproc_regs + i;
8640
8641	if (inst.operands[0].reg == r->cp
8642	    && inst.operands[1].imm == r->opc1
8643	    && inst.operands[3].reg == r->crn
8644	    && inst.operands[4].reg == r->crm
8645	    && inst.operands[5].imm == r->opc2)
8646	  {
8647	    if (! ARM_CPU_IS_ANY (cpu_variant)
8648		&& warn_on_deprecated
8649		&& ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
8650	      as_tsktsk ("%s", r->dep_msg);
8651	  }
8652      }
8653
8654  inst.instruction |= inst.operands[0].reg << 8;
8655  inst.instruction |= inst.operands[1].imm << 21;
8656  inst.instruction |= Rd << 12;
8657  inst.instruction |= inst.operands[3].reg << 16;
8658  inst.instruction |= inst.operands[4].reg;
8659  inst.instruction |= inst.operands[5].imm << 5;
8660}
8661
8662/* Transfer between coprocessor register and pair of ARM registers.
8663   MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8664   MCRR2
8665   MRRC{cond}
8666   MRRC2
8667
8668   Two XScale instructions are special cases of these:
8669
8670     MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8671     MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8672
8673   Result unpredictable if Rd or Rn is R15.  */
8674
8675static void
8676do_co_reg2c (void)
8677{
8678  unsigned Rd, Rn;
8679
8680  Rd = inst.operands[2].reg;
8681  Rn = inst.operands[3].reg;
8682
8683  if (thumb_mode)
8684    {
8685      reject_bad_reg (Rd);
8686      reject_bad_reg (Rn);
8687    }
8688  else
8689    {
8690      constraint (Rd == REG_PC, BAD_PC);
8691      constraint (Rn == REG_PC, BAD_PC);
8692    }
8693
8694  inst.instruction |= inst.operands[0].reg << 8;
8695  inst.instruction |= inst.operands[1].imm << 4;
8696  inst.instruction |= Rd << 12;
8697  inst.instruction |= Rn << 16;
8698  inst.instruction |= inst.operands[4].reg;
8699}
8700
8701static void
8702do_cpsi (void)
8703{
8704  inst.instruction |= inst.operands[0].imm << 6;
8705  if (inst.operands[1].present)
8706    {
8707      inst.instruction |= CPSI_MMOD;
8708      inst.instruction |= inst.operands[1].imm;
8709    }
8710}
8711
8712static void
8713do_dbg (void)
8714{
8715  inst.instruction |= inst.operands[0].imm;
8716}
8717
8718static void
8719do_div (void)
8720{
8721  unsigned Rd, Rn, Rm;
8722
8723  Rd = inst.operands[0].reg;
8724  Rn = (inst.operands[1].present
8725	? inst.operands[1].reg : Rd);
8726  Rm = inst.operands[2].reg;
8727
8728  constraint ((Rd == REG_PC), BAD_PC);
8729  constraint ((Rn == REG_PC), BAD_PC);
8730  constraint ((Rm == REG_PC), BAD_PC);
8731
8732  inst.instruction |= Rd << 16;
8733  inst.instruction |= Rn << 0;
8734  inst.instruction |= Rm << 8;
8735}
8736
8737static void
8738do_it (void)
8739{
8740  /* There is no IT instruction in ARM mode.  We
8741     process it to do the validation as if in
8742     thumb mode, just in case the code gets
8743     assembled for thumb using the unified syntax.  */
8744
8745  inst.size = 0;
8746  if (unified_syntax)
8747    {
8748      set_it_insn_type (IT_INSN);
8749      now_it.mask = (inst.instruction & 0xf) | 0x10;
8750      now_it.cc = inst.operands[0].imm;
8751    }
8752}
8753
8754/* If there is only one register in the register list,
8755   then return its register number.  Otherwise return -1.  */
8756static int
8757only_one_reg_in_list (int range)
8758{
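      /* ffs returns one plus the index of the least significant set bit,
         or zero if no bits are set.  */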
8759  int i = ffs (range) - 1;
8760  return (i > 15 || range != (1 << i)) ? -1 : i;
8761}
8762
8763static void
8764encode_ldmstm (int from_push_pop_mnem)
8765{
8766  int base_reg = inst.operands[0].reg;
8767  int range = inst.operands[1].imm;
8768  int one_reg;
8769
8770  inst.instruction |= base_reg << 16;
8771  inst.instruction |= range;
8772
8773  if (inst.operands[1].writeback)
8774    inst.instruction |= LDM_TYPE_2_OR_3;
8775
8776  if (inst.operands[0].writeback)
8777    {
8778      inst.instruction |= WRITE_BACK;
8779      /* Check for unpredictable uses of writeback.  */
8780      if (inst.instruction & LOAD_BIT)
8781	{
8782	  /* Not allowed in LDM type 2.	 */
8783	  if ((inst.instruction & LDM_TYPE_2_OR_3)
8784	      && ((range & (1 << REG_PC)) == 0))
8785	    as_warn (_("writeback of base register is UNPREDICTABLE"));
8786	  /* Only allowed if base reg not in list for other types.  */
8787	  else if (range & (1 << base_reg))
8788	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
8789	}
8790      else /* STM.  */
8791	{
8792	  /* Not allowed for type 2.  */
8793	  if (inst.instruction & LDM_TYPE_2_OR_3)
8794	    as_warn (_("writeback of base register is UNPREDICTABLE"));
8795	  /* Only allowed if base reg not in list, or first in list.  */
8796	  else if ((range & (1 << base_reg))
8797		   && (range & ((1 << base_reg) - 1)))
8798	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
8799	}
8800    }
8801
8802  /* If PUSH/POP has only one register, then use the A2 encoding.  */
8803  one_reg = only_one_reg_in_list (range);
8804  if (from_push_pop_mnem && one_reg >= 0)
8805    {
8806      int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;
8807
8808      inst.instruction &= A_COND_MASK;
8809      inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
8810      inst.instruction |= one_reg << 12;
8811    }
8812}
8813
8814static void
8815do_ldmstm (void)
8816{
8817  encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
8818}
8819
8820/* ARMv5TE load-consecutive (argument parse)
8821   Mode is like LDRH.
8822
8823     LDRccD R, mode
8824     STRccD R, mode.  */
8825
8826static void
8827do_ldrd (void)
8828{
8829  constraint (inst.operands[0].reg % 2 != 0,
8830	      _("first transfer register must be even"));
8831  constraint (inst.operands[1].present
8832	      && inst.operands[1].reg != inst.operands[0].reg + 1,
8833	      _("can only transfer two consecutive registers"));
8834  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
8835  constraint (!inst.operands[2].isreg, _("'[' expected"));
8836
8837  if (!inst.operands[1].present)
8838    inst.operands[1].reg = inst.operands[0].reg + 1;
8839
8840  /* encode_arm_addr_mode_3 will diagnose overlap between the base
8841     register and the first register written; we have to diagnose
8842     overlap between the base and the second register written here.  */
8843
8844  if (inst.operands[2].reg == inst.operands[1].reg
8845      && (inst.operands[2].writeback || inst.operands[2].postind))
8846    as_warn (_("base register written back, and overlaps "
8847	       "second transfer register"));
8848
8849  if (!(inst.instruction & V4_STR_BIT))
8850    {
8851      /* For an index-register load, the index register must not overlap the
8852	destination (even if not write-back).  */
8853      if (inst.operands[2].immisreg
8854	      && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
8855	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
8856	as_warn (_("index register overlaps transfer register"));
8857    }
8858  inst.instruction |= inst.operands[0].reg << 12;
8859  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
8860}
8861
8862static void
8863do_ldrex (void)
8864{
8865  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
8866	      || inst.operands[1].postind || inst.operands[1].writeback
8867	      || inst.operands[1].immisreg || inst.operands[1].shifted
8868	      || inst.operands[1].negative
8869	      /* This can arise if the programmer has written
8870		   strex rN, rM, foo
8871		 or if they have mistakenly used a register name as the last
8872		 operand,  eg:
8873		   strex rN, rM, rX
8874		 It is very difficult to distinguish between these two cases
8875		 because "rX" might actually be a label. ie the register
8876		 name has been occluded by a symbol of the same name. So we
8877		 just generate a general 'bad addressing mode' type error
8878		 message and leave it up to the programmer to discover the
8879		 true cause and fix their mistake.  */
8880	      || (inst.operands[1].reg == REG_PC),
8881	      BAD_ADDR_MODE);
8882
8883  constraint (inst.reloc.exp.X_op != O_constant
8884	      || inst.reloc.exp.X_add_number != 0,
8885	      _("offset must be zero in ARM encoding"));
8886
8887  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
8888
8889  inst.instruction |= inst.operands[0].reg << 12;
8890  inst.instruction |= inst.operands[1].reg << 16;
8891  inst.reloc.type = BFD_RELOC_UNUSED;
8892}
8893
8894static void
8895do_ldrexd (void)
8896{
8897  constraint (inst.operands[0].reg % 2 != 0,
8898	      _("even register required"));
8899  constraint (inst.operands[1].present
8900	      && inst.operands[1].reg != inst.operands[0].reg + 1,
8901	      _("can only load two consecutive registers"));
8902  /* If op 1 were present and equal to PC, this function wouldn't
8903     have been called in the first place.  */
8904  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
8905
8906  inst.instruction |= inst.operands[0].reg << 12;
8907  inst.instruction |= inst.operands[2].reg << 16;
8908}
8909
8910/* In both ARM and thumb state 'ldr pc, #imm'  with an immediate
8911   which is not a multiple of four is UNPREDICTABLE.  */
8912static void
8913check_ldr_r15_aligned (void)
8914{
8915  constraint (!(inst.operands[1].immisreg)
8916	      && (inst.operands[0].reg == REG_PC
8917	      && inst.operands[1].reg == REG_PC
8918	      && (inst.reloc.exp.X_add_number & 0x3)),
8919	      _("ldr to register 15 must be 4-byte aligned"));
8920}
8921
8922static void
8923do_ldst (void)
8924{
8925  inst.instruction |= inst.operands[0].reg << 12;
8926  if (!inst.operands[1].isreg)
8927    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
8928      return;
8929  encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
8930  check_ldr_r15_aligned ();
8931}
8932
8933static void
8934do_ldstt (void)
8935{
8936  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
8937     reject [Rn,...].  */
8938  if (inst.operands[1].preind)
8939    {
8940      constraint (inst.reloc.exp.X_op != O_constant
8941		  || inst.reloc.exp.X_add_number != 0,
8942		  _("this instruction requires a post-indexed address"));
8943
8944      inst.operands[1].preind = 0;
8945      inst.operands[1].postind = 1;
8946      inst.operands[1].writeback = 1;
8947    }
8948  inst.instruction |= inst.operands[0].reg << 12;
8949  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
8950}
8951
8952/* Halfword and signed-byte load/store operations.  */
8953
8954static void
8955do_ldstv4 (void)
8956{
8957  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
8958  inst.instruction |= inst.operands[0].reg << 12;
8959  if (!inst.operands[1].isreg)
8960    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
8961      return;
8962  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
8963}
8964
8965static void
8966do_ldsttv4 (void)
8967{
8968  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
8969     reject [Rn,...].  */
8970  if (inst.operands[1].preind)
8971    {
8972      constraint (inst.reloc.exp.X_op != O_constant
8973		  || inst.reloc.exp.X_add_number != 0,
8974		  _("this instruction requires a post-indexed address"));
8975
8976      inst.operands[1].preind = 0;
8977      inst.operands[1].postind = 1;
8978      inst.operands[1].writeback = 1;
8979    }
8980  inst.instruction |= inst.operands[0].reg << 12;
8981  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
8982}
8983
8984/* Co-processor register load/store.
8985   Format: <LDC|STC>{cond}[L] CP#,CRd,<address>	 */
8986static void
8987do_lstc (void)
8988{
8989  inst.instruction |= inst.operands[0].reg << 8;
8990  inst.instruction |= inst.operands[1].reg << 12;
8991  encode_arm_cp_address (2, TRUE, TRUE, 0);
8992}
8993
8994static void
8995do_mlas (void)
8996{
8997  /* This restriction does not apply to mls (nor to mla in v6 or later).  */
8998  if (inst.operands[0].reg == inst.operands[1].reg
8999      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
9000      && !(inst.instruction & 0x00400000))
9001    as_tsktsk (_("Rd and Rm should be different in mla"));
9002
9003  inst.instruction |= inst.operands[0].reg << 16;
9004  inst.instruction |= inst.operands[1].reg;
9005  inst.instruction |= inst.operands[2].reg << 8;
9006  inst.instruction |= inst.operands[3].reg << 12;
9007}
9008
9009static void
9010do_mov (void)
9011{
9012  constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
9013	      && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
9014	      THUMB1_RELOC_ONLY);
9015  inst.instruction |= inst.operands[0].reg << 12;
9016  encode_arm_shifter_operand (1);
9017}
9018
9019/* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>.	 */
9020static void
9021do_mov16 (void)
9022{
9023  bfd_vma imm;
9024  bfd_boolean top;
9025
9026  top = (inst.instruction & 0x00400000) != 0;
9027  constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
9028	      _(":lower16: not allowed in this instruction"));
9029  constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
9030	      _(":upper16: not allowed in this instruction"));
9031  inst.instruction |= inst.operands[0].reg << 12;
9032  if (inst.reloc.type == BFD_RELOC_UNUSED)
9033    {
9034      imm = inst.reloc.exp.X_add_number;
9035      /* The value is in two pieces: 0:11, 16:19.  */
9036      inst.instruction |= (imm & 0x00000fff);
9037      inst.instruction |= (imm & 0x0000f000) << 4;
9038    }
9039}
9040
9041static int
9042do_vfp_nsyn_mrs (void)
9043{
9044  if (inst.operands[0].isvec)
9045    {
9046      if (inst.operands[1].reg != 1)
9047	first_error (_("operand 1 must be FPSCR"));
9048      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
9049      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
9050      do_vfp_nsyn_opcode ("fmstat");
9051    }
9052  else if (inst.operands[1].isvec)
9053    do_vfp_nsyn_opcode ("fmrx");
9054  else
9055    return FAIL;
9056
9057  return SUCCESS;
9058}
9059
9060static int
9061do_vfp_nsyn_msr (void)
9062{
9063  if (inst.operands[0].isvec)
9064    do_vfp_nsyn_opcode ("fmxr");
9065  else
9066    return FAIL;
9067
9068  return SUCCESS;
9069}
9070
9071static void
9072do_vmrs (void)
9073{
9074  unsigned Rt = inst.operands[0].reg;
9075
9076  if (thumb_mode && Rt == REG_SP)
9077    {
9078      inst.error = BAD_SP;
9079      return;
9080    }
9081
9082  /* APSR_ sets isvec. All other refs to PC are illegal.  */
9083  if (!inst.operands[0].isvec && Rt == REG_PC)
9084    {
9085      inst.error = BAD_PC;
9086      return;
9087    }
9088
9089  /* If we get through parsing the register name, we just insert the number
9090     generated into the instruction without further validation.  */
9091  inst.instruction |= (inst.operands[1].reg << 16);
9092  inst.instruction |= (Rt << 12);
9093}
9094
9095static void
9096do_vmsr (void)
9097{
9098  unsigned Rt = inst.operands[1].reg;
9099
9100  if (thumb_mode)
9101    reject_bad_reg (Rt);
9102  else if (Rt == REG_PC)
9103    {
9104      inst.error = BAD_PC;
9105      return;
9106    }
9107
9108  /* If we get through parsing the register name, we just insert the number
9109     generated into the instruction without further validation.  */
9110  inst.instruction |= (inst.operands[0].reg << 16);
9111  inst.instruction |= (Rt << 12);
9112}
9113
9114static void
9115do_mrs (void)
9116{
9117  unsigned br;
9118
9119  if (do_vfp_nsyn_mrs () == SUCCESS)
9120    return;
9121
9122  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
9123  inst.instruction |= inst.operands[0].reg << 12;
9124
9125  if (inst.operands[1].isreg)
9126    {
9127      br = inst.operands[1].reg;
9128      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf000))
9129	as_bad (_("bad register for mrs"));
9130    }
9131  else
9132    {
9133      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
9134      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
9135		  != (PSR_c|PSR_f),
9136		  _("'APSR', 'CPSR' or 'SPSR' expected"));
9137      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
9138    }
9139
9140  inst.instruction |= br;
9141}
9142
9143/* Two possible forms:
9144      "{C|S}PSR_<field>, Rm",
9145      "{C|S}PSR_f, #expression".  */
9146
9147static void
9148do_msr (void)
9149{
9150  if (do_vfp_nsyn_msr () == SUCCESS)
9151    return;
9152
9153  inst.instruction |= inst.operands[0].imm;
9154  if (inst.operands[1].isreg)
9155    inst.instruction |= inst.operands[1].reg;
9156  else
9157    {
9158      inst.instruction |= INST_IMMEDIATE;
9159      inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
9160      inst.reloc.pc_rel = 0;
9161    }
9162}
9163
9164static void
9165do_mul (void)
9166{
9167  constraint (inst.operands[2].reg == REG_PC, BAD_PC);
9168
9169  if (!inst.operands[2].present)
9170    inst.operands[2].reg = inst.operands[0].reg;
9171  inst.instruction |= inst.operands[0].reg << 16;
9172  inst.instruction |= inst.operands[1].reg;
9173  inst.instruction |= inst.operands[2].reg << 8;
9174
9175  if (inst.operands[0].reg == inst.operands[1].reg
9176      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
9177    as_tsktsk (_("Rd and Rm should be different in mul"));
9178}
9179
9180/* Long Multiply Parser
9181   UMULL RdLo, RdHi, Rm, Rs
9182   SMULL RdLo, RdHi, Rm, Rs
9183   UMLAL RdLo, RdHi, Rm, Rs
9184   SMLAL RdLo, RdHi, Rm, Rs.  */
9185
9186static void
9187do_mull (void)
9188{
9189  inst.instruction |= inst.operands[0].reg << 12;
9190  inst.instruction |= inst.operands[1].reg << 16;
9191  inst.instruction |= inst.operands[2].reg;
9192  inst.instruction |= inst.operands[3].reg << 8;
9193
9194  /* rdhi and rdlo must be different.  */
9195  if (inst.operands[0].reg == inst.operands[1].reg)
9196    as_tsktsk (_("rdhi and rdlo must be different"));
9197
9198  /* rdhi, rdlo and rm must all be different before armv6.  */
9199  if ((inst.operands[0].reg == inst.operands[2].reg
9200      || inst.operands[1].reg == inst.operands[2].reg)
9201      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
9202    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
9203}
9204
9205static void
9206do_nop (void)
9207{
9208  if (inst.operands[0].present
9209      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
9210    {
9211      /* Architectural NOP hints are CPSR sets with no bits selected.  */
9212      inst.instruction &= 0xf0000000;
9213      inst.instruction |= 0x0320f000;
9214      if (inst.operands[0].present)
9215	inst.instruction |= inst.operands[0].imm;
9216    }
9217}
9218
9219/* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
9220   PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
9221   Condition defaults to COND_ALWAYS.
9222   Error if Rd, Rn or Rm are R15.  */
9223
9224static void
9225do_pkhbt (void)
9226{
9227  inst.instruction |= inst.operands[0].reg << 12;
9228  inst.instruction |= inst.operands[1].reg << 16;
9229  inst.instruction |= inst.operands[2].reg;
9230  if (inst.operands[3].present)
9231    encode_arm_shift (3);
9232}
9233
9234/* ARM V6 PKHTB (Argument Parse).  */
9235
9236static void
9237do_pkhtb (void)
9238{
9239  if (!inst.operands[3].present)
9240    {
9241      /* If the shift specifier is omitted, turn the instruction
9242	 into pkhbt rd, rm, rn. */
9243      inst.instruction &= 0xfff00010;
9244      inst.instruction |= inst.operands[0].reg << 12;
9245      inst.instruction |= inst.operands[1].reg;
9246      inst.instruction |= inst.operands[2].reg << 16;
9247    }
9248  else
9249    {
9250      inst.instruction |= inst.operands[0].reg << 12;
9251      inst.instruction |= inst.operands[1].reg << 16;
9252      inst.instruction |= inst.operands[2].reg;
9253      encode_arm_shift (3);
9254    }
9255}
9256
9257/* ARMv5TE: Preload-Cache
9258   MP Extensions: Preload for write
9259
9260    PLD(W) <addr_mode>
9261
9262  Syntactically, like LDR with B=1, W=0, L=1.  */
9263
9264static void
9265do_pld (void)
9266{
9267  constraint (!inst.operands[0].isreg,
9268	      _("'[' expected after PLD mnemonic"));
9269  constraint (inst.operands[0].postind,
9270	      _("post-indexed expression used in preload instruction"));
9271  constraint (inst.operands[0].writeback,
9272	      _("writeback used in preload instruction"));
9273  constraint (!inst.operands[0].preind,
9274	      _("unindexed addressing used in preload instruction"));
9275  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
9276}
9277
9278/* ARMv7: PLI <addr_mode>  */
9279static void
9280do_pli (void)
9281{
9282  constraint (!inst.operands[0].isreg,
9283	      _("'[' expected after PLI mnemonic"));
9284  constraint (inst.operands[0].postind,
9285	      _("post-indexed expression used in preload instruction"));
9286  constraint (inst.operands[0].writeback,
9287	      _("writeback used in preload instruction"));
9288  constraint (!inst.operands[0].preind,
9289	      _("unindexed addressing used in preload instruction"));
9290  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
9291  inst.instruction &= ~PRE_INDEX;
9292}
9293
9294static void
9295do_push_pop (void)
9296{
9297  constraint (inst.operands[0].writeback,
9298	      _("push/pop do not support {reglist}^"));
9299  inst.operands[1] = inst.operands[0];
9300  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
9301  inst.operands[0].isreg = 1;
9302  inst.operands[0].writeback = 1;
9303  inst.operands[0].reg = REG_SP;
9304  encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
9305}
9306
9307/* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9308   word at the specified address and the following word
9309   respectively.
9310   Unconditionally executed.
9311   Error if Rn is R15.	*/
9312
9313static void
9314do_rfe (void)
9315{
9316  inst.instruction |= inst.operands[0].reg << 16;
9317  if (inst.operands[0].writeback)
9318    inst.instruction |= WRITE_BACK;
9319}
9320
9321/* ARM V6 ssat (argument parse).  */
9322
9323static void
9324do_ssat (void)
9325{
9326  inst.instruction |= inst.operands[0].reg << 12;
9327  inst.instruction |= (inst.operands[1].imm - 1) << 16;
9328  inst.instruction |= inst.operands[2].reg;
9329
9330  if (inst.operands[3].present)
9331    encode_arm_shift (3);
9332}
9333
9334/* ARM V6 usat (argument parse).  */
9335
9336static void
9337do_usat (void)
9338{
9339  inst.instruction |= inst.operands[0].reg << 12;
9340  inst.instruction |= inst.operands[1].imm << 16;
9341  inst.instruction |= inst.operands[2].reg;
9342
9343  if (inst.operands[3].present)
9344    encode_arm_shift (3);
9345}
9346
9347/* ARM V6 ssat16 (argument parse).  */
9348
9349static void
9350do_ssat16 (void)
9351{
9352  inst.instruction |= inst.operands[0].reg << 12;
9353  inst.instruction |= ((inst.operands[1].imm - 1) << 16);
9354  inst.instruction |= inst.operands[2].reg;
9355}
9356
9357static void
9358do_usat16 (void)
9359{
9360  inst.instruction |= inst.operands[0].reg << 12;
9361  inst.instruction |= inst.operands[1].imm << 16;
9362  inst.instruction |= inst.operands[2].reg;
9363}
9364
9365/* ARM V6 SETEND (argument parse).  Sets the E bit in the CPSR while
9366   preserving the other bits.
9367
9368   setend <endian_specifier>, where <endian_specifier> is either
9369   BE or LE.  */
9370
9371static void
9372do_setend (void)
9373{
9374  if (warn_on_deprecated
9375      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
9376    as_tsktsk (_("setend use is deprecated for ARMv8"));
9377
9378  if (inst.operands[0].imm)
9379    inst.instruction |= 0x200;
9380}
9381
9382static void
9383do_shift (void)
9384{
9385  unsigned int Rm = (inst.operands[1].present
9386		     ? inst.operands[1].reg
9387		     : inst.operands[0].reg);
9388
9389  inst.instruction |= inst.operands[0].reg << 12;
9390  inst.instruction |= Rm;
9391  if (inst.operands[2].isreg)  /* Rd, {Rm,} Rs */
9392    {
9393      inst.instruction |= inst.operands[2].reg << 8;
9394      inst.instruction |= SHIFT_BY_REG;
9395      /* PR 12854: Error on extraneous shifts.  */
9396      constraint (inst.operands[2].shifted,
9397		  _("extraneous shift as part of operand to shift insn"));
9398    }
9399  else
9400    inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
9401}
9402
9403static void
9404do_smc (void)
9405{
9406  inst.reloc.type = BFD_RELOC_ARM_SMC;
9407  inst.reloc.pc_rel = 0;
9408}
9409
9410static void
9411do_hvc (void)
9412{
9413  inst.reloc.type = BFD_RELOC_ARM_HVC;
9414  inst.reloc.pc_rel = 0;
9415}
9416
9417static void
9418do_swi (void)
9419{
9420  inst.reloc.type = BFD_RELOC_ARM_SWI;
9421  inst.reloc.pc_rel = 0;
9422}
9423
9424static void
9425do_setpan (void)
9426{
9427  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
9428	      _("selected processor does not support SETPAN instruction"));
9429
9430  inst.instruction |= ((inst.operands[0].imm & 1) << 9);
9431}
9432
9433static void
9434do_t_setpan (void)
9435{
9436  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
9437	      _("selected processor does not support SETPAN instruction"));
9438
9439  inst.instruction |= (inst.operands[0].imm << 3);
9440}
9441
9442/* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9443   SMLAxy{cond} Rd,Rm,Rs,Rn
9444   SMLAWy{cond} Rd,Rm,Rs,Rn
9445   Error if any register is R15.  */
9446
9447static void
9448do_smla (void)
9449{
9450  inst.instruction |= inst.operands[0].reg << 16;
9451  inst.instruction |= inst.operands[1].reg;
9452  inst.instruction |= inst.operands[2].reg << 8;
9453  inst.instruction |= inst.operands[3].reg << 12;
9454}
9455
9456/* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9457   SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9458   Error if any register is R15.
9459   Warning if Rdlo == Rdhi.  */
9460
9461static void
9462do_smlal (void)
9463{
9464  inst.instruction |= inst.operands[0].reg << 12;
9465  inst.instruction |= inst.operands[1].reg << 16;
9466  inst.instruction |= inst.operands[2].reg;
9467  inst.instruction |= inst.operands[3].reg << 8;
9468
9469  if (inst.operands[0].reg == inst.operands[1].reg)
9470    as_tsktsk (_("rdhi and rdlo must be different"));
9471}
9472
9473/* ARM V5E (El Segundo) signed-multiply (argument parse)
9474   SMULxy{cond} Rd,Rm,Rs
9475   Error if any register is R15.  */
9476
9477static void
9478do_smul (void)
9479{
9480  inst.instruction |= inst.operands[0].reg << 16;
9481  inst.instruction |= inst.operands[1].reg;
9482  inst.instruction |= inst.operands[2].reg << 8;
9483}
9484
9485/* ARM V6 srs (argument parse).  The variable fields in the encoding are
9486   the same for both ARM and Thumb-2.  */
9487
9488static void
9489do_srs (void)
9490{
9491  int reg;
9492
9493  if (inst.operands[0].present)
9494    {
9495      reg = inst.operands[0].reg;
9496      constraint (reg != REG_SP, _("SRS base register must be r13"));
9497    }
9498  else
9499    reg = REG_SP;
9500
9501  inst.instruction |= reg << 16;
9502  inst.instruction |= inst.operands[1].imm;
9503  if (inst.operands[0].writeback || inst.operands[1].writeback)
9504    inst.instruction |= WRITE_BACK;
9505}
9506
9507/* ARM V6 strex (argument parse).  */
9508
9509static void
9510do_strex (void)
9511{
9512  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
9513	      || inst.operands[2].postind || inst.operands[2].writeback
9514	      || inst.operands[2].immisreg || inst.operands[2].shifted
9515	      || inst.operands[2].negative
9516	      /* See comment in do_ldrex().  */
9517	      || (inst.operands[2].reg == REG_PC),
9518	      BAD_ADDR_MODE);
9519
9520  constraint (inst.operands[0].reg == inst.operands[1].reg
9521	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
9522
9523  constraint (inst.reloc.exp.X_op != O_constant
9524	      || inst.reloc.exp.X_add_number != 0,
9525	      _("offset must be zero in ARM encoding"));
9526
9527  inst.instruction |= inst.operands[0].reg << 12;
9528  inst.instruction |= inst.operands[1].reg;
9529  inst.instruction |= inst.operands[2].reg << 16;
9530  inst.reloc.type = BFD_RELOC_UNUSED;
9531}
9532
9533static void
9534do_t_strexbh (void)
9535{
9536  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
9537	      || inst.operands[2].postind || inst.operands[2].writeback
9538	      || inst.operands[2].immisreg || inst.operands[2].shifted
9539	      || inst.operands[2].negative,
9540	      BAD_ADDR_MODE);
9541
9542  constraint (inst.operands[0].reg == inst.operands[1].reg
9543	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
9544
9545  do_rm_rd_rn ();
9546}
9547
9548static void
9549do_strexd (void)
9550{
9551  constraint (inst.operands[1].reg % 2 != 0,
9552	      _("even register required"));
9553  constraint (inst.operands[2].present
9554	      && inst.operands[2].reg != inst.operands[1].reg + 1,
9555	      _("can only store two consecutive registers"));
9556  /* If op 2 were present and equal to PC, this function wouldn't
9557     have been called in the first place.  */
9558  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));
9559
9560  constraint (inst.operands[0].reg == inst.operands[1].reg
9561	      || inst.operands[0].reg == inst.operands[1].reg + 1
9562	      || inst.operands[0].reg == inst.operands[3].reg,
9563	      BAD_OVERLAP);
9564
9565  inst.instruction |= inst.operands[0].reg << 12;
9566  inst.instruction |= inst.operands[1].reg;
9567  inst.instruction |= inst.operands[3].reg << 16;
9568}
9569
9570/* ARM V8 STLEX.  */
9571static void
9572do_stlex (void)
9573{
9574  constraint (inst.operands[0].reg == inst.operands[1].reg
9575	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
9576
9577  do_rd_rm_rn ();
9578}
9579
9580static void
9581do_t_stlex (void)
9582{
9583  constraint (inst.operands[0].reg == inst.operands[1].reg
9584	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
9585
9586  do_rm_rd_rn ();
9587}
9588
9589/* ARM V6 SXTAH extracts a 16-bit value from a register, sign
9590   extends it to 32-bits, and adds the result to a value in another
9591   register.  You can specify a rotation by 0, 8, 16, or 24 bits
9592   before extracting the 16-bit value.
9593   SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
9594   Condition defaults to COND_ALWAYS.
9595   Error if any register uses R15.  */
9596
9597static void
9598do_sxtah (void)
9599{
9600  inst.instruction |= inst.operands[0].reg << 12;
9601  inst.instruction |= inst.operands[1].reg << 16;
9602  inst.instruction |= inst.operands[2].reg;
9603  inst.instruction |= inst.operands[3].imm << 10;
9604}
9605
9606/* ARM V6 SXTH.
9607
9608   SXTH{<cond>} <Rd>, <Rm>{, <rotation>}
9609   Condition defaults to COND_ALWAYS.
9610   Error if any register uses R15.  */
9611
9612static void
9613do_sxth (void)
9614{
9615  inst.instruction |= inst.operands[0].reg << 12;
9616  inst.instruction |= inst.operands[1].reg;
9617  inst.instruction |= inst.operands[2].imm << 10;
9618}
9619
9620/* VFP instructions.  In a logical order: SP variant first, monad
9621   before dyad, arithmetic then move then load/store.  */
9622
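/* Each helper below fills in only the register fields; the fixed bits
   of the encoding come from the opcode table entry that selected the
   helper.  */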
9623static void
9624do_vfp_sp_monadic (void)
9625{
9626  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9627  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
9628}
9629
9630static void
9631do_vfp_sp_dyadic (void)
9632{
9633  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9634  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
9635  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
9636}
9637
9638static void
9639do_vfp_sp_compare_z (void)
9640{
9641  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9642}
9643
9644static void
9645do_vfp_dp_sp_cvt (void)
9646{
9647  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9648  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
9649}
9650
9651static void
9652do_vfp_sp_dp_cvt (void)
9653{
9654  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9655  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
9656}
9657
9658static void
9659do_vfp_reg_from_sp (void)
9660{
9661  inst.instruction |= inst.operands[0].reg << 12;
9662  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
9663}
9664
9665static void
9666do_vfp_reg2_from_sp2 (void)
9667{
9668  constraint (inst.operands[2].imm != 2,
9669	      _("only two consecutive VFP SP registers allowed here"));
9670  inst.instruction |= inst.operands[0].reg << 12;
9671  inst.instruction |= inst.operands[1].reg << 16;
9672  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
9673}
9674
9675static void
9676do_vfp_sp_from_reg (void)
9677{
9678  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
9679  inst.instruction |= inst.operands[1].reg << 12;
9680}
9681
9682static void
9683do_vfp_sp2_from_reg2 (void)
9684{
9685  constraint (inst.operands[0].imm != 2,
9686	      _("only two consecutive VFP SP registers allowed here"));
9687  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
9688  inst.instruction |= inst.operands[1].reg << 12;
9689  inst.instruction |= inst.operands[2].reg << 16;
9690}
9691
9692static void
9693do_vfp_sp_ldst (void)
9694{
9695  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9696  encode_arm_cp_address (1, FALSE, TRUE, 0);
9697}
9698
9699static void
9700do_vfp_dp_ldst (void)
9701{
9702  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9703  encode_arm_cp_address (1, FALSE, TRUE, 0);
9704}
9705
9706
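/* Common workers for the VFP load/store multiple encodings.  The
   register count goes in the low byte of the instruction: the count
   itself for single precision, twice the count for double precision,
   plus one for the X forms (FLDMX/FSTMX); see vfp_dp_ldstm below.  */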
9707static void
9708vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
9709{
9710  if (inst.operands[0].writeback)
9711    inst.instruction |= WRITE_BACK;
9712  else
9713    constraint (ldstm_type != VFP_LDSTMIA,
9714		_("this addressing mode requires base-register writeback"));
9715  inst.instruction |= inst.operands[0].reg << 16;
9716  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
9717  inst.instruction |= inst.operands[1].imm;
9718}
9719
9720static void
9721vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
9722{
9723  int count;
9724
9725  if (inst.operands[0].writeback)
9726    inst.instruction |= WRITE_BACK;
9727  else
9728    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
9729		_("this addressing mode requires base-register writeback"));
9730
9731  inst.instruction |= inst.operands[0].reg << 16;
9732  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
9733
9734  count = inst.operands[1].imm << 1;
9735  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
9736    count += 1;
9737
9738  inst.instruction |= count;
9739}
9740
9741static void
9742do_vfp_sp_ldstmia (void)
9743{
9744  vfp_sp_ldstm (VFP_LDSTMIA);
9745}
9746
9747static void
9748do_vfp_sp_ldstmdb (void)
9749{
9750  vfp_sp_ldstm (VFP_LDSTMDB);
9751}
9752
9753static void
9754do_vfp_dp_ldstmia (void)
9755{
9756  vfp_dp_ldstm (VFP_LDSTMIA);
9757}
9758
9759static void
9760do_vfp_dp_ldstmdb (void)
9761{
9762  vfp_dp_ldstm (VFP_LDSTMDB);
9763}
9764
9765static void
9766do_vfp_xp_ldstmia (void)
9767{
9768  vfp_dp_ldstm (VFP_LDSTMIAX);
9769}
9770
9771static void
9772do_vfp_xp_ldstmdb (void)
9773{
9774  vfp_dp_ldstm (VFP_LDSTMDBX);
9775}
9776
9777static void
9778do_vfp_dp_rd_rm (void)
9779{
9780  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9781  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
9782}
9783
9784static void
9785do_vfp_dp_rn_rd (void)
9786{
9787  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
9788  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
9789}
9790
9791static void
9792do_vfp_dp_rd_rn (void)
9793{
9794  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9795  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
9796}
9797
9798static void
9799do_vfp_dp_rd_rn_rm (void)
9800{
9801  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9802  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
9803  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
9804}
9805
9806static void
9807do_vfp_dp_rd (void)
9808{
9809  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9810}
9811
9812static void
9813do_vfp_dp_rm_rd_rn (void)
9814{
9815  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
9816  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
9817  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
9818}
9819
9820/* VFPv3 instructions.  */
9821static void
9822do_vfp_sp_const (void)
9823{
9824  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9825  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
9826  inst.instruction |= (inst.operands[1].imm & 0x0f);
9827}
9828
9829static void
9830do_vfp_dp_const (void)
9831{
9832  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9833  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
9834  inst.instruction |= (inst.operands[1].imm & 0x0f);
9835}
9836
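/* Encode the fixed-point conversion immediate: the value stored is
   (srcsize - #fbits); its least significant bit lands in bit 5 of the
   instruction and the remaining bits in bits 3:0.  */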
9837static void
9838vfp_conv (int srcsize)
9839{
9840  int immbits = srcsize - inst.operands[1].imm;
9841
9842  if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
9843    {
9844      /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
9845	 i.e. immbits must be in range 0 - 16.  */
9846      inst.error = _("immediate value out of range, expected range [0, 16]");
9847      return;
9848    }
9849  else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
9850    {
9851      /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
9852	 i.e. immbits must be in range 0 - 31.  */
9853      inst.error = _("immediate value out of range, expected range [1, 32]");
9854      return;
9855    }
9856
9857  inst.instruction |= (immbits & 1) << 5;
9858  inst.instruction |= (immbits >> 1);
9859}
9860
9861static void
9862do_vfp_sp_conv_16 (void)
9863{
9864  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9865  vfp_conv (16);
9866}
9867
9868static void
9869do_vfp_dp_conv_16 (void)
9870{
9871  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9872  vfp_conv (16);
9873}
9874
9875static void
9876do_vfp_sp_conv_32 (void)
9877{
9878  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9879  vfp_conv (32);
9880}
9881
9882static void
9883do_vfp_dp_conv_32 (void)
9884{
9885  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9886  vfp_conv (32);
9887}
9888
9889/* FPA instructions.  Also in a logical order.	*/
9890
9891static void
9892do_fpa_cmp (void)
9893{
9894  inst.instruction |= inst.operands[0].reg << 16;
9895  inst.instruction |= inst.operands[1].reg;
9896}
9897
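/* FPA load/store multiple (LFM/SFM): the register count (1-4) is
   encoded in the CP_T_X and CP_T_Y bits, with a count of 4
   represented by both bits clear.  */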
9898static void
9899do_fpa_ldmstm (void)
9900{
9901  inst.instruction |= inst.operands[0].reg << 12;
9902  switch (inst.operands[1].imm)
9903    {
9904    case 1: inst.instruction |= CP_T_X;		 break;
9905    case 2: inst.instruction |= CP_T_Y;		 break;
9906    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
9907    case 4:					 break;
9908    default: abort ();
9909    }
9910
9911  if (inst.instruction & (PRE_INDEX | INDEX_UP))
9912    {
9913      /* The instruction specified "ea" or "fd", so we can only accept
9914	 [Rn]{!}.  The instruction does not really support stacking or
9915	 unstacking, so we have to emulate these by setting appropriate
9916	 bits and offsets.  */
9917      constraint (inst.reloc.exp.X_op != O_constant
9918		  || inst.reloc.exp.X_add_number != 0,
9919		  _("this instruction does not support indexing"));
9920
9921      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
9922	inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;
9923
9924      if (!(inst.instruction & INDEX_UP))
9925	inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;
9926
9927      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
9928	{
9929	  inst.operands[2].preind = 0;
9930	  inst.operands[2].postind = 1;
9931	}
9932    }
9933
9934  encode_arm_cp_address (2, TRUE, TRUE, 0);
9935}
9936
9937/* iWMMXt instructions: strictly in alphabetical order.	 */
9938
9939static void
9940do_iwmmxt_tandorc (void)
9941{
9942  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
9943}
9944
9945static void
9946do_iwmmxt_textrc (void)
9947{
9948  inst.instruction |= inst.operands[0].reg << 12;
9949  inst.instruction |= inst.operands[1].imm;
9950}
9951
9952static void
9953do_iwmmxt_textrm (void)
9954{
9955  inst.instruction |= inst.operands[0].reg << 12;
9956  inst.instruction |= inst.operands[1].reg << 16;
9957  inst.instruction |= inst.operands[2].imm;
9958}
9959
9960static void
9961do_iwmmxt_tinsr (void)
9962{
9963  inst.instruction |= inst.operands[0].reg << 16;
9964  inst.instruction |= inst.operands[1].reg << 12;
9965  inst.instruction |= inst.operands[2].imm;
9966}
9967
9968static void
9969do_iwmmxt_tmia (void)
9970{
9971  inst.instruction |= inst.operands[0].reg << 5;
9972  inst.instruction |= inst.operands[1].reg;
9973  inst.instruction |= inst.operands[2].reg << 12;
9974}
9975
9976static void
9977do_iwmmxt_waligni (void)
9978{
9979  inst.instruction |= inst.operands[0].reg << 12;
9980  inst.instruction |= inst.operands[1].reg << 16;
9981  inst.instruction |= inst.operands[2].reg;
9982  inst.instruction |= inst.operands[3].imm << 20;
9983}
9984
9985static void
9986do_iwmmxt_wmerge (void)
9987{
9988  inst.instruction |= inst.operands[0].reg << 12;
9989  inst.instruction |= inst.operands[1].reg << 16;
9990  inst.instruction |= inst.operands[2].reg;
9991  inst.instruction |= inst.operands[3].imm << 21;
9992}
9993
9994static void
9995do_iwmmxt_wmov (void)
9996{
9997  /* WMOV rD, rN is an alias for WOR rD, rN, rN.  */
9998  inst.instruction |= inst.operands[0].reg << 12;
9999  inst.instruction |= inst.operands[1].reg << 16;
10000  inst.instruction |= inst.operands[1].reg;
10001}
10002
10003static void
10004do_iwmmxt_wldstbh (void)
10005{
10006  int reloc;
10007  inst.instruction |= inst.operands[0].reg << 12;
10008  if (thumb_mode)
10009    reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
10010  else
10011    reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
10012  encode_arm_cp_address (1, TRUE, FALSE, reloc);
10013}
10014
10015static void
10016do_iwmmxt_wldstw (void)
10017{
10018  /* RIWR_RIWC clears .isreg for a control register.  */
10019  if (!inst.operands[0].isreg)
10020    {
10021      constraint (inst.cond != COND_ALWAYS, BAD_COND);
10022      inst.instruction |= 0xf0000000;
10023    }
10024
10025  inst.instruction |= inst.operands[0].reg << 12;
10026  encode_arm_cp_address (1, TRUE, TRUE, 0);
10027}
10028
10029static void
10030do_iwmmxt_wldstd (void)
10031{
10032  inst.instruction |= inst.operands[0].reg << 12;
10033  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
10034      && inst.operands[1].immisreg)
10035    {
10036      inst.instruction &= ~0x1a000ff;
10037      inst.instruction |= (0xfU << 28);
10038      if (inst.operands[1].preind)
10039	inst.instruction |= PRE_INDEX;
10040      if (!inst.operands[1].negative)
10041	inst.instruction |= INDEX_UP;
10042      if (inst.operands[1].writeback)
10043	inst.instruction |= WRITE_BACK;
10044      inst.instruction |= inst.operands[1].reg << 16;
10045      inst.instruction |= inst.reloc.exp.X_add_number << 4;
10046      inst.instruction |= inst.operands[1].imm;
10047    }
10048  else
10049    encode_arm_cp_address (1, TRUE, FALSE, 0);
10050}
10051
10052static void
10053do_iwmmxt_wshufh (void)
10054{
10055  inst.instruction |= inst.operands[0].reg << 12;
10056  inst.instruction |= inst.operands[1].reg << 16;
10057  inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
10058  inst.instruction |= (inst.operands[2].imm & 0x0f);
10059}
10060
10061static void
10062do_iwmmxt_wzero (void)
10063{
10064  /* WZERO reg is an alias for WANDN reg, reg, reg.  */
10065  inst.instruction |= inst.operands[0].reg;
10066  inst.instruction |= inst.operands[0].reg << 12;
10067  inst.instruction |= inst.operands[0].reg << 16;
10068}
10069
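/* iWMMXt shift instructions take either a register or a 5-bit
   immediate shift count; the immediate form requires iWMMXt2, and a
   count of zero is rewritten as an equivalent rotate (or WOR) below.  */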
10070static void
10071do_iwmmxt_wrwrwr_or_imm5 (void)
10072{
10073  if (inst.operands[2].isreg)
10074    do_rd_rn_rm ();
10075  else {
10076    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
10077		_("immediate operand requires iWMMXt2"));
10078    do_rd_rn ();
10079    if (inst.operands[2].imm == 0)
10080      {
10081	switch ((inst.instruction >> 20) & 0xf)
10082	  {
10083	  case 4:
10084	  case 5:
10085	  case 6:
10086	  case 7:
10087	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
10088	    inst.operands[2].imm = 16;
10089	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
10090	    break;
10091	  case 8:
10092	  case 9:
10093	  case 10:
10094	  case 11:
10095	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
10096	    inst.operands[2].imm = 32;
10097	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
10098	    break;
10099	  case 12:
10100	  case 13:
10101	  case 14:
10102	  case 15:
10103	    {
10104	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
10105	      unsigned long wrn;
10106	      wrn = (inst.instruction >> 16) & 0xf;
10107	      inst.instruction &= 0xff0fff0f;
10108	      inst.instruction |= wrn;
10109	      /* Bail out here; the instruction is now assembled.  */
10110	      return;
10111	    }
10112	  }
10113      }
10114    /* Map 32 -> 0, etc.  */
10115    inst.operands[2].imm &= 0x1f;
10116    inst.instruction |= (0xfU << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
10117  }
10118}
10119
10120/* Cirrus Maverick instructions.  Simple 2-, 3-, and 4-register
10121   operations first, then control, shift, and load/store.  */
10122
10123/* Insns like "foo X,Y,Z".  */
10124
10125static void
10126do_mav_triple (void)
10127{
10128  inst.instruction |= inst.operands[0].reg << 16;
10129  inst.instruction |= inst.operands[1].reg;
10130  inst.instruction |= inst.operands[2].reg << 12;
10131}
10132
10133/* Insns like "foo W,X,Y,Z".
10134    where W=MVAX[0:3] and X,Y,Z=MVFX[0:15].  */
10135
10136static void
10137do_mav_quad (void)
10138{
10139  inst.instruction |= inst.operands[0].reg << 5;
10140  inst.instruction |= inst.operands[1].reg << 12;
10141  inst.instruction |= inst.operands[2].reg << 16;
10142  inst.instruction |= inst.operands[3].reg;
10143}
10144
10145/* cfmvsc32<cond> DSPSC,MVDX[15:0].  */
10146static void
10147do_mav_dspsc (void)
10148{
10149  inst.instruction |= inst.operands[1].reg << 12;
10150}
10151
10152/* Maverick shift immediate instructions.
10153   cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
10154   cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0].  */
10155
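/* For example, a shift immediate of 37 (0b0100101) is encoded as
   (37 & 0xf) | ((37 & 0x70) << 1) = 0x45.  */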
10156static void
10157do_mav_shift (void)
10158{
10159  int imm = inst.operands[2].imm;
10160
10161  inst.instruction |= inst.operands[0].reg << 12;
10162  inst.instruction |= inst.operands[1].reg << 16;
10163
10164  /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
10165     Bits 5-7 of the insn should have bits 4-6 of the immediate.
10166     Bit 4 should be 0.	 */
10167  imm = (imm & 0xf) | ((imm & 0x70) << 1);
10168
10169  inst.instruction |= imm;
10170}
10171
10172/* XScale instructions.	 Also sorted arithmetic before move.  */
10173
10174/* Xscale multiply-accumulate (argument parse)
10175     MIAcc   acc0,Rm,Rs
10176     MIAPHcc acc0,Rm,Rs
10177     MIAxycc acc0,Rm,Rs.  */
10178
10179static void
10180do_xsc_mia (void)
10181{
10182  inst.instruction |= inst.operands[1].reg;
10183  inst.instruction |= inst.operands[2].reg << 12;
10184}
10185
10186/* Xscale move-accumulator-register (argument parse)
10187
10188     MARcc   acc0,RdLo,RdHi.  */
10189
10190static void
10191do_xsc_mar (void)
10192{
10193  inst.instruction |= inst.operands[1].reg << 12;
10194  inst.instruction |= inst.operands[2].reg << 16;
10195}
10196
10197/* Xscale move-register-accumulator (argument parse)
10198
10199     MRAcc   RdLo,RdHi,acc0.  */
10200
10201static void
10202do_xsc_mra (void)
10203{
10204  constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
10205  inst.instruction |= inst.operands[0].reg << 12;
10206  inst.instruction |= inst.operands[1].reg << 16;
10207}
10208
10209/* Encoding functions relevant only to Thumb.  */
10210
10211/* inst.operands[i] is a shifted-register operand; encode
10212   it into inst.instruction in the format used by Thumb32.  */
10213
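/* For example, "r2, LSL #3" puts the register in bits 3:0, the shift
   type in bits 5:4 and the shift amount split across bits 14:12
   (imm3 = 0) and 7:6 (imm2 = 3).  */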
10214static void
10215encode_thumb32_shifted_operand (int i)
10216{
10217  unsigned int value = inst.reloc.exp.X_add_number;
10218  unsigned int shift = inst.operands[i].shift_kind;
10219
10220  constraint (inst.operands[i].immisreg,
10221	      _("shift by register not allowed in thumb mode"));
10222  inst.instruction |= inst.operands[i].reg;
10223  if (shift == SHIFT_RRX)
10224    inst.instruction |= SHIFT_ROR << 4;
10225  else
10226    {
10227      constraint (inst.reloc.exp.X_op != O_constant,
10228		  _("expression too complex"));
10229
10230      constraint (value > 32
10231		  || (value == 32 && (shift == SHIFT_LSL
10232				      || shift == SHIFT_ROR)),
10233		  _("shift expression is too large"));
10234
10235      if (value == 0)
10236	shift = SHIFT_LSL;
10237      else if (value == 32)
10238	value = 0;
10239
10240      inst.instruction |= shift << 4;
10241      inst.instruction |= (value & 0x1c) << 10;
10242      inst.instruction |= (value & 0x03) << 6;
10243    }
10244}
10245
10246
10247/* inst.operands[i] was set up by parse_address.  Encode it into a
10248   Thumb32 format load or store instruction.  Reject forms that cannot
10249   be used with such instructions.  If is_t is true, reject forms that
10250   cannot be used with a T instruction; if is_d is true, reject forms
10251   that cannot be used with a D instruction.  If it is a store insn,
10252   reject PC in Rn.  */
10253
10254static void
10255encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
10256{
10257  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
10258
10259  constraint (!inst.operands[i].isreg,
10260	      _("Instruction does not support =N addresses"));
10261
10262  inst.instruction |= inst.operands[i].reg << 16;
10263  if (inst.operands[i].immisreg)
10264    {
10265      constraint (is_pc, BAD_PC_ADDRESSING);
10266      constraint (is_t || is_d, _("cannot use register index with this instruction"));
10267      constraint (inst.operands[i].negative,
10268		  _("Thumb does not support negative register indexing"));
10269      constraint (inst.operands[i].postind,
10270		  _("Thumb does not support register post-indexing"));
10271      constraint (inst.operands[i].writeback,
10272		  _("Thumb does not support register indexing with writeback"));
10273      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
10274		  _("Thumb supports only LSL in shifted register indexing"));
10275
10276      inst.instruction |= inst.operands[i].imm;
10277      if (inst.operands[i].shifted)
10278	{
10279	  constraint (inst.reloc.exp.X_op != O_constant,
10280		      _("expression too complex"));
10281	  constraint (inst.reloc.exp.X_add_number < 0
10282		      || inst.reloc.exp.X_add_number > 3,
10283		      _("shift out of range"));
10284	  inst.instruction |= inst.reloc.exp.X_add_number << 4;
10285	}
10286      inst.reloc.type = BFD_RELOC_UNUSED;
10287    }
10288  else if (inst.operands[i].preind)
10289    {
10290      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
10291      constraint (is_t && inst.operands[i].writeback,
10292		  _("cannot use writeback with this instruction"));
10293      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
10294		  BAD_PC_ADDRESSING);
10295
10296      if (is_d)
10297	{
10298	  inst.instruction |= 0x01000000;
10299	  if (inst.operands[i].writeback)
10300	    inst.instruction |= 0x00200000;
10301	}
10302      else
10303	{
10304	  inst.instruction |= 0x00000c00;
10305	  if (inst.operands[i].writeback)
10306	    inst.instruction |= 0x00000100;
10307	}
10308      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
10309    }
10310  else if (inst.operands[i].postind)
10311    {
10312      gas_assert (inst.operands[i].writeback);
10313      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
10314      constraint (is_t, _("cannot use post-indexing with this instruction"));
10315
10316      if (is_d)
10317	inst.instruction |= 0x00200000;
10318      else
10319	inst.instruction |= 0x00000900;
10320      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
10321    }
10322  else /* unindexed - only for coprocessor */
10323    inst.error = _("instruction does not accept unindexed addressing");
10324}
10325
10326/* Table of Thumb instructions which exist in both 16- and 32-bit
10327   encodings (the latter only in post-V6T2 cores).  The index is the
10328   value used in the insns table below.  When there is more than one
10329   possible 16-bit encoding for the instruction, this table always
10330   holds variant (1).
10331   Also contains several pseudo-instructions used during relaxation.  */
10332#define T16_32_TAB				\
10333  X(_adc,   4140, eb400000),			\
10334  X(_adcs,  4140, eb500000),			\
10335  X(_add,   1c00, eb000000),			\
10336  X(_adds,  1c00, eb100000),			\
10337  X(_addi,  0000, f1000000),			\
10338  X(_addis, 0000, f1100000),			\
10339  X(_add_pc,000f, f20f0000),			\
10340  X(_add_sp,000d, f10d0000),			\
10341  X(_adr,   000f, f20f0000),			\
10342  X(_and,   4000, ea000000),			\
10343  X(_ands,  4000, ea100000),			\
10344  X(_asr,   1000, fa40f000),			\
10345  X(_asrs,  1000, fa50f000),			\
10346  X(_b,     e000, f000b000),			\
10347  X(_bcond, d000, f0008000),			\
10348  X(_bic,   4380, ea200000),			\
10349  X(_bics,  4380, ea300000),			\
10350  X(_cmn,   42c0, eb100f00),			\
10351  X(_cmp,   2800, ebb00f00),			\
10352  X(_cpsie, b660, f3af8400),			\
10353  X(_cpsid, b670, f3af8600),			\
10354  X(_cpy,   4600, ea4f0000),			\
10355  X(_dec_sp,80dd, f1ad0d00),			\
10356  X(_eor,   4040, ea800000),			\
10357  X(_eors,  4040, ea900000),			\
10358  X(_inc_sp,00dd, f10d0d00),			\
10359  X(_ldmia, c800, e8900000),			\
10360  X(_ldr,   6800, f8500000),			\
10361  X(_ldrb,  7800, f8100000),			\
10362  X(_ldrh,  8800, f8300000),			\
10363  X(_ldrsb, 5600, f9100000),			\
10364  X(_ldrsh, 5e00, f9300000),			\
10365  X(_ldr_pc,4800, f85f0000),			\
10366  X(_ldr_pc2,4800, f85f0000),			\
10367  X(_ldr_sp,9800, f85d0000),			\
10368  X(_lsl,   0000, fa00f000),			\
10369  X(_lsls,  0000, fa10f000),			\
10370  X(_lsr,   0800, fa20f000),			\
10371  X(_lsrs,  0800, fa30f000),			\
10372  X(_mov,   2000, ea4f0000),			\
10373  X(_movs,  2000, ea5f0000),			\
10374  X(_mul,   4340, fb00f000),                     \
10375  X(_muls,  4340, ffffffff), /* no 32b muls */	\
10376  X(_mvn,   43c0, ea6f0000),			\
10377  X(_mvns,  43c0, ea7f0000),			\
10378  X(_neg,   4240, f1c00000), /* rsb #0 */	\
10379  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
10380  X(_orr,   4300, ea400000),			\
10381  X(_orrs,  4300, ea500000),			\
10382  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
10383  X(_push,  b400, e92d0000), /* stmdb sp!,... */	\
10384  X(_rev,   ba00, fa90f080),			\
10385  X(_rev16, ba40, fa90f090),			\
10386  X(_revsh, bac0, fa90f0b0),			\
10387  X(_ror,   41c0, fa60f000),			\
10388  X(_rors,  41c0, fa70f000),			\
10389  X(_sbc,   4180, eb600000),			\
10390  X(_sbcs,  4180, eb700000),			\
10391  X(_stmia, c000, e8800000),			\
10392  X(_str,   6000, f8400000),			\
10393  X(_strb,  7000, f8000000),			\
10394  X(_strh,  8000, f8200000),			\
10395  X(_str_sp,9000, f84d0000),			\
10396  X(_sub,   1e00, eba00000),			\
10397  X(_subs,  1e00, ebb00000),			\
10398  X(_subi,  8000, f1a00000),			\
10399  X(_subis, 8000, f1b00000),			\
10400  X(_sxtb,  b240, fa4ff080),			\
10401  X(_sxth,  b200, fa0ff080),			\
10402  X(_tst,   4200, ea100f00),			\
10403  X(_uxtb,  b2c0, fa5ff080),			\
10404  X(_uxth,  b280, fa1ff080),			\
10405  X(_nop,   bf00, f3af8000),			\
10406  X(_yield, bf10, f3af8001),			\
10407  X(_wfe,   bf20, f3af8002),			\
10408  X(_wfi,   bf30, f3af8003),			\
10409  X(_sev,   bf40, f3af8004),                    \
10410  X(_sevl,  bf50, f3af8005),			\
10411  X(_udf,   de00, f7f0a000)
10412
10413/* To catch errors in encoding functions, the codes are all offset by
10414   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
10415   as 16-bit instructions.  */
10416#define X(a,b,c) T_MNEM##a
10417enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
10418#undef X
10419
10420#define X(a,b,c) 0x##b
10421static const unsigned short thumb_op16[] = { T16_32_TAB };
10422#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
10423#undef X
10424
10425#define X(a,b,c) 0x##c
10426static const unsigned int thumb_op32[] = { T16_32_TAB };
10427#define THUMB_OP32(n)        (thumb_op32[(n) - (T16_32_OFFSET + 1)])
10428#define THUMB_SETS_FLAGS(n)  (THUMB_OP32 (n) & 0x00100000)
10429#undef X
10430#undef T16_32_TAB
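
/* For example, THUMB_OP16 (T_MNEM_add) is 0x1c00 and
   THUMB_OP32 (T_MNEM_add) is 0xeb000000; THUMB_SETS_FLAGS tests bit 20
   of the 32-bit encoding, which is set for all the flag-setting (...s)
   entries in the table above.  */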
10431
10432/* Thumb instruction encoders, in alphabetical order.  */
10433
10434/* ADDW or SUBW.  */
10435
10436static void
10437do_t_add_sub_w (void)
10438{
10439  int Rd, Rn;
10440
10441  Rd = inst.operands[0].reg;
10442  Rn = inst.operands[1].reg;
10443
10444  /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10445     is the SP-{plus,minus}-immediate form of the instruction.  */
10446  if (Rn == REG_SP)
10447    constraint (Rd == REG_PC, BAD_PC);
10448  else
10449    reject_bad_reg (Rd);
10450
10451  inst.instruction |= (Rn << 16) | (Rd << 8);
10452  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
10453}
10454
10455/* Parse an add or subtract instruction.  We get here with inst.instruction
10456   equalling any of T_MNEM_add, adds, sub, or subs.  */
10457
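/* In unified syntax the 16-bit special cases are tried first (ADD/SUB
   SP, the SP- and PC-relative ADDs, and the narrow immediate and
   register forms); otherwise a 32-bit T2 encoding is used, with
   ADDW/SUBW selected when the source register is the PC.  */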
10458static void
10459do_t_add_sub (void)
10460{
10461  int Rd, Rs, Rn;
10462
10463  Rd = inst.operands[0].reg;
10464  Rs = (inst.operands[1].present
10465	? inst.operands[1].reg    /* Rd, Rs, foo */
10466	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
10467
10468  if (Rd == REG_PC)
10469    set_it_insn_type_last ();
10470
10471  if (unified_syntax)
10472    {
10473      bfd_boolean flags;
10474      bfd_boolean narrow;
10475      int opcode;
10476
10477      flags = (inst.instruction == T_MNEM_adds
10478	       || inst.instruction == T_MNEM_subs);
10479      if (flags)
10480	narrow = !in_it_block ();
10481      else
10482	narrow = in_it_block ();
10483      if (!inst.operands[2].isreg)
10484	{
10485	  int add;
10486
10487	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
10488
10489	  add = (inst.instruction == T_MNEM_add
10490		 || inst.instruction == T_MNEM_adds);
10491	  opcode = 0;
10492	  if (inst.size_req != 4)
10493	    {
10494	      /* Attempt to use a narrow opcode, with relaxation if
10495		 appropriate.  */
10496	      if (Rd == REG_SP && Rs == REG_SP && !flags)
10497		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
10498	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
10499		opcode = T_MNEM_add_sp;
10500	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
10501		opcode = T_MNEM_add_pc;
10502	      else if (Rd <= 7 && Rs <= 7 && narrow)
10503		{
10504		  if (flags)
10505		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
10506		  else
10507		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
10508		}
10509	      if (opcode)
10510		{
10511		  inst.instruction = THUMB_OP16(opcode);
10512		  inst.instruction |= (Rd << 4) | Rs;
10513		  if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
10514		      || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
10515		  {
10516		    if (inst.size_req == 2)
10517		      inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
10518		    else
10519		      inst.relax = opcode;
10520		  }
10521		}
10522	      else
10523		constraint (inst.size_req == 2, BAD_HIREG);
10524	    }
10525	  if (inst.size_req == 4
10526	      || (inst.size_req != 2 && !opcode))
10527	    {
10528	      constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
10529			  && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
10530			  THUMB1_RELOC_ONLY);
10531	      if (Rd == REG_PC)
10532		{
10533		  constraint (add, BAD_PC);
10534		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
10535			     _("only SUBS PC, LR, #const allowed"));
10536		  constraint (inst.reloc.exp.X_op != O_constant,
10537			      _("expression too complex"));
10538		  constraint (inst.reloc.exp.X_add_number < 0
10539			      || inst.reloc.exp.X_add_number > 0xff,
10540			     _("immediate value out of range"));
10541		  inst.instruction = T2_SUBS_PC_LR
10542				     | inst.reloc.exp.X_add_number;
10543		  inst.reloc.type = BFD_RELOC_UNUSED;
10544		  return;
10545		}
10546	      else if (Rs == REG_PC)
10547		{
10548		  /* Always use addw/subw.  */
10549		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
10550		  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
10551		}
10552	      else
10553		{
10554		  inst.instruction = THUMB_OP32 (inst.instruction);
10555		  inst.instruction = (inst.instruction & 0xe1ffffff)
10556				     | 0x10000000;
10557		  if (flags)
10558		    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10559		  else
10560		    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
10561		}
10562	      inst.instruction |= Rd << 8;
10563	      inst.instruction |= Rs << 16;
10564	    }
10565	}
10566      else
10567	{
10568	  unsigned int value = inst.reloc.exp.X_add_number;
10569	  unsigned int shift = inst.operands[2].shift_kind;
10570
10571	  Rn = inst.operands[2].reg;
10572	  /* See if we can do this with a 16-bit instruction.  */
10573	  if (!inst.operands[2].shifted && inst.size_req != 4)
10574	    {
10575	      if (Rd > 7 || Rs > 7 || Rn > 7)
10576		narrow = FALSE;
10577
10578	      if (narrow)
10579		{
10580		  inst.instruction = ((inst.instruction == T_MNEM_adds
10581				       || inst.instruction == T_MNEM_add)
10582				      ? T_OPCODE_ADD_R3
10583				      : T_OPCODE_SUB_R3);
10584		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
10585		  return;
10586		}
10587
10588	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
10589		{
10590		  /* Thumb-1 cores (except v6-M) require at least one high
10591		     register in a narrow non-flag-setting add.  */
10592		  if (Rd > 7 || Rn > 7
10593		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
10594		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
10595		    {
10596		      if (Rd == Rn)
10597			{
10598			  Rn = Rs;
10599			  Rs = Rd;
10600			}
10601		      inst.instruction = T_OPCODE_ADD_HI;
10602		      inst.instruction |= (Rd & 8) << 4;
10603		      inst.instruction |= (Rd & 7);
10604		      inst.instruction |= Rn << 3;
10605		      return;
10606		    }
10607		}
10608	    }
10609
10610	  constraint (Rd == REG_PC, BAD_PC);
10611	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
10612	  constraint (Rs == REG_PC, BAD_PC);
10613	  reject_bad_reg (Rn);
10614
10615	  /* If we get here, it can't be done in 16 bits.  */
10616	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
10617		      _("shift must be constant"));
10618	  inst.instruction = THUMB_OP32 (inst.instruction);
10619	  inst.instruction |= Rd << 8;
10620	  inst.instruction |= Rs << 16;
10621	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
10622		      _("shift value over 3 not allowed in thumb mode"));
10623	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
10624		      _("only LSL shift allowed in thumb mode"));
10625	  encode_thumb32_shifted_operand (2);
10626	}
10627    }
10628  else
10629    {
10630      constraint (inst.instruction == T_MNEM_adds
10631		  || inst.instruction == T_MNEM_subs,
10632		  BAD_THUMB32);
10633
10634      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
10635	{
10636	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
10637		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
10638		      BAD_HIREG);
10639
10640	  inst.instruction = (inst.instruction == T_MNEM_add
10641			      ? 0x0000 : 0x8000);
10642	  inst.instruction |= (Rd << 4) | Rs;
10643	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
10644	  return;
10645	}
10646
10647      Rn = inst.operands[2].reg;
10648      constraint (inst.operands[2].shifted, _("unshifted register required"));
10649
10650      /* We now have Rd, Rs, and Rn set to registers.  */
10651      if (Rd > 7 || Rs > 7 || Rn > 7)
10652	{
10653	  /* Can't do this for SUB.	 */
10654	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
10655	  inst.instruction = T_OPCODE_ADD_HI;
10656	  inst.instruction |= (Rd & 8) << 4;
10657	  inst.instruction |= (Rd & 7);
10658	  if (Rs == Rd)
10659	    inst.instruction |= Rn << 3;
10660	  else if (Rn == Rd)
10661	    inst.instruction |= Rs << 3;
10662	  else
10663	    constraint (1, _("dest must overlap one source register"));
10664	}
10665      else
10666	{
10667	  inst.instruction = (inst.instruction == T_MNEM_add
10668			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
10669	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
10670	}
10671    }
10672}
10673
10674static void
10675do_t_adr (void)
10676{
10677  unsigned Rd;
10678
10679  Rd = inst.operands[0].reg;
10680  reject_bad_reg (Rd);
10681
10682  if (unified_syntax && inst.size_req == 0 && Rd <= 7)
10683    {
10684      /* Defer to section relaxation.  */
10685      inst.relax = inst.instruction;
10686      inst.instruction = THUMB_OP16 (inst.instruction);
10687      inst.instruction |= Rd << 4;
10688    }
10689  else if (unified_syntax && inst.size_req != 2)
10690    {
10691      /* Generate a 32-bit opcode.  */
10692      inst.instruction = THUMB_OP32 (inst.instruction);
10693      inst.instruction |= Rd << 8;
10694      inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
10695      inst.reloc.pc_rel = 1;
10696    }
10697  else
10698    {
10699      /* Generate a 16-bit opcode.  */
10700      inst.instruction = THUMB_OP16 (inst.instruction);
10701      inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
10702      inst.reloc.exp.X_add_number -= 4; /* PC relative adjust.  */
10703      inst.reloc.pc_rel = 1;
10704
10705      inst.instruction |= Rd << 4;
10706    }
10707}
10708
10709/* Arithmetic instructions for which there is just one 16-bit
10710   instruction encoding, and it allows only two low registers.
10711   For maximal compatibility with ARM syntax, we allow three register
10712   operands even when Thumb-32 instructions are not available, as long
10713   as the first two are identical.  For instance, both "sbc r0,r1" and
10714   "sbc r0,r0,r1" are allowed.  */
10715static void
10716do_t_arit3 (void)
10717{
10718  int Rd, Rs, Rn;
10719
10720  Rd = inst.operands[0].reg;
10721  Rs = (inst.operands[1].present
10722	? inst.operands[1].reg    /* Rd, Rs, foo */
10723	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
10724  Rn = inst.operands[2].reg;
10725
10726  reject_bad_reg (Rd);
10727  reject_bad_reg (Rs);
10728  if (inst.operands[2].isreg)
10729    reject_bad_reg (Rn);
10730
10731  if (unified_syntax)
10732    {
10733      if (!inst.operands[2].isreg)
10734	{
10735	  /* For an immediate, we always generate a 32-bit opcode;
10736	     section relaxation will shrink it later if possible.  */
10737	  inst.instruction = THUMB_OP32 (inst.instruction);
10738	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
10739	  inst.instruction |= Rd << 8;
10740	  inst.instruction |= Rs << 16;
10741	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10742	}
10743      else
10744	{
10745	  bfd_boolean narrow;
10746
10747	  /* See if we can do this with a 16-bit instruction.  */
10748	  if (THUMB_SETS_FLAGS (inst.instruction))
10749	    narrow = !in_it_block ();
10750	  else
10751	    narrow = in_it_block ();
10752
10753	  if (Rd > 7 || Rn > 7 || Rs > 7)
10754	    narrow = FALSE;
10755	  if (inst.operands[2].shifted)
10756	    narrow = FALSE;
10757	  if (inst.size_req == 4)
10758	    narrow = FALSE;
10759
10760	  if (narrow
10761	      && Rd == Rs)
10762	    {
10763	      inst.instruction = THUMB_OP16 (inst.instruction);
10764	      inst.instruction |= Rd;
10765	      inst.instruction |= Rn << 3;
10766	      return;
10767	    }
10768
10769	  /* If we get here, it can't be done in 16 bits.  */
10770	  constraint (inst.operands[2].shifted
10771		      && inst.operands[2].immisreg,
10772		      _("shift must be constant"));
10773	  inst.instruction = THUMB_OP32 (inst.instruction);
10774	  inst.instruction |= Rd << 8;
10775	  inst.instruction |= Rs << 16;
10776	  encode_thumb32_shifted_operand (2);
10777	}
10778    }
10779  else
10780    {
10781      /* On its face this is a lie - the instruction does set the
10782	 flags.  However, the only supported mnemonic in this mode
10783	 says it doesn't.  */
10784      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
10785
10786      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
10787		  _("unshifted register required"));
10788      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
10789      constraint (Rd != Rs,
10790		  _("dest and source1 must be the same register"));
10791
10792      inst.instruction = THUMB_OP16 (inst.instruction);
10793      inst.instruction |= Rd;
10794      inst.instruction |= Rn << 3;
10795    }
10796}
10797
10798/* Similarly, but for instructions where the arithmetic operation is
10799   commutative, so we can allow either of them to be different from
10800   the destination operand in a 16-bit instruction.  For instance, all
10801   three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
10802   accepted.  */
10803static void
10804do_t_arit3c (void)
10805{
10806  int Rd, Rs, Rn;
10807
10808  Rd = inst.operands[0].reg;
10809  Rs = (inst.operands[1].present
10810	? inst.operands[1].reg    /* Rd, Rs, foo */
10811	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
10812  Rn = inst.operands[2].reg;
10813
10814  reject_bad_reg (Rd);
10815  reject_bad_reg (Rs);
10816  if (inst.operands[2].isreg)
10817    reject_bad_reg (Rn);
10818
10819  if (unified_syntax)
10820    {
10821      if (!inst.operands[2].isreg)
10822	{
10823	  /* For an immediate, we always generate a 32-bit opcode;
10824	     section relaxation will shrink it later if possible.  */
10825	  inst.instruction = THUMB_OP32 (inst.instruction);
10826	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
10827	  inst.instruction |= Rd << 8;
10828	  inst.instruction |= Rs << 16;
10829	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10830	}
10831      else
10832	{
10833	  bfd_boolean narrow;
10834
10835	  /* See if we can do this with a 16-bit instruction.  */
10836	  if (THUMB_SETS_FLAGS (inst.instruction))
10837	    narrow = !in_it_block ();
10838	  else
10839	    narrow = in_it_block ();
10840
10841	  if (Rd > 7 || Rn > 7 || Rs > 7)
10842	    narrow = FALSE;
10843	  if (inst.operands[2].shifted)
10844	    narrow = FALSE;
10845	  if (inst.size_req == 4)
10846	    narrow = FALSE;
10847
10848	  if (narrow)
10849	    {
10850	      if (Rd == Rs)
10851		{
10852		  inst.instruction = THUMB_OP16 (inst.instruction);
10853		  inst.instruction |= Rd;
10854		  inst.instruction |= Rn << 3;
10855		  return;
10856		}
10857	      if (Rd == Rn)
10858		{
10859		  inst.instruction = THUMB_OP16 (inst.instruction);
10860		  inst.instruction |= Rd;
10861		  inst.instruction |= Rs << 3;
10862		  return;
10863		}
10864	    }
10865
10866	  /* If we get here, it can't be done in 16 bits.  */
10867	  constraint (inst.operands[2].shifted
10868		      && inst.operands[2].immisreg,
10869		      _("shift must be constant"));
10870	  inst.instruction = THUMB_OP32 (inst.instruction);
10871	  inst.instruction |= Rd << 8;
10872	  inst.instruction |= Rs << 16;
10873	  encode_thumb32_shifted_operand (2);
10874	}
10875    }
10876  else
10877    {
10878      /* On its face this is a lie - the instruction does set the
10879	 flags.  However, the only supported mnemonic in this mode
10880	 says it doesn't.  */
10881      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
10882
10883      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
10884		  _("unshifted register required"));
10885      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
10886
10887      inst.instruction = THUMB_OP16 (inst.instruction);
10888      inst.instruction |= Rd;
10889
10890      if (Rd == Rs)
10891	inst.instruction |= Rn << 3;
10892      else if (Rd == Rn)
10893	inst.instruction |= Rs << 3;
10894      else
10895	constraint (1, _("dest must overlap one source register"));
10896    }
10897}
10898
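/* BFC and BFI store <lsb, msb> rather than <lsb, width>: for example
   "bfc r0, #4, #8" puts lsb = 4 in the split imm3:imm2 field and
   msb - 1 = 11 in the low bits.  UBFX/SBFX (do_t_bfx) instead store
   width - 1.  */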
10899static void
10900do_t_bfc (void)
10901{
10902  unsigned Rd;
10903  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
10904  constraint (msb > 32, _("bit-field extends past end of register"));
10905  /* The instruction encoding stores the LSB and MSB,
10906     not the LSB and width.  */
10907  Rd = inst.operands[0].reg;
10908  reject_bad_reg (Rd);
10909  inst.instruction |= Rd << 8;
10910  inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
10911  inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
10912  inst.instruction |= msb - 1;
10913}
10914
10915static void
10916do_t_bfi (void)
10917{
10918  int Rd, Rn;
10919  unsigned int msb;
10920
10921  Rd = inst.operands[0].reg;
10922  reject_bad_reg (Rd);
10923
10924  /* #0 in second position is alternative syntax for bfc, which is
10925     the same instruction but with REG_PC in the Rm field.  */
10926  if (!inst.operands[1].isreg)
10927    Rn = REG_PC;
10928  else
10929    {
10930      Rn = inst.operands[1].reg;
10931      reject_bad_reg (Rn);
10932    }
10933
10934  msb = inst.operands[2].imm + inst.operands[3].imm;
10935  constraint (msb > 32, _("bit-field extends past end of register"));
10936  /* The instruction encoding stores the LSB and MSB,
10937     not the LSB and width.  */
10938  inst.instruction |= Rd << 8;
10939  inst.instruction |= Rn << 16;
10940  inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10941  inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10942  inst.instruction |= msb - 1;
10943}
10944
10945static void
10946do_t_bfx (void)
10947{
10948  unsigned Rd, Rn;
10949
10950  Rd = inst.operands[0].reg;
10951  Rn = inst.operands[1].reg;
10952
10953  reject_bad_reg (Rd);
10954  reject_bad_reg (Rn);
10955
10956  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
10957	      _("bit-field extends past end of register"));
10958  inst.instruction |= Rd << 8;
10959  inst.instruction |= Rn << 16;
10960  inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10961  inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10962  inst.instruction |= inst.operands[3].imm - 1;
10963}
10964
10965/* ARM V5 Thumb BLX (argument parse)
10966	BLX <target_addr>	which is BLX(1)
10967	BLX <Rm>		which is BLX(2)
10968   Unfortunately, there are two different opcodes for this mnemonic.
10969   So, the insns[].value is not used, and the code here zaps values
10970	into inst.instruction.
10971
10972   ??? How to take advantage of the additional two bits of displacement
10973   available in Thumb32 mode?  Need new relocation?  */
10974
10975static void
10976do_t_blx (void)
10977{
10978  set_it_insn_type_last ();
10979
10980  if (inst.operands[0].isreg)
10981    {
10982      constraint (inst.operands[0].reg == REG_PC, BAD_PC);
10983      /* We have a register, so this is BLX(2).  */
10984      inst.instruction |= inst.operands[0].reg << 3;
10985    }
10986  else
10987    {
10988      /* No register.  This must be BLX(1).  */
10989      inst.instruction = 0xf000e800;
10990      encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
10991    }
10992}
10993
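/* The relocation chosen mirrors the encoding: wide unconditional
   branches use BFD_RELOC_THUMB_PCREL_BRANCH25, wide conditional ones
   BRANCH20, and the narrow encodings BRANCH12 or BRANCH9.  */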
10994static void
10995do_t_branch (void)
10996{
10997  int opcode;
10998  int cond;
10999  bfd_reloc_code_real_type reloc;
11000
11001  cond = inst.cond;
11002  set_it_insn_type (IF_INSIDE_IT_LAST_INSN);
11003
11004  if (in_it_block ())
11005    {
11006      /* Conditional branches inside IT blocks are encoded as unconditional
11007	 branches.  */
11008      cond = COND_ALWAYS;
11009    }
11010  else
11011    cond = inst.cond;
11012
11013  if (cond != COND_ALWAYS)
11014    opcode = T_MNEM_bcond;
11015  else
11016    opcode = inst.instruction;
11017
11018  if (unified_syntax
11019      && (inst.size_req == 4
11020	  || (inst.size_req != 2
11021	      && (inst.operands[0].hasreloc
11022		  || inst.reloc.exp.X_op == O_constant))))
11023    {
11024      inst.instruction = THUMB_OP32(opcode);
11025      if (cond == COND_ALWAYS)
11026	reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
11027      else
11028	{
11029	  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2),
11030		      _("selected architecture does not support "
11031			"wide conditional branch instruction"));
11032
11033	  gas_assert (cond != 0xF);
11034	  inst.instruction |= cond << 22;
11035	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
11036	}
11037    }
11038  else
11039    {
11040      inst.instruction = THUMB_OP16(opcode);
11041      if (cond == COND_ALWAYS)
11042	reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
11043      else
11044	{
11045	  inst.instruction |= cond << 8;
11046	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
11047	}
11048      /* Allow section relaxation.  */
11049      if (unified_syntax && inst.size_req != 2)
11050	inst.relax = opcode;
11051    }
11052  inst.reloc.type = reloc;
11053  inst.reloc.pc_rel = 1;
11054}
11055
11056/* Actually do the work for Thumb state bkpt and hlt.  The only difference
11057   between the two is the maximum immediate allowed - which is passed in
11058   RANGE.  */
11059static void
11060do_t_bkpt_hlt1 (int range)
11061{
11062  constraint (inst.cond != COND_ALWAYS,
11063	      _("instruction is always unconditional"));
11064  if (inst.operands[0].present)
11065    {
11066      constraint (inst.operands[0].imm > range,
11067		  _("immediate value out of range"));
11068      inst.instruction |= inst.operands[0].imm;
11069    }
11070
11071  set_it_insn_type (NEUTRAL_IT_INSN);
11072}
11073
11074static void
11075do_t_hlt (void)
11076{
11077  do_t_bkpt_hlt1 (63);
11078}
11079
11080static void
11081do_t_bkpt (void)
11082{
11083  do_t_bkpt_hlt1 (255);
11084}
11085
11086static void
11087do_t_branch23 (void)
11088{
11089  set_it_insn_type_last ();
11090  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);
11091
11092  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
11093     this file.  We used to simply ignore the PLT reloc type here --
11094     the branch encoding is now needed to deal with TLSCALL relocs.
11095     So if we see a PLT reloc now, put it back to how it used to be to
11096     keep the preexisting behaviour.  */
11097  if (inst.reloc.type == BFD_RELOC_ARM_PLT32)
11098    inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
11099
11100#if defined(OBJ_COFF)
11101  /* If the destination of the branch is a defined symbol which does not have
11102     the THUMB_FUNC attribute, then we must be calling a function which has
11103     the (interfacearm) attribute.  We look for the Thumb entry point to that
11104     function and change the branch to refer to that function instead.	*/
11105  if (	 inst.reloc.exp.X_op == O_symbol
11106      && inst.reloc.exp.X_add_symbol != NULL
11107      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
11108      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
11109    inst.reloc.exp.X_add_symbol =
11110      find_real_start (inst.reloc.exp.X_add_symbol);
11111#endif
11112}
11113
11114static void
11115do_t_bx (void)
11116{
11117  set_it_insn_type_last ();
11118  inst.instruction |= inst.operands[0].reg << 3;
11119  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.	 The reloc
11120     should cause the alignment to be checked once it is known.	 This is
11121     because BX PC only works if the instruction is word aligned.  */
11122}
11123
11124static void
11125do_t_bxj (void)
11126{
11127  int Rm;
11128
11129  set_it_insn_type_last ();
11130  Rm = inst.operands[0].reg;
11131  reject_bad_reg (Rm);
11132  inst.instruction |= Rm << 16;
11133}
11134
11135static void
11136do_t_clz (void)
11137{
11138  unsigned Rd;
11139  unsigned Rm;
11140
11141  Rd = inst.operands[0].reg;
11142  Rm = inst.operands[1].reg;
11143
11144  reject_bad_reg (Rd);
11145  reject_bad_reg (Rm);
11146
11147  inst.instruction |= Rd << 8;
11148  inst.instruction |= Rm << 16;
11149  inst.instruction |= Rm;
11150}
11151
11152static void
11153do_t_cps (void)
11154{
11155  set_it_insn_type (OUTSIDE_IT_INSN);
11156  inst.instruction |= inst.operands[0].imm;
11157}
11158
11159static void
11160do_t_cpsi (void)
11161{
11162  set_it_insn_type (OUTSIDE_IT_INSN);
11163  if (unified_syntax
11164      && (inst.operands[1].present || inst.size_req == 4)
11165      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
11166    {
11167      unsigned int imod = (inst.instruction & 0x0030) >> 4;
11168      inst.instruction = 0xf3af8000;
11169      inst.instruction |= imod << 9;
11170      inst.instruction |= inst.operands[0].imm << 5;
11171      if (inst.operands[1].present)
11172	inst.instruction |= 0x100 | inst.operands[1].imm;
11173    }
11174  else
11175    {
11176      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
11177		  && (inst.operands[0].imm & 4),
11178		  _("selected processor does not support 'A' form "
11179		    "of this instruction"));
11180      constraint (inst.operands[1].present || inst.size_req == 4,
11181		  _("Thumb does not support the 2-argument "
11182		    "form of this instruction"));
11183      inst.instruction |= inst.operands[0].imm;
11184    }
11185}
11186
11187/* THUMB CPY instruction (argument parse).  */
11188
11189static void
11190do_t_cpy (void)
11191{
11192  if (inst.size_req == 4)
11193    {
11194      inst.instruction = THUMB_OP32 (T_MNEM_mov);
11195      inst.instruction |= inst.operands[0].reg << 8;
11196      inst.instruction |= inst.operands[1].reg;
11197    }
11198  else
11199    {
11200      inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
11201      inst.instruction |= (inst.operands[0].reg & 0x7);
11202      inst.instruction |= inst.operands[1].reg << 3;
11203    }
11204}
11205
11206static void
11207do_t_cbz (void)
11208{
11209  set_it_insn_type (OUTSIDE_IT_INSN);
11210  constraint (inst.operands[0].reg > 7, BAD_HIREG);
11211  inst.instruction |= inst.operands[0].reg;
11212  inst.reloc.pc_rel = 1;
11213  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
11214}
11215
11216static void
11217do_t_dbg (void)
11218{
11219  inst.instruction |= inst.operands[0].imm;
11220}
11221
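/* Thumb SDIV/UDIV: Rd, Rn, Rm, with Rn defaulting to Rd when only two
   operands are given; none of the registers may be SP or PC.  */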
11222static void
11223do_t_div (void)
11224{
11225  unsigned Rd, Rn, Rm;
11226
11227  Rd = inst.operands[0].reg;
11228  Rn = (inst.operands[1].present
11229	? inst.operands[1].reg : Rd);
11230  Rm = inst.operands[2].reg;
11231
11232  reject_bad_reg (Rd);
11233  reject_bad_reg (Rn);
11234  reject_bad_reg (Rm);
11235
11236  inst.instruction |= Rd << 8;
11237  inst.instruction |= Rn << 16;
11238  inst.instruction |= Rm;
11239}
11240
11241static void
11242do_t_hint (void)
11243{
11244  if (unified_syntax && inst.size_req == 4)
11245    inst.instruction = THUMB_OP32 (inst.instruction);
11246  else
11247    inst.instruction = THUMB_OP16 (inst.instruction);
11248}
11249
11250static void
11251do_t_it (void)
11252{
11253  unsigned int cond = inst.operands[0].imm;
11254
11255  set_it_insn_type (IT_INSN);
11256  now_it.mask = (inst.instruction & 0xf) | 0x10;
11257  now_it.cc = cond;
11258  now_it.warn_deprecated = FALSE;
11259
11260  /* If the condition is a negative condition, invert the mask.  */
11261  if ((cond & 0x1) == 0x0)
11262    {
11263      unsigned int mask = inst.instruction & 0x000f;
11264
11265      if ((mask & 0x7) == 0)
11266	{
11267	  /* No conversion needed.  */
11268	  now_it.block_length = 1;
11269	}
11270      else if ((mask & 0x3) == 0)
11271	{
11272	  mask ^= 0x8;
11273	  now_it.block_length = 2;
11274	}
11275      else if ((mask & 0x1) == 0)
11276	{
11277	  mask ^= 0xC;
11278	  now_it.block_length = 3;
11279	}
11280      else
11281	{
11282	  mask ^= 0xE;
11283	  now_it.block_length = 4;
11284	}
11285
11286      inst.instruction &= 0xfff0;
11287      inst.instruction |= mask;
11288    }
11289
11290  inst.instruction |= cond << 4;
11291}
11292
11293/* Helper function used for both push/pop and ldm/stm.  */
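/* A single-register LDM/STM is converted to the equivalent LDR/STR;
   for instance "ldmia r0!, {r3}" is encoded as "ldr r3, [r0], #4".  */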
11294static void
11295encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
11296{
11297  bfd_boolean load;
11298
11299  load = (inst.instruction & (1 << 20)) != 0;
11300
11301  if (mask & (1 << 13))
11302    inst.error =  _("SP not allowed in register list");
11303
11304  if ((mask & (1 << base)) != 0
11305      && writeback)
11306    inst.error = _("having the base register in the register list when "
11307		   "using write back is UNPREDICTABLE");
11308
11309  if (load)
11310    {
11311      if (mask & (1 << 15))
11312	{
11313	  if (mask & (1 << 14))
11314	    inst.error = _("LR and PC should not both be in register list");
11315	  else
11316	    set_it_insn_type_last ();
11317	}
11318    }
11319  else
11320    {
11321      if (mask & (1 << 15))
11322	inst.error = _("PC not allowed in register list");
11323    }
11324
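  /* MASK & (MASK - 1) clears the lowest set bit, so the test below is
     true only when at most one register is in the list.  */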
11325  if ((mask & (mask - 1)) == 0)
11326    {
11327      /* Single register transfers implemented as str/ldr.  */
11328      if (writeback)
11329	{
11330	  if (inst.instruction & (1 << 23))
11331	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
11332	  else
11333	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
11334	}
11335      else
11336	{
11337	  if (inst.instruction & (1 << 23))
11338	    inst.instruction = 0x00800000; /* ia -> [base] */
11339	  else
11340	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
11341	}
11342
11343      inst.instruction |= 0xf8400000;
11344      if (load)
11345	inst.instruction |= 0x00100000;
11346
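      /* Convert the one-bit mask into a register number (ffs is 1-based)
	 and place it in the Rt field (bits 15:12).  */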
11347      mask = ffs (mask) - 1;
11348      mask <<= 12;
11349    }
11350  else if (writeback)
11351    inst.instruction |= WRITE_BACK;
11352
11353  inst.instruction |= mask;
11354  inst.instruction |= base << 16;
11355}
11356
11357static void
11358do_t_ldmstm (void)
11359{
11360  /* This really doesn't seem worth it.  */
11361  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
11362	      _("expression too complex"));
11363  constraint (inst.operands[1].writeback,
11364	      _("Thumb load/store multiple does not support {reglist}^"));
11365
11366  if (unified_syntax)
11367    {
11368      bfd_boolean narrow;
11369      unsigned mask;
11370
11371      narrow = FALSE;
11372      /* See if we can use a 16-bit instruction.  */
11373      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
11374	  && inst.size_req != 4
11375	  && !(inst.operands[1].imm & ~0xff))
11376	{
11377	  mask = 1 << inst.operands[0].reg;
11378
11379	  if (inst.operands[0].reg <= 7)
11380	    {
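	      /* The 16-bit encodings have fixed write-back behaviour:
		 STMIA always writes back, LDMIA writes back only when the
		 base register is not in the list, so the source must have
		 asked for exactly that.  */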
11381	      if (inst.instruction == T_MNEM_stmia
11382		  ? inst.operands[0].writeback
11383		  : (inst.operands[0].writeback
11384		     == !(inst.operands[1].imm & mask)))
11385		{
11386		  if (inst.instruction == T_MNEM_stmia
11387		      && (inst.operands[1].imm & mask)
11388		      && (inst.operands[1].imm & (mask - 1)))
11389		    as_warn (_("value stored for r%d is UNKNOWN"),
11390			     inst.operands[0].reg);
11391
11392		  inst.instruction = THUMB_OP16 (inst.instruction);
11393		  inst.instruction |= inst.operands[0].reg << 8;
11394		  inst.instruction |= inst.operands[1].imm;
11395		  narrow = TRUE;
11396		}
11397	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
11398		{
11399		  /* This means 1 register in reg list, in one of 3 situations:
11400		     1. Instruction is stmia, but without writeback.
11401		     2. ldmia without writeback, but with Rn not in
11402			reglist.
11403		     3. ldmia with writeback, but with Rn in reglist.
11404		     Case 3 is UNPREDICTABLE behaviour, so we handle
11405		     cases 1 and 2, which can be converted into a 16-bit
11406		     str or ldr. The SP cases are handled below.  */
11407		  unsigned long opcode;
11408		  /* First, record an error for Case 3.  */
11409		  if (inst.operands[1].imm & mask
11410		      && inst.operands[0].writeback)
11411		    inst.error =
11412			_("having the base register in the register list when "
11413			  "using write back is UNPREDICTABLE");
11414
11415		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
11416							     : T_MNEM_ldr);
11417		  inst.instruction = THUMB_OP16 (opcode);
11418		  inst.instruction |= inst.operands[0].reg << 3;
11419		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
11420		  narrow = TRUE;
11421		}
11422	    }
11423	  else if (inst.operands[0].reg == REG_SP)
11424	    {
11425	      if (inst.operands[0].writeback)
11426		{
11427		  inst.instruction =
11428			THUMB_OP16 (inst.instruction == T_MNEM_stmia
11429				    ? T_MNEM_push : T_MNEM_pop);
11430		  inst.instruction |= inst.operands[1].imm;
11431		  narrow = TRUE;
11432		}
11433	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
11434		{
11435		  inst.instruction =
11436			THUMB_OP16 (inst.instruction == T_MNEM_stmia
11437				    ? T_MNEM_str_sp : T_MNEM_ldr_sp);
11438		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
11439		  narrow = TRUE;
11440		}
11441	    }
11442	}
11443
11444      if (!narrow)
11445	{
11446	  if (inst.instruction < 0xffff)
11447	    inst.instruction = THUMB_OP32 (inst.instruction);
11448
11449	  encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
11450				inst.operands[0].writeback);
11451	}
11452    }
11453  else
11454    {
11455      constraint (inst.operands[0].reg > 7
11456		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
11457      constraint (inst.instruction != T_MNEM_ldmia
11458		  && inst.instruction != T_MNEM_stmia,
11459		  _("Thumb-2 instruction only valid in unified syntax"));
11460      if (inst.instruction == T_MNEM_stmia)
11461	{
11462	  if (!inst.operands[0].writeback)
11463	    as_warn (_("this instruction will write back the base register"));
11464	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
11465	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
11466	    as_warn (_("value stored for r%d is UNKNOWN"),
11467		     inst.operands[0].reg);
11468	}
11469      else
11470	{
11471	  if (!inst.operands[0].writeback
11472	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
11473	    as_warn (_("this instruction will write back the base register"));
11474	  else if (inst.operands[0].writeback
11475		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
11476	    as_warn (_("this instruction will not write back the base register"));
11477	}
11478
11479      inst.instruction = THUMB_OP16 (inst.instruction);
11480      inst.instruction |= inst.operands[0].reg << 8;
11481      inst.instruction |= inst.operands[1].imm;
11482    }
11483}
11484
11485static void
11486do_t_ldrex (void)
11487{
11488  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
11489	      || inst.operands[1].postind || inst.operands[1].writeback
11490	      || inst.operands[1].immisreg || inst.operands[1].shifted
11491	      || inst.operands[1].negative,
11492	      BAD_ADDR_MODE);
11493
11494  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
11495
11496  inst.instruction |= inst.operands[0].reg << 12;
11497  inst.instruction |= inst.operands[1].reg << 16;
11498  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
11499}
11500
11501static void
11502do_t_ldrexd (void)
11503{
11504  if (!inst.operands[1].present)
11505    {
11506      constraint (inst.operands[0].reg == REG_LR,
11507		  _("r14 not allowed as first register "
11508		    "when second register is omitted"));
11509      inst.operands[1].reg = inst.operands[0].reg + 1;
11510    }
11511  constraint (inst.operands[0].reg == inst.operands[1].reg,
11512	      BAD_OVERLAP);
11513
11514  inst.instruction |= inst.operands[0].reg << 12;
11515  inst.instruction |= inst.operands[1].reg << 8;
11516  inst.instruction |= inst.operands[2].reg << 16;
11517}
11518
11519static void
11520do_t_ldst (void)
11521{
11522  unsigned long opcode;
11523  int Rn;
11524
11525  if (inst.operands[0].isreg
11526      && !inst.operands[0].preind
11527      && inst.operands[0].reg == REG_PC)
11528    set_it_insn_type_last ();
11529
11530  opcode = inst.instruction;
11531  if (unified_syntax)
11532    {
11533      if (!inst.operands[1].isreg)
11534	{
11535	  if (opcode <= 0xffff)
11536	    inst.instruction = THUMB_OP32 (opcode);
11537	  if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
11538	    return;
11539	}
11540      if (inst.operands[1].isreg
11541	  && !inst.operands[1].writeback
11542	  && !inst.operands[1].shifted && !inst.operands[1].postind
11543	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
11544	  && opcode <= 0xffff
11545	  && inst.size_req != 4)
11546	{
11547	  /* Insn may have a 16-bit form.  */
11548	  Rn = inst.operands[1].reg;
11549	  if (inst.operands[1].immisreg)
11550	    {
11551	      inst.instruction = THUMB_OP16 (opcode);
11552	      /* [Rn, Rm] */
11553	      if (Rn <= 7 && inst.operands[1].imm <= 7)
11554		goto op16;
11555	      else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
11556		reject_bad_reg (inst.operands[1].imm);
11557	    }
11558	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
11559		    && opcode != T_MNEM_ldrsb)
11560		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
11561		   || (Rn == REG_SP && opcode == T_MNEM_str))
11562	    {
11563	      /* [Rn, #const] */
11564	      if (Rn > 7)
11565		{
11566		  if (Rn == REG_PC)
11567		    {
11568		      if (inst.reloc.pc_rel)
11569			opcode = T_MNEM_ldr_pc2;
11570		      else
11571			opcode = T_MNEM_ldr_pc;
11572		    }
11573		  else
11574		    {
11575		      if (opcode == T_MNEM_ldr)
11576			opcode = T_MNEM_ldr_sp;
11577		      else
11578			opcode = T_MNEM_str_sp;
11579		    }
11580		  inst.instruction = inst.operands[0].reg << 8;
11581		}
11582	      else
11583		{
11584		  inst.instruction = inst.operands[0].reg;
11585		  inst.instruction |= inst.operands[1].reg << 3;
11586		}
11587	      inst.instruction |= THUMB_OP16 (opcode);
11588	      if (inst.size_req == 2)
11589		inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
11590	      else
11591		inst.relax = opcode;
11592	      return;
11593	    }
11594	}
11595      /* Definitely a 32-bit variant.  */
11596
11597      /* Warning for Erratum 752419.  */
11598      if (opcode == T_MNEM_ldr
11599	  && inst.operands[0].reg == REG_SP
11600	  && inst.operands[1].writeback == 1
11601	  && !inst.operands[1].immisreg)
11602	{
11603	  if (no_cpu_selected ()
11604	      || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
11605		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
11606		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
11607	    as_warn (_("This instruction may be unpredictable "
11608		       "if executed on M-profile cores "
11609		       "with interrupts enabled."));
11610	}
11611
11612      /* Do some validations regarding addressing modes.  */
11613      if (inst.operands[1].immisreg)
11614	reject_bad_reg (inst.operands[1].imm);
11615
11616      constraint (inst.operands[1].writeback == 1
11617		  && inst.operands[0].reg == inst.operands[1].reg,
11618		  BAD_OVERLAP);
11619
11620      inst.instruction = THUMB_OP32 (opcode);
11621      inst.instruction |= inst.operands[0].reg << 12;
11622      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
11623      check_ldr_r15_aligned ();
11624      return;
11625    }
11626
11627  constraint (inst.operands[0].reg > 7, BAD_HIREG);
11628
11629  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
11630    {
11631      /* Only [Rn,Rm] is acceptable.  */
11632      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
11633      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
11634		  || inst.operands[1].postind || inst.operands[1].shifted
11635		  || inst.operands[1].negative,
11636		  _("Thumb does not support this addressing mode"));
11637      inst.instruction = THUMB_OP16 (inst.instruction);
11638      goto op16;
11639    }
11640
11641  inst.instruction = THUMB_OP16 (inst.instruction);
11642  if (!inst.operands[1].isreg)
11643    if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
11644      return;
11645
11646  constraint (!inst.operands[1].preind
11647	      || inst.operands[1].shifted
11648	      || inst.operands[1].writeback,
11649	      _("Thumb does not support this addressing mode"));
11650  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
11651    {
11652      constraint (inst.instruction & 0x0600,
11653		  _("byte or halfword not valid for base register"));
11654      constraint (inst.operands[1].reg == REG_PC
11655		  && !(inst.instruction & THUMB_LOAD_BIT),
11656		  _("r15 based store not allowed"));
11657      constraint (inst.operands[1].immisreg,
11658		  _("invalid base register for register offset"));
11659
11660      if (inst.operands[1].reg == REG_PC)
11661	inst.instruction = T_OPCODE_LDR_PC;
11662      else if (inst.instruction & THUMB_LOAD_BIT)
11663	inst.instruction = T_OPCODE_LDR_SP;
11664      else
11665	inst.instruction = T_OPCODE_STR_SP;
11666
11667      inst.instruction |= inst.operands[0].reg << 8;
11668      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
11669      return;
11670    }
11671
11672  constraint (inst.operands[1].reg > 7, BAD_HIREG);
11673  if (!inst.operands[1].immisreg)
11674    {
11675      /* Immediate offset.  */
11676      inst.instruction |= inst.operands[0].reg;
11677      inst.instruction |= inst.operands[1].reg << 3;
11678      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
11679      return;
11680    }
11681
11682  /* Register offset.  */
11683  constraint (inst.operands[1].imm > 7, BAD_HIREG);
11684  constraint (inst.operands[1].negative,
11685	      _("Thumb does not support this addressing mode"));
11686
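  /* Convert the 16-bit immediate-offset opcode into its register-offset
     counterpart; ldrsb and ldrsh already use the register form.  */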
11687 op16:
11688  switch (inst.instruction)
11689    {
11690    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
11691    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
11692    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
11693    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
11694    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
11695    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
11696    case 0x5600 /* ldrsb */:
11697    case 0x5e00 /* ldrsh */: break;
11698    default: abort ();
11699    }
11700
11701  inst.instruction |= inst.operands[0].reg;
11702  inst.instruction |= inst.operands[1].reg << 3;
11703  inst.instruction |= inst.operands[1].imm << 6;
11704}
11705
11706static void
11707do_t_ldstd (void)
11708{
11709  if (!inst.operands[1].present)
11710    {
11711      inst.operands[1].reg = inst.operands[0].reg + 1;
11712      constraint (inst.operands[0].reg == REG_LR,
11713		  _("r14 not allowed here"));
11714      constraint (inst.operands[0].reg == REG_R12,
11715		  _("r12 not allowed here"));
11716    }
11717
11718  if (inst.operands[2].writeback
11719      && (inst.operands[0].reg == inst.operands[2].reg
11720      || inst.operands[1].reg == inst.operands[2].reg))
11721    as_warn (_("base register written back, and overlaps "
11722	       "one of transfer registers"));
11723
11724  inst.instruction |= inst.operands[0].reg << 12;
11725  inst.instruction |= inst.operands[1].reg << 8;
11726  encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
11727}
11728
11729static void
11730do_t_ldstt (void)
11731{
11732  inst.instruction |= inst.operands[0].reg << 12;
11733  encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
11734}
11735
11736static void
11737do_t_mla (void)
11738{
11739  unsigned Rd, Rn, Rm, Ra;
11740
11741  Rd = inst.operands[0].reg;
11742  Rn = inst.operands[1].reg;
11743  Rm = inst.operands[2].reg;
11744  Ra = inst.operands[3].reg;
11745
11746  reject_bad_reg (Rd);
11747  reject_bad_reg (Rn);
11748  reject_bad_reg (Rm);
11749  reject_bad_reg (Ra);
11750
11751  inst.instruction |= Rd << 8;
11752  inst.instruction |= Rn << 16;
11753  inst.instruction |= Rm;
11754  inst.instruction |= Ra << 12;
11755}
11756
11757static void
11758do_t_mlal (void)
11759{
11760  unsigned RdLo, RdHi, Rn, Rm;
11761
11762  RdLo = inst.operands[0].reg;
11763  RdHi = inst.operands[1].reg;
11764  Rn = inst.operands[2].reg;
11765  Rm = inst.operands[3].reg;
11766
11767  reject_bad_reg (RdLo);
11768  reject_bad_reg (RdHi);
11769  reject_bad_reg (Rn);
11770  reject_bad_reg (Rm);
11771
11772  inst.instruction |= RdLo << 12;
11773  inst.instruction |= RdHi << 8;
11774  inst.instruction |= Rn << 16;
11775  inst.instruction |= Rm;
11776}
11777
11778static void
11779do_t_mov_cmp (void)
11780{
11781  unsigned Rn, Rm;
11782
11783  Rn = inst.operands[0].reg;
11784  Rm = inst.operands[1].reg;
11785
11786  if (Rn == REG_PC)
11787    set_it_insn_type_last ();
11788
11789  if (unified_syntax)
11790    {
11791      int r0off = (inst.instruction == T_MNEM_mov
11792		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
11793      unsigned long opcode;
11794      bfd_boolean narrow;
11795      bfd_boolean low_regs;
11796
11797      low_regs = (Rn <= 7 && Rm <= 7);
11798      opcode = inst.instruction;
11799      if (in_it_block ())
11800	narrow = opcode != T_MNEM_movs;
11801      else
11802	narrow = opcode != T_MNEM_movs || low_regs;
11803      if (inst.size_req == 4
11804	  || inst.operands[1].shifted)
11805	narrow = FALSE;
11806
11807      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
11808      if (opcode == T_MNEM_movs && inst.operands[1].isreg
11809	  && !inst.operands[1].shifted
11810	  && Rn == REG_PC
11811	  && Rm == REG_LR)
11812	{
11813	  inst.instruction = T2_SUBS_PC_LR;
11814	  return;
11815	}
11816
11817      if (opcode == T_MNEM_cmp)
11818	{
11819	  constraint (Rn == REG_PC, BAD_PC);
11820	  if (narrow)
11821	    {
11822	      /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
11823		 but valid.  */
11824	      warn_deprecated_sp (Rm);
11825	      /* R15 was documented as a valid choice for Rm in ARMv6,
11826		 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
11827		 tools reject R15, so we do too.  */
11828	      constraint (Rm == REG_PC, BAD_PC);
11829	    }
11830	  else
11831	    reject_bad_reg (Rm);
11832	}
11833      else if (opcode == T_MNEM_mov
11834	       || opcode == T_MNEM_movs)
11835	{
11836	  if (inst.operands[1].isreg)
11837	    {
11838	      if (opcode == T_MNEM_movs)
11839		{
11840		  reject_bad_reg (Rn);
11841		  reject_bad_reg (Rm);
11842		}
11843	      else if (narrow)
11844		{
11845		  /* This is mov.n.  */
11846		  if ((Rn == REG_SP || Rn == REG_PC)
11847		      && (Rm == REG_SP || Rm == REG_PC))
11848		    {
11849		      as_tsktsk (_("Use of r%u as a source register is "
11850				 "deprecated when r%u is the destination "
11851				 "register."), Rm, Rn);
11852		    }
11853		}
11854	      else
11855		{
11856		  /* This is mov.w.  */
11857		  constraint (Rn == REG_PC, BAD_PC);
11858		  constraint (Rm == REG_PC, BAD_PC);
11859		  constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
11860		}
11861	    }
11862	  else
11863	    reject_bad_reg (Rn);
11864	}
11865
11866      if (!inst.operands[1].isreg)
11867	{
11868	  /* Immediate operand.  */
11869	  if (!in_it_block () && opcode == T_MNEM_mov)
11870	    narrow = 0;
11871	  if (low_regs && narrow)
11872	    {
11873	      inst.instruction = THUMB_OP16 (opcode);
11874	      inst.instruction |= Rn << 8;
11875	      if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
11876		  || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
11877		{
11878		  if (inst.size_req == 2)
11879		    inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
11880		  else
11881		    inst.relax = opcode;
11882		}
11883	    }
11884	  else
11885	    {
11886	      constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
11887			  && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
11888			  THUMB1_RELOC_ONLY);
11889
11890	      inst.instruction = THUMB_OP32 (inst.instruction);
11891	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
11892	      inst.instruction |= Rn << r0off;
11893	      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
11894	    }
11895	}
11896      else if (inst.operands[1].shifted && inst.operands[1].immisreg
11897	       && (inst.instruction == T_MNEM_mov
11898		   || inst.instruction == T_MNEM_movs))
11899	{
11900	  /* Register shifts are encoded as separate shift instructions.  */
11901	  bfd_boolean flags = (inst.instruction == T_MNEM_movs);
11902
11903	  if (in_it_block ())
11904	    narrow = !flags;
11905	  else
11906	    narrow = flags;
11907
11908	  if (inst.size_req == 4)
11909	    narrow = FALSE;
11910
11911	  if (!low_regs || inst.operands[1].imm > 7)
11912	    narrow = FALSE;
11913
11914	  if (Rn != Rm)
11915	    narrow = FALSE;
11916
11917	  switch (inst.operands[1].shift_kind)
11918	    {
11919	    case SHIFT_LSL:
11920	      opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
11921	      break;
11922	    case SHIFT_ASR:
11923	      opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
11924	      break;
11925	    case SHIFT_LSR:
11926	      opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
11927	      break;
11928	    case SHIFT_ROR:
11929	      opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
11930	      break;
11931	    default:
11932	      abort ();
11933	    }
11934
11935	  inst.instruction = opcode;
11936	  if (narrow)
11937	    {
11938	      inst.instruction |= Rn;
11939	      inst.instruction |= inst.operands[1].imm << 3;
11940	    }
11941	  else
11942	    {
11943	      if (flags)
11944		inst.instruction |= CONDS_BIT;
11945
11946	      inst.instruction |= Rn << 8;
11947	      inst.instruction |= Rm << 16;
11948	      inst.instruction |= inst.operands[1].imm;
11949	    }
11950	}
11951      else if (!narrow)
11952	{
11953	  /* Some mov with immediate shift have narrow variants.
11954	     Register shifts are handled above.  */
11955	  if (low_regs && inst.operands[1].shifted
11956	      && (inst.instruction == T_MNEM_mov
11957		  || inst.instruction == T_MNEM_movs))
11958	    {
11959	      if (in_it_block ())
11960		narrow = (inst.instruction == T_MNEM_mov);
11961	      else
11962		narrow = (inst.instruction == T_MNEM_movs);
11963	    }
11964
11965	  if (narrow)
11966	    {
11967	      switch (inst.operands[1].shift_kind)
11968		{
11969		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
11970		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
11971		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
11972		default: narrow = FALSE; break;
11973		}
11974	    }
11975
11976	  if (narrow)
11977	    {
11978	      inst.instruction |= Rn;
11979	      inst.instruction |= Rm << 3;
11980	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
11981	    }
11982	  else
11983	    {
11984	      inst.instruction = THUMB_OP32 (inst.instruction);
11985	      inst.instruction |= Rn << r0off;
11986	      encode_thumb32_shifted_operand (1);
11987	    }
11988	}
11989      else
11990	switch (inst.instruction)
11991	  {
11992	  case T_MNEM_mov:
11993	    /* In v4t or v5t a move of two lowregs produces unpredictable
11994	       results. Don't allow this.  */
11995	    if (low_regs)
11996	      {
11997		constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
11998			    _("MOV Rd, Rs with two low registers is not "
11999			      "permitted on this architecture"));
12000		ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
12001					arm_ext_v6);
12002	      }
12003
12004	    inst.instruction = T_OPCODE_MOV_HR;
12005	    inst.instruction |= (Rn & 0x8) << 4;
12006	    inst.instruction |= (Rn & 0x7);
12007	    inst.instruction |= Rm << 3;
12008	    break;
12009
12010	  case T_MNEM_movs:
12011	    /* We know we have low registers at this point.
12012	       Generate LSLS Rd, Rs, #0.  */
12013	    inst.instruction = T_OPCODE_LSL_I;
12014	    inst.instruction |= Rn;
12015	    inst.instruction |= Rm << 3;
12016	    break;
12017
12018	  case T_MNEM_cmp:
12019	    if (low_regs)
12020	      {
12021		inst.instruction = T_OPCODE_CMP_LR;
12022		inst.instruction |= Rn;
12023		inst.instruction |= Rm << 3;
12024	      }
12025	    else
12026	      {
12027		inst.instruction = T_OPCODE_CMP_HR;
12028		inst.instruction |= (Rn & 0x8) << 4;
12029		inst.instruction |= (Rn & 0x7);
12030		inst.instruction |= Rm << 3;
12031	      }
12032	    break;
12033	  }
12034      return;
12035    }
12036
12037  inst.instruction = THUMB_OP16 (inst.instruction);
12038
12039  /* PR 10443: Do not silently ignore shifted operands.  */
12040  constraint (inst.operands[1].shifted,
12041	      _("shifts in CMP/MOV instructions are only supported in unified syntax"));
12042
12043  if (inst.operands[1].isreg)
12044    {
12045      if (Rn < 8 && Rm < 8)
12046	{
12047	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
12048	     since a MOV instruction produces unpredictable results.  */
12049	  if (inst.instruction == T_OPCODE_MOV_I8)
12050	    inst.instruction = T_OPCODE_ADD_I3;
12051	  else
12052	    inst.instruction = T_OPCODE_CMP_LR;
12053
12054	  inst.instruction |= Rn;
12055	  inst.instruction |= Rm << 3;
12056	}
12057      else
12058	{
12059	  if (inst.instruction == T_OPCODE_MOV_I8)
12060	    inst.instruction = T_OPCODE_MOV_HR;
12061	  else
12062	    inst.instruction = T_OPCODE_CMP_HR;
12063	  do_t_cpy ();
12064	}
12065    }
12066  else
12067    {
12068      constraint (Rn > 7,
12069		  _("only lo regs allowed with immediate"));
12070      inst.instruction |= Rn << 8;
12071      inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
12072    }
12073}
12074
12075static void
12076do_t_mov16 (void)
12077{
12078  unsigned Rd;
12079  bfd_vma imm;
12080  bfd_boolean top;
12081
12082  top = (inst.instruction & 0x00800000) != 0;
12083  if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
12084    {
12085      constraint (top, _(":lower16: not allowed in this instruction"));
12086      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
12087    }
12088  else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
12089    {
12090      constraint (!top, _(":upper16: not allowed in this instruction"));
12091      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
12092    }
12093
12094  Rd = inst.operands[0].reg;
12095  reject_bad_reg (Rd);
12096
12097  inst.instruction |= Rd << 8;
12098  if (inst.reloc.type == BFD_RELOC_UNUSED)
12099    {
12100      imm = inst.reloc.exp.X_add_number;
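      /* Scatter the 16-bit value into the imm4:i:imm3:imm8 fields
	 (bits 19:16, bit 26, bits 14:12 and bits 7:0 respectively).  */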
12101      inst.instruction |= (imm & 0xf000) << 4;
12102      inst.instruction |= (imm & 0x0800) << 15;
12103      inst.instruction |= (imm & 0x0700) << 4;
12104      inst.instruction |= (imm & 0x00ff);
12105    }
12106}
12107
12108static void
12109do_t_mvn_tst (void)
12110{
12111  unsigned Rn, Rm;
12112
12113  Rn = inst.operands[0].reg;
12114  Rm = inst.operands[1].reg;
12115
12116  if (inst.instruction == T_MNEM_cmp
12117      || inst.instruction == T_MNEM_cmn)
12118    constraint (Rn == REG_PC, BAD_PC);
12119  else
12120    reject_bad_reg (Rn);
12121  reject_bad_reg (Rm);
12122
12123  if (unified_syntax)
12124    {
12125      int r0off = (inst.instruction == T_MNEM_mvn
12126		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
12127      bfd_boolean narrow;
12128
12129      if (inst.size_req == 4
12130	  || inst.instruction > 0xffff
12131	  || inst.operands[1].shifted
12132	  || Rn > 7 || Rm > 7)
12133	narrow = FALSE;
12134      else if (inst.instruction == T_MNEM_cmn
12135	       || inst.instruction == T_MNEM_tst)
12136	narrow = TRUE;
12137      else if (THUMB_SETS_FLAGS (inst.instruction))
12138	narrow = !in_it_block ();
12139      else
12140	narrow = in_it_block ();
12141
12142      if (!inst.operands[1].isreg)
12143	{
12144	  /* For an immediate, we always generate a 32-bit opcode;
12145	     section relaxation will shrink it later if possible.  */
12146	  if (inst.instruction < 0xffff)
12147	    inst.instruction = THUMB_OP32 (inst.instruction);
12148	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
12149	  inst.instruction |= Rn << r0off;
12150	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
12151	}
12152      else
12153	{
12154	  /* See if we can do this with a 16-bit instruction.  */
12155	  if (narrow)
12156	    {
12157	      inst.instruction = THUMB_OP16 (inst.instruction);
12158	      inst.instruction |= Rn;
12159	      inst.instruction |= Rm << 3;
12160	    }
12161	  else
12162	    {
12163	      constraint (inst.operands[1].shifted
12164			  && inst.operands[1].immisreg,
12165			  _("shift must be constant"));
12166	      if (inst.instruction < 0xffff)
12167		inst.instruction = THUMB_OP32 (inst.instruction);
12168	      inst.instruction |= Rn << r0off;
12169	      encode_thumb32_shifted_operand (1);
12170	    }
12171	}
12172    }
12173  else
12174    {
12175      constraint (inst.instruction > 0xffff
12176		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
12177      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
12178		  _("unshifted register required"));
12179      constraint (Rn > 7 || Rm > 7,
12180		  BAD_HIREG);
12181
12182      inst.instruction = THUMB_OP16 (inst.instruction);
12183      inst.instruction |= Rn;
12184      inst.instruction |= Rm << 3;
12185    }
12186}
12187
12188static void
12189do_t_mrs (void)
12190{
12191  unsigned Rd;
12192
12193  if (do_vfp_nsyn_mrs () == SUCCESS)
12194    return;
12195
12196  Rd = inst.operands[0].reg;
12197  reject_bad_reg (Rd);
12198  inst.instruction |= Rd << 8;
12199
12200  if (inst.operands[1].isreg)
12201    {
12202      unsigned br = inst.operands[1].reg;
12203      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
12204	as_bad (_("bad register for mrs"));
12205
12206      inst.instruction |= br & (0xf << 16);
12207      inst.instruction |= (br & 0x300) >> 4;
12208      inst.instruction |= (br & SPSR_BIT) >> 2;
12209    }
12210  else
12211    {
12212      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
12213
12214      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
12215	{
12216	  /* PR gas/12698:  The constraint is only applied for m_profile.
12217	     If the user has specified -march=all, we want to ignore it as
12218	     we are building for any CPU type, including non-m variants.  */
12219	  bfd_boolean m_profile =
12220	    !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
12221	  constraint ((flags != 0) && m_profile, _("selected processor does "
12222						   "not support requested special purpose register"));
12223	}
12224      else
12225	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
12226	   devices).  */
12227	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
12228		    _("'APSR', 'CPSR' or 'SPSR' expected"));
12229
12230      inst.instruction |= (flags & SPSR_BIT) >> 2;
12231      inst.instruction |= inst.operands[1].imm & 0xff;
12232      inst.instruction |= 0xf0000;
12233    }
12234}
12235
12236static void
12237do_t_msr (void)
12238{
12239  int flags;
12240  unsigned Rn;
12241
12242  if (do_vfp_nsyn_msr () == SUCCESS)
12243    return;
12244
12245  constraint (!inst.operands[1].isreg,
12246	      _("Thumb encoding does not support an immediate here"));
12247
12248  if (inst.operands[0].isreg)
12249    flags = (int)(inst.operands[0].reg);
12250  else
12251    flags = inst.operands[0].imm;
12252
12253  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
12254    {
12255      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
12256
12257      /* PR gas/12698:  The constraint is only applied for m_profile.
12258	 If the user has specified -march=all, we want to ignore it as
12259	 we are building for any CPU type, including non-m variants.  */
12260      bfd_boolean m_profile =
12261	!ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
12262      constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
12263	   && (bits & ~(PSR_s | PSR_f)) != 0)
12264	  || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
12265	      && bits != PSR_f)) && m_profile,
12266	  _("selected processor does not support requested special "
12267	    "purpose register"));
12268    }
12269  else
12270     constraint ((flags & 0xff) != 0, _("selected processor does not support "
12271		 "requested special purpose register"));
12272
12273  Rn = inst.operands[1].reg;
12274  reject_bad_reg (Rn);
12275
12276  inst.instruction |= (flags & SPSR_BIT) >> 2;
12277  inst.instruction |= (flags & 0xf0000) >> 8;
12278  inst.instruction |= (flags & 0x300) >> 4;
12279  inst.instruction |= (flags & 0xff);
12280  inst.instruction |= Rn << 16;
12281}
12282
12283static void
12284do_t_mul (void)
12285{
12286  bfd_boolean narrow;
12287  unsigned Rd, Rn, Rm;
12288
12289  if (!inst.operands[2].present)
12290    inst.operands[2].reg = inst.operands[0].reg;
12291
12292  Rd = inst.operands[0].reg;
12293  Rn = inst.operands[1].reg;
12294  Rm = inst.operands[2].reg;
12295
12296  if (unified_syntax)
12297    {
12298      if (inst.size_req == 4
12299	  || (Rd != Rn
12300	      && Rd != Rm)
12301	  || Rn > 7
12302	  || Rm > 7)
12303	narrow = FALSE;
12304      else if (inst.instruction == T_MNEM_muls)
12305	narrow = !in_it_block ();
12306      else
12307	narrow = in_it_block ();
12308    }
12309  else
12310    {
12311      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
12312      constraint (Rn > 7 || Rm > 7,
12313		  BAD_HIREG);
12314      narrow = TRUE;
12315    }
12316
12317  if (narrow)
12318    {
12319      /* 16-bit MULS/Conditional MUL.  */
12320      inst.instruction = THUMB_OP16 (inst.instruction);
12321      inst.instruction |= Rd;
12322
12323      if (Rd == Rn)
12324	inst.instruction |= Rm << 3;
12325      else if (Rd == Rm)
12326	inst.instruction |= Rn << 3;
12327      else
12328	constraint (1, _("dest must overlap one source register"));
12329    }
12330  else
12331    {
12332      constraint (inst.instruction != T_MNEM_mul,
12333		  _("Thumb-2 MUL must not set flags"));
12334      /* 32-bit MUL.  */
12335      inst.instruction = THUMB_OP32 (inst.instruction);
12336      inst.instruction |= Rd << 8;
12337      inst.instruction |= Rn << 16;
12338      inst.instruction |= Rm << 0;
12339
12340      reject_bad_reg (Rd);
12341      reject_bad_reg (Rn);
12342      reject_bad_reg (Rm);
12343    }
12344}
12345
12346static void
12347do_t_mull (void)
12348{
12349  unsigned RdLo, RdHi, Rn, Rm;
12350
12351  RdLo = inst.operands[0].reg;
12352  RdHi = inst.operands[1].reg;
12353  Rn = inst.operands[2].reg;
12354  Rm = inst.operands[3].reg;
12355
12356  reject_bad_reg (RdLo);
12357  reject_bad_reg (RdHi);
12358  reject_bad_reg (Rn);
12359  reject_bad_reg (Rm);
12360
12361  inst.instruction |= RdLo << 12;
12362  inst.instruction |= RdHi << 8;
12363  inst.instruction |= Rn << 16;
12364  inst.instruction |= Rm;
12365
12366  if (RdLo == RdHi)
12367    as_tsktsk (_("rdhi and rdlo must be different"));
12368}
12369
12370static void
12371do_t_nop (void)
12372{
12373  set_it_insn_type (NEUTRAL_IT_INSN);
12374
12375  if (unified_syntax)
12376    {
12377      if (inst.size_req == 4 || inst.operands[0].imm > 15)
12378	{
12379	  inst.instruction = THUMB_OP32 (inst.instruction);
12380	  inst.instruction |= inst.operands[0].imm;
12381	}
12382      else
12383	{
12384	  /* PR9722: Check for Thumb2 availability before
12385	     generating a thumb2 nop instruction.  */
12386	  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
12387	    {
12388	      inst.instruction = THUMB_OP16 (inst.instruction);
12389	      inst.instruction |= inst.operands[0].imm << 4;
12390	    }
12391	  else
12392	    inst.instruction = 0x46c0;
12393	}
12394    }
12395  else
12396    {
12397      constraint (inst.operands[0].present,
12398		  _("Thumb does not support NOP with hints"));
12399      inst.instruction = 0x46c0;
12400    }
12401}
12402
12403static void
12404do_t_neg (void)
12405{
12406  if (unified_syntax)
12407    {
12408      bfd_boolean narrow;
12409
12410      if (THUMB_SETS_FLAGS (inst.instruction))
12411	narrow = !in_it_block ();
12412      else
12413	narrow = in_it_block ();
12414      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
12415	narrow = FALSE;
12416      if (inst.size_req == 4)
12417	narrow = FALSE;
12418
12419      if (!narrow)
12420	{
12421	  inst.instruction = THUMB_OP32 (inst.instruction);
12422	  inst.instruction |= inst.operands[0].reg << 8;
12423	  inst.instruction |= inst.operands[1].reg << 16;
12424	}
12425      else
12426	{
12427	  inst.instruction = THUMB_OP16 (inst.instruction);
12428	  inst.instruction |= inst.operands[0].reg;
12429	  inst.instruction |= inst.operands[1].reg << 3;
12430	}
12431    }
12432  else
12433    {
12434      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
12435		  BAD_HIREG);
12436      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
12437
12438      inst.instruction = THUMB_OP16 (inst.instruction);
12439      inst.instruction |= inst.operands[0].reg;
12440      inst.instruction |= inst.operands[1].reg << 3;
12441    }
12442}
12443
12444static void
12445do_t_orn (void)
12446{
12447  unsigned Rd, Rn;
12448
12449  Rd = inst.operands[0].reg;
12450  Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
12451
12452  reject_bad_reg (Rd);
12453  /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN.  */
12454  reject_bad_reg (Rn);
12455
12456  inst.instruction |= Rd << 8;
12457  inst.instruction |= Rn << 16;
12458
12459  if (!inst.operands[2].isreg)
12460    {
12461      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
12462      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
12463    }
12464  else
12465    {
12466      unsigned Rm;
12467
12468      Rm = inst.operands[2].reg;
12469      reject_bad_reg (Rm);
12470
12471      constraint (inst.operands[2].shifted
12472		  && inst.operands[2].immisreg,
12473		  _("shift must be constant"));
12474      encode_thumb32_shifted_operand (2);
12475    }
12476}
12477
12478static void
12479do_t_pkhbt (void)
12480{
12481  unsigned Rd, Rn, Rm;
12482
12483  Rd = inst.operands[0].reg;
12484  Rn = inst.operands[1].reg;
12485  Rm = inst.operands[2].reg;
12486
12487  reject_bad_reg (Rd);
12488  reject_bad_reg (Rn);
12489  reject_bad_reg (Rm);
12490
12491  inst.instruction |= Rd << 8;
12492  inst.instruction |= Rn << 16;
12493  inst.instruction |= Rm;
12494  if (inst.operands[3].present)
12495    {
12496      unsigned int val = inst.reloc.exp.X_add_number;
12497      constraint (inst.reloc.exp.X_op != O_constant,
12498		  _("expression too complex"));
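      /* The shift amount is split into the imm3 field (bits 14:12) and
	 the imm2 field (bits 7:6).  */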
12499      inst.instruction |= (val & 0x1c) << 10;
12500      inst.instruction |= (val & 0x03) << 6;
12501    }
12502}
12503
12504static void
12505do_t_pkhtb (void)
12506{
12507  if (!inst.operands[3].present)
12508    {
12509      unsigned Rtmp;
12510
12511      inst.instruction &= ~0x00000020;
12512
12513      /* PR 10168.  Swap the Rm and Rn registers.  */
12514      Rtmp = inst.operands[1].reg;
12515      inst.operands[1].reg = inst.operands[2].reg;
12516      inst.operands[2].reg = Rtmp;
12517    }
12518  do_t_pkhbt ();
12519}
12520
12521static void
12522do_t_pld (void)
12523{
12524  if (inst.operands[0].immisreg)
12525    reject_bad_reg (inst.operands[0].imm);
12526
12527  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
12528}
12529
12530static void
12531do_t_push_pop (void)
12532{
12533  unsigned mask;
12534
12535  constraint (inst.operands[0].writeback,
12536	      _("push/pop do not support {reglist}^"));
12537  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
12538	      _("expression too complex"));
12539
12540  mask = inst.operands[0].imm;
12541  if (inst.size_req != 4 && (mask & ~0xff) == 0)
12542    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
12543  else if (inst.size_req != 4
12544	   && (mask & ~0xff) == (1U << (inst.instruction == T_MNEM_push
12545				       ? REG_LR : REG_PC)))
12546    {
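      /* A 16-bit PUSH may additionally include LR, and a 16-bit POP may
	 include PC, via the extra THUMB_PP_PC_LR bit on top of the
	 low-register byte.  */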
12547      inst.instruction = THUMB_OP16 (inst.instruction);
12548      inst.instruction |= THUMB_PP_PC_LR;
12549      inst.instruction |= mask & 0xff;
12550    }
12551  else if (unified_syntax)
12552    {
12553      inst.instruction = THUMB_OP32 (inst.instruction);
12554      encode_thumb2_ldmstm (13, mask, TRUE);
12555    }
12556  else
12557    {
12558      inst.error = _("invalid register list to push/pop instruction");
12559      return;
12560    }
12561}
12562
12563static void
12564do_t_rbit (void)
12565{
12566  unsigned Rd, Rm;
12567
12568  Rd = inst.operands[0].reg;
12569  Rm = inst.operands[1].reg;
12570
12571  reject_bad_reg (Rd);
12572  reject_bad_reg (Rm);
12573
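  /* The T32 encoding expects the source register in both the Rn (bits
     19:16) and Rm (bits 3:0) fields, hence Rm is inserted twice below.  */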
12574  inst.instruction |= Rd << 8;
12575  inst.instruction |= Rm << 16;
12576  inst.instruction |= Rm;
12577}
12578
12579static void
12580do_t_rev (void)
12581{
12582  unsigned Rd, Rm;
12583
12584  Rd = inst.operands[0].reg;
12585  Rm = inst.operands[1].reg;
12586
12587  reject_bad_reg (Rd);
12588  reject_bad_reg (Rm);
12589
12590  if (Rd <= 7 && Rm <= 7
12591      && inst.size_req != 4)
12592    {
12593      inst.instruction = THUMB_OP16 (inst.instruction);
12594      inst.instruction |= Rd;
12595      inst.instruction |= Rm << 3;
12596    }
12597  else if (unified_syntax)
12598    {
12599      inst.instruction = THUMB_OP32 (inst.instruction);
12600      inst.instruction |= Rd << 8;
12601      inst.instruction |= Rm << 16;
12602      inst.instruction |= Rm;
12603    }
12604  else
12605    inst.error = BAD_HIREG;
12606}
12607
12608static void
12609do_t_rrx (void)
12610{
12611  unsigned Rd, Rm;
12612
12613  Rd = inst.operands[0].reg;
12614  Rm = inst.operands[1].reg;
12615
12616  reject_bad_reg (Rd);
12617  reject_bad_reg (Rm);
12618
12619  inst.instruction |= Rd << 8;
12620  inst.instruction |= Rm;
12621}
12622
12623static void
12624do_t_rsb (void)
12625{
12626  unsigned Rd, Rs;
12627
12628  Rd = inst.operands[0].reg;
12629  Rs = (inst.operands[1].present
12630	? inst.operands[1].reg    /* Rd, Rs, foo */
12631	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
12632
12633  reject_bad_reg (Rd);
12634  reject_bad_reg (Rs);
12635  if (inst.operands[2].isreg)
12636    reject_bad_reg (inst.operands[2].reg);
12637
12638  inst.instruction |= Rd << 8;
12639  inst.instruction |= Rs << 16;
12640  if (!inst.operands[2].isreg)
12641    {
12642      bfd_boolean narrow;
12643
12644      if ((inst.instruction & 0x00100000) != 0)
12645	narrow = !in_it_block ();
12646      else
12647	narrow = in_it_block ();
12648
12649      if (Rd > 7 || Rs > 7)
12650	narrow = FALSE;
12651
12652      if (inst.size_req == 4 || !unified_syntax)
12653	narrow = FALSE;
12654
12655      if (inst.reloc.exp.X_op != O_constant
12656	  || inst.reloc.exp.X_add_number != 0)
12657	narrow = FALSE;
12658
12659      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
12660	 relaxation, but it doesn't seem worth the hassle.  */
12661      if (narrow)
12662	{
12663	  inst.reloc.type = BFD_RELOC_UNUSED;
12664	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
12665	  inst.instruction |= Rs << 3;
12666	  inst.instruction |= Rd;
12667	}
12668      else
12669	{
12670	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
12671	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
12672	}
12673    }
12674  else
12675    encode_thumb32_shifted_operand (2);
12676}
12677
12678static void
12679do_t_setend (void)
12680{
12681  if (warn_on_deprecated
12682      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
12683    as_tsktsk (_("setend use is deprecated for ARMv8"));
12684
12685  set_it_insn_type (OUTSIDE_IT_INSN);
12686  if (inst.operands[0].imm)
12687    inst.instruction |= 0x8;
12688}
12689
12690static void
12691do_t_shift (void)
12692{
12693  if (!inst.operands[1].present)
12694    inst.operands[1].reg = inst.operands[0].reg;
12695
12696  if (unified_syntax)
12697    {
12698      bfd_boolean narrow;
12699      int shift_kind;
12700
12701      switch (inst.instruction)
12702	{
12703	case T_MNEM_asr:
12704	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
12705	case T_MNEM_lsl:
12706	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
12707	case T_MNEM_lsr:
12708	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
12709	case T_MNEM_ror:
12710	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
12711	default: abort ();
12712	}
12713
12714      if (THUMB_SETS_FLAGS (inst.instruction))
12715	narrow = !in_it_block ();
12716      else
12717	narrow = in_it_block ();
12718      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
12719	narrow = FALSE;
12720      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
12721	narrow = FALSE;
12722      if (inst.operands[2].isreg
12723	  && (inst.operands[1].reg != inst.operands[0].reg
12724	      || inst.operands[2].reg > 7))
12725	narrow = FALSE;
12726      if (inst.size_req == 4)
12727	narrow = FALSE;
12728
12729      reject_bad_reg (inst.operands[0].reg);
12730      reject_bad_reg (inst.operands[1].reg);
12731
12732      if (!narrow)
12733	{
12734	  if (inst.operands[2].isreg)
12735	    {
12736	      reject_bad_reg (inst.operands[2].reg);
12737	      inst.instruction = THUMB_OP32 (inst.instruction);
12738	      inst.instruction |= inst.operands[0].reg << 8;
12739	      inst.instruction |= inst.operands[1].reg << 16;
12740	      inst.instruction |= inst.operands[2].reg;
12741
12742	      /* PR 12854: Error on extraneous shifts.  */
12743	      constraint (inst.operands[2].shifted,
12744			  _("extraneous shift as part of operand to shift insn"));
12745	    }
12746	  else
12747	    {
12748	      inst.operands[1].shifted = 1;
12749	      inst.operands[1].shift_kind = shift_kind;
12750	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
12751					     ? T_MNEM_movs : T_MNEM_mov);
12752	      inst.instruction |= inst.operands[0].reg << 8;
12753	      encode_thumb32_shifted_operand (1);
12754	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
12755	      inst.reloc.type = BFD_RELOC_UNUSED;
12756	    }
12757	}
12758      else
12759	{
12760	  if (inst.operands[2].isreg)
12761	    {
12762	      switch (shift_kind)
12763		{
12764		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
12765		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
12766		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
12767		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
12768		default: abort ();
12769		}
12770
12771	      inst.instruction |= inst.operands[0].reg;
12772	      inst.instruction |= inst.operands[2].reg << 3;
12773
12774	      /* PR 12854: Error on extraneous shifts.  */
12775	      constraint (inst.operands[2].shifted,
12776			  _("extraneous shift as part of operand to shift insn"));
12777	    }
12778	  else
12779	    {
12780	      switch (shift_kind)
12781		{
12782		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
12783		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
12784		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
12785		default: abort ();
12786		}
12787	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
12788	      inst.instruction |= inst.operands[0].reg;
12789	      inst.instruction |= inst.operands[1].reg << 3;
12790	    }
12791	}
12792    }
12793  else
12794    {
12795      constraint (inst.operands[0].reg > 7
12796		  || inst.operands[1].reg > 7, BAD_HIREG);
12797      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
12798
12799      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
12800	{
12801	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
12802	  constraint (inst.operands[0].reg != inst.operands[1].reg,
12803		      _("source1 and dest must be same register"));
12804
12805	  switch (inst.instruction)
12806	    {
12807	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
12808	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
12809	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
12810	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
12811	    default: abort ();
12812	    }
12813
12814	  inst.instruction |= inst.operands[0].reg;
12815	  inst.instruction |= inst.operands[2].reg << 3;
12816
12817	  /* PR 12854: Error on extraneous shifts.  */
12818	  constraint (inst.operands[2].shifted,
12819		      _("extraneous shift as part of operand to shift insn"));
12820	}
12821      else
12822	{
12823	  switch (inst.instruction)
12824	    {
12825	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
12826	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
12827	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
12828	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
12829	    default: abort ();
12830	    }
12831	  inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
12832	  inst.instruction |= inst.operands[0].reg;
12833	  inst.instruction |= inst.operands[1].reg << 3;
12834	}
12835    }
12836}
12837
12838static void
12839do_t_simd (void)
12840{
12841  unsigned Rd, Rn, Rm;
12842
12843  Rd = inst.operands[0].reg;
12844  Rn = inst.operands[1].reg;
12845  Rm = inst.operands[2].reg;
12846
12847  reject_bad_reg (Rd);
12848  reject_bad_reg (Rn);
12849  reject_bad_reg (Rm);
12850
12851  inst.instruction |= Rd << 8;
12852  inst.instruction |= Rn << 16;
12853  inst.instruction |= Rm;
12854}
12855
12856static void
12857do_t_simd2 (void)
12858{
12859  unsigned Rd, Rn, Rm;
12860
12861  Rd = inst.operands[0].reg;
12862  Rm = inst.operands[1].reg;
12863  Rn = inst.operands[2].reg;
12864
12865  reject_bad_reg (Rd);
12866  reject_bad_reg (Rn);
12867  reject_bad_reg (Rm);
12868
12869  inst.instruction |= Rd << 8;
12870  inst.instruction |= Rn << 16;
12871  inst.instruction |= Rm;
12872}
12873
12874static void
12875do_t_smc (void)
12876{
12877  unsigned int value = inst.reloc.exp.X_add_number;
12878  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
12879	      _("SMC is not permitted on this architecture"));
12880  constraint (inst.reloc.exp.X_op != O_constant,
12881	      _("expression too complex"));
12882  inst.reloc.type = BFD_RELOC_UNUSED;
12883  inst.instruction |= (value & 0xf000) >> 12;
12884  inst.instruction |= (value & 0x0ff0);
12885  inst.instruction |= (value & 0x000f) << 16;
12886  /* PR gas/15623: SMC instructions must be last in an IT block.  */
12887  set_it_insn_type_last ();
12888}
12889
12890static void
12891do_t_hvc (void)
12892{
12893  unsigned int value = inst.reloc.exp.X_add_number;
12894
12895  inst.reloc.type = BFD_RELOC_UNUSED;
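  /* Split the 16-bit value into the imm12 field (bits 11:0) and the
     imm4 field (bits 19:16).  */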
12896  inst.instruction |= (value & 0x0fff);
12897  inst.instruction |= (value & 0xf000) << 4;
12898}
12899
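/* Common helper for SSAT and USAT.  The SSAT encoding stores the
   saturation position minus one, while USAT stores it directly, hence
   the BIAS argument (1 for SSAT, 0 for USAT).  */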
12900static void
12901do_t_ssat_usat (int bias)
12902{
12903  unsigned Rd, Rn;
12904
12905  Rd = inst.operands[0].reg;
12906  Rn = inst.operands[2].reg;
12907
12908  reject_bad_reg (Rd);
12909  reject_bad_reg (Rn);
12910
12911  inst.instruction |= Rd << 8;
12912  inst.instruction |= inst.operands[1].imm - bias;
12913  inst.instruction |= Rn << 16;
12914
12915  if (inst.operands[3].present)
12916    {
12917      offsetT shift_amount = inst.reloc.exp.X_add_number;
12918
12919      inst.reloc.type = BFD_RELOC_UNUSED;
12920
12921      constraint (inst.reloc.exp.X_op != O_constant,
12922		  _("expression too complex"));
12923
12924      if (shift_amount != 0)
12925	{
12926	  constraint (shift_amount > 31,
12927		      _("shift expression is too large"));
12928
12929	  if (inst.operands[3].shift_kind == SHIFT_ASR)
12930	    inst.instruction |= 0x00200000;  /* sh bit.  */
12931
12932	  inst.instruction |= (shift_amount & 0x1c) << 10;
12933	  inst.instruction |= (shift_amount & 0x03) << 6;
12934	}
12935    }
12936}
12937
12938static void
12939do_t_ssat (void)
12940{
12941  do_t_ssat_usat (1);
12942}
12943
12944static void
12945do_t_ssat16 (void)
12946{
12947  unsigned Rd, Rn;
12948
12949  Rd = inst.operands[0].reg;
12950  Rn = inst.operands[2].reg;
12951
12952  reject_bad_reg (Rd);
12953  reject_bad_reg (Rn);
12954
12955  inst.instruction |= Rd << 8;
12956  inst.instruction |= inst.operands[1].imm - 1;
12957  inst.instruction |= Rn << 16;
12958}
12959
12960static void
12961do_t_strex (void)
12962{
12963  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
12964	      || inst.operands[2].postind || inst.operands[2].writeback
12965	      || inst.operands[2].immisreg || inst.operands[2].shifted
12966	      || inst.operands[2].negative,
12967	      BAD_ADDR_MODE);
12968
12969  constraint (inst.operands[2].reg == REG_PC, BAD_PC);
12970
12971  inst.instruction |= inst.operands[0].reg << 8;
12972  inst.instruction |= inst.operands[1].reg << 12;
12973  inst.instruction |= inst.operands[2].reg << 16;
12974  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
12975}
12976
12977static void
12978do_t_strexd (void)
12979{
12980  if (!inst.operands[2].present)
12981    inst.operands[2].reg = inst.operands[1].reg + 1;
12982
12983  constraint (inst.operands[0].reg == inst.operands[1].reg
12984	      || inst.operands[0].reg == inst.operands[2].reg
12985	      || inst.operands[0].reg == inst.operands[3].reg,
12986	      BAD_OVERLAP);
12987
12988  inst.instruction |= inst.operands[0].reg;
12989  inst.instruction |= inst.operands[1].reg << 12;
12990  inst.instruction |= inst.operands[2].reg << 8;
12991  inst.instruction |= inst.operands[3].reg << 16;
12992}
12993
12994static void
12995do_t_sxtah (void)
12996{
12997  unsigned Rd, Rn, Rm;
12998
12999  Rd = inst.operands[0].reg;
13000  Rn = inst.operands[1].reg;
13001  Rm = inst.operands[2].reg;
13002
13003  reject_bad_reg (Rd);
13004  reject_bad_reg (Rn);
13005  reject_bad_reg (Rm);
13006
13007  inst.instruction |= Rd << 8;
13008  inst.instruction |= Rn << 16;
13009  inst.instruction |= Rm;
13010  inst.instruction |= inst.operands[3].imm << 4;
13011}
13012
13013static void
13014do_t_sxth (void)
13015{
13016  unsigned Rd, Rm;
13017
13018  Rd = inst.operands[0].reg;
13019  Rm = inst.operands[1].reg;
13020
13021  reject_bad_reg (Rd);
13022  reject_bad_reg (Rm);
13023
13024  if (inst.instruction <= 0xffff
13025      && inst.size_req != 4
13026      && Rd <= 7 && Rm <= 7
13027      && (!inst.operands[2].present || inst.operands[2].imm == 0))
13028    {
13029      inst.instruction = THUMB_OP16 (inst.instruction);
13030      inst.instruction |= Rd;
13031      inst.instruction |= Rm << 3;
13032    }
13033  else if (unified_syntax)
13034    {
13035      if (inst.instruction <= 0xffff)
13036	inst.instruction = THUMB_OP32 (inst.instruction);
13037      inst.instruction |= Rd << 8;
13038      inst.instruction |= Rm;
13039      inst.instruction |= inst.operands[2].imm << 4;
13040    }
13041  else
13042    {
13043      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
13044		  _("Thumb encoding does not support rotation"));
13045      constraint (1, BAD_HIREG);
13046    }
13047}
13048
13049static void
13050do_t_swi (void)
13051{
13052  /* We have to do the following check manually as ARM_EXT_OS only applies
13053     to ARM_EXT_V6M.  */
13054  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6m))
13055    {
13056      if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_os)
13057	  /* This only applies to the v6m, however, not to later architectures.  */
13058	  && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7))
13059	as_bad (_("SVC is not permitted on this architecture"));
13060      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, arm_ext_os);
13061    }
13062
13063  inst.reloc.type = BFD_RELOC_ARM_SWI;
13064}
13065
13066static void
13067do_t_tb (void)
13068{
13069  unsigned Rn, Rm;
13070  int half;
13071
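  /* Bit 4 of the opcode distinguishes TBH (halfword table, index shifted
     left by one) from TBB (byte table).  */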
13072  half = (inst.instruction & 0x10) != 0;
13073  set_it_insn_type_last ();
13074  constraint (inst.operands[0].immisreg,
13075	      _("instruction requires register index"));
13076
13077  Rn = inst.operands[0].reg;
13078  Rm = inst.operands[0].imm;
13079
13080  constraint (Rn == REG_SP, BAD_SP);
13081  reject_bad_reg (Rm);
13082
13083  constraint (!half && inst.operands[0].shifted,
13084	      _("instruction does not allow shifted index"));
13085  inst.instruction |= (Rn << 16) | Rm;
13086}
13087
13088static void
13089do_t_udf (void)
13090{
13091  if (!inst.operands[0].present)
13092    inst.operands[0].imm = 0;
13093
13094  if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
13095    {
13096      constraint (inst.size_req == 2,
13097                  _("immediate value out of range"));
13098      inst.instruction = THUMB_OP32 (inst.instruction);
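      /* The 16-bit immediate is split into imm4 (bits 19:16) and imm12
	 (bits 11:0).  */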
13099      inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
13100      inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
13101    }
13102  else
13103    {
13104      inst.instruction = THUMB_OP16 (inst.instruction);
13105      inst.instruction |= inst.operands[0].imm;
13106    }
13107
13108  set_it_insn_type (NEUTRAL_IT_INSN);
13109}
13110
13111
13112static void
13113do_t_usat (void)
13114{
13115  do_t_ssat_usat (0);
13116}
13117
13118static void
13119do_t_usat16 (void)
13120{
13121  unsigned Rd, Rn;
13122
13123  Rd = inst.operands[0].reg;
13124  Rn = inst.operands[2].reg;
13125
13126  reject_bad_reg (Rd);
13127  reject_bad_reg (Rn);
13128
13129  inst.instruction |= Rd << 8;
13130  inst.instruction |= inst.operands[1].imm;
13131  inst.instruction |= Rn << 16;
13132}
13133
13134/* Neon instruction encoder helpers.  */
13135
13136/* Encodings for the different types for various Neon opcodes.  */
13137
13138/* An "invalid" code for the following tables.  */
13139#define N_INV -1u
13140
13141struct neon_tab_entry
13142{
13143  unsigned integer;
13144  unsigned float_or_poly;
13145  unsigned scalar_or_imm;
13146};
13147
13148/* Map overloaded Neon opcodes to their respective encodings.  */
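/* Each X() row gives, in order, the integer, float_or_poly and
   scalar_or_imm encodings of struct neon_tab_entry; N_INV marks a form
   that does not exist.  */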
13149#define NEON_ENC_TAB					\
13150  X(vabd,	0x0000700, 0x1200d00, N_INV),		\
13151  X(vmax,	0x0000600, 0x0000f00, N_INV),		\
13152  X(vmin,	0x0000610, 0x0200f00, N_INV),		\
13153  X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
13154  X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
13155  X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
13156  X(vadd,	0x0000800, 0x0000d00, N_INV),		\
13157  X(vsub,	0x1000800, 0x0200d00, N_INV),		\
13158  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
13159  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
13160  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
13161  /* Register variants of the following two instructions are encoded as
13162     vcge / vcgt with the operands reversed.  */  	\
13163  X(vclt,	0x0000300, 0x1200e00, 0x1b10200),	\
13164  X(vcle,	0x0000310, 0x1000e00, 0x1b10180),	\
13165  X(vfma,	N_INV, 0x0000c10, N_INV),		\
13166  X(vfms,	N_INV, 0x0200c10, N_INV),		\
13167  X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
13168  X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
13169  X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
13170  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
13171  X(vmlal,	0x0800800, N_INV,     0x0800240),	\
13172  X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
13173  X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
13174  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
13175  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
13176  X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),	\
13177  X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),	\
13178  X(vqrdmlah,   0x3000b10, N_INV,     0x0800e40),	\
13179  X(vqrdmlsh,   0x3000c10, N_INV,     0x0800f40),	\
13180  X(vshl,	0x0000400, N_INV,     0x0800510),	\
13181  X(vqshl,	0x0000410, N_INV,     0x0800710),	\
13182  X(vand,	0x0000110, N_INV,     0x0800030),	\
13183  X(vbic,	0x0100110, N_INV,     0x0800030),	\
13184  X(veor,	0x1000110, N_INV,     N_INV),		\
13185  X(vorn,	0x0300110, N_INV,     0x0800010),	\
13186  X(vorr,	0x0200110, N_INV,     0x0800010),	\
13187  X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
13188  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
13189  X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
13190  X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
13191  X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
13192  X(vst1,	0x0000000, 0x0800000, N_INV),		\
13193  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
13194  X(vst2,	0x0000100, 0x0800100, N_INV),		\
13195  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
13196  X(vst3,	0x0000200, 0x0800200, N_INV),		\
13197  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
13198  X(vst4,	0x0000300, 0x0800300, N_INV),		\
13199  X(vmovn,	0x1b20200, N_INV,     N_INV),		\
13200  X(vtrn,	0x1b20080, N_INV,     N_INV),		\
13201  X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
13202  X(vqmovun,	0x1b20240, N_INV,     N_INV),		\
13203  X(vnmul,      0xe200a40, 0xe200b40, N_INV),		\
13204  X(vnmla,      0xe100a40, 0xe100b40, N_INV),		\
13205  X(vnmls,      0xe100a00, 0xe100b00, N_INV),		\
13206  X(vfnma,      0xe900a40, 0xe900b40, N_INV),		\
13207  X(vfnms,      0xe900a00, 0xe900b00, N_INV),		\
13208  X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),		\
13209  X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),		\
13210  X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),		\
13211  X(vcmpez,     0xeb50ac0, 0xeb50bc0, N_INV),		\
13212  X(vseleq,	0xe000a00, N_INV,     N_INV),		\
13213  X(vselvs,	0xe100a00, N_INV,     N_INV),		\
13214  X(vselge,	0xe200a00, N_INV,     N_INV),		\
13215  X(vselgt,	0xe300a00, N_INV,     N_INV),		\
13216  X(vmaxnm,	0xe800a00, 0x3000f10, N_INV),		\
13217  X(vminnm,	0xe800a40, 0x3200f10, N_INV),		\
13218  X(vcvta,	0xebc0a40, 0x3bb0000, N_INV),		\
13219  X(vrintr,	0xeb60a40, 0x3ba0400, N_INV),		\
13220  X(vrinta,	0xeb80a40, 0x3ba0400, N_INV),		\
13221  X(aes,	0x3b00300, N_INV,     N_INV),		\
13222  X(sha3op,	0x2000c00, N_INV,     N_INV),		\
13223  X(sha1h,	0x3b902c0, N_INV,     N_INV),           \
13224  X(sha2op,     0x3ba0380, N_INV,     N_INV)
13225
13226enum neon_opc
13227{
13228#define X(OPC,I,F,S) N_MNEM_##OPC
13229NEON_ENC_TAB
13230#undef X
13231};
13232
13233static const struct neon_tab_entry neon_enc_tab[] =
13234{
13235#define X(OPC,I,F,S) { (I), (F), (S) }
13236NEON_ENC_TAB
13237#undef X
13238};
13239
13240/* Do not use these macros; instead, use NEON_ENCODE defined below.  */
13241#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13242#define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
13243#define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13244#define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13245#define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13246#define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13247#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13248#define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13249#define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13250#define NEON_ENC_SINGLE_(X) \
13251  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
13252#define NEON_ENC_DOUBLE_(X) \
13253  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
13254#define NEON_ENC_FPV8_(X) \
13255  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))
13256
13257#define NEON_ENCODE(type, inst)					\
13258  do								\
13259    {								\
13260      inst.instruction = NEON_ENC_##type##_ (inst.instruction);	\
13261      inst.is_neon = 1;						\
13262    }								\
13263  while (0)
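
/* For illustration (not used by the assembler itself): the X macro expands
   each NEON_ENC_TAB entry twice, once into the N_MNEM_* enumeration and once
   into neon_enc_tab.  For example the entry

     X(vadd,	0x0000800, 0x0000d00, N_INV)

   yields the enumerator N_MNEM_vadd and the table slot
   { 0x0000800, 0x0000d00, N_INV }.  While an overloaded mnemonic is being
   processed, the low 28 bits of inst.instruction hold its N_MNEM_* value, so
   e.g. NEON_ENCODE (INTEGER, inst) rewrites inst.instruction with the
   .integer encoding from the table (0x0000800 for vadd) and marks the
   instruction as Neon.  */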
13264
13265#define check_neon_suffixes						\
13266  do									\
13267    {									\
13268      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon)	\
13269	{								\
13270	  as_bad (_("invalid neon suffix for non neon instruction"));	\
13271	  return;							\
13272	}								\
13273    }									\
13274  while (0)
13275
13276/* Define shapes for instruction operands. The following mnemonic characters
13277   are used in this table:
13278
13279     F - VFP S<n> register
     H - VFP S<n> register holding a half-precision value
13280     D - Neon D<n> register
13281     Q - Neon Q<n> register
13282     I - Immediate
13283     S - Scalar
13284     R - ARM register
13285     L - D<n> register list
13286
13287   This table is used to generate various data:
13288     - enumerations of the form NS_DDR to be used as arguments to
13289       neon_select_shape.
13290     - a table classifying shapes into single, double, quad, mixed.
13291     - a table used to drive neon_select_shape.  */
13292
13293#define NEON_SHAPE_DEF			\
13294  X(3, (D, D, D), DOUBLE),		\
13295  X(3, (Q, Q, Q), QUAD),		\
13296  X(3, (D, D, I), DOUBLE),		\
13297  X(3, (Q, Q, I), QUAD),		\
13298  X(3, (D, D, S), DOUBLE),		\
13299  X(3, (Q, Q, S), QUAD),		\
13300  X(2, (D, D), DOUBLE),			\
13301  X(2, (Q, Q), QUAD),			\
13302  X(2, (D, S), DOUBLE),			\
13303  X(2, (Q, S), QUAD),			\
13304  X(2, (D, R), DOUBLE),			\
13305  X(2, (Q, R), QUAD),			\
13306  X(2, (D, I), DOUBLE),			\
13307  X(2, (Q, I), QUAD),			\
13308  X(3, (D, L, D), DOUBLE),		\
13309  X(2, (D, Q), MIXED),			\
13310  X(2, (Q, D), MIXED),			\
13311  X(3, (D, Q, I), MIXED),		\
13312  X(3, (Q, D, I), MIXED),		\
13313  X(3, (Q, D, D), MIXED),		\
13314  X(3, (D, Q, Q), MIXED),		\
13315  X(3, (Q, Q, D), MIXED),		\
13316  X(3, (Q, D, S), MIXED),		\
13317  X(3, (D, Q, S), MIXED),		\
13318  X(4, (D, D, D, I), DOUBLE),		\
13319  X(4, (Q, Q, Q, I), QUAD),		\
13320  X(2, (F, F), SINGLE),			\
13321  X(3, (F, F, F), SINGLE),		\
13322  X(2, (F, I), SINGLE),			\
13323  X(2, (F, D), MIXED),			\
13324  X(2, (D, F), MIXED),			\
13325  X(3, (F, F, I), MIXED),		\
13326  X(4, (R, R, F, F), SINGLE),		\
13327  X(4, (F, F, R, R), SINGLE),		\
13328  X(3, (D, R, R), DOUBLE),		\
13329  X(3, (R, R, D), DOUBLE),		\
13330  X(2, (S, R), SINGLE),			\
13331  X(2, (R, S), SINGLE),			\
13332  X(2, (F, R), SINGLE),			\
13333  X(2, (R, F), SINGLE),			\
13334/* Half float shapes supported so far.  */\
13335  X (2, (H, D), MIXED),			\
13336  X (2, (D, H), MIXED),			\
13337  X (2, (H, F), MIXED),			\
13338  X (2, (F, H), MIXED),			\
13339  X (2, (H, H), HALF),			\
13340  X (2, (H, R), HALF),			\
13341  X (2, (R, H), HALF),			\
13342  X (2, (H, I), HALF),			\
13343  X (3, (H, H, H), HALF),		\
13344  X (3, (H, F, I), MIXED),		\
13345  X (3, (F, H, I), MIXED)
13346
13347#define S2(A,B)		NS_##A##B
13348#define S3(A,B,C)	NS_##A##B##C
13349#define S4(A,B,C,D)	NS_##A##B##C##D
13350
13351#define X(N, L, C) S##N L
13352
13353enum neon_shape
13354{
13355  NEON_SHAPE_DEF,
13356  NS_NULL
13357};
13358
13359#undef X
13360#undef S2
13361#undef S3
13362#undef S4
13363
13364enum neon_shape_class
13365{
13366  SC_HALF,
13367  SC_SINGLE,
13368  SC_DOUBLE,
13369  SC_QUAD,
13370  SC_MIXED
13371};
13372
13373#define X(N, L, C) SC_##C
13374
13375static enum neon_shape_class neon_shape_class[] =
13376{
13377  NEON_SHAPE_DEF
13378};
13379
13380#undef X
13381
13382enum neon_shape_el
13383{
13384  SE_H,
13385  SE_F,
13386  SE_D,
13387  SE_Q,
13388  SE_I,
13389  SE_S,
13390  SE_R,
13391  SE_L
13392};
13393
13394/* Register widths of above.  */
13395static unsigned neon_shape_el_size[] =
13396{
13397  16,
13398  32,
13399  64,
13400  128,
13401  0,
13402  32,
13403  32,
13404  0
13405};
13406
13407struct neon_shape_info
13408{
13409  unsigned els;
13410  enum neon_shape_el el[NEON_MAX_TYPE_ELS];
13411};
13412
13413#define S2(A,B)		{ SE_##A, SE_##B }
13414#define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
13415#define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }
13416
13417#define X(N, L, C) { N, S##N L }
13418
13419static struct neon_shape_info neon_shape_tab[] =
13420{
13421  NEON_SHAPE_DEF
13422};
13423
13424#undef X
13425#undef S2
13426#undef S3
13427#undef S4
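
/* Worked example of the X-macro expansions above (for illustration only):
   the entry X(3, (D, D, D), DOUBLE) in NEON_SHAPE_DEF becomes

     NS_DDD                        in enum neon_shape,
     SC_DOUBLE                     in neon_shape_class[], and
     { 3, { SE_D, SE_D, SE_D } }   in neon_shape_tab[],

   so neon_select_shape can match a three-operand all-D-register form,
   neon_quad can test its class, and neon_shape_el_size gives the width of
   each element.  */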
13428
13429/* Bit masks used when type checking a given instruction.
13430   'N_EQK' means the type must be the same as (or based on in some way) the key
13431   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
13432   set, various other bits can be set as well in order to modify the meaning of
13433   the type constraint.  */
13434
13435enum neon_type_mask
13436{
13437  N_S8   = 0x0000001,
13438  N_S16  = 0x0000002,
13439  N_S32  = 0x0000004,
13440  N_S64  = 0x0000008,
13441  N_U8   = 0x0000010,
13442  N_U16  = 0x0000020,
13443  N_U32  = 0x0000040,
13444  N_U64  = 0x0000080,
13445  N_I8   = 0x0000100,
13446  N_I16  = 0x0000200,
13447  N_I32  = 0x0000400,
13448  N_I64  = 0x0000800,
13449  N_8    = 0x0001000,
13450  N_16   = 0x0002000,
13451  N_32   = 0x0004000,
13452  N_64   = 0x0008000,
13453  N_P8   = 0x0010000,
13454  N_P16  = 0x0020000,
13455  N_F16  = 0x0040000,
13456  N_F32  = 0x0080000,
13457  N_F64  = 0x0100000,
13458  N_P64	 = 0x0200000,
13459  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
13460  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
13461  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
13462  N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
13463  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
13464  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
13465  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
13466  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
13467  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
13468  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
13469  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
13470  N_UTYP = 0,
13471  N_MAX_NONSPECIAL = N_P64
13472};
13473
13474#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
13475
13476#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
13477#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
13478#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
13479#define N_S_32     (N_S8 | N_S16 | N_S32)
13480#define N_F_16_32  (N_F16 | N_F32)
13481#define N_SUF_32   (N_SU_32 | N_F_16_32)
13482#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
13483#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
13484#define N_F_ALL    (N_F16 | N_F32 | N_F64)
13485
13486/* Pass this as the first type argument to neon_check_type to ignore types
13487   altogether.  */
13488#define N_IGNORE_TYPE (N_KEY | N_EQK)
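
/* Usage sketch (illustrative only): a typical neon_check_type call marks one
   operand as the key and constrains the others relative to it, e.g.

     neon_check_type (3, rs, N_EQK, N_EQK, N_SU_32 | N_KEY);

   says "operand 2 must have a signed or unsigned 8/16/32-bit type, and
   operands 0 and 1 must have the same type and size as it" (see
   do_neon_dyadic_i_su below).  Modifier bits such as N_DBL or N_HLF may be
   ORed with N_EQK to ask for twice or half the key's size instead.  */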
13489
13490/* Select a "shape" for the current instruction (describing register types or
13491   sizes) from a list of alternatives. Return NS_NULL if the current instruction
13492   doesn't fit. For non-polymorphic shapes, checking is usually done as a
13493   function of operand parsing, so this function doesn't need to be called.
13494   Shapes should be listed in order of decreasing length.  */
13495
13496static enum neon_shape
13497neon_select_shape (enum neon_shape shape, ...)
13498{
13499  va_list ap;
13500  enum neon_shape first_shape = shape;
13501
13502  /* Fix missing optional operands. FIXME: we don't know at this point how
13503     many arguments we should have, so this makes the assumption that we have
13504     > 1. This is true of all current Neon opcodes, I think, but may not be
13505     true in the future.  */
13506  if (!inst.operands[1].present)
13507    inst.operands[1] = inst.operands[0];
13508
13509  va_start (ap, shape);
13510
13511  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
13512    {
13513      unsigned j;
13514      int matches = 1;
13515
13516      for (j = 0; j < neon_shape_tab[shape].els; j++)
13517	{
13518	  if (!inst.operands[j].present)
13519	    {
13520	      matches = 0;
13521	      break;
13522	    }
13523
13524	  switch (neon_shape_tab[shape].el[j])
13525	    {
13526	      /* If a  .f16,  .16,  .u16 or  .s16 type specifier is given for
13527		 a VFP single precision register operand, it essentially
13528		 means that only half of the register is used.
13529
13530		 If the type specifier is given after the mnemonic, the
13531		 information is stored in inst.vectype.  If the type specifier
13532		 is given after a register operand, the information is stored
13533		 in inst.operands[].vectype.
13534
13535		 When there is only one type specifier, and all the register
13536		 operands are the same type of hardware register, the type
13537		 specifier applies to all register operands.
13538
13539		 If no type specifier is given, the shape is inferred from
13540		 the operand information.
13541
13542		 For example:
13543		 vadd.f16 s0, s1, s2:		NS_HHH
13544		 vabs.f16 s0, s1:		NS_HH
13545		 vmov.f16 s0, r1:		NS_HR
13546		 vmov.f16 r0, s1:		NS_RH
13547		 vcvt.f16 r0, s1:		NS_RH
13548		 vcvt.f16.s32	s2, s2, #29:	NS_HFI
13549		 vcvt.f16.s32	s2, s2:		NS_HF
13550	      */
13551	    case SE_H:
13552	      if (!(inst.operands[j].isreg
13553		    && inst.operands[j].isvec
13554		    && inst.operands[j].issingle
13555		    && !inst.operands[j].isquad
13556		    && ((inst.vectype.elems == 1
13557			 && inst.vectype.el[0].size == 16)
13558			|| (inst.vectype.elems > 1
13559			    && inst.vectype.el[j].size == 16)
13560			|| (inst.vectype.elems == 0
13561			    && inst.operands[j].vectype.type != NT_invtype
13562			    && inst.operands[j].vectype.size == 16))))
13563		matches = 0;
13564	      break;
13565
13566	    case SE_F:
13567	      if (!(inst.operands[j].isreg
13568		    && inst.operands[j].isvec
13569		    && inst.operands[j].issingle
13570		    && !inst.operands[j].isquad
13571		    && ((inst.vectype.elems == 1 && inst.vectype.el[0].size == 32)
13572			|| (inst.vectype.elems > 1 && inst.vectype.el[j].size == 32)
13573			|| (inst.vectype.elems == 0
13574			    && (inst.operands[j].vectype.size == 32
13575				|| inst.operands[j].vectype.type == NT_invtype)))))
13576		matches = 0;
13577	      break;
13578
13579	    case SE_D:
13580	      if (!(inst.operands[j].isreg
13581		    && inst.operands[j].isvec
13582		    && !inst.operands[j].isquad
13583		    && !inst.operands[j].issingle))
13584		matches = 0;
13585	      break;
13586
13587	    case SE_R:
13588	      if (!(inst.operands[j].isreg
13589		    && !inst.operands[j].isvec))
13590		matches = 0;
13591	      break;
13592
13593	    case SE_Q:
13594	      if (!(inst.operands[j].isreg
13595		    && inst.operands[j].isvec
13596		    && inst.operands[j].isquad
13597		    && !inst.operands[j].issingle))
13598		matches = 0;
13599	      break;
13600
13601	    case SE_I:
13602	      if (!(!inst.operands[j].isreg
13603		    && !inst.operands[j].isscalar))
13604		matches = 0;
13605	      break;
13606
13607	    case SE_S:
13608	      if (!(!inst.operands[j].isreg
13609		    && inst.operands[j].isscalar))
13610		matches = 0;
13611	      break;
13612
13613	    case SE_L:
13614	      break;
13615	    }
13616	  if (!matches)
13617	    break;
13618	}
13619      if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
13620	/* We've matched all the entries in the shape table, and we don't
13621	   have any left over operands which have not been matched.  */
13622	break;
13623    }
13624
13625  va_end (ap);
13626
13627  if (shape == NS_NULL && first_shape != NS_NULL)
13628    first_error (_("invalid instruction shape"));
13629
13630  return shape;
13631}
13632
13633/* True if SHAPE is predominantly a quadword operation (most of the time, this
13634   means the Q bit should be set).  */
13635
13636static int
13637neon_quad (enum neon_shape shape)
13638{
13639  return neon_shape_class[shape] == SC_QUAD;
13640}
13641
13642static void
13643neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
13644		       unsigned *g_size)
13645{
13646  /* Allow modification to be made to types which are constrained to be
13647     based on the key element, based on bits set alongside N_EQK.  */
13648  if ((typebits & N_EQK) != 0)
13649    {
13650      if ((typebits & N_HLF) != 0)
13651	*g_size /= 2;
13652      else if ((typebits & N_DBL) != 0)
13653	*g_size *= 2;
13654      if ((typebits & N_SGN) != 0)
13655	*g_type = NT_signed;
13656      else if ((typebits & N_UNS) != 0)
13657	*g_type = NT_unsigned;
13658      else if ((typebits & N_INT) != 0)
13659	*g_type = NT_integer;
13660      else if ((typebits & N_FLT) != 0)
13661	*g_type = NT_float;
13662      else if ((typebits & N_SIZ) != 0)
13663	*g_type = NT_untyped;
13664    }
13665}
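
/* Example of the effect of the modifier bits (illustrative): with
   *g_type == NT_signed and *g_size == 32, a THISARG of
   (N_EQK | N_HLF | N_UNS) leaves *g_size == 16 and *g_type == NT_unsigned,
   i.e. "same as the key but half the width and forced unsigned".  */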
13666
13667/* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
13668   operand type, i.e. the single type specified in a Neon instruction when it
13669   is the only one given.  */
13670
13671static struct neon_type_el
13672neon_type_promote (struct neon_type_el *key, unsigned thisarg)
13673{
13674  struct neon_type_el dest = *key;
13675
13676  gas_assert ((thisarg & N_EQK) != 0);
13677
13678  neon_modify_type_size (thisarg, &dest.type, &dest.size);
13679
13680  return dest;
13681}
13682
13683/* Convert Neon type and size into compact bitmask representation.  */
13684
13685static enum neon_type_mask
13686type_chk_of_el_type (enum neon_el_type type, unsigned size)
13687{
13688  switch (type)
13689    {
13690    case NT_untyped:
13691      switch (size)
13692	{
13693	case 8:  return N_8;
13694	case 16: return N_16;
13695	case 32: return N_32;
13696	case 64: return N_64;
13697	default: ;
13698	}
13699      break;
13700
13701    case NT_integer:
13702      switch (size)
13703	{
13704	case 8:  return N_I8;
13705	case 16: return N_I16;
13706	case 32: return N_I32;
13707	case 64: return N_I64;
13708	default: ;
13709	}
13710      break;
13711
13712    case NT_float:
13713      switch (size)
13714	{
13715	case 16: return N_F16;
13716	case 32: return N_F32;
13717	case 64: return N_F64;
13718	default: ;
13719	}
13720      break;
13721
13722    case NT_poly:
13723      switch (size)
13724	{
13725	case 8:  return N_P8;
13726	case 16: return N_P16;
13727	case 64: return N_P64;
13728	default: ;
13729	}
13730      break;
13731
13732    case NT_signed:
13733      switch (size)
13734	{
13735	case 8:  return N_S8;
13736	case 16: return N_S16;
13737	case 32: return N_S32;
13738	case 64: return N_S64;
13739	default: ;
13740	}
13741      break;
13742
13743    case NT_unsigned:
13744      switch (size)
13745	{
13746	case 8:  return N_U8;
13747	case 16: return N_U16;
13748	case 32: return N_U32;
13749	case 64: return N_U64;
13750	default: ;
13751	}
13752      break;
13753
13754    default: ;
13755    }
13756
13757  return N_UTYP;
13758}
13759
13760/* Convert compact Neon bitmask type representation to a type and size. Only
13761   handles the case where a single bit is set in the mask.  */
13762
13763static int
13764el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
13765		     enum neon_type_mask mask)
13766{
13767  if ((mask & N_EQK) != 0)
13768    return FAIL;
13769
13770  if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
13771    *size = 8;
13772  else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16)) != 0)
13773    *size = 16;
13774  else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
13775    *size = 32;
13776  else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
13777    *size = 64;
13778  else
13779    return FAIL;
13780
13781  if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
13782    *type = NT_signed;
13783  else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
13784    *type = NT_unsigned;
13785  else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
13786    *type = NT_integer;
13787  else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
13788    *type = NT_untyped;
13789  else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
13790    *type = NT_poly;
13791  else if ((mask & (N_F_ALL)) != 0)
13792    *type = NT_float;
13793  else
13794    return FAIL;
13795
13796  return SUCCESS;
13797}
13798
13799/* Modify a bitmask of allowed types. This is only needed for type
13800   relaxation.  */
13801
13802static unsigned
13803modify_types_allowed (unsigned allowed, unsigned mods)
13804{
13805  unsigned size;
13806  enum neon_el_type type;
13807  unsigned destmask;
13808  int i;
13809
13810  destmask = 0;
13811
13812  for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
13813    {
13814      if (el_type_of_type_chk (&type, &size,
13815			       (enum neon_type_mask) (allowed & i)) == SUCCESS)
13816	{
13817	  neon_modify_type_size (mods, &type, &size);
13818	  destmask |= type_chk_of_el_type (type, size);
13819	}
13820    }
13821
13822  return destmask;
13823}
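
/* For example (illustrative): modify_types_allowed (N_SU_32, N_EQK | N_DBL)
   doubles each element size in the mask, turning
   N_S8|N_S16|N_S32|N_U8|N_U16|N_U32 into
   N_S16|N_S32|N_S64|N_U16|N_U32|N_U64, i.e. N_SU_16_64.  */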
13824
13825/* Check type and return type classification.
13826   The manual states (paraphrase): If one datatype is given, it indicates the
13827   type given in:
13828    - the second operand, if there is one
13829    - the operand, if there is no second operand
13830    - the result, if there are no operands.
13831   This isn't quite good enough though, so we use a concept of a "key" datatype
13832   which is set on a per-instruction basis, which is the one which matters when
13833   only one data type is written.
13834   Note: this function has side-effects (e.g. filling in missing operands). All
13835   Neon instructions should call it before performing bit encoding.  */
13836
13837static struct neon_type_el
13838neon_check_type (unsigned els, enum neon_shape ns, ...)
13839{
13840  va_list ap;
13841  unsigned i, pass, key_el = 0;
13842  unsigned types[NEON_MAX_TYPE_ELS];
13843  enum neon_el_type k_type = NT_invtype;
13844  unsigned k_size = -1u;
13845  struct neon_type_el badtype = {NT_invtype, -1};
13846  unsigned key_allowed = 0;
13847
13848  /* Optional registers in Neon instructions are always (not) in operand 1.
13849     Fill in the missing operand here, if it was omitted.  */
13850  if (els > 1 && !inst.operands[1].present)
13851    inst.operands[1] = inst.operands[0];
13852
13853  /* Suck up all the varargs.  */
13854  va_start (ap, ns);
13855  for (i = 0; i < els; i++)
13856    {
13857      unsigned thisarg = va_arg (ap, unsigned);
13858      if (thisarg == N_IGNORE_TYPE)
13859	{
13860	  va_end (ap);
13861	  return badtype;
13862	}
13863      types[i] = thisarg;
13864      if ((thisarg & N_KEY) != 0)
13865	key_el = i;
13866    }
13867  va_end (ap);
13868
13869  if (inst.vectype.elems > 0)
13870    for (i = 0; i < els; i++)
13871      if (inst.operands[i].vectype.type != NT_invtype)
13872	{
13873	  first_error (_("types specified in both the mnemonic and operands"));
13874	  return badtype;
13875	}
13876
13877  /* Duplicate inst.vectype elements here as necessary.
13878     FIXME: No idea if this is exactly the same as the ARM assembler,
13879     particularly when an insn takes one register and one non-register
13880     operand. */
13881  if (inst.vectype.elems == 1 && els > 1)
13882    {
13883      unsigned j;
13884      inst.vectype.elems = els;
13885      inst.vectype.el[key_el] = inst.vectype.el[0];
13886      for (j = 0; j < els; j++)
13887	if (j != key_el)
13888	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
13889						  types[j]);
13890    }
13891  else if (inst.vectype.elems == 0 && els > 0)
13892    {
13893      unsigned j;
13894      /* No types were given after the mnemonic, so look for types specified
13895	 after each operand. We allow some flexibility here; as long as the
13896	 "key" operand has a type, we can infer the others.  */
13897      for (j = 0; j < els; j++)
13898	if (inst.operands[j].vectype.type != NT_invtype)
13899	  inst.vectype.el[j] = inst.operands[j].vectype;
13900
13901      if (inst.operands[key_el].vectype.type != NT_invtype)
13902	{
13903	  for (j = 0; j < els; j++)
13904	    if (inst.operands[j].vectype.type == NT_invtype)
13905	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
13906						      types[j]);
13907	}
13908      else
13909	{
13910	  first_error (_("operand types can't be inferred"));
13911	  return badtype;
13912	}
13913    }
13914  else if (inst.vectype.elems != els)
13915    {
13916      first_error (_("type specifier has the wrong number of parts"));
13917      return badtype;
13918    }
13919
13920  for (pass = 0; pass < 2; pass++)
13921    {
13922      for (i = 0; i < els; i++)
13923	{
13924	  unsigned thisarg = types[i];
13925	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
13926	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
13927	  enum neon_el_type g_type = inst.vectype.el[i].type;
13928	  unsigned g_size = inst.vectype.el[i].size;
13929
13930	  /* Decay more-specific signed & unsigned types to sign-insensitive
13931	     integer types if sign-specific variants are unavailable.  */
13932	  if ((g_type == NT_signed || g_type == NT_unsigned)
13933	      && (types_allowed & N_SU_ALL) == 0)
13934	    g_type = NT_integer;
13935
13936	  /* If only untyped args are allowed, decay any more specific types to
13937	     them. Some instructions only care about signs for some element
13938	     sizes, so handle that properly.  */
13939	  if (((types_allowed & N_UNT) == 0)
13940	      && ((g_size == 8 && (types_allowed & N_8) != 0)
13941		  || (g_size == 16 && (types_allowed & N_16) != 0)
13942		  || (g_size == 32 && (types_allowed & N_32) != 0)
13943		  || (g_size == 64 && (types_allowed & N_64) != 0)))
13944	    g_type = NT_untyped;
13945
13946	  if (pass == 0)
13947	    {
13948	      if ((thisarg & N_KEY) != 0)
13949		{
13950		  k_type = g_type;
13951		  k_size = g_size;
13952		  key_allowed = thisarg & ~N_KEY;
13953
13954		  /* Check architecture constraint on FP16 extension.  */
13955		  if (k_size == 16
13956		      && k_type == NT_float
13957		      && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
13958		    {
13959		      inst.error = _(BAD_FP16);
13960		      return badtype;
13961		    }
13962		}
13963	    }
13964	  else
13965	    {
13966	      if ((thisarg & N_VFP) != 0)
13967		{
13968		  enum neon_shape_el regshape;
13969		  unsigned regwidth, match;
13970
13971		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
13972		  if (ns == NS_NULL)
13973		    {
13974		      first_error (_("invalid instruction shape"));
13975		      return badtype;
13976		    }
13977		  regshape = neon_shape_tab[ns].el[i];
13978		  regwidth = neon_shape_el_size[regshape];
13979
13980		  /* In VFP mode, operands must match register widths. If we
13981		     have a key operand, use its width, else use the width of
13982		     the current operand.  */
13983		  if (k_size != -1u)
13984		    match = k_size;
13985		  else
13986		    match = g_size;
13987
13988		  /* FP16 will use a single precision register.  */
13989		  if (regwidth == 32 && match == 16)
13990		    {
13991		      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
13992			match = regwidth;
13993		      else
13994			{
13995			  inst.error = _(BAD_FP16);
13996			  return badtype;
13997			}
13998		    }
13999
14000		  if (regwidth != match)
14001		    {
14002		      first_error (_("operand size must match register width"));
14003		      return badtype;
14004		    }
14005		}
14006
14007	      if ((thisarg & N_EQK) == 0)
14008		{
14009		  unsigned given_type = type_chk_of_el_type (g_type, g_size);
14010
14011		  if ((given_type & types_allowed) == 0)
14012		    {
14013		      first_error (_("bad type in Neon instruction"));
14014		      return badtype;
14015		    }
14016		}
14017	      else
14018		{
14019		  enum neon_el_type mod_k_type = k_type;
14020		  unsigned mod_k_size = k_size;
14021		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
14022		  if (g_type != mod_k_type || g_size != mod_k_size)
14023		    {
14024		      first_error (_("inconsistent types in Neon instruction"));
14025		      return badtype;
14026		    }
14027		}
14028	    }
14029	}
14030    }
14031
14032  return inst.vectype.el[key_el];
14033}
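
/* Worked example (illustrative): for an instruction declared with
   N_EQK, N_EQK, N_SU_32 | N_KEY (see do_neon_dyadic_i_su below) and written
   as "<mnemonic>.u16 d0, d1, d2", the single ".u16" specifier is duplicated
   across all three operands, operand 2 is the key, and the function returns
   { NT_unsigned, 16 }, which the caller then uses to set the U bit and the
   size field.  */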
14034
14035/* Neon-style VFP instruction forwarding.  */
14036
14037/* Thumb VFP instructions have 0xE in the condition field.  */
14038
14039static void
14040do_vfp_cond_or_thumb (void)
14041{
14042  inst.is_neon = 1;
14043
14044  if (thumb_mode)
14045    inst.instruction |= 0xe0000000;
14046  else
14047    inst.instruction |= inst.cond << 28;
14048}
14049
14050/* Look up and encode a simple mnemonic, for use as a helper function for the
14051   Neon-style VFP syntax.  This avoids duplication of bits of the insns table,
14052   etc.  It is assumed that operand parsing has already been done, and that the
14053   operands are in the form expected by the given opcode (this isn't necessarily
14054   the same as the form in which they were parsed, hence some massaging must
14055   take place before this function is called).
14056   Checks current arch version against that in the looked-up opcode.  */
14057
14058static void
14059do_vfp_nsyn_opcode (const char *opname)
14060{
14061  const struct asm_opcode *opcode;
14062
14063  opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
14064
14065  if (!opcode)
14066    abort ();
14067
14068  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
14069		thumb_mode ? *opcode->tvariant : *opcode->avariant),
14070	      _(BAD_FPU));
14071
14072  inst.is_neon = 1;
14073
14074  if (thumb_mode)
14075    {
14076      inst.instruction = opcode->tvalue;
14077      opcode->tencode ();
14078    }
14079  else
14080    {
14081      inst.instruction = (inst.cond << 28) | opcode->avalue;
14082      opcode->aencode ();
14083    }
14084}
14085
14086static void
14087do_vfp_nsyn_add_sub (enum neon_shape rs)
14088{
14089  int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
14090
14091  if (rs == NS_FFF || rs == NS_HHH)
14092    {
14093      if (is_add)
14094	do_vfp_nsyn_opcode ("fadds");
14095      else
14096	do_vfp_nsyn_opcode ("fsubs");
14097
14098      /* ARMv8.2 fp16 instruction.  */
14099      if (rs == NS_HHH)
14100	do_scalar_fp16_v82_encode ();
14101    }
14102  else
14103    {
14104      if (is_add)
14105	do_vfp_nsyn_opcode ("faddd");
14106      else
14107	do_vfp_nsyn_opcode ("fsubd");
14108    }
14109}
14110
14111/* Check operand types to see if this is a VFP instruction, and if so call
14112   PFN ().  */
14113
14114static int
14115try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
14116{
14117  enum neon_shape rs;
14118  struct neon_type_el et;
14119
14120  switch (args)
14121    {
14122    case 2:
14123      rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
14124      et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
14125      break;
14126
14127    case 3:
14128      rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14129      et = neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14130			    N_F_ALL | N_KEY | N_VFP);
14131      break;
14132
14133    default:
14134      abort ();
14135    }
14136
14137  if (et.type != NT_invtype)
14138    {
14139      pfn (rs);
14140      return SUCCESS;
14141    }
14142
14143  inst.error = NULL;
14144  return FAIL;
14145}
14146
14147static void
14148do_vfp_nsyn_mla_mls (enum neon_shape rs)
14149{
14150  int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
14151
14152  if (rs == NS_FFF || rs == NS_HHH)
14153    {
14154      if (is_mla)
14155	do_vfp_nsyn_opcode ("fmacs");
14156      else
14157	do_vfp_nsyn_opcode ("fnmacs");
14158
14159      /* ARMv8.2 fp16 instruction.  */
14160      if (rs == NS_HHH)
14161	do_scalar_fp16_v82_encode ();
14162    }
14163  else
14164    {
14165      if (is_mla)
14166	do_vfp_nsyn_opcode ("fmacd");
14167      else
14168	do_vfp_nsyn_opcode ("fnmacd");
14169    }
14170}
14171
14172static void
14173do_vfp_nsyn_fma_fms (enum neon_shape rs)
14174{
14175  int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
14176
14177  if (rs == NS_FFF || rs == NS_HHH)
14178    {
14179      if (is_fma)
14180	do_vfp_nsyn_opcode ("ffmas");
14181      else
14182	do_vfp_nsyn_opcode ("ffnmas");
14183
14184      /* ARMv8.2 fp16 instruction.  */
14185      if (rs == NS_HHH)
14186	do_scalar_fp16_v82_encode ();
14187    }
14188  else
14189    {
14190      if (is_fma)
14191	do_vfp_nsyn_opcode ("ffmad");
14192      else
14193	do_vfp_nsyn_opcode ("ffnmad");
14194    }
14195}
14196
14197static void
14198do_vfp_nsyn_mul (enum neon_shape rs)
14199{
14200  if (rs == NS_FFF || rs == NS_HHH)
14201    {
14202      do_vfp_nsyn_opcode ("fmuls");
14203
14204      /* ARMv8.2 fp16 instruction.  */
14205      if (rs == NS_HHH)
14206	do_scalar_fp16_v82_encode ();
14207    }
14208  else
14209    do_vfp_nsyn_opcode ("fmuld");
14210}
14211
14212static void
14213do_vfp_nsyn_abs_neg (enum neon_shape rs)
14214{
14215  int is_neg = (inst.instruction & 0x80) != 0;
14216  neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_VFP | N_KEY);
14217
14218  if (rs == NS_FF || rs == NS_HH)
14219    {
14220      if (is_neg)
14221	do_vfp_nsyn_opcode ("fnegs");
14222      else
14223	do_vfp_nsyn_opcode ("fabss");
14224
14225      /* ARMv8.2 fp16 instruction.  */
14226      if (rs == NS_HH)
14227	do_scalar_fp16_v82_encode ();
14228    }
14229  else
14230    {
14231      if (is_neg)
14232	do_vfp_nsyn_opcode ("fnegd");
14233      else
14234	do_vfp_nsyn_opcode ("fabsd");
14235    }
14236}
14237
14238/* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
14239   insns belong to Neon, and are handled elsewhere.  */
14240
14241static void
14242do_vfp_nsyn_ldm_stm (int is_dbmode)
14243{
14244  int is_ldm = (inst.instruction & (1 << 20)) != 0;
14245  if (is_ldm)
14246    {
14247      if (is_dbmode)
14248	do_vfp_nsyn_opcode ("fldmdbs");
14249      else
14250	do_vfp_nsyn_opcode ("fldmias");
14251    }
14252  else
14253    {
14254      if (is_dbmode)
14255	do_vfp_nsyn_opcode ("fstmdbs");
14256      else
14257	do_vfp_nsyn_opcode ("fstmias");
14258    }
14259}
14260
14261static void
14262do_vfp_nsyn_sqrt (void)
14263{
14264  enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
14265  neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
14266
14267  if (rs == NS_FF || rs == NS_HH)
14268    {
14269      do_vfp_nsyn_opcode ("fsqrts");
14270
14271      /* ARMv8.2 fp16 instruction.  */
14272      if (rs == NS_HH)
14273	do_scalar_fp16_v82_encode ();
14274    }
14275  else
14276    do_vfp_nsyn_opcode ("fsqrtd");
14277}
14278
14279static void
14280do_vfp_nsyn_div (void)
14281{
14282  enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14283  neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14284		   N_F_ALL | N_KEY | N_VFP);
14285
14286  if (rs == NS_FFF || rs == NS_HHH)
14287    {
14288      do_vfp_nsyn_opcode ("fdivs");
14289
14290      /* ARMv8.2 fp16 instruction.  */
14291      if (rs == NS_HHH)
14292	do_scalar_fp16_v82_encode ();
14293    }
14294  else
14295    do_vfp_nsyn_opcode ("fdivd");
14296}
14297
14298static void
14299do_vfp_nsyn_nmul (void)
14300{
14301  enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14302  neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14303		   N_F_ALL | N_KEY | N_VFP);
14304
14305  if (rs == NS_FFF || rs == NS_HHH)
14306    {
14307      NEON_ENCODE (SINGLE, inst);
14308      do_vfp_sp_dyadic ();
14309
14310      /* ARMv8.2 fp16 instruction.  */
14311      if (rs == NS_HHH)
14312	do_scalar_fp16_v82_encode ();
14313    }
14314  else
14315    {
14316      NEON_ENCODE (DOUBLE, inst);
14317      do_vfp_dp_rd_rn_rm ();
14318    }
14319  do_vfp_cond_or_thumb ();
14320
14321}
14322
14323static void
14324do_vfp_nsyn_cmp (void)
14325{
14326  enum neon_shape rs;
14327  if (inst.operands[1].isreg)
14328    {
14329      rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
14330      neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
14331
14332      if (rs == NS_FF || rs == NS_HH)
14333	{
14334	  NEON_ENCODE (SINGLE, inst);
14335	  do_vfp_sp_monadic ();
14336	}
14337      else
14338	{
14339	  NEON_ENCODE (DOUBLE, inst);
14340	  do_vfp_dp_rd_rm ();
14341	}
14342    }
14343  else
14344    {
14345      rs = neon_select_shape (NS_HI, NS_FI, NS_DI, NS_NULL);
14346      neon_check_type (2, rs, N_F_ALL | N_KEY | N_VFP, N_EQK);
14347
14348      switch (inst.instruction & 0x0fffffff)
14349	{
14350	case N_MNEM_vcmp:
14351	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
14352	  break;
14353	case N_MNEM_vcmpe:
14354	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
14355	  break;
14356	default:
14357	  abort ();
14358	}
14359
14360      if (rs == NS_FI || rs == NS_HI)
14361	{
14362	  NEON_ENCODE (SINGLE, inst);
14363	  do_vfp_sp_compare_z ();
14364	}
14365      else
14366	{
14367	  NEON_ENCODE (DOUBLE, inst);
14368	  do_vfp_dp_rd ();
14369	}
14370    }
14371  do_vfp_cond_or_thumb ();
14372
14373  /* ARMv8.2 fp16 instruction.  */
14374  if (rs == NS_HI || rs == NS_HH)
14375    do_scalar_fp16_v82_encode ();
14376}
14377
14378static void
14379nsyn_insert_sp (void)
14380{
14381  inst.operands[1] = inst.operands[0];
14382  memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
14383  inst.operands[0].reg = REG_SP;
14384  inst.operands[0].isreg = 1;
14385  inst.operands[0].writeback = 1;
14386  inst.operands[0].present = 1;
14387}
14388
14389static void
14390do_vfp_nsyn_push (void)
14391{
14392  nsyn_insert_sp ();
14393  if (inst.operands[1].issingle)
14394    do_vfp_nsyn_opcode ("fstmdbs");
14395  else
14396    do_vfp_nsyn_opcode ("fstmdbd");
14397}
14398
14399static void
14400do_vfp_nsyn_pop (void)
14401{
14402  nsyn_insert_sp ();
14403  if (inst.operands[1].issingle)
14404    do_vfp_nsyn_opcode ("fldmias");
14405  else
14406    do_vfp_nsyn_opcode ("fldmiad");
14407}
14408
14409/* Fix up Neon data-processing instructions, ORing in the correct bits for
14410   ARM mode or Thumb mode and moving the encoded bit 24 to bit 28.  */
14411
14412static void
14413neon_dp_fixup (struct arm_it* insn)
14414{
14415  unsigned int i = insn->instruction;
14416  insn->is_neon = 1;
14417
14418  if (thumb_mode)
14419    {
14420      /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode.  */
14421      if (i & (1 << 24))
14422	i |= 1 << 28;
14423
14424      i &= ~(1 << 24);
14425
14426      i |= 0xef000000;
14427    }
14428  else
14429    i |= 0xf2000000;
14430
14431  insn->instruction = i;
14432}
14433
14434/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
14435   (0, 1, 2, 3).  */
14436
14437static unsigned
14438neon_logbits (unsigned x)
14439{
14440  return ffs (x) - 4;
14441}
14442
14443#define LOW4(R) ((R) & 0xf)
14444#define HI1(R) (((R) >> 4) & 1)
14445
14446/* Encode insns with bit pattern:
14447
14448  |28/24|23|22 |21 20|19 16|15 12|11    8|7|6|5|4|3  0|
14449  |  U  |x |D  |size | Rn  | Rd  |x x x x|N|Q|M|x| Rm |
14450
14451  SIZE is passed in bits. -1 means size field isn't changed, in case it has a
14452  different meaning for some instruction.  */
14453
14454static void
14455neon_three_same (int isquad, int ubit, int size)
14456{
14457  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14458  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14459  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14460  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14461  inst.instruction |= LOW4 (inst.operands[2].reg);
14462  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
14463  inst.instruction |= (isquad != 0) << 6;
14464  inst.instruction |= (ubit != 0) << 24;
14465  if (size != -1)
14466    inst.instruction |= neon_logbits (size) << 20;
14467
14468  neon_dp_fixup (&inst);
14469}
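
/* Register-number packing example (illustrative): a Neon register such as
   d17 has the binary number 10001, so LOW4 gives 0001 (placed in the
   Rd/Rn/Rm field) and HI1 gives 1 (placed in the corresponding D/N/M
   extension bit, e.g. bit 22 for the destination in neon_three_same
   above).  */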
14470
14471/* Encode instructions of the form:
14472
14473  |28/24|23|22|21 20|19 18|17 16|15 12|11      7|6|5|4|3  0|
14474  |  U  |x |D |x  x |size |x  x | Rd  |x x x x x|Q|M|x| Rm |
14475
14476  Don't write size if SIZE == -1.  */
14477
14478static void
14479neon_two_same (int qbit, int ubit, int size)
14480{
14481  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14482  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14483  inst.instruction |= LOW4 (inst.operands[1].reg);
14484  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14485  inst.instruction |= (qbit != 0) << 6;
14486  inst.instruction |= (ubit != 0) << 24;
14487
14488  if (size != -1)
14489    inst.instruction |= neon_logbits (size) << 18;
14490
14491  neon_dp_fixup (&inst);
14492}
14493
14494/* Neon instruction encoders, in approximate order of appearance.  */
14495
14496static void
14497do_neon_dyadic_i_su (void)
14498{
14499  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14500  struct neon_type_el et = neon_check_type (3, rs,
14501    N_EQK, N_EQK, N_SU_32 | N_KEY);
14502  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14503}
14504
14505static void
14506do_neon_dyadic_i64_su (void)
14507{
14508  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14509  struct neon_type_el et = neon_check_type (3, rs,
14510    N_EQK, N_EQK, N_SU_ALL | N_KEY);
14511  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14512}
14513
14514static void
14515neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
14516		unsigned immbits)
14517{
14518  unsigned size = et.size >> 3;
14519  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14520  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14521  inst.instruction |= LOW4 (inst.operands[1].reg);
14522  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14523  inst.instruction |= (isquad != 0) << 6;
14524  inst.instruction |= immbits << 16;
14525  inst.instruction |= (size >> 3) << 7;
14526  inst.instruction |= (size & 0x7) << 19;
14527  if (write_ubit)
14528    inst.instruction |= (uval != 0) << 24;
14529
14530  neon_dp_fixup (&inst);
14531}
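
/* Field layout sketch for neon_imm_shift (illustrative): with et.size == 32,
   "size" is 4, so bit 7 (the L bit) stays clear and bits 21:19 get 0b100;
   a shift amount such as 5 lands in bits 18:16, giving an imm6 field
   (bits 21:16) of 0b100101, the usual size-marker-plus-shift-amount form.
   Only 64-bit element sizes (size == 8) set the L bit.  */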
14532
14533static void
14534do_neon_shl_imm (void)
14535{
14536  if (!inst.operands[2].isreg)
14537    {
14538      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14539      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
14540      int imm = inst.operands[2].imm;
14541
14542      constraint (imm < 0 || (unsigned)imm >= et.size,
14543		  _("immediate out of range for shift"));
14544      NEON_ENCODE (IMMED, inst);
14545      neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
14546    }
14547  else
14548    {
14549      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14550      struct neon_type_el et = neon_check_type (3, rs,
14551	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
14552      unsigned int tmp;
14553
14554      /* VSHL/VQSHL 3-register variants have syntax such as:
14555	   vshl.xx Dd, Dm, Dn
14556	 whereas other 3-register operations encoded by neon_three_same have
14557	 syntax like:
14558	   vadd.xx Dd, Dn, Dm
14559	 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
14560	 here.  */
14561      tmp = inst.operands[2].reg;
14562      inst.operands[2].reg = inst.operands[1].reg;
14563      inst.operands[1].reg = tmp;
14564      NEON_ENCODE (INTEGER, inst);
14565      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14566    }
14567}
14568
14569static void
14570do_neon_qshl_imm (void)
14571{
14572  if (!inst.operands[2].isreg)
14573    {
14574      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14575      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
14576      int imm = inst.operands[2].imm;
14577
14578      constraint (imm < 0 || (unsigned)imm >= et.size,
14579		  _("immediate out of range for shift"));
14580      NEON_ENCODE (IMMED, inst);
14581      neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, imm);
14582    }
14583  else
14584    {
14585      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14586      struct neon_type_el et = neon_check_type (3, rs,
14587	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
14588      unsigned int tmp;
14589
14590      /* See note in do_neon_shl_imm.  */
14591      tmp = inst.operands[2].reg;
14592      inst.operands[2].reg = inst.operands[1].reg;
14593      inst.operands[1].reg = tmp;
14594      NEON_ENCODE (INTEGER, inst);
14595      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14596    }
14597}
14598
14599static void
14600do_neon_rshl (void)
14601{
14602  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14603  struct neon_type_el et = neon_check_type (3, rs,
14604    N_EQK, N_EQK, N_SU_ALL | N_KEY);
14605  unsigned int tmp;
14606
14607  tmp = inst.operands[2].reg;
14608  inst.operands[2].reg = inst.operands[1].reg;
14609  inst.operands[1].reg = tmp;
14610  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14611}
14612
14613static int
14614neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
14615{
14616  /* Handle .I8 pseudo-instructions.  */
14617  if (size == 8)
14618    {
14619      /* Unfortunately, this will make everything apart from zero out-of-range.
14620	 FIXME is this the intended semantics? There doesn't seem much point in
14621	 accepting .I8 if so.  */
14622      immediate |= immediate << 8;
14623      size = 16;
14624    }
14625
14626  if (size >= 32)
14627    {
14628      if (immediate == (immediate & 0x000000ff))
14629	{
14630	  *immbits = immediate;
14631	  return 0x1;
14632	}
14633      else if (immediate == (immediate & 0x0000ff00))
14634	{
14635	  *immbits = immediate >> 8;
14636	  return 0x3;
14637	}
14638      else if (immediate == (immediate & 0x00ff0000))
14639	{
14640	  *immbits = immediate >> 16;
14641	  return 0x5;
14642	}
14643      else if (immediate == (immediate & 0xff000000))
14644	{
14645	  *immbits = immediate >> 24;
14646	  return 0x7;
14647	}
14648      if ((immediate & 0xffff) != (immediate >> 16))
14649	goto bad_immediate;
14650      immediate &= 0xffff;
14651    }
14652
14653  if (immediate == (immediate & 0x000000ff))
14654    {
14655      *immbits = immediate;
14656      return 0x9;
14657    }
14658  else if (immediate == (immediate & 0x0000ff00))
14659    {
14660      *immbits = immediate >> 8;
14661      return 0xb;
14662    }
14663
14664  bad_immediate:
14665  first_error (_("immediate value out of range"));
14666  return FAIL;
14667}
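
/* For example (illustrative): neon_cmode_for_logic_imm (0x0000ab00, &bits, 32)
   stores 0xab in *immbits and returns cmode 0x3 (8-bit value in byte 1),
   whereas a value such as 0x00ab00cd matches none of the patterns and is
   rejected via bad_immediate.  */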
14668
14669static void
14670do_neon_logic (void)
14671{
14672  if (inst.operands[2].present && inst.operands[2].isreg)
14673    {
14674      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14675      neon_check_type (3, rs, N_IGNORE_TYPE);
14676      /* U bit and size field were set as part of the bitmask.  */
14677      NEON_ENCODE (INTEGER, inst);
14678      neon_three_same (neon_quad (rs), 0, -1);
14679    }
14680  else
14681    {
14682      const int three_ops_form = (inst.operands[2].present
14683				  && !inst.operands[2].isreg);
14684      const int immoperand = (three_ops_form ? 2 : 1);
14685      enum neon_shape rs = (three_ops_form
14686			    ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
14687			    : neon_select_shape (NS_DI, NS_QI, NS_NULL));
14688      struct neon_type_el et = neon_check_type (2, rs,
14689	N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
14690      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
14691      unsigned immbits;
14692      int cmode;
14693
14694      if (et.type == NT_invtype)
14695	return;
14696
14697      if (three_ops_form)
14698	constraint (inst.operands[0].reg != inst.operands[1].reg,
14699		    _("first and second operands shall be the same register"));
14700
14701      NEON_ENCODE (IMMED, inst);
14702
14703      immbits = inst.operands[immoperand].imm;
14704      if (et.size == 64)
14705	{
14706	  /* .i64 is a pseudo-op, so the immediate must be a repeating
14707	     pattern.  */
14708	  if (immbits != (inst.operands[immoperand].regisimm ?
14709			  inst.operands[immoperand].reg : 0))
14710	    {
14711	      /* Set immbits to an invalid constant.  */
14712	      immbits = 0xdeadbeef;
14713	    }
14714	}
14715
14716      switch (opcode)
14717	{
14718	case N_MNEM_vbic:
14719	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
14720	  break;
14721
14722	case N_MNEM_vorr:
14723	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
14724	  break;
14725
14726	case N_MNEM_vand:
14727	  /* Pseudo-instruction for VBIC.  */
14728	  neon_invert_size (&immbits, 0, et.size);
14729	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
14730	  break;
14731
14732	case N_MNEM_vorn:
14733	  /* Pseudo-instruction for VORR.  */
14734	  neon_invert_size (&immbits, 0, et.size);
14735	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
14736	  break;
14737
14738	default:
14739	  abort ();
14740	}
14741
14742      if (cmode == FAIL)
14743	return;
14744
14745      inst.instruction |= neon_quad (rs) << 6;
14746      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14747      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14748      inst.instruction |= cmode << 8;
14749      neon_write_immbits (immbits);
14750
14751      neon_dp_fixup (&inst);
14752    }
14753}
14754
14755static void
14756do_neon_bitfield (void)
14757{
14758  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14759  neon_check_type (3, rs, N_IGNORE_TYPE);
14760  neon_three_same (neon_quad (rs), 0, -1);
14761}
14762
14763static void
14764neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
14765		  unsigned destbits)
14766{
14767  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14768  struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
14769					    types | N_KEY);
14770  if (et.type == NT_float)
14771    {
14772      NEON_ENCODE (FLOAT, inst);
14773      neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
14774    }
14775  else
14776    {
14777      NEON_ENCODE (INTEGER, inst);
14778      neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
14779    }
14780}
14781
14782static void
14783do_neon_dyadic_if_su (void)
14784{
14785  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
14786}
14787
14788static void
14789do_neon_dyadic_if_su_d (void)
14790{
14791  /* This version only allows D registers, but that constraint is enforced during
14792     operand parsing, so we don't need to do anything extra here.  */
14793  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
14794}
14795
14796static void
14797do_neon_dyadic_if_i_d (void)
14798{
14799  /* The "untyped" case can't happen. Do this to stop the "U" bit being
14800     affected if we specify unsigned args.  */
14801  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
14802}
14803
14804enum vfp_or_neon_is_neon_bits
14805{
14806  NEON_CHECK_CC = 1,
14807  NEON_CHECK_ARCH = 2,
14808  NEON_CHECK_ARCH8 = 4
14809};
14810
14811/* Call this function for an instruction that might have belonged to either the
14812   VFP or Neon instruction sets, but turned out to be a Neon instruction (due to
14813   the operand types involved, etc.).  We have to check and/or fix up a couple of
14814   things:
14815
14816     - Make sure the user hasn't attempted to make a Neon instruction
14817       conditional.
14818     - Alter the value in the condition code field if necessary.
14819     - Make sure that the arch supports Neon instructions.
14820
14821   Which of these operations take place depends on bits from enum
14822   vfp_or_neon_is_neon_bits.
14823
14824   WARNING: This function has side effects! If NEON_CHECK_CC is used and the
14825   current instruction's condition is COND_ALWAYS, the condition field is
14826   changed to inst.uncond_value. This is necessary because instructions shared
14827   between VFP and Neon may be conditional for the VFP variants only, and the
14828   unconditional Neon version must have, e.g., 0xF in the condition field.  */
14829
14830static int
14831vfp_or_neon_is_neon (unsigned check)
14832{
14833  /* Conditions are always legal in Thumb mode (IT blocks).  */
14834  if (!thumb_mode && (check & NEON_CHECK_CC))
14835    {
14836      if (inst.cond != COND_ALWAYS)
14837	{
14838	  first_error (_(BAD_COND));
14839	  return FAIL;
14840	}
14841      if (inst.uncond_value != -1)
14842	inst.instruction |= inst.uncond_value << 28;
14843    }
14844
14845  if ((check & NEON_CHECK_ARCH)
14846      && !mark_feature_used (&fpu_neon_ext_v1))
14847    {
14848      first_error (_(BAD_FPU));
14849      return FAIL;
14850    }
14851
14852  if ((check & NEON_CHECK_ARCH8)
14853      && !mark_feature_used (&fpu_neon_ext_armv8))
14854    {
14855      first_error (_(BAD_FPU));
14856      return FAIL;
14857    }
14858
14859  return SUCCESS;
14860}
14861
14862static void
14863do_neon_addsub_if_i (void)
14864{
14865  if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
14866    return;
14867
14868  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14869    return;
14870
14871  /* The "untyped" case can't happen. Do this to stop the "U" bit being
14872     affected if we specify unsigned args.  */
14873  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
14874}
14875
14876/* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
14877   result to be:
14878     V<op> A,B     (A is operand 0, B is operand 2)
14879   to mean:
14880     V<op> A,B,A
14881   not:
14882     V<op> A,B,B
14883   so handle that case specially.  */
14884
14885static void
14886neon_exchange_operands (void)
14887{
14888  if (inst.operands[1].present)
14889    {
14890      void *scratch = xmalloc (sizeof (inst.operands[0]));
14891
14892      /* Swap operands[1] and operands[2].  */
14893      memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
14894      inst.operands[1] = inst.operands[2];
14895      memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
14896      free (scratch);
14897    }
14898  else
14899    {
14900      inst.operands[1] = inst.operands[2];
14901      inst.operands[2] = inst.operands[0];
14902    }
14903}
14904
14905static void
14906neon_compare (unsigned regtypes, unsigned immtypes, int invert)
14907{
14908  if (inst.operands[2].isreg)
14909    {
14910      if (invert)
14911	neon_exchange_operands ();
14912      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
14913    }
14914  else
14915    {
14916      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14917      struct neon_type_el et = neon_check_type (2, rs,
14918	N_EQK | N_SIZ, immtypes | N_KEY);
14919
14920      NEON_ENCODE (IMMED, inst);
14921      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14922      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14923      inst.instruction |= LOW4 (inst.operands[1].reg);
14924      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14925      inst.instruction |= neon_quad (rs) << 6;
14926      inst.instruction |= (et.type == NT_float) << 10;
14927      inst.instruction |= neon_logbits (et.size) << 18;
14928
14929      neon_dp_fixup (&inst);
14930    }
14931}
14932
14933static void
14934do_neon_cmp (void)
14935{
14936  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, FALSE);
14937}
14938
14939static void
14940do_neon_cmp_inv (void)
14941{
14942  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, TRUE);
14943}
14944
14945static void
14946do_neon_ceq (void)
14947{
14948  neon_compare (N_IF_32, N_IF_32, FALSE);
14949}
14950
14951/* For multiply instructions, we have the possibility of 16-bit or 32-bit
14952   scalars, which are encoded in 5 bits, M : Rm.
14953   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
14954   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
14955   index in M.  */
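/* A worked example of the encoding below: for the 16-bit scalar d5[2],
   regno is 5 and elno is 2, giving 5 | (2 << 3) = 0x15, i.e. M:Rm = 1:0101;
   for the 32-bit scalar d5[1] the result is 5 | (1 << 4) = 0x15 again, but
   with the index carried in M alone.  */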
14956
14957static unsigned
14958neon_scalar_for_mul (unsigned scalar, unsigned elsize)
14959{
14960  unsigned regno = NEON_SCALAR_REG (scalar);
14961  unsigned elno = NEON_SCALAR_INDEX (scalar);
14962
14963  switch (elsize)
14964    {
14965    case 16:
14966      if (regno > 7 || elno > 3)
14967	goto bad_scalar;
14968      return regno | (elno << 3);
14969
14970    case 32:
14971      if (regno > 15 || elno > 1)
14972	goto bad_scalar;
14973      return regno | (elno << 4);
14974
14975    default:
14976    bad_scalar:
14977      first_error (_("scalar out of range for multiply instruction"));
14978    }
14979
14980  return 0;
14981}
14982
14983/* Encode multiply / multiply-accumulate scalar instructions.  */
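/* Field placement used below: Rd goes in bits [15:12] with D in bit 22, Rn in
   bits [19:16] with N in bit 7, the scalar's M:Rm in bit 5 and bits [3:0], the
   float flag in bit 8, the element-size code in bits [21:20], and the U bit in
   bit 24.  */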
14984
14985static void
14986neon_mul_mac (struct neon_type_el et, int ubit)
14987{
14988  unsigned scalar;
14989
14990  /* Give a more helpful error message if we have an invalid type.  */
14991  if (et.type == NT_invtype)
14992    return;
14993
14994  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
14995  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14996  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14997  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14998  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14999  inst.instruction |= LOW4 (scalar);
15000  inst.instruction |= HI1 (scalar) << 5;
15001  inst.instruction |= (et.type == NT_float) << 8;
15002  inst.instruction |= neon_logbits (et.size) << 20;
15003  inst.instruction |= (ubit != 0) << 24;
15004
15005  neon_dp_fixup (&inst);
15006}
15007
15008static void
15009do_neon_mac_maybe_scalar (void)
15010{
15011  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
15012    return;
15013
15014  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15015    return;
15016
15017  if (inst.operands[2].isscalar)
15018    {
15019      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
15020      struct neon_type_el et = neon_check_type (3, rs,
15021	N_EQK, N_EQK, N_I16 | N_I32 | N_F_16_32 | N_KEY);
15022      NEON_ENCODE (SCALAR, inst);
15023      neon_mul_mac (et, neon_quad (rs));
15024    }
15025  else
15026    {
15027      /* The "untyped" case can't happen.  Do this to stop the "U" bit being
15028	 affected if we specify unsigned args.  */
15029      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
15030    }
15031}
15032
15033static void
15034do_neon_fmac (void)
15035{
15036  if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
15037    return;
15038
15039  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15040    return;
15041
15042  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
15043}
15044
15045static void
15046do_neon_tst (void)
15047{
15048  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15049  struct neon_type_el et = neon_check_type (3, rs,
15050    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
15051  neon_three_same (neon_quad (rs), 0, et.size);
15052}
15053
15054/* VMUL with 3 registers allows the P8 type. The scalar version supports the
15055   same types as the MAC equivalents. The polynomial type for this instruction
15056   is encoded the same as the integer type.  */
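/* For instance, "vmul.p8 d0, d1, d2" is handled by the register form below;
   the N_P8 entry in the type mask shares the size encoding used for i8.  */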
15057
15058static void
15059do_neon_mul (void)
15060{
15061  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
15062    return;
15063
15064  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15065    return;
15066
15067  if (inst.operands[2].isscalar)
15068    do_neon_mac_maybe_scalar ();
15069  else
15070    neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F16 | N_F32 | N_P8, 0);
15071}
15072
15073static void
15074do_neon_qdmulh (void)
15075{
15076  if (inst.operands[2].isscalar)
15077    {
15078      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
15079      struct neon_type_el et = neon_check_type (3, rs,
15080	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
15081      NEON_ENCODE (SCALAR, inst);
15082      neon_mul_mac (et, neon_quad (rs));
15083    }
15084  else
15085    {
15086      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15087      struct neon_type_el et = neon_check_type (3, rs,
15088	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
15089      NEON_ENCODE (INTEGER, inst);
15090      /* The U bit (rounding) comes from the bit mask.  */
15091      neon_three_same (neon_quad (rs), 0, et.size);
15092    }
15093}
15094
15095static void
15096do_neon_qrdmlah (void)
15097{
15098  /* Check we're on the correct architecture.  */
15099  if (!mark_feature_used (&fpu_neon_ext_armv8))
15100    inst.error =
15101      _("instruction form not available on this architecture.");
15102  else if (!mark_feature_used (&fpu_neon_ext_v8_1))
15103    {
15104      as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
15105      record_feature_use (&fpu_neon_ext_v8_1);
15106    }
15107
15108  if (inst.operands[2].isscalar)
15109    {
15110      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
15111      struct neon_type_el et = neon_check_type (3, rs,
15112	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
15113      NEON_ENCODE (SCALAR, inst);
15114      neon_mul_mac (et, neon_quad (rs));
15115    }
15116  else
15117    {
15118      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15119      struct neon_type_el et = neon_check_type (3, rs,
15120	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
15121      NEON_ENCODE (INTEGER, inst);
15122      /* The U bit (rounding) comes from the bit mask.  */
15123      neon_three_same (neon_quad (rs), 0, et.size);
15124    }
15125}
15126
15127static void
15128do_neon_fcmp_absolute (void)
15129{
15130  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15131  struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
15132					    N_F_16_32 | N_KEY);
15133  /* Size field comes from bit mask.  */
15134  neon_three_same (neon_quad (rs), 1, et.size == 16 ? (int) et.size : -1);
15135}
15136
15137static void
15138do_neon_fcmp_absolute_inv (void)
15139{
15140  neon_exchange_operands ();
15141  do_neon_fcmp_absolute ();
15142}
15143
15144static void
15145do_neon_step (void)
15146{
15147  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15148  struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
15149					    N_F_16_32 | N_KEY);
15150  neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
15151}
15152
15153static void
15154do_neon_abs_neg (void)
15155{
15156  enum neon_shape rs;
15157  struct neon_type_el et;
15158
15159  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
15160    return;
15161
15162  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15163    return;
15164
15165  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15166  et = neon_check_type (2, rs, N_EQK, N_S_32 | N_F_16_32 | N_KEY);
15167
15168  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15169  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15170  inst.instruction |= LOW4 (inst.operands[1].reg);
15171  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15172  inst.instruction |= neon_quad (rs) << 6;
15173  inst.instruction |= (et.type == NT_float) << 10;
15174  inst.instruction |= neon_logbits (et.size) << 18;
15175
15176  neon_dp_fixup (&inst);
15177}
15178
15179static void
15180do_neon_sli (void)
15181{
15182  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15183  struct neon_type_el et = neon_check_type (2, rs,
15184    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
15185  int imm = inst.operands[2].imm;
15186  constraint (imm < 0 || (unsigned)imm >= et.size,
15187	      _("immediate out of range for insert"));
15188  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
15189}
15190
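/* VSRI differs from VSLI above only in the accepted shift range and in how the
   count is encoded: VSLI takes 0..size-1 and encodes the count directly, while
   VSRI takes 1..size and encodes size - count.  */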
15191static void
15192do_neon_sri (void)
15193{
15194  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15195  struct neon_type_el et = neon_check_type (2, rs,
15196    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
15197  int imm = inst.operands[2].imm;
15198  constraint (imm < 1 || (unsigned)imm > et.size,
15199	      _("immediate out of range for insert"));
15200  neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
15201}
15202
15203static void
15204do_neon_qshlu_imm (void)
15205{
15206  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15207  struct neon_type_el et = neon_check_type (2, rs,
15208    N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
15209  int imm = inst.operands[2].imm;
15210  constraint (imm < 0 || (unsigned)imm >= et.size,
15211	      _("immediate out of range for shift"));
15212  /* Only encodes the 'U present' variant of the instruction.
15213     In this case, signed types have OP (bit 8) set to 0.
15214     Unsigned types have OP set to 1.  */
15215  inst.instruction |= (et.type == NT_unsigned) << 8;
15216  /* The rest of the bits are the same as other immediate shifts.  */
15217  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
15218}
15219
15220static void
15221do_neon_qmovn (void)
15222{
15223  struct neon_type_el et = neon_check_type (2, NS_DQ,
15224    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
15225  /* Saturating move where operands can be signed or unsigned, and the
15226     destination has the same signedness.  */
15227  NEON_ENCODE (INTEGER, inst);
15228  if (et.type == NT_unsigned)
15229    inst.instruction |= 0xc0;
15230  else
15231    inst.instruction |= 0x80;
15232  neon_two_same (0, 1, et.size / 2);
15233}
15234
15235static void
15236do_neon_qmovun (void)
15237{
15238  struct neon_type_el et = neon_check_type (2, NS_DQ,
15239    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
15240  /* Saturating move with unsigned results. Operands must be signed.  */
15241  NEON_ENCODE (INTEGER, inst);
15242  neon_two_same (0, 1, et.size / 2);
15243}
15244
15245static void
15246do_neon_rshift_sat_narrow (void)
15247{
15248  /* FIXME: Types for narrowing. If operands are signed, results can be signed
15249     or unsigned. If operands are unsigned, results must also be unsigned.  */
15250  struct neon_type_el et = neon_check_type (2, NS_DQI,
15251    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
15252  int imm = inst.operands[2].imm;
15253  /* This gets the bounds check, size encoding and immediate bits calculation
15254     right.  */
15255  et.size /= 2;
15256
15257  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
15258     VQMOVN.I<size> <Dd>, <Qm>.  */
15259  if (imm == 0)
15260    {
15261      inst.operands[2].present = 0;
15262      inst.instruction = N_MNEM_vqmovn;
15263      do_neon_qmovn ();
15264      return;
15265    }
15266
15267  constraint (imm < 1 || (unsigned)imm > et.size,
15268	      _("immediate out of range"));
15269  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
15270}
15271
15272static void
15273do_neon_rshift_sat_narrow_u (void)
15274{
15275  /* FIXME: Types for narrowing. If operands are signed, results can be signed
15276     or unsigned. If operands are unsigned, results must also be unsigned.  */
15277  struct neon_type_el et = neon_check_type (2, NS_DQI,
15278    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
15279  int imm = inst.operands[2].imm;
15280  /* This gets the bounds check, size encoding and immediate bits calculation
15281     right.  */
15282  et.size /= 2;
15283
15284  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
15285     VQMOVUN.I<size> <Dd>, <Qm>.  */
15286  if (imm == 0)
15287    {
15288      inst.operands[2].present = 0;
15289      inst.instruction = N_MNEM_vqmovun;
15290      do_neon_qmovun ();
15291      return;
15292    }
15293
15294  constraint (imm < 1 || (unsigned)imm > et.size,
15295	      _("immediate out of range"));
15296  /* FIXME: The manual is kind of unclear about what value U should have in
15297     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
15298     must be 1.  */
15299  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
15300}
15301
15302static void
15303do_neon_movn (void)
15304{
15305  struct neon_type_el et = neon_check_type (2, NS_DQ,
15306    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
15307  NEON_ENCODE (INTEGER, inst);
15308  neon_two_same (0, 1, et.size / 2);
15309}
15310
15311static void
15312do_neon_rshift_narrow (void)
15313{
15314  struct neon_type_el et = neon_check_type (2, NS_DQI,
15315    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
15316  int imm = inst.operands[2].imm;
15317  /* This gets the bounds check, size encoding and immediate bits calculation
15318     right.  */
15319  et.size /= 2;
15320
15321  /* If the immediate is zero then this is a pseudo-instruction for
15322     VMOVN.I<size> <Dd>, <Qm>.  */
15323  if (imm == 0)
15324    {
15325      inst.operands[2].present = 0;
15326      inst.instruction = N_MNEM_vmovn;
15327      do_neon_movn ();
15328      return;
15329    }
15330
15331  constraint (imm < 1 || (unsigned)imm > et.size,
15332	      _("immediate out of range for narrowing operation"));
15333  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
15334}
15335
15336static void
15337do_neon_shll (void)
15338{
15339  /* FIXME: Type checking when lengthening.  */
15340  struct neon_type_el et = neon_check_type (2, NS_QDI,
15341    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
15342  unsigned imm = inst.operands[2].imm;
15343
15344  if (imm == et.size)
15345    {
15346      /* Maximum shift variant.  */
15347      NEON_ENCODE (INTEGER, inst);
15348      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15349      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15350      inst.instruction |= LOW4 (inst.operands[1].reg);
15351      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15352      inst.instruction |= neon_logbits (et.size) << 18;
15353
15354      neon_dp_fixup (&inst);
15355    }
15356  else
15357    {
15358      /* A more-specific type check for non-max versions.  */
15359      et = neon_check_type (2, NS_QDI,
15360	N_EQK | N_DBL, N_SU_32 | N_KEY);
15361      NEON_ENCODE (IMMED, inst);
15362      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
15363    }
15364}
15365
15366/* Check the various types for the VCVT instruction, and return which version
15367   the current instruction is.  */
15368
15369#define CVT_FLAVOUR_VAR							      \
15370  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg,   "ftosls", "ftosis", "ftosizs") \
15371  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg,   "ftouls", "ftouis", "ftouizs") \
15372  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg,   "fsltos", "fsitos", NULL)      \
15373  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg,   "fultos", "fuitos", NULL)      \
15374  /* Half-precision conversions.  */					      \
15375  CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
15376  CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
15377  CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL)	      \
15378  CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL)	      \
15379  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg,   NULL,     NULL,     NULL)      \
15380  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg,   NULL,     NULL,     NULL)      \
15381  /* New VCVT instructions introduced by ARMv8.2 fp16 extension.	      \
15382     Compared with single/double precision variants, only the co-processor    \
15383     field is different, so the encoding flow is reused here.  */	      \
15384  CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL)    \
15385  CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL)    \
15386  CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
15387  CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
15388  /* VFP instructions.  */						      \
15389  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP,       NULL,     "fcvtsd", NULL)      \
15390  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP,       NULL,     "fcvtds", NULL)      \
15391  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
15392  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
15393  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL)      \
15394  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL)      \
15395  /* VFP instructions with bitshift.  */				      \
15396  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL,     NULL)      \
15397  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL,     NULL)      \
15398  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL,     NULL)      \
15399  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL,     NULL)      \
15400  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL,     NULL)      \
15401  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL,     NULL)      \
15402  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL,     NULL)      \
15403  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL,     NULL)
15404
15405#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
15406  neon_cvt_flavour_##C,
15407
15408/* The different types of conversions we can do.  */
15409enum neon_cvt_flavour
15410{
15411  CVT_FLAVOUR_VAR
15412  neon_cvt_flavour_invalid,
15413  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
15414};
15415
15416#undef CVT_VAR
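/* CVT_FLAVOUR_VAR is an X-macro: CVT_VAR is redefined before each expansion so
   that the single table above yields several parallel ones.  The expansion just
   above produced the enum (e.g. the s32_f32 row became neon_cvt_flavour_s32_f32);
   get_neon_cvt_flavour below turns each row into a type-check probe, and
   do_vfp_nsyn_cvt / do_vfp_nsyn_cvtz reuse the rows to build per-flavour opcode
   name tables ("ftosls" / "ftosis" / "ftosizs" for that same row).  */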
15417
15418static enum neon_cvt_flavour
15419get_neon_cvt_flavour (enum neon_shape rs)
15420{
15421#define CVT_VAR(C,X,Y,R,BSN,CN,ZN)			\
15422  et = neon_check_type (2, rs, (R) | (X), (R) | (Y));	\
15423  if (et.type != NT_invtype)				\
15424    {							\
15425      inst.error = NULL;				\
15426      return (neon_cvt_flavour_##C);			\
15427    }
15428
15429  struct neon_type_el et;
15430  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
15431			|| rs == NS_FF) ? N_VFP : 0;
15432  /* The instruction versions which take an immediate take one register
15433     argument, which is extended to the width of the full register. Thus the
15434     "source" and "destination" registers must have the same width.  Hack that
15435     here by making the size equal to the key (wider, in this case) operand.  */
15436  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;
15437
15438  CVT_FLAVOUR_VAR;
15439
15440  return neon_cvt_flavour_invalid;
15441#undef CVT_VAR
15442}
15443
15444enum neon_cvt_mode
15445{
15446  neon_cvt_mode_a,
15447  neon_cvt_mode_n,
15448  neon_cvt_mode_p,
15449  neon_cvt_mode_m,
15450  neon_cvt_mode_z,
15451  neon_cvt_mode_x,
15452  neon_cvt_mode_r
15453};
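/* These values follow the rounding-mode suffixes handled further down:
   do_neon_cvta/n/p/m pass modes a/n/p/m for VCVTA/VCVTN/VCVTP/VCVTM,
   do_neon_cvt passes mode z for the plain (round-towards-zero) VCVT, and
   do_neon_cvtr passes mode x for VCVTR.  */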
15454
15455/* Neon-syntax VFP conversions.  */
15456
15457static void
15458do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
15459{
15460  const char *opname = 0;
15461
15462  if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI
15463      || rs == NS_FHI || rs == NS_HFI)
15464    {
15465      /* Conversions with immediate bitshift.  */
15466      const char *enc[] =
15467	{
15468#define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
15469	  CVT_FLAVOUR_VAR
15470	  NULL
15471#undef CVT_VAR
15472	};
15473
15474      if (flavour < (int) ARRAY_SIZE (enc))
15475	{
15476	  opname = enc[flavour];
15477	  constraint (inst.operands[0].reg != inst.operands[1].reg,
15478		      _("operands 0 and 1 must be the same register"));
15479	  inst.operands[1] = inst.operands[2];
15480	  memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
15481	}
15482    }
15483  else
15484    {
15485      /* Conversions without bitshift.  */
15486      const char *enc[] =
15487	{
15488#define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
15489	  CVT_FLAVOUR_VAR
15490	  NULL
15491#undef CVT_VAR
15492	};
15493
15494      if (flavour < (int) ARRAY_SIZE (enc))
15495	opname = enc[flavour];
15496    }
15497
15498  if (opname)
15499    do_vfp_nsyn_opcode (opname);
15500
15501  /* ARMv8.2 fp16 VCVT instruction.  */
15502  if (flavour == neon_cvt_flavour_s32_f16
15503      || flavour == neon_cvt_flavour_u32_f16
15504      || flavour == neon_cvt_flavour_f16_u32
15505      || flavour == neon_cvt_flavour_f16_s32)
15506    do_scalar_fp16_v82_encode ();
15507}
15508
15509static void
15510do_vfp_nsyn_cvtz (void)
15511{
15512  enum neon_shape rs = neon_select_shape (NS_FH, NS_FF, NS_FD, NS_NULL);
15513  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
15514  const char *enc[] =
15515    {
15516#define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
15517      CVT_FLAVOUR_VAR
15518      NULL
15519#undef CVT_VAR
15520    };
15521
15522  if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
15523    do_vfp_nsyn_opcode (enc[flavour]);
15524}
15525
15526static void
15527do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
15528		      enum neon_cvt_mode mode)
15529{
15530  int sz, op;
15531  int rm;
15532
15533  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
15534     D register operands.  */
15535  if (flavour == neon_cvt_flavour_s32_f64
15536      || flavour == neon_cvt_flavour_u32_f64)
15537    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
15538		_(BAD_FPU));
15539
15540  if (flavour == neon_cvt_flavour_s32_f16
15541      || flavour == neon_cvt_flavour_u32_f16)
15542    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
15543		_(BAD_FP16));
15544
15545  set_it_insn_type (OUTSIDE_IT_INSN);
15546
15547  switch (flavour)
15548    {
15549    case neon_cvt_flavour_s32_f64:
15550      sz = 1;
15551      op = 1;
15552      break;
15553    case neon_cvt_flavour_s32_f32:
15554      sz = 0;
15555      op = 1;
15556      break;
15557    case neon_cvt_flavour_s32_f16:
15558      sz = 0;
15559      op = 1;
15560      break;
15561    case neon_cvt_flavour_u32_f64:
15562      sz = 1;
15563      op = 0;
15564      break;
15565    case neon_cvt_flavour_u32_f32:
15566      sz = 0;
15567      op = 0;
15568      break;
15569    case neon_cvt_flavour_u32_f16:
15570      sz = 0;
15571      op = 0;
15572      break;
15573    default:
15574      first_error (_("invalid instruction shape"));
15575      return;
15576    }
15577
15578  switch (mode)
15579    {
15580    case neon_cvt_mode_a: rm = 0; break;
15581    case neon_cvt_mode_n: rm = 1; break;
15582    case neon_cvt_mode_p: rm = 2; break;
15583    case neon_cvt_mode_m: rm = 3; break;
15584    default: first_error (_("invalid rounding mode")); return;
15585    }
15586
15587  NEON_ENCODE (FPV8, inst);
15588  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
15589  encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
15590  inst.instruction |= sz << 8;
15591
15592  /* ARMv8.2 fp16 VCVT instruction.  */
15593  if (flavour == neon_cvt_flavour_s32_f16
15594      || flavour == neon_cvt_flavour_u32_f16)
15595    do_scalar_fp16_v82_encode ();
15596  inst.instruction |= op << 7;
15597  inst.instruction |= rm << 16;
15598  inst.instruction |= 0xf0000000;
15599  inst.is_neon = TRUE;
15600}
15601
15602static void
15603do_neon_cvt_1 (enum neon_cvt_mode mode)
15604{
15605  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
15606					  NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ,
15607					  NS_FH, NS_HF, NS_FHI, NS_HFI,
15608					  NS_NULL);
15609  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
15610
15611  if (flavour == neon_cvt_flavour_invalid)
15612    return;
15613
15614  /* PR11109: Handle round-to-zero for VCVT conversions.  */
15615  if (mode == neon_cvt_mode_z
15616      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
15617      && (flavour == neon_cvt_flavour_s16_f16
15618	  || flavour == neon_cvt_flavour_u16_f16
15619	  || flavour == neon_cvt_flavour_s32_f32
15620	  || flavour == neon_cvt_flavour_u32_f32
15621	  || flavour == neon_cvt_flavour_s32_f64
15622	  || flavour == neon_cvt_flavour_u32_f64)
15623      && (rs == NS_FD || rs == NS_FF))
15624    {
15625      do_vfp_nsyn_cvtz ();
15626      return;
15627    }
15628
15629  /* ARMv8.2 fp16 VCVT conversions.  */
15630  if (mode == neon_cvt_mode_z
15631      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16)
15632      && (flavour == neon_cvt_flavour_s32_f16
15633	  || flavour == neon_cvt_flavour_u32_f16)
15634      && (rs == NS_FH))
15635    {
15636      do_vfp_nsyn_cvtz ();
15637      do_scalar_fp16_v82_encode ();
15638      return;
15639    }
15640
15641  /* VFP rather than Neon conversions.  */
15642  if (flavour >= neon_cvt_flavour_first_fp)
15643    {
15644      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
15645	do_vfp_nsyn_cvt (rs, flavour);
15646      else
15647	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
15648
15649      return;
15650    }
15651
15652  switch (rs)
15653    {
15654    case NS_DDI:
15655    case NS_QQI:
15656      {
15657	unsigned immbits;
15658	unsigned enctab[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
15659			     0x0000100, 0x1000100, 0x0, 0x1000000};
15660
15661	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15662	  return;
15663
15664	/* Fixed-point conversion with #0 immediate is encoded as an
15665	   integer conversion.  */
15666	if (inst.operands[2].present && inst.operands[2].imm == 0)
15667	  goto int_encode;
15668	NEON_ENCODE (IMMED, inst);
15669	if (flavour != neon_cvt_flavour_invalid)
15670	  inst.instruction |= enctab[flavour];
15671	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15672	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15673	inst.instruction |= LOW4 (inst.operands[1].reg);
15674	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15675	inst.instruction |= neon_quad (rs) << 6;
15676	inst.instruction |= 1 << 21;
15677	if (flavour < neon_cvt_flavour_s16_f16)
15678	  {
15679	    inst.instruction |= 1 << 21;
15680	    immbits = 32 - inst.operands[2].imm;
15681	    inst.instruction |= immbits << 16;
15682	  }
15683	else
15684	  {
15685	    inst.instruction |= 3 << 20;
15686	    immbits = 16 - inst.operands[2].imm;
15687	    inst.instruction |= immbits << 16;
15688	    inst.instruction &= ~(1 << 9);
15689	  }
15690
15691	neon_dp_fixup (&inst);
15692      }
15693      break;
15694
15695    case NS_DD:
15696    case NS_QQ:
15697      if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
15698	{
15699	  NEON_ENCODE (FLOAT, inst);
15700	  set_it_insn_type (OUTSIDE_IT_INSN);
15701
15702	  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
15703	    return;
15704
15705	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15706	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15707	  inst.instruction |= LOW4 (inst.operands[1].reg);
15708	  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15709	  inst.instruction |= neon_quad (rs) << 6;
15710	  inst.instruction |= (flavour == neon_cvt_flavour_u16_f16
15711			       || flavour == neon_cvt_flavour_u32_f32) << 7;
15712	  inst.instruction |= mode << 8;
15713	  if (flavour == neon_cvt_flavour_u16_f16
15714	      || flavour == neon_cvt_flavour_s16_f16)
15715	    /* Mask off the original size bits and reencode them.  */
15716	    inst.instruction = ((inst.instruction & 0xfff3ffff) | (1 << 18));
15717
15718	  if (thumb_mode)
15719	    inst.instruction |= 0xfc000000;
15720	  else
15721	    inst.instruction |= 0xf0000000;
15722	}
15723      else
15724	{
15725    int_encode:
15726	  {
15727	    unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080,
15728				  0x100, 0x180, 0x0, 0x080};
15729
15730	    NEON_ENCODE (INTEGER, inst);
15731
15732	    if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15733	      return;
15734
15735	    if (flavour != neon_cvt_flavour_invalid)
15736	      inst.instruction |= enctab[flavour];
15737
15738	    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15739	    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15740	    inst.instruction |= LOW4 (inst.operands[1].reg);
15741	    inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15742	    inst.instruction |= neon_quad (rs) << 6;
15743	    if (flavour >= neon_cvt_flavour_s16_f16
15744		&& flavour <= neon_cvt_flavour_f16_u16)
15745	      /* Half precision.  */
15746	      inst.instruction |= 1 << 18;
15747	    else
15748	      inst.instruction |= 2 << 18;
15749
15750	    neon_dp_fixup (&inst);
15751	  }
15752	}
15753      break;
15754
15755    /* Half-precision conversions for Advanced SIMD -- neon.  */
15756    case NS_QD:
15757    case NS_DQ:
15758
15759      if (rs == NS_DQ
15760	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
15761	{
15762	  as_bad (_("operand size must match register width"));
15763	  break;
15764	}
15765
15766      if (rs == NS_QD
15767	  && (inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16))
15768	{
15769	  as_bad (_("operand size must match register width"));
15770	  break;
15771	}
15772
15773      if (rs == NS_DQ)
15774	inst.instruction = 0x3b60600;
15775      else
15776	inst.instruction = 0x3b60700;
15777
15778      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15779      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15780      inst.instruction |= LOW4 (inst.operands[1].reg);
15781      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15782      neon_dp_fixup (&inst);
15783      break;
15784
15785    default:
15786      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
15787      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
15788	do_vfp_nsyn_cvt (rs, flavour);
15789      else
15790	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
15791    }
15792}
15793
15794static void
15795do_neon_cvtr (void)
15796{
15797  do_neon_cvt_1 (neon_cvt_mode_x);
15798}
15799
15800static void
15801do_neon_cvt (void)
15802{
15803  do_neon_cvt_1 (neon_cvt_mode_z);
15804}
15805
15806static void
15807do_neon_cvta (void)
15808{
15809  do_neon_cvt_1 (neon_cvt_mode_a);
15810}
15811
15812static void
15813do_neon_cvtn (void)
15814{
15815  do_neon_cvt_1 (neon_cvt_mode_n);
15816}
15817
15818static void
15819do_neon_cvtp (void)
15820{
15821  do_neon_cvt_1 (neon_cvt_mode_p);
15822}
15823
15824static void
15825do_neon_cvtm (void)
15826{
15827  do_neon_cvt_1 (neon_cvt_mode_m);
15828}
15829
15830static void
15831do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
15832{
15833  if (is_double)
15834    mark_feature_used (&fpu_vfp_ext_armv8);
15835
15836  encode_arm_vfp_reg (inst.operands[0].reg,
15837		      (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
15838  encode_arm_vfp_reg (inst.operands[1].reg,
15839		      (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
15840  inst.instruction |= to ? 0x10000 : 0;
15841  inst.instruction |= t ? 0x80 : 0;
15842  inst.instruction |= is_double ? 0x100 : 0;
15843  do_vfp_cond_or_thumb ();
15844}
15845
15846static void
15847do_neon_cvttb_1 (bfd_boolean t)
15848{
15849  enum neon_shape rs = neon_select_shape (NS_HF, NS_HD, NS_FH, NS_FF, NS_FD,
15850					  NS_DF, NS_DH, NS_NULL);
15851
15852  if (rs == NS_NULL)
15853    return;
15854  else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
15855    {
15856      inst.error = NULL;
15857      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
15858    }
15859  else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
15860    {
15861      inst.error = NULL;
15862      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
15863    }
15864  else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
15865    {
15866      /* The VCVTB and VCVTT instructions with D-register operands
15867         don't work for SP only targets.  */
15868      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
15869		  _(BAD_FPU));
15870
15871      inst.error = NULL;
15872      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
15873    }
15874  else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
15875    {
15876      /* The VCVTB and VCVTT instructions with D-register operands
15877         don't work for SP only targets.  */
15878      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
15879		  _(BAD_FPU));
15880
15881      inst.error = NULL;
15882      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
15883    }
15884  else
15885    return;
15886}
15887
15888static void
15889do_neon_cvtb (void)
15890{
15891  do_neon_cvttb_1 (FALSE);
15892}
15893
15894
15895static void
15896do_neon_cvtt (void)
15897{
15898  do_neon_cvttb_1 (TRUE);
15899}
15900
15901static void
15902neon_move_immediate (void)
15903{
15904  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
15905  struct neon_type_el et = neon_check_type (2, rs,
15906    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
15907  unsigned immlo, immhi = 0, immbits;
15908  int op, cmode, float_p;
15909
15910  constraint (et.type == NT_invtype,
15911	      _("operand size must be specified for immediate VMOV"));
15912
15913  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
15914  op = (inst.instruction & (1 << 5)) != 0;
15915
15916  immlo = inst.operands[1].imm;
15917  if (inst.operands[1].regisimm)
15918    immhi = inst.operands[1].reg;
15919
15920  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
15921	      _("immediate has bits set outside the operand size"));
15922
15923  float_p = inst.operands[1].immisfloat;
15924
15925  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
15926					et.size, et.type)) == FAIL)
15927    {
15928      /* Invert relevant bits only.  */
15929      neon_invert_size (&immlo, &immhi, et.size);
15930      /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
15931	 with one or the other; those cases are caught by
15932	 neon_cmode_for_move_imm.  */
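      /* For example (assuming the usual AdvSIMD modified-immediate forms):
	 "vmov.i32 d0, #0xffffff00" has no direct encoding, but its bitwise
	 inverse 0x000000ff does, so the retry below should emit it as
	 VMVN.I32 with immediate 0xff.  */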
15933      op = !op;
15934      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
15935					    &op, et.size, et.type)) == FAIL)
15936	{
15937	  first_error (_("immediate out of range"));
15938	  return;
15939	}
15940    }
15941
15942  inst.instruction &= ~(1 << 5);
15943  inst.instruction |= op << 5;
15944
15945  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15946  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15947  inst.instruction |= neon_quad (rs) << 6;
15948  inst.instruction |= cmode << 8;
15949
15950  neon_write_immbits (immbits);
15951}
15952
15953static void
15954do_neon_mvn (void)
15955{
15956  if (inst.operands[1].isreg)
15957    {
15958      enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15959
15960      NEON_ENCODE (INTEGER, inst);
15961      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15962      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15963      inst.instruction |= LOW4 (inst.operands[1].reg);
15964      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15965      inst.instruction |= neon_quad (rs) << 6;
15966    }
15967  else
15968    {
15969      NEON_ENCODE (IMMED, inst);
15970      neon_move_immediate ();
15971    }
15972
15973  neon_dp_fixup (&inst);
15974}
15975
15976/* Encode instructions of form:
15977
15978  |28/24|23|22|21 20|19 16|15 12|11    8|7|6|5|4|3  0|
15979  |  U  |x |D |size | Rn  | Rd  |x x x x|N|x|M|x| Rm |  */
15980
15981static void
15982neon_mixed_length (struct neon_type_el et, unsigned size)
15983{
15984  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15985  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15986  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
15987  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
15988  inst.instruction |= LOW4 (inst.operands[2].reg);
15989  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
15990  inst.instruction |= (et.type == NT_unsigned) << 24;
15991  inst.instruction |= neon_logbits (size) << 20;
15992
15993  neon_dp_fixup (&inst);
15994}
15995
15996static void
15997do_neon_dyadic_long (void)
15998{
15999  /* FIXME: Type checking for lengthening op.  */
16000  struct neon_type_el et = neon_check_type (3, NS_QDD,
16001    N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
16002  neon_mixed_length (et, et.size);
16003}
16004
16005static void
16006do_neon_abal (void)
16007{
16008  struct neon_type_el et = neon_check_type (3, NS_QDD,
16009    N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
16010  neon_mixed_length (et, et.size);
16011}
16012
16013static void
16014neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
16015{
16016  if (inst.operands[2].isscalar)
16017    {
16018      struct neon_type_el et = neon_check_type (3, NS_QDS,
16019	N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
16020      NEON_ENCODE (SCALAR, inst);
16021      neon_mul_mac (et, et.type == NT_unsigned);
16022    }
16023  else
16024    {
16025      struct neon_type_el et = neon_check_type (3, NS_QDD,
16026	N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
16027      NEON_ENCODE (INTEGER, inst);
16028      neon_mixed_length (et, et.size);
16029    }
16030}
16031
16032static void
16033do_neon_mac_maybe_scalar_long (void)
16034{
16035  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
16036}
16037
16038static void
16039do_neon_dyadic_wide (void)
16040{
16041  struct neon_type_el et = neon_check_type (3, NS_QQD,
16042    N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
16043  neon_mixed_length (et, et.size);
16044}
16045
16046static void
16047do_neon_dyadic_narrow (void)
16048{
16049  struct neon_type_el et = neon_check_type (3, NS_QDD,
16050    N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
16051  /* Operand sign is unimportant, and the U bit is part of the opcode,
16052     so force the operand type to integer.  */
16053  et.type = NT_integer;
16054  neon_mixed_length (et, et.size / 2);
16055}
16056
16057static void
16058do_neon_mul_sat_scalar_long (void)
16059{
16060  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
16061}
16062
16063static void
16064do_neon_vmull (void)
16065{
16066  if (inst.operands[2].isscalar)
16067    do_neon_mac_maybe_scalar_long ();
16068  else
16069    {
16070      struct neon_type_el et = neon_check_type (3, NS_QDD,
16071	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);
16072
16073      if (et.type == NT_poly)
16074	NEON_ENCODE (POLY, inst);
16075      else
16076	NEON_ENCODE (INTEGER, inst);
16077
16078      /* For polynomial encoding the U bit must be zero, and the size must
16079	 be 8 (encoded as 0b00) or, on ARMv8 or later, 64 (encoded,
16080	 non-obviously, as 0b10).  */
16081      if (et.size == 64)
16082	{
16083	  /* Check we're on the correct architecture.  */
16084	  if (!mark_feature_used (&fpu_crypto_ext_armv8))
16085	    inst.error =
16086	      _("Instruction form not available on this architecture.");
16087
16088	  et.size = 32;
16089	}
16090
16091      neon_mixed_length (et, et.size);
16092    }
16093}
16094
16095static void
16096do_neon_ext (void)
16097{
16098  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
16099  struct neon_type_el et = neon_check_type (3, rs,
16100    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
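  /* The user-supplied index counts elements, but the immediate field written
     below (bits [11:8]) counts bytes, so scale by the element size: e.g.
     "vext.16 d0, d1, d2, #2" gives imm = 2 * 16 / 8 = 4.  */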
16101  unsigned imm = (inst.operands[3].imm * et.size) / 8;
16102
16103  constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
16104	      _("shift out of range"));
16105  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16106  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16107  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16108  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16109  inst.instruction |= LOW4 (inst.operands[2].reg);
16110  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16111  inst.instruction |= neon_quad (rs) << 6;
16112  inst.instruction |= imm << 8;
16113
16114  neon_dp_fixup (&inst);
16115}
16116
16117static void
16118do_neon_rev (void)
16119{
16120  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16121  struct neon_type_el et = neon_check_type (2, rs,
16122    N_EQK, N_8 | N_16 | N_32 | N_KEY);
16123  unsigned op = (inst.instruction >> 7) & 3;
16124  /* N (width of reversed regions) is encoded as part of the bitmask. We
16125     extract it here to check the elements to be reversed are smaller.
16126     Otherwise we'd get a reserved instruction.  */
16127  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
16128  gas_assert (elsize != 0);
16129  constraint (et.size >= elsize,
16130	      _("elements must be smaller than reversal region"));
16131  neon_two_same (neon_quad (rs), 1, et.size);
16132}
16133
16134static void
16135do_neon_dup (void)
16136{
16137  if (inst.operands[1].isscalar)
16138    {
16139      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
16140      struct neon_type_el et = neon_check_type (2, rs,
16141	N_EQK, N_8 | N_16 | N_32 | N_KEY);
16142      unsigned sizebits = et.size >> 3;
16143      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
16144      int logsize = neon_logbits (et.size);
16145      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;
16146
16147      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
16148	return;
16149
16150      NEON_ENCODE (SCALAR, inst);
16151      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16152      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16153      inst.instruction |= LOW4 (dm);
16154      inst.instruction |= HI1 (dm) << 5;
16155      inst.instruction |= neon_quad (rs) << 6;
16156      inst.instruction |= x << 17;
16157      inst.instruction |= sizebits << 16;
16158
16159      neon_dp_fixup (&inst);
16160    }
16161  else
16162    {
16163      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
16164      struct neon_type_el et = neon_check_type (2, rs,
16165	N_8 | N_16 | N_32 | N_KEY, N_EQK);
16166      /* Duplicate ARM register to lanes of vector.  */
16167      NEON_ENCODE (ARMREG, inst);
16168      switch (et.size)
16169	{
16170	case 8:  inst.instruction |= 0x400000; break;
16171	case 16: inst.instruction |= 0x000020; break;
16172	case 32: inst.instruction |= 0x000000; break;
16173	default: break;
16174	}
16175      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
16176      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
16177      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
16178      inst.instruction |= neon_quad (rs) << 21;
16179      /* The encoding for this instruction is identical for the ARM and Thumb
16180	 variants, except for the condition field.  */
16181      do_vfp_cond_or_thumb ();
16182    }
16183}
16184
16185/* VMOV has particularly many variations. It can be one of:
16186     0. VMOV<c><q> <Qd>, <Qm>
16187     1. VMOV<c><q> <Dd>, <Dm>
16188   (Register operations, which are VORR with Rm = Rn.)
16189     2. VMOV<c><q>.<dt> <Qd>, #<imm>
16190     3. VMOV<c><q>.<dt> <Dd>, #<imm>
16191   (Immediate loads.)
16192     4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
16193   (ARM register to scalar.)
16194     5. VMOV<c><q> <Dm>, <Rd>, <Rn>
16195   (Two ARM registers to vector.)
16196     6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
16197   (Scalar to ARM register.)
16198     7. VMOV<c><q> <Rd>, <Rn>, <Dm>
16199   (Vector to two ARM registers.)
16200     8. VMOV.F32 <Sd>, <Sm>
16201     9. VMOV.F64 <Dd>, <Dm>
16202   (VFP register moves.)
16203    10. VMOV.F32 <Sd>, #imm
16204    11. VMOV.F64 <Dd>, #imm
16205   (VFP float immediate load.)
16206    12. VMOV <Rd>, <Sm>
16207   (VFP single to ARM reg.)
16208    13. VMOV <Sd>, <Rm>
16209   (ARM reg to VFP single.)
16210    14. VMOV <Rd>, <Re>, <Sn>, <Sm>
16211   (Two ARM regs to two VFP singles.)
16212    15. VMOV <Sd>, <Se>, <Rn>, <Rm>
16213   (Two VFP singles to two ARM regs.)
16214
16215   These cases can be disambiguated using neon_select_shape, except cases 1/9
16216   and 3/11 which depend on the operand type too.
16217
16218   All the encoded bits are hardcoded by this function.
16219
16220   Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
16221   Cases 5, 7 may be used with VFPv2 and above.
16222
16223   FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
16224   can specify a type where it makes no sense to, and it is ignored).  */
16225
16226static void
16227do_neon_mov (void)
16228{
16229  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
16230					  NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR,
16231					  NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
16232					  NS_HR, NS_RH, NS_HI, NS_NULL);
16233  struct neon_type_el et;
16234  const char *ldconst = 0;
16235
16236  switch (rs)
16237    {
16238    case NS_DD:  /* case 1/9.  */
16239      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
16240      /* It is not an error here if no type is given.  */
16241      inst.error = NULL;
16242      if (et.type == NT_float && et.size == 64)
16243	{
16244	  do_vfp_nsyn_opcode ("fcpyd");
16245	  break;
16246	}
16247      /* fall through.  */
16248
16249    case NS_QQ:  /* case 0/1.  */
16250      {
16251	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
16252	  return;
16253	/* The architecture manual I have doesn't explicitly state which
16254	   value the U bit should have for register->register moves, but
16255	   the equivalent VORR instruction has U = 0, so do that.  */
16256	inst.instruction = 0x0200110;
16257	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16258	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16259	inst.instruction |= LOW4 (inst.operands[1].reg);
16260	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16261	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16262	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16263	inst.instruction |= neon_quad (rs) << 6;
16264
16265	neon_dp_fixup (&inst);
16266      }
16267      break;
16268
16269    case NS_DI:  /* case 3/11.  */
16270      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
16271      inst.error = NULL;
16272      if (et.type == NT_float && et.size == 64)
16273	{
16274	  /* case 11 (fconstd).  */
16275	  ldconst = "fconstd";
16276	  goto encode_fconstd;
16277	}
16278      /* fall through.  */
16279
16280    case NS_QI:  /* case 2/3.  */
16281      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
16282	return;
16283      inst.instruction = 0x0800010;
16284      neon_move_immediate ();
16285      neon_dp_fixup (&inst);
16286      break;
16287
16288    case NS_SR:  /* case 4.  */
16289      {
16290	unsigned bcdebits = 0;
16291	int logsize;
16292	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
16293	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);
16294
16295	/* .<size> is optional here, defaulting to .32. */
16296	if (inst.vectype.elems == 0
16297	    && inst.operands[0].vectype.type == NT_invtype
16298	    && inst.operands[1].vectype.type == NT_invtype)
16299	  {
16300	    inst.vectype.el[0].type = NT_untyped;
16301	    inst.vectype.el[0].size = 32;
16302	    inst.vectype.elems = 1;
16303	  }
16304
16305	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
16306	logsize = neon_logbits (et.size);
16307
16308	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
16309		    _(BAD_FPU));
16310	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
16311		    && et.size != 32, _(BAD_FPU));
16312	constraint (et.type == NT_invtype, _("bad type for scalar"));
16313	constraint (x >= 64 / et.size, _("scalar index out of range"));
16314
16315	switch (et.size)
16316	  {
16317	  case 8:  bcdebits = 0x8; break;
16318	  case 16: bcdebits = 0x1; break;
16319	  case 32: bcdebits = 0x0; break;
16320	  default: ;
16321	  }
16322
16323	bcdebits |= x << logsize;
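	/* bcdebits now holds the size pattern with the scalar index merged
	   into the spare bits; the two ORs below place bits [1:0] in insn
	   bits [6:5] and bits [3:2] in insn bits [22:21].  */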
16324
16325	inst.instruction = 0xe000b10;
16326	do_vfp_cond_or_thumb ();
16327	inst.instruction |= LOW4 (dn) << 16;
16328	inst.instruction |= HI1 (dn) << 7;
16329	inst.instruction |= inst.operands[1].reg << 12;
16330	inst.instruction |= (bcdebits & 3) << 5;
16331	inst.instruction |= (bcdebits >> 2) << 21;
16332      }
16333      break;
16334
16335    case NS_DRR:  /* case 5 (fmdrr).  */
16336      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
16337		  _(BAD_FPU));
16338
16339      inst.instruction = 0xc400b10;
16340      do_vfp_cond_or_thumb ();
16341      inst.instruction |= LOW4 (inst.operands[0].reg);
16342      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
16343      inst.instruction |= inst.operands[1].reg << 12;
16344      inst.instruction |= inst.operands[2].reg << 16;
16345      break;
16346
16347    case NS_RS:  /* case 6.  */
16348      {
16349	unsigned logsize;
16350	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
16351	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
16352	unsigned abcdebits = 0;
16353
16354	/* .<dt> is optional here, defaulting to .32. */
16355	if (inst.vectype.elems == 0
16356	    && inst.operands[0].vectype.type == NT_invtype
16357	    && inst.operands[1].vectype.type == NT_invtype)
16358	  {
16359	    inst.vectype.el[0].type = NT_untyped;
16360	    inst.vectype.el[0].size = 32;
16361	    inst.vectype.elems = 1;
16362	  }
16363
16364	et = neon_check_type (2, NS_NULL,
16365			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
16366	logsize = neon_logbits (et.size);
16367
16368	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
16369		    _(BAD_FPU));
16370	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
16371		    && et.size != 32, _(BAD_FPU));
16372	constraint (et.type == NT_invtype, _("bad type for scalar"));
16373	constraint (x >= 64 / et.size, _("scalar index out of range"));
16374
16375	switch (et.size)
16376	  {
16377	  case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
16378	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
16379	  case 32: abcdebits = 0x00; break;
16380	  default: ;
16381	  }
16382
16383	abcdebits |= x << logsize;
16384	inst.instruction = 0xe100b10;
16385	do_vfp_cond_or_thumb ();
16386	inst.instruction |= LOW4 (dn) << 16;
16387	inst.instruction |= HI1 (dn) << 7;
16388	inst.instruction |= inst.operands[0].reg << 12;
16389	inst.instruction |= (abcdebits & 3) << 5;
16390	inst.instruction |= (abcdebits >> 2) << 21;
16391      }
16392      break;
16393
16394    case NS_RRD:  /* case 7 (fmrrd).  */
16395      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
16396		  _(BAD_FPU));
16397
16398      inst.instruction = 0xc500b10;
16399      do_vfp_cond_or_thumb ();
16400      inst.instruction |= inst.operands[0].reg << 12;
16401      inst.instruction |= inst.operands[1].reg << 16;
16402      inst.instruction |= LOW4 (inst.operands[2].reg);
16403      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16404      break;
16405
16406    case NS_FF:  /* case 8 (fcpys).  */
16407      do_vfp_nsyn_opcode ("fcpys");
16408      break;
16409
16410    case NS_HI:
16411    case NS_FI:  /* case 10 (fconsts).  */
16412      ldconst = "fconsts";
16413      encode_fconstd:
16414      if (is_quarter_float (inst.operands[1].imm))
16415	{
16416	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
16417	  do_vfp_nsyn_opcode (ldconst);
16418
16419	  /* ARMv8.2 fp16 vmov.f16 instruction.  */
16420	  if (rs == NS_HI)
16421	    do_scalar_fp16_v82_encode ();
16422	}
16423      else
16424	first_error (_("immediate out of range"));
16425      break;
16426
16427    case NS_RH:
16428    case NS_RF:  /* case 12 (fmrs).  */
16429      do_vfp_nsyn_opcode ("fmrs");
16430      /* ARMv8.2 fp16 vmov.f16 instruction.  */
16431      if (rs == NS_RH)
16432	do_scalar_fp16_v82_encode ();
16433      break;
16434
16435    case NS_HR:
16436    case NS_FR:  /* case 13 (fmsr).  */
16437      do_vfp_nsyn_opcode ("fmsr");
16438      /* ARMv8.2 fp16 vmov.f16 instruction.  */
16439      if (rs == NS_HR)
16440	do_scalar_fp16_v82_encode ();
16441      break;
16442
16443    /* The encoders for the fmrrs and fmsrr instructions expect three operands
16444       (one of which is a list), but we have parsed four.  Do some fiddling to
16445       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
16446       expect.  */
16447    case NS_RRFF:  /* case 14 (fmrrs).  */
16448      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
16449		  _("VFP registers must be adjacent"));
16450      inst.operands[2].imm = 2;
16451      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
16452      do_vfp_nsyn_opcode ("fmrrs");
16453      break;
16454
16455    case NS_FFRR:  /* case 15 (fmsrr).  */
16456      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
16457		  _("VFP registers must be adjacent"));
16458      inst.operands[1] = inst.operands[2];
16459      inst.operands[2] = inst.operands[3];
16460      inst.operands[0].imm = 2;
16461      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
16462      do_vfp_nsyn_opcode ("fmsrr");
16463      break;
16464
16465    case NS_NULL:
16466      /* neon_select_shape has determined that the instruction
16467	 shape is wrong and has already set the error message.  */
16468      break;
16469
16470    default:
16471      abort ();
16472    }
16473}
16474
16475static void
16476do_neon_rshift_round_imm (void)
16477{
16478  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
16479  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
16480  int imm = inst.operands[2].imm;
16481
16482  /* imm == 0 case is encoded as VMOV for V{R}SHR.  */
16483  if (imm == 0)
16484    {
16485      inst.operands[2].present = 0;
16486      do_neon_mov ();
16487      return;
16488    }
16489
16490  constraint (imm < 1 || (unsigned)imm > et.size,
16491	      _("immediate out of range for shift"));
16492  neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
16493		  et.size - imm);
16494}
16495
16496static void
16497do_neon_movhf (void)
16498{
16499  enum neon_shape rs = neon_select_shape (NS_HH, NS_NULL);
16500  constraint (rs != NS_HH, _("invalid suffix"));
16501
16502  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
16503	      _(BAD_FPU));
16504
16505  do_vfp_sp_monadic ();
16506
16507  inst.is_neon = 1;
16508  inst.instruction |= 0xf0000000;
16509}
16510
16511static void
16512do_neon_movl (void)
16513{
16514  struct neon_type_el et = neon_check_type (2, NS_QD,
16515    N_EQK | N_DBL, N_SU_32 | N_KEY);
16516  unsigned sizebits = et.size >> 3;
16517  inst.instruction |= sizebits << 19;
16518  neon_two_same (0, et.type == NT_unsigned, -1);
16519}
16520
16521static void
16522do_neon_trn (void)
16523{
16524  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16525  struct neon_type_el et = neon_check_type (2, rs,
16526    N_EQK, N_8 | N_16 | N_32 | N_KEY);
16527  NEON_ENCODE (INTEGER, inst);
16528  neon_two_same (neon_quad (rs), 1, et.size);
16529}
16530
16531static void
16532do_neon_zip_uzp (void)
16533{
16534  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16535  struct neon_type_el et = neon_check_type (2, rs,
16536    N_EQK, N_8 | N_16 | N_32 | N_KEY);
16537  if (rs == NS_DD && et.size == 32)
16538    {
16539      /* Special case: encode as VTRN.32 <Dd>, <Dm>.  */
16540      inst.instruction = N_MNEM_vtrn;
16541      do_neon_trn ();
16542      return;
16543    }
16544  neon_two_same (neon_quad (rs), 1, et.size);
16545}
16546
16547static void
16548do_neon_sat_abs_neg (void)
16549{
16550  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16551  struct neon_type_el et = neon_check_type (2, rs,
16552    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
16553  neon_two_same (neon_quad (rs), 1, et.size);
16554}
16555
16556static void
16557do_neon_pair_long (void)
16558{
16559  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16560  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
16561  /* Unsigned is encoded in OP field (bit 7) for these instructions.  */
16562  inst.instruction |= (et.type == NT_unsigned) << 7;
16563  neon_two_same (neon_quad (rs), 1, et.size);
16564}
16565
16566static void
16567do_neon_recip_est (void)
16568{
16569  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16570  struct neon_type_el et = neon_check_type (2, rs,
16571    N_EQK | N_FLT, N_F_16_32 | N_U32 | N_KEY);
16572  inst.instruction |= (et.type == NT_float) << 8;
16573  neon_two_same (neon_quad (rs), 1, et.size);
16574}
16575
16576static void
16577do_neon_cls (void)
16578{
16579  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16580  struct neon_type_el et = neon_check_type (2, rs,
16581    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
16582  neon_two_same (neon_quad (rs), 1, et.size);
16583}
16584
16585static void
16586do_neon_clz (void)
16587{
16588  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16589  struct neon_type_el et = neon_check_type (2, rs,
16590    N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
16591  neon_two_same (neon_quad (rs), 1, et.size);
16592}
16593
16594static void
16595do_neon_cnt (void)
16596{
16597  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16598  struct neon_type_el et = neon_check_type (2, rs,
16599    N_EQK | N_INT, N_8 | N_KEY);
16600  neon_two_same (neon_quad (rs), 1, et.size);
16601}
16602
16603static void
16604do_neon_swp (void)
16605{
16606  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16607  neon_two_same (neon_quad (rs), 1, -1);
16608}
16609
16610static void
16611do_neon_tbl_tbx (void)
16612{
16613  unsigned listlenbits;
16614  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
16615
16616  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
16617    {
16618      first_error (_("bad list length for table lookup"));
16619      return;
16620    }
16621
16622  listlenbits = inst.operands[1].imm - 1;
16623  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16624  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16625  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16626  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16627  inst.instruction |= LOW4 (inst.operands[2].reg);
16628  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16629  inst.instruction |= listlenbits << 8;
16630
16631  neon_dp_fixup (&inst);
16632}
16633
16634static void
16635do_neon_ldm_stm (void)
16636{
16637  /* P, U and L bits are part of bitmask.  */
16638  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
16639  unsigned offsetbits = inst.operands[1].imm * 2;
16640
16641  if (inst.operands[1].issingle)
16642    {
16643      do_vfp_nsyn_ldm_stm (is_dbmode);
16644      return;
16645    }
16646
16647  constraint (is_dbmode && !inst.operands[0].writeback,
16648	      _("writeback (!) must be used for VLDMDB and VSTMDB"));
16649
16650  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
16651	      _("register list must contain at least 1 and at most 16 "
16652		"registers"));
16653
16654  inst.instruction |= inst.operands[0].reg << 16;
16655  inst.instruction |= inst.operands[0].writeback << 21;
16656  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
16657  inst.instruction |= HI1 (inst.operands[1].reg) << 22;
16658
16659  inst.instruction |= offsetbits;
16660
16661  do_vfp_cond_or_thumb ();
16662}
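
/* Example (illustrative): "vldmdb r0!, {d0-d3}" is accepted here -- DB mode
   with writeback, four D registers giving an offset field of 4 * 2 = 8
   words -- whereas "vldmdb r0, {d0-d3}" is rejected with "writeback (!)
   must be used for VLDMDB and VSTMDB".  */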
16663
16664static void
16665do_neon_ldr_str (void)
16666{
16667  int is_ldr = (inst.instruction & (1 << 20)) != 0;
16668
16669  /* Use of PC in vstr in ARM mode is deprecated in ARMv7, and is
16670     UNPREDICTABLE in Thumb mode.  */
16671  if (!is_ldr
16672      && inst.operands[1].reg == REG_PC
16673      && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
16674    {
16675      if (thumb_mode)
16676	inst.error = _("Use of PC here is UNPREDICTABLE");
16677      else if (warn_on_deprecated)
16678	as_tsktsk (_("Use of PC here is deprecated"));
16679    }
16680
16681  if (inst.operands[0].issingle)
16682    {
16683      if (is_ldr)
16684	do_vfp_nsyn_opcode ("flds");
16685      else
16686	do_vfp_nsyn_opcode ("fsts");
16687
16688      /* ARMv8.2 vldr.16/vstr.16 instruction.  */
16689      if (inst.vectype.el[0].size == 16)
16690	do_scalar_fp16_v82_encode ();
16691    }
16692  else
16693    {
16694      if (is_ldr)
16695	do_vfp_nsyn_opcode ("fldd");
16696      else
16697	do_vfp_nsyn_opcode ("fstd");
16698    }
16699}
16700
16701/* "interleave" version also handles non-interleaving register VLD1/VST1
16702   instructions.  */
16703
16704static void
16705do_neon_ld_st_interleave (void)
16706{
16707  struct neon_type_el et = neon_check_type (1, NS_NULL,
16708					    N_8 | N_16 | N_32 | N_64);
16709  unsigned alignbits = 0;
16710  unsigned idx;
16711  /* The bits in this table go:
16712     0: register stride of one (0) or two (1)
16713     1,2: register list length, minus one (1, 2, 3, 4).
16714     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
16715     We use -1 for invalid entries.  */
16716  const int typetable[] =
16717    {
16718      0x7,  -1, 0xa,  -1, 0x6,  -1, 0x2,  -1, /* VLD1 / VST1.  */
16719       -1,  -1, 0x8, 0x9,  -1,  -1, 0x3,  -1, /* VLD2 / VST2.  */
16720       -1,  -1,  -1,  -1, 0x4, 0x5,  -1,  -1, /* VLD3 / VST3.  */
16721       -1,  -1,  -1,  -1,  -1,  -1, 0x0, 0x1  /* VLD4 / VST4.  */
16722    };
16723  int typebits;
16724
16725  if (et.type == NT_invtype)
16726    return;
16727
16728  if (inst.operands[1].immisalign)
16729    switch (inst.operands[1].imm >> 8)
16730      {
16731      case 64: alignbits = 1; break;
16732      case 128:
16733	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
16734	    && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
16735	  goto bad_alignment;
16736	alignbits = 2;
16737	break;
16738      case 256:
16739	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
16740	  goto bad_alignment;
16741	alignbits = 3;
16742	break;
16743      default:
16744      bad_alignment:
16745	first_error (_("bad alignment"));
16746	return;
16747      }
16748
16749  inst.instruction |= alignbits << 4;
16750  inst.instruction |= neon_logbits (et.size) << 6;
16751
16752  /* Bits [4:6] of the immediate in a list specifier encode register stride
16753     (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
16754     VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
16755     up the right value for "type" in a table based on this value and the given
16756     list style, then stick it back.  */
16757  idx = ((inst.operands[0].imm >> 4) & 7)
16758	| (((inst.instruction >> 8) & 3) << 3);
16759
16760  typebits = typetable[idx];
16761
16762  constraint (typebits == -1, _("bad list type for instruction"));
16763  constraint (((inst.instruction >> 8) & 3) && et.size == 64,
16764	      _("bad element type for instruction"));
16765
16766  inst.instruction &= ~0xf00;
16767  inst.instruction |= typebits << 8;
16768}
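
/* Worked example (illustrative): for "vld2.16 {d0,d1}, [r0]" the list
   specifier has register stride one and a length (minus one) of 1, so
   ((imm >> 4) & 7) == 2; VLD2 puts 1 in bits [9:8] of the initial bitmask,
   giving idx == 0xa, and typetable[0xa] == 0x8 -- the two-register
   VLD2/VST2 type that is written back into bits [11:8].  */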
16769
16770/* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
16771   *DO_ALIGNMENT is set to 1 if the relevant alignment bit should be set, 0
16772   otherwise. The variable arguments are a list of pairs of legal (size, align)
16773   values, terminated with -1.  */
16774
16775static int
16776neon_alignment_bit (int size, int align, int *do_alignment, ...)
16777{
16778  va_list ap;
16779  int result = FAIL, thissize, thisalign;
16780
16781  if (!inst.operands[1].immisalign)
16782    {
16783      *do_alignment = 0;
16784      return SUCCESS;
16785    }
16786
16787  va_start (ap, do_alignment);
16788
16789  do
16790    {
16791      thissize = va_arg (ap, int);
16792      if (thissize == -1)
16793	break;
16794      thisalign = va_arg (ap, int);
16795
16796      if (size == thissize && align == thisalign)
16797	result = SUCCESS;
16798    }
16799  while (result != SUCCESS);
16800
16801  va_end (ap);
16802
16803  if (result == SUCCESS)
16804    *do_alignment = 1;
16805  else
16806    first_error (_("unsupported alignment for instruction"));
16807
16808  return result;
16809}
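
/* For example (a sketch mirroring the VLD1/VST1 single-lane case below):

     neon_alignment_bit (et.size, align, &do_alignment, 16, 16, 32, 32, -1);

   accepts "vld1.32 {d0[1]}, [r0:32]" (size 32, alignment 32) but rejects
   "vld1.32 {d0[1]}, [r0:64]" with "unsupported alignment for instruction".  */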
16810
16811static void
16812do_neon_ld_st_lane (void)
16813{
16814  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
16815  int align_good, do_alignment = 0;
16816  int logsize = neon_logbits (et.size);
16817  int align = inst.operands[1].imm >> 8;
16818  int n = (inst.instruction >> 8) & 3;
16819  int max_el = 64 / et.size;
16820
16821  if (et.type == NT_invtype)
16822    return;
16823
16824  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
16825	      _("bad list length"));
16826  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
16827	      _("scalar index out of range"));
16828  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
16829	      && et.size == 8,
16830	      _("stride of 2 unavailable when element size is 8"));
16831
16832  switch (n)
16833    {
16834    case 0:  /* VLD1 / VST1.  */
16835      align_good = neon_alignment_bit (et.size, align, &do_alignment, 16, 16,
16836				       32, 32, -1);
16837      if (align_good == FAIL)
16838	return;
16839      if (do_alignment)
16840	{
16841	  unsigned alignbits = 0;
16842	  switch (et.size)
16843	    {
16844	    case 16: alignbits = 0x1; break;
16845	    case 32: alignbits = 0x3; break;
16846	    default: ;
16847	    }
16848	  inst.instruction |= alignbits << 4;
16849	}
16850      break;
16851
16852    case 1:  /* VLD2 / VST2.  */
16853      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 16,
16854		      16, 32, 32, 64, -1);
16855      if (align_good == FAIL)
16856	return;
16857      if (do_alignment)
16858	inst.instruction |= 1 << 4;
16859      break;
16860
16861    case 2:  /* VLD3 / VST3.  */
16862      constraint (inst.operands[1].immisalign,
16863		  _("can't use alignment with this instruction"));
16864      break;
16865
16866    case 3:  /* VLD4 / VST4.  */
16867      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
16868				       16, 64, 32, 64, 32, 128, -1);
16869      if (align_good == FAIL)
16870	return;
16871      if (do_alignment)
16872	{
16873	  unsigned alignbits = 0;
16874	  switch (et.size)
16875	    {
16876	    case 8:  alignbits = 0x1; break;
16877	    case 16: alignbits = 0x1; break;
16878	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
16879	    default: ;
16880	    }
16881	  inst.instruction |= alignbits << 4;
16882	}
16883      break;
16884
16885    default: ;
16886    }
16887
16888  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
16889  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
16890    inst.instruction |= 1 << (4 + logsize);
16891
16892  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
16893  inst.instruction |= logsize << 10;
16894}
16895
16896/* Encode single n-element structure to all lanes VLD<n> instructions.  */
16897
16898static void
16899do_neon_ld_dup (void)
16900{
16901  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
16902  int align_good, do_alignment = 0;
16903
16904  if (et.type == NT_invtype)
16905    return;
16906
16907  switch ((inst.instruction >> 8) & 3)
16908    {
16909    case 0:  /* VLD1.  */
16910      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
16911      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
16912				       &do_alignment, 16, 16, 32, 32, -1);
16913      if (align_good == FAIL)
16914	return;
16915      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
16916	{
16917	case 1: break;
16918	case 2: inst.instruction |= 1 << 5; break;
16919	default: first_error (_("bad list length")); return;
16920	}
16921      inst.instruction |= neon_logbits (et.size) << 6;
16922      break;
16923
16924    case 1:  /* VLD2.  */
16925      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
16926				       &do_alignment, 8, 16, 16, 32, 32, 64,
16927				       -1);
16928      if (align_good == FAIL)
16929	return;
16930      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
16931		  _("bad list length"));
16932      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
16933	inst.instruction |= 1 << 5;
16934      inst.instruction |= neon_logbits (et.size) << 6;
16935      break;
16936
16937    case 2:  /* VLD3.  */
16938      constraint (inst.operands[1].immisalign,
16939		  _("can't use alignment with this instruction"));
16940      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
16941		  _("bad list length"));
16942      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
16943	inst.instruction |= 1 << 5;
16944      inst.instruction |= neon_logbits (et.size) << 6;
16945      break;
16946
16947    case 3:  /* VLD4.  */
16948      {
16949	int align = inst.operands[1].imm >> 8;
16950	align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
16951					 16, 64, 32, 64, 32, 128, -1);
16952	if (align_good == FAIL)
16953	  return;
16954	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
16955		    _("bad list length"));
16956	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
16957	  inst.instruction |= 1 << 5;
16958	if (et.size == 32 && align == 128)
16959	  inst.instruction |= 0x3 << 6;
16960	else
16961	  inst.instruction |= neon_logbits (et.size) << 6;
16962      }
16963      break;
16964
16965    default: ;
16966    }
16967
16968  inst.instruction |= do_alignment << 4;
16969}
16970
16971/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
16972   apart from bits [11:4]).  */
16973
16974static void
16975do_neon_ldx_stx (void)
16976{
16977  if (inst.operands[1].isreg)
16978    constraint (inst.operands[1].reg == REG_PC, BAD_PC);
16979
16980  switch (NEON_LANE (inst.operands[0].imm))
16981    {
16982    case NEON_INTERLEAVE_LANES:
16983      NEON_ENCODE (INTERLV, inst);
16984      do_neon_ld_st_interleave ();
16985      break;
16986
16987    case NEON_ALL_LANES:
16988      NEON_ENCODE (DUP, inst);
16989      if (inst.instruction == N_INV)
16990	{
16991	  first_error (_("only loads support such operands"));
16992	  break;
16993	}
16994      do_neon_ld_dup ();
16995      break;
16996
16997    default:
16998      NEON_ENCODE (LANE, inst);
16999      do_neon_ld_st_lane ();
17000    }
17001
17002  /* L bit comes from bit mask.  */
17003  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17004  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17005  inst.instruction |= inst.operands[1].reg << 16;
17006
17007  if (inst.operands[1].postind)
17008    {
17009      int postreg = inst.operands[1].imm & 0xf;
17010      constraint (!inst.operands[1].immisreg,
17011		  _("post-index must be a register"));
17012      constraint (postreg == 0xd || postreg == 0xf,
17013		  _("bad register for post-index"));
17014      inst.instruction |= postreg;
17015    }
17016  else
17017    {
17018      constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
17019      constraint (inst.reloc.exp.X_op != O_constant
17020		  || inst.reloc.exp.X_add_number != 0,
17021		  BAD_ADDR_MODE);
17022
17023      if (inst.operands[1].writeback)
17024	{
17025	  inst.instruction |= 0xd;
17026	}
17027      else
17028	inst.instruction |= 0xf;
17029    }
17030
17031  if (thumb_mode)
17032    inst.instruction |= 0xf9000000;
17033  else
17034    inst.instruction |= 0xf4000000;
17035}
17036
17037/* FP v8.  */
17038static void
17039do_vfp_nsyn_fpv8 (enum neon_shape rs)
17040{
17041  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
17042     D register operands.  */
17043  if (neon_shape_class[rs] == SC_DOUBLE)
17044    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
17045		_(BAD_FPU));
17046
17047  NEON_ENCODE (FPV8, inst);
17048
17049  if (rs == NS_FFF || rs == NS_HHH)
17050    {
17051      do_vfp_sp_dyadic ();
17052
17053      /* ARMv8.2 fp16 instruction.  */
17054      if (rs == NS_HHH)
17055	do_scalar_fp16_v82_encode ();
17056    }
17057  else
17058    do_vfp_dp_rd_rn_rm ();
17059
17060  if (rs == NS_DDD)
17061    inst.instruction |= 0x100;
17062
17063  inst.instruction |= 0xf0000000;
17064}
17065
17066static void
17067do_vsel (void)
17068{
17069  set_it_insn_type (OUTSIDE_IT_INSN);
17070
17071  if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
17072    first_error (_("invalid instruction shape"));
17073}
17074
17075static void
17076do_vmaxnm (void)
17077{
17078  set_it_insn_type (OUTSIDE_IT_INSN);
17079
17080  if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
17081    return;
17082
17083  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
17084    return;
17085
17086  neon_dyadic_misc (NT_untyped, N_F_16_32, 0);
17087}
17088
17089static void
17090do_vrint_1 (enum neon_cvt_mode mode)
17091{
17092  enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_QQ, NS_NULL);
17093  struct neon_type_el et;
17094
17095  if (rs == NS_NULL)
17096    return;
17097
17098  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
17099     D register operands.  */
17100  if (neon_shape_class[rs] == SC_DOUBLE)
17101    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
17102		_(BAD_FPU));
17103
17104  et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY
17105			| N_VFP);
17106  if (et.type != NT_invtype)
17107    {
17108      /* VFP encodings.  */
17109      if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
17110	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
17111	set_it_insn_type (OUTSIDE_IT_INSN);
17112
17113      NEON_ENCODE (FPV8, inst);
17114      if (rs == NS_FF || rs == NS_HH)
17115	do_vfp_sp_monadic ();
17116      else
17117	do_vfp_dp_rd_rm ();
17118
17119      switch (mode)
17120	{
17121	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
17122	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
17123	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
17124	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
17125	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
17126	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
17127	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
17128	default: abort ();
17129	}
17130
17131      inst.instruction |= (rs == NS_DD) << 8;
17132      do_vfp_cond_or_thumb ();
17133
17134      /* ARMv8.2 fp16 vrint instruction.  */
17135      if (rs == NS_HH)
17136	do_scalar_fp16_v82_encode ();
17137    }
17138  else
17139    {
17140      /* Neon encodings (or something broken...).  */
17141      inst.error = NULL;
17142      et = neon_check_type (2, rs, N_EQK, N_F_16_32 | N_KEY);
17143
17144      if (et.type == NT_invtype)
17145	return;
17146
17147      set_it_insn_type (OUTSIDE_IT_INSN);
17148      NEON_ENCODE (FLOAT, inst);
17149
17150      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
17151	return;
17152
17153      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17154      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17155      inst.instruction |= LOW4 (inst.operands[1].reg);
17156      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
17157      inst.instruction |= neon_quad (rs) << 6;
17158      /* Mask off the original size bits and reencode them.  */
17159      inst.instruction = ((inst.instruction & 0xfff3ffff)
17160			  | neon_logbits (et.size) << 18);
17161
17162      switch (mode)
17163	{
17164	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
17165	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
17166	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
17167	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
17168	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
17169	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
17170	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
17171	default: abort ();
17172	}
17173
17174      if (thumb_mode)
17175	inst.instruction |= 0xfc000000;
17176      else
17177	inst.instruction |= 0xf0000000;
17178    }
17179}
17180
17181static void
17182do_vrintx (void)
17183{
17184  do_vrint_1 (neon_cvt_mode_x);
17185}
17186
17187static void
17188do_vrintz (void)
17189{
17190  do_vrint_1 (neon_cvt_mode_z);
17191}
17192
17193static void
17194do_vrintr (void)
17195{
17196  do_vrint_1 (neon_cvt_mode_r);
17197}
17198
17199static void
17200do_vrinta (void)
17201{
17202  do_vrint_1 (neon_cvt_mode_a);
17203}
17204
17205static void
17206do_vrintn (void)
17207{
17208  do_vrint_1 (neon_cvt_mode_n);
17209}
17210
17211static void
17212do_vrintp (void)
17213{
17214  do_vrint_1 (neon_cvt_mode_p);
17215}
17216
17217static void
17218do_vrintm (void)
17219{
17220  do_vrint_1 (neon_cvt_mode_m);
17221}
17222
17223/* Crypto v1 instructions.  */
17224static void
17225do_crypto_2op_1 (unsigned elttype, int op)
17226{
17227  set_it_insn_type (OUTSIDE_IT_INSN);
17228
17229  if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
17230      == NT_invtype)
17231    return;
17232
17233  inst.error = NULL;
17234
17235  NEON_ENCODE (INTEGER, inst);
17236  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17237  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17238  inst.instruction |= LOW4 (inst.operands[1].reg);
17239  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
17240  if (op != -1)
17241    inst.instruction |= op << 6;
17242
17243  if (thumb_mode)
17244    inst.instruction |= 0xfc000000;
17245  else
17246    inst.instruction |= 0xf0000000;
17247}
17248
17249static void
17250do_crypto_3op_1 (int u, int op)
17251{
17252  set_it_insn_type (OUTSIDE_IT_INSN);
17253
17254  if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
17255		       N_32 | N_UNT | N_KEY).type == NT_invtype)
17256    return;
17257
17258  inst.error = NULL;
17259
17260  NEON_ENCODE (INTEGER, inst);
17261  neon_three_same (1, u, 8 << op);
17262}
17263
17264static void
17265do_aese (void)
17266{
17267  do_crypto_2op_1 (N_8, 0);
17268}
17269
17270static void
17271do_aesd (void)
17272{
17273  do_crypto_2op_1 (N_8, 1);
17274}
17275
17276static void
17277do_aesmc (void)
17278{
17279  do_crypto_2op_1 (N_8, 2);
17280}
17281
17282static void
17283do_aesimc (void)
17284{
17285  do_crypto_2op_1 (N_8, 3);
17286}
17287
17288static void
17289do_sha1c (void)
17290{
17291  do_crypto_3op_1 (0, 0);
17292}
17293
17294static void
17295do_sha1p (void)
17296{
17297  do_crypto_3op_1 (0, 1);
17298}
17299
17300static void
17301do_sha1m (void)
17302{
17303  do_crypto_3op_1 (0, 2);
17304}
17305
17306static void
17307do_sha1su0 (void)
17308{
17309  do_crypto_3op_1 (0, 3);
17310}
17311
17312static void
17313do_sha256h (void)
17314{
17315  do_crypto_3op_1 (1, 0);
17316}
17317
17318static void
17319do_sha256h2 (void)
17320{
17321  do_crypto_3op_1 (1, 1);
17322}
17323
17324static void
17325do_sha256su1 (void)
17326{
17327  do_crypto_3op_1 (1, 2);
17328}
17329
17330static void
17331do_sha1h (void)
17332{
17333  do_crypto_2op_1 (N_32, -1);
17334}
17335
17336static void
17337do_sha1su1 (void)
17338{
17339  do_crypto_2op_1 (N_32, 0);
17340}
17341
17342static void
17343do_sha256su0 (void)
17344{
17345  do_crypto_2op_1 (N_32, 1);
17346}
17347
17348static void
17349do_crc32_1 (unsigned int poly, unsigned int sz)
17350{
17351  unsigned int Rd = inst.operands[0].reg;
17352  unsigned int Rn = inst.operands[1].reg;
17353  unsigned int Rm = inst.operands[2].reg;
17354
17355  set_it_insn_type (OUTSIDE_IT_INSN);
17356  inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
17357  inst.instruction |= LOW4 (Rn) << 16;
17358  inst.instruction |= LOW4 (Rm);
17359  inst.instruction |= sz << (thumb_mode ? 4 : 21);
17360  inst.instruction |= poly << (thumb_mode ? 20 : 9);
17361
17362  if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
17363    as_warn (UNPRED_REG ("r15"));
17364  if (thumb_mode && (Rd == REG_SP || Rn == REG_SP || Rm == REG_SP))
17365    as_warn (UNPRED_REG ("r13"));
17366}
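
/* For example (illustrative): "crc32cb r0, r1, r2" reaches here with
   poly == 1 and sz == 0, so the ARM encoding carries the size field in
   bits [22:21] and the C (Castagnoli polynomial) bit in bit 9, while the
   Thumb encoding carries them in bits [5:4] and bit 20 respectively.  */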
17367
17368static void
17369do_crc32b (void)
17370{
17371  do_crc32_1 (0, 0);
17372}
17373
17374static void
17375do_crc32h (void)
17376{
17377  do_crc32_1 (0, 1);
17378}
17379
17380static void
17381do_crc32w (void)
17382{
17383  do_crc32_1 (0, 2);
17384}
17385
17386static void
17387do_crc32cb (void)
17388{
17389  do_crc32_1 (1, 0);
17390}
17391
17392static void
17393do_crc32ch (void)
17394{
17395  do_crc32_1 (1, 1);
17396}
17397
17398static void
17399do_crc32cw (void)
17400{
17401  do_crc32_1 (1, 2);
17402}
17403
17404
17405/* Overall per-instruction processing.	*/
17406
17407/* We need to be able to fix up arbitrary expressions in some statements.
17408   This is so that we can handle symbols that are an arbitrary distance from
17409   the pc.  The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
17410   which returns part of an address in a form which will be valid for
17411   a data instruction.	We do this by pushing the expression into a symbol
17412   in the expr_section, and creating a fix for that.  */
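
/* For example (illustrative): an operand expression such as
   (sym - . - 8) & 0xff, with sym not yet resolvable, does not reduce to a
   bare symbol, so it falls into the default case below and
   make_expr_symbol () pushes it into the expr_section; the fix is then
   made against that synthetic symbol.  */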
17413
17414static void
17415fix_new_arm (fragS *	   frag,
17416	     int	   where,
17417	     short int	   size,
17418	     expressionS * exp,
17419	     int	   pc_rel,
17420	     int	   reloc)
17421{
17422  fixS *	   new_fix;
17423
17424  switch (exp->X_op)
17425    {
17426    case O_constant:
17427      if (pc_rel)
17428	{
17429	  /* Create an absolute valued symbol, so we have something to
17430	     refer to in the object file.  Unfortunately for us, gas's
17431	     generic expression parsing will already have folded out
17432	     any use of .set foo/.type foo %function that may have
17433	     been used to set type information of the target location,
17434	     that's being specified symbolically.  We have to presume
17435	     the user knows what they are doing.  */
17436	  char name[16 + 8];
17437	  symbolS *symbol;
17438
17439	  sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);
17440
17441	  symbol = symbol_find_or_make (name);
17442	  S_SET_SEGMENT (symbol, absolute_section);
17443	  symbol_set_frag (symbol, &zero_address_frag);
17444	  S_SET_VALUE (symbol, exp->X_add_number);
17445	  exp->X_op = O_symbol;
17446	  exp->X_add_symbol = symbol;
17447	  exp->X_add_number = 0;
17448	}
17449      /* FALLTHROUGH */
17450    case O_symbol:
17451    case O_add:
17452    case O_subtract:
17453      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
17454			     (enum bfd_reloc_code_real) reloc);
17455      break;
17456
17457    default:
17458      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
17459				  pc_rel, (enum bfd_reloc_code_real) reloc);
17460      break;
17461    }
17462
17463  /* Mark whether the fix is to a THUMB instruction, or an ARM
17464     instruction.  */
17465  new_fix->tc_fix_data = thumb_mode;
17466}
17467
17468/* Create a frag for an instruction requiring relaxation.  */
17469static void
17470output_relax_insn (void)
17471{
17472  char * to;
17473  symbolS *sym;
17474  int offset;
17475
17476  /* The size of the instruction is unknown, so tie the debug info to the
17477     start of the instruction.  */
17478  dwarf2_emit_insn (0);
17479
17480  switch (inst.reloc.exp.X_op)
17481    {
17482    case O_symbol:
17483      sym = inst.reloc.exp.X_add_symbol;
17484      offset = inst.reloc.exp.X_add_number;
17485      break;
17486    case O_constant:
17487      sym = NULL;
17488      offset = inst.reloc.exp.X_add_number;
17489      break;
17490    default:
17491      sym = make_expr_symbol (&inst.reloc.exp);
17492      offset = 0;
17493      break;
17494    }
17495  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
17496		 inst.relax, sym, offset, NULL/*offset, opcode*/);
17497  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
17498}
17499
17500/* Write a 32-bit thumb instruction to buf.  */
17501static void
17502put_thumb32_insn (char * buf, unsigned long insn)
17503{
17504  md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
17505  md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
17506}
17507
17508static void
17509output_inst (const char * str)
17510{
17511  char * to = NULL;
17512
17513  if (inst.error)
17514    {
17515      as_bad ("%s -- `%s'", inst.error, str);
17516      return;
17517    }
17518  if (inst.relax)
17519    {
17520      output_relax_insn ();
17521      return;
17522    }
17523  if (inst.size == 0)
17524    return;
17525
17526  to = frag_more (inst.size);
17527  /* PR 9814: Record the thumb mode into the current frag so that we know
17528     what type of NOP padding to use, if necessary.  We override any previous
17529     setting so that if the mode has changed then the NOPS that we use will
17530     match the encoding of the last instruction in the frag.  */
17531  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
17532
17533  if (thumb_mode && (inst.size > THUMB_SIZE))
17534    {
17535      gas_assert (inst.size == (2 * THUMB_SIZE));
17536      put_thumb32_insn (to, inst.instruction);
17537    }
17538  else if (inst.size > INSN_SIZE)
17539    {
17540      gas_assert (inst.size == (2 * INSN_SIZE));
17541      md_number_to_chars (to, inst.instruction, INSN_SIZE);
17542      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
17543    }
17544  else
17545    md_number_to_chars (to, inst.instruction, inst.size);
17546
17547  if (inst.reloc.type != BFD_RELOC_UNUSED)
17548    fix_new_arm (frag_now, to - frag_now->fr_literal,
17549		 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
17550		 inst.reloc.type);
17551
17552  dwarf2_emit_insn (inst.size);
17553}
17554
17555static char *
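/* Write an IT instruction with condition COND and mask MASK to TO, or to a
   fresh frag when TO is NULL, returning the location written so the mask can
   be rewritten in place later.  Encoding examples (illustrative; per the
   ARMv7-A/R ARM):
     "IT EQ"  -> cond 0x0, mask 0x8 -> 0xbf08
     "ITT EQ" -> cond 0x0, mask 0x4 -> 0xbf04
     "ITE EQ" -> cond 0x0, mask 0xc -> 0xbf0c  */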
17556output_it_inst (int cond, int mask, char * to)
17557{
17558  unsigned long instruction = 0xbf00;
17559
17560  mask &= 0xf;
17561  instruction |= mask;
17562  instruction |= cond << 4;
17563
17564  if (to == NULL)
17565    {
17566      to = frag_more (2);
17567#ifdef OBJ_ELF
17568      dwarf2_emit_insn (2);
17569#endif
17570    }
17571
17572  md_number_to_chars (to, instruction, 2);
17573
17574  return to;
17575}
17576
17577/* Tag values used in struct asm_opcode's tag field.  */
17578enum opcode_tag
17579{
17580  OT_unconditional,	/* Instruction cannot be conditionalized.
17581			   The ARM condition field is still 0xE.  */
17582  OT_unconditionalF,	/* Instruction cannot be conditionalized
17583			   and carries 0xF in its ARM condition field.  */
17584  OT_csuffix,		/* Instruction takes a conditional suffix.  */
17585  OT_csuffixF,		/* Some forms of the instruction take a conditional
17586			   suffix, others place 0xF where the condition field
17587			   would be.  */
17588  OT_cinfix3,		/* Instruction takes a conditional infix,
17589			   beginning at character index 3.  (In
17590			   unified mode, it becomes a suffix.)  */
17591  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
17592			    tsts, cmps, cmns, and teqs. */
17593  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
17594			   character index 3, even in unified mode.  Used for
17595			   legacy instructions where suffix and infix forms
17596			   may be ambiguous.  */
17597  OT_csuf_or_in3,	/* Instruction takes either a conditional
17598			   suffix or an infix at character index 3.  */
17599  OT_odd_infix_unc,	/* This is the unconditional variant of an
17600			   instruction that takes a conditional infix
17601			   at an unusual position.  In unified mode,
17602			   this variant will accept a suffix.  */
17603  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
17604			   are the conditional variants of instructions that
17605			   take conditional infixes in unusual positions.
17606			   The infix appears at character index
17607			   (tag - OT_odd_infix_0).  These are not accepted
17608			   in unified mode.  */
17609};
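
/* For instance (illustrative): "adds" behaves as an OT_cinfix3-style
   mnemonic, so divided syntax writes its conditional form as "addeqs"
   (infix after character index 3) while unified syntax writes "addseq"
   (suffix); a plain OT_csuffix mnemonic such as "add" simply takes
   "addeq".  */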
17610
17611/* Subroutine of md_assemble, responsible for looking up the primary
17612   opcode from the mnemonic the user wrote.  STR points to the
17613   beginning of the mnemonic.
17614
17615   This is not simply a hash table lookup, because of conditional
17616   variants.  Most instructions have conditional variants, which are
17617   expressed with a _conditional affix_ to the mnemonic.  If we were
17618   to encode each conditional variant as a literal string in the opcode
17619   table, it would have approximately 20,000 entries.
17620
17621   Most mnemonics take this affix as a suffix, and in unified syntax,
17622   'most' is upgraded to 'all'.  However, in the divided syntax, some
17623   instructions take the affix as an infix, notably the s-variants of
17624   the arithmetic instructions.  Of those instructions, all but six
17625   have the infix appear after the third character of the mnemonic.
17626
17627   Accordingly, the algorithm for looking up primary opcodes given
17628   an identifier is:
17629
17630   1. Look up the identifier in the opcode table.
17631      If we find a match, go to step U.
17632
17633   2. Look up the last two characters of the identifier in the
17634      conditions table.  If we find a match, look up the first N-2
17635      characters of the identifier in the opcode table.  If we
17636      find a match, go to step CE.
17637
17638   3. Look up the fourth and fifth characters of the identifier in
17639      the conditions table.  If we find a match, extract those
17640      characters from the identifier, and look up the remaining
17641      characters in the opcode table.  If we find a match, go
17642      to step CM.
17643
17644   4. Fail.
17645
17646   U. Examine the tag field of the opcode structure, in case this is
17647      one of the six instructions with its conditional infix in an
17648      unusual place.  If it is, the tag tells us where to find the
17649      infix; look it up in the conditions table and set inst.cond
17650      accordingly.  Otherwise, this is an unconditional instruction.
17651      Again set inst.cond accordingly.  Return the opcode structure.
17652
17653  CE. Examine the tag field to make sure this is an instruction that
17654      should receive a conditional suffix.  If it is not, fail.
17655      Otherwise, set inst.cond from the suffix we already looked up,
17656      and return the opcode structure.
17657
17658  CM. Examine the tag field to make sure this is an instruction that
17659      should receive a conditional infix after the third character.
17660      If it is not, fail.  Otherwise, undo the edits to the current
17661      line of input and proceed as for case CE.  */
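
/* Worked examples (illustrative, assuming the usual opcode and condition
   tables): "addeq" fails step 1; step 2 finds the "eq" suffix and the "add"
   opcode, so we go to step CE.  "ldmeqfd" fails steps 1 and 2; step 3 then
   extracts "eq" from characters 3 and 4, finds "ldmfd", and goes to
   step CM.  */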
17662
17663static const struct asm_opcode *
17664opcode_lookup (char **str)
17665{
17666  char *end, *base;
17667  char *affix;
17668  const struct asm_opcode *opcode;
17669  const struct asm_cond *cond;
17670  char save[2];
17671
17672  /* Scan up to the end of the mnemonic, which must end in white space,
17673     '.' (in unified mode, or for Neon/VFP instructions), or end of string.  */
17674  for (base = end = *str; *end != '\0'; end++)
17675    if (*end == ' ' || *end == '.')
17676      break;
17677
17678  if (end == base)
17679    return NULL;
17680
17681  /* Handle a possible width suffix and/or Neon type suffix.  */
17682  if (end[0] == '.')
17683    {
17684      int offset = 2;
17685
17686      /* The .w and .n suffixes are only valid if the unified syntax is in
17687	 use.  */
17688      if (unified_syntax && end[1] == 'w')
17689	inst.size_req = 4;
17690      else if (unified_syntax && end[1] == 'n')
17691	inst.size_req = 2;
17692      else
17693	offset = 0;
17694
17695      inst.vectype.elems = 0;
17696
17697      *str = end + offset;
17698
17699      if (end[offset] == '.')
17700	{
17701	  /* See if we have a Neon type suffix (possible in either unified or
17702	     non-unified ARM syntax mode).  */
17703	  if (parse_neon_type (&inst.vectype, str) == FAIL)
17704	    return NULL;
17705	}
17706      else if (end[offset] != '\0' && end[offset] != ' ')
17707	return NULL;
17708    }
17709  else
17710    *str = end;
17711
17712  /* Look for unaffixed or special-case affixed mnemonic.  */
17713  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17714						    end - base);
17715  if (opcode)
17716    {
17717      /* step U */
17718      if (opcode->tag < OT_odd_infix_0)
17719	{
17720	  inst.cond = COND_ALWAYS;
17721	  return opcode;
17722	}
17723
17724      if (warn_on_deprecated && unified_syntax)
17725	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17726      affix = base + (opcode->tag - OT_odd_infix_0);
17727      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17728      gas_assert (cond);
17729
17730      inst.cond = cond->value;
17731      return opcode;
17732    }
17733
17734  /* Cannot have a conditional suffix on a mnemonic of less than two
17735     characters.  */
17736  if (end - base < 3)
17737    return NULL;
17738
17739  /* Look for suffixed mnemonic.  */
17740  affix = end - 2;
17741  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17742  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17743						    affix - base);
17744  if (opcode && cond)
17745    {
17746      /* step CE */
17747      switch (opcode->tag)
17748	{
17749	case OT_cinfix3_legacy:
17750	  /* Ignore conditional suffixes matched on infix only mnemonics.  */
17751	  break;
17752
17753	case OT_cinfix3:
17754	case OT_cinfix3_deprecated:
17755	case OT_odd_infix_unc:
17756	  if (!unified_syntax)
17757	    return 0;
17758	  /* else fall through */
17759
17760	case OT_csuffix:
17761	case OT_csuffixF:
17762	case OT_csuf_or_in3:
17763	  inst.cond = cond->value;
17764	  return opcode;
17765
17766	case OT_unconditional:
17767	case OT_unconditionalF:
17768	  if (thumb_mode)
17769	    inst.cond = cond->value;
17770	  else
17771	    {
17772	      /* Delayed diagnostic.  */
17773	      inst.error = BAD_COND;
17774	      inst.cond = COND_ALWAYS;
17775	    }
17776	  return opcode;
17777
17778	default:
17779	  return NULL;
17780	}
17781    }
17782
17783  /* Cannot have a usual-position infix on a mnemonic of less than
17784     six characters (five would be a suffix).  */
17785  if (end - base < 6)
17786    return NULL;
17787
17788  /* Look for infixed mnemonic in the usual position.  */
17789  affix = base + 3;
17790  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17791  if (!cond)
17792    return NULL;
17793
17794  memcpy (save, affix, 2);
17795  memmove (affix, affix + 2, (end - affix) - 2);
17796  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17797						    (end - base) - 2);
17798  memmove (affix + 2, affix, (end - affix) - 2);
17799  memcpy (affix, save, 2);
17800
17801  if (opcode
17802      && (opcode->tag == OT_cinfix3
17803	  || opcode->tag == OT_cinfix3_deprecated
17804	  || opcode->tag == OT_csuf_or_in3
17805	  || opcode->tag == OT_cinfix3_legacy))
17806    {
17807      /* Step CM.  */
17808      if (warn_on_deprecated && unified_syntax
17809	  && (opcode->tag == OT_cinfix3
17810	      || opcode->tag == OT_cinfix3_deprecated))
17811	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17812
17813      inst.cond = cond->value;
17814      return opcode;
17815    }
17816
17817  return NULL;
17818}
17819
17820/* This function generates an initial IT instruction, leaving its block
17821   virtually open for the new instructions. Eventually,
17822   the mask will be updated by now_it_add_mask () each time
17823   a new instruction needs to be included in the IT block.
17824   Finally, the block is closed with close_automatic_it_block ().
17825   The block closure can be requested either from md_assemble (),
17826   a tencode (), or due to a label hook.  */
17827
17828static void
17829new_automatic_it_block (int cond)
17830{
17831  now_it.state = AUTOMATIC_IT_BLOCK;
17832  now_it.mask = 0x18;
17833  now_it.cc = cond;
17834  now_it.block_length = 1;
17835  mapping_state (MAP_THUMB);
17836  now_it.insn = output_it_inst (cond, now_it.mask, NULL);
17837  now_it.warn_deprecated = FALSE;
17838  now_it.insn_cond = TRUE;
17839}
17840
17841/* Close an automatic IT block.
17842   See comments in new_automatic_it_block ().  */
17843
17844static void
17845close_automatic_it_block (void)
17846{
17847  now_it.mask = 0x10;
17848  now_it.block_length = 0;
17849}
17850
17851/* Update the mask of the current automatically-generated IT
17852   instruction. See comments in new_automatic_it_block ().  */
17853
17854static void
17855now_it_add_mask (int cond)
17856{
17857#define CLEAR_BIT(value, nbit)  ((value) & ~(1 << (nbit)))
17858#define SET_BIT_VALUE(value, bitvalue, nbit)  (CLEAR_BIT (value, nbit) \
17859					      | ((bitvalue) << (nbit)))
17860  const int resulting_bit = (cond & 1);
17861
17862  now_it.mask &= 0xf;
17863  now_it.mask = SET_BIT_VALUE (now_it.mask,
17864				   resulting_bit,
17865				  (5 - now_it.block_length));
17866  now_it.mask = SET_BIT_VALUE (now_it.mask,
17867				   1,
17868				   ((5 - now_it.block_length) - 1) );
17869  output_it_inst (now_it.cc, now_it.mask, now_it.insn);
17870
17871#undef CLEAR_BIT
17872#undef SET_BIT_VALUE
17873}
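
/* Example of the mask evolution (illustrative): a freshly opened automatic
   block starts with mask 0x18, which output_it_inst () emits as a plain
   "IT".  Adding a second instruction with the same condition EQ rewrites the
   mask to 0x4 (an "ITT EQ"); adding one with the opposite condition NE
   instead gives 0xc (an "ITE EQ").  */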
17874
17875/* The IT blocks handling machinery is accessed through these functions:
17876     it_fsm_pre_encode ()               from md_assemble ()
17877     set_it_insn_type ()                optional, from the tencode functions
17878     set_it_insn_type_last ()           ditto
17879     in_it_block ()                     ditto
17880     it_fsm_post_encode ()              from md_assemble ()
17881     force_automatic_it_block_close ()  from label handling functions
17882
17883   Rationale:
17884     1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
17885	initializing the IT insn type with a generic initial value depending
17886	on the inst.condition.
17887     2) During the tencode function, two things may happen:
17888	a) The tencode function overrides the IT insn type by
17889	   calling either set_it_insn_type (type) or set_it_insn_type_last ().
17890	b) The tencode function queries the IT block state by
17891	   calling in_it_block () (i.e. to determine narrow/not narrow mode).
17892
17893	Both set_it_insn_type and in_it_block run the internal FSM state
17894	handling function (handle_it_state), because: a) setting the IT insn
17895	type may result in an invalid state (exiting the function),
17896	and b) querying the state requires the FSM to be updated.
17897	Specifically we want to avoid creating an IT block for conditional
17898	branches, so it_fsm_pre_encode is actually a guess and we can't
17899	determine whether an IT block is required until the tencode () routine
17900	has decided what type of instruction this actually is.
17901	Because of this, if set_it_insn_type and in_it_block have to be used,
17902	set_it_insn_type has to be called first.
17903
17904	set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
17905	determines the insn IT type depending on the inst.cond code.
17906	When a tencode () routine encodes an instruction that can be
17907	either outside an IT block, or, in the case of being inside, has to be
17908	the last one, set_it_insn_type_last () will determine the proper
17909	IT instruction type based on the inst.cond code. Otherwise,
17910	set_it_insn_type can be called for overriding that logic or
17911	for covering other cases.
17912
17913	Calling handle_it_state () may not transition the IT block state to
17914	OUTSIDE_IT_BLOCK immediately, since the (current) state could be
17915	still queried. Instead, if the FSM determines that the state should
17916	be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
17917	after the tencode () function: that's what it_fsm_post_encode () does.
17918
17919	Since in_it_block () calls the state handling function to get an
17920	updated state, an error may occur (due to invalid insns combination).
17921	In that case, inst.error is set.
17922	Therefore, inst.error has to be checked after the execution of
17923	the tencode () routine.
17924
17925     3) Back in md_assemble(), it_fsm_post_encode () is called to commit
17926	any pending state change (if any) that didn't take place in
17927	handle_it_state () as explained above.  */
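
/* A minimal usage sketch (illustrative only): a tencode routine whose
   instruction must be outside an IT block, or be the last instruction of
   one, typically does

     set_it_insn_type_last ();
     if (in_it_block ())
       ... choose the encoding that is conditional inside IT ...

   with md_assemble () bracketing the call between it_fsm_pre_encode () and
   it_fsm_post_encode ().  As noted above, set_it_insn_type* must be called
   before in_it_block ().  */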
17928
17929static void
17930it_fsm_pre_encode (void)
17931{
17932  if (inst.cond != COND_ALWAYS)
17933    inst.it_insn_type = INSIDE_IT_INSN;
17934  else
17935    inst.it_insn_type = OUTSIDE_IT_INSN;
17936
17937  now_it.state_handled = 0;
17938}
17939
17940/* IT state FSM handling function.  */
17941
17942static int
17943handle_it_state (void)
17944{
17945  now_it.state_handled = 1;
17946  now_it.insn_cond = FALSE;
17947
17948  switch (now_it.state)
17949    {
17950    case OUTSIDE_IT_BLOCK:
17951      switch (inst.it_insn_type)
17952	{
17953	case OUTSIDE_IT_INSN:
17954	  break;
17955
17956	case INSIDE_IT_INSN:
17957	case INSIDE_IT_LAST_INSN:
17958	  if (thumb_mode == 0)
17959	    {
17960	      if (unified_syntax
17961		  && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
17962		as_tsktsk (_("Warning: conditional outside an IT block"\
17963			     " for Thumb."));
17964	    }
17965	  else
17966	    {
17967	      if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
17968		  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
17969		{
17970		  /* Automatically generate the IT instruction.  */
17971		  new_automatic_it_block (inst.cond);
17972		  if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
17973		    close_automatic_it_block ();
17974		}
17975	      else
17976		{
17977		  inst.error = BAD_OUT_IT;
17978		  return FAIL;
17979		}
17980	    }
17981	  break;
17982
17983	case IF_INSIDE_IT_LAST_INSN:
17984	case NEUTRAL_IT_INSN:
17985	  break;
17986
17987	case IT_INSN:
17988	  now_it.state = MANUAL_IT_BLOCK;
17989	  now_it.block_length = 0;
17990	  break;
17991	}
17992      break;
17993
17994    case AUTOMATIC_IT_BLOCK:
17995      /* Three things may happen now:
17996	 a) We should increment current it block size;
17997	 b) We should close current it block (closing insn or 4 insns);
17998	 c) We should close current it block and start a new one (due
17999	 to incompatible conditions or
18000	 4 insns-length block reached).  */
18001
18002      switch (inst.it_insn_type)
18003	{
18004	case OUTSIDE_IT_INSN:
18005	  /* The closure of the block shall happen immediately,
18006	     so any in_it_block () call reports the block as closed.  */
18007	  force_automatic_it_block_close ();
18008	  break;
18009
18010	case INSIDE_IT_INSN:
18011	case INSIDE_IT_LAST_INSN:
18012	case IF_INSIDE_IT_LAST_INSN:
18013	  now_it.block_length++;
18014
18015	  if (now_it.block_length > 4
18016	      || !now_it_compatible (inst.cond))
18017	    {
18018	      force_automatic_it_block_close ();
18019	      if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
18020		new_automatic_it_block (inst.cond);
18021	    }
18022	  else
18023	    {
18024	      now_it.insn_cond = TRUE;
18025	      now_it_add_mask (inst.cond);
18026	    }
18027
18028	  if (now_it.state == AUTOMATIC_IT_BLOCK
18029	      && (inst.it_insn_type == INSIDE_IT_LAST_INSN
18030		  || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
18031	    close_automatic_it_block ();
18032	  break;
18033
18034	case NEUTRAL_IT_INSN:
18035	  now_it.block_length++;
18036	  now_it.insn_cond = TRUE;
18037
18038	  if (now_it.block_length > 4)
18039	    force_automatic_it_block_close ();
18040	  else
18041	    now_it_add_mask (now_it.cc & 1);
18042	  break;
18043
18044	case IT_INSN:
18045	  close_automatic_it_block ();
18046	  now_it.state = MANUAL_IT_BLOCK;
18047	  break;
18048	}
18049      break;
18050
18051    case MANUAL_IT_BLOCK:
18052      {
18053	/* Check conditional suffixes.  */
18054	const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
18055	int is_last;
18056	now_it.mask <<= 1;
18057	now_it.mask &= 0x1f;
18058	is_last = (now_it.mask == 0x10);
18059	now_it.insn_cond = TRUE;
18060
18061	switch (inst.it_insn_type)
18062	  {
18063	  case OUTSIDE_IT_INSN:
18064	    inst.error = BAD_NOT_IT;
18065	    return FAIL;
18066
18067	  case INSIDE_IT_INSN:
18068	    if (cond != inst.cond)
18069	      {
18070		inst.error = BAD_IT_COND;
18071		return FAIL;
18072	      }
18073	    break;
18074
18075	  case INSIDE_IT_LAST_INSN:
18076	  case IF_INSIDE_IT_LAST_INSN:
18077	    if (cond != inst.cond)
18078	      {
18079		inst.error = BAD_IT_COND;
18080		return FAIL;
18081	      }
18082	    if (!is_last)
18083	      {
18084		inst.error = BAD_BRANCH;
18085		return FAIL;
18086	      }
18087	    break;
18088
18089	  case NEUTRAL_IT_INSN:
18090	    /* The BKPT instruction is unconditional even in an IT block.  */
18091	    break;
18092
18093	  case IT_INSN:
18094	    inst.error = BAD_IT_IT;
18095	    return FAIL;
18096	  }
18097      }
18098      break;
18099    }
18100
18101  return SUCCESS;
18102}
18103
18104struct depr_insn_mask
18105{
18106  unsigned long pattern;
18107  unsigned long mask;
18108  const char* description;
18109};
18110
18111/* List of 16-bit instruction patterns deprecated in an IT block in
18112   ARMv8.  */
18113static const struct depr_insn_mask depr_it_insns[] = {
18114  { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
18115  { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
18116  { 0xa000, 0xb800, N_("ADR") },
18117  { 0x4800, 0xf800, N_("Literal loads") },
18118  { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
18119  { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
18120  /* NOTE: 0x00dd is not the real encoding; instead, it is the 'tvalue'
18121     field in asm_opcode.  'tvalue' is used at the stage this check happens.  */
18122  { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
18123  { 0, 0, NULL }
18124};
18125
18126static void
18127it_fsm_post_encode (void)
18128{
18129  int is_last;
18130
18131  if (!now_it.state_handled)
18132    handle_it_state ();
18133
18134  if (now_it.insn_cond
18135      && !now_it.warn_deprecated
18136      && warn_on_deprecated
18137      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
18138    {
18139      if (inst.instruction >= 0x10000)
18140	{
18141	  as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
18142		     "deprecated in ARMv8"));
18143	  now_it.warn_deprecated = TRUE;
18144	}
18145      else
18146	{
18147	  const struct depr_insn_mask *p = depr_it_insns;
18148
18149	  while (p->mask != 0)
18150	    {
18151	      if ((inst.instruction & p->mask) == p->pattern)
18152		{
18153		  as_tsktsk (_("IT blocks containing 16-bit Thumb instructions "
18154			     "of the following class are deprecated in ARMv8: "
18155			     "%s"), p->description);
18156		  now_it.warn_deprecated = TRUE;
18157		  break;
18158		}
18159
18160	      ++p;
18161	    }
18162	}
18163
18164      if (now_it.block_length > 1)
18165	{
18166	  as_tsktsk (_("IT blocks containing more than one conditional "
18167		     "instruction are deprecated in ARMv8"));
18168	  now_it.warn_deprecated = TRUE;
18169	}
18170    }
18171
18172  is_last = (now_it.mask == 0x10);
18173  if (is_last)
18174    {
18175      now_it.state = OUTSIDE_IT_BLOCK;
18176      now_it.mask = 0;
18177    }
18178}
18179
18180static void
18181force_automatic_it_block_close (void)
18182{
18183  if (now_it.state == AUTOMATIC_IT_BLOCK)
18184    {
18185      close_automatic_it_block ();
18186      now_it.state = OUTSIDE_IT_BLOCK;
18187      now_it.mask = 0;
18188    }
18189}
18190
18191static int
18192in_it_block (void)
18193{
18194  if (!now_it.state_handled)
18195    handle_it_state ();
18196
18197  return now_it.state != OUTSIDE_IT_BLOCK;
18198}
18199
18200/* Whether OPCODE only has a T32 encoding.  Since this function is only used
18201   by t32_insn_ok, opcodes enabled by the v6t2 extension bit do not need to be
18202   listed here, hence the "known" in the function name.  */
18203
18204static bfd_boolean
18205known_t32_only_insn (const struct asm_opcode *opcode)
18206{
18207  /* Original Thumb-1 wide instruction.  */
18208  if (opcode->tencode == do_t_blx
18209      || opcode->tencode == do_t_branch23
18210      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
18211      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier))
18212    return TRUE;
18213
18214  /* Wide-only instruction added to ARMv8-M Baseline.  */
18215  if (ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v8m_m_only)
18216      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_atomics)
18217      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v6t2_v8m)
18218      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_div))
18219    return TRUE;
18220
18221  return FALSE;
18222}
18223
18224/* Whether wide instruction variant can be used if available for a valid OPCODE
18225   in ARCH.  */
18226
18227static bfd_boolean
18228t32_insn_ok (arm_feature_set arch, const struct asm_opcode *opcode)
18229{
18230  if (known_t32_only_insn (opcode))
18231    return TRUE;
18232
18233  /* Instruction with narrow and wide encoding added to ARMv8-M.  Availability
18234     of variant T3 of B.W is checked in do_t_branch.  */
18235  if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
18236      && opcode->tencode == do_t_branch)
18237    return TRUE;
18238
18239  /* Wide instruction variants of all instructions with narrow *and* wide
18240     variants become available with ARMv6t2.  Other opcodes are either
18241     narrow-only or wide-only and are thus available if OPCODE is valid.  */
18242  if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v6t2))
18243    return TRUE;
18244
18245  /* OPCODE with narrow only instruction variant or wide variant not
18246     available.  */
18247  return FALSE;
18248}
18249
18250void
18251md_assemble (char *str)
18252{
18253  char *p = str;
18254  const struct asm_opcode * opcode;
18255
18256  /* Align the previous label if needed.  */
18257  if (last_label_seen != NULL)
18258    {
18259      symbol_set_frag (last_label_seen, frag_now);
18260      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
18261      S_SET_SEGMENT (last_label_seen, now_seg);
18262    }
18263
18264  memset (&inst, '\0', sizeof (inst));
18265  inst.reloc.type = BFD_RELOC_UNUSED;
18266
18267  opcode = opcode_lookup (&p);
18268  if (!opcode)
18269    {
18270      /* It wasn't an instruction, but it might be a register alias of
18271	 the form alias .req reg, or a Neon .dn/.qn directive.  */
18272      if (! create_register_alias (str, p)
18273	  && ! create_neon_reg_alias (str, p))
18274	as_bad (_("bad instruction `%s'"), str);
18275
18276      return;
18277    }
18278
18279  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
18280    as_tsktsk (_("s suffix on comparison instruction is deprecated"));
18281
18282  /* The value which unconditional instructions should have in place of the
18283     condition field.  */
18284  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;
18285
18286  if (thumb_mode)
18287    {
18288      arm_feature_set variant;
18289
18290      variant = cpu_variant;
18291      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
18292      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
18293	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
18294      /* Check that this instruction is supported for this CPU.  */
18295      if (!opcode->tvariant
18296	  || (thumb_mode == 1
18297	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
18298	{
18299	  as_bad (_("selected processor does not support `%s' in Thumb mode"), str);
18300	  return;
18301	}
18302      if (inst.cond != COND_ALWAYS && !unified_syntax
18303	  && opcode->tencode != do_t_branch)
18304	{
18305	  as_bad (_("Thumb does not support conditional execution"));
18306	  return;
18307	}
18308
18309      /* Two things are addressed here:
18310	 1) Implicitly require narrow instructions on Thumb-1.
18311	    This avoids relaxation accidentally introducing Thumb-2
18312	    instructions.
18313	 2) Reject wide instructions in non-Thumb-2 cores.
18314
18315	 Only instructions with narrow and wide variants need to be handled
18316	 but selecting all non wide-only instructions is easier.  */
18317      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2)
18318	  && !t32_insn_ok (variant, opcode))
18319	{
18320	  if (inst.size_req == 0)
18321	    inst.size_req = 2;
18322	  else if (inst.size_req == 4)
18323	    {
18324	      if (ARM_CPU_HAS_FEATURE (variant, arm_ext_v8m))
18325		as_bad (_("selected processor does not support 32bit wide "
18326			  "variant of instruction `%s'"), str);
18327	      else
18328		as_bad (_("selected processor does not support `%s' in "
18329			  "Thumb-2 mode"), str);
18330	      return;
18331	    }
18332	}
18333
18334      inst.instruction = opcode->tvalue;
18335
18336      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
18337	{
18338	  /* Prepare the it_insn_type for those encodings that don't set
18339	     it.  */
18340	  it_fsm_pre_encode ();
18341
18342	  opcode->tencode ();
18343
18344	  it_fsm_post_encode ();
18345	}
18346
18347      if (!(inst.error || inst.relax))
18348	{
18349	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
18350	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
18351	  if (inst.size_req && inst.size_req != inst.size)
18352	    {
18353	      as_bad (_("cannot honor width suffix -- `%s'"), str);
18354	      return;
18355	    }
18356	}
18357
18358      /* Something has gone badly wrong if we try to relax a fixed size
18359	 instruction.  */
18360      gas_assert (inst.size_req == 0 || !inst.relax);
18361
18362      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
18363			      *opcode->tvariant);
18364      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
18365	 set those bits when Thumb-2 32-bit instructions are seen.  The impact
18366	 of relaxable instructions will be considered later after we finish all
18367	 relaxation.  */
18368      if (ARM_FEATURE_CORE_EQUAL (cpu_variant, arm_arch_any))
18369	variant = arm_arch_none;
18370      else
18371	variant = cpu_variant;
18372      if (inst.size == 4 && !t32_insn_ok (variant, opcode))
18373	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
18374				arm_ext_v6t2);
18375
18376      check_neon_suffixes;
18377
18378      if (!inst.error)
18379	{
18380	  mapping_state (MAP_THUMB);
18381	}
18382    }
18383  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
18384    {
18385      bfd_boolean is_bx;
18386
18387      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
18388      is_bx = (opcode->aencode == do_bx);
18389
18390      /* Check that this instruction is supported for this CPU.  */
18391      if (!(is_bx && fix_v4bx)
18392	  && !(opcode->avariant &&
18393	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
18394	{
18395	  as_bad (_("selected processor does not support `%s' in ARM mode"), str);
18396	  return;
18397	}
18398      if (inst.size_req)
18399	{
18400	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
18401	  return;
18402	}
18403
18404      inst.instruction = opcode->avalue;
18405      if (opcode->tag == OT_unconditionalF)
18406	inst.instruction |= 0xFU << 28;
18407      else
18408	inst.instruction |= inst.cond << 28;
18409      inst.size = INSN_SIZE;
18410      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
18411	{
18412	  it_fsm_pre_encode ();
18413	  opcode->aencode ();
18414	  it_fsm_post_encode ();
18415	}
18416      /* Arm mode bx is marked as both v4T and v5 because it's still required
18417	 on a hypothetical non-thumb v5 core.  */
18418      if (is_bx)
18419	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
18420      else
18421	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
18422				*opcode->avariant);
18423
18424      check_neon_suffixes;
18425
18426      if (!inst.error)
18427	{
18428	  mapping_state (MAP_ARM);
18429	}
18430    }
18431  else
18432    {
18433      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
18434		"-- `%s'"), str);
18435      return;
18436    }
18437  output_inst (str);
18438}
18439
18440static void
18441check_it_blocks_finished (void)
18442{
18443#ifdef OBJ_ELF
18444  asection *sect;
18445
18446  for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
18447    {
18448      segment_info_type *seginfo = seg_info (sect);
18449
18450      if (seginfo && seginfo->tc_segment_info_data.current_it.state
18451	  == MANUAL_IT_BLOCK)
18452        {
18453	  as_warn (_("section '%s' finished with an open IT block."),
18454		   sect->name);
18455        }
18456    }
18457#else
18458  if (now_it.state == MANUAL_IT_BLOCK)
18459    as_warn (_("file finished with an open IT block."));
18460#endif
18461}
18462
18463/* Various frobbings of labels and their addresses.  */
18464
18465void
18466arm_start_line_hook (void)
18467{
18468  last_label_seen = NULL;
18469}
18470
18471void
18472arm_frob_label (symbolS * sym)
18473{
18474  last_label_seen = sym;
18475
18476  ARM_SET_THUMB (sym, thumb_mode);
18477
18478#if defined OBJ_COFF || defined OBJ_ELF
18479  ARM_SET_INTERWORK (sym, support_interwork);
18480#endif
18481
18482  force_automatic_it_block_close ();
18483
18484  /* Note - do not allow local symbols (.Lxxx) to be labelled
18485     as Thumb functions.  This is because these labels, whilst
18486     they exist inside Thumb code, are not the entry points for
18487     possible ARM->Thumb calls.	 Also, these labels can be used
18488     as part of a computed goto or switch statement.  E.g. GCC
18489     can generate code that looks like this:
18490
18491		ldr  r2, [pc, .Laaa]
18492		lsl  r3, r3, #2
18493		ldr  r2, [r3, r2]
18494		mov  pc, r2
18495
18496       .Lbbb:  .word .Lxxx
18497       .Lccc:  .word .Lyyy
18498       ..etc...
18499       .Laaa:	.word .Lbbb
18500
18501     The first instruction loads the address of the jump table.
18502     The second instruction converts a table index into a byte offset.
18503     The third instruction gets the jump address out of the table.
18504     The fourth instruction performs the jump.
18505
18506     If the address stored at .Laaa is that of a symbol which has the
18507     Thumb_Func bit set, then the linker will arrange for this address
18508     to have the bottom bit set, which in turn would mean that the
18509     address computation performed by the third instruction would end
18510     up with the bottom bit set.  Since the ARM is capable of unaligned
18511     word loads, the instruction would then load the incorrect address
18512     out of the jump table, and chaos would ensue.  */
18513  if (label_is_thumb_function_name
18514      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
18515      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
18516    {
18517      /* When the address of a Thumb function is taken the bottom
18518	 bit of that address should be set.  This will allow
18519	 interworking between Arm and Thumb functions to work
18520	 correctly.  */
18521
18522      THUMB_SET_FUNC (sym, 1);
18523
18524      label_is_thumb_function_name = FALSE;
18525    }
18526
18527  dwarf2_emit_label (sym);
18528}
18529
18530bfd_boolean
18531arm_data_in_code (void)
18532{
18533  if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
18534    {
18535      *input_line_pointer = '/';
18536      input_line_pointer += 5;
18537      *input_line_pointer = 0;
18538      return TRUE;
18539    }
18540
18541  return FALSE;
18542}
18543
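/* Symbols inside Thumb code can end up carrying a "/data" suffix
   (cf. arm_data_in_code above); strip it here so the symbol is recorded
   under its plain name.  */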
18544char *
18545arm_canonicalize_symbol_name (char * name)
18546{
18547  int len;
18548
18549  if (thumb_mode && (len = strlen (name)) > 5
18550      && streq (name + len - 5, "/data"))
18551    *(name + len - 5) = 0;
18552
18553  return name;
18554}
18555
18556/* Table of all register names defined by default.  The user can
18557   define additional names with .req.  Note that all register names
18558   should appear in both upper and lowercase variants.	Some registers
18559   also have mixed-case names.	*/
18560
18561#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
18562#define REGNUM(p,n,t) REGDEF(p##n, n, t)
18563#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
18564#define REGSET(p,t) \
18565  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
18566  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
18567  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
18568  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
18569#define REGSETH(p,t) \
18570  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
18571  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
18572  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
18573  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
18574#define REGSET2(p,t) \
18575  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
18576  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
18577  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
18578  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
18579#define SPLRBANK(base,bank,t) \
18580  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
18581  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
18582  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
18583  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
18584  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
18585  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
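/* As a worked example of the macros above: REGNUM(r,5,RN) goes through
   REGDEF(r5, 5, RN) and yields { "r5", 5, REG_TYPE_RN, TRUE, 0 }, so each
   REGSET row below contributes the sixteen entries 0..15 of a register
   family and REGSETH the entries 16..31.  REGNUM2 stores 2 * n, which
   matches the index of the first of the two D registers that each Neon
   Q register overlaps.  */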
18586
18587static const struct reg_entry reg_names[] =
18588{
18589  /* ARM integer registers.  */
18590  REGSET(r, RN), REGSET(R, RN),
18591
18592  /* ATPCS synonyms.  */
18593  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
18594  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
18595  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
18596
18597  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
18598  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
18599  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
18600
18601  /* Well-known aliases.  */
18602  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
18603  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
18604
18605  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
18606  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
18607
18608  /* Coprocessor numbers.  */
18609  REGSET(p, CP), REGSET(P, CP),
18610
18611  /* Coprocessor register numbers.  The "cr" variants are for backward
18612     compatibility.  */
18613  REGSET(c,  CN), REGSET(C, CN),
18614  REGSET(cr, CN), REGSET(CR, CN),
18615
18616  /* ARM banked registers.  */
18617  REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
18618  REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
18619  REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
18620  REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
18621  REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
18622  REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
18623  REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),
18624
18625  REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
18626  REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
18627  REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
18628  REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
18629  REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
18630  REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
18631  REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
18632  REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),
18633
18634  SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
18635  SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
18636  SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
18637  SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
18638  SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
18639  REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
18640  REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
18641  REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
18642  REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),
18643
18644  /* FPA registers.  */
18645  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
18646  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
18647
18648  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
18649  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
18650
18651  /* VFP SP registers.	*/
18652  REGSET(s,VFS),  REGSET(S,VFS),
18653  REGSETH(s,VFS), REGSETH(S,VFS),
18654
18655  /* VFP DP Registers.	*/
18656  REGSET(d,VFD),  REGSET(D,VFD),
18657  /* Extra Neon DP registers.  */
18658  REGSETH(d,VFD), REGSETH(D,VFD),
18659
18660  /* Neon QP registers.  */
18661  REGSET2(q,NQ),  REGSET2(Q,NQ),
18662
18663  /* VFP control registers.  */
18664  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
18665  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
18666  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
18667  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
18668  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
18669  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
18670
18671  /* Maverick DSP coprocessor registers.  */
18672  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
18673  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),
18674
18675  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
18676  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
18677  REGDEF(dspsc,0,DSPSC),
18678
18679  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
18680  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
18681  REGDEF(DSPSC,0,DSPSC),
18682
18683  /* iWMMXt data registers - p0, c0-15.	 */
18684  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
18685
18686  /* iWMMXt control registers - p1, c0-3.  */
18687  REGDEF(wcid,	0,MMXWC),  REGDEF(wCID,	 0,MMXWC),  REGDEF(WCID,  0,MMXWC),
18688  REGDEF(wcon,	1,MMXWC),  REGDEF(wCon,	 1,MMXWC),  REGDEF(WCON,  1,MMXWC),
18689  REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
18690  REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),
18691
18692  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
18693  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
18694  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
18695  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
18696  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),
18697
18698  /* XScale accumulator registers.  */
18699  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
18700};
18701#undef REGDEF
18702#undef REGNUM
18703#undef REGSET
18704
18705/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
18706   within psr_required_here.  */
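/* Every ordering of up to four of the flag letters f, s, x and c appears
   explicitly below, so for example "cpsr_fsxc" and "cpsr_cxsf" in an MSR
   operand name the same set of fields.  */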
18707static const struct asm_psr psrs[] =
18708{
18709  /* Backward compatibility notation.  Note that "all" is no longer
18710     truly all possible PSR bits.  */
18711  {"all",  PSR_c | PSR_f},
18712  {"flg",  PSR_f},
18713  {"ctl",  PSR_c},
18714
18715  /* Individual flags.	*/
18716  {"f",	   PSR_f},
18717  {"c",	   PSR_c},
18718  {"x",	   PSR_x},
18719  {"s",	   PSR_s},
18720
18721  /* Combinations of flags.  */
18722  {"fs",   PSR_f | PSR_s},
18723  {"fx",   PSR_f | PSR_x},
18724  {"fc",   PSR_f | PSR_c},
18725  {"sf",   PSR_s | PSR_f},
18726  {"sx",   PSR_s | PSR_x},
18727  {"sc",   PSR_s | PSR_c},
18728  {"xf",   PSR_x | PSR_f},
18729  {"xs",   PSR_x | PSR_s},
18730  {"xc",   PSR_x | PSR_c},
18731  {"cf",   PSR_c | PSR_f},
18732  {"cs",   PSR_c | PSR_s},
18733  {"cx",   PSR_c | PSR_x},
18734  {"fsx",  PSR_f | PSR_s | PSR_x},
18735  {"fsc",  PSR_f | PSR_s | PSR_c},
18736  {"fxs",  PSR_f | PSR_x | PSR_s},
18737  {"fxc",  PSR_f | PSR_x | PSR_c},
18738  {"fcs",  PSR_f | PSR_c | PSR_s},
18739  {"fcx",  PSR_f | PSR_c | PSR_x},
18740  {"sfx",  PSR_s | PSR_f | PSR_x},
18741  {"sfc",  PSR_s | PSR_f | PSR_c},
18742  {"sxf",  PSR_s | PSR_x | PSR_f},
18743  {"sxc",  PSR_s | PSR_x | PSR_c},
18744  {"scf",  PSR_s | PSR_c | PSR_f},
18745  {"scx",  PSR_s | PSR_c | PSR_x},
18746  {"xfs",  PSR_x | PSR_f | PSR_s},
18747  {"xfc",  PSR_x | PSR_f | PSR_c},
18748  {"xsf",  PSR_x | PSR_s | PSR_f},
18749  {"xsc",  PSR_x | PSR_s | PSR_c},
18750  {"xcf",  PSR_x | PSR_c | PSR_f},
18751  {"xcs",  PSR_x | PSR_c | PSR_s},
18752  {"cfs",  PSR_c | PSR_f | PSR_s},
18753  {"cfx",  PSR_c | PSR_f | PSR_x},
18754  {"csf",  PSR_c | PSR_s | PSR_f},
18755  {"csx",  PSR_c | PSR_s | PSR_x},
18756  {"cxf",  PSR_c | PSR_x | PSR_f},
18757  {"cxs",  PSR_c | PSR_x | PSR_s},
18758  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
18759  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
18760  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
18761  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
18762  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
18763  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
18764  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
18765  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
18766  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
18767  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
18768  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
18769  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
18770  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
18771  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
18772  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
18773  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
18774  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
18775  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
18776  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
18777  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
18778  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
18779  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
18780  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
18781  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
18782};
18783
18784/* Table of V7M psr names.  */
18785static const struct asm_psr v7m_psrs[] =
18786{
18787  {"apsr",	  0 }, {"APSR",		0 },
18788  {"iapsr",	  1 }, {"IAPSR",	1 },
18789  {"eapsr",	  2 }, {"EAPSR",	2 },
18790  {"psr",	  3 }, {"PSR",		3 },
18791  {"xpsr",	  3 }, {"XPSR",		3 }, {"xPSR",	  3 },
18792  {"ipsr",	  5 }, {"IPSR",		5 },
18793  {"epsr",	  6 }, {"EPSR",		6 },
18794  {"iepsr",	  7 }, {"IEPSR",	7 },
18795  {"msp",	  8 }, {"MSP",		8 }, {"msp_s",     8 }, {"MSP_S",     8 },
18796  {"psp",	  9 }, {"PSP",		9 }, {"psp_s",     9 }, {"PSP_S",     9 },
18797  {"primask",	  16}, {"PRIMASK",	16},
18798  {"basepri",	  17}, {"BASEPRI",	17},
18799  {"basepri_max", 18}, {"BASEPRI_MAX",	18},
18800  {"basepri_max", 18}, {"BASEPRI_MASK",	18}, /* Typo, preserved for backwards compatibility.  */
18801  {"faultmask",	  19}, {"FAULTMASK",	19},
18802  {"control",	  20}, {"CONTROL",	20},
18803  {"msp_ns",	0x88}, {"MSP_NS",     0x88},
18804  {"psp_ns",	0x89}, {"PSP_NS",     0x89}
18805};
18806
18807/* Table of all shift-in-operand names.	 */
18808static const struct asm_shift_name shift_names [] =
18809{
18810  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
18811  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
18812  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
18813  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
18814  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
18815  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX }
18816};
18817
18818/* Table of all explicit relocation names.  */
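/* These names are recognised in parentheses after a symbol in an operand,
   for instance "bl foo(PLT)" or ".word sym(GOTOFF)".  */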
18819#ifdef OBJ_ELF
18820static struct reloc_entry reloc_names[] =
18821{
18822  { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
18823  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
18824  { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
18825  { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
18826  { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
18827  { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
18828  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},  { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
18829  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
18830  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
18831  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},  { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
18832  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},  { "TPOFF",   BFD_RELOC_ARM_TLS_LE32},
18833  { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
18834  { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
18835	{ "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
18836  { "tlscall", BFD_RELOC_ARM_TLS_CALL},
18837	{ "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
18838  { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
18839	{ "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ}
18840};
18841#endif
18842
18843/* Table of all conditional affixes.  0xF is not defined as a condition code.  */
18844static const struct asm_cond conds[] =
18845{
18846  {"eq", 0x0},
18847  {"ne", 0x1},
18848  {"cs", 0x2}, {"hs", 0x2},
18849  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
18850  {"mi", 0x4},
18851  {"pl", 0x5},
18852  {"vs", 0x6},
18853  {"vc", 0x7},
18854  {"hi", 0x8},
18855  {"ls", 0x9},
18856  {"ge", 0xa},
18857  {"lt", 0xb},
18858  {"gt", 0xc},
18859  {"le", 0xd},
18860  {"al", 0xe}
18861};
18862
18863#define UL_BARRIER(L,U,CODE,FEAT) \
18864  { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
18865  { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }
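/* Each UL_BARRIER line below therefore defines a lower-case and an
   upper-case spelling of the same barrier option; the "sy"/"SY" pair, for
   example, both carry option value 0xf and require ARM_EXT_BARRIER.  */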
18866
18867static struct asm_barrier_opt barrier_opt_names[] =
18868{
18869  UL_BARRIER ("sy",	"SY",	 0xf, ARM_EXT_BARRIER),
18870  UL_BARRIER ("st",	"ST",	 0xe, ARM_EXT_BARRIER),
18871  UL_BARRIER ("ld",	"LD",	 0xd, ARM_EXT_V8),
18872  UL_BARRIER ("ish",	"ISH",	 0xb, ARM_EXT_BARRIER),
18873  UL_BARRIER ("sh",	"SH",	 0xb, ARM_EXT_BARRIER),
18874  UL_BARRIER ("ishst",	"ISHST", 0xa, ARM_EXT_BARRIER),
18875  UL_BARRIER ("shst",	"SHST",	 0xa, ARM_EXT_BARRIER),
18876  UL_BARRIER ("ishld",	"ISHLD", 0x9, ARM_EXT_V8),
18877  UL_BARRIER ("un",	"UN",	 0x7, ARM_EXT_BARRIER),
18878  UL_BARRIER ("nsh",	"NSH",	 0x7, ARM_EXT_BARRIER),
18879  UL_BARRIER ("unst",	"UNST",	 0x6, ARM_EXT_BARRIER),
18880  UL_BARRIER ("nshst",	"NSHST", 0x6, ARM_EXT_BARRIER),
18881  UL_BARRIER ("nshld",	"NSHLD", 0x5, ARM_EXT_V8),
18882  UL_BARRIER ("osh",	"OSH",	 0x3, ARM_EXT_BARRIER),
18883  UL_BARRIER ("oshst",	"OSHST", 0x2, ARM_EXT_BARRIER),
18884  UL_BARRIER ("oshld",	"OSHLD", 0x1, ARM_EXT_V8)
18885};
18886
18887#undef UL_BARRIER
18888
18889/* Table of ARM-format instructions.	*/
18890
18891/* Macros for gluing together operand strings.  N.B. In all cases
18892   other than OPS0, the trailing OP_stop comes from default
18893   zero-initialization of the unspecified elements of the array.  */
18894#define OPS0()		  { OP_stop, }
18895#define OPS1(a)		  { OP_##a, }
18896#define OPS2(a,b)	  { OP_##a,OP_##b, }
18897#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
18898#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
18899#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
18900#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
18901
18902/* These macros are similar to the OPSn macros above, but do not prepend the OP_ prefix.
18903   This is useful when mixing operands for ARM and THUMB, i.e. using the
18904   MIX_ARM_THUMB_OPERANDS macro.
18905   In order to use these macros, prefix the number of operands with _
18906   e.g. _3.  */
18907#define OPS_1(a)	   { a, }
18908#define OPS_2(a,b)	   { a,b, }
18909#define OPS_3(a,b,c)	   { a,b,c, }
18910#define OPS_4(a,b,c,d)	   { a,b,c,d, }
18911#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
18912#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
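/* For example, OPS3 (RR, oRR, SH) expands to { OP_RR, OP_oRR, OP_SH, },
   with the trailing OP_stop supplied by the zero initialization described
   above, while OPS_3 (OP_RR, OP_oRR, OP_SH) leaves its arguments
   untouched.  */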
18913
18914/* These macros abstract out the exact format of the mnemonic table and
18915   save some repeated characters.  */
18916
18917/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
18918#define TxCE(mnem, op, top, nops, ops, ae, te) \
18919  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
18920    THUMB_VARIANT, do_##ae, do_##te }
18921
18922/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
18923   a T_MNEM_xyz enumerator.  */
18924#define TCE(mnem, aop, top, nops, ops, ae, te) \
18925      TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
18926#define tCE(mnem, aop, top, nops, ops, ae, te) \
18927      TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)
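/* For example, the entry
     tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c)
   in the table below expands to
     { "and", { OP_RR, OP_oRR, OP_SH, }, OT_csuffix, 0x0000000, T_MNEM_and,
       ARM_VARIANT, THUMB_VARIANT, do_arit, do_t_arit3c },
   i.e. the ARM opcode is given numerically while the Thumb half is
   identified by the T_MNEM_and enumerator rather than a fixed opcode.  */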
18928
18929/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
18930   infix after the third character.  */
18931#define TxC3(mnem, op, top, nops, ops, ae, te) \
18932  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
18933    THUMB_VARIANT, do_##ae, do_##te }
18934#define TxC3w(mnem, op, top, nops, ops, ae, te) \
18935  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
18936    THUMB_VARIANT, do_##ae, do_##te }
18937#define TC3(mnem, aop, top, nops, ops, ae, te) \
18938      TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
18939#define TC3w(mnem, aop, top, nops, ops, ae, te) \
18940      TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
18941#define tC3(mnem, aop, top, nops, ops, ae, te) \
18942      TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
18943#define tC3w(mnem, aop, top, nops, ops, ae, te) \
18944      TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)
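/* With OT_cinfix3 the condition is written after the third character of
   the base mnemonic, so the "ands" entry below also matches spellings
   such as "andeqs" and "andnes".  */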
18945
18946/* Mnemonic that cannot be conditionalized.  The ARM condition-code
18947   field is still 0xE.  Many of the Thumb variants can be executed
18948   conditionally, so this is checked separately.  */
18949#define TUE(mnem, op, top, nops, ops, ae, te)				\
18950  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
18951    THUMB_VARIANT, do_##ae, do_##te }
18952
18953/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
18954   Used by mnemonics that have very minimal differences in the encoding for
18955   ARM and Thumb variants and can be handled in a common function.  */
18956#define TUEc(mnem, op, top, nops, ops, en) \
18957  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
18958    THUMB_VARIANT, do_##en, do_##en }
18959
18960/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
18961   condition code field.  */
18962#define TUF(mnem, op, top, nops, ops, ae, te)				\
18963  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
18964    THUMB_VARIANT, do_##ae, do_##te }
18965
18966/* ARM-only variants of all the above.  */
18967#define CE(mnem,  op, nops, ops, ae)	\
18968  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
18969
18970#define C3(mnem, op, nops, ops, ae)	\
18971  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
18972
18973/* Legacy mnemonics that always have conditional infix after the third
18974   character.  */
18975#define CL(mnem, op, nops, ops, ae)	\
18976  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
18977    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
18978
18979/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
18980#define cCE(mnem,  op, nops, ops, ae)	\
18981  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
18982
18983/* Legacy coprocessor instructions where conditional infix and conditional
18984   suffix are ambiguous.  For consistency this includes all FPA instructions,
18985   not just the potentially ambiguous ones.  */
18986#define cCL(mnem, op, nops, ops, ae)	\
18987  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
18988    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
18989
18990/* Coprocessor, takes either a suffix or a position-3 infix
18991   (for an FPA corner case). */
18992#define C3E(mnem, op, nops, ops, ae) \
18993  { mnem, OPS##nops ops, OT_csuf_or_in3, \
18994    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
18995
18996#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
18997  { m1 #m2 m3, OPS##nops ops, \
18998    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
18999    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
19000
19001#define CM(m1, m2, op, nops, ops, ae)	\
19002  xCM_ (m1,   , m2, op, nops, ops, ae),	\
19003  xCM_ (m1, eq, m2, op, nops, ops, ae),	\
19004  xCM_ (m1, ne, m2, op, nops, ops, ae),	\
19005  xCM_ (m1, cs, m2, op, nops, ops, ae),	\
19006  xCM_ (m1, hs, m2, op, nops, ops, ae),	\
19007  xCM_ (m1, cc, m2, op, nops, ops, ae),	\
19008  xCM_ (m1, ul, m2, op, nops, ops, ae),	\
19009  xCM_ (m1, lo, m2, op, nops, ops, ae),	\
19010  xCM_ (m1, mi, m2, op, nops, ops, ae),	\
19011  xCM_ (m1, pl, m2, op, nops, ops, ae),	\
19012  xCM_ (m1, vs, m2, op, nops, ops, ae),	\
19013  xCM_ (m1, vc, m2, op, nops, ops, ae),	\
19014  xCM_ (m1, hi, m2, op, nops, ops, ae),	\
19015  xCM_ (m1, ls, m2, op, nops, ops, ae),	\
19016  xCM_ (m1, ge, m2, op, nops, ops, ae),	\
19017  xCM_ (m1, lt, m2, op, nops, ops, ae),	\
19018  xCM_ (m1, gt, m2, op, nops, ops, ae),	\
19019  xCM_ (m1, le, m2, op, nops, ops, ae),	\
19020  xCM_ (m1, al, m2, op, nops, ops, ae)
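/* CM therefore mass-produces the odd-infix conditional spellings of a
   mnemonic: CM ("smull","s", ...) below yields "smulls" plus one entry per
   condition name ("smulleqs", "smullnes", ...), with a tag relative to
   OT_odd_infix_0 recording the offset at which the condition is spliced
   into the name.  */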
19021
19022#define UE(mnem, op, nops, ops, ae)	\
19023  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
19024
19025#define UF(mnem, op, nops, ops, ae)	\
19026  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
19027
19028/* Neon data-processing. ARM versions are unconditional with cond=0xf.
19029   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
19030   use the same encoding function for each.  */
19031#define NUF(mnem, op, nops, ops, enc)					\
19032  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
19033    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
19034
19035/* Neon data processing, version which indirects through neon_enc_tab for
19036   the various overloaded versions of opcodes.  */
19037#define nUF(mnem, op, nops, ops, enc)					\
19038  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
19039    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
19040
19041/* Neon insn with conditional suffix for the ARM version, non-overloaded
19042   version.  */
19043#define NCE_tag(mnem, op, nops, ops, enc, tag)				\
19044  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,		\
19045    THUMB_VARIANT, do_##enc, do_##enc }
19046
19047#define NCE(mnem, op, nops, ops, enc)					\
19048   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
19049
19050#define NCEF(mnem, op, nops, ops, enc)					\
19051    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
19052
19053/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
19054#define nCE_tag(mnem, op, nops, ops, enc, tag)				\
19055  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,		\
19056    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
19057
19058#define nCE(mnem, op, nops, ops, enc)					\
19059   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
19060
19061#define nCEF(mnem, op, nops, ops, enc)					\
19062    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
19063
19064#define do_0 0
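/* do_0 lets a table entry name "0" as its encoder: do_##ae then expands
   to do_0, i.e. 0 (no encoder), for entries such as the Thumb-only
   instructions further down whose ARM half does not exist.  */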
19065
19066static const struct asm_opcode insns[] =
19067{
19068#define ARM_VARIANT    & arm_ext_v1 /* Core ARM Instructions.  */
19069#define THUMB_VARIANT  & arm_ext_v4t
19070 tCE("and",	0000000, _and,     3, (RR, oRR, SH), arit, t_arit3c),
19071 tC3("ands",	0100000, _ands,	   3, (RR, oRR, SH), arit, t_arit3c),
19072 tCE("eor",	0200000, _eor,	   3, (RR, oRR, SH), arit, t_arit3c),
19073 tC3("eors",	0300000, _eors,	   3, (RR, oRR, SH), arit, t_arit3c),
19074 tCE("sub",	0400000, _sub,	   3, (RR, oRR, SH), arit, t_add_sub),
19075 tC3("subs",	0500000, _subs,	   3, (RR, oRR, SH), arit, t_add_sub),
19076 tCE("add",	0800000, _add,	   3, (RR, oRR, SHG), arit, t_add_sub),
19077 tC3("adds",	0900000, _adds,	   3, (RR, oRR, SHG), arit, t_add_sub),
19078 tCE("adc",	0a00000, _adc,	   3, (RR, oRR, SH), arit, t_arit3c),
19079 tC3("adcs",	0b00000, _adcs,	   3, (RR, oRR, SH), arit, t_arit3c),
19080 tCE("sbc",	0c00000, _sbc,	   3, (RR, oRR, SH), arit, t_arit3),
19081 tC3("sbcs",	0d00000, _sbcs,	   3, (RR, oRR, SH), arit, t_arit3),
19082 tCE("orr",	1800000, _orr,	   3, (RR, oRR, SH), arit, t_arit3c),
19083 tC3("orrs",	1900000, _orrs,	   3, (RR, oRR, SH), arit, t_arit3c),
19084 tCE("bic",	1c00000, _bic,	   3, (RR, oRR, SH), arit, t_arit3),
19085 tC3("bics",	1d00000, _bics,	   3, (RR, oRR, SH), arit, t_arit3),
19086
19087 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
19088    for setting PSR flag bits.  They are obsolete in V6 and do not
19089    have Thumb equivalents. */
19090 tCE("tst",	1100000, _tst,	   2, (RR, SH),      cmp,  t_mvn_tst),
19091 tC3w("tsts",	1100000, _tst,	   2, (RR, SH),      cmp,  t_mvn_tst),
19092  CL("tstp",	110f000,     	   2, (RR, SH),      cmp),
19093 tCE("cmp",	1500000, _cmp,	   2, (RR, SH),      cmp,  t_mov_cmp),
19094 tC3w("cmps",	1500000, _cmp,	   2, (RR, SH),      cmp,  t_mov_cmp),
19095  CL("cmpp",	150f000,     	   2, (RR, SH),      cmp),
19096 tCE("cmn",	1700000, _cmn,	   2, (RR, SH),      cmp,  t_mvn_tst),
19097 tC3w("cmns",	1700000, _cmn,	   2, (RR, SH),      cmp,  t_mvn_tst),
19098  CL("cmnp",	170f000,     	   2, (RR, SH),      cmp),
19099
19100 tCE("mov",	1a00000, _mov,	   2, (RR, SH),      mov,  t_mov_cmp),
19101 tC3("movs",	1b00000, _movs,	   2, (RR, SHG),     mov,  t_mov_cmp),
19102 tCE("mvn",	1e00000, _mvn,	   2, (RR, SH),      mov,  t_mvn_tst),
19103 tC3("mvns",	1f00000, _mvns,	   2, (RR, SH),      mov,  t_mvn_tst),
19104
19105 tCE("ldr",	4100000, _ldr,	   2, (RR, ADDRGLDR),ldst, t_ldst),
19106 tC3("ldrb",	4500000, _ldrb,	   2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
19107 tCE("str",	4000000, _str,	   _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
19108								OP_RRnpc),
19109					OP_ADDRGLDR),ldst, t_ldst),
19110 tC3("strb",	4400000, _strb,	   2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
19111
19112 tCE("stm",	8800000, _stmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
19113 tC3("stmia",	8800000, _stmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
19114 tC3("stmea",	8800000, _stmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
19115 tCE("ldm",	8900000, _ldmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
19116 tC3("ldmia",	8900000, _ldmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
19117 tC3("ldmfd",	8900000, _ldmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
19118
19119 TCE("swi",	f000000, df00,     1, (EXPi),        swi, t_swi),
19120 TCE("svc",	f000000, df00,     1, (EXPi),        swi, t_swi),
19121 tCE("b",	a000000, _b,	   1, (EXPr),	     branch, t_branch),
19122 TCE("bl",	b000000, f000f800, 1, (EXPr),	     bl, t_branch23),
19123
19124  /* Pseudo ops.  */
19125 tCE("adr",	28f0000, _adr,	   2, (RR, EXP),     adr,  t_adr),
19126  C3(adrl,	28f0000,           2, (RR, EXP),     adrl),
19127 tCE("nop",	1a00000, _nop,	   1, (oI255c),	     nop,  t_nop),
19128 tCE("udf",	7f000f0, _udf,     1, (oIffffb),     bkpt, t_udf),
19129
19130  /* Thumb-compatibility pseudo ops.  */
19131 tCE("lsl",	1a00000, _lsl,	   3, (RR, oRR, SH), shift, t_shift),
19132 tC3("lsls",	1b00000, _lsls,	   3, (RR, oRR, SH), shift, t_shift),
19133 tCE("lsr",	1a00020, _lsr,	   3, (RR, oRR, SH), shift, t_shift),
19134 tC3("lsrs",	1b00020, _lsrs,	   3, (RR, oRR, SH), shift, t_shift),
19135 tCE("asr",	1a00040, _asr,	   3, (RR, oRR, SH), shift, t_shift),
19136 tC3("asrs",      1b00040, _asrs,     3, (RR, oRR, SH), shift, t_shift),
19137 tCE("ror",	1a00060, _ror,	   3, (RR, oRR, SH), shift, t_shift),
19138 tC3("rors",	1b00060, _rors,	   3, (RR, oRR, SH), shift, t_shift),
19139 tCE("neg",	2600000, _neg,	   2, (RR, RR),      rd_rn, t_neg),
19140 tC3("negs",	2700000, _negs,	   2, (RR, RR),      rd_rn, t_neg),
19141 tCE("push",	92d0000, _push,     1, (REGLST),	     push_pop, t_push_pop),
19142 tCE("pop",	8bd0000, _pop,	   1, (REGLST),	     push_pop, t_push_pop),
19143
19144 /* These may simplify to neg.  */
19145 TCE("rsb",	0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
19146 TC3("rsbs",	0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
19147
19148#undef  THUMB_VARIANT
19149#define THUMB_VARIANT  & arm_ext_v6
19150
19151 TCE("cpy",       1a00000, 4600,     2, (RR, RR),      rd_rm, t_cpy),
19152
19153 /* V1 instructions with no Thumb analogue prior to V6T2.  */
19154#undef  THUMB_VARIANT
19155#define THUMB_VARIANT  & arm_ext_v6t2
19156
19157 TCE("teq",	1300000, ea900f00, 2, (RR, SH),      cmp,  t_mvn_tst),
19158 TC3w("teqs",	1300000, ea900f00, 2, (RR, SH),      cmp,  t_mvn_tst),
19159  CL("teqp",	130f000,           2, (RR, SH),      cmp),
19160
19161 TC3("ldrt",	4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19162 TC3("ldrbt",	4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19163 TC3("strt",	4200000, f8400e00, 2, (RR_npcsp, ADDR),   ldstt, t_ldstt),
19164 TC3("strbt",	4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19165
19166 TC3("stmdb",	9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19167 TC3("stmfd",     9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19168
19169 TC3("ldmdb",	9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19170 TC3("ldmea",	9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19171
19172 /* V1 instructions with no Thumb analogue at all.  */
19173  CE("rsc",	0e00000,	   3, (RR, oRR, SH), arit),
19174  C3(rscs,	0f00000,	   3, (RR, oRR, SH), arit),
19175
19176  C3(stmib,	9800000,	   2, (RRw, REGLST), ldmstm),
19177  C3(stmfa,	9800000,	   2, (RRw, REGLST), ldmstm),
19178  C3(stmda,	8000000,	   2, (RRw, REGLST), ldmstm),
19179  C3(stmed,	8000000,	   2, (RRw, REGLST), ldmstm),
19180  C3(ldmib,	9900000,	   2, (RRw, REGLST), ldmstm),
19181  C3(ldmed,	9900000,	   2, (RRw, REGLST), ldmstm),
19182  C3(ldmda,	8100000,	   2, (RRw, REGLST), ldmstm),
19183  C3(ldmfa,	8100000,	   2, (RRw, REGLST), ldmstm),
19184
19185#undef  ARM_VARIANT
19186#define ARM_VARIANT    & arm_ext_v2	/* ARM 2 - multiplies.	*/
19187#undef  THUMB_VARIANT
19188#define THUMB_VARIANT  & arm_ext_v4t
19189
19190 tCE("mul",	0000090, _mul,	   3, (RRnpc, RRnpc, oRR), mul, t_mul),
19191 tC3("muls",	0100090, _muls,	   3, (RRnpc, RRnpc, oRR), mul, t_mul),
19192
19193#undef  THUMB_VARIANT
19194#define THUMB_VARIANT  & arm_ext_v6t2
19195
19196 TCE("mla",	0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
19197  C3(mlas,	0300090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
19198
19199  /* Generic coprocessor instructions.	*/
19200 TCE("cdp",	e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp,    cdp),
19201 TCE("ldc",	c100000, ec100000, 3, (RCP, RCN, ADDRGLDC),	        lstc,   lstc),
19202 TC3("ldcl",	c500000, ec500000, 3, (RCP, RCN, ADDRGLDC),	        lstc,   lstc),
19203 TCE("stc",	c000000, ec000000, 3, (RCP, RCN, ADDRGLDC),	        lstc,   lstc),
19204 TC3("stcl",	c400000, ec400000, 3, (RCP, RCN, ADDRGLDC),	        lstc,   lstc),
19205 TCE("mcr",	e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b),   co_reg, co_reg),
19206 TCE("mrc",	e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b),   co_reg, co_reg),
19207
19208#undef  ARM_VARIANT
19209#define ARM_VARIANT  & arm_ext_v2s /* ARM 3 - swp instructions.  */
19210
19211  CE("swp",	1000090,           3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
19212  C3(swpb,	1400090,           3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
19213
19214#undef  ARM_VARIANT
19215#define ARM_VARIANT    & arm_ext_v3	/* ARM 6 Status register instructions.	*/
19216#undef  THUMB_VARIANT
19217#define THUMB_VARIANT  & arm_ext_msr
19218
19219 TCE("mrs",	1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
19220 TCE("msr",	120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
19221
19222#undef  ARM_VARIANT
19223#define ARM_VARIANT    & arm_ext_v3m	 /* ARM 7M long multiplies.  */
19224#undef  THUMB_VARIANT
19225#define THUMB_VARIANT  & arm_ext_v6t2
19226
19227 TCE("smull",	0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19228  CM("smull","s",	0d00090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19229 TCE("umull",	0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19230  CM("umull","s",	0900090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19231 TCE("smlal",	0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19232  CM("smlal","s",	0f00090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19233 TCE("umlal",	0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19234  CM("umlal","s",	0b00090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19235
19236#undef  ARM_VARIANT
19237#define ARM_VARIANT    & arm_ext_v4	/* ARM Architecture 4.	*/
19238#undef  THUMB_VARIANT
19239#define THUMB_VARIANT  & arm_ext_v4t
19240
19241 tC3("ldrh",	01000b0, _ldrh,     2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19242 tC3("strh",	00000b0, _strh,     2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19243 tC3("ldrsh",	01000f0, _ldrsh,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19244 tC3("ldrsb",	01000d0, _ldrsb,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19245 tC3("ldsh",	01000f0, _ldrsh,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19246 tC3("ldsb",	01000d0, _ldrsb,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19247
19248#undef  ARM_VARIANT
19249#define ARM_VARIANT  & arm_ext_v4t_5
19250
19251  /* ARM Architecture 4T.  */
19252  /* Note: bx (and blx) are required on V5, even if the processor does
19253     not support Thumb.	 */
19254 TCE("bx",	12fff10, 4700, 1, (RR),	bx, t_bx),
19255
19256#undef  ARM_VARIANT
19257#define ARM_VARIANT    & arm_ext_v5 /*  ARM Architecture 5T.	 */
19258#undef  THUMB_VARIANT
19259#define THUMB_VARIANT  & arm_ext_v5t
19260
19261  /* Note: blx has 2 variants; the .value coded here is for
19262     BLX(2).  Only this variant has conditional execution.  */
19263 TCE("blx",	12fff30, 4780, 1, (RR_EXr),			    blx,  t_blx),
19264 TUE("bkpt",	1200070, be00, 1, (oIffffb),			    bkpt, t_bkpt),
19265
19266#undef  THUMB_VARIANT
19267#define THUMB_VARIANT  & arm_ext_v6t2
19268
19269 TCE("clz",	16f0f10, fab0f080, 2, (RRnpc, RRnpc),		        rd_rm,  t_clz),
19270 TUF("ldc2",	c100000, fc100000, 3, (RCP, RCN, ADDRGLDC),	        lstc,	lstc),
19271 TUF("ldc2l",	c500000, fc500000, 3, (RCP, RCN, ADDRGLDC),		        lstc,	lstc),
19272 TUF("stc2",	c000000, fc000000, 3, (RCP, RCN, ADDRGLDC),	        lstc,	lstc),
19273 TUF("stc2l",	c400000, fc400000, 3, (RCP, RCN, ADDRGLDC),		        lstc,	lstc),
19274 TUF("cdp2",	e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp,    cdp),
19275 TUF("mcr2",	e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b),   co_reg, co_reg),
19276 TUF("mrc2",	e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b),   co_reg, co_reg),
19277
19278#undef  ARM_VARIANT
19279#define ARM_VARIANT    & arm_ext_v5exp /*  ARM Architecture 5TExP.  */
19280#undef  THUMB_VARIANT
19281#define THUMB_VARIANT  & arm_ext_v5exp
19282
19283 TCE("smlabb",	1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
19284 TCE("smlatb",	10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
19285 TCE("smlabt",	10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
19286 TCE("smlatt",	10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
19287
19288 TCE("smlawb",	1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
19289 TCE("smlawt",	12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
19290
19291 TCE("smlalbb",	1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
19292 TCE("smlaltb",	14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
19293 TCE("smlalbt",	14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
19294 TCE("smlaltt",	14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
19295
19296 TCE("smulbb",	1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
19297 TCE("smultb",	16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
19298 TCE("smulbt",	16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
19299 TCE("smultt",	16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
19300
19301 TCE("smulwb",	12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
19302 TCE("smulwt",	12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
19303
19304 TCE("qadd",	1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc),	    rd_rm_rn, t_simd2),
19305 TCE("qdadd",	1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc),	    rd_rm_rn, t_simd2),
19306 TCE("qsub",	1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc),	    rd_rm_rn, t_simd2),
19307 TCE("qdsub",	1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc),	    rd_rm_rn, t_simd2),
19308
19309#undef  ARM_VARIANT
19310#define ARM_VARIANT    & arm_ext_v5e /*  ARM Architecture 5TE.  */
19311#undef  THUMB_VARIANT
19312#define THUMB_VARIANT  & arm_ext_v6t2
19313
19314 TUF("pld",	450f000, f810f000, 1, (ADDR),		     pld,  t_pld),
19315 TC3("ldrd",	00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
19316     ldrd, t_ldstd),
19317 TC3("strd",	00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
19318				       ADDRGLDRS), ldrd, t_ldstd),
19319
19320 TCE("mcrr",	c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19321 TCE("mrrc",	c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19322
19323#undef  ARM_VARIANT
19324#define ARM_VARIANT  & arm_ext_v5j /*  ARM Architecture 5TEJ.  */
19325
19326 TCE("bxj",	12fff20, f3c08f00, 1, (RR),			  bxj, t_bxj),
19327
19328#undef  ARM_VARIANT
19329#define ARM_VARIANT    & arm_ext_v6 /*  ARM V6.  */
19330#undef  THUMB_VARIANT
19331#define THUMB_VARIANT  & arm_ext_v6
19332
19333 TUF("cpsie",     1080000, b660,     2, (CPSF, oI31b),              cpsi,   t_cpsi),
19334 TUF("cpsid",     10c0000, b670,     2, (CPSF, oI31b),              cpsi,   t_cpsi),
19335 tCE("rev",       6bf0f30, _rev,      2, (RRnpc, RRnpc),             rd_rm,  t_rev),
19336 tCE("rev16",     6bf0fb0, _rev16,    2, (RRnpc, RRnpc),             rd_rm,  t_rev),
19337 tCE("revsh",     6ff0fb0, _revsh,    2, (RRnpc, RRnpc),             rd_rm,  t_rev),
19338 tCE("sxth",      6bf0070, _sxth,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
19339 tCE("uxth",      6ff0070, _uxth,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
19340 tCE("sxtb",      6af0070, _sxtb,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
19341 tCE("uxtb",      6ef0070, _uxtb,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
19342 TUF("setend",    1010000, b650,     1, (ENDI),                     setend, t_setend),
19343
19344#undef  THUMB_VARIANT
19345#define THUMB_VARIANT  & arm_ext_v6t2_v8m
19346
19347 TCE("ldrex",	1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR),	  ldrex, t_ldrex),
19348 TCE("strex",	1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19349				      strex,  t_strex),
19350#undef  THUMB_VARIANT
19351#define THUMB_VARIANT  & arm_ext_v6t2
19352
19353 TUF("mcrr2",	c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19354 TUF("mrrc2",	c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19355
19356 TCE("ssat",	6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat,   t_ssat),
19357 TCE("usat",	6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat,   t_usat),
19358
19359/*  ARM V6 not included in V7M.  */
19360#undef  THUMB_VARIANT
19361#define THUMB_VARIANT  & arm_ext_v6_notm
19362 TUF("rfeia",	8900a00, e990c000, 1, (RRw),			   rfe, rfe),
19363 TUF("rfe",	8900a00, e990c000, 1, (RRw),			   rfe, rfe),
19364  UF(rfeib,	9900a00,           1, (RRw),			   rfe),
19365  UF(rfeda,	8100a00,           1, (RRw),			   rfe),
19366 TUF("rfedb",	9100a00, e810c000, 1, (RRw),			   rfe, rfe),
19367 TUF("rfefd",	8900a00, e990c000, 1, (RRw),			   rfe, rfe),
19368  UF(rfefa,	8100a00,           1, (RRw),			   rfe),
19369 TUF("rfeea",	9100a00, e810c000, 1, (RRw),			   rfe, rfe),
19370  UF(rfeed,	9900a00,           1, (RRw),			   rfe),
19371 TUF("srsia",	8c00500, e980c000, 2, (oRRw, I31w),		   srs,  srs),
19372 TUF("srs",	8c00500, e980c000, 2, (oRRw, I31w),		   srs,  srs),
19373 TUF("srsea",	8c00500, e980c000, 2, (oRRw, I31w),		   srs,  srs),
19374  UF(srsib,	9c00500,           2, (oRRw, I31w),		   srs),
19375  UF(srsfa,	9c00500,           2, (oRRw, I31w),		   srs),
19376  UF(srsda,	8400500,	   2, (oRRw, I31w),		   srs),
19377  UF(srsed,	8400500,	   2, (oRRw, I31w),		   srs),
19378 TUF("srsdb",	9400500, e800c000, 2, (oRRw, I31w),		   srs,  srs),
19379 TUF("srsfd",	9400500, e800c000, 2, (oRRw, I31w),		   srs,  srs),
19380 TUF("cps",	1020000, f3af8100, 1, (I31b),			  imm0, t_cps),
19381
19382/*  ARM V6 not included in V7M (e.g. integer SIMD).  */
19383#undef  THUMB_VARIANT
19384#define THUMB_VARIANT  & arm_ext_v6_dsp
19385 TCE("pkhbt",	6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll),   pkhbt, t_pkhbt),
19386 TCE("pkhtb",	6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar),   pkhtb, t_pkhtb),
19387 TCE("qadd16",	6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19388 TCE("qadd8",	6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19389 TCE("qasx",	6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19390 /* Old name for QASX.  */
19391 TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19392 TCE("qsax",	6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19393 /* Old name for QSAX.  */
19394 TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19395 TCE("qsub16",	6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19396 TCE("qsub8",	6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19397 TCE("sadd16",	6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19398 TCE("sadd8",	6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19399 TCE("sasx",	6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19400 /* Old name for SASX.  */
19401 TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19402 TCE("shadd16",	6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19403 TCE("shadd8",	6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19404 TCE("shasx",   6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19405 /* Old name for SHASX.  */
19406 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19407 TCE("shsax",     6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19408 /* Old name for SHSAX.  */
19409 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19410 TCE("shsub16",	6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19411 TCE("shsub8",	6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19412 TCE("ssax",	6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19413 /* Old name for SSAX.  */
19414 TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19415 TCE("ssub16",	6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19416 TCE("ssub8",	6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19417 TCE("uadd16",	6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19418 TCE("uadd8",	6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19419 TCE("uasx",	6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19420 /* Old name for UASX.  */
19421 TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19422 TCE("uhadd16",	6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19423 TCE("uhadd8",	6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19424 TCE("uhasx",   6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19425 /* Old name for UHASX.  */
19426 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19427 TCE("uhsax",     6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19428 /* Old name for UHSAX.  */
19429 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19430 TCE("uhsub16",	6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19431 TCE("uhsub8",	6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19432 TCE("uqadd16",	6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19433 TCE("uqadd8",	6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19434 TCE("uqasx",   6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19435 /* Old name for UQASX.  */
19436 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19437 TCE("uqsax",     6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19438 /* Old name for UQSAX.  */
19439 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19440 TCE("uqsub16",	6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19441 TCE("uqsub8",	6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19442 TCE("usub16",	6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19443 TCE("usax",	6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19444 /* Old name for USAX.  */
19445 TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19446 TCE("usub8",	6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19447 TCE("sxtah",	6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19448 TCE("sxtab16",	6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19449 TCE("sxtab",	6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19450 TCE("sxtb16",	68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR),	   sxth,  t_sxth),
19451 TCE("uxtah",	6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19452 TCE("uxtab16",	6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19453 TCE("uxtab",	6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19454 TCE("uxtb16",	6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR),	   sxth,  t_sxth),
19455 TCE("sel",	6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19456 TCE("smlad",	7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19457 TCE("smladx",	7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19458 TCE("smlald",	7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19459 TCE("smlaldx",	7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19460 TCE("smlsd",	7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19461 TCE("smlsdx",	7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19462 TCE("smlsld",	7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19463 TCE("smlsldx",	7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19464 TCE("smmla",	7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19465 TCE("smmlar",	7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19466 TCE("smmls",	75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19467 TCE("smmlsr",	75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19468 TCE("smmul",	750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
19469 TCE("smmulr",	750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
19470 TCE("smuad",	700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
19471 TCE("smuadx",	700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
19472 TCE("smusd",	700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
19473 TCE("smusdx",	700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
19474 TCE("ssat16",	6a00f30, f3200000, 3, (RRnpc, I16, RRnpc),	   ssat16, t_ssat16),
19475 TCE("umaal",	0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,  t_mlal),
19476 TCE("usad8",	780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc),	   smul,   t_simd),
19477 TCE("usada8",	7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla,   t_mla),
19478 TCE("usat16",	6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc),	   usat16, t_usat16),
19479
19480#undef  ARM_VARIANT
19481#define ARM_VARIANT   & arm_ext_v6k
19482#undef  THUMB_VARIANT
19483#define THUMB_VARIANT & arm_ext_v6k
19484
19485 tCE("yield",	320f001, _yield,    0, (), noargs, t_hint),
19486 tCE("wfe",	320f002, _wfe,      0, (), noargs, t_hint),
19487 tCE("wfi",	320f003, _wfi,      0, (), noargs, t_hint),
19488 tCE("sev",	320f004, _sev,      0, (), noargs, t_hint),
19489
19490#undef  THUMB_VARIANT
19491#define THUMB_VARIANT  & arm_ext_v6_notm
19492 TCE("ldrexd",	1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
19493				      ldrexd, t_ldrexd),
19494 TCE("strexd",	1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
19495				       RRnpcb), strexd, t_strexd),
19496
19497#undef  THUMB_VARIANT
19498#define THUMB_VARIANT  & arm_ext_v6t2_v8m
19499 TCE("ldrexb",	1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
19500     rd_rn,  rd_rn),
19501 TCE("ldrexh",	1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
19502     rd_rn,  rd_rn),
19503 TCE("strexb",	1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19504     strex, t_strexbh),
19505 TCE("strexh",	1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19506     strex, t_strexbh),
19507 TUF("clrex",	57ff01f, f3bf8f2f, 0, (),			      noargs, noargs),
19508
19509#undef  ARM_VARIANT
19510#define ARM_VARIANT    & arm_ext_sec
19511#undef  THUMB_VARIANT
19512#define THUMB_VARIANT  & arm_ext_sec
19513
19514 TCE("smc",	1600070, f7f08000, 1, (EXPi), smc, t_smc),
19515
19516#undef	ARM_VARIANT
19517#define	ARM_VARIANT    & arm_ext_virt
19518#undef	THUMB_VARIANT
19519#define	THUMB_VARIANT    & arm_ext_virt
19520
19521 TCE("hvc",	1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
19522 TCE("eret",	160006e, f3de8f00, 0, (), noargs, noargs),
19523
19524#undef	ARM_VARIANT
19525#define	ARM_VARIANT    & arm_ext_pan
19526#undef	THUMB_VARIANT
19527#define	THUMB_VARIANT  & arm_ext_pan
19528
19529 TUF("setpan",	1100000, b610, 1, (I7), setpan, t_setpan),
19530
19531#undef  ARM_VARIANT
19532#define ARM_VARIANT    & arm_ext_v6t2
19533#undef  THUMB_VARIANT
19534#define THUMB_VARIANT  & arm_ext_v6t2
19535
19536 TCE("bfc",	7c0001f, f36f0000, 3, (RRnpc, I31, I32),	   bfc, t_bfc),
19537 TCE("bfi",	7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
19538 TCE("sbfx",	7a00050, f3400000, 4, (RR, RR, I31, I32),	   bfx, t_bfx),
19539 TCE("ubfx",	7e00050, f3c00000, 4, (RR, RR, I31, I32),	   bfx, t_bfx),
19540
19541 TCE("mls",	0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
19542 TCE("rbit",	6ff0f30, fa90f0a0, 2, (RR, RR),			    rd_rm, t_rbit),
19543
19544 TC3("ldrht",	03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19545 TC3("ldrsht",	03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19546 TC3("ldrsbt",	03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19547 TC3("strht",	02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19548
19549#undef  THUMB_VARIANT
19550#define THUMB_VARIANT  & arm_ext_v6t2_v8m
19551 TCE("movw",	3000000, f2400000, 2, (RRnpc, HALF),		    mov16, t_mov16),
19552 TCE("movt",	3400000, f2c00000, 2, (RRnpc, HALF),		    mov16, t_mov16),
19553
19554 /* Thumb-only instructions.  */
19555#undef  ARM_VARIANT
19556#define ARM_VARIANT NULL
19557  TUE("cbnz",     0,           b900,     2, (RR, EXP), 0, t_cbz),
19558  TUE("cbz",      0,           b100,     2, (RR, EXP), 0, t_cbz),
19559
19560 /* ARM does not really have an IT instruction, so always allow it.
19561    The opcode is copied from Thumb in order to allow warnings in
19562    -mimplicit-it=[never | arm] modes.  */
19563#undef  ARM_VARIANT
19564#define ARM_VARIANT  & arm_ext_v1
19565#undef  THUMB_VARIANT
19566#define THUMB_VARIANT  & arm_ext_v6t2
19567
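 /* For example, "itte ne" makes the following three instructions conditional
    on NE, NE and EQ respectively.  */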
19568 TUE("it",        bf08,        bf08,     1, (COND),   it,    t_it),
19569 TUE("itt",       bf0c,        bf0c,     1, (COND),   it,    t_it),
19570 TUE("ite",       bf04,        bf04,     1, (COND),   it,    t_it),
19571 TUE("ittt",      bf0e,        bf0e,     1, (COND),   it,    t_it),
19572 TUE("itet",      bf06,        bf06,     1, (COND),   it,    t_it),
19573 TUE("itte",      bf0a,        bf0a,     1, (COND),   it,    t_it),
19574 TUE("itee",      bf02,        bf02,     1, (COND),   it,    t_it),
19575 TUE("itttt",     bf0f,        bf0f,     1, (COND),   it,    t_it),
19576 TUE("itett",     bf07,        bf07,     1, (COND),   it,    t_it),
19577 TUE("ittet",     bf0b,        bf0b,     1, (COND),   it,    t_it),
19578 TUE("iteet",     bf03,        bf03,     1, (COND),   it,    t_it),
19579 TUE("ittte",     bf0d,        bf0d,     1, (COND),   it,    t_it),
19580 TUE("itete",     bf05,        bf05,     1, (COND),   it,    t_it),
19581 TUE("ittee",     bf09,        bf09,     1, (COND),   it,    t_it),
19582 TUE("iteee",     bf01,        bf01,     1, (COND),   it,    t_it),
19583 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent.  */
19584 TC3("rrx",       01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
19585 TC3("rrxs",      01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
19586
19587 /* Thumb-2-only instructions.  */
19588#undef  ARM_VARIANT
19589#define ARM_VARIANT  NULL
19590
19591 TCE("addw",	0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
19592 TCE("subw",	0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
19593 TCE("orn",       0, ea600000, 3, (RR, oRR, SH),  0, t_orn),
19594 TCE("orns",      0, ea700000, 3, (RR, oRR, SH),  0, t_orn),
19595 TCE("tbb",       0, e8d0f000, 1, (TB), 0, t_tb),
19596 TCE("tbh",       0, e8d0f010, 1, (TB), 0, t_tb),
19597
19598 /* Hardware division instructions.  */
19599#undef  ARM_VARIANT
19600#define ARM_VARIANT    & arm_ext_adiv
19601#undef  THUMB_VARIANT
19602#define THUMB_VARIANT  & arm_ext_div
19603
19604 TCE("sdiv",	710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
19605 TCE("udiv",	730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
19606
19607 /* ARM V6M/V7 instructions.  */
19608#undef  ARM_VARIANT
19609#define ARM_VARIANT    & arm_ext_barrier
19610#undef  THUMB_VARIANT
19611#define THUMB_VARIANT  & arm_ext_barrier
19612
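 /* The barrier option is optional; plain "dsb" assembles as "dsb sy".  */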
19613 TUF("dmb",	57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
19614 TUF("dsb",	57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
19615 TUF("isb",	57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
19616
19617 /* ARM V7 instructions.  */
19618#undef  ARM_VARIANT
19619#define ARM_VARIANT    & arm_ext_v7
19620#undef  THUMB_VARIANT
19621#define THUMB_VARIANT  & arm_ext_v7
19622
19623 TUF("pli",	450f000, f910f000, 1, (ADDR),	  pli,	    t_pld),
19624 TCE("dbg",	320f0f0, f3af80f0, 1, (I15),	  dbg,	    t_dbg),
19625
19626#undef  ARM_VARIANT
19627#define ARM_VARIANT    & arm_ext_mp
19628#undef  THUMB_VARIANT
19629#define THUMB_VARIANT  & arm_ext_mp
19630
19631 TUF("pldw",	410f000, f830f000, 1, (ADDR),	pld,	t_pld),
19632
19633 /* ARMv8 instructions.  */
19634#undef  ARM_VARIANT
19635#define ARM_VARIANT   & arm_ext_v8
19636
19637/* Instructions shared between ARMv8-A and ARMv8-M.  */
19638#undef  THUMB_VARIANT
19639#define THUMB_VARIANT & arm_ext_atomics
19640
19641 TCE("lda",	1900c9f, e8d00faf, 2, (RRnpc, RRnpcb),	rd_rn,	rd_rn),
19642 TCE("ldab",	1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb),	rd_rn,  rd_rn),
19643 TCE("ldah",	1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb),	rd_rn,  rd_rn),
19644 TCE("stl",	180fc90, e8c00faf, 2, (RRnpc, RRnpcb),	rm_rn,  rd_rn),
19645 TCE("stlb",	1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb),	rm_rn,  rd_rn),
19646 TCE("stlh",	1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb),	rm_rn,  rd_rn),
19647 TCE("ldaex",	1900e9f, e8d00fef, 2, (RRnpc, RRnpcb),	rd_rn,	rd_rn),
19648 TCE("ldaexb",	1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb),	rd_rn,  rd_rn),
19649 TCE("ldaexh",	1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb),	rd_rn,  rd_rn),
19650 TCE("stlex",	1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
19651							stlex,  t_stlex),
19652 TCE("stlexb",	1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
19653							stlex, t_stlex),
19654 TCE("stlexh",	1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
19655							stlex, t_stlex),
19656#undef  THUMB_VARIANT
19657#define THUMB_VARIANT & arm_ext_v8
19658
19659 tCE("sevl",	320f005, _sevl,    0, (),		noargs,	t_hint),
19660 TUE("hlt",	1000070, ba80,     1, (oIffffb),	bkpt,	t_hlt),
19661 TCE("ldaexd",	1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
19662							ldrexd, t_ldrexd),
19663 TCE("stlexd",	1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
19664							strexd, t_strexd),
19665 /* ARMv8 T32 only.  */
19666#undef  ARM_VARIANT
19667#define ARM_VARIANT  NULL
19668 TUF("dcps1",	0,	 f78f8001, 0, (),	noargs, noargs),
19669 TUF("dcps2",	0,	 f78f8002, 0, (),	noargs, noargs),
19670 TUF("dcps3",	0,	 f78f8003, 0, (),	noargs, noargs),
19671
19672  /* FP for ARMv8.  */
19673#undef  ARM_VARIANT
19674#define ARM_VARIANT   & fpu_vfp_ext_armv8xd
19675#undef  THUMB_VARIANT
19676#define THUMB_VARIANT & fpu_vfp_ext_armv8xd
19677
19678  nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD),		vsel),
19679  nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD),		vsel),
19680  nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD),		vsel),
19681  nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD),		vsel),
19682  nUF(vmaxnm, _vmaxnm, 3, (RNSDQ, oRNSDQ, RNSDQ),	vmaxnm),
19683  nUF(vminnm, _vminnm, 3, (RNSDQ, oRNSDQ, RNSDQ),	vmaxnm),
19684  nUF(vcvta,  _vcvta,  2, (RNSDQ, oRNSDQ),		neon_cvta),
19685  nUF(vcvtn,  _vcvta,  2, (RNSDQ, oRNSDQ),		neon_cvtn),
19686  nUF(vcvtp,  _vcvta,  2, (RNSDQ, oRNSDQ),		neon_cvtp),
19687  nUF(vcvtm,  _vcvta,  2, (RNSDQ, oRNSDQ),		neon_cvtm),
19688  nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ),		vrintr),
19689  nCE(vrintz, _vrintr, 2, (RNSDQ, oRNSDQ),		vrintz),
19690  nCE(vrintx, _vrintr, 2, (RNSDQ, oRNSDQ),		vrintx),
19691  nUF(vrinta, _vrinta, 2, (RNSDQ, oRNSDQ),		vrinta),
19692  nUF(vrintn, _vrinta, 2, (RNSDQ, oRNSDQ),		vrintn),
19693  nUF(vrintp, _vrinta, 2, (RNSDQ, oRNSDQ),		vrintp),
19694  nUF(vrintm, _vrinta, 2, (RNSDQ, oRNSDQ),		vrintm),
19695
19696  /* Crypto v1 extensions.  */
19697#undef  ARM_VARIANT
19698#define ARM_VARIANT & fpu_crypto_ext_armv8
19699#undef  THUMB_VARIANT
19700#define THUMB_VARIANT & fpu_crypto_ext_armv8
19701
19702  nUF(aese, _aes, 2, (RNQ, RNQ), aese),
19703  nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
19704  nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
19705  nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
19706  nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
19707  nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
19708  nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
19709  nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
19710  nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
19711  nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
19712  nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
19713  nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
19714  nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
19715  nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
19716
19717#undef  ARM_VARIANT
19718#define ARM_VARIANT   & crc_ext_armv8
19719#undef  THUMB_VARIANT
19720#define THUMB_VARIANT & crc_ext_armv8
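  /* CRC-32 and CRC-32C checksums on byte, halfword and word data.  */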
19721  TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
19722  TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
19723  TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
19724  TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
19725  TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
19726  TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
19727
19728 /* ARMv8.2 RAS extension.  */
19729#undef  ARM_VARIANT
19730#define ARM_VARIANT   & arm_ext_ras
19731#undef  THUMB_VARIANT
19732#define THUMB_VARIANT & arm_ext_ras
19733 TUE ("esb", 320f010, f3af8010, 0, (), noargs,  noargs),
19734
19735#undef  ARM_VARIANT
19736#define ARM_VARIANT  & fpu_fpa_ext_v1  /* Core FPA instruction set (V1).  */
19737#undef  THUMB_VARIANT
19738#define THUMB_VARIANT NULL
19739
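 /* FPA status (FPSR) and control (FPCR) register transfers.  */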
19740 cCE("wfs",	e200110, 1, (RR),	     rd),
19741 cCE("rfs",	e300110, 1, (RR),	     rd),
19742 cCE("wfc",	e400110, 1, (RR),	     rd),
19743 cCE("rfc",	e500110, 1, (RR),	     rd),
19744
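 /* FPA loads and stores; the suffix selects single, double, extended or
    packed format.  */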
19745 cCL("ldfs",	c100100, 2, (RF, ADDRGLDC),  rd_cpaddr),
19746 cCL("ldfd",	c108100, 2, (RF, ADDRGLDC),  rd_cpaddr),
19747 cCL("ldfe",	c500100, 2, (RF, ADDRGLDC),  rd_cpaddr),
19748 cCL("ldfp",	c508100, 2, (RF, ADDRGLDC),  rd_cpaddr),
19749
19750 cCL("stfs",	c000100, 2, (RF, ADDRGLDC),  rd_cpaddr),
19751 cCL("stfd",	c008100, 2, (RF, ADDRGLDC),  rd_cpaddr),
19752 cCL("stfe",	c400100, 2, (RF, ADDRGLDC),  rd_cpaddr),
19753 cCL("stfp",	c408100, 2, (RF, ADDRGLDC),  rd_cpaddr),
19754
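 /* FPA monadic operations (one source register); the suffixes select
    precision and rounding mode.  */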
19755 cCL("mvfs",	e008100, 2, (RF, RF_IF),     rd_rm),
19756 cCL("mvfsp",	e008120, 2, (RF, RF_IF),     rd_rm),
19757 cCL("mvfsm",	e008140, 2, (RF, RF_IF),     rd_rm),
19758 cCL("mvfsz",	e008160, 2, (RF, RF_IF),     rd_rm),
19759 cCL("mvfd",	e008180, 2, (RF, RF_IF),     rd_rm),
19760 cCL("mvfdp",	e0081a0, 2, (RF, RF_IF),     rd_rm),
19761 cCL("mvfdm",	e0081c0, 2, (RF, RF_IF),     rd_rm),
19762 cCL("mvfdz",	e0081e0, 2, (RF, RF_IF),     rd_rm),
19763 cCL("mvfe",	e088100, 2, (RF, RF_IF),     rd_rm),
19764 cCL("mvfep",	e088120, 2, (RF, RF_IF),     rd_rm),
19765 cCL("mvfem",	e088140, 2, (RF, RF_IF),     rd_rm),
19766 cCL("mvfez",	e088160, 2, (RF, RF_IF),     rd_rm),
19767
19768 cCL("mnfs",	e108100, 2, (RF, RF_IF),     rd_rm),
19769 cCL("mnfsp",	e108120, 2, (RF, RF_IF),     rd_rm),
19770 cCL("mnfsm",	e108140, 2, (RF, RF_IF),     rd_rm),
19771 cCL("mnfsz",	e108160, 2, (RF, RF_IF),     rd_rm),
19772 cCL("mnfd",	e108180, 2, (RF, RF_IF),     rd_rm),
19773 cCL("mnfdp",	e1081a0, 2, (RF, RF_IF),     rd_rm),
19774 cCL("mnfdm",	e1081c0, 2, (RF, RF_IF),     rd_rm),
19775 cCL("mnfdz",	e1081e0, 2, (RF, RF_IF),     rd_rm),
19776 cCL("mnfe",	e188100, 2, (RF, RF_IF),     rd_rm),
19777 cCL("mnfep",	e188120, 2, (RF, RF_IF),     rd_rm),
19778 cCL("mnfem",	e188140, 2, (RF, RF_IF),     rd_rm),
19779 cCL("mnfez",	e188160, 2, (RF, RF_IF),     rd_rm),
19780
19781 cCL("abss",	e208100, 2, (RF, RF_IF),     rd_rm),
19782 cCL("abssp",	e208120, 2, (RF, RF_IF),     rd_rm),
19783 cCL("abssm",	e208140, 2, (RF, RF_IF),     rd_rm),
19784 cCL("abssz",	e208160, 2, (RF, RF_IF),     rd_rm),
19785 cCL("absd",	e208180, 2, (RF, RF_IF),     rd_rm),
19786 cCL("absdp",	e2081a0, 2, (RF, RF_IF),     rd_rm),
19787 cCL("absdm",	e2081c0, 2, (RF, RF_IF),     rd_rm),
19788 cCL("absdz",	e2081e0, 2, (RF, RF_IF),     rd_rm),
19789 cCL("abse",	e288100, 2, (RF, RF_IF),     rd_rm),
19790 cCL("absep",	e288120, 2, (RF, RF_IF),     rd_rm),
19791 cCL("absem",	e288140, 2, (RF, RF_IF),     rd_rm),
19792 cCL("absez",	e288160, 2, (RF, RF_IF),     rd_rm),
19793
19794 cCL("rnds",	e308100, 2, (RF, RF_IF),     rd_rm),
19795 cCL("rndsp",	e308120, 2, (RF, RF_IF),     rd_rm),
19796 cCL("rndsm",	e308140, 2, (RF, RF_IF),     rd_rm),
19797 cCL("rndsz",	e308160, 2, (RF, RF_IF),     rd_rm),
19798 cCL("rndd",	e308180, 2, (RF, RF_IF),     rd_rm),
19799 cCL("rnddp",	e3081a0, 2, (RF, RF_IF),     rd_rm),
19800 cCL("rnddm",	e3081c0, 2, (RF, RF_IF),     rd_rm),
19801 cCL("rnddz",	e3081e0, 2, (RF, RF_IF),     rd_rm),
19802 cCL("rnde",	e388100, 2, (RF, RF_IF),     rd_rm),
19803 cCL("rndep",	e388120, 2, (RF, RF_IF),     rd_rm),
19804 cCL("rndem",	e388140, 2, (RF, RF_IF),     rd_rm),
19805 cCL("rndez",	e388160, 2, (RF, RF_IF),     rd_rm),
19806
19807 cCL("sqts",	e408100, 2, (RF, RF_IF),     rd_rm),
19808 cCL("sqtsp",	e408120, 2, (RF, RF_IF),     rd_rm),
19809 cCL("sqtsm",	e408140, 2, (RF, RF_IF),     rd_rm),
19810 cCL("sqtsz",	e408160, 2, (RF, RF_IF),     rd_rm),
19811 cCL("sqtd",	e408180, 2, (RF, RF_IF),     rd_rm),
19812 cCL("sqtdp",	e4081a0, 2, (RF, RF_IF),     rd_rm),
19813 cCL("sqtdm",	e4081c0, 2, (RF, RF_IF),     rd_rm),
19814 cCL("sqtdz",	e4081e0, 2, (RF, RF_IF),     rd_rm),
19815 cCL("sqte",	e488100, 2, (RF, RF_IF),     rd_rm),
19816 cCL("sqtep",	e488120, 2, (RF, RF_IF),     rd_rm),
19817 cCL("sqtem",	e488140, 2, (RF, RF_IF),     rd_rm),
19818 cCL("sqtez",	e488160, 2, (RF, RF_IF),     rd_rm),
19819
19820 cCL("logs",	e508100, 2, (RF, RF_IF),     rd_rm),
19821 cCL("logsp",	e508120, 2, (RF, RF_IF),     rd_rm),
19822 cCL("logsm",	e508140, 2, (RF, RF_IF),     rd_rm),
19823 cCL("logsz",	e508160, 2, (RF, RF_IF),     rd_rm),
19824 cCL("logd",	e508180, 2, (RF, RF_IF),     rd_rm),
19825 cCL("logdp",	e5081a0, 2, (RF, RF_IF),     rd_rm),
19826 cCL("logdm",	e5081c0, 2, (RF, RF_IF),     rd_rm),
19827 cCL("logdz",	e5081e0, 2, (RF, RF_IF),     rd_rm),
19828 cCL("loge",	e588100, 2, (RF, RF_IF),     rd_rm),
19829 cCL("logep",	e588120, 2, (RF, RF_IF),     rd_rm),
19830 cCL("logem",	e588140, 2, (RF, RF_IF),     rd_rm),
19831 cCL("logez",	e588160, 2, (RF, RF_IF),     rd_rm),
19832
19833 cCL("lgns",	e608100, 2, (RF, RF_IF),     rd_rm),
19834 cCL("lgnsp",	e608120, 2, (RF, RF_IF),     rd_rm),
19835 cCL("lgnsm",	e608140, 2, (RF, RF_IF),     rd_rm),
19836 cCL("lgnsz",	e608160, 2, (RF, RF_IF),     rd_rm),
19837 cCL("lgnd",	e608180, 2, (RF, RF_IF),     rd_rm),
19838 cCL("lgndp",	e6081a0, 2, (RF, RF_IF),     rd_rm),
19839 cCL("lgndm",	e6081c0, 2, (RF, RF_IF),     rd_rm),
19840 cCL("lgndz",	e6081e0, 2, (RF, RF_IF),     rd_rm),
19841 cCL("lgne",	e688100, 2, (RF, RF_IF),     rd_rm),
19842 cCL("lgnep",	e688120, 2, (RF, RF_IF),     rd_rm),
19843 cCL("lgnem",	e688140, 2, (RF, RF_IF),     rd_rm),
19844 cCL("lgnez",	e688160, 2, (RF, RF_IF),     rd_rm),
19845
19846 cCL("exps",	e708100, 2, (RF, RF_IF),     rd_rm),
19847 cCL("expsp",	e708120, 2, (RF, RF_IF),     rd_rm),
19848 cCL("expsm",	e708140, 2, (RF, RF_IF),     rd_rm),
19849 cCL("expsz",	e708160, 2, (RF, RF_IF),     rd_rm),
19850 cCL("expd",	e708180, 2, (RF, RF_IF),     rd_rm),
19851 cCL("expdp",	e7081a0, 2, (RF, RF_IF),     rd_rm),
19852 cCL("expdm",	e7081c0, 2, (RF, RF_IF),     rd_rm),
19853 cCL("expdz",	e7081e0, 2, (RF, RF_IF),     rd_rm),
19854 cCL("expe",	e788100, 2, (RF, RF_IF),     rd_rm),
19855 cCL("expep",	e788120, 2, (RF, RF_IF),     rd_rm),
19856 cCL("expem",	e788140, 2, (RF, RF_IF),     rd_rm),
19857 cCL("expdz",	e788160, 2, (RF, RF_IF),     rd_rm),
19858
19859 cCL("sins",	e808100, 2, (RF, RF_IF),     rd_rm),
19860 cCL("sinsp",	e808120, 2, (RF, RF_IF),     rd_rm),
19861 cCL("sinsm",	e808140, 2, (RF, RF_IF),     rd_rm),
19862 cCL("sinsz",	e808160, 2, (RF, RF_IF),     rd_rm),
19863 cCL("sind",	e808180, 2, (RF, RF_IF),     rd_rm),
19864 cCL("sindp",	e8081a0, 2, (RF, RF_IF),     rd_rm),
19865 cCL("sindm",	e8081c0, 2, (RF, RF_IF),     rd_rm),
19866 cCL("sindz",	e8081e0, 2, (RF, RF_IF),     rd_rm),
19867 cCL("sine",	e888100, 2, (RF, RF_IF),     rd_rm),
19868 cCL("sinep",	e888120, 2, (RF, RF_IF),     rd_rm),
19869 cCL("sinem",	e888140, 2, (RF, RF_IF),     rd_rm),
19870 cCL("sinez",	e888160, 2, (RF, RF_IF),     rd_rm),
19871
19872 cCL("coss",	e908100, 2, (RF, RF_IF),     rd_rm),
19873 cCL("cossp",	e908120, 2, (RF, RF_IF),     rd_rm),
19874 cCL("cossm",	e908140, 2, (RF, RF_IF),     rd_rm),
19875 cCL("cossz",	e908160, 2, (RF, RF_IF),     rd_rm),
19876 cCL("cosd",	e908180, 2, (RF, RF_IF),     rd_rm),
19877 cCL("cosdp",	e9081a0, 2, (RF, RF_IF),     rd_rm),
19878 cCL("cosdm",	e9081c0, 2, (RF, RF_IF),     rd_rm),
19879 cCL("cosdz",	e9081e0, 2, (RF, RF_IF),     rd_rm),
19880 cCL("cose",	e988100, 2, (RF, RF_IF),     rd_rm),
19881 cCL("cosep",	e988120, 2, (RF, RF_IF),     rd_rm),
19882 cCL("cosem",	e988140, 2, (RF, RF_IF),     rd_rm),
19883 cCL("cosez",	e988160, 2, (RF, RF_IF),     rd_rm),
19884
19885 cCL("tans",	ea08100, 2, (RF, RF_IF),     rd_rm),
19886 cCL("tansp",	ea08120, 2, (RF, RF_IF),     rd_rm),
19887 cCL("tansm",	ea08140, 2, (RF, RF_IF),     rd_rm),
19888 cCL("tansz",	ea08160, 2, (RF, RF_IF),     rd_rm),
19889 cCL("tand",	ea08180, 2, (RF, RF_IF),     rd_rm),
19890 cCL("tandp",	ea081a0, 2, (RF, RF_IF),     rd_rm),
19891 cCL("tandm",	ea081c0, 2, (RF, RF_IF),     rd_rm),
19892 cCL("tandz",	ea081e0, 2, (RF, RF_IF),     rd_rm),
19893 cCL("tane",	ea88100, 2, (RF, RF_IF),     rd_rm),
19894 cCL("tanep",	ea88120, 2, (RF, RF_IF),     rd_rm),
19895 cCL("tanem",	ea88140, 2, (RF, RF_IF),     rd_rm),
19896 cCL("tanez",	ea88160, 2, (RF, RF_IF),     rd_rm),
19897
19898 cCL("asns",	eb08100, 2, (RF, RF_IF),     rd_rm),
19899 cCL("asnsp",	eb08120, 2, (RF, RF_IF),     rd_rm),
19900 cCL("asnsm",	eb08140, 2, (RF, RF_IF),     rd_rm),
19901 cCL("asnsz",	eb08160, 2, (RF, RF_IF),     rd_rm),
19902 cCL("asnd",	eb08180, 2, (RF, RF_IF),     rd_rm),
19903 cCL("asndp",	eb081a0, 2, (RF, RF_IF),     rd_rm),
19904 cCL("asndm",	eb081c0, 2, (RF, RF_IF),     rd_rm),
19905 cCL("asndz",	eb081e0, 2, (RF, RF_IF),     rd_rm),
19906 cCL("asne",	eb88100, 2, (RF, RF_IF),     rd_rm),
19907 cCL("asnep",	eb88120, 2, (RF, RF_IF),     rd_rm),
19908 cCL("asnem",	eb88140, 2, (RF, RF_IF),     rd_rm),
19909 cCL("asnez",	eb88160, 2, (RF, RF_IF),     rd_rm),
19910
19911 cCL("acss",	ec08100, 2, (RF, RF_IF),     rd_rm),
19912 cCL("acssp",	ec08120, 2, (RF, RF_IF),     rd_rm),
19913 cCL("acssm",	ec08140, 2, (RF, RF_IF),     rd_rm),
19914 cCL("acssz",	ec08160, 2, (RF, RF_IF),     rd_rm),
19915 cCL("acsd",	ec08180, 2, (RF, RF_IF),     rd_rm),
19916 cCL("acsdp",	ec081a0, 2, (RF, RF_IF),     rd_rm),
19917 cCL("acsdm",	ec081c0, 2, (RF, RF_IF),     rd_rm),
19918 cCL("acsdz",	ec081e0, 2, (RF, RF_IF),     rd_rm),
19919 cCL("acse",	ec88100, 2, (RF, RF_IF),     rd_rm),
19920 cCL("acsep",	ec88120, 2, (RF, RF_IF),     rd_rm),
19921 cCL("acsem",	ec88140, 2, (RF, RF_IF),     rd_rm),
19922 cCL("acsez",	ec88160, 2, (RF, RF_IF),     rd_rm),
19923
19924 cCL("atns",	ed08100, 2, (RF, RF_IF),     rd_rm),
19925 cCL("atnsp",	ed08120, 2, (RF, RF_IF),     rd_rm),
19926 cCL("atnsm",	ed08140, 2, (RF, RF_IF),     rd_rm),
19927 cCL("atnsz",	ed08160, 2, (RF, RF_IF),     rd_rm),
19928 cCL("atnd",	ed08180, 2, (RF, RF_IF),     rd_rm),
19929 cCL("atndp",	ed081a0, 2, (RF, RF_IF),     rd_rm),
19930 cCL("atndm",	ed081c0, 2, (RF, RF_IF),     rd_rm),
19931 cCL("atndz",	ed081e0, 2, (RF, RF_IF),     rd_rm),
19932 cCL("atne",	ed88100, 2, (RF, RF_IF),     rd_rm),
19933 cCL("atnep",	ed88120, 2, (RF, RF_IF),     rd_rm),
19934 cCL("atnem",	ed88140, 2, (RF, RF_IF),     rd_rm),
19935 cCL("atnez",	ed88160, 2, (RF, RF_IF),     rd_rm),
19936
19937 cCL("urds",	ee08100, 2, (RF, RF_IF),     rd_rm),
19938 cCL("urdsp",	ee08120, 2, (RF, RF_IF),     rd_rm),
19939 cCL("urdsm",	ee08140, 2, (RF, RF_IF),     rd_rm),
19940 cCL("urdsz",	ee08160, 2, (RF, RF_IF),     rd_rm),
19941 cCL("urdd",	ee08180, 2, (RF, RF_IF),     rd_rm),
19942 cCL("urddp",	ee081a0, 2, (RF, RF_IF),     rd_rm),
19943 cCL("urddm",	ee081c0, 2, (RF, RF_IF),     rd_rm),
19944 cCL("urddz",	ee081e0, 2, (RF, RF_IF),     rd_rm),
19945 cCL("urde",	ee88100, 2, (RF, RF_IF),     rd_rm),
19946 cCL("urdep",	ee88120, 2, (RF, RF_IF),     rd_rm),
19947 cCL("urdem",	ee88140, 2, (RF, RF_IF),     rd_rm),
19948 cCL("urdez",	ee88160, 2, (RF, RF_IF),     rd_rm),
19949
19950 cCL("nrms",	ef08100, 2, (RF, RF_IF),     rd_rm),
19951 cCL("nrmsp",	ef08120, 2, (RF, RF_IF),     rd_rm),
19952 cCL("nrmsm",	ef08140, 2, (RF, RF_IF),     rd_rm),
19953 cCL("nrmsz",	ef08160, 2, (RF, RF_IF),     rd_rm),
19954 cCL("nrmd",	ef08180, 2, (RF, RF_IF),     rd_rm),
19955 cCL("nrmdp",	ef081a0, 2, (RF, RF_IF),     rd_rm),
19956 cCL("nrmdm",	ef081c0, 2, (RF, RF_IF),     rd_rm),
19957 cCL("nrmdz",	ef081e0, 2, (RF, RF_IF),     rd_rm),
19958 cCL("nrme",	ef88100, 2, (RF, RF_IF),     rd_rm),
19959 cCL("nrmep",	ef88120, 2, (RF, RF_IF),     rd_rm),
19960 cCL("nrmem",	ef88140, 2, (RF, RF_IF),     rd_rm),
19961 cCL("nrmez",	ef88160, 2, (RF, RF_IF),     rd_rm),
19962
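 /* FPA dyadic operations (two source registers); the suffixes select
    precision and rounding mode.  */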
19963 cCL("adfs",	e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
19964 cCL("adfsp",	e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
19965 cCL("adfsm",	e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
19966 cCL("adfsz",	e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
19967 cCL("adfd",	e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
19968 cCL("adfdp",	e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19969 cCL("adfdm",	e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19970 cCL("adfdz",	e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19971 cCL("adfe",	e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
19972 cCL("adfep",	e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
19973 cCL("adfem",	e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
19974 cCL("adfez",	e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
19975
19976 cCL("sufs",	e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
19977 cCL("sufsp",	e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
19978 cCL("sufsm",	e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
19979 cCL("sufsz",	e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
19980 cCL("sufd",	e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
19981 cCL("sufdp",	e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19982 cCL("sufdm",	e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19983 cCL("sufdz",	e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19984 cCL("sufe",	e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
19985 cCL("sufep",	e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
19986 cCL("sufem",	e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
19987 cCL("sufez",	e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
19988
19989 cCL("rsfs",	e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
19990 cCL("rsfsp",	e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
19991 cCL("rsfsm",	e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
19992 cCL("rsfsz",	e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
19993 cCL("rsfd",	e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
19994 cCL("rsfdp",	e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19995 cCL("rsfdm",	e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19996 cCL("rsfdz",	e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19997 cCL("rsfe",	e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
19998 cCL("rsfep",	e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
19999 cCL("rsfem",	e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
20000 cCL("rsfez",	e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
20001
20002 cCL("mufs",	e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
20003 cCL("mufsp",	e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
20004 cCL("mufsm",	e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
20005 cCL("mufsz",	e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
20006 cCL("mufd",	e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
20007 cCL("mufdp",	e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20008 cCL("mufdm",	e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20009 cCL("mufdz",	e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20010 cCL("mufe",	e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
20011 cCL("mufep",	e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
20012 cCL("mufem",	e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
20013 cCL("mufez",	e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
20014
20015 cCL("dvfs",	e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
20016 cCL("dvfsp",	e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
20017 cCL("dvfsm",	e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
20018 cCL("dvfsz",	e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
20019 cCL("dvfd",	e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
20020 cCL("dvfdp",	e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20021 cCL("dvfdm",	e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20022 cCL("dvfdz",	e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20023 cCL("dvfe",	e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
20024 cCL("dvfep",	e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
20025 cCL("dvfem",	e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
20026 cCL("dvfez",	e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
20027
20028 cCL("rdfs",	e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
20029 cCL("rdfsp",	e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
20030 cCL("rdfsm",	e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
20031 cCL("rdfsz",	e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
20032 cCL("rdfd",	e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
20033 cCL("rdfdp",	e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20034 cCL("rdfdm",	e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20035 cCL("rdfdz",	e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20036 cCL("rdfe",	e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
20037 cCL("rdfep",	e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
20038 cCL("rdfem",	e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
20039 cCL("rdfez",	e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
20040
20041 cCL("pows",	e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
20042 cCL("powsp",	e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
20043 cCL("powsm",	e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
20044 cCL("powsz",	e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
20045 cCL("powd",	e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
20046 cCL("powdp",	e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20047 cCL("powdm",	e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20048 cCL("powdz",	e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20049 cCL("powe",	e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
20050 cCL("powep",	e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
20051 cCL("powem",	e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
20052 cCL("powez",	e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
20053
20054 cCL("rpws",	e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
20055 cCL("rpwsp",	e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
20056 cCL("rpwsm",	e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
20057 cCL("rpwsz",	e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
20058 cCL("rpwd",	e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
20059 cCL("rpwdp",	e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20060 cCL("rpwdm",	e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20061 cCL("rpwdz",	e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20062 cCL("rpwe",	e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
20063 cCL("rpwep",	e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
20064 cCL("rpwem",	e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
20065 cCL("rpwez",	e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
20066
20067 cCL("rmfs",	e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
20068 cCL("rmfsp",	e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
20069 cCL("rmfsm",	e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
20070 cCL("rmfsz",	e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
20071 cCL("rmfd",	e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
20072 cCL("rmfdp",	e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20073 cCL("rmfdm",	e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20074 cCL("rmfdz",	e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20075 cCL("rmfe",	e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
20076 cCL("rmfep",	e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
20077 cCL("rmfem",	e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
20078 cCL("rmfez",	e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
20079
20080 cCL("fmls",	e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
20081 cCL("fmlsp",	e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
20082 cCL("fmlsm",	e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
20083 cCL("fmlsz",	e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
20084 cCL("fmld",	e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
20085 cCL("fmldp",	e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20086 cCL("fmldm",	e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20087 cCL("fmldz",	e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20088 cCL("fmle",	e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
20089 cCL("fmlep",	e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
20090 cCL("fmlem",	e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
20091 cCL("fmlez",	e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
20092
20093 cCL("fdvs",	ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20094 cCL("fdvsp",	ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20095 cCL("fdvsm",	ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20096 cCL("fdvsz",	ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20097 cCL("fdvd",	ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20098 cCL("fdvdp",	ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20099 cCL("fdvdm",	ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20100 cCL("fdvdz",	ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20101 cCL("fdve",	ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20102 cCL("fdvep",	ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20103 cCL("fdvem",	ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20104 cCL("fdvez",	ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20105
20106 cCL("frds",	eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20107 cCL("frdsp",	eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20108 cCL("frdsm",	eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20109 cCL("frdsz",	eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20110 cCL("frdd",	eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20111 cCL("frddp",	eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20112 cCL("frddm",	eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20113 cCL("frddz",	eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20114 cCL("frde",	eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20115 cCL("frdep",	eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20116 cCL("frdem",	eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20117 cCL("frdez",	eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20118
20119 cCL("pols",	ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20120 cCL("polsp",	ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20121 cCL("polsm",	ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20122 cCL("polsz",	ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20123 cCL("pold",	ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20124 cCL("poldp",	ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20125 cCL("poldm",	ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20126 cCL("poldz",	ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20127 cCL("pole",	ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20128 cCL("polep",	ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20129 cCL("polem",	ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20130 cCL("polez",	ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20131
20132 cCE("cmf",	e90f110, 2, (RF, RF_IF),     fpa_cmp),
20133 C3E("cmfe",	ed0f110, 2, (RF, RF_IF),     fpa_cmp),
20134 cCE("cnf",	eb0f110, 2, (RF, RF_IF),     fpa_cmp),
20135 C3E("cnfe",	ef0f110, 2, (RF, RF_IF),     fpa_cmp),
20136
20137 cCL("flts",	e000110, 2, (RF, RR),	     rn_rd),
20138 cCL("fltsp",	e000130, 2, (RF, RR),	     rn_rd),
20139 cCL("fltsm",	e000150, 2, (RF, RR),	     rn_rd),
20140 cCL("fltsz",	e000170, 2, (RF, RR),	     rn_rd),
20141 cCL("fltd",	e000190, 2, (RF, RR),	     rn_rd),
20142 cCL("fltdp",	e0001b0, 2, (RF, RR),	     rn_rd),
20143 cCL("fltdm",	e0001d0, 2, (RF, RR),	     rn_rd),
20144 cCL("fltdz",	e0001f0, 2, (RF, RR),	     rn_rd),
20145 cCL("flte",	e080110, 2, (RF, RR),	     rn_rd),
20146 cCL("fltep",	e080130, 2, (RF, RR),	     rn_rd),
20147 cCL("fltem",	e080150, 2, (RF, RR),	     rn_rd),
20148 cCL("fltez",	e080170, 2, (RF, RR),	     rn_rd),
20149
20150  /* The implementation of the FIX instruction is broken on some
20151     assemblers, in that it accepts a precision specifier as well as a
20152     rounding specifier, despite the fact that this is meaningless.
20153     To be more compatible, we accept it as well, though of course it
20154     does not set any bits.  */
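 /* For example, "fixsz r0, f0" assembles identically to "fixz r0, f0".  */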
20155 cCE("fix",	e100110, 2, (RR, RF),	     rd_rm),
20156 cCL("fixp",	e100130, 2, (RR, RF),	     rd_rm),
20157 cCL("fixm",	e100150, 2, (RR, RF),	     rd_rm),
20158 cCL("fixz",	e100170, 2, (RR, RF),	     rd_rm),
20159 cCL("fixsp",	e100130, 2, (RR, RF),	     rd_rm),
20160 cCL("fixsm",	e100150, 2, (RR, RF),	     rd_rm),
20161 cCL("fixsz",	e100170, 2, (RR, RF),	     rd_rm),
20162 cCL("fixdp",	e100130, 2, (RR, RF),	     rd_rm),
20163 cCL("fixdm",	e100150, 2, (RR, RF),	     rd_rm),
20164 cCL("fixdz",	e100170, 2, (RR, RF),	     rd_rm),
20165 cCL("fixep",	e100130, 2, (RR, RF),	     rd_rm),
20166 cCL("fixem",	e100150, 2, (RR, RF),	     rd_rm),
20167 cCL("fixez",	e100170, 2, (RR, RF),	     rd_rm),
20168
20169  /* Instructions that were new with the real FPA; call them V2.  */
20170#undef  ARM_VARIANT
20171#define ARM_VARIANT  & fpu_fpa_ext_v2
20172
20173 cCE("lfm",	c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20174 cCL("lfmfd",	c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20175 cCL("lfmea",	d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20176 cCE("sfm",	c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20177 cCL("sfmfd",	d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20178 cCL("sfmea",	c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20179
20180#undef  ARM_VARIANT
20181#define ARM_VARIANT  & fpu_vfp_ext_v1xd  /* VFP V1xD (single precision).  */
20182
20183  /* Moves and type conversions.  */
20184 cCE("fcpys",	eb00a40, 2, (RVS, RVS),	      vfp_sp_monadic),
20185 cCE("fmrs",	e100a10, 2, (RR, RVS),	      vfp_reg_from_sp),
20186 cCE("fmsr",	e000a10, 2, (RVS, RR),	      vfp_sp_from_reg),
20187 cCE("fmstat",	ef1fa10, 0, (),		      noargs),
20188 cCE("vmrs",	ef00a10, 2, (APSR_RR, RVC),   vmrs),
20189 cCE("vmsr",	ee00a10, 2, (RVC, RR),        vmsr),
20190 cCE("fsitos",	eb80ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
20191 cCE("fuitos",	eb80a40, 2, (RVS, RVS),	      vfp_sp_monadic),
20192 cCE("ftosis",	ebd0a40, 2, (RVS, RVS),	      vfp_sp_monadic),
20193 cCE("ftosizs",	ebd0ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
20194 cCE("ftouis",	ebc0a40, 2, (RVS, RVS),	      vfp_sp_monadic),
20195 cCE("ftouizs",	ebc0ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
20196 cCE("fmrx",	ef00a10, 2, (RR, RVC),	      rd_rn),
20197 cCE("fmxr",	ee00a10, 2, (RVC, RR),	      rn_rd),
20198
20199  /* Memory operations.	 */
20200 cCE("flds",	d100a00, 2, (RVS, ADDRGLDC),  vfp_sp_ldst),
20201 cCE("fsts",	d000a00, 2, (RVS, ADDRGLDC),  vfp_sp_ldst),
20202 cCE("fldmias",	c900a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
20203 cCE("fldmfds",	c900a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
20204 cCE("fldmdbs",	d300a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
20205 cCE("fldmeas",	d300a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
20206 cCE("fldmiax",	c900b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
20207 cCE("fldmfdx",	c900b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
20208 cCE("fldmdbx",	d300b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
20209 cCE("fldmeax",	d300b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
20210 cCE("fstmias",	c800a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
20211 cCE("fstmeas",	c800a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
20212 cCE("fstmdbs",	d200a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
20213 cCE("fstmfds",	d200a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
20214 cCE("fstmiax",	c800b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
20215 cCE("fstmeax",	c800b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
20216 cCE("fstmdbx",	d200b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
20217 cCE("fstmfdx",	d200b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
20218
20219  /* Monadic operations.  */
20220 cCE("fabss",	eb00ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
20221 cCE("fnegs",	eb10a40, 2, (RVS, RVS),	      vfp_sp_monadic),
20222 cCE("fsqrts",	eb10ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
20223
20224  /* Dyadic operations.	 */
20225 cCE("fadds",	e300a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
20226 cCE("fsubs",	e300a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
20227 cCE("fmuls",	e200a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
20228 cCE("fdivs",	e800a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
20229 cCE("fmacs",	e000a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
20230 cCE("fmscs",	e100a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
20231 cCE("fnmuls",	e200a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
20232 cCE("fnmacs",	e000a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
20233 cCE("fnmscs",	e100a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
20234
20235  /* Comparisons.  */
20236 cCE("fcmps",	eb40a40, 2, (RVS, RVS),	      vfp_sp_monadic),
20237 cCE("fcmpzs",	eb50a40, 1, (RVS),	      vfp_sp_compare_z),
20238 cCE("fcmpes",	eb40ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
20239 cCE("fcmpezs",	eb50ac0, 1, (RVS),	      vfp_sp_compare_z),
20240
20241 /* Double-precision load and store instructions are still present on
20242    single-precision implementations.  */
20243 cCE("fldd",	d100b00, 2, (RVD, ADDRGLDC),  vfp_dp_ldst),
20244 cCE("fstd",	d000b00, 2, (RVD, ADDRGLDC),  vfp_dp_ldst),
20245 cCE("fldmiad",	c900b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
20246 cCE("fldmfdd",	c900b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
20247 cCE("fldmdbd",	d300b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
20248 cCE("fldmead",	d300b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
20249 cCE("fstmiad",	c800b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
20250 cCE("fstmead",	c800b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
20251 cCE("fstmdbd",	d200b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
20252 cCE("fstmfdd",	d200b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
20253
20254#undef  ARM_VARIANT
20255#define ARM_VARIANT  & fpu_vfp_ext_v1 /* VFP V1 (Double precision).  */
20256
20257  /* Moves and type conversions.  */
20258 cCE("fcpyd",	eb00b40, 2, (RVD, RVD),	      vfp_dp_rd_rm),
20259 cCE("fcvtds",	eb70ac0, 2, (RVD, RVS),	      vfp_dp_sp_cvt),
20260 cCE("fcvtsd",	eb70bc0, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
20261 cCE("fmdhr",	e200b10, 2, (RVD, RR),	      vfp_dp_rn_rd),
20262 cCE("fmdlr",	e000b10, 2, (RVD, RR),	      vfp_dp_rn_rd),
20263 cCE("fmrdh",	e300b10, 2, (RR, RVD),	      vfp_dp_rd_rn),
20264 cCE("fmrdl",	e100b10, 2, (RR, RVD),	      vfp_dp_rd_rn),
20265 cCE("fsitod",	eb80bc0, 2, (RVD, RVS),	      vfp_dp_sp_cvt),
20266 cCE("fuitod",	eb80b40, 2, (RVD, RVS),	      vfp_dp_sp_cvt),
20267 cCE("ftosid",	ebd0b40, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
20268 cCE("ftosizd",	ebd0bc0, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
20269 cCE("ftouid",	ebc0b40, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
20270 cCE("ftouizd",	ebc0bc0, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
20271
20272  /* Monadic operations.  */
20273 cCE("fabsd",	eb00bc0, 2, (RVD, RVD),	      vfp_dp_rd_rm),
20274 cCE("fnegd",	eb10b40, 2, (RVD, RVD),	      vfp_dp_rd_rm),
20275 cCE("fsqrtd",	eb10bc0, 2, (RVD, RVD),	      vfp_dp_rd_rm),
20276
20277  /* Dyadic operations.	 */
20278 cCE("faddd",	e300b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
20279 cCE("fsubd",	e300b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
20280 cCE("fmuld",	e200b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
20281 cCE("fdivd",	e800b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
20282 cCE("fmacd",	e000b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
20283 cCE("fmscd",	e100b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
20284 cCE("fnmuld",	e200b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
20285 cCE("fnmacd",	e000b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
20286 cCE("fnmscd",	e100b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
20287
20288  /* Comparisons.  */
20289 cCE("fcmpd",	eb40b40, 2, (RVD, RVD),	      vfp_dp_rd_rm),
20290 cCE("fcmpzd",	eb50b40, 1, (RVD),	      vfp_dp_rd),
20291 cCE("fcmped",	eb40bc0, 2, (RVD, RVD),	      vfp_dp_rd_rm),
20292 cCE("fcmpezd",	eb50bc0, 1, (RVD),	      vfp_dp_rd),
20293
20294#undef  ARM_VARIANT
20295#define ARM_VARIANT  & fpu_vfp_ext_v2
20296
20297 cCE("fmsrr",	c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
20298 cCE("fmrrs",	c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
20299 cCE("fmdrr",	c400b10, 3, (RVD, RR, RR),    vfp_dp_rm_rd_rn),
20300 cCE("fmrrd",	c500b10, 3, (RR, RR, RVD),    vfp_dp_rd_rn_rm),
20301
20302/* Instructions which may belong to either the Neon or VFP instruction sets.
20303   Individual encoder functions perform additional architecture checks.  */
20304#undef  ARM_VARIANT
20305#define ARM_VARIANT    & fpu_vfp_ext_v1xd
20306#undef  THUMB_VARIANT
20307#define THUMB_VARIANT  & fpu_vfp_ext_v1xd
20308
20309  /* These mnemonics are unique to VFP.  */
20310 NCE(vsqrt,     0,       2, (RVSD, RVSD),       vfp_nsyn_sqrt),
20311 NCE(vdiv,      0,       3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
20312 nCE(vnmul,     _vnmul,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20313 nCE(vnmla,     _vnmla,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20314 nCE(vnmls,     _vnmls,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20315 nCE(vcmp,      _vcmp,    2, (RVSD, RSVD_FI0),    vfp_nsyn_cmp),
20316 nCE(vcmpe,     _vcmpe,   2, (RVSD, RSVD_FI0),    vfp_nsyn_cmp),
20317 NCE(vpush,     0,       1, (VRSDLST),          vfp_nsyn_push),
20318 NCE(vpop,      0,       1, (VRSDLST),          vfp_nsyn_pop),
20319 NCE(vcvtz,     0,       2, (RVSD, RVSD),       vfp_nsyn_cvtz),
20320
20321  /* Mnemonics shared by Neon and VFP.  */
20322 nCEF(vmul,     _vmul,    3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
20323 nCEF(vmla,     _vmla,    3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
20324 nCEF(vmls,     _vmls,    3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
20325
20326 nCEF(vadd,     _vadd,    3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
20327 nCEF(vsub,     _vsub,    3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
20328
20329 NCEF(vabs,     1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
20330 NCEF(vneg,     1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
20331
20332 NCE(vldm,      c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20333 NCE(vldmia,    c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20334 NCE(vldmdb,    d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20335 NCE(vstm,      c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20336 NCE(vstmia,    c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20337 NCE(vstmdb,    d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20338 NCE(vldr,      d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
20339 NCE(vstr,      d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
20340
20341 nCEF(vcvt,     _vcvt,   3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
20342 nCEF(vcvtr,    _vcvt,   2, (RNSDQ, RNSDQ), neon_cvtr),
20343 NCEF(vcvtb,	eb20a40, 2, (RVSD, RVSD), neon_cvtb),
20344 NCEF(vcvtt,	eb20a40, 2, (RVSD, RVSD), neon_cvtt),
20345
20346
20347  /* NOTE: All VMOV encoding is special-cased!  */
20348 NCE(vmov,      0,       1, (VMOV), neon_mov),
20349 NCE(vmovq,     0,       1, (VMOV), neon_mov),
20350
20351#undef  ARM_VARIANT
20352#define ARM_VARIANT    & arm_ext_fp16
20353#undef  THUMB_VARIANT
20354#define THUMB_VARIANT  & arm_ext_fp16
20355 /* New instructions added from v8.2, allowing the extraction and insertion of
20356    the upper 16 bits of a 32-bit vector register.  */
20357 NCE (vmovx,     eb00a40,       2, (RVS, RVS), neon_movhf),
20358 NCE (vins,      eb00ac0,       2, (RVS, RVS), neon_movhf),
20359
20360#undef  THUMB_VARIANT
20361#define THUMB_VARIANT  & fpu_neon_ext_v1
20362#undef  ARM_VARIANT
20363#define ARM_VARIANT    & fpu_neon_ext_v1
20364
20365  /* Data processing with three registers of the same length.  */
20366  /* Integer ops, valid types S8 S16 S32 U8 U16 U32.  */
20367 NUF(vaba,      0000710, 3, (RNDQ, RNDQ,  RNDQ), neon_dyadic_i_su),
20368 NUF(vabaq,     0000710, 3, (RNQ,  RNQ,   RNQ),  neon_dyadic_i_su),
20369 NUF(vhadd,     0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20370 NUF(vhaddq,    0000000, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i_su),
20371 NUF(vrhadd,    0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20372 NUF(vrhaddq,   0000100, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i_su),
20373 NUF(vhsub,     0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20374 NUF(vhsubq,    0000200, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i_su),
20375  /* Integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64.  */
20376 NUF(vqadd,     0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
20377 NUF(vqaddq,    0000010, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i64_su),
20378 NUF(vqsub,     0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
20379 NUF(vqsubq,    0000210, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i64_su),
20380 NUF(vrshl,     0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
20381 NUF(vrshlq,    0000500, 3, (RNQ,  oRNQ,  RNQ),  neon_rshl),
20382 NUF(vqrshl,    0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
20383 NUF(vqrshlq,   0000510, 3, (RNQ,  oRNQ,  RNQ),  neon_rshl),
20384  /* If not immediate, fall back to neon_dyadic_i64_su.
20385     shl_imm should accept I8 I16 I32 I64,
20386     qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64.  */
20387 nUF(vshl,      _vshl,    3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
20388 nUF(vshlq,     _vshl,    3, (RNQ,  oRNQ,  RNDQ_I63b), neon_shl_imm),
20389 nUF(vqshl,     _vqshl,   3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
20390 nUF(vqshlq,    _vqshl,   3, (RNQ,  oRNQ,  RNDQ_I63b), neon_qshl_imm),
20391  /* Logic ops, types optional & ignored.  */
20392 nUF(vand,      _vand,    3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20393 nUF(vandq,     _vand,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
20394 nUF(vbic,      _vbic,    3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20395 nUF(vbicq,     _vbic,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
20396 nUF(vorr,      _vorr,    3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20397 nUF(vorrq,     _vorr,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
20398 nUF(vorn,      _vorn,    3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20399 nUF(vornq,     _vorn,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
20400 nUF(veor,      _veor,    3, (RNDQ, oRNDQ, RNDQ),      neon_logic),
20401 nUF(veorq,     _veor,    3, (RNQ,  oRNQ,  RNQ),       neon_logic),
20402  /* Bitfield ops, untyped.  */
20403 NUF(vbsl,      1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20404 NUF(vbslq,     1100110, 3, (RNQ,  RNQ,  RNQ),  neon_bitfield),
20405 NUF(vbit,      1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20406 NUF(vbitq,     1200110, 3, (RNQ,  RNQ,  RNQ),  neon_bitfield),
20407 NUF(vbif,      1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20408 NUF(vbifq,     1300110, 3, (RNQ,  RNQ,  RNQ),  neon_bitfield),
20409  /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32.  */
20410 nUF(vabd,      _vabd,    3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20411 nUF(vabdq,     _vabd,    3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_if_su),
20412 nUF(vmax,      _vmax,    3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20413 nUF(vmaxq,     _vmax,    3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_if_su),
20414 nUF(vmin,      _vmin,    3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20415 nUF(vminq,     _vmin,    3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_if_su),
20416  /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
20417     back to neon_dyadic_if_su.  */
20418 nUF(vcge,      _vcge,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
20419 nUF(vcgeq,     _vcge,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp),
20420 nUF(vcgt,      _vcgt,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
20421 nUF(vcgtq,     _vcgt,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp),
20422 nUF(vclt,      _vclt,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
20423 nUF(vcltq,     _vclt,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp_inv),
20424 nUF(vcle,      _vcle,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
20425 nUF(vcleq,     _vcle,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp_inv),
20426  /* Comparison. Type I8 I16 I32 F32.  */
20427 nUF(vceq,      _vceq,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
20428 nUF(vceqq,     _vceq,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_ceq),
20429  /* As above, D registers only.  */
20430 nUF(vpmax,     _vpmax,   3, (RND, oRND, RND), neon_dyadic_if_su_d),
20431 nUF(vpmin,     _vpmin,   3, (RND, oRND, RND), neon_dyadic_if_su_d),
20432  /* Int and float variants, signedness unimportant.  */
20433 nUF(vmlaq,     _vmla,    3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_mac_maybe_scalar),
20434 nUF(vmlsq,     _vmls,    3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_mac_maybe_scalar),
20435 nUF(vpadd,     _vpadd,   3, (RND,  oRND,  RND),       neon_dyadic_if_i_d),
20436  /* Add/sub take types I8 I16 I32 I64 F32.  */
20437 nUF(vaddq,     _vadd,    3, (RNQ,  oRNQ,  RNQ),  neon_addsub_if_i),
20438 nUF(vsubq,     _vsub,    3, (RNQ,  oRNQ,  RNQ),  neon_addsub_if_i),
20439  /* vtst takes sizes 8, 16, 32.  */
20440 NUF(vtst,      0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
20441 NUF(vtstq,     0000810, 3, (RNQ,  oRNQ,  RNQ),  neon_tst),
20442  /* VMUL takes I8 I16 I32 F32 P8.  */
20443 nUF(vmulq,     _vmul,     3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_mul),
20444  /* VQD{R}MULH takes S16 S32.  */
20445 nUF(vqdmulh,   _vqdmulh,  3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20446 nUF(vqdmulhq,  _vqdmulh,  3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_qdmulh),
20447 nUF(vqrdmulh,  _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20448 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_qdmulh),
20449 NUF(vacge,     0000e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
20450 NUF(vacgeq,    0000e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute),
20451 NUF(vacgt,     0200e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
20452 NUF(vacgtq,    0200e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute),
20453 NUF(vaclt,     0200e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
20454 NUF(vacltq,    0200e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute_inv),
20455 NUF(vacle,     0000e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
20456 NUF(vacleq,    0000e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute_inv),
20457 NUF(vrecps,    0000f10,  3, (RNDQ, oRNDQ, RNDQ), neon_step),
20458 NUF(vrecpsq,   0000f10,  3, (RNQ,  oRNQ,  RNQ),  neon_step),
20459 NUF(vrsqrts,   0200f10,  3, (RNDQ, oRNDQ, RNDQ), neon_step),
20460 NUF(vrsqrtsq,  0200f10,  3, (RNQ,  oRNQ,  RNQ),  neon_step),
20461 /* ARM v8.1 extension.  */
20462 nUF (vqrdmlah,  _vqrdmlah, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
20463 nUF (vqrdmlahq, _vqrdmlah, 3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_qrdmlah),
20464 nUF (vqrdmlsh,  _vqrdmlsh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
20465 nUF (vqrdmlshq, _vqrdmlsh, 3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_qrdmlah),
20466
20467  /* Two address, int/float. Types S8 S16 S32 F32.  */
20468 NUF(vabsq,     1b10300, 2, (RNQ,  RNQ),      neon_abs_neg),
20469 NUF(vnegq,     1b10380, 2, (RNQ,  RNQ),      neon_abs_neg),
20470
20471  /* Data processing with two registers and a shift amount.  */
20472  /* Right shifts, and variants with rounding.
20473     Types accepted S8 S16 S32 S64 U8 U16 U32 U64.  */
20474 NUF(vshr,      0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
20475 NUF(vshrq,     0800010, 3, (RNQ,  oRNQ,  I64z), neon_rshift_round_imm),
20476 NUF(vrshr,     0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
20477 NUF(vrshrq,    0800210, 3, (RNQ,  oRNQ,  I64z), neon_rshift_round_imm),
20478 NUF(vsra,      0800110, 3, (RNDQ, oRNDQ, I64),  neon_rshift_round_imm),
20479 NUF(vsraq,     0800110, 3, (RNQ,  oRNQ,  I64),  neon_rshift_round_imm),
20480 NUF(vrsra,     0800310, 3, (RNDQ, oRNDQ, I64),  neon_rshift_round_imm),
20481 NUF(vrsraq,    0800310, 3, (RNQ,  oRNQ,  I64),  neon_rshift_round_imm),
20482  /* Shift and insert. Sizes accepted 8 16 32 64.  */
20483 NUF(vsli,      1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
20484 NUF(vsliq,     1800510, 3, (RNQ,  oRNQ,  I63), neon_sli),
20485 NUF(vsri,      1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
20486 NUF(vsriq,     1800410, 3, (RNQ,  oRNQ,  I64), neon_sri),
20487  /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64.  */
20488 NUF(vqshlu,    1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
20489 NUF(vqshluq,   1800610, 3, (RNQ,  oRNQ,  I63), neon_qshlu_imm),
20490  /* Right shift immediate, saturating & narrowing, with rounding variants.
20491     Types accepted S16 S32 S64 U16 U32 U64.  */
20492 NUF(vqshrn,    0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
20493 NUF(vqrshrn,   0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
20494  /* As above, unsigned. Types accepted S16 S32 S64.  */
20495 NUF(vqshrun,   0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
20496 NUF(vqrshrun,  0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
20497  /* Right shift narrowing. Types accepted I16 I32 I64.  */
20498 NUF(vshrn,     0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
20499 NUF(vrshrn,    0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
20500  /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant.  */
20501 nUF(vshll,     _vshll,   3, (RNQ, RND, I32),  neon_shll),
20502  /* CVT with optional immediate for fixed-point variant.  */
20503 nUF(vcvtq,     _vcvt,    3, (RNQ, RNQ, oI32b), neon_cvt),
20504
20505 nUF(vmvn,      _vmvn,    2, (RNDQ, RNDQ_Ibig), neon_mvn),
20506 nUF(vmvnq,     _vmvn,    2, (RNQ,  RNDQ_Ibig), neon_mvn),
20507
20508  /* Data processing, three registers of different lengths.  */
20509  /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32.  */
20510 NUF(vabal,     0800500, 3, (RNQ, RND, RND),  neon_abal),
20511 NUF(vabdl,     0800700, 3, (RNQ, RND, RND),  neon_dyadic_long),
20512 NUF(vaddl,     0800000, 3, (RNQ, RND, RND),  neon_dyadic_long),
20513 NUF(vsubl,     0800200, 3, (RNQ, RND, RND),  neon_dyadic_long),
20514  /* If not scalar, fall back to neon_dyadic_long.
20515     Vector types as above, scalar types S16 S32 U16 U32.  */
20516 nUF(vmlal,     _vmlal,   3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
20517 nUF(vmlsl,     _vmlsl,   3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
20518  /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32.  */
20519 NUF(vaddw,     0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
20520 NUF(vsubw,     0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
20521  /* Dyadic, narrowing insns. Types I16 I32 I64.  */
20522 NUF(vaddhn,    0800400, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
20523 NUF(vraddhn,   1800400, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
20524 NUF(vsubhn,    0800600, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
20525 NUF(vrsubhn,   1800600, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
20526  /* Saturating doubling multiplies. Types S16 S32.  */
20527 nUF(vqdmlal,   _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20528 nUF(vqdmlsl,   _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20529 nUF(vqdmull,   _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20530  /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
20531     S16 S32 U16 U32.  */
20532 nUF(vmull,     _vmull,   3, (RNQ, RND, RND_RNSC), neon_vmull),
20533
20534  /* Extract. Size 8.  */
20535 NUF(vext,      0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
20536 NUF(vextq,     0b00000, 4, (RNQ,  oRNQ,  RNQ,  I15), neon_ext),
20537
20538  /* Two registers, miscellaneous.  */
20539  /* Reverse. Sizes 8 16 32 (must be < size in opcode).  */
20540 NUF(vrev64,    1b00000, 2, (RNDQ, RNDQ),     neon_rev),
20541 NUF(vrev64q,   1b00000, 2, (RNQ,  RNQ),      neon_rev),
20542 NUF(vrev32,    1b00080, 2, (RNDQ, RNDQ),     neon_rev),
20543 NUF(vrev32q,   1b00080, 2, (RNQ,  RNQ),      neon_rev),
20544 NUF(vrev16,    1b00100, 2, (RNDQ, RNDQ),     neon_rev),
20545 NUF(vrev16q,   1b00100, 2, (RNQ,  RNQ),      neon_rev),
20546  /* Vector replicate. Sizes 8 16 32.  */
20547 nCE(vdup,      _vdup,    2, (RNDQ, RR_RNSC),  neon_dup),
20548 nCE(vdupq,     _vdup,    2, (RNQ,  RR_RNSC),  neon_dup),
20549  /* VMOVL. Types S8 S16 S32 U8 U16 U32.  */
20550 NUF(vmovl,     0800a10, 2, (RNQ, RND),       neon_movl),
20551  /* VMOVN. Types I16 I32 I64.  */
20552 nUF(vmovn,     _vmovn,   2, (RND, RNQ),       neon_movn),
20553  /* VQMOVN. Types S16 S32 S64 U16 U32 U64.  */
20554 nUF(vqmovn,    _vqmovn,  2, (RND, RNQ),       neon_qmovn),
20555  /* VQMOVUN. Types S16 S32 S64.  */
20556 nUF(vqmovun,   _vqmovun, 2, (RND, RNQ),       neon_qmovun),
20557  /* VZIP / VUZP. Sizes 8 16 32.  */
20558 NUF(vzip,      1b20180, 2, (RNDQ, RNDQ),     neon_zip_uzp),
20559 NUF(vzipq,     1b20180, 2, (RNQ,  RNQ),      neon_zip_uzp),
20560 NUF(vuzp,      1b20100, 2, (RNDQ, RNDQ),     neon_zip_uzp),
20561 NUF(vuzpq,     1b20100, 2, (RNQ,  RNQ),      neon_zip_uzp),
20562  /* VQABS / VQNEG. Types S8 S16 S32.  */
20563 NUF(vqabs,     1b00700, 2, (RNDQ, RNDQ),     neon_sat_abs_neg),
20564 NUF(vqabsq,    1b00700, 2, (RNQ,  RNQ),      neon_sat_abs_neg),
20565 NUF(vqneg,     1b00780, 2, (RNDQ, RNDQ),     neon_sat_abs_neg),
20566 NUF(vqnegq,    1b00780, 2, (RNQ,  RNQ),      neon_sat_abs_neg),
20567  /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32.  */
20568 NUF(vpadal,    1b00600, 2, (RNDQ, RNDQ),     neon_pair_long),
20569 NUF(vpadalq,   1b00600, 2, (RNQ,  RNQ),      neon_pair_long),
20570 NUF(vpaddl,    1b00200, 2, (RNDQ, RNDQ),     neon_pair_long),
20571 NUF(vpaddlq,   1b00200, 2, (RNQ,  RNQ),      neon_pair_long),
20572  /* Reciprocal estimates.  Types U32 F16 F32.  */
20573 NUF(vrecpe,    1b30400, 2, (RNDQ, RNDQ),     neon_recip_est),
20574 NUF(vrecpeq,   1b30400, 2, (RNQ,  RNQ),      neon_recip_est),
20575 NUF(vrsqrte,   1b30480, 2, (RNDQ, RNDQ),     neon_recip_est),
20576 NUF(vrsqrteq,  1b30480, 2, (RNQ,  RNQ),      neon_recip_est),
20577  /* VCLS. Types S8 S16 S32.  */
20578 NUF(vcls,      1b00400, 2, (RNDQ, RNDQ),     neon_cls),
20579 NUF(vclsq,     1b00400, 2, (RNQ,  RNQ),      neon_cls),
20580  /* VCLZ. Types I8 I16 I32.  */
20581 NUF(vclz,      1b00480, 2, (RNDQ, RNDQ),     neon_clz),
20582 NUF(vclzq,     1b00480, 2, (RNQ,  RNQ),      neon_clz),
20583  /* VCNT. Size 8.  */
20584 NUF(vcnt,      1b00500, 2, (RNDQ, RNDQ),     neon_cnt),
20585 NUF(vcntq,     1b00500, 2, (RNQ,  RNQ),      neon_cnt),
20586  /* Two address, untyped.  */
20587 NUF(vswp,      1b20000, 2, (RNDQ, RNDQ),     neon_swp),
20588 NUF(vswpq,     1b20000, 2, (RNQ,  RNQ),      neon_swp),
20589  /* VTRN. Sizes 8 16 32.  */
20590 nUF(vtrn,      _vtrn,    2, (RNDQ, RNDQ),     neon_trn),
20591 nUF(vtrnq,     _vtrn,    2, (RNQ,  RNQ),      neon_trn),
20592
20593  /* Table lookup. Size 8.  */
20594 NUF(vtbl,      1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
20595 NUF(vtbx,      1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
20596
20597#undef  THUMB_VARIANT
20598#define THUMB_VARIANT  & fpu_vfp_v3_or_neon_ext
20599#undef  ARM_VARIANT
20600#define ARM_VARIANT    & fpu_vfp_v3_or_neon_ext
20601
20602  /* Neon element/structure load/store.  */
20603 nUF(vld1,      _vld1,    2, (NSTRLST, ADDR),  neon_ldx_stx),
20604 nUF(vst1,      _vst1,    2, (NSTRLST, ADDR),  neon_ldx_stx),
20605 nUF(vld2,      _vld2,    2, (NSTRLST, ADDR),  neon_ldx_stx),
20606 nUF(vst2,      _vst2,    2, (NSTRLST, ADDR),  neon_ldx_stx),
20607 nUF(vld3,      _vld3,    2, (NSTRLST, ADDR),  neon_ldx_stx),
20608 nUF(vst3,      _vst3,    2, (NSTRLST, ADDR),  neon_ldx_stx),
20609 nUF(vld4,      _vld4,    2, (NSTRLST, ADDR),  neon_ldx_stx),
20610 nUF(vst4,      _vst4,    2, (NSTRLST, ADDR),  neon_ldx_stx),
20611
20612#undef  THUMB_VARIANT
20613#define THUMB_VARIANT & fpu_vfp_ext_v3xd
20614#undef  ARM_VARIANT
20615#define ARM_VARIANT   & fpu_vfp_ext_v3xd
20616 cCE("fconsts",   eb00a00, 2, (RVS, I255),      vfp_sp_const),
20617 cCE("fshtos",    eba0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
20618 cCE("fsltos",    eba0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
20619 cCE("fuhtos",    ebb0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
20620 cCE("fultos",    ebb0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
20621 cCE("ftoshs",    ebe0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
20622 cCE("ftosls",    ebe0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
20623 cCE("ftouhs",    ebf0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
20624 cCE("ftouls",    ebf0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
20625
20626#undef  THUMB_VARIANT
20627#define THUMB_VARIANT  & fpu_vfp_ext_v3
20628#undef  ARM_VARIANT
20629#define ARM_VARIANT    & fpu_vfp_ext_v3
20630
20631 cCE("fconstd",   eb00b00, 2, (RVD, I255),      vfp_dp_const),
20632 cCE("fshtod",    eba0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
20633 cCE("fsltod",    eba0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
20634 cCE("fuhtod",    ebb0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
20635 cCE("fultod",    ebb0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
20636 cCE("ftoshd",    ebe0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
20637 cCE("ftosld",    ebe0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
20638 cCE("ftouhd",    ebf0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
20639 cCE("ftould",    ebf0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
20640
20641#undef  ARM_VARIANT
20642#define ARM_VARIANT    & fpu_vfp_ext_fma
20643#undef  THUMB_VARIANT
20644#define THUMB_VARIANT  & fpu_vfp_ext_fma
20645 /* Mnemonics shared by Neon and VFP.  These are included in the
20646    VFP FMA variant; NEON and VFP FMA always include the NEON
20647    FMA instructions.  */
20648 nCEF(vfma,     _vfma,    3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
20649 nCEF(vfms,     _vfms,    3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
20650 /* ffmas/ffnmas/ffmad/ffnmad are dummy mnemonics to satisfy gas;
20651    the v form should always be used.  */
20652 cCE("ffmas",	ea00a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
20653 cCE("ffnmas",	ea00a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
20654 cCE("ffmad",	ea00b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
20655 cCE("ffnmad",	ea00b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
20656 nCE(vfnma,     _vfnma,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20657 nCE(vfnms,     _vfnms,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20658
20659#undef THUMB_VARIANT
20660#undef  ARM_VARIANT
20661#define ARM_VARIANT  & arm_cext_xscale /* Intel XScale extensions.  */
20662
20663 cCE("mia",	e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20664 cCE("miaph",	e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20665 cCE("miabb",	e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20666 cCE("miabt",	e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20667 cCE("miatb",	e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20668 cCE("miatt",	e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20669 cCE("mar",	c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
20670 cCE("mra",	c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
20671
20672#undef  ARM_VARIANT
20673#define ARM_VARIANT  & arm_cext_iwmmxt /* Intel Wireless MMX technology.  */
20674
20675 cCE("tandcb",	e13f130, 1, (RR),		    iwmmxt_tandorc),
20676 cCE("tandch",	e53f130, 1, (RR),		    iwmmxt_tandorc),
20677 cCE("tandcw",	e93f130, 1, (RR),		    iwmmxt_tandorc),
20678 cCE("tbcstb",	e400010, 2, (RIWR, RR),		    rn_rd),
20679 cCE("tbcsth",	e400050, 2, (RIWR, RR),		    rn_rd),
20680 cCE("tbcstw",	e400090, 2, (RIWR, RR),		    rn_rd),
20681 cCE("textrcb",	e130170, 2, (RR, I7),		    iwmmxt_textrc),
20682 cCE("textrch",	e530170, 2, (RR, I7),		    iwmmxt_textrc),
20683 cCE("textrcw",	e930170, 2, (RR, I7),		    iwmmxt_textrc),
20684 cCE("textrmub",e100070, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
20685 cCE("textrmuh",e500070, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
20686 cCE("textrmuw",e900070, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
20687 cCE("textrmsb",e100078, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
20688 cCE("textrmsh",e500078, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
20689 cCE("textrmsw",e900078, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
20690 cCE("tinsrb",	e600010, 3, (RIWR, RR, I7),	    iwmmxt_tinsr),
20691 cCE("tinsrh",	e600050, 3, (RIWR, RR, I7),	    iwmmxt_tinsr),
20692 cCE("tinsrw",	e600090, 3, (RIWR, RR, I7),	    iwmmxt_tinsr),
20693 cCE("tmcr",	e000110, 2, (RIWC_RIWG, RR),	    rn_rd),
20694 cCE("tmcrr",	c400000, 3, (RIWR, RR, RR),	    rm_rd_rn),
20695 cCE("tmia",	e200010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
20696 cCE("tmiaph",	e280010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
20697 cCE("tmiabb",	e2c0010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
20698 cCE("tmiabt",	e2d0010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
20699 cCE("tmiatb",	e2e0010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
20700 cCE("tmiatt",	e2f0010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
20701 cCE("tmovmskb",e100030, 2, (RR, RIWR),		    rd_rn),
20702 cCE("tmovmskh",e500030, 2, (RR, RIWR),		    rd_rn),
20703 cCE("tmovmskw",e900030, 2, (RR, RIWR),		    rd_rn),
20704 cCE("tmrc",	e100110, 2, (RR, RIWC_RIWG),	    rd_rn),
20705 cCE("tmrrc",	c500000, 3, (RR, RR, RIWR),	    rd_rn_rm),
20706 cCE("torcb",	e13f150, 1, (RR),		    iwmmxt_tandorc),
20707 cCE("torch",	e53f150, 1, (RR),		    iwmmxt_tandorc),
20708 cCE("torcw",	e93f150, 1, (RR),		    iwmmxt_tandorc),
20709 cCE("waccb",	e0001c0, 2, (RIWR, RIWR),	    rd_rn),
20710 cCE("wacch",	e4001c0, 2, (RIWR, RIWR),	    rd_rn),
20711 cCE("waccw",	e8001c0, 2, (RIWR, RIWR),	    rd_rn),
20712 cCE("waddbss",	e300180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20713 cCE("waddb",	e000180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20714 cCE("waddbus",	e100180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20715 cCE("waddhss",	e700180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20716 cCE("waddh",	e400180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20717 cCE("waddhus",	e500180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20718 cCE("waddwss",	eb00180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20719 cCE("waddw",	e800180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20720 cCE("waddwus",	e900180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20721 cCE("waligni",	e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
20722 cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20723 cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20724 cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20725 cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20726 cCE("wand",	e200000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20727 cCE("wandn",	e300000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20728 cCE("wavg2b",	e800000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20729 cCE("wavg2br",	e900000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20730 cCE("wavg2h",	ec00000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20731 cCE("wavg2hr",	ed00000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20732 cCE("wcmpeqb",	e000060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20733 cCE("wcmpeqh",	e400060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20734 cCE("wcmpeqw",	e800060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20735 cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20736 cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20737 cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20738 cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20739 cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20740 cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20741 cCE("wldrb",	c100000, 2, (RIWR, ADDR),	    iwmmxt_wldstbh),
20742 cCE("wldrh",	c500000, 2, (RIWR, ADDR),	    iwmmxt_wldstbh),
20743 cCE("wldrw",	c100100, 2, (RIWR_RIWC, ADDR),	    iwmmxt_wldstw),
20744 cCE("wldrd",	c500100, 2, (RIWR, ADDR),	    iwmmxt_wldstd),
20745 cCE("wmacs",	e600100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20746 cCE("wmacsz",	e700100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20747 cCE("wmacu",	e400100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20748 cCE("wmacuz",	e500100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20749 cCE("wmadds",	ea00100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20750 cCE("wmaddu",	e800100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20751 cCE("wmaxsb",	e200160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20752 cCE("wmaxsh",	e600160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20753 cCE("wmaxsw",	ea00160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20754 cCE("wmaxub",	e000160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20755 cCE("wmaxuh",	e400160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20756 cCE("wmaxuw",	e800160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20757 cCE("wminsb",	e300160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20758 cCE("wminsh",	e700160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20759 cCE("wminsw",	eb00160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20760 cCE("wminub",	e100160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20761 cCE("wminuh",	e500160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20762 cCE("wminuw",	e900160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20763 cCE("wmov",	e000000, 2, (RIWR, RIWR),	    iwmmxt_wmov),
20764 cCE("wmulsm",	e300100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20765 cCE("wmulsl",	e200100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20766 cCE("wmulum",	e100100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20767 cCE("wmulul",	e000100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20768 cCE("wor",	e000000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20769 cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20770 cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20771 cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20772 cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20773 cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20774 cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20775 cCE("wrorh",	e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20776 cCE("wrorhg",	e700148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
20777 cCE("wrorw",	eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20778 cCE("wrorwg",	eb00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
20779 cCE("wrord",	ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20780 cCE("wrordg",	ef00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
20781 cCE("wsadb",	e000120, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20782 cCE("wsadbz",	e100120, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20783 cCE("wsadh",	e400120, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20784 cCE("wsadhz",	e500120, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20785 cCE("wshufh",	e0001e0, 3, (RIWR, RIWR, I255),	    iwmmxt_wshufh),
20786 cCE("wsllh",	e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20787 cCE("wsllhg",	e500148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
20788 cCE("wsllw",	e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20789 cCE("wsllwg",	e900148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
20790 cCE("wslld",	ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20791 cCE("wslldg",	ed00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
20792 cCE("wsrah",	e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20793 cCE("wsrahg",	e400148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
20794 cCE("wsraw",	e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20795 cCE("wsrawg",	e800148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
20796 cCE("wsrad",	ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20797 cCE("wsradg",	ec00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
20798 cCE("wsrlh",	e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20799 cCE("wsrlhg",	e600148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
20800 cCE("wsrlw",	ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20801 cCE("wsrlwg",	ea00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
20802 cCE("wsrld",	ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20803 cCE("wsrldg",	ee00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
20804 cCE("wstrb",	c000000, 2, (RIWR, ADDR),	    iwmmxt_wldstbh),
20805 cCE("wstrh",	c400000, 2, (RIWR, ADDR),	    iwmmxt_wldstbh),
20806 cCE("wstrw",	c000100, 2, (RIWR_RIWC, ADDR),	    iwmmxt_wldstw),
20807 cCE("wstrd",	c400100, 2, (RIWR, ADDR),	    iwmmxt_wldstd),
20808 cCE("wsubbss",	e3001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20809 cCE("wsubb",	e0001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20810 cCE("wsubbus",	e1001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20811 cCE("wsubhss",	e7001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20812 cCE("wsubh",	e4001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20813 cCE("wsubhus",	e5001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20814 cCE("wsubwss",	eb001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20815 cCE("wsubw",	e8001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20816 cCE("wsubwus",	e9001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20817 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR),	    rd_rn),
20818 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR),	    rd_rn),
20819 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR),	    rd_rn),
20820 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR),	    rd_rn),
20821 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR),	    rd_rn),
20822 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR),	    rd_rn),
20823 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20824 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20825 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20826 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR),	    rd_rn),
20827 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR),	    rd_rn),
20828 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR),	    rd_rn),
20829 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR),	    rd_rn),
20830 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR),	    rd_rn),
20831 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR),	    rd_rn),
20832 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20833 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20834 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20835 cCE("wxor",	e100000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20836 cCE("wzero",	e300000, 1, (RIWR),		    iwmmxt_wzero),
20837
20838#undef  ARM_VARIANT
20839#define ARM_VARIANT  & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2.  */
20840
20841 cCE("torvscb",   e12f190, 1, (RR),		    iwmmxt_tandorc),
20842 cCE("torvsch",   e52f190, 1, (RR),		    iwmmxt_tandorc),
20843 cCE("torvscw",   e92f190, 1, (RR),		    iwmmxt_tandorc),
20844 cCE("wabsb",     e2001c0, 2, (RIWR, RIWR),           rd_rn),
20845 cCE("wabsh",     e6001c0, 2, (RIWR, RIWR),           rd_rn),
20846 cCE("wabsw",     ea001c0, 2, (RIWR, RIWR),           rd_rn),
20847 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20848 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20849 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20850 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20851 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20852 cCE("waddhc",    e600180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20853 cCE("waddwc",    ea00180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20854 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20855 cCE("wavg4",	e400000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20856 cCE("wavg4r",    e500000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20857 cCE("wmaddsn",   ee00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20858 cCE("wmaddsx",   eb00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20859 cCE("wmaddun",   ec00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20860 cCE("wmaddux",   e900100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20861 cCE("wmerge",    e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
20862 cCE("wmiabb",    e0000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20863 cCE("wmiabt",    e1000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20864 cCE("wmiatb",    e2000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20865 cCE("wmiatt",    e3000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20866 cCE("wmiabbn",   e4000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20867 cCE("wmiabtn",   e5000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20868 cCE("wmiatbn",   e6000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20869 cCE("wmiattn",   e7000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20870 cCE("wmiawbb",   e800120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20871 cCE("wmiawbt",   e900120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20872 cCE("wmiawtb",   ea00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20873 cCE("wmiawtt",   eb00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20874 cCE("wmiawbbn",  ec00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20875 cCE("wmiawbtn",  ed00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20876 cCE("wmiawtbn",  ee00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20877 cCE("wmiawttn",  ef00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20878 cCE("wmulsmr",   ef00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20879 cCE("wmulumr",   ed00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20880 cCE("wmulwumr",  ec000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20881 cCE("wmulwsmr",  ee000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20882 cCE("wmulwum",   ed000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20883 cCE("wmulwsm",   ef000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20884 cCE("wmulwl",    eb000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20885 cCE("wqmiabb",   e8000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20886 cCE("wqmiabt",   e9000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20887 cCE("wqmiatb",   ea000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20888 cCE("wqmiatt",   eb000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20889 cCE("wqmiabbn",  ec000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20890 cCE("wqmiabtn",  ed000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20891 cCE("wqmiatbn",  ee000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20892 cCE("wqmiattn",  ef000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20893 cCE("wqmulm",    e100080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20894 cCE("wqmulmr",   e300080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20895 cCE("wqmulwm",   ec000e0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20896 cCE("wqmulwmr",  ee000e0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20897 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
20898
20899#undef  ARM_VARIANT
20900#define ARM_VARIANT  & arm_cext_maverick /* Cirrus Maverick instructions.  */
20901
20902 cCE("cfldrs",	c100400, 2, (RMF, ADDRGLDC),	      rd_cpaddr),
20903 cCE("cfldrd",	c500400, 2, (RMD, ADDRGLDC),	      rd_cpaddr),
20904 cCE("cfldr32",	c100500, 2, (RMFX, ADDRGLDC),	      rd_cpaddr),
20905 cCE("cfldr64",	c500500, 2, (RMDX, ADDRGLDC),	      rd_cpaddr),
20906 cCE("cfstrs",	c000400, 2, (RMF, ADDRGLDC),	      rd_cpaddr),
20907 cCE("cfstrd",	c400400, 2, (RMD, ADDRGLDC),	      rd_cpaddr),
20908 cCE("cfstr32",	c000500, 2, (RMFX, ADDRGLDC),	      rd_cpaddr),
20909 cCE("cfstr64",	c400500, 2, (RMDX, ADDRGLDC),	      rd_cpaddr),
20910 cCE("cfmvsr",	e000450, 2, (RMF, RR),		      rn_rd),
20911 cCE("cfmvrs",	e100450, 2, (RR, RMF),		      rd_rn),
20912 cCE("cfmvdlr",	e000410, 2, (RMD, RR),		      rn_rd),
20913 cCE("cfmvrdl",	e100410, 2, (RR, RMD),		      rd_rn),
20914 cCE("cfmvdhr",	e000430, 2, (RMD, RR),		      rn_rd),
20915 cCE("cfmvrdh",	e100430, 2, (RR, RMD),		      rd_rn),
20916 cCE("cfmv64lr",e000510, 2, (RMDX, RR),		      rn_rd),
20917 cCE("cfmvr64l",e100510, 2, (RR, RMDX),		      rd_rn),
20918 cCE("cfmv64hr",e000530, 2, (RMDX, RR),		      rn_rd),
20919 cCE("cfmvr64h",e100530, 2, (RR, RMDX),		      rd_rn),
20920 cCE("cfmval32",e200440, 2, (RMAX, RMFX),	      rd_rn),
20921 cCE("cfmv32al",e100440, 2, (RMFX, RMAX),	      rd_rn),
20922 cCE("cfmvam32",e200460, 2, (RMAX, RMFX),	      rd_rn),
20923 cCE("cfmv32am",e100460, 2, (RMFX, RMAX),	      rd_rn),
20924 cCE("cfmvah32",e200480, 2, (RMAX, RMFX),	      rd_rn),
20925 cCE("cfmv32ah",e100480, 2, (RMFX, RMAX),	      rd_rn),
20926 cCE("cfmva32",	e2004a0, 2, (RMAX, RMFX),	      rd_rn),
20927 cCE("cfmv32a",	e1004a0, 2, (RMFX, RMAX),	      rd_rn),
20928 cCE("cfmva64",	e2004c0, 2, (RMAX, RMDX),	      rd_rn),
20929 cCE("cfmv64a",	e1004c0, 2, (RMDX, RMAX),	      rd_rn),
20930 cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX),	      mav_dspsc),
20931 cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS),	      rd),
20932 cCE("cfcpys",	e000400, 2, (RMF, RMF),		      rd_rn),
20933 cCE("cfcpyd",	e000420, 2, (RMD, RMD),		      rd_rn),
20934 cCE("cfcvtsd",	e000460, 2, (RMD, RMF),		      rd_rn),
20935 cCE("cfcvtds",	e000440, 2, (RMF, RMD),		      rd_rn),
20936 cCE("cfcvt32s",e000480, 2, (RMF, RMFX),	      rd_rn),
20937 cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX),	      rd_rn),
20938 cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX),	      rd_rn),
20939 cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX),	      rd_rn),
20940 cCE("cfcvts32",e100580, 2, (RMFX, RMF),	      rd_rn),
20941 cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD),	      rd_rn),
20942 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF),	      rd_rn),
20943 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD),	      rd_rn),
20944 cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR),	      mav_triple),
20945 cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR),	      mav_triple),
20946 cCE("cfsh32",	e000500, 3, (RMFX, RMFX, I63s),	      mav_shift),
20947 cCE("cfsh64",	e200500, 3, (RMDX, RMDX, I63s),	      mav_shift),
20948 cCE("cfcmps",	e100490, 3, (RR, RMF, RMF),	      rd_rn_rm),
20949 cCE("cfcmpd",	e1004b0, 3, (RR, RMD, RMD),	      rd_rn_rm),
20950 cCE("cfcmp32",	e100590, 3, (RR, RMFX, RMFX),	      rd_rn_rm),
20951 cCE("cfcmp64",	e1005b0, 3, (RR, RMDX, RMDX),	      rd_rn_rm),
20952 cCE("cfabss",	e300400, 2, (RMF, RMF),		      rd_rn),
20953 cCE("cfabsd",	e300420, 2, (RMD, RMD),		      rd_rn),
20954 cCE("cfnegs",	e300440, 2, (RMF, RMF),		      rd_rn),
20955 cCE("cfnegd",	e300460, 2, (RMD, RMD),		      rd_rn),
20956 cCE("cfadds",	e300480, 3, (RMF, RMF, RMF),	      rd_rn_rm),
20957 cCE("cfaddd",	e3004a0, 3, (RMD, RMD, RMD),	      rd_rn_rm),
20958 cCE("cfsubs",	e3004c0, 3, (RMF, RMF, RMF),	      rd_rn_rm),
20959 cCE("cfsubd",	e3004e0, 3, (RMD, RMD, RMD),	      rd_rn_rm),
20960 cCE("cfmuls",	e100400, 3, (RMF, RMF, RMF),	      rd_rn_rm),
20961 cCE("cfmuld",	e100420, 3, (RMD, RMD, RMD),	      rd_rn_rm),
20962 cCE("cfabs32",	e300500, 2, (RMFX, RMFX),	      rd_rn),
20963 cCE("cfabs64",	e300520, 2, (RMDX, RMDX),	      rd_rn),
20964 cCE("cfneg32",	e300540, 2, (RMFX, RMFX),	      rd_rn),
20965 cCE("cfneg64",	e300560, 2, (RMDX, RMDX),	      rd_rn),
20966 cCE("cfadd32",	e300580, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
20967 cCE("cfadd64",	e3005a0, 3, (RMDX, RMDX, RMDX),	      rd_rn_rm),
20968 cCE("cfsub32",	e3005c0, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
20969 cCE("cfsub64",	e3005e0, 3, (RMDX, RMDX, RMDX),	      rd_rn_rm),
20970 cCE("cfmul32",	e100500, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
20971 cCE("cfmul64",	e100520, 3, (RMDX, RMDX, RMDX),	      rd_rn_rm),
20972 cCE("cfmac32",	e100540, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
20973 cCE("cfmsc32",	e100560, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
20974 cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
20975 cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
20976 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
20977 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
20978
20979 /* ARMv8-M instructions.  */
20980#undef  ARM_VARIANT
20981#define ARM_VARIANT NULL
20982#undef  THUMB_VARIANT
20983#define THUMB_VARIANT & arm_ext_v8m
20984 TUE("sg", 0, e97fe97f, 0, (), 0, noargs),
20985 TUE("blxns", 0, 4784, 1, (RRnpc), 0, t_blx),
20986 TUE("bxns", 0, 4704, 1, (RRnpc), 0, t_bx),
20987 TUE("tt", 0, e840f000, 2, (RRnpc, RRnpc), 0, tt),
20988 TUE("ttt", 0, e840f040, 2, (RRnpc, RRnpc), 0, tt),
20989 TUE("tta", 0, e840f080, 2, (RRnpc, RRnpc), 0, tt),
20990 TUE("ttat", 0, e840f0c0, 2, (RRnpc, RRnpc), 0, tt),
20991
20992 /* FP for ARMv8-M Mainline.  Enabled for ARMv8-M Mainline because the
20993    instructions behave as NOPs if no VFP is present.  */
20994#undef  THUMB_VARIANT
20995#define THUMB_VARIANT & arm_ext_v8m_main
20996 TUEc("vlldm",	0,	 ec300a00, 1, (RRnpc),	rn),
20997 TUEc("vlstm",	0,	 ec200a00, 1, (RRnpc),	rn),
20998};
20999#undef ARM_VARIANT
21000#undef THUMB_VARIANT
21001#undef TCE
21002#undef TUE
21003#undef TUF
21004#undef TCC
21005#undef cCE
21006#undef cCL
21007#undef C3E
21008#undef CE
21009#undef CM
21010#undef UE
21011#undef UF
21012#undef UT
21013#undef NUF
21014#undef nUF
21015#undef NCE
21016#undef nCE
21017#undef OPS0
21018#undef OPS1
21019#undef OPS2
21020#undef OPS3
21021#undef OPS4
21022#undef OPS5
21023#undef OPS6
21024#undef do_0
21025
21026/* MD interface: bits in the object file.  */
21027
21028/* Turn an integer of n bytes (in val) into a stream of bytes appropriate
21029   for use in the a.out file, and store them in the array pointed to by buf.
21030   This knows about the endianness of the target machine and does
21031   THE RIGHT THING, whatever it is.  Possible values for n are 1 (byte),
21032   2 (short) and 4 (long).  Floating point numbers are put out as a series
21033   of LITTLENUMS (shorts, here at least).  */
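/* For example, md_number_to_chars (buf, 0xe1a00000, 4) stores the bytes
   00 00 a0 e1 on a little-endian target and e1 a0 00 00 on a big-endian
   one (compare the arm_noop byte patterns in arm_handle_align below).  */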
21034
21035void
21036md_number_to_chars (char * buf, valueT val, int n)
21037{
21038  if (target_big_endian)
21039    number_to_chars_bigendian (buf, val, n);
21040  else
21041    number_to_chars_littleendian (buf, val, n);
21042}
21043
21044static valueT
21045md_chars_to_number (char * buf, int n)
21046{
21047  valueT result = 0;
21048  unsigned char * where = (unsigned char *) buf;
21049
21050  if (target_big_endian)
21051    {
21052      while (n--)
21053	{
21054	  result <<= 8;
21055	  result |= (*where++ & 255);
21056	}
21057    }
21058  else
21059    {
21060      while (n--)
21061	{
21062	  result <<= 8;
21063	  result |= (where[n] & 255);
21064	}
21065    }
21066
21067  return result;
21068}
21069
21070/* MD interface: Sections.  */
21071
21072/* Calculate the maximum variable size (i.e., excluding fr_fix)
21073   that an rs_machine_dependent frag may reach.  */
21074
21075unsigned int
21076arm_frag_max_var (fragS *fragp)
21077{
21078  /* We only use rs_machine_dependent for variable-size Thumb instructions,
21079     which are either THUMB_SIZE (2) or INSN_SIZE (4).
21080
21081     Note that we generate relaxable instructions even for cases that don't
21082     really need it, like an immediate that's a trivial constant.  So we're
21083     overestimating the instruction size for some of those cases.  Rather
21084     than putting more intelligence here, it would probably be better to
21085     avoid generating a relaxation frag in the first place when it can be
21086     determined up front that a short instruction will suffice.  */
21087
21088  gas_assert (fragp->fr_type == rs_machine_dependent);
21089  return INSN_SIZE;
21090}
21091
21092/* Estimate the size of a frag before relaxing.  Assume everything fits in
21093   2 bytes.  */
21094
21095int
21096md_estimate_size_before_relax (fragS * fragp,
21097			       segT    segtype ATTRIBUTE_UNUSED)
21098{
21099  fragp->fr_var = 2;
21100  return 2;
21101}
21102
21103/* Convert a machine dependent frag.  */
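/* For example, a frag emitted for a narrow "add sp, sp, #imm" that
   relaxation decided needs 4 bytes is rewritten here as the 32-bit
   encoding and given a BFD_RELOC_ARM_T32_ADD_IMM fixup.  */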
21104
21105void
21106md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
21107{
21108  unsigned long insn;
21109  unsigned long old_op;
21110  char *buf;
21111  expressionS exp;
21112  fixS *fixp;
21113  int reloc_type;
21114  int pc_rel;
21115  int opcode;
21116
21117  buf = fragp->fr_literal + fragp->fr_fix;
21118
21119  old_op = bfd_get_16(abfd, buf);
21120  if (fragp->fr_symbol)
21121    {
21122      exp.X_op = O_symbol;
21123      exp.X_add_symbol = fragp->fr_symbol;
21124    }
21125  else
21126    {
21127      exp.X_op = O_constant;
21128    }
21129  exp.X_add_number = fragp->fr_offset;
21130  opcode = fragp->fr_subtype;
21131  switch (opcode)
21132    {
21133    case T_MNEM_ldr_pc:
21134    case T_MNEM_ldr_pc2:
21135    case T_MNEM_ldr_sp:
21136    case T_MNEM_str_sp:
21137    case T_MNEM_ldr:
21138    case T_MNEM_ldrb:
21139    case T_MNEM_ldrh:
21140    case T_MNEM_str:
21141    case T_MNEM_strb:
21142    case T_MNEM_strh:
21143      if (fragp->fr_var == 4)
21144	{
21145	  insn = THUMB_OP32 (opcode);
21146	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
21147	    {
21148	      insn |= (old_op & 0x700) << 4;
21149	    }
21150	  else
21151	    {
21152	      insn |= (old_op & 7) << 12;
21153	      insn |= (old_op & 0x38) << 13;
21154	    }
21155	  insn |= 0x00000c00;
21156	  put_thumb32_insn (buf, insn);
21157	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
21158	}
21159      else
21160	{
21161	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
21162	}
21163      pc_rel = (opcode == T_MNEM_ldr_pc2);
21164      break;
21165    case T_MNEM_adr:
21166      if (fragp->fr_var == 4)
21167	{
21168	  insn = THUMB_OP32 (opcode);
21169	  insn |= (old_op & 0xf0) << 4;
21170	  put_thumb32_insn (buf, insn);
21171	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
21172	}
21173      else
21174	{
21175	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
21176	  exp.X_add_number -= 4;
21177	}
21178      pc_rel = 1;
21179      break;
21180    case T_MNEM_mov:
21181    case T_MNEM_movs:
21182    case T_MNEM_cmp:
21183    case T_MNEM_cmn:
21184      if (fragp->fr_var == 4)
21185	{
21186	  int r0off = (opcode == T_MNEM_mov
21187		       || opcode == T_MNEM_movs) ? 0 : 8;
21188	  insn = THUMB_OP32 (opcode);
21189	  insn = (insn & 0xe1ffffff) | 0x10000000;
21190	  insn |= (old_op & 0x700) << r0off;
21191	  put_thumb32_insn (buf, insn);
21192	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
21193	}
21194      else
21195	{
21196	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
21197	}
21198      pc_rel = 0;
21199      break;
21200    case T_MNEM_b:
21201      if (fragp->fr_var == 4)
21202	{
21203	  insn = THUMB_OP32(opcode);
21204	  put_thumb32_insn (buf, insn);
21205	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
21206	}
21207      else
21208	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
21209      pc_rel = 1;
21210      break;
21211    case T_MNEM_bcond:
21212      if (fragp->fr_var == 4)
21213	{
21214	  insn = THUMB_OP32(opcode);
21215	  insn |= (old_op & 0xf00) << 14;
21216	  put_thumb32_insn (buf, insn);
21217	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
21218	}
21219      else
21220	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
21221      pc_rel = 1;
21222      break;
21223    case T_MNEM_add_sp:
21224    case T_MNEM_add_pc:
21225    case T_MNEM_inc_sp:
21226    case T_MNEM_dec_sp:
21227      if (fragp->fr_var == 4)
21228	{
21229	  /* ??? Choose between add and addw.  */
21230	  insn = THUMB_OP32 (opcode);
21231	  insn |= (old_op & 0xf0) << 4;
21232	  put_thumb32_insn (buf, insn);
21233	  if (opcode == T_MNEM_add_pc)
21234	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
21235	  else
21236	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
21237	}
21238      else
21239	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
21240      pc_rel = 0;
21241      break;
21242
21243    case T_MNEM_addi:
21244    case T_MNEM_addis:
21245    case T_MNEM_subi:
21246    case T_MNEM_subis:
21247      if (fragp->fr_var == 4)
21248	{
21249	  insn = THUMB_OP32 (opcode);
21250	  insn |= (old_op & 0xf0) << 4;
21251	  insn |= (old_op & 0xf) << 16;
21252	  put_thumb32_insn (buf, insn);
21253	  if (insn & (1 << 20))
21254	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
21255	  else
21256	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
21257	}
21258      else
21259	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
21260      pc_rel = 0;
21261      break;
21262    default:
21263      abort ();
21264    }
21265  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
21266		      (enum bfd_reloc_code_real) reloc_type);
21267  fixp->fx_file = fragp->fr_file;
21268  fixp->fx_line = fragp->fr_line;
21269  fragp->fr_fix += fragp->fr_var;
21270
21271  /* Set whether we use thumb-2 ISA based on final relaxation results.  */
21272  if (thumb_mode && fragp->fr_var == 4 && no_cpu_selected ()
21273      && !ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2))
21274    ARM_MERGE_FEATURE_SETS (arm_arch_used, thumb_arch_used, arm_ext_v6t2);
21275}
21276
21277/* Return the size of a relaxable immediate operand instruction.
21278   SHIFT and SIZE specify the form of the allowable immediate.  */
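/* A worked example: arm_relax_frag below calls this with SIZE == 5 and
   SHIFT == 2 for a narrow ldr/str, so only word-aligned offsets from 0 to
   124 keep the 16-bit encoding; a larger, misaligned or symbolic offset
   forces the 32-bit form.  */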
21279static int
21280relax_immediate (fragS *fragp, int size, int shift)
21281{
21282  offsetT offset;
21283  offsetT mask;
21284  offsetT low;
21285
21286  /* ??? Should be able to do better than this.  */
21287  if (fragp->fr_symbol)
21288    return 4;
21289
21290  low = (1 << shift) - 1;
21291  mask = (1 << (shift + size)) - (1 << shift);
21292  offset = fragp->fr_offset;
21293  /* Force misaligned offsets to 32-bit variant.  */
21294  if (offset & low)
21295    return 4;
21296  if (offset & ~mask)
21297    return 4;
21298  return 2;
21299}
21300
21301/* Get the address of a symbol during relaxation.  */
21302static addressT
21303relaxed_symbol_addr (fragS *fragp, long stretch)
21304{
21305  fragS *sym_frag;
21306  addressT addr;
21307  symbolS *sym;
21308
21309  sym = fragp->fr_symbol;
21310  sym_frag = symbol_get_frag (sym);
21311  know (S_GET_SEGMENT (sym) != absolute_section
21312	|| sym_frag == &zero_address_frag);
21313  addr = S_GET_VALUE (sym) + fragp->fr_offset;
21314
21315  /* If the frag has yet to be reached on this pass, assume it will
21316     move by STRETCH just as we did.  If this is not so, it will
21317     be because some frag in between grows, and that will force
21318     another pass.  */
21319
21320  if (stretch != 0
21321      && sym_frag->relax_marker != fragp->relax_marker)
21322    {
21323      fragS *f;
21324
21325      /* Adjust stretch for any alignment frag.  Note that if we have
21326	 been expanding the earlier code, the symbol may be
21327	 defined in what appears to be an earlier frag.  FIXME:
21328	 This doesn't handle the fr_subtype field, which specifies
21329	 a maximum number of bytes to skip when doing an
21330	 alignment.  */
21331      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
21332	{
21333	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
21334	    {
21335	      if (stretch < 0)
21336		stretch = - ((- stretch)
21337			     & ~ ((1 << (int) f->fr_offset) - 1));
21338	      else
21339		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
21340	      if (stretch == 0)
21341		break;
21342	    }
21343	}
21344      if (f != NULL)
21345	addr += stretch;
21346    }
21347
21348  return addr;
21349}
21350
21351/* Return the size of a relaxable adr pseudo-instruction or PC-relative
21352   load.  */
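/* For example, a Thumb "adr rd, label" keeps its 16-bit encoding only if
   label is word-aligned and lies no more than 1020 bytes after the
   word-aligned pc; anything else, including an undefined or out-of-section
   symbol, forces the 32-bit form.  */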
21353static int
21354relax_adr (fragS *fragp, asection *sec, long stretch)
21355{
21356  addressT addr;
21357  offsetT val;
21358
21359  /* Assume worst case for symbols not known to be in the same section.  */
21360  if (fragp->fr_symbol == NULL
21361      || !S_IS_DEFINED (fragp->fr_symbol)
21362      || sec != S_GET_SEGMENT (fragp->fr_symbol)
21363      || S_IS_WEAK (fragp->fr_symbol))
21364    return 4;
21365
21366  val = relaxed_symbol_addr (fragp, stretch);
21367  addr = fragp->fr_address + fragp->fr_fix;
21368  addr = (addr + 4) & ~3;
21369  /* Force misaligned targets to 32-bit variant.  */
21370  if (val & 3)
21371    return 4;
21372  val -= addr;
21373  if (val < 0 || val > 1020)
21374    return 4;
21375  return 2;
21376}
21377
21378/* Return the size of a relaxable add/sub immediate instruction.  */
21379static int
21380relax_addsub (fragS *fragp, asection *sec)
21381{
21382  char *buf;
21383  int op;
21384
21385  buf = fragp->fr_literal + fragp->fr_fix;
21386  op = bfd_get_16(sec->owner, buf);
21387  if ((op & 0xf) == ((op >> 4) & 0xf))
21388    return relax_immediate (fragp, 8, 0);
21389  else
21390    return relax_immediate (fragp, 3, 0);
21391}
21392
21393/* Return TRUE iff the definition of symbol S could be pre-empted
21394   (overridden) at link or load time.  */
21395static bfd_boolean
21396symbol_preemptible (symbolS *s)
21397{
21398  /* Weak symbols can always be pre-empted.  */
21399  if (S_IS_WEAK (s))
21400    return TRUE;
21401
21402  /* Non-global symbols cannot be pre-empted. */
21403  if (! S_IS_EXTERNAL (s))
21404    return FALSE;
21405
21406#ifdef OBJ_ELF
21407  /* In ELF, a global symbol can be given non-default visibility (protected,
21408     hidden or internal).  In that case it can't be pre-empted (other
21409     definitions in the same link unit would violate the ODR).  */
21410  if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
21411    return FALSE;
21412#endif
21413
21414  /* Other global symbols might be pre-empted.  */
21415  return TRUE;
21416}
21417
21418/* Return the size of a relaxable branch instruction.  BITS is the
21419   size of the offset field in the narrow instruction.  */
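/* For example, an unconditional Thumb "b" is relaxed with BITS == 11 (see
   arm_relax_frag below), so the 16-bit encoding is kept only while the
   displacement from the branch address plus 4 stays within [-2048, 2047]
   bytes; otherwise the 32-bit encoding is used.  */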
21420
21421static int
21422relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
21423{
21424  addressT addr;
21425  offsetT val;
21426  offsetT limit;
21427
21428  /* Assume worst case for symbols not known to be in the same section.  */
21429  if (!S_IS_DEFINED (fragp->fr_symbol)
21430      || sec != S_GET_SEGMENT (fragp->fr_symbol)
21431      || S_IS_WEAK (fragp->fr_symbol))
21432    return 4;
21433
21434#ifdef OBJ_ELF
21435  /* A branch to a function in ARM state will require interworking.  */
21436  if (S_IS_DEFINED (fragp->fr_symbol)
21437      && ARM_IS_FUNC (fragp->fr_symbol))
21438      return 4;
21439#endif
21440
21441  if (symbol_preemptible (fragp->fr_symbol))
21442    return 4;
21443
21444  val = relaxed_symbol_addr (fragp, stretch);
21445  addr = fragp->fr_address + fragp->fr_fix + 4;
21446  val -= addr;
21447
21448  /* The offset is a signed value, scaled by 2 (counted in halfwords).  */
21449  limit = 1 << bits;
21450  if (val >= limit || val < -limit)
21451    return 4;
21452  return 2;
21453}
21454
21455
21456/* Relax a machine dependent frag.  This returns the amount by which
21457   the current size of the frag should change.  */
21458
21459int
21460arm_relax_frag (asection *sec, fragS *fragp, long stretch)
21461{
21462  int oldsize;
21463  int newsize;
21464
21465  oldsize = fragp->fr_var;
21466  switch (fragp->fr_subtype)
21467    {
21468    case T_MNEM_ldr_pc2:
21469      newsize = relax_adr (fragp, sec, stretch);
21470      break;
21471    case T_MNEM_ldr_pc:
21472    case T_MNEM_ldr_sp:
21473    case T_MNEM_str_sp:
21474      newsize = relax_immediate (fragp, 8, 2);
21475      break;
21476    case T_MNEM_ldr:
21477    case T_MNEM_str:
21478      newsize = relax_immediate (fragp, 5, 2);
21479      break;
21480    case T_MNEM_ldrh:
21481    case T_MNEM_strh:
21482      newsize = relax_immediate (fragp, 5, 1);
21483      break;
21484    case T_MNEM_ldrb:
21485    case T_MNEM_strb:
21486      newsize = relax_immediate (fragp, 5, 0);
21487      break;
21488    case T_MNEM_adr:
21489      newsize = relax_adr (fragp, sec, stretch);
21490      break;
21491    case T_MNEM_mov:
21492    case T_MNEM_movs:
21493    case T_MNEM_cmp:
21494    case T_MNEM_cmn:
21495      newsize = relax_immediate (fragp, 8, 0);
21496      break;
21497    case T_MNEM_b:
21498      newsize = relax_branch (fragp, sec, 11, stretch);
21499      break;
21500    case T_MNEM_bcond:
21501      newsize = relax_branch (fragp, sec, 8, stretch);
21502      break;
21503    case T_MNEM_add_sp:
21504    case T_MNEM_add_pc:
21505      newsize = relax_immediate (fragp, 8, 2);
21506      break;
21507    case T_MNEM_inc_sp:
21508    case T_MNEM_dec_sp:
21509      newsize = relax_immediate (fragp, 7, 2);
21510      break;
21511    case T_MNEM_addi:
21512    case T_MNEM_addis:
21513    case T_MNEM_subi:
21514    case T_MNEM_subis:
21515      newsize = relax_addsub (fragp, sec);
21516      break;
21517    default:
21518      abort ();
21519    }
21520
21521  fragp->fr_var = newsize;
21522  /* Freeze wide instructions that are at or before the same location as
21523     in the previous pass.  This avoids infinite loops.
21524     Don't freeze them unconditionally because targets may be artificially
21525     misaligned by the expansion of preceding frags.  */
21526  if (stretch <= 0 && newsize > 2)
21527    {
21528      md_convert_frag (sec->owner, sec, fragp);
21529      frag_wane (fragp);
21530    }
21531
21532  return newsize - oldsize;
21533}
21534
21535/* Round up a section size to the appropriate boundary.	 */
21536
21537valueT
21538md_section_align (segT	 segment ATTRIBUTE_UNUSED,
21539		  valueT size)
21540{
21541#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
21542  if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
21543    {
21544      /* For a.out, force the section size to be aligned.  If we don't do
21545	 this, BFD will align it for us, but it will not write out the
21546	 final bytes of the section.  This may be a bug in BFD, but it is
21547	 easier to fix it here since that is how the other a.out targets
21548	 work.  */
21549      int align;
21550
21551      align = bfd_get_section_alignment (stdoutput, segment);
21552      size = ((size + (1 << align) - 1) & (-((valueT) 1 << align)));
21553    }
21554#endif
21555
21556  return size;
21557}
21558
21559/* This is called from HANDLE_ALIGN in write.c.	 Fill in the contents
21560   of an rs_align_code fragment.  */
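/* For example, 6 bytes of padding in Thumb-2 code are filled with one
   narrow NOP (00 bf) followed by one wide NOP (af f3 00 80), using the
   little-endian patterns from the tables below.  */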
21561
21562void
21563arm_handle_align (fragS * fragP)
21564{
21565  static unsigned char const arm_noop[2][2][4] =
21566    {
21567      {  /* ARMv1 */
21568	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
21569	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
21570      },
21571      {  /* ARMv6k */
21572	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
21573	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
21574      },
21575    };
21576  static unsigned char const thumb_noop[2][2][2] =
21577    {
21578      {  /* Thumb-1 */
21579	{0xc0, 0x46},  /* LE */
21580	{0x46, 0xc0},  /* BE */
21581      },
21582      {  /* Thumb-2 */
21583	{0x00, 0xbf},  /* LE */
21584	{0xbf, 0x00}   /* BE */
21585      }
21586    };
21587  static unsigned char const wide_thumb_noop[2][4] =
21588    {  /* Wide Thumb-2 */
21589      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
21590      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
21591    };
21592
21593  unsigned bytes, fix, noop_size;
21594  char * p;
21595  const unsigned char * noop;
21596  const unsigned char *narrow_noop = NULL;
21597#ifdef OBJ_ELF
21598  enum mstate state;
21599#endif
21600
21601  if (fragP->fr_type != rs_align_code)
21602    return;
21603
21604  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
21605  p = fragP->fr_literal + fragP->fr_fix;
21606  fix = 0;
21607
21608  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
21609    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
21610
21611  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);
21612
21613  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
21614    {
21615      if (ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
21616			       ? selected_cpu : arm_arch_none, arm_ext_v6t2))
21617	{
21618	  narrow_noop = thumb_noop[1][target_big_endian];
21619	  noop = wide_thumb_noop[target_big_endian];
21620	}
21621      else
21622	noop = thumb_noop[0][target_big_endian];
21623      noop_size = 2;
21624#ifdef OBJ_ELF
21625      state = MAP_THUMB;
21626#endif
21627    }
21628  else
21629    {
21630      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
21631					   ? selected_cpu : arm_arch_none,
21632					   arm_ext_v6k) != 0]
21633		     [target_big_endian];
21634      noop_size = 4;
21635#ifdef OBJ_ELF
21636      state = MAP_ARM;
21637#endif
21638    }
21639
21640  fragP->fr_var = noop_size;
21641
21642  if (bytes & (noop_size - 1))
21643    {
21644      fix = bytes & (noop_size - 1);
21645#ifdef OBJ_ELF
21646      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
21647#endif
21648      memset (p, 0, fix);
21649      p += fix;
21650      bytes -= fix;
21651    }
21652
21653  if (narrow_noop)
21654    {
21655      if (bytes & noop_size)
21656	{
21657	  /* Insert a narrow noop.  */
21658	  memcpy (p, narrow_noop, noop_size);
21659	  p += noop_size;
21660	  bytes -= noop_size;
21661	  fix += noop_size;
21662	}
21663
21664      /* Use wide noops for the remainder.  */
21665      noop_size = 4;
21666    }
21667
21668  while (bytes >= noop_size)
21669    {
21670      memcpy (p, noop, noop_size);
21671      p += noop_size;
21672      bytes -= noop_size;
21673      fix += noop_size;
21674    }
21675
21676  fragP->fr_fix += fix;
21677}
21678
21679/* Called from md_do_align.  Used to create an alignment
21680   frag in a code section.  */
21681
21682void
21683arm_frag_align_code (int n, int max)
21684{
21685  char * p;
21686
21687  /* We assume that there will never be a requirement
21688     to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes.  */
21689  if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
21690    {
21691      char err_msg[128];
21692
21693      sprintf (err_msg,
21694	_("alignments greater than %d bytes not supported in .text sections."),
21695	MAX_MEM_FOR_RS_ALIGN_CODE + 1);
21696      as_fatal ("%s", err_msg);
21697    }
21698
21699  p = frag_var (rs_align_code,
21700		MAX_MEM_FOR_RS_ALIGN_CODE,
21701		1,
21702		(relax_substateT) max,
21703		(symbolS *) NULL,
21704		(offsetT) n,
21705		(char *) NULL);
21706  *p = 0;
21707}
21708
21709/* Perform target specific initialisation of a frag.
21710   Note - despite the name this initialisation is not done when the frag
21711   is created, but only when its type is assigned.  A frag can be created
21712   and used a long time before its type is set, so beware of assuming that
21713   this initialisation is performed first.  */
21714
21715#ifndef OBJ_ELF
21716void
21717arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
21718{
21719  /* Record whether this frag is in an ARM or a THUMB area.  */
21720  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
21721}
21722
21723#else /* OBJ_ELF is defined.  */
21724void
21725arm_init_frag (fragS * fragP, int max_chars)
21726{
21727  int frag_thumb_mode;
21728
21729  /* If the current ARM vs THUMB mode has not already
21730     been recorded into this frag then do so now.  */
21731  if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
21732    fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
21733
21734  frag_thumb_mode = fragP->tc_frag_data.thumb_mode ^ MODE_RECORDED;
21735
21736  /* Record a mapping symbol for alignment frags.  We will delete this
21737     later if the alignment ends up empty.  */
21738  switch (fragP->fr_type)
21739    {
21740    case rs_align:
21741    case rs_align_test:
21742    case rs_fill:
21743      mapping_state_2 (MAP_DATA, max_chars);
21744      break;
21745    case rs_align_code:
21746      mapping_state_2 (frag_thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
21747      break;
21748    default:
21749      break;
21750    }
21751}
21752
21753/* When we change sections we need to issue a new mapping symbol.  */
21754
21755void
21756arm_elf_change_section (void)
21757{
21758  /* Link an unlinked unwind index table section to the .text section.	*/
21759  if (elf_section_type (now_seg) == SHT_ARM_EXIDX
21760      && elf_linked_to_section (now_seg) == NULL)
21761    elf_linked_to_section (now_seg) = text_section;
21762}
21763
21764int
21765arm_elf_section_type (const char * str, size_t len)
21766{
21767  if (len == 5 && strncmp (str, "exidx", 5) == 0)
21768    return SHT_ARM_EXIDX;
21769
21770  return -1;
21771}
21772
21773/* Code to deal with unwinding tables.	*/
21774
21775static void add_unwind_adjustsp (offsetT);
21776
21777/* Generate any deferred unwind frame offset.  */
21778
21779static void
21780flush_pending_unwind (void)
21781{
21782  offsetT offset;
21783
21784  offset = unwind.pending_offset;
21785  unwind.pending_offset = 0;
21786  if (offset != 0)
21787    add_unwind_adjustsp (offset);
21788}
21789
21790/* Add an opcode to this list for this function.  Two-byte opcodes should
21791   be passed as op[0] << 8 | op[1].  The list of opcodes is built in reverse
21792   order.  */
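/* For example, a two-byte opcode passed as 0x8001 with LENGTH == 2 is
   stored low byte first (0x01, then 0x80); create_unwind_entry later walks
   the array backwards, so the bytes are emitted in their original order.  */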
21793
21794static void
21795add_unwind_opcode (valueT op, int length)
21796{
21797  /* Add any deferred stack adjustment.	 */
21798  if (unwind.pending_offset)
21799    flush_pending_unwind ();
21800
21801  unwind.sp_restored = 0;
21802
21803  if (unwind.opcode_count + length > unwind.opcode_alloc)
21804    {
21805      unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
21806      if (unwind.opcodes)
21807	unwind.opcodes = XRESIZEVEC (unsigned char, unwind.opcodes,
21808				     unwind.opcode_alloc);
21809      else
21810	unwind.opcodes = XNEWVEC (unsigned char, unwind.opcode_alloc);
21811    }
21812  while (length > 0)
21813    {
21814      length--;
21815      unwind.opcodes[unwind.opcode_count] = op & 0xff;
21816      op >>= 8;
21817      unwind.opcode_count++;
21818    }
21819}
21820
21821/* Add unwind opcodes to adjust the stack pointer.  */
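/* The encodings emitted below follow the ARM EHABI unwind opcodes: positive
   adjustments of up to 0x200 bytes use one or two short opcodes in the
   range 0x00..0x3f (vsp += (op << 2) + 4), larger adjustments use the long
   form 0xb2 followed by a uleb128, and negative adjustments use 0x40..0x7f.
   For example, "vsp += 16" is encoded as the single opcode 0x03.  */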
21822
21823static void
21824add_unwind_adjustsp (offsetT offset)
21825{
21826  valueT op;
21827
21828  if (offset > 0x200)
21829    {
21830      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
21831      char bytes[5];
21832      int n;
21833      valueT o;
21834
21835      /* Long form: 0xb2, uleb128.  */
21836      /* This might not fit in a word so add the individual bytes,
21837	 remembering the list is built in reverse order.  */
21838      o = (valueT) ((offset - 0x204) >> 2);
21839      if (o == 0)
21840	add_unwind_opcode (0, 1);
21841
21842      /* Calculate the uleb128 encoding of the offset.	*/
21843      n = 0;
21844      while (o)
21845	{
21846	  bytes[n] = o & 0x7f;
21847	  o >>= 7;
21848	  if (o)
21849	    bytes[n] |= 0x80;
21850	  n++;
21851	}
21852      /* Add the insn.	*/
21853      for (; n; n--)
21854	add_unwind_opcode (bytes[n - 1], 1);
21855      add_unwind_opcode (0xb2, 1);
21856    }
21857  else if (offset > 0x100)
21858    {
21859      /* Two short opcodes.  */
21860      add_unwind_opcode (0x3f, 1);
21861      op = (offset - 0x104) >> 2;
21862      add_unwind_opcode (op, 1);
21863    }
21864  else if (offset > 0)
21865    {
21866      /* Short opcode.	*/
21867      op = (offset - 4) >> 2;
21868      add_unwind_opcode (op, 1);
21869    }
21870  else if (offset < 0)
21871    {
21872      offset = -offset;
21873      while (offset > 0x100)
21874	{
21875	  add_unwind_opcode (0x7f, 1);
21876	  offset -= 0x100;
21877	}
21878      op = ((offset - 4) >> 2) | 0x40;
21879      add_unwind_opcode (op, 1);
21880    }
21881}
21882
21883/* Finish the list of unwind opcodes for this function.	 */
21884static void
21885finish_unwind_opcodes (void)
21886{
21887  valueT op;
21888
21889  if (unwind.fp_used)
21890    {
21891      /* Adjust sp as necessary.  */
21892      unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
21893      flush_pending_unwind ();
21894
21895      /* After restoring sp from the frame pointer.  */
21896      op = 0x90 | unwind.fp_reg;
21897      add_unwind_opcode (op, 1);
21898    }
21899  else
21900    flush_pending_unwind ();
21901}
21902
21903
21904/* Start an exception table entry.  If idx is nonzero this is an index table
21905   entry.  */
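/* For example, unwind data for code in a plain .text section goes into the
   sections named by ELF_STRING_ARM_unwind / ELF_STRING_ARM_unwind_info
   (conventionally .ARM.exidx and .ARM.extab), while code in a
   .gnu.linkonce.t.FOO section gets matching link-once unwind sections so
   that duplicate copies can be discarded.  */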
21906
21907static void
21908start_unwind_section (const segT text_seg, int idx)
21909{
21910  const char * text_name;
21911  const char * prefix;
21912  const char * prefix_once;
21913  const char * group_name;
21914  char * sec_name;
21915  int type;
21916  int flags;
21917  int linkonce;
21918
21919  if (idx)
21920    {
21921      prefix = ELF_STRING_ARM_unwind;
21922      prefix_once = ELF_STRING_ARM_unwind_once;
21923      type = SHT_ARM_EXIDX;
21924    }
21925  else
21926    {
21927      prefix = ELF_STRING_ARM_unwind_info;
21928      prefix_once = ELF_STRING_ARM_unwind_info_once;
21929      type = SHT_PROGBITS;
21930    }
21931
21932  text_name = segment_name (text_seg);
21933  if (streq (text_name, ".text"))
21934    text_name = "";
21935
21936  if (strncmp (text_name, ".gnu.linkonce.t.",
21937	       strlen (".gnu.linkonce.t.")) == 0)
21938    {
21939      prefix = prefix_once;
21940      text_name += strlen (".gnu.linkonce.t.");
21941    }
21942
21943  sec_name = concat (prefix, text_name, (char *) NULL);
21944
21945  flags = SHF_ALLOC;
21946  linkonce = 0;
21947  group_name = 0;
21948
21949  /* Handle COMDAT group.  */
21950  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
21951    {
21952      group_name = elf_group_name (text_seg);
21953      if (group_name == NULL)
21954	{
21955	  as_bad (_("Group section `%s' has no group signature"),
21956		  segment_name (text_seg));
21957	  ignore_rest_of_line ();
21958	  return;
21959	}
21960      flags |= SHF_GROUP;
21961      linkonce = 1;
21962    }
21963
21964  obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
21965
21966  /* Set the section link for index tables.  */
21967  if (idx)
21968    elf_linked_to_section (now_seg) = text_seg;
21969}
21970
21971
21972/* Start an unwind table entry.	 HAVE_DATA is nonzero if we have additional
21973   personality routine data.  Returns zero, or the index table value for
21974   an inline entry.  */
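/* A worked example of the inline case handled below: a frame whose only
   unwind opcode is 0x9b (set vsp from r11, as emitted by
   finish_unwind_opcodes) and which uses personality routine 0 produces the
   inline word 0x809bb0b0: the 0x80 marker byte, the opcode, and two 0xb0
   "finish" opcodes as padding.  */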
21975
21976static valueT
21977create_unwind_entry (int have_data)
21978{
21979  int size;
21980  addressT where;
21981  char *ptr;
21982  /* The current word of data.	*/
21983  valueT data;
21984  /* The number of bytes left in this word.  */
21985  int n;
21986
21987  finish_unwind_opcodes ();
21988
21989  /* Remember the current text section.	 */
21990  unwind.saved_seg = now_seg;
21991  unwind.saved_subseg = now_subseg;
21992
21993  start_unwind_section (now_seg, 0);
21994
21995  if (unwind.personality_routine == NULL)
21996    {
21997      if (unwind.personality_index == -2)
21998	{
21999	  if (have_data)
22000	    as_bad (_("handlerdata in cantunwind frame"));
22001	  return 1; /* EXIDX_CANTUNWIND.  */
22002	}
22003
22004      /* Use a default personality routine if none is specified.  */
22005      if (unwind.personality_index == -1)
22006	{
22007	  if (unwind.opcode_count > 3)
22008	    unwind.personality_index = 1;
22009	  else
22010	    unwind.personality_index = 0;
22011	}
22012
22013      /* Space for the personality routine entry.  */
22014      if (unwind.personality_index == 0)
22015	{
22016	  if (unwind.opcode_count > 3)
22017	    as_bad (_("too many unwind opcodes for personality routine 0"));
22018
22019	  if (!have_data)
22020	    {
22021	      /* All the data is inline in the index table.  */
22022	      data = 0x80;
22023	      n = 3;
22024	      while (unwind.opcode_count > 0)
22025		{
22026		  unwind.opcode_count--;
22027		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
22028		  n--;
22029		}
22030
22031	      /* Pad with "finish" opcodes.  */
22032	      while (n--)
22033		data = (data << 8) | 0xb0;
22034
22035	      return data;
22036	    }
22037	  size = 0;
22038	}
22039      else
22040	/* We get two opcodes "free" in the first word.	 */
22041	size = unwind.opcode_count - 2;
22042    }
22043  else
22044    {
22045      /* PR 16765: Missing or misplaced unwind directives can trigger this.  */
22046      if (unwind.personality_index != -1)
22047	{
22048	  as_bad (_("attempt to recreate an unwind entry"));
22049	  return 1;
22050	}
22051
22052      /* An extra byte is required for the opcode count.	*/
22053      size = unwind.opcode_count + 1;
22054    }
22055
22056  size = (size + 3) >> 2;
22057  if (size > 0xff)
22058    as_bad (_("too many unwind opcodes"));
22059
22060  frag_align (2, 0, 0);
22061  record_alignment (now_seg, 2);
22062  unwind.table_entry = expr_build_dot ();
22063
22064  /* Allocate the table entry.	*/
22065  ptr = frag_more ((size << 2) + 4);
22066  /* PR 13449: Zero the table entries in case some of them are not used.  */
22067  memset (ptr, 0, (size << 2) + 4);
22068  where = frag_now_fix () - ((size << 2) + 4);
22069
22070  switch (unwind.personality_index)
22071    {
22072    case -1:
22073      /* ??? Should this be a PLT generating relocation?  */
22074      /* Custom personality routine.  */
22075      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
22076	       BFD_RELOC_ARM_PREL31);
22077
22078      where += 4;
22079      ptr += 4;
22080
22081      /* Set the first byte to the number of additional words.	*/
22082      data = size > 0 ? size - 1 : 0;
22083      n = 3;
22084      break;
22085
22086    /* ABI defined personality routines.  */
22087    case 0:
22088      /* Three opcode bytes are packed into the first word.  */
22089      data = 0x80;
22090      n = 3;
22091      break;
22092
22093    case 1:
22094    case 2:
22095      /* The size and first two opcode bytes go in the first word.  */
22096      data = ((0x80 + unwind.personality_index) << 8) | size;
22097      n = 2;
22098      break;
22099
22100    default:
22101      /* Should never happen.  */
22102      abort ();
22103    }
22104
22105  /* Pack the opcodes into words (MSB first), reversing the list at the same
22106     time.  */
22107  while (unwind.opcode_count > 0)
22108    {
22109      if (n == 0)
22110	{
22111	  md_number_to_chars (ptr, data, 4);
22112	  ptr += 4;
22113	  n = 4;
22114	  data = 0;
22115	}
22116      unwind.opcode_count--;
22117      n--;
22118      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
22119    }
22120
22121  /* Finish off the last word.	*/
22122  if (n < 4)
22123    {
22124      /* Pad with "finish" opcodes.  */
22125      while (n--)
22126	data = (data << 8) | 0xb0;
22127
22128      md_number_to_chars (ptr, data, 4);
22129    }
22130
22131  if (!have_data)
22132    {
22133      /* Add an empty descriptor if there is no user-specified data.   */
22134      ptr = frag_more (4);
22135      md_number_to_chars (ptr, 0, 4);
22136    }
22137
22138  return 0;
22139}
22140
22141
22142/* Initialize the DWARF-2 unwind information for this procedure.  */
22143
22144void
22145tc_arm_frame_initial_instructions (void)
22146{
22147  cfi_add_CFA_def_cfa (REG_SP, 0);
22148}
22149#endif /* OBJ_ELF */
22150
22151/* Convert REGNAME to a DWARF-2 register number.  */
22152
22153int
22154tc_arm_regname_to_dw2regnum (char *regname)
22155{
22156  int reg = arm_reg_parse (&regname, REG_TYPE_RN);
22157  if (reg != FAIL)
22158    return reg;
22159
22160  /* PR 16694: Allow VFP registers as well.  */
22161  reg = arm_reg_parse (&regname, REG_TYPE_VFS);
22162  if (reg != FAIL)
22163    return 64 + reg;
22164
22165  reg = arm_reg_parse (&regname, REG_TYPE_VFD);
22166  if (reg != FAIL)
22167    return reg + 256;
22168
22169  return -1;
22170}
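/* Illustrative mapping, assuming the numbering used above: "r7" -> 7,
   "s5" -> 64 + 5 = 69, "d8" -> 256 + 8 = 264.  A directive such as
   ".cfi_offset d8, -8" would therefore record register number 264.  */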
22171
22172#ifdef TE_PE
22173void
22174tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
22175{
22176  expressionS exp;
22177
22178  exp.X_op = O_secrel;
22179  exp.X_add_symbol = symbol;
22180  exp.X_add_number = 0;
22181  emit_expr (&exp, size);
22182}
22183#endif
22184
22185/* MD interface: Symbol and relocation handling.  */
22186
22187/* Return the address within the segment that a PC-relative fixup is
22188   relative to.  For ARM, PC-relative fixups applied to instructions
22189   are generally relative to the location of the fixup plus 8 bytes.
22190   Thumb branches are offset by 4, and Thumb loads relative to PC
22191   require special handling.  */
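/* Worked example (illustrative only): an ARM-state "b" at address 0x1000
   targeting 0x1010 stores the offset (0x1010 - (0x1000 + 8)) >> 2 = 2,
   because the PC seen by the branch is the instruction address plus 8.  */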
22192
22193long
22194md_pcrel_from_section (fixS * fixP, segT seg)
22195{
22196  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
22197
22198  /* If this is pc-relative and we are going to emit a relocation
22199     then we just want to put out any pipeline compensation that the linker
22200     will need.  Otherwise we want to use the calculated base.
22201     For WinCE we skip the bias for externals as well, since this
22202     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
22203  if (fixP->fx_pcrel
22204      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
22205	  || (arm_force_relocation (fixP)
22206#ifdef TE_WINCE
22207	      && !S_IS_EXTERNAL (fixP->fx_addsy)
22208#endif
22209	      )))
22210    base = 0;
22211
22212
22213  switch (fixP->fx_r_type)
22214    {
22215      /* PC relative addressing on the Thumb is slightly odd as the
22216	 bottom two bits of the PC are forced to zero for the
22217	 calculation.  This happens *after* application of the
22218	 pipeline offset.  However, Thumb adrl already adjusts for
22219	 this, so we need not do it again.  */
22220    case BFD_RELOC_ARM_THUMB_ADD:
22221      return base & ~3;
22222
22223    case BFD_RELOC_ARM_THUMB_OFFSET:
22224    case BFD_RELOC_ARM_T32_OFFSET_IMM:
22225    case BFD_RELOC_ARM_T32_ADD_PC12:
22226    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
22227      return (base + 4) & ~3;
22228
22229      /* Thumb branches are simply offset by +4.  */
22230    case BFD_RELOC_THUMB_PCREL_BRANCH7:
22231    case BFD_RELOC_THUMB_PCREL_BRANCH9:
22232    case BFD_RELOC_THUMB_PCREL_BRANCH12:
22233    case BFD_RELOC_THUMB_PCREL_BRANCH20:
22234    case BFD_RELOC_THUMB_PCREL_BRANCH25:
22235      return base + 4;
22236
22237    case BFD_RELOC_THUMB_PCREL_BRANCH23:
22238      if (fixP->fx_addsy
22239	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22240	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22241	  && ARM_IS_FUNC (fixP->fx_addsy)
22242	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
22243	base = fixP->fx_where + fixP->fx_frag->fr_address;
22244       return base + 4;
22245
22246      /* BLX is like branches above, but forces the low two bits of PC to
22247	 zero.  */
22248    case BFD_RELOC_THUMB_PCREL_BLX:
22249      if (fixP->fx_addsy
22250	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22251	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22252	  && THUMB_IS_FUNC (fixP->fx_addsy)
22253	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
22254	base = fixP->fx_where + fixP->fx_frag->fr_address;
22255      return (base + 4) & ~3;
22256
22257      /* ARM mode branches are offset by +8.  However, the Windows CE
22258	 loader expects the relocation not to take this into account.  */
22259    case BFD_RELOC_ARM_PCREL_BLX:
22260      if (fixP->fx_addsy
22261	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22262	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22263	  && ARM_IS_FUNC (fixP->fx_addsy)
22264	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
22265	base = fixP->fx_where + fixP->fx_frag->fr_address;
22266      return base + 8;
22267
22268    case BFD_RELOC_ARM_PCREL_CALL:
22269      if (fixP->fx_addsy
22270	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22271	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22272	  && THUMB_IS_FUNC (fixP->fx_addsy)
22273	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
22274	base = fixP->fx_where + fixP->fx_frag->fr_address;
22275      return base + 8;
22276
22277    case BFD_RELOC_ARM_PCREL_BRANCH:
22278    case BFD_RELOC_ARM_PCREL_JUMP:
22279    case BFD_RELOC_ARM_PLT32:
22280#ifdef TE_WINCE
22281      /* When handling fixups immediately (because we have already
22282	 discovered the value of the symbol, or the address of the frag
22283	 involved) we must account for the +8 offset ourselves, as the OS
22284	 loader will never see the reloc; see fixup_segment() in write.c.
22285	 The S_IS_EXTERNAL test handles global symbols, which need the
22286	 calculated base, not just the pipeline compensation the linker will apply.  */
22287      if (fixP->fx_pcrel
22288	  && fixP->fx_addsy != NULL
22289	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22290	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
22291	return base + 8;
22292      return base;
22293#else
22294      return base + 8;
22295#endif
22296
22297
22298      /* ARM mode loads relative to PC are also offset by +8.  Unlike
22299	 branches, the Windows CE loader *does* expect the relocation
22300	 to take this into account.  */
22301    case BFD_RELOC_ARM_OFFSET_IMM:
22302    case BFD_RELOC_ARM_OFFSET_IMM8:
22303    case BFD_RELOC_ARM_HWLITERAL:
22304    case BFD_RELOC_ARM_LITERAL:
22305    case BFD_RELOC_ARM_CP_OFF_IMM:
22306      return base + 8;
22307
22308
22309      /* Other PC-relative relocations are un-offset.  */
22310    default:
22311      return base;
22312    }
22313}
22314
22315static bfd_boolean flag_warn_syms = TRUE;
22316
22317bfd_boolean
22318arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED, char * name)
22319{
22320  /* PR 18347 - Warn if the user attempts to create a symbol with the same
22321     name as an ARM instruction.  Whilst strictly speaking it is allowed, it
22322     does mean that the resulting code might be very confusing to the reader.
22323     Also this warning can be triggered if the user omits an operand before
22324     an immediate address, eg:
22325
22326       LDR =foo
22327
22328     GAS treats this as an assignment of the value of the symbol foo to a
22329     symbol LDR, and so (without this code) it will not issue any kind of
22330     warning or error message.
22331
22332     Note - ARM instructions are case-insensitive but the strings in the hash
22333     table are all stored in lower case, so we must first ensure that name is
22334     lower case too.  */
22335  if (flag_warn_syms && arm_ops_hsh)
22336    {
22337      char * nbuf = strdup (name);
22338      char * p;
22339
22340      for (p = nbuf; *p; p++)
22341	*p = TOLOWER (*p);
22342      if (hash_find (arm_ops_hsh, nbuf) != NULL)
22343	{
22344	  static struct hash_control * already_warned = NULL;
22345
22346	  if (already_warned == NULL)
22347	    already_warned = hash_new ();
22348	  /* Only warn about the symbol once.  To keep the code
22349	     simple we let hash_insert do the lookup for us.  */
22350	  if (hash_insert (already_warned, name, NULL) == NULL)
22351	    as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name);
22352	}
22353      else
22354	free (nbuf);
22355    }
22356
22357  return FALSE;
22358}
22359
22360/* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
22361   Otherwise we have no need to default values of symbols.  */
22362
22363symbolS *
22364md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
22365{
22366#ifdef OBJ_ELF
22367  if (name[0] == '_' && name[1] == 'G'
22368      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
22369    {
22370      if (!GOT_symbol)
22371	{
22372	  if (symbol_find (name))
22373	    as_bad (_("GOT already in the symbol table"));
22374
22375	  GOT_symbol = symbol_new (name, undefined_section,
22376				   (valueT) 0, & zero_address_frag);
22377	}
22378
22379      return GOT_symbol;
22380    }
22381#endif
22382
22383  return NULL;
22384}
22385
22386/* Subroutine of md_apply_fix.	 Check to see if an immediate can be
22387   computed as two separate immediate values, added together.  We
22388   already know that this value cannot be computed by just one ARM
22389   instruction.	 */
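/* For example (an illustrative value, not from any particular source):
   0x10001 has no single 8-bit-rotated encoding, but it splits into a low
   part of 1 (no rotation) and a high part of 1 ROR 16 = 0x10000, so the
   constant can be built with two ADD instructions, as the ADRL fixup
   below does.  */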
22390
22391static unsigned int
22392validate_immediate_twopart (unsigned int   val,
22393			    unsigned int * highpart)
22394{
22395  unsigned int a;
22396  unsigned int i;
22397
22398  for (i = 0; i < 32; i += 2)
22399    if (((a = rotate_left (val, i)) & 0xff) != 0)
22400      {
22401	if (a & 0xff00)
22402	  {
22403	    if (a & ~ 0xffff)
22404	      continue;
22405	    * highpart = (a  >> 8) | ((i + 24) << 7);
22406	  }
22407	else if (a & 0xff0000)
22408	  {
22409	    if (a & 0xff000000)
22410	      continue;
22411	    * highpart = (a >> 16) | ((i + 16) << 7);
22412	  }
22413	else
22414	  {
22415	    gas_assert (a & 0xff000000);
22416	    * highpart = (a >> 24) | ((i + 8) << 7);
22417	  }
22418
22419	return (a & 0xff) | (i << 7);
22420      }
22421
22422  return FAIL;
22423}
22424
22425static int
22426validate_offset_imm (unsigned int val, int hwse)
22427{
22428  if ((hwse && val > 255) || val > 4095)
22429    return FAIL;
22430  return val;
22431}
22432
22433/* Subroutine of md_apply_fix.	 Do those data_ops which can take a
22434   negative immediate constant by altering the instruction.  A bit of
22435   a hack really.
22436	MOV <-> MVN
22437	AND <-> BIC
22438	ADC <-> SBC
22439	by inverting the second operand, and
22440	ADD <-> SUB
22441	CMP <-> CMN
22442	by negating the second operand.	 */
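/* A typical case (illustrative): "mov r0, #-1" has no valid 8-bit-rotated
   encoding for -1, but inverting the operand gives "mvn r0, #0", which
   does, so the opcode field is rewritten from MOV to MVN.  */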
22443
22444static int
22445negate_data_op (unsigned long * instruction,
22446		unsigned long	value)
22447{
22448  int op, new_inst;
22449  unsigned long negated, inverted;
22450
22451  negated = encode_arm_immediate (-value);
22452  inverted = encode_arm_immediate (~value);
22453
22454  op = (*instruction >> DATA_OP_SHIFT) & 0xf;
22455  switch (op)
22456    {
22457      /* First negates.	 */
22458    case OPCODE_SUB:		 /* ADD <-> SUB	 */
22459      new_inst = OPCODE_ADD;
22460      value = negated;
22461      break;
22462
22463    case OPCODE_ADD:
22464      new_inst = OPCODE_SUB;
22465      value = negated;
22466      break;
22467
22468    case OPCODE_CMP:		 /* CMP <-> CMN	 */
22469      new_inst = OPCODE_CMN;
22470      value = negated;
22471      break;
22472
22473    case OPCODE_CMN:
22474      new_inst = OPCODE_CMP;
22475      value = negated;
22476      break;
22477
22478      /* Now Inverted ops.  */
22479    case OPCODE_MOV:		 /* MOV <-> MVN	 */
22480      new_inst = OPCODE_MVN;
22481      value = inverted;
22482      break;
22483
22484    case OPCODE_MVN:
22485      new_inst = OPCODE_MOV;
22486      value = inverted;
22487      break;
22488
22489    case OPCODE_AND:		 /* AND <-> BIC	 */
22490      new_inst = OPCODE_BIC;
22491      value = inverted;
22492      break;
22493
22494    case OPCODE_BIC:
22495      new_inst = OPCODE_AND;
22496      value = inverted;
22497      break;
22498
22499    case OPCODE_ADC:		  /* ADC <-> SBC  */
22500      new_inst = OPCODE_SBC;
22501      value = inverted;
22502      break;
22503
22504    case OPCODE_SBC:
22505      new_inst = OPCODE_ADC;
22506      value = inverted;
22507      break;
22508
22509      /* We cannot do anything.	 */
22510    default:
22511      return FAIL;
22512    }
22513
22514  if (value == (unsigned) FAIL)
22515    return FAIL;
22516
22517  *instruction &= OPCODE_MASK;
22518  *instruction |= new_inst << DATA_OP_SHIFT;
22519  return value;
22520}
22521
22522/* Like negate_data_op, but for Thumb-2.   */
22523
22524static unsigned int
22525thumb32_negate_data_op (offsetT *instruction, unsigned int value)
22526{
22527  int op, new_inst;
22528  int rd;
22529  unsigned int negated, inverted;
22530
22531  negated = encode_thumb32_immediate (-value);
22532  inverted = encode_thumb32_immediate (~value);
22533
22534  rd = (*instruction >> 8) & 0xf;
22535  op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
22536  switch (op)
22537    {
22538      /* ADD <-> SUB.  Includes CMP <-> CMN.  */
22539    case T2_OPCODE_SUB:
22540      new_inst = T2_OPCODE_ADD;
22541      value = negated;
22542      break;
22543
22544    case T2_OPCODE_ADD:
22545      new_inst = T2_OPCODE_SUB;
22546      value = negated;
22547      break;
22548
22549      /* ORR <-> ORN.  Includes MOV <-> MVN.  */
22550    case T2_OPCODE_ORR:
22551      new_inst = T2_OPCODE_ORN;
22552      value = inverted;
22553      break;
22554
22555    case T2_OPCODE_ORN:
22556      new_inst = T2_OPCODE_ORR;
22557      value = inverted;
22558      break;
22559
22560      /* AND <-> BIC.  TST has no inverted equivalent.  */
22561    case T2_OPCODE_AND:
22562      new_inst = T2_OPCODE_BIC;
22563      if (rd == 15)
22564	value = FAIL;
22565      else
22566	value = inverted;
22567      break;
22568
22569    case T2_OPCODE_BIC:
22570      new_inst = T2_OPCODE_AND;
22571      value = inverted;
22572      break;
22573
22574      /* ADC <-> SBC  */
22575    case T2_OPCODE_ADC:
22576      new_inst = T2_OPCODE_SBC;
22577      value = inverted;
22578      break;
22579
22580    case T2_OPCODE_SBC:
22581      new_inst = T2_OPCODE_ADC;
22582      value = inverted;
22583      break;
22584
22585      /* We cannot do anything.	 */
22586    default:
22587      return FAIL;
22588    }
22589
22590  if (value == (unsigned int)FAIL)
22591    return FAIL;
22592
22593  *instruction &= T2_OPCODE_MASK;
22594  *instruction |= new_inst << T2_DATA_OP_SHIFT;
22595  return value;
22596}
22597
22598/* Read a 32-bit thumb instruction from buf.  */
22599static unsigned long
22600get_thumb32_insn (char * buf)
22601{
22602  unsigned long insn;
22603  insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
22604  insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22605
22606  return insn;
22607}
22608
22609
22610/* We usually want to set the low bit on the address of thumb function
22611   symbols.  In particular .word foo - . should have the low bit set.
22612   Generic code tries to fold the difference of two symbols to
22613   a constant.  Prevent this and force a relocation when the first symbol
22614   is a Thumb function.  */
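/* For instance (an illustrative fragment, not from this file):

	.thumb_func
   foo:	bx	lr
	.word	foo - .

   must keep a relocation so that the low bit of foo's address is set,
   instead of the difference being folded to a plain constant.  */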
22615
22616bfd_boolean
22617arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
22618{
22619  if (op == O_subtract
22620      && l->X_op == O_symbol
22621      && r->X_op == O_symbol
22622      && THUMB_IS_FUNC (l->X_add_symbol))
22623    {
22624      l->X_op = O_subtract;
22625      l->X_op_symbol = r->X_add_symbol;
22626      l->X_add_number -= r->X_add_number;
22627      return TRUE;
22628    }
22629
22630  /* Process as normal.  */
22631  return FALSE;
22632}
22633
22634/* Encode Thumb2 unconditional branches and calls.  The encoding
22635   of the immediate fields is identical for the two.  */
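/* Sketch of the layout assembled below (per the Thumb-2 B.W/BL encoding):
   the first halfword holds S and imm10 ("hi"), the second holds J1, J2
   and imm11 ("lo"), where J1 = NOT(I1 XOR S) and J2 = NOT(I2 XOR S); the
   XOR with T2I1I2MASK performs that inversion.  */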
22636
22637static void
22638encode_thumb2_b_bl_offset (char * buf, offsetT value)
22639{
22640#define T2I1I2MASK  ((1 << 13) | (1 << 11))
22641  offsetT newval;
22642  offsetT newval2;
22643  addressT S, I1, I2, lo, hi;
22644
22645  S = (value >> 24) & 0x01;
22646  I1 = (value >> 23) & 0x01;
22647  I2 = (value >> 22) & 0x01;
22648  hi = (value >> 12) & 0x3ff;
22649  lo = (value >> 1) & 0x7ff;
22650  newval   = md_chars_to_number (buf, THUMB_SIZE);
22651  newval2  = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22652  newval  |= (S << 10) | hi;
22653  newval2 &=  ~T2I1I2MASK;
22654  newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
22655  md_number_to_chars (buf, newval, THUMB_SIZE);
22656  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
22657}
22658
22659void
22660md_apply_fix (fixS *	fixP,
22661	       valueT * valP,
22662	       segT	seg)
22663{
22664  offsetT	 value = * valP;
22665  offsetT	 newval;
22666  unsigned int	 newimm;
22667  unsigned long	 temp;
22668  int		 sign;
22669  char *	 buf = fixP->fx_where + fixP->fx_frag->fr_literal;
22670
22671  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
22672
22673  /* Note whether this will delete the relocation.  */
22674
22675  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
22676    fixP->fx_done = 1;
22677
22678  /* On a 64-bit host, silently truncate 'value' to 32 bits for
22679     consistency with the behaviour on 32-bit hosts.  Remember value
22680     for emit_reloc.  */
22681  value &= 0xffffffff;
22682  value ^= 0x80000000;
22683  value -= 0x80000000;
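  /* The xor/subtract pair above sign-extends bit 31, so that, for
     example, 0xfffffffc is treated as -4 on a 64-bit host rather than
     as a large positive offset.  */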
22684
22685  *valP = value;
22686  fixP->fx_addnumber = value;
22687
22688  /* Same treatment for fixP->fx_offset.  */
22689  fixP->fx_offset &= 0xffffffff;
22690  fixP->fx_offset ^= 0x80000000;
22691  fixP->fx_offset -= 0x80000000;
22692
22693  switch (fixP->fx_r_type)
22694    {
22695    case BFD_RELOC_NONE:
22696      /* This will need to go in the object file.  */
22697      fixP->fx_done = 0;
22698      break;
22699
22700    case BFD_RELOC_ARM_IMMEDIATE:
22701      /* We claim that this fixup has been processed here,
22702	 even if in fact we generate an error because we do
22703	 not have a reloc for it, so tc_gen_reloc will reject it.  */
22704      fixP->fx_done = 1;
22705
22706      if (fixP->fx_addsy)
22707	{
22708	  const char *msg = 0;
22709
22710	  if (! S_IS_DEFINED (fixP->fx_addsy))
22711	    msg = _("undefined symbol %s used as an immediate value");
22712	  else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
22713	    msg = _("symbol %s is in a different section");
22714	  else if (S_IS_WEAK (fixP->fx_addsy))
22715	    msg = _("symbol %s is weak and may be overridden later");
22716
22717	  if (msg)
22718	    {
22719	      as_bad_where (fixP->fx_file, fixP->fx_line,
22720			    msg, S_GET_NAME (fixP->fx_addsy));
22721	      break;
22722	    }
22723	}
22724
22725      temp = md_chars_to_number (buf, INSN_SIZE);
22726
22727      /* If the offset is negative, we should use encoding A2 for ADR.  */
22728      if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
22729	newimm = negate_data_op (&temp, value);
22730      else
22731	{
22732	  newimm = encode_arm_immediate (value);
22733
22734	  /* If the instruction will fail, see if we can fix things up by
22735	     changing the opcode.  */
22736	  if (newimm == (unsigned int) FAIL)
22737	    newimm = negate_data_op (&temp, value);
22738	}
22739
22740      if (newimm == (unsigned int) FAIL)
22741	{
22742	  as_bad_where (fixP->fx_file, fixP->fx_line,
22743			_("invalid constant (%lx) after fixup"),
22744			(unsigned long) value);
22745	  break;
22746	}
22747
22748      newimm |= (temp & 0xfffff000);
22749      md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
22750      break;
22751
22752    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
22753      {
22754	unsigned int highpart = 0;
22755	unsigned int newinsn  = 0xe1a00000; /* nop.  */
22756
22757	if (fixP->fx_addsy)
22758	  {
22759	    const char *msg = 0;
22760
22761	    if (! S_IS_DEFINED (fixP->fx_addsy))
22762	      msg = _("undefined symbol %s used as an immediate value");
22763	    else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
22764	      msg = _("symbol %s is in a different section");
22765	    else if (S_IS_WEAK (fixP->fx_addsy))
22766	      msg = _("symbol %s is weak and may be overridden later");
22767
22768	    if (msg)
22769	      {
22770		as_bad_where (fixP->fx_file, fixP->fx_line,
22771			      msg, S_GET_NAME (fixP->fx_addsy));
22772		break;
22773	      }
22774	  }
22775
22776	newimm = encode_arm_immediate (value);
22777	temp = md_chars_to_number (buf, INSN_SIZE);
22778
22779	/* If the instruction will fail, see if we can fix things up by
22780	   changing the opcode.	 */
22781	if (newimm == (unsigned int) FAIL
22782	    && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
22783	  {
22784	    /* No ?  OK - try using two ADD instructions to generate
22785	       the value.  */
22786	    newimm = validate_immediate_twopart (value, & highpart);
22787
22788	    /* Yes - then make sure that the second instruction is
22789	       also an add.  */
22790	    if (newimm != (unsigned int) FAIL)
22791	      newinsn = temp;
22792	    /* Still No ?  Try using a negated value.  */
22793	    else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
22794	      temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
22795	    /* Otherwise - give up.  */
22796	    else
22797	      {
22798		as_bad_where (fixP->fx_file, fixP->fx_line,
22799			      _("unable to compute ADRL instructions for PC offset of 0x%lx"),
22800			      (long) value);
22801		break;
22802	      }
22803
22804	    /* Replace the first operand in the 2nd instruction (which
22805	       is the PC) with the destination register.  We have
22806	       already added in the PC in the first instruction and we
22807	       do not want to do it again.  */
22808	    newinsn &= ~ 0xf0000;
22809	    newinsn |= ((newinsn & 0x0f000) << 4);
22810	  }
22811
22812	newimm |= (temp & 0xfffff000);
22813	md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
22814
22815	highpart |= (newinsn & 0xfffff000);
22816	md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
22817      }
22818      break;
22819
22820    case BFD_RELOC_ARM_OFFSET_IMM:
22821      if (!fixP->fx_done && seg->use_rela_p)
22822	value = 0;
22823
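      /* Fall through.  */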
22824    case BFD_RELOC_ARM_LITERAL:
22825      sign = value > 0;
22826
22827      if (value < 0)
22828	value = - value;
22829
22830      if (validate_offset_imm (value, 0) == FAIL)
22831	{
22832	  if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
22833	    as_bad_where (fixP->fx_file, fixP->fx_line,
22834			  _("invalid literal constant: pool needs to be closer"));
22835	  else
22836	    as_bad_where (fixP->fx_file, fixP->fx_line,
22837			  _("bad immediate value for offset (%ld)"),
22838			  (long) value);
22839	  break;
22840	}
22841
22842      newval = md_chars_to_number (buf, INSN_SIZE);
22843      if (value == 0)
22844	newval &= 0xfffff000;
22845      else
22846	{
22847	  newval &= 0xff7ff000;
22848	  newval |= value | (sign ? INDEX_UP : 0);
22849	}
22850      md_number_to_chars (buf, newval, INSN_SIZE);
22851      break;
22852
22853    case BFD_RELOC_ARM_OFFSET_IMM8:
22854    case BFD_RELOC_ARM_HWLITERAL:
22855      sign = value > 0;
22856
22857      if (value < 0)
22858	value = - value;
22859
22860      if (validate_offset_imm (value, 1) == FAIL)
22861	{
22862	  if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
22863	    as_bad_where (fixP->fx_file, fixP->fx_line,
22864			  _("invalid literal constant: pool needs to be closer"));
22865	  else
22866	    as_bad_where (fixP->fx_file, fixP->fx_line,
22867			  _("bad immediate value for 8-bit offset (%ld)"),
22868			  (long) value);
22869	  break;
22870	}
22871
22872      newval = md_chars_to_number (buf, INSN_SIZE);
22873      if (value == 0)
22874	newval &= 0xfffff0f0;
22875      else
22876	{
22877	  newval &= 0xff7ff0f0;
22878	  newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
22879	}
22880      md_number_to_chars (buf, newval, INSN_SIZE);
22881      break;
22882
22883    case BFD_RELOC_ARM_T32_OFFSET_U8:
22884      if (value < 0 || value > 1020 || value % 4 != 0)
22885	as_bad_where (fixP->fx_file, fixP->fx_line,
22886		      _("bad immediate value for offset (%ld)"), (long) value);
22887      value /= 4;
22888
22889      newval = md_chars_to_number (buf+2, THUMB_SIZE);
22890      newval |= value;
22891      md_number_to_chars (buf+2, newval, THUMB_SIZE);
22892      break;
22893
22894    case BFD_RELOC_ARM_T32_OFFSET_IMM:
22895      /* This is a complicated relocation used for all varieties of Thumb32
22896	 load/store instruction with immediate offset:
22897
22898	 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
22899						   *4, optional writeback(W)
22900						   (doubleword load/store)
22901
22902	 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
22903	 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
22904	 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
22905	 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
22906	 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
22907
22908	 Uppercase letters indicate bits that are already encoded at
22909	 this point.  Lowercase letters are our problem.  For the
22910	 second block of instructions, the secondary opcode nybble
22911	 (bits 8..11) is present, and bit 23 is zero, even if this is
22912	 a PC-relative operation.  */
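      /* Worked example (illustrative): for an "ldrd r0, r1, [r2, #16]"
	 resolved here, the doubleword form applies, so the stored 8-bit
	 immediate is 16 / 4 = 4 with the U bit (bit 23) set.  */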
22913      newval = md_chars_to_number (buf, THUMB_SIZE);
22914      newval <<= 16;
22915      newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
22916
22917      if ((newval & 0xf0000000) == 0xe0000000)
22918	{
22919	  /* Doubleword load/store: 8-bit offset, scaled by 4.  */
22920	  if (value >= 0)
22921	    newval |= (1 << 23);
22922	  else
22923	    value = -value;
22924	  if (value % 4 != 0)
22925	    {
22926	      as_bad_where (fixP->fx_file, fixP->fx_line,
22927			    _("offset not a multiple of 4"));
22928	      break;
22929	    }
22930	  value /= 4;
22931	  if (value > 0xff)
22932	    {
22933	      as_bad_where (fixP->fx_file, fixP->fx_line,
22934			    _("offset out of range"));
22935	      break;
22936	    }
22937	  newval &= ~0xff;
22938	}
22939      else if ((newval & 0x000f0000) == 0x000f0000)
22940	{
22941	  /* PC-relative, 12-bit offset.  */
22942	  if (value >= 0)
22943	    newval |= (1 << 23);
22944	  else
22945	    value = -value;
22946	  if (value > 0xfff)
22947	    {
22948	      as_bad_where (fixP->fx_file, fixP->fx_line,
22949			    _("offset out of range"));
22950	      break;
22951	    }
22952	  newval &= ~0xfff;
22953	}
22954      else if ((newval & 0x00000100) == 0x00000100)
22955	{
22956	  /* Writeback: 8-bit, +/- offset.  */
22957	  if (value >= 0)
22958	    newval |= (1 << 9);
22959	  else
22960	    value = -value;
22961	  if (value > 0xff)
22962	    {
22963	      as_bad_where (fixP->fx_file, fixP->fx_line,
22964			    _("offset out of range"));
22965	      break;
22966	    }
22967	  newval &= ~0xff;
22968	}
22969      else if ((newval & 0x00000f00) == 0x00000e00)
22970	{
22971	  /* T-instruction: positive 8-bit offset.  */
22972	  if (value < 0 || value > 0xff)
22973	    {
22974	      as_bad_where (fixP->fx_file, fixP->fx_line,
22975			    _("offset out of range"));
22976	      break;
22977	    }
22978	  newval &= ~0xff;
22979	  newval |= value;
22980	}
22981      else
22982	{
22983	  /* Positive 12-bit or negative 8-bit offset.  */
22984	  int limit;
22985	  if (value >= 0)
22986	    {
22987	      newval |= (1 << 23);
22988	      limit = 0xfff;
22989	    }
22990	  else
22991	    {
22992	      value = -value;
22993	      limit = 0xff;
22994	    }
22995	  if (value > limit)
22996	    {
22997	      as_bad_where (fixP->fx_file, fixP->fx_line,
22998			    _("offset out of range"));
22999	      break;
23000	    }
23001	  newval &= ~limit;
23002	}
23003
23004      newval |= value;
23005      md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
23006      md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
23007      break;
23008
23009    case BFD_RELOC_ARM_SHIFT_IMM:
23010      newval = md_chars_to_number (buf, INSN_SIZE);
23011      if (((unsigned long) value) > 32
23012	  || (value == 32
23013	      && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
23014	{
23015	  as_bad_where (fixP->fx_file, fixP->fx_line,
23016			_("shift expression is too large"));
23017	  break;
23018	}
23019
23020      if (value == 0)
23021	/* Shifts of zero must be done as lsl.	*/
23022	newval &= ~0x60;
23023      else if (value == 32)
23024	value = 0;
23025      newval &= 0xfffff07f;
23026      newval |= (value & 0x1f) << 7;
23027      md_number_to_chars (buf, newval, INSN_SIZE);
23028      break;
23029
23030    case BFD_RELOC_ARM_T32_IMMEDIATE:
23031    case BFD_RELOC_ARM_T32_ADD_IMM:
23032    case BFD_RELOC_ARM_T32_IMM12:
23033    case BFD_RELOC_ARM_T32_ADD_PC12:
23034      /* We claim that this fixup has been processed here,
23035	 even if in fact we generate an error because we do
23036	 not have a reloc for it, so tc_gen_reloc will reject it.  */
23037      fixP->fx_done = 1;
23038
23039      if (fixP->fx_addsy
23040	  && ! S_IS_DEFINED (fixP->fx_addsy))
23041	{
23042	  as_bad_where (fixP->fx_file, fixP->fx_line,
23043			_("undefined symbol %s used as an immediate value"),
23044			S_GET_NAME (fixP->fx_addsy));
23045	  break;
23046	}
23047
23048      newval = md_chars_to_number (buf, THUMB_SIZE);
23049      newval <<= 16;
23050      newval |= md_chars_to_number (buf+2, THUMB_SIZE);
23051
23052      newimm = FAIL;
23053      if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
23054	  || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
23055	{
23056	  newimm = encode_thumb32_immediate (value);
23057	  if (newimm == (unsigned int) FAIL)
23058	    newimm = thumb32_negate_data_op (&newval, value);
23059	}
23060      if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
23061	  && newimm == (unsigned int) FAIL)
23062	{
23063	  /* Turn add/sub into addw/subw.  */
23064	  if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
23065	    newval = (newval & 0xfeffffff) | 0x02000000;
23066	  /* No flat 12-bit imm encoding for addsw/subsw.  */
23067	  if ((newval & 0x00100000) == 0)
23068	    {
23069	      /* 12 bit immediate for addw/subw.  */
23070	      if (value < 0)
23071		{
23072		  value = -value;
23073		  newval ^= 0x00a00000;
23074		}
23075	      if (value > 0xfff)
23076		newimm = (unsigned int) FAIL;
23077	      else
23078		newimm = value;
23079	    }
23080	}
23081
23082      if (newimm == (unsigned int)FAIL)
23083	{
23084	  as_bad_where (fixP->fx_file, fixP->fx_line,
23085			_("invalid constant (%lx) after fixup"),
23086			(unsigned long) value);
23087	  break;
23088	}
23089
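      /* Scatter the 12-bit encoded immediate into the i (bit 26), imm3
	 (bits 12-14) and imm8 (bits 0-7) fields of the combined 32-bit
	 opcode.  */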
23090      newval |= (newimm & 0x800) << 15;
23091      newval |= (newimm & 0x700) << 4;
23092      newval |= (newimm & 0x0ff);
23093
23094      md_number_to_chars (buf,   (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
23095      md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
23096      break;
23097
23098    case BFD_RELOC_ARM_SMC:
23099      if (((unsigned long) value) > 0xffff)
23100	as_bad_where (fixP->fx_file, fixP->fx_line,
23101		      _("invalid smc expression"));
23102      newval = md_chars_to_number (buf, INSN_SIZE);
23103      newval |= (value & 0xf) | ((value & 0xfff0) << 4);
23104      md_number_to_chars (buf, newval, INSN_SIZE);
23105      break;
23106
23107    case BFD_RELOC_ARM_HVC:
23108      if (((unsigned long) value) > 0xffff)
23109	as_bad_where (fixP->fx_file, fixP->fx_line,
23110		      _("invalid hvc expression"));
23111      newval = md_chars_to_number (buf, INSN_SIZE);
23112      newval |= (value & 0xf) | ((value & 0xfff0) << 4);
23113      md_number_to_chars (buf, newval, INSN_SIZE);
23114      break;
23115
23116    case BFD_RELOC_ARM_SWI:
23117      if (fixP->tc_fix_data != 0)
23118	{
23119	  if (((unsigned long) value) > 0xff)
23120	    as_bad_where (fixP->fx_file, fixP->fx_line,
23121			  _("invalid swi expression"));
23122	  newval = md_chars_to_number (buf, THUMB_SIZE);
23123	  newval |= value;
23124	  md_number_to_chars (buf, newval, THUMB_SIZE);
23125	}
23126      else
23127	{
23128	  if (((unsigned long) value) > 0x00ffffff)
23129	    as_bad_where (fixP->fx_file, fixP->fx_line,
23130			  _("invalid swi expression"));
23131	  newval = md_chars_to_number (buf, INSN_SIZE);
23132	  newval |= value;
23133	  md_number_to_chars (buf, newval, INSN_SIZE);
23134	}
23135      break;
23136
23137    case BFD_RELOC_ARM_MULTI:
23138      if (((unsigned long) value) > 0xffff)
23139	as_bad_where (fixP->fx_file, fixP->fx_line,
23140		      _("invalid expression in load/store multiple"));
23141      newval = value | md_chars_to_number (buf, INSN_SIZE);
23142      md_number_to_chars (buf, newval, INSN_SIZE);
23143      break;
23144
23145#ifdef OBJ_ELF
23146    case BFD_RELOC_ARM_PCREL_CALL:
23147
23148      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23149	  && fixP->fx_addsy
23150	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23151	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23152	  && THUMB_IS_FUNC (fixP->fx_addsy))
23153	/* Flip the bl to blx.  This is a simple bit flip
23154	   here because we generate PCREL_CALL for
23155	   unconditional bls.  */
23156	{
23157	  newval = md_chars_to_number (buf, INSN_SIZE);
23158	  newval = newval | 0x10000000;
23159	  md_number_to_chars (buf, newval, INSN_SIZE);
23160	  temp = 1;
23161	  fixP->fx_done = 1;
23162	}
23163      else
23164	temp = 3;
23165      goto arm_branch_common;
23166
23167    case BFD_RELOC_ARM_PCREL_JUMP:
23168      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23169	  && fixP->fx_addsy
23170	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23171	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23172	  && THUMB_IS_FUNC (fixP->fx_addsy))
23173	{
23174	  /* This would map to a bl<cond>, b<cond>,
23175	     b<always> to a Thumb function. We
23176	     need to force a relocation for this particular
23177	     case.  */
23178	  newval = md_chars_to_number (buf, INSN_SIZE);
23179	  fixP->fx_done = 0;
23180	}
23181
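      /* Fall through.  */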
23182    case BFD_RELOC_ARM_PLT32:
23183#endif
23184    case BFD_RELOC_ARM_PCREL_BRANCH:
23185      temp = 3;
23186      goto arm_branch_common;
23187
23188    case BFD_RELOC_ARM_PCREL_BLX:
23189
23190      temp = 1;
23191      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23192	  && fixP->fx_addsy
23193	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23194	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23195	  && ARM_IS_FUNC (fixP->fx_addsy))
23196	{
23197	  /* Flip the blx to a bl and warn.  */
23198	  const char *name = S_GET_NAME (fixP->fx_addsy);
23199	  newval = 0xeb000000;
23200	  as_warn_where (fixP->fx_file, fixP->fx_line,
23201			 _("blx to '%s' an ARM ISA state function changed to bl"),
23202			  name);
23203	  md_number_to_chars (buf, newval, INSN_SIZE);
23204	  temp = 3;
23205	  fixP->fx_done = 1;
23206	}
23207
23208#ifdef OBJ_ELF
23209       if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
23210	 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
23211#endif
23212
23213    arm_branch_common:
23214      /* We are going to store value (shifted right by two) in the
23215	 instruction, in a 24 bit, signed field.  Bits 26 through 32 either
23216	 all clear or all set and bit 0 must be clear.  For B/BL bit 1 must
23217	 also be clear.  */
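      /* For example (illustrative): with temp == 3 a B/BL destination must
	 be word aligned and lie within roughly +/- 32MB, since only bits
	 2-25 of the offset fit in the 24-bit field.  */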
23218      if (value & temp)
23219	as_bad_where (fixP->fx_file, fixP->fx_line,
23220		      _("misaligned branch destination"));
23221      if ((value & (offsetT)0xfe000000) != (offsetT)0
23222	  && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
23223	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23224
23225      if (fixP->fx_done || !seg->use_rela_p)
23226	{
23227	  newval = md_chars_to_number (buf, INSN_SIZE);
23228	  newval |= (value >> 2) & 0x00ffffff;
23229	  /* Set the H bit on BLX instructions.  */
23230	  if (temp == 1)
23231	    {
23232	      if (value & 2)
23233		newval |= 0x01000000;
23234	      else
23235		newval &= ~0x01000000;
23236	    }
23237	  md_number_to_chars (buf, newval, INSN_SIZE);
23238	}
23239      break;
23240
23241    case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
23242      /* CBZ can only branch forward.  */
23243
23244      /* Attempts to use CBZ to branch to the next instruction
23245	 (which, strictly speaking, are prohibited) will be turned into
23246	 no-ops.
23247
23248	 FIXME: It may be better to remove the instruction completely and
23249	 perform relaxation.  */
23250      if (value == -2)
23251	{
23252	  newval = md_chars_to_number (buf, THUMB_SIZE);
23253	  newval = 0xbf00; /* NOP encoding T1 */
23254	  md_number_to_chars (buf, newval, THUMB_SIZE);
23255	}
23256      else
23257	{
23258	  if (value & ~0x7e)
23259	    as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23260
23261	  if (fixP->fx_done || !seg->use_rela_p)
23262	    {
23263	      newval = md_chars_to_number (buf, THUMB_SIZE);
23264	      newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
23265	      md_number_to_chars (buf, newval, THUMB_SIZE);
23266	    }
23267	}
23268      break;
23269
23270    case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch.	*/
23271      if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
23272	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23273
23274      if (fixP->fx_done || !seg->use_rela_p)
23275	{
23276	  newval = md_chars_to_number (buf, THUMB_SIZE);
23277	  newval |= (value & 0x1ff) >> 1;
23278	  md_number_to_chars (buf, newval, THUMB_SIZE);
23279	}
23280      break;
23281
23282    case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch.  */
23283      if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
23284	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23285
23286      if (fixP->fx_done || !seg->use_rela_p)
23287	{
23288	  newval = md_chars_to_number (buf, THUMB_SIZE);
23289	  newval |= (value & 0xfff) >> 1;
23290	  md_number_to_chars (buf, newval, THUMB_SIZE);
23291	}
23292      break;
23293
23294    case BFD_RELOC_THUMB_PCREL_BRANCH20:
23295      if (fixP->fx_addsy
23296	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23297	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23298	  && ARM_IS_FUNC (fixP->fx_addsy)
23299	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
23300	{
23301	  /* Force a relocation for a branch 20 bits wide.  */
23302	  fixP->fx_done = 0;
23303	}
23304      if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
23305	as_bad_where (fixP->fx_file, fixP->fx_line,
23306		      _("conditional branch out of range"));
23307
23308      if (fixP->fx_done || !seg->use_rela_p)
23309	{
23310	  offsetT newval2;
23311	  addressT S, J1, J2, lo, hi;
23312
23313	  S  = (value & 0x00100000) >> 20;
23314	  J2 = (value & 0x00080000) >> 19;
23315	  J1 = (value & 0x00040000) >> 18;
23316	  hi = (value & 0x0003f000) >> 12;
23317	  lo = (value & 0x00000ffe) >> 1;
23318
23319	  newval   = md_chars_to_number (buf, THUMB_SIZE);
23320	  newval2  = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23321	  newval  |= (S << 10) | hi;
23322	  newval2 |= (J1 << 13) | (J2 << 11) | lo;
23323	  md_number_to_chars (buf, newval, THUMB_SIZE);
23324	  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
23325	}
23326      break;
23327
23328    case BFD_RELOC_THUMB_PCREL_BLX:
23329      /* If there is a blx from a thumb state function to
23330	 another thumb function, flip this to a bl and warn
23331	 about it.  */
23332
23333      if (fixP->fx_addsy
23334	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23335	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23336	  && THUMB_IS_FUNC (fixP->fx_addsy))
23337	{
23338	  const char *name = S_GET_NAME (fixP->fx_addsy);
23339	  as_warn_where (fixP->fx_file, fixP->fx_line,
23340			 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
23341			 name);
23342	  newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23343	  newval = newval | 0x1000;
23344	  md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
23345	  fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
23346	  fixP->fx_done = 1;
23347	}
23348
23349
23350      goto thumb_bl_common;
23351
23352    case BFD_RELOC_THUMB_PCREL_BRANCH23:
23353      /* A bl from Thumb state ISA to an internal ARM state function
23354	 is converted to a blx.  */
23355      if (fixP->fx_addsy
23356	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23357	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23358	  && ARM_IS_FUNC (fixP->fx_addsy)
23359	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
23360	{
23361	  newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23362	  newval = newval & ~0x1000;
23363	  md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
23364	  fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
23365	  fixP->fx_done = 1;
23366	}
23367
23368    thumb_bl_common:
23369
23370      if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
23371	/* For a BLX instruction, make sure that the relocation is rounded up
23372	   to a word boundary.  This follows the semantics of the instruction
23373	   which specifies that bit 1 of the target address will come from bit
23374	   1 of the base address.  */
23375	value = (value + 3) & ~ 3;
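      /* E.g. (illustrative) an offset of 0x1006 is rounded up to 0x1008
	 before being encoded.  */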
23376
23377#ifdef OBJ_ELF
23378       if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
23379	   && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
23380	 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
23381#endif
23382
23383      if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
23384	{
23385	  if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)))
23386	    as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23387	  else if ((value & ~0x1ffffff)
23388		   && ((value & ~0x1ffffff) != ~0x1ffffff))
23389	    as_bad_where (fixP->fx_file, fixP->fx_line,
23390			  _("Thumb2 branch out of range"));
23391	}
23392
23393      if (fixP->fx_done || !seg->use_rela_p)
23394	encode_thumb2_b_bl_offset (buf, value);
23395
23396      break;
23397
23398    case BFD_RELOC_THUMB_PCREL_BRANCH25:
23399      if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
23400	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23401
23402      if (fixP->fx_done || !seg->use_rela_p)
23403	  encode_thumb2_b_bl_offset (buf, value);
23404
23405      break;
23406
23407    case BFD_RELOC_8:
23408      if (fixP->fx_done || !seg->use_rela_p)
23409	*buf = value;
23410      break;
23411
23412    case BFD_RELOC_16:
23413      if (fixP->fx_done || !seg->use_rela_p)
23414	md_number_to_chars (buf, value, 2);
23415      break;
23416
23417#ifdef OBJ_ELF
23418    case BFD_RELOC_ARM_TLS_CALL:
23419    case BFD_RELOC_ARM_THM_TLS_CALL:
23420    case BFD_RELOC_ARM_TLS_DESCSEQ:
23421    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
23422    case BFD_RELOC_ARM_TLS_GOTDESC:
23423    case BFD_RELOC_ARM_TLS_GD32:
23424    case BFD_RELOC_ARM_TLS_LE32:
23425    case BFD_RELOC_ARM_TLS_IE32:
23426    case BFD_RELOC_ARM_TLS_LDM32:
23427    case BFD_RELOC_ARM_TLS_LDO32:
23428      S_SET_THREAD_LOCAL (fixP->fx_addsy);
23429      break;
23430
23431    case BFD_RELOC_ARM_GOT32:
23432    case BFD_RELOC_ARM_GOTOFF:
23433      break;
23434
23435    case BFD_RELOC_ARM_GOT_PREL:
23436      if (fixP->fx_done || !seg->use_rela_p)
23437	md_number_to_chars (buf, value, 4);
23438      break;
23439
23440    case BFD_RELOC_ARM_TARGET2:
23441      /* TARGET2 is not partial-inplace, so we need to write the
23442	 addend here for REL targets, because it won't be written out
23443	 during reloc processing later.  */
23444      if (fixP->fx_done || !seg->use_rela_p)
23445	md_number_to_chars (buf, fixP->fx_offset, 4);
23446      break;
23447#endif
23448
23449    case BFD_RELOC_RVA:
23450    case BFD_RELOC_32:
23451    case BFD_RELOC_ARM_TARGET1:
23452    case BFD_RELOC_ARM_ROSEGREL32:
23453    case BFD_RELOC_ARM_SBREL32:
23454    case BFD_RELOC_32_PCREL:
23455#ifdef TE_PE
23456    case BFD_RELOC_32_SECREL:
23457#endif
23458      if (fixP->fx_done || !seg->use_rela_p)
23459#ifdef TE_WINCE
23460	/* For WinCE we only do this for pcrel fixups.  */
23461	if (fixP->fx_done || fixP->fx_pcrel)
23462#endif
23463	  md_number_to_chars (buf, value, 4);
23464      break;
23465
23466#ifdef OBJ_ELF
23467    case BFD_RELOC_ARM_PREL31:
23468      if (fixP->fx_done || !seg->use_rela_p)
23469	{
23470	  newval = md_chars_to_number (buf, 4) & 0x80000000;
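	  /* Bits 30 and 31 of the value must match for it to fit in a
	     signed 31-bit field; the test below checks exactly that.  */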
23471	  if ((value ^ (value >> 1)) & 0x40000000)
23472	    {
23473	      as_bad_where (fixP->fx_file, fixP->fx_line,
23474			    _("rel31 relocation overflow"));
23475	    }
23476	  newval |= value & 0x7fffffff;
23477	  md_number_to_chars (buf, newval, 4);
23478	}
23479      break;
23480#endif
23481
23482    case BFD_RELOC_ARM_CP_OFF_IMM:
23483    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
23484      if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM)
23485	newval = md_chars_to_number (buf, INSN_SIZE);
23486      else
23487	newval = get_thumb32_insn (buf);
23488      if ((newval & 0x0f200f00) == 0x0d000900)
23489	{
23490	  /* This is a fp16 vstr/vldr.  The immediate offset in the mnemonic
23491	     has permitted values that are multiples of 2, in the range 0
23492	     to 510.  */
23493	  if (value < -510 || value > 510 || (value & 1))
23494	    as_bad_where (fixP->fx_file, fixP->fx_line,
23495			  _("co-processor offset out of range"));
23496	}
23497      else if (value < -1023 || value > 1023 || (value & 3))
23498	as_bad_where (fixP->fx_file, fixP->fx_line,
23499		      _("co-processor offset out of range"));
23500    cp_off_common:
23501      sign = value > 0;
23502      if (value < 0)
23503	value = -value;
23504      if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
23505	  || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
23506	newval = md_chars_to_number (buf, INSN_SIZE);
23507      else
23508	newval = get_thumb32_insn (buf);
23509      if (value == 0)
23510	newval &= 0xffffff00;
23511      else
23512	{
23513	  newval &= 0xff7fff00;
23514	  if ((newval & 0x0f200f00) == 0x0d000900)
23515	    {
23516	      /* This is a fp16 vstr/vldr.
23517
23518		 The immediate offset field in the instruction is a half-word
23519		 offset, i.e. the byte offset shifted right by 1.
23520
23521		 Shifting left by 1 here and right by 2 below gives the
23522		 required net right shift by 1.  */
23523	      value <<= 1;
23524	    }
23525	  newval |= (value >> 2) | (sign ? INDEX_UP : 0);
23526	}
23527      if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
23528	  || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
23529	md_number_to_chars (buf, newval, INSN_SIZE);
23530      else
23531	put_thumb32_insn (buf, newval);
23532      break;
23533
23534    case BFD_RELOC_ARM_CP_OFF_IMM_S2:
23535    case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
23536      if (value < -255 || value > 255)
23537	as_bad_where (fixP->fx_file, fixP->fx_line,
23538		      _("co-processor offset out of range"));
23539      value *= 4;
23540      goto cp_off_common;
23541
23542    case BFD_RELOC_ARM_THUMB_OFFSET:
23543      newval = md_chars_to_number (buf, THUMB_SIZE);
23544      /* Exactly what range applies, and where the offset is inserted,
23545	 depends on the type of instruction; we can establish this from
23546	 the top 4 bits.  */
23547      switch (newval >> 12)
23548	{
23549	case 4: /* PC load.  */
23550	  /* Thumb PC loads are somewhat odd: bit 1 of the PC is
23551	     forced to zero for these loads; md_pcrel_from has already
23552	     compensated for this.  */
23553	  if (value & 3)
23554	    as_bad_where (fixP->fx_file, fixP->fx_line,
23555			  _("invalid offset, target not word aligned (0x%08lX)"),
23556			  (((unsigned long) fixP->fx_frag->fr_address
23557			    + (unsigned long) fixP->fx_where) & ~3)
23558			  + (unsigned long) value);
23559
23560	  if (value & ~0x3fc)
23561	    as_bad_where (fixP->fx_file, fixP->fx_line,
23562			  _("invalid offset, value too big (0x%08lX)"),
23563			  (long) value);
23564
23565	  newval |= value >> 2;
23566	  break;
23567
23568	case 9: /* SP load/store.  */
23569	  if (value & ~0x3fc)
23570	    as_bad_where (fixP->fx_file, fixP->fx_line,
23571			  _("invalid offset, value too big (0x%08lX)"),
23572			  (long) value);
23573	  newval |= value >> 2;
23574	  break;
23575
23576	case 6: /* Word load/store.  */
23577	  if (value & ~0x7c)
23578	    as_bad_where (fixP->fx_file, fixP->fx_line,
23579			  _("invalid offset, value too big (0x%08lX)"),
23580			  (long) value);
23581	  newval |= value << 4; /* 6 - 2.  */
23582	  break;
23583
23584	case 7: /* Byte load/store.  */
23585	  if (value & ~0x1f)
23586	    as_bad_where (fixP->fx_file, fixP->fx_line,
23587			  _("invalid offset, value too big (0x%08lX)"),
23588			  (long) value);
23589	  newval |= value << 6;
23590	  break;
23591
23592	case 8: /* Halfword load/store.	 */
23593	  if (value & ~0x3e)
23594	    as_bad_where (fixP->fx_file, fixP->fx_line,
23595			  _("invalid offset, value too big (0x%08lX)"),
23596			  (long) value);
23597	  newval |= value << 5; /* 6 - 1.  */
23598	  break;
23599
23600	default:
23601	  as_bad_where (fixP->fx_file, fixP->fx_line,
23602			_("Unable to process relocation for thumb opcode: %lx"),
23603			(unsigned long) newval);
23604	  break;
23605	}
23606      md_number_to_chars (buf, newval, THUMB_SIZE);
23607      break;
23608
23609    case BFD_RELOC_ARM_THUMB_ADD:
23610      /* This is a complicated relocation, since we use it for all of
23611	 the following immediate relocations:
23612
23613	    3bit ADD/SUB
23614	    8bit ADD/SUB
23615	    9bit ADD/SUB SP word-aligned
23616	   10bit ADD PC/SP word-aligned
23617
23618	 The type of instruction being processed is encoded in the
23619	 instruction field:
23620
23621	   0x8000  SUB
23622	   0x00F0  Rd
23623	   0x000F  Rs
23624      */
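      /* Illustrative outcomes, assuming the fixup resolves here: an
	 Rd == Rs ADD whose value turns out to be 200 takes the 8-bit form
	 below, while an SP-relative ADD whose value is 16 takes the
	 word-aligned form with the immediate stored as 16 >> 2.  */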
23625      newval = md_chars_to_number (buf, THUMB_SIZE);
23626      {
23627	int rd = (newval >> 4) & 0xf;
23628	int rs = newval & 0xf;
23629	int subtract = !!(newval & 0x8000);
23630
23631	/* Check for HI regs, only very restricted cases allowed:
23632	   Adjusting SP, and using PC or SP to get an address.	*/
23633	if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
23634	    || (rs > 7 && rs != REG_SP && rs != REG_PC))
23635	  as_bad_where (fixP->fx_file, fixP->fx_line,
23636			_("invalid Hi register with immediate"));
23637
23638	/* If value is negative, choose the opposite instruction.  */
23639	if (value < 0)
23640	  {
23641	    value = -value;
23642	    subtract = !subtract;
23643	    if (value < 0)
23644	      as_bad_where (fixP->fx_file, fixP->fx_line,
23645			    _("immediate value out of range"));
23646	  }
23647
23648	if (rd == REG_SP)
23649	  {
23650 	    if (value & ~0x1fc)
23651	      as_bad_where (fixP->fx_file, fixP->fx_line,
23652			    _("invalid immediate for stack address calculation"));
23653	    newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
23654	    newval |= value >> 2;
23655	  }
23656	else if (rs == REG_PC || rs == REG_SP)
23657	  {
23658	    /* PR gas/18541.  If the addition is for a defined symbol
23659	       within range of an ADR instruction then accept it.  */
23660	    if (subtract
23661		&& value == 4
23662		&& fixP->fx_addsy != NULL)
23663	      {
23664		subtract = 0;
23665
23666		if (! S_IS_DEFINED (fixP->fx_addsy)
23667		    || S_GET_SEGMENT (fixP->fx_addsy) != seg
23668		    || S_IS_WEAK (fixP->fx_addsy))
23669		  {
23670		    as_bad_where (fixP->fx_file, fixP->fx_line,
23671				  _("address calculation needs a strongly defined nearby symbol"));
23672		  }
23673		else
23674		  {
23675		    offsetT v = fixP->fx_where + fixP->fx_frag->fr_address;
23676
23677		    /* Round up to the next 4-byte boundary.  */
23678		    if (v & 3)
23679		      v = (v + 3) & ~ 3;
23680		    else
23681		      v += 4;
23682		    v = S_GET_VALUE (fixP->fx_addsy) - v;
23683
23684		    if (v & ~0x3fc)
23685		      {
23686			as_bad_where (fixP->fx_file, fixP->fx_line,
23687				      _("symbol too far away"));
23688		      }
23689		    else
23690		      {
23691			fixP->fx_done = 1;
23692			value = v;
23693		      }
23694		  }
23695	      }
23696
23697	    if (subtract || value & ~0x3fc)
23698	      as_bad_where (fixP->fx_file, fixP->fx_line,
23699			    _("invalid immediate for address calculation (value = 0x%08lX)"),
23700			    (unsigned long) (subtract ? - value : value));
23701	    newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
23702	    newval |= rd << 8;
23703	    newval |= value >> 2;
23704	  }
23705	else if (rs == rd)
23706	  {
23707	    if (value & ~0xff)
23708	      as_bad_where (fixP->fx_file, fixP->fx_line,
23709			    _("immediate value out of range"));
23710	    newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
23711	    newval |= (rd << 8) | value;
23712	  }
23713	else
23714	  {
23715	    if (value & ~0x7)
23716	      as_bad_where (fixP->fx_file, fixP->fx_line,
23717			    _("immediate value out of range"));
23718	    newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
23719	    newval |= rd | (rs << 3) | (value << 6);
23720	  }
23721      }
23722      md_number_to_chars (buf, newval, THUMB_SIZE);
23723      break;
23724
23725    case BFD_RELOC_ARM_THUMB_IMM:
23726      newval = md_chars_to_number (buf, THUMB_SIZE);
23727      if (value < 0 || value > 255)
23728	as_bad_where (fixP->fx_file, fixP->fx_line,
23729		      _("invalid immediate: %ld is out of range"),
23730		      (long) value);
23731      newval |= value;
23732      md_number_to_chars (buf, newval, THUMB_SIZE);
23733      break;
23734
23735    case BFD_RELOC_ARM_THUMB_SHIFT:
23736      /* 5bit shift value (0..32).  LSL cannot take 32.	 */
23737      newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
23738      temp = newval & 0xf800;
23739      if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
23740	as_bad_where (fixP->fx_file, fixP->fx_line,
23741		      _("invalid shift value: %ld"), (long) value);
23742      /* Shifts of zero must be encoded as LSL.	 */
23743      if (value == 0)
23744	newval = (newval & 0x003f) | T_OPCODE_LSL_I;
23745      /* Shifts of 32 are encoded as zero.  */
23746      else if (value == 32)
23747	value = 0;
23748      newval |= value << 6;
23749      md_number_to_chars (buf, newval, THUMB_SIZE);
23750      break;
23751
23752    case BFD_RELOC_VTABLE_INHERIT:
23753    case BFD_RELOC_VTABLE_ENTRY:
23754      fixP->fx_done = 0;
23755      return;
23756
23757    case BFD_RELOC_ARM_MOVW:
23758    case BFD_RELOC_ARM_MOVT:
23759    case BFD_RELOC_ARM_THUMB_MOVW:
23760    case BFD_RELOC_ARM_THUMB_MOVT:
23761      if (fixP->fx_done || !seg->use_rela_p)
23762	{
23763	  /* REL format relocations are limited to a 16-bit addend.  */
23764	  if (!fixP->fx_done)
23765	    {
23766	      if (value < -0x8000 || value > 0x7fff)
23767		  as_bad_where (fixP->fx_file, fixP->fx_line,
23768				_("offset out of range"));
23769	    }
23770	  else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
23771		   || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
23772	    {
23773	      value >>= 16;
23774	    }
23775
23776	  if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
23777	      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
23778	    {
23779	      newval = get_thumb32_insn (buf);
23780	      newval &= 0xfbf08f00;
23781	      newval |= (value & 0xf000) << 4;
23782	      newval |= (value & 0x0800) << 15;
23783	      newval |= (value & 0x0700) << 4;
23784	      newval |= (value & 0x00ff);
23785	      put_thumb32_insn (buf, newval);
23786	    }
23787	  else
23788	    {
23789	      newval = md_chars_to_number (buf, 4);
23790	      newval &= 0xfff0f000;
23791	      newval |= value & 0x0fff;
23792	      newval |= (value & 0xf000) << 4;
23793	      md_number_to_chars (buf, newval, 4);
23794	    }
23795	}
23796      return;
23797
23798   case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
23799   case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
23800   case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
23801   case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
23802      gas_assert (!fixP->fx_done);
23803      {
23804	bfd_vma insn;
23805	bfd_boolean is_mov;
23806	bfd_vma encoded_addend = value;
23807
23808	/* Check that addend can be encoded in instruction.  */
23809	if (!seg->use_rela_p && (value < 0 || value > 255))
23810	  as_bad_where (fixP->fx_file, fixP->fx_line,
23811			_("the offset 0x%08lX is not representable"),
23812			(unsigned long) encoded_addend);
23813
23814	/* Extract the instruction.  */
23815	insn = md_chars_to_number (buf, THUMB_SIZE);
23816	is_mov = (insn & 0xf800) == 0x2000;
23817
23818	/* Encode insn.  */
23819	if (is_mov)
23820	  {
23821	    if (!seg->use_rela_p)
23822	      insn |= encoded_addend;
23823	  }
23824	else
23825	  {
23826	    int rd, rs;
23827
23828	    /* Extract Rd and Rs.  */
23829	    /* The encoding is as follows:
23830		0x8000  SUB
23831		0x00F0  Rd
23832		0x000F  Rs
23833	     */
23834	    /* The following conditions must be true:
23835		- ADD (i.e. the SUB bit is clear)
23836		- Rd == Rs
23837		- Rd <= 7
23838	     */
23839	    rd = (insn >> 4) & 0xf;
23840	    rs = insn & 0xf;
23841	    if ((insn & 0x8000) || (rd != rs) || rd > 7)
23842	      as_bad_where (fixP->fx_file, fixP->fx_line,
23843			_("Unable to process relocation for thumb opcode: %lx"),
23844			(unsigned long) insn);
23845
23846	    /* Encode as ADD immediate8 thumb 1 code.  */
23847	    insn = 0x3000 | (rd << 8);
23848
23849	    /* Place the encoded addend into the first 8 bits of the
23850	       instruction.  */
23851	    if (!seg->use_rela_p)
23852	      insn |= encoded_addend;
23853	  }
23854
23855	/* Update the instruction.  */
23856	md_number_to_chars (buf, insn, THUMB_SIZE);
23857      }
23858      break;
23859
23860   case BFD_RELOC_ARM_ALU_PC_G0_NC:
23861   case BFD_RELOC_ARM_ALU_PC_G0:
23862   case BFD_RELOC_ARM_ALU_PC_G1_NC:
23863   case BFD_RELOC_ARM_ALU_PC_G1:
23864   case BFD_RELOC_ARM_ALU_PC_G2:
23865   case BFD_RELOC_ARM_ALU_SB_G0_NC:
23866   case BFD_RELOC_ARM_ALU_SB_G0:
23867   case BFD_RELOC_ARM_ALU_SB_G1_NC:
23868   case BFD_RELOC_ARM_ALU_SB_G1:
23869   case BFD_RELOC_ARM_ALU_SB_G2:
23870     gas_assert (!fixP->fx_done);
23871     if (!seg->use_rela_p)
23872       {
23873	 bfd_vma insn;
23874	 bfd_vma encoded_addend;
23875	 bfd_vma addend_abs = abs (value);
23876
23877	 /* Check that the absolute value of the addend can be
23878	    expressed as an 8-bit constant plus a rotation.  */
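	 /* For example, 0x3F0 is representable (0x3F rotated right by 28),
	    whereas 0x3F1 is not.  */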
23879	 encoded_addend = encode_arm_immediate (addend_abs);
23880	 if (encoded_addend == (unsigned int) FAIL)
23881	   as_bad_where (fixP->fx_file, fixP->fx_line,
23882			 _("the offset 0x%08lX is not representable"),
23883			 (unsigned long) addend_abs);
23884
23885	 /* Extract the instruction.  */
23886	 insn = md_chars_to_number (buf, INSN_SIZE);
23887
23888	 /* If the addend is positive, use an ADD instruction.
23889	    Otherwise use a SUB.  Take care not to destroy the S bit.  */
23890	 insn &= 0xff1fffff;
23891	 if (value < 0)
23892	   insn |= 1 << 22;
23893	 else
23894	   insn |= 1 << 23;
23895
23896	 /* Place the encoded addend into the first 12 bits of the
23897	    instruction.  */
23898	 insn &= 0xfffff000;
23899	 insn |= encoded_addend;
23900
23901	 /* Update the instruction.  */
23902	 md_number_to_chars (buf, insn, INSN_SIZE);
23903       }
23904     break;
23905
23906    case BFD_RELOC_ARM_LDR_PC_G0:
23907    case BFD_RELOC_ARM_LDR_PC_G1:
23908    case BFD_RELOC_ARM_LDR_PC_G2:
23909    case BFD_RELOC_ARM_LDR_SB_G0:
23910    case BFD_RELOC_ARM_LDR_SB_G1:
23911    case BFD_RELOC_ARM_LDR_SB_G2:
23912      gas_assert (!fixP->fx_done);
23913      if (!seg->use_rela_p)
23914	{
23915	  bfd_vma insn;
23916	  bfd_vma addend_abs = abs (value);
23917
23918	  /* Check that the absolute value of the addend can be
23919	     encoded in 12 bits.  */
23920	  if (addend_abs >= 0x1000)
23921	    as_bad_where (fixP->fx_file, fixP->fx_line,
23922			  _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
23923			  (unsigned long) addend_abs);
23924
23925	  /* Extract the instruction.  */
23926	  insn = md_chars_to_number (buf, INSN_SIZE);
23927
23928	  /* If the addend is negative, clear bit 23 of the instruction.
23929	     Otherwise set it.  */
23930	  if (value < 0)
23931	    insn &= ~(1 << 23);
23932	  else
23933	    insn |= 1 << 23;
23934
23935	  /* Place the absolute value of the addend into the first 12 bits
23936	     of the instruction.  */
23937	  insn &= 0xfffff000;
23938	  insn |= addend_abs;
23939
23940	  /* Update the instruction.  */
23941	  md_number_to_chars (buf, insn, INSN_SIZE);
23942	}
23943      break;
23944
23945    case BFD_RELOC_ARM_LDRS_PC_G0:
23946    case BFD_RELOC_ARM_LDRS_PC_G1:
23947    case BFD_RELOC_ARM_LDRS_PC_G2:
23948    case BFD_RELOC_ARM_LDRS_SB_G0:
23949    case BFD_RELOC_ARM_LDRS_SB_G1:
23950    case BFD_RELOC_ARM_LDRS_SB_G2:
23951      gas_assert (!fixP->fx_done);
23952      if (!seg->use_rela_p)
23953	{
23954	  bfd_vma insn;
23955	  bfd_vma addend_abs = abs (value);
23956
23957	  /* Check that the absolute value of the addend can be
23958	     encoded in 8 bits.  */
23959	  if (addend_abs >= 0x100)
23960	    as_bad_where (fixP->fx_file, fixP->fx_line,
23961			  _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
23962			  (unsigned long) addend_abs);
23963
23964	  /* Extract the instruction.  */
23965	  insn = md_chars_to_number (buf, INSN_SIZE);
23966
23967	  /* If the addend is negative, clear bit 23 of the instruction.
23968	     Otherwise set it.  */
23969	  if (value < 0)
23970	    insn &= ~(1 << 23);
23971	  else
23972	    insn |= 1 << 23;
23973
23974	  /* Place the first four bits of the absolute value of the addend
23975	     into the first 4 bits of the instruction, and the remaining
23976	     four into bits 8 .. 11.  */
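	  /* For example, an addend of 0x5a becomes 0xa in bits 0..3 and
	     0x5 in bits 8..11.  */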
23977	  insn &= 0xfffff0f0;
23978	  insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
23979
23980	  /* Update the instruction.  */
23981	  md_number_to_chars (buf, insn, INSN_SIZE);
23982	}
23983      break;
23984
23985    case BFD_RELOC_ARM_LDC_PC_G0:
23986    case BFD_RELOC_ARM_LDC_PC_G1:
23987    case BFD_RELOC_ARM_LDC_PC_G2:
23988    case BFD_RELOC_ARM_LDC_SB_G0:
23989    case BFD_RELOC_ARM_LDC_SB_G1:
23990    case BFD_RELOC_ARM_LDC_SB_G2:
23991      gas_assert (!fixP->fx_done);
23992      if (!seg->use_rela_p)
23993	{
23994	  bfd_vma insn;
23995	  bfd_vma addend_abs = abs (value);
23996
23997	  /* Check that the absolute value of the addend is a multiple of
23998	     four and, when divided by four, fits in 8 bits.  */
23999	  if (addend_abs & 0x3)
24000	    as_bad_where (fixP->fx_file, fixP->fx_line,
24001			  _("bad offset 0x%08lX (must be word-aligned)"),
24002			  (unsigned long) addend_abs);
24003
24004	  if ((addend_abs >> 2) > 0xff)
24005	    as_bad_where (fixP->fx_file, fixP->fx_line,
24006			  _("bad offset 0x%08lX (must be an 8-bit number of words)"),
24007			  (unsigned long) addend_abs);
24008
24009	  /* Extract the instruction.  */
24010	  insn = md_chars_to_number (buf, INSN_SIZE);
24011
24012	  /* If the addend is negative, clear bit 23 of the instruction.
24013	     Otherwise set it.  */
24014	  if (value < 0)
24015	    insn &= ~(1 << 23);
24016	  else
24017	    insn |= 1 << 23;
24018
24019	  /* Place the addend (divided by four) into the first eight
24020	     bits of the instruction.  */
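	  /* For example, an addend of 0x28 (ten words) is stored as 0x0a.  */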
24021	  insn &= 0xfffffff0;
24022	  insn |= addend_abs >> 2;
24023
24024	  /* Update the instruction.  */
24025	  md_number_to_chars (buf, insn, INSN_SIZE);
24026	}
24027      break;
24028
24029    case BFD_RELOC_ARM_V4BX:
24030      /* This will need to go in the object file.  */
24031      fixP->fx_done = 0;
24032      break;
24033
24034    case BFD_RELOC_UNUSED:
24035    default:
24036      as_bad_where (fixP->fx_file, fixP->fx_line,
24037		    _("bad relocation fixup type (%d)"), fixP->fx_r_type);
24038    }
24039}
24040
24041/* Translate internal representation of relocation info to BFD target
24042   format.  */
24043
24044arelent *
24045tc_gen_reloc (asection *section, fixS *fixp)
24046{
24047  arelent * reloc;
24048  bfd_reloc_code_real_type code;
24049
24050  reloc = XNEW (arelent);
24051
24052  reloc->sym_ptr_ptr = XNEW (asymbol *);
24053  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
24054  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
24055
24056  if (fixp->fx_pcrel)
24057    {
24058      if (section->use_rela_p)
24059	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
24060      else
24061	fixp->fx_offset = reloc->address;
24062    }
24063  reloc->addend = fixp->fx_offset;
24064
24065  switch (fixp->fx_r_type)
24066    {
24067    case BFD_RELOC_8:
24068      if (fixp->fx_pcrel)
24069	{
24070	  code = BFD_RELOC_8_PCREL;
24071	  break;
24072	}
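      /* Fall through.  */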
24073
24074    case BFD_RELOC_16:
24075      if (fixp->fx_pcrel)
24076	{
24077	  code = BFD_RELOC_16_PCREL;
24078	  break;
24079	}
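      /* Fall through.  */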
24080
24081    case BFD_RELOC_32:
24082      if (fixp->fx_pcrel)
24083	{
24084	  code = BFD_RELOC_32_PCREL;
24085	  break;
24086	}
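      /* Fall through.  */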
24087
24088    case BFD_RELOC_ARM_MOVW:
24089      if (fixp->fx_pcrel)
24090	{
24091	  code = BFD_RELOC_ARM_MOVW_PCREL;
24092	  break;
24093	}
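      /* Fall through.  */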
24094
24095    case BFD_RELOC_ARM_MOVT:
24096      if (fixp->fx_pcrel)
24097	{
24098	  code = BFD_RELOC_ARM_MOVT_PCREL;
24099	  break;
24100	}
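      /* Fall through.  */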
24101
24102    case BFD_RELOC_ARM_THUMB_MOVW:
24103      if (fixp->fx_pcrel)
24104	{
24105	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
24106	  break;
24107	}
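      /* Fall through.  */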
24108
24109    case BFD_RELOC_ARM_THUMB_MOVT:
24110      if (fixp->fx_pcrel)
24111	{
24112	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
24113	  break;
24114	}
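      /* Fall through.  */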
24115
24116    case BFD_RELOC_NONE:
24117    case BFD_RELOC_ARM_PCREL_BRANCH:
24118    case BFD_RELOC_ARM_PCREL_BLX:
24119    case BFD_RELOC_RVA:
24120    case BFD_RELOC_THUMB_PCREL_BRANCH7:
24121    case BFD_RELOC_THUMB_PCREL_BRANCH9:
24122    case BFD_RELOC_THUMB_PCREL_BRANCH12:
24123    case BFD_RELOC_THUMB_PCREL_BRANCH20:
24124    case BFD_RELOC_THUMB_PCREL_BRANCH23:
24125    case BFD_RELOC_THUMB_PCREL_BRANCH25:
24126    case BFD_RELOC_VTABLE_ENTRY:
24127    case BFD_RELOC_VTABLE_INHERIT:
24128#ifdef TE_PE
24129    case BFD_RELOC_32_SECREL:
24130#endif
24131      code = fixp->fx_r_type;
24132      break;
24133
24134    case BFD_RELOC_THUMB_PCREL_BLX:
24135#ifdef OBJ_ELF
24136      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
24137	code = BFD_RELOC_THUMB_PCREL_BRANCH23;
24138      else
24139#endif
24140	code = BFD_RELOC_THUMB_PCREL_BLX;
24141      break;
24142
24143    case BFD_RELOC_ARM_LITERAL:
24144    case BFD_RELOC_ARM_HWLITERAL:
24145      /* If this is called then a literal has
24146	 been referenced across a section boundary.  */
24147      as_bad_where (fixp->fx_file, fixp->fx_line,
24148		    _("literal referenced across section boundary"));
24149      return NULL;
24150
24151#ifdef OBJ_ELF
24152    case BFD_RELOC_ARM_TLS_CALL:
24153    case BFD_RELOC_ARM_THM_TLS_CALL:
24154    case BFD_RELOC_ARM_TLS_DESCSEQ:
24155    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
24156    case BFD_RELOC_ARM_GOT32:
24157    case BFD_RELOC_ARM_GOTOFF:
24158    case BFD_RELOC_ARM_GOT_PREL:
24159    case BFD_RELOC_ARM_PLT32:
24160    case BFD_RELOC_ARM_TARGET1:
24161    case BFD_RELOC_ARM_ROSEGREL32:
24162    case BFD_RELOC_ARM_SBREL32:
24163    case BFD_RELOC_ARM_PREL31:
24164    case BFD_RELOC_ARM_TARGET2:
24165    case BFD_RELOC_ARM_TLS_LDO32:
24166    case BFD_RELOC_ARM_PCREL_CALL:
24167    case BFD_RELOC_ARM_PCREL_JUMP:
24168    case BFD_RELOC_ARM_ALU_PC_G0_NC:
24169    case BFD_RELOC_ARM_ALU_PC_G0:
24170    case BFD_RELOC_ARM_ALU_PC_G1_NC:
24171    case BFD_RELOC_ARM_ALU_PC_G1:
24172    case BFD_RELOC_ARM_ALU_PC_G2:
24173    case BFD_RELOC_ARM_LDR_PC_G0:
24174    case BFD_RELOC_ARM_LDR_PC_G1:
24175    case BFD_RELOC_ARM_LDR_PC_G2:
24176    case BFD_RELOC_ARM_LDRS_PC_G0:
24177    case BFD_RELOC_ARM_LDRS_PC_G1:
24178    case BFD_RELOC_ARM_LDRS_PC_G2:
24179    case BFD_RELOC_ARM_LDC_PC_G0:
24180    case BFD_RELOC_ARM_LDC_PC_G1:
24181    case BFD_RELOC_ARM_LDC_PC_G2:
24182    case BFD_RELOC_ARM_ALU_SB_G0_NC:
24183    case BFD_RELOC_ARM_ALU_SB_G0:
24184    case BFD_RELOC_ARM_ALU_SB_G1_NC:
24185    case BFD_RELOC_ARM_ALU_SB_G1:
24186    case BFD_RELOC_ARM_ALU_SB_G2:
24187    case BFD_RELOC_ARM_LDR_SB_G0:
24188    case BFD_RELOC_ARM_LDR_SB_G1:
24189    case BFD_RELOC_ARM_LDR_SB_G2:
24190    case BFD_RELOC_ARM_LDRS_SB_G0:
24191    case BFD_RELOC_ARM_LDRS_SB_G1:
24192    case BFD_RELOC_ARM_LDRS_SB_G2:
24193    case BFD_RELOC_ARM_LDC_SB_G0:
24194    case BFD_RELOC_ARM_LDC_SB_G1:
24195    case BFD_RELOC_ARM_LDC_SB_G2:
24196    case BFD_RELOC_ARM_V4BX:
24197    case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
24198    case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
24199    case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
24200    case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
24201      code = fixp->fx_r_type;
24202      break;
24203
24204    case BFD_RELOC_ARM_TLS_GOTDESC:
24205    case BFD_RELOC_ARM_TLS_GD32:
24206    case BFD_RELOC_ARM_TLS_LE32:
24207    case BFD_RELOC_ARM_TLS_IE32:
24208    case BFD_RELOC_ARM_TLS_LDM32:
24209      /* BFD will include the symbol's address in the addend.
24210	 But we don't want that, so subtract it out again here.  */
24211      if (!S_IS_COMMON (fixp->fx_addsy))
24212	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
24213      code = fixp->fx_r_type;
24214      break;
24215#endif
24216
24217    case BFD_RELOC_ARM_IMMEDIATE:
24218      as_bad_where (fixp->fx_file, fixp->fx_line,
24219		    _("internal relocation (type: IMMEDIATE) not fixed up"));
24220      return NULL;
24221
24222    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
24223      as_bad_where (fixp->fx_file, fixp->fx_line,
24224		    _("ADRL used for a symbol not defined in the same file"));
24225      return NULL;
24226
24227    case BFD_RELOC_ARM_OFFSET_IMM:
24228      if (section->use_rela_p)
24229	{
24230	  code = fixp->fx_r_type;
24231	  break;
24232	}
24233
24234      if (fixp->fx_addsy != NULL
24235	  && !S_IS_DEFINED (fixp->fx_addsy)
24236	  && S_IS_LOCAL (fixp->fx_addsy))
24237	{
24238	  as_bad_where (fixp->fx_file, fixp->fx_line,
24239			_("undefined local label `%s'"),
24240			S_GET_NAME (fixp->fx_addsy));
24241	  return NULL;
24242	}
24243
24244      as_bad_where (fixp->fx_file, fixp->fx_line,
24245		    _("internal relocation (type: OFFSET_IMM) not fixed up"));
24246      return NULL;
24247
24248    default:
24249      {
24250	const char * type;
24251
24252	switch (fixp->fx_r_type)
24253	  {
24254	  case BFD_RELOC_NONE:		   type = "NONE";	  break;
24255	  case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
24256	  case BFD_RELOC_ARM_SHIFT_IMM:	   type = "SHIFT_IMM";	  break;
24257	  case BFD_RELOC_ARM_SMC:	   type = "SMC";	  break;
24258	  case BFD_RELOC_ARM_SWI:	   type = "SWI";	  break;
24259	  case BFD_RELOC_ARM_MULTI:	   type = "MULTI";	  break;
24260	  case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";	  break;
24261	  case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
24262	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
24263	  case BFD_RELOC_ARM_THUMB_ADD:	   type = "THUMB_ADD";	  break;
24264	  case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
24265	  case BFD_RELOC_ARM_THUMB_IMM:	   type = "THUMB_IMM";	  break;
24266	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
24267	  default:			   type = _("<unknown>"); break;
24268	  }
24269	as_bad_where (fixp->fx_file, fixp->fx_line,
24270		      _("cannot represent %s relocation in this object file format"),
24271		      type);
24272	return NULL;
24273      }
24274    }
24275
24276#ifdef OBJ_ELF
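  /* A 32-bit reference to GOT_symbol (the global offset table) is emitted
     as a GOTPC relocation, with the addend set to the place of the fix.  */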
24277  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
24278      && GOT_symbol
24279      && fixp->fx_addsy == GOT_symbol)
24280    {
24281      code = BFD_RELOC_ARM_GOTPC;
24282      reloc->addend = fixp->fx_offset = reloc->address;
24283    }
24284#endif
24285
24286  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
24287
24288  if (reloc->howto == NULL)
24289    {
24290      as_bad_where (fixp->fx_file, fixp->fx_line,
24291		    _("cannot represent %s relocation in this object file format"),
24292		    bfd_get_reloc_code_name (code));
24293      return NULL;
24294    }
24295
24296  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
24297     vtable entry to be used in the relocation's section offset.  */
24298  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
24299    reloc->address = fixp->fx_offset;
24300
24301  return reloc;
24302}
24303
24304/* This fix_new is called by cons via TC_CONS_FIX_NEW.	*/
24305
24306void
24307cons_fix_new_arm (fragS *	frag,
24308		  int		where,
24309		  int		size,
24310		  expressionS * exp,
24311		  bfd_reloc_code_real_type reloc)
24312{
24313  int pcrel = 0;
24314
24315  /* Pick a reloc.
24316     FIXME: @@ Should look at CPU word size.  */
24317  switch (size)
24318    {
24319    case 1:
24320      reloc = BFD_RELOC_8;
24321      break;
24322    case 2:
24323      reloc = BFD_RELOC_16;
24324      break;
24325    case 4:
24326    default:
24327      reloc = BFD_RELOC_32;
24328      break;
24329    case 8:
24330      reloc = BFD_RELOC_64;
24331      break;
24332    }
24333
24334#ifdef TE_PE
24335  if (exp->X_op == O_secrel)
24336  {
24337    exp->X_op = O_symbol;
24338    reloc = BFD_RELOC_32_SECREL;
24339  }
24340#endif
24341
24342  fix_new_exp (frag, where, size, exp, pcrel, reloc);
24343}
24344
24345#if defined (OBJ_COFF)
24346void
24347arm_validate_fix (fixS * fixP)
24348{
24349  /* If the destination of the branch is a defined symbol which does not have
24350     the THUMB_FUNC attribute, then we must be calling a function which has
24351     the (interfacearm) attribute.  We look for the Thumb entry point to that
24352     function and change the branch to refer to that function instead.	*/
24353  if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
24354      && fixP->fx_addsy != NULL
24355      && S_IS_DEFINED (fixP->fx_addsy)
24356      && ! THUMB_IS_FUNC (fixP->fx_addsy))
24357    {
24358      fixP->fx_addsy = find_real_start (fixP->fx_addsy);
24359    }
24360}
24361#endif
24362
24363
24364int
24365arm_force_relocation (struct fix * fixp)
24366{
24367#if defined (OBJ_COFF) && defined (TE_PE)
24368  if (fixp->fx_r_type == BFD_RELOC_RVA)
24369    return 1;
24370#endif
24371
24372  /* If we have a call or a branch to a function in ARM ISA mode from
24373     a Thumb function, or vice versa, force the relocation.  These
24374     relocations are cleared for some cores that have BLX, where simple
24375     transformations are possible.  */
24376
24377#ifdef OBJ_ELF
24378  switch (fixp->fx_r_type)
24379    {
24380    case BFD_RELOC_ARM_PCREL_JUMP:
24381    case BFD_RELOC_ARM_PCREL_CALL:
24382    case BFD_RELOC_THUMB_PCREL_BLX:
24383      if (THUMB_IS_FUNC (fixp->fx_addsy))
24384	return 1;
24385      break;
24386
24387    case BFD_RELOC_ARM_PCREL_BLX:
24388    case BFD_RELOC_THUMB_PCREL_BRANCH25:
24389    case BFD_RELOC_THUMB_PCREL_BRANCH20:
24390    case BFD_RELOC_THUMB_PCREL_BRANCH23:
24391      if (ARM_IS_FUNC (fixp->fx_addsy))
24392	return 1;
24393      break;
24394
24395    default:
24396      break;
24397    }
24398#endif
24399
24400  /* Resolve these relocations even if the symbol is extern or weak.
24401     Technically this is probably wrong due to symbol preemption.
24402     In practice these relocations do not have enough range to be useful
24403     at dynamic link time, and some code (e.g. in the Linux kernel)
24404     expects these references to be resolved.  */
24405  if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
24406      || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
24407      || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
24408      || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
24409      || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
24410      || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
24411      || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
24412      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
24413      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
24414      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
24415      || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
24416      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
24417      || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
24418      || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
24419    return 0;
24420
24421  /* Always leave these relocations for the linker.  */
24422  if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
24423       && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
24424      || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
24425    return 1;
24426
24427  /* Always generate relocations against function symbols.  */
24428  if (fixp->fx_r_type == BFD_RELOC_32
24429      && fixp->fx_addsy
24430      && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
24431    return 1;
24432
24433  return generic_force_reloc (fixp);
24434}
24435
24436#if defined (OBJ_ELF) || defined (OBJ_COFF)
24437/* Relocations against function names must be left unadjusted,
24438   so that the linker can use this information to generate interworking
24439   stubs.  The MIPS version of this function
24440   also prevents relocations that are mips-16 specific, but I do not
24441   know why it does this.
24442
24443   FIXME:
24444   There is one other problem that ought to be addressed here, but
24445   which currently is not:  Taking the address of a label (rather
24446   than a function) and then later jumping to that address.  Such
24447   addresses also ought to have their bottom bit set (assuming that
24448   they reside in Thumb code), but at the moment they will not.	 */
24449
24450bfd_boolean
24451arm_fix_adjustable (fixS * fixP)
24452{
24453  if (fixP->fx_addsy == NULL)
24454    return 1;
24455
24456  /* Preserve relocations against symbols with function type.  */
24457  if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
24458    return FALSE;
24459
24460  if (THUMB_IS_FUNC (fixP->fx_addsy)
24461      && fixP->fx_subsy == NULL)
24462    return FALSE;
24463
24464  /* We need the symbol name for the VTABLE entries.  */
24465  if (	 fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
24466      || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
24467    return FALSE;
24468
24469  /* Don't allow symbols to be discarded on GOT related relocs.	 */
24470  if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
24471      || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
24472      || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
24473      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
24474      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
24475      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
24476      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
24477      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
24478      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
24479      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
24480      || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
24481      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
24482      || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
24483      || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
24484    return FALSE;
24485
24486  /* Similarly for group relocations.  */
24487  if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
24488       && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
24489      || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
24490    return FALSE;
24491
24492  /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols.  */
24493  if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
24494      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
24495      || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
24496      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
24497      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
24498      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
24499      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
24500      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
24501    return FALSE;
24502
24503  /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
24504     offsets, so keep these symbols.  */
24505  if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
24506      && fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
24507    return FALSE;
24508
24509  return TRUE;
24510}
24511#endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
24512
24513#ifdef OBJ_ELF
24514const char *
24515elf32_arm_target_format (void)
24516{
24517#ifdef TE_SYMBIAN
24518  return (target_big_endian
24519	  ? "elf32-bigarm-symbian"
24520	  : "elf32-littlearm-symbian");
24521#elif defined (TE_VXWORKS)
24522  return (target_big_endian
24523	  ? "elf32-bigarm-vxworks"
24524	  : "elf32-littlearm-vxworks");
24525#elif defined (TE_NACL)
24526  return (target_big_endian
24527	  ? "elf32-bigarm-nacl"
24528	  : "elf32-littlearm-nacl");
24529#else
24530  if (target_big_endian)
24531    return "elf32-bigarm";
24532  else
24533    return "elf32-littlearm";
24534#endif
24535}
24536
24537void
24538armelf_frob_symbol (symbolS * symp,
24539		    int *     puntp)
24540{
24541  elf_frob_symbol (symp, puntp);
24542}
24543#endif
24544
24545/* MD interface: Finalization.	*/
24546
24547void
24548arm_cleanup (void)
24549{
24550  literal_pool * pool;
24551
24552  /* Ensure that all the IT blocks are properly closed.  */
24553  check_it_blocks_finished ();
24554
24555  for (pool = list_of_pools; pool; pool = pool->next)
24556    {
24557      /* Put it at the end of the relevant section.  */
24558      subseg_set (pool->section, pool->sub_section);
24559#ifdef OBJ_ELF
24560      arm_elf_change_section ();
24561#endif
24562      s_ltorg (0);
24563    }
24564}
24565
24566#ifdef OBJ_ELF
24567/* Remove any excess mapping symbols generated for alignment frags in
24568   SEC.  We may have created a mapping symbol before a zero byte
24569   alignment; remove it if there's a mapping symbol after the
24570   alignment.  */
24571static void
24572check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
24573		       void *dummy ATTRIBUTE_UNUSED)
24574{
24575  segment_info_type *seginfo = seg_info (sec);
24576  fragS *fragp;
24577
24578  if (seginfo == NULL || seginfo->frchainP == NULL)
24579    return;
24580
24581  for (fragp = seginfo->frchainP->frch_root;
24582       fragp != NULL;
24583       fragp = fragp->fr_next)
24584    {
24585      symbolS *sym = fragp->tc_frag_data.last_map;
24586      fragS *next = fragp->fr_next;
24587
24588      /* Variable-sized frags have been converted to fixed size by
24589	 this point.  But if this was variable-sized to start with,
24590	 there will be a fixed-size frag after it.  So don't handle
24591	 next == NULL.  */
24592      if (sym == NULL || next == NULL)
24593	continue;
24594
24595      if (S_GET_VALUE (sym) < next->fr_address)
24596	/* Not at the end of this frag.  */
24597	continue;
24598      know (S_GET_VALUE (sym) == next->fr_address);
24599
24600      do
24601	{
24602	  if (next->tc_frag_data.first_map != NULL)
24603	    {
24604	      /* Next frag starts with a mapping symbol.  Discard this
24605		 one.  */
24606	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
24607	      break;
24608	    }
24609
24610	  if (next->fr_next == NULL)
24611	    {
24612	      /* This mapping symbol is at the end of the section.  Discard
24613		 it.  */
24614	      know (next->fr_fix == 0 && next->fr_var == 0);
24615	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
24616	      break;
24617	    }
24618
24619	  /* As long as we have empty frags without any mapping symbols,
24620	     keep looking.  */
24621	  /* If the next frag is non-empty and does not start with a
24622	     mapping symbol, then this mapping symbol is required.  */
24623	  if (next->fr_address != next->fr_next->fr_address)
24624	    break;
24625
24626	  next = next->fr_next;
24627	}
24628      while (next != NULL);
24629    }
24630}
24631#endif
24632
24633/* Adjust the symbol table.  This marks Thumb symbols as distinct from
24634   ARM ones.  */
24635
24636void
24637arm_adjust_symtab (void)
24638{
24639#ifdef OBJ_COFF
24640  symbolS * sym;
24641
24642  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
24643    {
24644      if (ARM_IS_THUMB (sym))
24645	{
24646	  if (THUMB_IS_FUNC (sym))
24647	    {
24648	      /* Mark the symbol as a Thumb function.  */
24649	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
24650		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
24651		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
24652
24653	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
24654		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
24655	      else
24656		as_bad (_("%s: unexpected function type: %d"),
24657			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
24658	    }
24659	  else switch (S_GET_STORAGE_CLASS (sym))
24660	    {
24661	    case C_EXT:
24662	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
24663	      break;
24664	    case C_STAT:
24665	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
24666	      break;
24667	    case C_LABEL:
24668	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
24669	      break;
24670	    default:
24671	      /* Do nothing.  */
24672	      break;
24673	    }
24674	}
24675
24676      if (ARM_IS_INTERWORK (sym))
24677	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
24678    }
24679#endif
24680#ifdef OBJ_ELF
24681  symbolS * sym;
24682  char	    bind;
24683
24684  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
24685    {
24686      if (ARM_IS_THUMB (sym))
24687	{
24688	  elf_symbol_type * elf_sym;
24689
24690	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
24691	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);
24692
24693	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
24694		BFD_ARM_SPECIAL_SYM_TYPE_ANY))
24695	    {
24696	      /* If it's a .thumb_func, declare it as so,
24697		 otherwise tag label as .code 16.  */
24698	      if (THUMB_IS_FUNC (sym))
24699		ARM_SET_SYM_BRANCH_TYPE (elf_sym->internal_elf_sym.st_target_internal,
24700					 ST_BRANCH_TO_THUMB);
24701	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
24702		elf_sym->internal_elf_sym.st_info =
24703		  ELF_ST_INFO (bind, STT_ARM_16BIT);
24704	    }
24705	}
24706    }
24707
24708  /* Remove any overlapping mapping symbols generated by alignment frags.  */
24709  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
24710  /* Now do generic ELF adjustments.  */
24711  elf_adjust_symtab ();
24712#endif
24713}
24714
24715/* MD interface: Initialization.  */
24716
24717static void
24718set_constant_flonums (void)
24719{
24720  int i;
24721
24722  for (i = 0; i < NUM_FLOAT_VALS; i++)
24723    if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
24724      abort ();
24725}
24726
24727/* Auto-select Thumb mode if it's the only available instruction set for the
24728   given architecture.  */
24729
24730static void
24731autoselect_thumb_from_cpu_variant (void)
24732{
24733  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
24734    opcode_select (16);
24735}
24736
24737void
24738md_begin (void)
24739{
24740  unsigned mach;
24741  unsigned int i;
24742
24743  if (	 (arm_ops_hsh = hash_new ()) == NULL
24744      || (arm_cond_hsh = hash_new ()) == NULL
24745      || (arm_shift_hsh = hash_new ()) == NULL
24746      || (arm_psr_hsh = hash_new ()) == NULL
24747      || (arm_v7m_psr_hsh = hash_new ()) == NULL
24748      || (arm_reg_hsh = hash_new ()) == NULL
24749      || (arm_reloc_hsh = hash_new ()) == NULL
24750      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
24751    as_fatal (_("virtual memory exhausted"));
24752
24753  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
24754    hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
24755  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
24756    hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
24757  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
24758    hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
24759  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
24760    hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
24761  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
24762    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
24763		 (void *) (v7m_psrs + i));
24764  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
24765    hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
24766  for (i = 0;
24767       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
24768       i++)
24769    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
24770		 (void *) (barrier_opt_names + i));
24771#ifdef OBJ_ELF
24772  for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
24773    {
24774      struct reloc_entry * entry = reloc_names + i;
24775
24776      if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
24777	/* This makes encode_branch() use the EABI versions of this relocation.  */
24778	entry->reloc = BFD_RELOC_UNUSED;
24779
24780      hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
24781    }
24782#endif
24783
24784  set_constant_flonums ();
24785
24786  /* Set the cpu variant based on the command-line options.  We prefer
24787     -mcpu= over -march= if both are set (as for GCC); and we prefer
24788     -mfpu= over any other way of setting the floating point unit.
24789     Mixing legacy and new-style options is reported as an error.  */
24790  if (legacy_cpu)
24791    {
24792      if (mcpu_cpu_opt || march_cpu_opt)
24793	as_bad (_("use of old and new-style options to set CPU type"));
24794
24795      mcpu_cpu_opt = legacy_cpu;
24796    }
24797  else if (!mcpu_cpu_opt)
24798    mcpu_cpu_opt = march_cpu_opt;
24799
24800  if (legacy_fpu)
24801    {
24802      if (mfpu_opt)
24803	as_bad (_("use of old and new-style options to set FPU type"));
24804
24805      mfpu_opt = legacy_fpu;
24806    }
24807  else if (!mfpu_opt)
24808    {
24809#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
24810	|| defined (TE_NetBSD) || defined (TE_VXWORKS))
24811      /* Some environments specify a default FPU.  If they don't, infer it
24812	 from the processor.  */
24813      if (mcpu_fpu_opt)
24814	mfpu_opt = mcpu_fpu_opt;
24815      else
24816	mfpu_opt = march_fpu_opt;
24817#else
24818      mfpu_opt = &fpu_default;
24819#endif
24820    }
24821
24822  if (!mfpu_opt)
24823    {
24824      if (mcpu_cpu_opt != NULL)
24825	mfpu_opt = &fpu_default;
24826      else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
24827	mfpu_opt = &fpu_arch_vfp_v2;
24828      else
24829	mfpu_opt = &fpu_arch_fpa;
24830    }
24831
24832#ifdef CPU_DEFAULT
24833  if (!mcpu_cpu_opt)
24834    {
24835      mcpu_cpu_opt = &cpu_default;
24836      selected_cpu = cpu_default;
24837    }
24838  else if (no_cpu_selected ())
24839    selected_cpu = cpu_default;
24840#else
24841  if (mcpu_cpu_opt)
24842    selected_cpu = *mcpu_cpu_opt;
24843  else
24844    mcpu_cpu_opt = &arm_arch_any;
24845#endif
24846
24847  ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
24848
24849  autoselect_thumb_from_cpu_variant ();
24850
24851  arm_arch_used = thumb_arch_used = arm_arch_none;
24852
24853#if defined OBJ_COFF || defined OBJ_ELF
24854  {
24855    unsigned int flags = 0;
24856
24857#if defined OBJ_ELF
24858    flags = meabi_flags;
24859
24860    switch (meabi_flags)
24861      {
24862      case EF_ARM_EABI_UNKNOWN:
24863#endif
24864	/* Set the flags in the private structure.  */
24865	if (uses_apcs_26)      flags |= F_APCS26;
24866	if (support_interwork) flags |= F_INTERWORK;
24867	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
24868	if (pic_code)	       flags |= F_PIC;
24869	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
24870	  flags |= F_SOFT_FLOAT;
24871
24872	switch (mfloat_abi_opt)
24873	  {
24874	  case ARM_FLOAT_ABI_SOFT:
24875	  case ARM_FLOAT_ABI_SOFTFP:
24876	    flags |= F_SOFT_FLOAT;
24877	    break;
24878
24879	  case ARM_FLOAT_ABI_HARD:
24880	    if (flags & F_SOFT_FLOAT)
24881	      as_bad (_("hard-float conflicts with specified fpu"));
24882	    break;
24883	  }
24884
24885	/* Using pure-endian doubles (even if soft-float).	*/
24886	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
24887	  flags |= F_VFP_FLOAT;
24888
24889#if defined OBJ_ELF
24890	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
24891	    flags |= EF_ARM_MAVERICK_FLOAT;
24892	break;
24893
24894      case EF_ARM_EABI_VER4:
24895      case EF_ARM_EABI_VER5:
24896	/* No additional flags to set.	*/
24897	break;
24898
24899      default:
24900	abort ();
24901      }
24902#endif
24903    bfd_set_private_flags (stdoutput, flags);
24904
24905    /* We have run out of flags in the COFF header to encode the
24906       status of ATPCS support, so instead we create a dummy,
24907       empty, debug section called .arm.atpcs.	*/
24908    if (atpcs)
24909      {
24910	asection * sec;
24911
24912	sec = bfd_make_section (stdoutput, ".arm.atpcs");
24913
24914	if (sec != NULL)
24915	  {
24916	    bfd_set_section_flags
24917	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
24918	    bfd_set_section_size (stdoutput, sec, 0);
24919	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
24920	  }
24921      }
24922  }
24923#endif
24924
24925  /* Record the CPU type as well.  */
24926  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
24927    mach = bfd_mach_arm_iWMMXt2;
24928  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
24929    mach = bfd_mach_arm_iWMMXt;
24930  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
24931    mach = bfd_mach_arm_XScale;
24932  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
24933    mach = bfd_mach_arm_ep9312;
24934  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
24935    mach = bfd_mach_arm_5TE;
24936  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
24937    {
24938      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
24939	mach = bfd_mach_arm_5T;
24940      else
24941	mach = bfd_mach_arm_5;
24942    }
24943  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
24944    {
24945      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
24946	mach = bfd_mach_arm_4T;
24947      else
24948	mach = bfd_mach_arm_4;
24949    }
24950  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
24951    mach = bfd_mach_arm_3M;
24952  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
24953    mach = bfd_mach_arm_3;
24954  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
24955    mach = bfd_mach_arm_2a;
24956  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
24957    mach = bfd_mach_arm_2;
24958  else
24959    mach = bfd_mach_arm_unknown;
24960
24961  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
24962}
24963
24964/* Command line processing.  */
24965
24966/* md_parse_option
24967      Invocation line includes a switch not recognized by the base assembler.
24968      See if it's a processor-specific option.
24969
24970      This routine is somewhat complicated by the need for backwards
24971      compatibility (since older releases of gcc can't be changed).
24972      The new options try to make the interface as compatible as
24973      possible with GCC.
24974
24975      New options (supported) are:
24976
24977	      -mcpu=<cpu name>		 Assemble for selected processor
24978	      -march=<architecture name> Assemble for selected architecture
24979	      -mfpu=<fpu architecture>	 Assemble for selected FPU.
24980	      -EB/-mbig-endian		 Big-endian
24981	      -EL/-mlittle-endian	 Little-endian
24982	      -k			 Generate PIC code
24983	      -mthumb			 Start in Thumb mode
24984	      -mthumb-interwork		 Code supports ARM/Thumb interworking
24985
24986	      -m[no-]warn-deprecated     Warn about deprecated features
24987	      -m[no-]warn-syms		 Warn when symbols match instructions
24988
24989      For now we will also provide support for:
24990
24991	      -mapcs-32			 32-bit Program counter
24992	      -mapcs-26			 26-bit Program counter
24993	      -mapcs-float		 Floats passed in FP registers
24994	      -mapcs-reentrant		 Reentrant code
24995	      -matpcs
24996      (sometime these will probably be replaced with -mapcs=<list of options>
24997      and -matpcs=<list of options>)
24998
24999      The remaining options are only supported for backwards compatibility.
25000      Cpu variants, the arm part is optional:
25001	      -m[arm]1		      Currently not supported.
25002	      -m[arm]2, -m[arm]250    Arm 2 and Arm 250 processor
25003	      -m[arm]3		      Arm 3 processor
25004	      -m[arm]6[xx],	      Arm 6 processors
25005	      -m[arm]7[xx][t][[d]m]   Arm 7 processors
25006	      -m[arm]8[10]	      Arm 8 processors
25007	      -m[arm]9[20][tdmi]      Arm 9 processors
25008	      -mstrongarm[110[0]]     StrongARM processors
25009	      -mxscale		      XScale processors
25010	      -m[arm]v[2345[t[e]]]    Arm architectures
25011	      -mall		      All (except the ARM1)
25012      FP variants:
25013	      -mfpa10, -mfpa11	      FPA10 and 11 co-processor instructions
25014	      -mfpe-old		      (No float load/store multiples)
25015	      -mvfpxd		      VFP Single precision
25016	      -mvfp		      All VFP
25017	      -mno-fpu		      Disable all floating point instructions
25018
25019      The following CPU names are recognized:
25020	      arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
25021	      arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
25022	      arm700i, arm710, arm710t, arm720, arm720t, arm740t, arm710c,
25023	      arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
25024	      arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
25025	      arm10t, arm10e, arm1020t, arm1020e, arm10200e,
25026	      strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
25027
25028      */
25029
25030const char * md_shortopts = "m:k";
25031
25032#ifdef ARM_BI_ENDIAN
25033#define OPTION_EB (OPTION_MD_BASE + 0)
25034#define OPTION_EL (OPTION_MD_BASE + 1)
25035#else
25036#if TARGET_BYTES_BIG_ENDIAN
25037#define OPTION_EB (OPTION_MD_BASE + 0)
25038#else
25039#define OPTION_EL (OPTION_MD_BASE + 1)
25040#endif
25041#endif
25042#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
25043
25044struct option md_longopts[] =
25045{
25046#ifdef OPTION_EB
25047  {"EB", no_argument, NULL, OPTION_EB},
25048#endif
25049#ifdef OPTION_EL
25050  {"EL", no_argument, NULL, OPTION_EL},
25051#endif
25052  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
25053  {NULL, no_argument, NULL, 0}
25054};
25055
25056
25057size_t md_longopts_size = sizeof (md_longopts);
25058
25059struct arm_option_table
25060{
25061  const char *option;		/* Option name to match.  */
25062  const char *help;		/* Help information.  */
25063  int  *var;		/* Variable to change.	*/
25064  int	value;		/* What to change it to.  */
25065  const char *deprecated;	/* If non-null, print this message.  */
25066};
25067
25068struct arm_option_table arm_opts[] =
25069{
25070  {"k",	     N_("generate PIC code"),	   &pic_code,	 1, NULL},
25071  {"mthumb", N_("assemble Thumb code"),	   &thumb_mode,	 1, NULL},
25072  {"mthumb-interwork", N_("support ARM/Thumb interworking"),
25073   &support_interwork, 1, NULL},
25074  {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
25075  {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
25076  {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
25077   1, NULL},
25078  {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
25079  {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
25080  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
25081  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
25082   NULL},
25083
25084  /* These are recognized by the assembler, but have no effect on code.	 */
25085  {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
25086  {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
25087
25088  {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
25089  {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
25090   &warn_on_deprecated, 0, NULL},
25091  {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms), TRUE, NULL},
25092  {"mno-warn-syms", N_("disable warnings about symbols that match instructions"), (int *) (& flag_warn_syms), FALSE, NULL},
25093  {NULL, NULL, NULL, 0, NULL}
25094};
25095
25096struct arm_legacy_option_table
25097{
25098  const char *option;				/* Option name to match.  */
25099  const arm_feature_set	**var;		/* Variable to change.	*/
25100  const arm_feature_set	value;		/* What to change it to.  */
25101  const char *deprecated;			/* If non-null, print this message.  */
25102};
25103
25104const struct arm_legacy_option_table arm_legacy_opts[] =
25105{
25106  /* DON'T add any new processors to this list -- we want the whole list
25107     to go away...  Add them to the processors table instead.  */
25108  {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
25109  {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
25110  {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
25111  {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
25112  {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
25113  {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
25114  {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
25115  {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
25116  {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
25117  {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
25118  {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
25119  {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
25120  {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
25121  {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
25122  {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
25123  {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
25124  {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
25125  {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
25126  {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
25127  {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
25128  {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
25129  {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
25130  {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
25131  {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
25132  {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
25133  {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
25134  {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
25135  {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
25136  {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
25137  {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
25138  {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
25139  {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
25140  {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
25141  {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
25142  {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
25143  {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
25144  {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
25145  {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
25146  {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
25147  {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
25148  {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
25149  {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
25150  {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
25151  {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
25152  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
25153  {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
25154  {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
25155  {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
25156  {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
25157  {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
25158  {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
25159  {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
25160  {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
25161  {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
25162  {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
25163  {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
25164  {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
25165  {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
25166  {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
25167  {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
25168  {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
25169  {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
25170  {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
25171  {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
25172  {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
25173  {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
25174  {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
25175  {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
25176  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
25177  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
25178   N_("use -mcpu=strongarm110")},
25179  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
25180   N_("use -mcpu=strongarm1100")},
25181  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
25182   N_("use -mcpu=strongarm1110")},
25183  {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
25184  {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
25185  {"mall",	 &legacy_cpu, ARM_ANY,	       N_("use -mcpu=all")},
25186
25187  /* Architecture variants -- don't add any more to this list either.  */
25188  {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
25189  {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
25190  {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
25191  {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
25192  {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
25193  {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
25194  {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
25195  {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
25196  {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
25197  {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
25198  {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
25199  {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
25200  {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
25201  {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
25202  {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
25203  {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
25204  {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
25205  {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
25206
25207  /* Floating point variants -- don't add any more to this list either.	 */
25208  {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
25209  {"mfpa10",   &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
25210  {"mfpa11",   &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
25211  {"mno-fpu",  &legacy_fpu, ARM_ARCH_NONE,
25212   N_("use either -mfpu=softfpa or -mfpu=softvfp")},
25213
25214  {NULL, NULL, ARM_ARCH_NONE, NULL}
25215};
25216
25217struct arm_cpu_option_table
25218{
25219  const char *name;
25220  size_t name_len;
25221  const arm_feature_set	value;
25222  /* For some CPUs we assume an FPU unless the user explicitly sets
25223     -mfpu=...	*/
25224  const arm_feature_set	default_fpu;
25225  /* The canonical name of the CPU, or NULL to use NAME converted to upper
25226     case.  */
25227  const char *canonical_name;
25228};
25229
25230/* This list should, at a minimum, contain all the cpu names
25231   recognized by GCC.  */
25232#define ARM_CPU_OPT(N, V, DF, CN) { N, sizeof (N) - 1, V, DF, CN }
25233static const struct arm_cpu_option_table arm_cpus[] =
25234{
25235  ARM_CPU_OPT ("all",		ARM_ANY,	 FPU_ARCH_FPA,    NULL),
25236  ARM_CPU_OPT ("arm1",		ARM_ARCH_V1,	 FPU_ARCH_FPA,    NULL),
25237  ARM_CPU_OPT ("arm2",		ARM_ARCH_V2,	 FPU_ARCH_FPA,    NULL),
25238  ARM_CPU_OPT ("arm250",	ARM_ARCH_V2S,	 FPU_ARCH_FPA,    NULL),
25239  ARM_CPU_OPT ("arm3",		ARM_ARCH_V2S,	 FPU_ARCH_FPA,    NULL),
25240  ARM_CPU_OPT ("arm6",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
25241  ARM_CPU_OPT ("arm60",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
25242  ARM_CPU_OPT ("arm600",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
25243  ARM_CPU_OPT ("arm610",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
25244  ARM_CPU_OPT ("arm620",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
25245  ARM_CPU_OPT ("arm7",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
25246  ARM_CPU_OPT ("arm7m",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
25247  ARM_CPU_OPT ("arm7d",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
25248  ARM_CPU_OPT ("arm7dm",	ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
25249  ARM_CPU_OPT ("arm7di",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
25250  ARM_CPU_OPT ("arm7dmi",	ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
25251  ARM_CPU_OPT ("arm70",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
25252  ARM_CPU_OPT ("arm700",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
25253  ARM_CPU_OPT ("arm700i",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
25254  ARM_CPU_OPT ("arm710",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
25255  ARM_CPU_OPT ("arm710t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
25256  ARM_CPU_OPT ("arm720",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
25257  ARM_CPU_OPT ("arm720t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
25258  ARM_CPU_OPT ("arm740t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
25259  ARM_CPU_OPT ("arm710c",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
25260  ARM_CPU_OPT ("arm7100",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
25261  ARM_CPU_OPT ("arm7500",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
25262  ARM_CPU_OPT ("arm7500fe",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
25263  ARM_CPU_OPT ("arm7t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
25264  ARM_CPU_OPT ("arm7tdmi",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
25265  ARM_CPU_OPT ("arm7tdmi-s",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
25266  ARM_CPU_OPT ("arm8",		ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
25267  ARM_CPU_OPT ("arm810",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
25268  ARM_CPU_OPT ("strongarm",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
25269  ARM_CPU_OPT ("strongarm1",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
25270  ARM_CPU_OPT ("strongarm110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
25271  ARM_CPU_OPT ("strongarm1100",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
25272  ARM_CPU_OPT ("strongarm1110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
25273  ARM_CPU_OPT ("arm9",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
25274  ARM_CPU_OPT ("arm920",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    "ARM920T"),
25275  ARM_CPU_OPT ("arm920t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
25276  ARM_CPU_OPT ("arm922t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
25277  ARM_CPU_OPT ("arm940t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
25278  ARM_CPU_OPT ("arm9tdmi",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL),
25279  ARM_CPU_OPT ("fa526",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL),
25280  ARM_CPU_OPT ("fa626",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL),
25281  /* For V5 or later processors we default to using VFP; but the user
25282     should really set the FPU type explicitly.	 */
25283  ARM_CPU_OPT ("arm9e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
25284  ARM_CPU_OPT ("arm9e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
25285  ARM_CPU_OPT ("arm926ej",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"),
25286  ARM_CPU_OPT ("arm926ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"),
25287  ARM_CPU_OPT ("arm926ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL),
25288  ARM_CPU_OPT ("arm946e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
25289  ARM_CPU_OPT ("arm946e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM946E-S"),
25290  ARM_CPU_OPT ("arm946e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
25291  ARM_CPU_OPT ("arm966e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
25292  ARM_CPU_OPT ("arm966e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM966E-S"),
25293  ARM_CPU_OPT ("arm966e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
25294  ARM_CPU_OPT ("arm968e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
25295  ARM_CPU_OPT ("arm10t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
25296  ARM_CPU_OPT ("arm10tdmi",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
25297  ARM_CPU_OPT ("arm10e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
25298  ARM_CPU_OPT ("arm1020",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM1020E"),
25299  ARM_CPU_OPT ("arm1020t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
25300  ARM_CPU_OPT ("arm1020e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
25301  ARM_CPU_OPT ("arm1022e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
25302  ARM_CPU_OPT ("arm1026ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2,
25303								 "ARM1026EJ-S"),
25304  ARM_CPU_OPT ("arm1026ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL),
25305  ARM_CPU_OPT ("fa606te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
25306  ARM_CPU_OPT ("fa616te",	ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL),
25307  ARM_CPU_OPT ("fa626te",	ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL),
25308  ARM_CPU_OPT ("fmp626",	ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL),
25309  ARM_CPU_OPT ("fa726te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
25310  ARM_CPU_OPT ("arm1136js",	ARM_ARCH_V6,	 FPU_NONE,	  "ARM1136J-S"),
25311  ARM_CPU_OPT ("arm1136j-s",	ARM_ARCH_V6,	 FPU_NONE,	  NULL),
25312  ARM_CPU_OPT ("arm1136jfs",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2,
25313								 "ARM1136JF-S"),
25314  ARM_CPU_OPT ("arm1136jf-s",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2, NULL),
25315  ARM_CPU_OPT ("mpcore",	ARM_ARCH_V6K,	 FPU_ARCH_VFP_V2, "MPCore"),
25316  ARM_CPU_OPT ("mpcorenovfp",	ARM_ARCH_V6K,	 FPU_NONE,	  "MPCore"),
25317  ARM_CPU_OPT ("arm1156t2-s",	ARM_ARCH_V6T2,	 FPU_NONE,	  NULL),
25318  ARM_CPU_OPT ("arm1156t2f-s",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP_V2, NULL),
25319  ARM_CPU_OPT ("arm1176jz-s",	ARM_ARCH_V6KZ,	 FPU_NONE,	  NULL),
25320  ARM_CPU_OPT ("arm1176jzf-s",	ARM_ARCH_V6KZ,	 FPU_ARCH_VFP_V2, NULL),
25321  ARM_CPU_OPT ("cortex-a5",	ARM_ARCH_V7A_MP_SEC,
25322						 FPU_NONE,	  "Cortex-A5"),
25323  ARM_CPU_OPT ("cortex-a7",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
25324								  "Cortex-A7"),
25325  ARM_CPU_OPT ("cortex-a8",	ARM_ARCH_V7A_SEC,
25326						 ARM_FEATURE_COPROC (FPU_VFP_V3
25327							| FPU_NEON_EXT_V1),
25328								  "Cortex-A8"),
25329  ARM_CPU_OPT ("cortex-a9",	ARM_ARCH_V7A_MP_SEC,
25330						 ARM_FEATURE_COPROC (FPU_VFP_V3
25331							| FPU_NEON_EXT_V1),
25332								  "Cortex-A9"),
25333  ARM_CPU_OPT ("cortex-a12",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
25334								  "Cortex-A12"),
25335  ARM_CPU_OPT ("cortex-a15",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
25336								  "Cortex-A15"),
25337  ARM_CPU_OPT ("cortex-a17",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
25338								  "Cortex-A17"),
25339  ARM_CPU_OPT ("cortex-a32",    ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
25340								  "Cortex-A32"),
25341  ARM_CPU_OPT ("cortex-a35",    ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
25342								  "Cortex-A35"),
25343  ARM_CPU_OPT ("cortex-a53",    ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
25344								  "Cortex-A53"),
25345  ARM_CPU_OPT ("cortex-a57",    ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
25346								  "Cortex-A57"),
25347  ARM_CPU_OPT ("cortex-a72",    ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
25348								  "Cortex-A72"),
25349  ARM_CPU_OPT ("cortex-a73",    ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
25350								  "Cortex-A73"),
25351  ARM_CPU_OPT ("cortex-r4",	ARM_ARCH_V7R,	 FPU_NONE,	  "Cortex-R4"),
25352  ARM_CPU_OPT ("cortex-r4f",	ARM_ARCH_V7R,	 FPU_ARCH_VFP_V3D16,
25353								  "Cortex-R4F"),
25354  ARM_CPU_OPT ("cortex-r5",	ARM_ARCH_V7R_IDIV,
25355						 FPU_NONE,	  "Cortex-R5"),
25356  ARM_CPU_OPT ("cortex-r7",	ARM_ARCH_V7R_IDIV,
25357						 FPU_ARCH_VFP_V3D16,
25358								  "Cortex-R7"),
25359  ARM_CPU_OPT ("cortex-r8",	ARM_ARCH_V7R_IDIV,
25360						 FPU_ARCH_VFP_V3D16,
25361								  "Cortex-R8"),
25362  ARM_CPU_OPT ("cortex-m7",	ARM_ARCH_V7EM,	 FPU_NONE,	  "Cortex-M7"),
25363  ARM_CPU_OPT ("cortex-m4",	ARM_ARCH_V7EM,	 FPU_NONE,	  "Cortex-M4"),
25364  ARM_CPU_OPT ("cortex-m3",	ARM_ARCH_V7M,	 FPU_NONE,	  "Cortex-M3"),
25365  ARM_CPU_OPT ("cortex-m1",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M1"),
25366  ARM_CPU_OPT ("cortex-m0",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M0"),
25367  ARM_CPU_OPT ("cortex-m0plus",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M0+"),
25368  ARM_CPU_OPT ("exynos-m1",	ARM_ARCH_V8A,	 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
25369								  "Samsung " \
25370								  "Exynos M1"),
25371  ARM_CPU_OPT ("qdf24xx",	ARM_ARCH_V8A,	 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
25372								  "Qualcomm "
25373								  "QDF24XX"),
25374
25375  /* ??? XSCALE is really an architecture.  */
25376  ARM_CPU_OPT ("xscale",	ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
25377  /* ??? iwmmxt is not a processor.  */
25378  ARM_CPU_OPT ("iwmmxt",	ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL),
25379  ARM_CPU_OPT ("iwmmxt2",	ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL),
25380  ARM_CPU_OPT ("i80200",	ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
25381  /* Maverick */
25382  ARM_CPU_OPT ("ep9312",	ARM_FEATURE_LOW (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
25383						 FPU_ARCH_MAVERICK, "ARM920T"),
25384  /* Marvell processors.  */
25385  ARM_CPU_OPT ("marvell-pj4",   ARM_FEATURE_CORE (ARM_AEXT_V7A | ARM_EXT_MP
25386						  | ARM_EXT_SEC,
25387						  ARM_EXT2_V6T2_V8M),
25388						FPU_ARCH_VFP_V3D16, NULL),
25389  ARM_CPU_OPT ("marvell-whitney", ARM_FEATURE_CORE (ARM_AEXT_V7A | ARM_EXT_MP
25390						    | ARM_EXT_SEC,
25391						    ARM_EXT2_V6T2_V8M),
25392					       FPU_ARCH_NEON_VFP_V4, NULL),
25393  /* APM X-Gene family.  */
25394  ARM_CPU_OPT ("xgene1",        ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
25395	                                                          "APM X-Gene 1"),
25396  ARM_CPU_OPT ("xgene2",        ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
25397	                                                          "APM X-Gene 2"),
25398
25399  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
25400};
25401#undef ARM_CPU_OPT
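/* For example, the "arm920" entry above means that "-mcpu=arm920" selects the
   ARMv4T architecture, defaults the FPU to FPA, and records the canonical
   name "ARM920T" (later reported via Tag_CPU_name).  */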
25402
25403struct arm_arch_option_table
25404{
25405  const char *name;
25406  size_t name_len;
25407  const arm_feature_set	value;
25408  const arm_feature_set	default_fpu;
25409};
25410
25411/* This list should, at a minimum, contain all the architecture names
25412   recognized by GCC.  */
25413#define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
25414static const struct arm_arch_option_table arm_archs[] =
25415{
25416  ARM_ARCH_OPT ("all",		ARM_ANY,	 FPU_ARCH_FPA),
25417  ARM_ARCH_OPT ("armv1",	ARM_ARCH_V1,	 FPU_ARCH_FPA),
25418  ARM_ARCH_OPT ("armv2",	ARM_ARCH_V2,	 FPU_ARCH_FPA),
25419  ARM_ARCH_OPT ("armv2a",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
25420  ARM_ARCH_OPT ("armv2s",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
25421  ARM_ARCH_OPT ("armv3",	ARM_ARCH_V3,	 FPU_ARCH_FPA),
25422  ARM_ARCH_OPT ("armv3m",	ARM_ARCH_V3M,	 FPU_ARCH_FPA),
25423  ARM_ARCH_OPT ("armv4",	ARM_ARCH_V4,	 FPU_ARCH_FPA),
25424  ARM_ARCH_OPT ("armv4xm",	ARM_ARCH_V4xM,	 FPU_ARCH_FPA),
25425  ARM_ARCH_OPT ("armv4t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA),
25426  ARM_ARCH_OPT ("armv4txm",	ARM_ARCH_V4TxM,	 FPU_ARCH_FPA),
25427  ARM_ARCH_OPT ("armv5",	ARM_ARCH_V5,	 FPU_ARCH_VFP),
25428  ARM_ARCH_OPT ("armv5t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP),
25429  ARM_ARCH_OPT ("armv5txm",	ARM_ARCH_V5TxM,	 FPU_ARCH_VFP),
25430  ARM_ARCH_OPT ("armv5te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP),
25431  ARM_ARCH_OPT ("armv5texp",	ARM_ARCH_V5TExP, FPU_ARCH_VFP),
25432  ARM_ARCH_OPT ("armv5tej",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP),
25433  ARM_ARCH_OPT ("armv6",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
25434  ARM_ARCH_OPT ("armv6j",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
25435  ARM_ARCH_OPT ("armv6k",	ARM_ARCH_V6K,	 FPU_ARCH_VFP),
25436  ARM_ARCH_OPT ("armv6z",	ARM_ARCH_V6Z,	 FPU_ARCH_VFP),
25437  /* The official spelling of this variant is ARMv6KZ; the name "armv6zk" is
25438     kept to preserve existing behaviour.  */
25439  ARM_ARCH_OPT ("armv6kz",	ARM_ARCH_V6KZ,	 FPU_ARCH_VFP),
25440  ARM_ARCH_OPT ("armv6zk",	ARM_ARCH_V6KZ,	 FPU_ARCH_VFP),
25441  ARM_ARCH_OPT ("armv6t2",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP),
25442  ARM_ARCH_OPT ("armv6kt2",	ARM_ARCH_V6KT2,	 FPU_ARCH_VFP),
25443  ARM_ARCH_OPT ("armv6zt2",	ARM_ARCH_V6ZT2,	 FPU_ARCH_VFP),
25444  /* The official spelling of this variant is ARMv6KZ; the name "armv6zkt2" is
25445     kept to preserve existing behaviour.  */
25446  ARM_ARCH_OPT ("armv6kzt2",	ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
25447  ARM_ARCH_OPT ("armv6zkt2",	ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
25448  ARM_ARCH_OPT ("armv6-m",	ARM_ARCH_V6M,	 FPU_ARCH_VFP),
25449  ARM_ARCH_OPT ("armv6s-m",	ARM_ARCH_V6SM,	 FPU_ARCH_VFP),
25450  ARM_ARCH_OPT ("armv7",	ARM_ARCH_V7,	 FPU_ARCH_VFP),
25451  /* The official spelling of the ARMv7 profile variants is the dashed form.
25452     Accept the non-dashed form for compatibility with old toolchains.  */
25453  ARM_ARCH_OPT ("armv7a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
25454  ARM_ARCH_OPT ("armv7ve",	ARM_ARCH_V7VE,	 FPU_ARCH_VFP),
25455  ARM_ARCH_OPT ("armv7r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
25456  ARM_ARCH_OPT ("armv7m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
25457  ARM_ARCH_OPT ("armv7-a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
25458  ARM_ARCH_OPT ("armv7-r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
25459  ARM_ARCH_OPT ("armv7-m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
25460  ARM_ARCH_OPT ("armv7e-m",	ARM_ARCH_V7EM,	 FPU_ARCH_VFP),
25461  ARM_ARCH_OPT ("armv8-m.base",	ARM_ARCH_V8M_BASE, FPU_ARCH_VFP),
25462  ARM_ARCH_OPT ("armv8-m.main",	ARM_ARCH_V8M_MAIN, FPU_ARCH_VFP),
25463  ARM_ARCH_OPT ("armv8-a",	ARM_ARCH_V8A,	 FPU_ARCH_VFP),
25464  ARM_ARCH_OPT ("armv8.1-a",	ARM_ARCH_V8_1A,	 FPU_ARCH_VFP),
25465  ARM_ARCH_OPT ("armv8.2-a",	ARM_ARCH_V8_2A,	 FPU_ARCH_VFP),
25466  ARM_ARCH_OPT ("xscale",	ARM_ARCH_XSCALE, FPU_ARCH_VFP),
25467  ARM_ARCH_OPT ("iwmmxt",	ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
25468  ARM_ARCH_OPT ("iwmmxt2",	ARM_ARCH_IWMMXT2,FPU_ARCH_VFP),
25469  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
25470};
25471#undef ARM_ARCH_OPT
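/* For example, "-march=armv7-a+sec" is matched against this table using only
   the characters before the '+' (see arm_parse_arch below), selecting the
   "armv7-a" entry; the remaining "+sec" is handed to arm_parse_extension.  */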
25472
25473/* ISA extensions in the co-processor and main instruction set space.  */
25474struct arm_option_extension_value_table
25475{
25476  const char *name;
25477  size_t name_len;
25478  const arm_feature_set merge_value;
25479  const arm_feature_set clear_value;
25480  /* List of architectures for which an extension is available.  ARM_ARCH_NONE
25481     indicates that an extension is available for all architectures while
25482     ARM_ANY marks an empty entry.  */
25483  const arm_feature_set allowed_archs[2];
25484};
25485
25486/* The following table must be in alphabetical order, with a NULL
25487   last entry.  */
25488#define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
25489#define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }
25490static const struct arm_option_extension_value_table arm_extensions[] =
25491{
25492  ARM_EXT_OPT ("crc",  ARCH_CRC_ARMV8, ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
25493			 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
25494  ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
25495			 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8),
25496				   ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
25497  ARM_EXT_OPT ("dsp",	ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
25498			ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
25499			ARM_FEATURE_CORE (ARM_EXT_V7M, ARM_EXT2_V8M)),
25500  ARM_EXT_OPT ("fp",     FPU_ARCH_VFP_ARMV8, ARM_FEATURE_COPROC (FPU_VFP_ARMV8),
25501				   ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
25502  ARM_EXT_OPT ("fp16",  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
25503			ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
25504			ARM_ARCH_V8_2A),
25505  ARM_EXT_OPT2 ("idiv",	ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
25506			ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
25507			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
25508			ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
25509  ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT),
25510			ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT), ARM_ARCH_NONE),
25511  ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2),
25512			ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2), ARM_ARCH_NONE),
25513  ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK),
25514			ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK), ARM_ARCH_NONE),
25515  ARM_EXT_OPT2 ("mp",	ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
25516			ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
25517			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
25518			ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
25519  ARM_EXT_OPT ("os",	ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
25520			ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
25521				   ARM_FEATURE_CORE_LOW (ARM_EXT_V6M)),
25522  ARM_EXT_OPT ("pan",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN),
25523			ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_PAN, 0),
25524			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
25525  ARM_EXT_OPT ("ras",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS),
25526			ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_RAS, 0),
25527			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
25528  ARM_EXT_OPT ("rdma",  FPU_ARCH_NEON_VFP_ARMV8_1,
25529			ARM_FEATURE_COPROC (FPU_NEON_ARMV8 | FPU_NEON_EXT_RDMA),
25530			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
25531  ARM_EXT_OPT2 ("sec",	ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
25532			ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
25533			ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
25534			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
25535  ARM_EXT_OPT ("simd",  FPU_ARCH_NEON_VFP_ARMV8,
25536			ARM_FEATURE_COPROC (FPU_NEON_ARMV8),
25537			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
25538  ARM_EXT_OPT ("virt",	ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT | ARM_EXT_ADIV
25539				     | ARM_EXT_DIV),
25540			ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT),
25541				   ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
25542  ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE),
25543			ARM_FEATURE_COPROC (ARM_CEXT_XSCALE), ARM_ARCH_NONE),
25544  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, { ARM_ARCH_NONE, ARM_ARCH_NONE } }
25545};
25546#undef ARM_EXT_OPT
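/* For example, "+idiv" above is only accepted when the base architecture
   includes v7-A or v7-R (its allowed_archs entries), and a "no" prefix such
   as "+nocrypto" removes the clear_value bits instead of merging merge_value
   (see arm_parse_extension below).  */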
25547
25548/* ISA floating-point and Advanced SIMD extensions.  */
25549struct arm_option_fpu_value_table
25550{
25551  const char *name;
25552  const arm_feature_set value;
25553};
25554
25555/* This list should, at a minimum, contain all the fpu names
25556   recognized by GCC.  */
25557static const struct arm_option_fpu_value_table arm_fpus[] =
25558{
25559  {"softfpa",		FPU_NONE},
25560  {"fpe",		FPU_ARCH_FPE},
25561  {"fpe2",		FPU_ARCH_FPE},
25562  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
25563  {"fpa",		FPU_ARCH_FPA},
25564  {"fpa10",		FPU_ARCH_FPA},
25565  {"fpa11",		FPU_ARCH_FPA},
25566  {"arm7500fe",		FPU_ARCH_FPA},
25567  {"softvfp",		FPU_ARCH_VFP},
25568  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
25569  {"vfp",		FPU_ARCH_VFP_V2},
25570  {"vfp9",		FPU_ARCH_VFP_V2},
25571  {"vfp3",              FPU_ARCH_VFP_V3}, /* For backwards compatbility.  */
25572  {"vfp10",		FPU_ARCH_VFP_V2},
25573  {"vfp10-r0",		FPU_ARCH_VFP_V1},
25574  {"vfpxd",		FPU_ARCH_VFP_V1xD},
25575  {"vfpv2",		FPU_ARCH_VFP_V2},
25576  {"vfpv3",		FPU_ARCH_VFP_V3},
25577  {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
25578  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
25579  {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
25580  {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
25581  {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
25582  {"arm1020t",		FPU_ARCH_VFP_V1},
25583  {"arm1020e",		FPU_ARCH_VFP_V2},
25584  {"arm1136jfs",	FPU_ARCH_VFP_V2},
25585  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
25586  {"maverick",		FPU_ARCH_MAVERICK},
25587  {"neon",              FPU_ARCH_VFP_V3_PLUS_NEON_V1},
25588  {"neon-fp16",		FPU_ARCH_NEON_FP16},
25589  {"vfpv4",		FPU_ARCH_VFP_V4},
25590  {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
25591  {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
25592  {"fpv5-d16",		FPU_ARCH_VFP_V5D16},
25593  {"fpv5-sp-d16",	FPU_ARCH_VFP_V5_SP_D16},
25594  {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
25595  {"fp-armv8",		FPU_ARCH_VFP_ARMV8},
25596  {"neon-fp-armv8",	FPU_ARCH_NEON_VFP_ARMV8},
25597  {"crypto-neon-fp-armv8",
25598			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
25599  {"neon-fp-armv8.1",	FPU_ARCH_NEON_VFP_ARMV8_1},
25600  {"crypto-neon-fp-armv8.1",
25601			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1},
25602  {NULL,		ARM_ARCH_NONE}
25603};
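/* These names are accepted both by "-mfpu=" on the command line and by the
   ".fpu" directive, e.g. "-mfpu=neon-vfpv4" or ".fpu vfpv3-d16".  */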
25604
25605struct arm_option_value_table
25606{
25607  const char *name;
25608  long value;
25609};
25610
25611static const struct arm_option_value_table arm_float_abis[] =
25612{
25613  {"hard",	ARM_FLOAT_ABI_HARD},
25614  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
25615  {"soft",	ARM_FLOAT_ABI_SOFT},
25616  {NULL,	0}
25617};
25618
25619#ifdef OBJ_ELF
25620/* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
25621static const struct arm_option_value_table arm_eabis[] =
25622{
25623  {"gnu",	EF_ARM_EABI_UNKNOWN},
25624  {"4",		EF_ARM_EABI_VER4},
25625  {"5",		EF_ARM_EABI_VER5},
25626  {NULL,	0}
25627};
25628#endif
25629
25630struct arm_long_option_table
25631{
25632  const char * option;		/* Substring to match.	*/
25633  const char * help;			/* Help information.  */
25634  int (* func) (const char * subopt);	/* Function to decode sub-option.  */
25635  const char * deprecated;		/* If non-null, print this message.  */
25636};
25637
25638static bfd_boolean
25639arm_parse_extension (const char *str, const arm_feature_set **opt_p)
25640{
25641  arm_feature_set *ext_set = XNEW (arm_feature_set);
25642
25643  /* We insist on extensions being specified in alphabetical order, and with
25644     extensions being added before being removed.  We achieve this by having
25645     the global ARM_EXTENSIONS table in alphabetical order, and using the
25646     ADDING_VALUE variable to indicate whether we are adding an extension (1)
25647     or removing it (0) and only allowing it to change in the order
25648     -1 -> 1 -> 0.  */
25649  const struct arm_option_extension_value_table * opt = NULL;
25650  const arm_feature_set arm_any = ARM_ANY;
25651  int adding_value = -1;
25652
25653  /* Copy the feature set, so that we can modify it.  */
25654  *ext_set = **opt_p;
25655  *opt_p = ext_set;
25656
25657  while (str != NULL && *str != 0)
25658    {
25659      const char *ext;
25660      size_t len;
25661
25662      if (*str != '+')
25663	{
25664	  as_bad (_("invalid architectural extension"));
25665	  return FALSE;
25666	}
25667
25668      str++;
25669      ext = strchr (str, '+');
25670
25671      if (ext != NULL)
25672	len = ext - str;
25673      else
25674	len = strlen (str);
25675
25676      if (len >= 2 && strncmp (str, "no", 2) == 0)
25677	{
25678	  if (adding_value != 0)
25679	    {
25680	      adding_value = 0;
25681	      opt = arm_extensions;
25682	    }
25683
25684	  len -= 2;
25685	  str += 2;
25686	}
25687      else if (len > 0)
25688	{
25689	  if (adding_value == -1)
25690	    {
25691	      adding_value = 1;
25692	      opt = arm_extensions;
25693	    }
25694	  else if (adding_value != 1)
25695	    {
25696	      as_bad (_("must specify extensions to add before specifying "
25697			"those to remove"));
25698	      return FALSE;
25699	    }
25700	}
25701
25702      if (len == 0)
25703	{
25704	  as_bad (_("missing architectural extension"));
25705	  return FALSE;
25706	}
25707
25708      gas_assert (adding_value != -1);
25709      gas_assert (opt != NULL);
25710
25711      /* Scan over the options table trying to find an exact match. */
25712      for (; opt->name != NULL; opt++)
25713	if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
25714	  {
25715	    int i, nb_allowed_archs =
25716	      sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
25717	    /* Check we can apply the extension to this architecture.  */
25718	    for (i = 0; i < nb_allowed_archs; i++)
25719	      {
25720		/* Empty entry.  */
25721		if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any))
25722		  continue;
25723		if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *ext_set))
25724		  break;
25725	      }
25726	    if (i == nb_allowed_archs)
25727	      {
25728		as_bad (_("extension does not apply to the base architecture"));
25729		return FALSE;
25730	      }
25731
25732	    /* Add or remove the extension.  */
25733	    if (adding_value)
25734	      ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->merge_value);
25735	    else
25736	      ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->clear_value);
25737
25738	    break;
25739	  }
25740
25741      if (opt->name == NULL)
25742	{
25743	  /* Did we fail to find an extension because it wasn't specified in
25744	     alphabetical order, or because it does not exist?  */
25745
25746	  for (opt = arm_extensions; opt->name != NULL; opt++)
25747	    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
25748	      break;
25749
25750	  if (opt->name == NULL)
25751	    as_bad (_("unknown architectural extension `%s'"), str);
25752	  else
25753	    as_bad (_("architectural extensions must be specified in "
25754		      "alphabetical order"));
25755
25756	  return FALSE;
25757	}
25758      else
25759	{
25760	  /* We should skip the extension we've just matched the next time
25761	     round.  */
25762	  opt++;
25763	}
25764
25765      str = ext;
25766    }
25767
25768  return TRUE;
25769}
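/* For example, given a v8 base, "-march=armv8-a+crc+crypto" is accepted,
   while "+crypto+crc" is rejected (extensions out of alphabetical order) and
   "+nocrypto+crc" is rejected (an addition after a removal).  */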
25770
25771static bfd_boolean
25772arm_parse_cpu (const char *str)
25773{
25774  const struct arm_cpu_option_table *opt;
25775  const char *ext = strchr (str, '+');
25776  size_t len;
25777
25778  if (ext != NULL)
25779    len = ext - str;
25780  else
25781    len = strlen (str);
25782
25783  if (len == 0)
25784    {
25785      as_bad (_("missing cpu name `%s'"), str);
25786      return FALSE;
25787    }
25788
25789  for (opt = arm_cpus; opt->name != NULL; opt++)
25790    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
25791      {
25792	mcpu_cpu_opt = &opt->value;
25793	mcpu_fpu_opt = &opt->default_fpu;
25794	if (opt->canonical_name)
25795	  {
25796	    gas_assert (sizeof selected_cpu_name > strlen (opt->canonical_name));
25797	    strcpy (selected_cpu_name, opt->canonical_name);
25798	  }
25799	else
25800	  {
25801	    size_t i;
25802
25803	    if (len >= sizeof selected_cpu_name)
25804	      len = (sizeof selected_cpu_name) - 1;
25805
25806	    for (i = 0; i < len; i++)
25807	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
25808	    selected_cpu_name[i] = 0;
25809	  }
25810
25811	if (ext != NULL)
25812	  return arm_parse_extension (ext, &mcpu_cpu_opt);
25813
25814	return TRUE;
25815      }
25816
25817  as_bad (_("unknown cpu `%s'"), str);
25818  return FALSE;
25819}
25820
25821static bfd_boolean
25822arm_parse_arch (const char *str)
25823{
25824  const struct arm_arch_option_table *opt;
25825  const char *ext = strchr (str, '+');
25826  size_t len;
25827
25828  if (ext != NULL)
25829    len = ext - str;
25830  else
25831    len = strlen (str);
25832
25833  if (len == 0)
25834    {
25835      as_bad (_("missing architecture name `%s'"), str);
25836      return FALSE;
25837    }
25838
25839  for (opt = arm_archs; opt->name != NULL; opt++)
25840    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
25841      {
25842	march_cpu_opt = &opt->value;
25843	march_fpu_opt = &opt->default_fpu;
25844	strcpy (selected_cpu_name, opt->name);
25845
25846	if (ext != NULL)
25847	  return arm_parse_extension (ext, &march_cpu_opt);
25848
25849	return TRUE;
25850      }
25851
25852  as_bad (_("unknown architecture `%s'\n"), str);
25853  return FALSE;
25854}
25855
25856static bfd_boolean
25857arm_parse_fpu (const char * str)
25858{
25859  const struct arm_option_fpu_value_table * opt;
25860
25861  for (opt = arm_fpus; opt->name != NULL; opt++)
25862    if (streq (opt->name, str))
25863      {
25864	mfpu_opt = &opt->value;
25865	return TRUE;
25866      }
25867
25868  as_bad (_("unknown floating point format `%s'\n"), str);
25869  return FALSE;
25870}
25871
25872static bfd_boolean
25873arm_parse_float_abi (const char * str)
25874{
25875  const struct arm_option_value_table * opt;
25876
25877  for (opt = arm_float_abis; opt->name != NULL; opt++)
25878    if (streq (opt->name, str))
25879      {
25880	mfloat_abi_opt = opt->value;
25881	return TRUE;
25882      }
25883
25884  as_bad (_("unknown floating point abi `%s'\n"), str);
25885  return FALSE;
25886}
25887
25888#ifdef OBJ_ELF
25889static bfd_boolean
25890arm_parse_eabi (const char * str)
25891{
25892  const struct arm_option_value_table *opt;
25893
25894  for (opt = arm_eabis; opt->name != NULL; opt++)
25895    if (streq (opt->name, str))
25896      {
25897	meabi_flags = opt->value;
25898	return TRUE;
25899      }
25900  as_bad (_("unknown EABI `%s'\n"), str);
25901  return FALSE;
25902}
25903#endif
25904
25905static bfd_boolean
25906arm_parse_it_mode (const char * str)
25907{
25908  bfd_boolean ret = TRUE;
25909
25910  if (streq ("arm", str))
25911    implicit_it_mode = IMPLICIT_IT_MODE_ARM;
25912  else if (streq ("thumb", str))
25913    implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
25914  else if (streq ("always", str))
25915    implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
25916  else if (streq ("never", str))
25917    implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
25918  else
25919    {
25920      as_bad (_("unknown implicit IT mode `%s', should be "\
25921		"arm, thumb, always, or never."), str);
25922      ret = FALSE;
25923    }
25924
25925  return ret;
25926}
25927
25928static bfd_boolean
25929arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED)
25930{
25931  codecomposer_syntax = TRUE;
25932  arm_comment_chars[0] = ';';
25933  arm_line_separator_chars[0] = 0;
25934  return TRUE;
25935}
25936
25937struct arm_long_option_table arm_long_opts[] =
25938{
25939  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
25940   arm_parse_cpu, NULL},
25941  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
25942   arm_parse_arch, NULL},
25943  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
25944   arm_parse_fpu, NULL},
25945  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
25946   arm_parse_float_abi, NULL},
25947#ifdef OBJ_ELF
25948  {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
25949   arm_parse_eabi, NULL},
25950#endif
25951  {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
25952   arm_parse_it_mode, NULL},
25953  {"mccs", N_("\t\t\t  TI CodeComposer Studio syntax compatibility mode"),
25954   arm_ccs_mode, NULL},
25955  {NULL, NULL, 0, NULL}
25956};
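/* For example, a command line such as
     as -mcpu=cortex-a9 -mfpu=neon -mfloat-abi=softfp -mimplicit-it=thumb ...
   is handled entry by entry through the table above, each sub-option being
   passed to the corresponding arm_parse_* function.  */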
25957
25958int
25959md_parse_option (int c, const char * arg)
25960{
25961  struct arm_option_table *opt;
25962  const struct arm_legacy_option_table *fopt;
25963  struct arm_long_option_table *lopt;
25964
25965  switch (c)
25966    {
25967#ifdef OPTION_EB
25968    case OPTION_EB:
25969      target_big_endian = 1;
25970      break;
25971#endif
25972
25973#ifdef OPTION_EL
25974    case OPTION_EL:
25975      target_big_endian = 0;
25976      break;
25977#endif
25978
25979    case OPTION_FIX_V4BX:
25980      fix_v4bx = TRUE;
25981      break;
25982
25983    case 'a':
25984      /* Listing option.  Just ignore these; we don't support additional
25985	 ones.  */
25986      return 0;
25987
25988    default:
25989      for (opt = arm_opts; opt->option != NULL; opt++)
25990	{
25991	  if (c == opt->option[0]
25992	      && ((arg == NULL && opt->option[1] == 0)
25993		  || streq (arg, opt->option + 1)))
25994	    {
25995	      /* If the option is deprecated, tell the user.  */
25996	      if (warn_on_deprecated && opt->deprecated != NULL)
25997		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
25998			   arg ? arg : "", _(opt->deprecated));
25999
26000	      if (opt->var != NULL)
26001		*opt->var = opt->value;
26002
26003	      return 1;
26004	    }
26005	}
26006
26007      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
26008	{
26009	  if (c == fopt->option[0]
26010	      && ((arg == NULL && fopt->option[1] == 0)
26011		  || streq (arg, fopt->option + 1)))
26012	    {
26013	      /* If the option is deprecated, tell the user.  */
26014	      if (warn_on_deprecated && fopt->deprecated != NULL)
26015		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
26016			   arg ? arg : "", _(fopt->deprecated));
26017
26018	      if (fopt->var != NULL)
26019		*fopt->var = &fopt->value;
26020
26021	      return 1;
26022	    }
26023	}
26024
26025      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
26026	{
26027	  /* These options are expected to have an argument.  */
26028	  if (c == lopt->option[0]
26029	      && arg != NULL
26030	      && strncmp (arg, lopt->option + 1,
26031			  strlen (lopt->option + 1)) == 0)
26032	    {
26033	      /* If the option is deprecated, tell the user.  */
26034	      if (warn_on_deprecated && lopt->deprecated != NULL)
26035		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
26036			   _(lopt->deprecated));
26037
26038	      /* Call the sub-option parser.  */
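	      /* For example, with lopt->option "march=" and arg "arch=armv7-a"
		 the expression below points at "armv7-a"; the "- 1" accounts
		 for ARG not containing the leading 'm'.  */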
26039	      return lopt->func (arg + strlen (lopt->option) - 1);
26040	    }
26041	}
26042
26043      return 0;
26044    }
26045
26046  return 1;
26047}
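#if 0
/* Illustrative sketch only, assuming the generic option machinery passes the
   option letter as C and the rest of the option text as ARG (as the matching
   logic above expects); the call below corresponds to "-mcpu=cortex-a8" on
   the command line and ends up in arm_parse_cpu ("cortex-a8").  */
static void
example_md_parse_option_usage (void)
{
  md_parse_option ('m', "cpu=cortex-a8");
}
#endif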
26048
26049void
26050md_show_usage (FILE * fp)
26051{
26052  struct arm_option_table *opt;
26053  struct arm_long_option_table *lopt;
26054
26055  fprintf (fp, _(" ARM-specific assembler options:\n"));
26056
26057  for (opt = arm_opts; opt->option != NULL; opt++)
26058    if (opt->help != NULL)
26059      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));
26060
26061  for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
26062    if (lopt->help != NULL)
26063      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));
26064
26065#ifdef OPTION_EB
26066  fprintf (fp, _("\
26067  -EB                     assemble code for a big-endian cpu\n"));
26068#endif
26069
26070#ifdef OPTION_EL
26071  fprintf (fp, _("\
26072  -EL                     assemble code for a little-endian cpu\n"));
26073#endif
26074
26075  fprintf (fp, _("\
26076  --fix-v4bx              Allow BX in ARMv4 code\n"));
26077}
26078
26079
26080#ifdef OBJ_ELF
26081typedef struct
26082{
26083  int val;
26084  arm_feature_set flags;
26085} cpu_arch_ver_table;
26086
26087/* Mapping from CPU features to EABI CPU arch values.  As a general rule, the
26088   table must be sorted with the fewest features first, but some reordering is
26089   needed, e.g. for Thumb-2 instructions to be detected as coming from ARMv6T2.  */
26090static const cpu_arch_ver_table cpu_arch_ver[] =
26091{
26092    {1, ARM_ARCH_V4},
26093    {2, ARM_ARCH_V4T},
26094    {3, ARM_ARCH_V5},
26095    {3, ARM_ARCH_V5T},
26096    {4, ARM_ARCH_V5TE},
26097    {5, ARM_ARCH_V5TEJ},
26098    {6, ARM_ARCH_V6},
26099    {9, ARM_ARCH_V6K},
26100    {7, ARM_ARCH_V6Z},
26101    {11, ARM_ARCH_V6M},
26102    {12, ARM_ARCH_V6SM},
26103    {8, ARM_ARCH_V6T2},
26104    {10, ARM_ARCH_V7VE},
26105    {10, ARM_ARCH_V7R},
26106    {10, ARM_ARCH_V7M},
26107    {14, ARM_ARCH_V8A},
26108    {16, ARM_ARCH_V8M_BASE},
26109    {17, ARM_ARCH_V8M_MAIN},
26110    {0, ARM_ARCH_NONE}
26111};
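/* For example, for code using only ARMv6K features the selection loop in
   aeabi_set_public_attributes below first matches {6, ARM_ARCH_V6} and then
   {9, ARM_ARCH_V6K}, which still contributes the v6K-only features, so the
   reported Tag_CPU_arch ends up as 9.  */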
26112
26113/* Set an attribute if it has not already been set by the user.  */
26114static void
26115aeabi_set_attribute_int (int tag, int value)
26116{
26117  if (tag < 1
26118      || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
26119      || !attributes_set_explicitly[tag])
26120    bfd_elf_add_proc_attr_int (stdoutput, tag, value);
26121}
26122
26123static void
26124aeabi_set_attribute_string (int tag, const char *value)
26125{
26126  if (tag < 1
26127      || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
26128      || !attributes_set_explicitly[tag])
26129    bfd_elf_add_proc_attr_string (stdoutput, tag, value);
26130}
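/* For example, if the source explicitly sets a tag (e.g. via the
   ".eabi_attribute" directive handled elsewhere in this file), the
   corresponding attributes_set_explicitly[] slot is non-zero and the helpers
   above leave the user's value untouched.  */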
26131
26132/* Set the public EABI object attributes.  */
26133void
26134aeabi_set_public_attributes (void)
26135{
26136  int arch;
26137  char profile;
26138  int virt_sec = 0;
26139  int fp16_optional = 0;
26140  arm_feature_set arm_arch = ARM_ARCH_NONE;
26141  arm_feature_set flags;
26142  arm_feature_set tmp;
26143  arm_feature_set arm_arch_v8m_base = ARM_ARCH_V8M_BASE;
26144  const cpu_arch_ver_table *p;
26145
26146  /* Choose the architecture based on the capabilities of the requested cpu
26147     (if any) and/or the instructions actually used.  */
26148  ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
26149  ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
26150  ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
26151
26152  if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
26153    ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);
26154
26155  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
26156    ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);
26157
26158  selected_cpu = flags;
26159
26160  /* Allow the user to override the reported architecture.  */
26161  if (object_arch)
26162    {
26163      ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
26164      ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
26165    }
26166
26167  /* We need to make sure that the attributes do not identify us as v6S-M
26168     when the only v6S-M feature in use is the Operating System Extensions.  */
26169  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_os))
26170      if (!ARM_CPU_HAS_FEATURE (flags, arm_arch_v6m_only))
26171	ARM_CLEAR_FEATURE (flags, flags, arm_ext_os);
26172
26173  tmp = flags;
26174  arch = 0;
26175  for (p = cpu_arch_ver; p->val; p++)
26176    {
26177      if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
26178	{
26179	  arch = p->val;
26180	  arm_arch = p->flags;
26181	  ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
26182	}
26183    }
26184
26185  /* The table lookup above finds the last architecture to contribute
26186     a new feature.  Unfortunately, v7E-M (Tag_CPU_arch value 13) is a
26187     subset of the union of v6T2 and v7-M, so it is never seen as
26188     contributing a new feature.  We cannot search for the last entry
26189     which is entirely used, because if no CPU is specified we build up
26190     only those flags actually used.  Perhaps we should separate out the
26191     specified and implicit cases.  Avoid taking this path for -march=all
26192     by checking for contradictory v7-A / v7-M features.  */
26193  if (arch == TAG_CPU_ARCH_V7
26194      && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
26195      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m)
26196      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v6_dsp))
26197    {
26198      arch = TAG_CPU_ARCH_V7E_M;
26199      arm_arch = (arm_feature_set) ARM_ARCH_V7EM;
26200    }
26201
26202  ARM_CLEAR_FEATURE (tmp, flags, arm_arch_v8m_base);
26203  if (arch == TAG_CPU_ARCH_V8M_BASE && ARM_CPU_HAS_FEATURE (tmp, arm_arch_any))
26204    {
26205      arch = TAG_CPU_ARCH_V8M_MAIN;
26206      arm_arch = (arm_feature_set) ARM_ARCH_V8M_MAIN;
26207    }
26208
26209  /* In cpu_arch_ver ARMv8-A is before ARMv8-M for atomics to be detected as
26210     coming from ARMv8-A.  However, since ARMv8-A has more instructions than
26211     ARMv8-M, -march=all must be detected as ARMv8-A.  */
26212  if (arch == TAG_CPU_ARCH_V8M_MAIN
26213      && ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
26214    {
26215      arch = TAG_CPU_ARCH_V8;
26216      arm_arch = (arm_feature_set) ARM_ARCH_V8A;
26217    }
26218
26219  /* Tag_CPU_name.  */
26220  if (selected_cpu_name[0])
26221    {
26222      char *q;
26223
26224      q = selected_cpu_name;
26225      if (strncmp (q, "armv", 4) == 0)
26226	{
26227	  int i;
26228
26229	  q += 4;
26230	  for (i = 0; q[i]; i++)
26231	    q[i] = TOUPPER (q[i]);
26232	}
26233      aeabi_set_attribute_string (Tag_CPU_name, q);
26234    }
26235
26236  /* Tag_CPU_arch.  */
26237  aeabi_set_attribute_int (Tag_CPU_arch, arch);
26238
26239  /* Tag_CPU_arch_profile.  */
26240  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
26241      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
26242      || (ARM_CPU_HAS_FEATURE (flags, arm_ext_atomics)
26243	  && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m_m_only)))
26244    profile = 'A';
26245  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
26246    profile = 'R';
26247  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
26248    profile = 'M';
26249  else
26250    profile = '\0';
26251
26252  if (profile != '\0')
26253    aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);
26254
26255  /* Tag_DSP_extension.  */
26256  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_dsp))
26257    {
26258      arm_feature_set ext;
26259
26260      /* DSP instructions not in architecture.  */
26261      ARM_CLEAR_FEATURE (ext, flags, arm_arch);
26262      if (ARM_CPU_HAS_FEATURE (ext, arm_ext_dsp))
26263	aeabi_set_attribute_int (Tag_DSP_extension, 1);
26264    }
26265
26266  /* Tag_ARM_ISA_use.  */
26267  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
26268      || arch == 0)
26269    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);
26270
26271  /* Tag_THUMB_ISA_use.  */
26272  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
26273      || arch == 0)
26274    {
26275      int thumb_isa_use;
26276
26277      if (!ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
26278	  && ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m_m_only))
26279	thumb_isa_use = 3;
26280      else if (ARM_CPU_HAS_FEATURE (flags, arm_arch_t2))
26281	thumb_isa_use = 2;
26282      else
26283	thumb_isa_use = 1;
26284      aeabi_set_attribute_int (Tag_THUMB_ISA_use, thumb_isa_use);
26285    }
26286
26287  /* Tag_VFP_arch.  */
26288  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8xd))
26289    aeabi_set_attribute_int (Tag_VFP_arch,
26290			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
26291			     ? 7 : 8);
26292  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
26293    aeabi_set_attribute_int (Tag_VFP_arch,
26294			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
26295			     ? 5 : 6);
26296  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
26297    {
26298      fp16_optional = 1;
26299      aeabi_set_attribute_int (Tag_VFP_arch, 3);
26300    }
26301  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
26302    {
26303      aeabi_set_attribute_int (Tag_VFP_arch, 4);
26304      fp16_optional = 1;
26305    }
26306  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
26307    aeabi_set_attribute_int (Tag_VFP_arch, 2);
26308  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
26309	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
26310    aeabi_set_attribute_int (Tag_VFP_arch, 1);
26311
26312  /* Tag_ABI_HardFP_use.  */
26313  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
26314      && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
26315    aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);
26316
26317  /* Tag_WMMX_arch.  */
26318  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
26319    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
26320  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
26321    aeabi_set_attribute_int (Tag_WMMX_arch, 1);
26322
26323  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
26324  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v8_1))
26325    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 4);
26326  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
26327    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
26328  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
26329    {
26330      if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
26331	{
26332	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
26333	}
26334      else
26335	{
26336	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
26337	  fp16_optional = 1;
26338	}
26339    }
26340
26341  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
26342  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
26343    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);
26344
26345  /* Tag_DIV_use.
26346
26347     We set Tag_DIV_use to two when integer divide instructions have been used
26348     in ARM state, or when Thumb integer divide instructions have been used,
26349     but we have no architecture profile set, nor have we any ARM instructions.
26350
26351     For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
26352     by the base architecture.
26353
26354     For new architectures these tests will need to be re-examined.  */
26355  gas_assert (arch <= TAG_CPU_ARCH_V8
26356	      || (arch >= TAG_CPU_ARCH_V8M_BASE
26357		  && arch <= TAG_CPU_ARCH_V8M_MAIN));
26358  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
26359      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m))
26360    aeabi_set_attribute_int (Tag_DIV_use, 0);
26361  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
26362	   || (profile == '\0'
26363	       && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
26364	       && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
26365    aeabi_set_attribute_int (Tag_DIV_use, 2);
26366
26367  /* Tag_MPextension_use.  */
26368  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
26369    aeabi_set_attribute_int (Tag_MPextension_use, 1);
26370
26371  /* Tag_Virtualization_use.  */
26372  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
26373    virt_sec |= 1;
26374  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
26375    virt_sec |= 2;
26376  if (virt_sec != 0)
26377    aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
26378}
26379
26380/* Add the default contents for the .ARM.attributes section.  */
26381void
26382arm_md_end (void)
26383{
26384  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
26385    return;
26386
26387  aeabi_set_public_attributes ();
26388}
26389#endif /* OBJ_ELF */
26390
26391
26392/* Parse a .cpu directive.  */
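/* e.g. ".cpu cortex-a8" switches the permitted instruction set and the
   recorded CPU name from this point on, using the same table as -mcpu=.  */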
26393
26394static void
26395s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
26396{
26397  const struct arm_cpu_option_table *opt;
26398  char *name;
26399  char saved_char;
26400
26401  name = input_line_pointer;
26402  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26403    input_line_pointer++;
26404  saved_char = *input_line_pointer;
26405  *input_line_pointer = 0;
26406
26407  /* Skip the first "all" entry.  */
26408  for (opt = arm_cpus + 1; opt->name != NULL; opt++)
26409    if (streq (opt->name, name))
26410      {
26411	mcpu_cpu_opt = &opt->value;
26412	selected_cpu = opt->value;
26413	if (opt->canonical_name)
26414	  strcpy (selected_cpu_name, opt->canonical_name);
26415	else
26416	  {
26417	    int i;
26418	    for (i = 0; opt->name[i]; i++)
26419	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
26420
26421	    selected_cpu_name[i] = 0;
26422	  }
26423	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
26424	*input_line_pointer = saved_char;
26425	demand_empty_rest_of_line ();
26426	return;
26427      }
26428  as_bad (_("unknown cpu `%s'"), name);
26429  *input_line_pointer = saved_char;
26430  ignore_rest_of_line ();
26431}
26432
26433
26434/* Parse a .arch directive.  */
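/* e.g. ".arch armv5te".  Unlike -march=, no "+extension" suffix is parsed
   here; use .arch_extension for that.  */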
26435
26436static void
26437s_arm_arch (int ignored ATTRIBUTE_UNUSED)
26438{
26439  const struct arm_arch_option_table *opt;
26440  char saved_char;
26441  char *name;
26442
26443  name = input_line_pointer;
26444  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26445    input_line_pointer++;
26446  saved_char = *input_line_pointer;
26447  *input_line_pointer = 0;
26448
26449  /* Skip the first "all" entry.  */
26450  for (opt = arm_archs + 1; opt->name != NULL; opt++)
26451    if (streq (opt->name, name))
26452      {
26453	mcpu_cpu_opt = &opt->value;
26454	selected_cpu = opt->value;
26455	strcpy (selected_cpu_name, opt->name);
26456	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
26457	*input_line_pointer = saved_char;
26458	demand_empty_rest_of_line ();
26459	return;
26460      }
26461
26462  as_bad (_("unknown architecture `%s'\n"), name);
26463  *input_line_pointer = saved_char;
26464  ignore_rest_of_line ();
26465}
26466
26467
26468/* Parse a .object_arch directive.  */
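/* e.g. ".object_arch armv4t" only overrides the architecture recorded in the
   object attributes (see object_arch in aeabi_set_public_attributes); it does
   not change which instructions are accepted.  */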
26469
26470static void
26471s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
26472{
26473  const struct arm_arch_option_table *opt;
26474  char saved_char;
26475  char *name;
26476
26477  name = input_line_pointer;
26478  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26479    input_line_pointer++;
26480  saved_char = *input_line_pointer;
26481  *input_line_pointer = 0;
26482
26483  /* Skip the first "all" entry.  */
26484  for (opt = arm_archs + 1; opt->name != NULL; opt++)
26485    if (streq (opt->name, name))
26486      {
26487	object_arch = &opt->value;
26488	*input_line_pointer = saved_char;
26489	demand_empty_rest_of_line ();
26490	return;
26491      }
26492
26493  as_bad (_("unknown architecture `%s'\n"), name);
26494  *input_line_pointer = saved_char;
26495  ignore_rest_of_line ();
26496}
26497
26498/* Parse a .arch_extension directive.  */
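/* e.g. ".arch_extension sec" enables the Security Extensions instructions and
   ".arch_extension nosec" removes them again, subject to the allowed_archs
   check below.  */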
26499
26500static void
26501s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
26502{
26503  const struct arm_option_extension_value_table *opt;
26504  const arm_feature_set arm_any = ARM_ANY;
26505  char saved_char;
26506  char *name;
26507  int adding_value = 1;
26508
26509  name = input_line_pointer;
26510  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26511    input_line_pointer++;
26512  saved_char = *input_line_pointer;
26513  *input_line_pointer = 0;
26514
26515  if (strlen (name) >= 2
26516      && strncmp (name, "no", 2) == 0)
26517    {
26518      adding_value = 0;
26519      name += 2;
26520    }
26521
26522  for (opt = arm_extensions; opt->name != NULL; opt++)
26523    if (streq (opt->name, name))
26524      {
26525	int i, nb_allowed_archs =
26526	  sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
26527	for (i = 0; i < nb_allowed_archs; i++)
26528	  {
26529	    /* Empty entry.  */
26530	    if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any))
26531	      continue;
26532	    if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *mcpu_cpu_opt))
26533	      break;
26534	  }
26535
26536	if (i == nb_allowed_archs)
26537	  {
26538	    as_bad (_("architectural extension `%s' is not allowed for the "
26539		      "current base architecture"), name);
26540	    break;
26541	  }
26542
26543	if (adding_value)
26544	  ARM_MERGE_FEATURE_SETS (selected_cpu, selected_cpu,
26545				  opt->merge_value);
26546	else
26547	  ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, opt->clear_value);
26548
26549	mcpu_cpu_opt = &selected_cpu;
26550	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
26551	*input_line_pointer = saved_char;
26552	demand_empty_rest_of_line ();
26553	return;
26554      }
26555
26556  if (opt->name == NULL)
26557    as_bad (_("unknown architecture extension `%s'\n"), name);
26558
26559  *input_line_pointer = saved_char;
26560  ignore_rest_of_line ();
26561}
26562
26563/* Parse a .fpu directive.  */
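/* e.g. ".fpu neon-vfpv4" selects the FPU/SIMD instruction set from the
   arm_fpus table above from this point on.  */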
26564
26565static void
26566s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
26567{
26568  const struct arm_option_fpu_value_table *opt;
26569  char saved_char;
26570  char *name;
26571
26572  name = input_line_pointer;
26573  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26574    input_line_pointer++;
26575  saved_char = *input_line_pointer;
26576  *input_line_pointer = 0;
26577
26578  for (opt = arm_fpus; opt->name != NULL; opt++)
26579    if (streq (opt->name, name))
26580      {
26581	mfpu_opt = &opt->value;
26582	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
26583	*input_line_pointer = saved_char;
26584	demand_empty_rest_of_line ();
26585	return;
26586      }
26587
26588  as_bad (_("unknown floating point format `%s'\n"), name);
26589  *input_line_pointer = saved_char;
26590  ignore_rest_of_line ();
26591}
26592
26593/* Copy symbol information.  */
26594
26595void
26596arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
26597{
26598  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
26599}
26600
26601#ifdef OBJ_ELF
26602/* Given a symbolic attribute NAME, return the proper integer value.
26603   Returns -1 if the attribute is not known.  */
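/* For example, arm_convert_symbolic_attribute ("Tag_CPU_arch") returns the
   numeric tag used by ".eabi_attribute Tag_CPU_arch, <value>".  */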
26604
26605int
26606arm_convert_symbolic_attribute (const char *name)
26607{
26608  static const struct
26609  {
26610    const char * name;
26611    const int    tag;
26612  }
26613  attribute_table[] =
26614    {
26615      /* When you modify this table you should
26616	 also modify the list in doc/c-arm.texi.  */
26617#define T(tag) {#tag, tag}
26618      T (Tag_CPU_raw_name),
26619      T (Tag_CPU_name),
26620      T (Tag_CPU_arch),
26621      T (Tag_CPU_arch_profile),
26622      T (Tag_ARM_ISA_use),
26623      T (Tag_THUMB_ISA_use),
26624      T (Tag_FP_arch),
26625      T (Tag_VFP_arch),
26626      T (Tag_WMMX_arch),
26627      T (Tag_Advanced_SIMD_arch),
26628      T (Tag_PCS_config),
26629      T (Tag_ABI_PCS_R9_use),
26630      T (Tag_ABI_PCS_RW_data),
26631      T (Tag_ABI_PCS_RO_data),
26632      T (Tag_ABI_PCS_GOT_use),
26633      T (Tag_ABI_PCS_wchar_t),
26634      T (Tag_ABI_FP_rounding),
26635      T (Tag_ABI_FP_denormal),
26636      T (Tag_ABI_FP_exceptions),
26637      T (Tag_ABI_FP_user_exceptions),
26638      T (Tag_ABI_FP_number_model),
26639      T (Tag_ABI_align_needed),
26640      T (Tag_ABI_align8_needed),
26641      T (Tag_ABI_align_preserved),
26642      T (Tag_ABI_align8_preserved),
26643      T (Tag_ABI_enum_size),
26644      T (Tag_ABI_HardFP_use),
26645      T (Tag_ABI_VFP_args),
26646      T (Tag_ABI_WMMX_args),
26647      T (Tag_ABI_optimization_goals),
26648      T (Tag_ABI_FP_optimization_goals),
26649      T (Tag_compatibility),
26650      T (Tag_CPU_unaligned_access),
26651      T (Tag_FP_HP_extension),
26652      T (Tag_VFP_HP_extension),
26653      T (Tag_ABI_FP_16bit_format),
26654      T (Tag_MPextension_use),
26655      T (Tag_DIV_use),
26656      T (Tag_nodefaults),
26657      T (Tag_also_compatible_with),
26658      T (Tag_conformance),
26659      T (Tag_T2EE_use),
26660      T (Tag_Virtualization_use),
26661      T (Tag_DSP_extension),
26662      /* We deliberately do not include Tag_MPextension_use_legacy.  */
26663#undef T
26664    };
26665  unsigned int i;
26666
26667  if (name == NULL)
26668    return -1;
26669
26670  for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
26671    if (streq (name, attribute_table[i].name))
26672      return attribute_table[i].tag;
26673
26674  return -1;
26675}
26676
26677
26678/* Apply the symbol value to a relocation only when the relocation is against
26679   a local symbol in the same segment as the fixup and the architecture in use
26680   supports BLX and simple ARM/Thumb state switches.  */
26681int
26682arm_apply_sym_value (struct fix * fixP, segT this_seg)
26683{
26684  if (fixP->fx_addsy
26685      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
26686      /* PR 17444: If the local symbol is in a different section then a reloc
26687	 will always be generated for it, so applying the symbol value now
26688	 will result in a double offset being stored in the relocation.  */
26689      && (S_GET_SEGMENT (fixP->fx_addsy) == this_seg)
26690      && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
26691    {
26692      switch (fixP->fx_r_type)
26693	{
26694	case BFD_RELOC_ARM_PCREL_BLX:
26695	case BFD_RELOC_THUMB_PCREL_BRANCH23:
26696	  if (ARM_IS_FUNC (fixP->fx_addsy))
26697	    return 1;
26698	  break;
26699
26700	case BFD_RELOC_ARM_PCREL_CALL:
26701	case BFD_RELOC_THUMB_PCREL_BLX:
26702	  if (THUMB_IS_FUNC (fixP->fx_addsy))
26703	    return 1;
26704	  break;
26705
26706	default:
26707	  break;
26708	}
26709
26710    }
26711  return 0;
26712}
26713#endif /* OBJ_ELF */
26714