/* tc-arm.c -- Assemble for the ARM
   Copyright (C) 1994-2017 Free Software Foundation, Inc.
   Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
	Modified by David Taylor (dtaylor@armltd.co.uk)
	Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
	Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
	Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)

   This file is part of GAS, the GNU Assembler.

   GAS is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GAS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GAS; see the file COPYING.  If not, write to the Free
   Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
   02110-1301, USA.  */

#include "as.h"
#include <limits.h>
#include <stdarg.h>
#define	 NO_RELOC 0
#include "safe-ctype.h"
#include "subsegs.h"
#include "obstack.h"
#include "libiberty.h"
#include "opcode/arm.h"

#ifdef OBJ_ELF
#include "elf/arm.h"
#include "dw2gencfi.h"
#endif

#include "dwarf2dbg.h"

#ifdef OBJ_ELF
/* Must be at least the size of the largest unwind opcode (currently two).  */
#define ARM_OPCODE_CHUNK_SIZE 8

/* This structure holds the unwinding state.  */

static struct
{
  symbolS *	  proc_start;
  symbolS *	  table_entry;
  symbolS *	  personality_routine;
  int		  personality_index;
  /* The segment containing the function.  */
  segT		  saved_seg;
  subsegT	  saved_subseg;
  /* Opcodes generated from this function.  */
  unsigned char * opcodes;
  int		  opcode_count;
  int		  opcode_alloc;
  /* The number of bytes pushed to the stack.  */
  offsetT	  frame_size;
  /* We don't add stack adjustment opcodes immediately so that we can merge
     multiple adjustments.  We can also omit the final adjustment
     when using a frame pointer.  */
  offsetT	  pending_offset;
  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     hold the reg+offset to use when restoring sp from a frame pointer.  */
  offsetT	  fp_offset;
  int		  fp_reg;
  /* Nonzero if an unwind_setfp directive has been seen.  */
  unsigned	  fp_used:1;
  /* Nonzero if the last opcode restores sp from fp_reg.  */
  unsigned	  sp_restored:1;
} unwind;

#endif /* OBJ_ELF */

/* Results from operand parsing worker functions.  */

typedef enum
{
  PARSE_OPERAND_SUCCESS,
  PARSE_OPERAND_FAIL,
  PARSE_OPERAND_FAIL_NO_BACKTRACK
} parse_operand_result;

enum arm_float_abi
{
  ARM_FLOAT_ABI_HARD,
  ARM_FLOAT_ABI_SOFTFP,
  ARM_FLOAT_ABI_SOFT
};

/* Types of processor to assemble for.	*/
#ifndef CPU_DEFAULT
/* The code that was here used to select a default CPU depending on compiler
   pre-defines which were only present when doing native builds, thus
   changing gas' default behaviour depending upon the build host.
   If you have a target that requires a default CPU option then you
   should define CPU_DEFAULT here.  */
#endif

#ifndef FPU_DEFAULT
# ifdef TE_LINUX
#  define FPU_DEFAULT FPU_ARCH_FPA
# elif defined (TE_NetBSD)
#  ifdef OBJ_ELF
#   define FPU_DEFAULT FPU_ARCH_VFP	/* Soft-float, but VFP order.  */
#  else
    /* Legacy a.out format.  */
#   define FPU_DEFAULT FPU_ARCH_FPA	/* Soft-float, but FPA order.  */
#  endif
# elif defined (TE_VXWORKS)
#  define FPU_DEFAULT FPU_ARCH_VFP	/* Soft-float, VFP order.  */
# else
   /* For backwards compatibility, default to FPA.  */
#  define FPU_DEFAULT FPU_ARCH_FPA
# endif
#endif /* ifndef FPU_DEFAULT */

#define streq(a, b)	      (strcmp (a, b) == 0)

static arm_feature_set cpu_variant;
static arm_feature_set arm_arch_used;
static arm_feature_set thumb_arch_used;

/* Flags stored in private area of BFD structure.  */
static int uses_apcs_26	     = FALSE;
static int atpcs	     = FALSE;
static int support_interwork = FALSE;
static int uses_apcs_float   = FALSE;
static int pic_code	     = FALSE;
static int fix_v4bx	     = FALSE;
/* Warn on using deprecated features.  */
static int warn_on_deprecated = TRUE;

/* Understand CodeComposer Studio assembly syntax.  */
bfd_boolean codecomposer_syntax = FALSE;

/* Variables that we set while parsing command-line options.  Once all
   options have been read we re-process these values to set the real
   assembly flags.  */
static const arm_feature_set *legacy_cpu = NULL;
static const arm_feature_set *legacy_fpu = NULL;

static const arm_feature_set *mcpu_cpu_opt = NULL;
static const arm_feature_set *mcpu_fpu_opt = NULL;
static const arm_feature_set *march_cpu_opt = NULL;
static const arm_feature_set *march_fpu_opt = NULL;
static const arm_feature_set *mfpu_opt = NULL;
static const arm_feature_set *object_arch = NULL;

/* Constants for known architecture features.  */
static const arm_feature_set fpu_default = FPU_DEFAULT;
static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V1;
static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V3;
static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED = FPU_ARCH_NEON_V1;
static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
#ifdef OBJ_ELF
static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
#endif
static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;

#ifdef CPU_DEFAULT
static const arm_feature_set cpu_default = CPU_DEFAULT;
#endif

static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V2);
static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
static const arm_feature_set arm_ext_v4t_5 =
  ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
static const arm_feature_set arm_ext_v6m = ARM_FEATURE_CORE_LOW (ARM_EXT_V6M);
static const arm_feature_set arm_ext_v6_notm =
  ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
static const arm_feature_set arm_ext_v6_dsp =
  ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
static const arm_feature_set arm_ext_barrier =
  ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
static const arm_feature_set arm_ext_msr =
  ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
#ifdef OBJ_ELF
static const arm_feature_set arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
#endif
static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
static const arm_feature_set arm_ext_m =
  ARM_FEATURE_CORE (ARM_EXT_V6M | ARM_EXT_OS | ARM_EXT_V7M,
		    ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN);
static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M);
static const arm_feature_set arm_ext_v8m_main =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN);
/* Instructions in ARMv8-M only found in M profile architectures.  */
static const arm_feature_set arm_ext_v8m_m_only =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
static const arm_feature_set arm_ext_v6t2_v8m =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M);
/* Instructions shared between ARMv8-A and ARMv8-M.  */
static const arm_feature_set arm_ext_atomics =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS);
#ifdef OBJ_ELF
/* DSP instructions Tag_DSP_extension refers to.  */
static const arm_feature_set arm_ext_dsp =
  ARM_FEATURE_CORE_LOW (ARM_EXT_V5E | ARM_EXT_V5ExP | ARM_EXT_V6_DSP);
#endif
static const arm_feature_set arm_ext_ras =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS);
/* FP16 instructions.  */
static const arm_feature_set arm_ext_fp16 =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST);
static const arm_feature_set arm_ext_v8_3 =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A);

static const arm_feature_set arm_arch_any = ARM_ANY;
static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED = ARM_FEATURE (-1, -1, -1);
static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
#ifdef OBJ_ELF
static const arm_feature_set arm_arch_v6m_only = ARM_ARCH_V6M_ONLY;
#endif

static const arm_feature_set arm_cext_iwmmxt2 =
  ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
static const arm_feature_set arm_cext_iwmmxt =
  ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
static const arm_feature_set arm_cext_xscale =
  ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
static const arm_feature_set arm_cext_maverick =
  ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);
static const arm_feature_set fpu_fpa_ext_v1 =
  ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
static const arm_feature_set fpu_fpa_ext_v2 =
  ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
static const arm_feature_set fpu_vfp_ext_v1xd =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
static const arm_feature_set fpu_vfp_ext_v1 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
static const arm_feature_set fpu_vfp_ext_v2 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
static const arm_feature_set fpu_vfp_ext_v3xd =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
static const arm_feature_set fpu_vfp_ext_v3 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
static const arm_feature_set fpu_vfp_ext_d32 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
static const arm_feature_set fpu_neon_ext_v1 =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
static const arm_feature_set fpu_vfp_v3_or_neon_ext =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
#ifdef OBJ_ELF
static const arm_feature_set fpu_vfp_fp16 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
static const arm_feature_set fpu_neon_ext_fma =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
#endif
static const arm_feature_set fpu_vfp_ext_fma =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
static const arm_feature_set fpu_vfp_ext_armv8 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
static const arm_feature_set fpu_vfp_ext_armv8xd =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
static const arm_feature_set fpu_neon_ext_armv8 =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
static const arm_feature_set fpu_crypto_ext_armv8 =
  ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
static const arm_feature_set crc_ext_armv8 =
  ARM_FEATURE_COPROC (CRC_EXT_ARMV8);
static const arm_feature_set fpu_neon_ext_v8_1 =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA);

static int mfloat_abi_opt = -1;
/* Record user cpu selection for object attributes.  */
static arm_feature_set selected_cpu = ARM_ARCH_NONE;
/* Must be long enough to hold any of the names in arm_cpus.  */
static char selected_cpu_name[20];

extern FLONUM_TYPE generic_floating_point_number;

/* Return TRUE if no cpu was selected on the command line.  */
static bfd_boolean
no_cpu_selected (void)
{
  return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none);
}

#ifdef OBJ_ELF
# ifdef EABI_DEFAULT
static int meabi_flags = EABI_DEFAULT;
# else
static int meabi_flags = EF_ARM_EABI_UNKNOWN;
# endif

static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];

bfd_boolean
arm_is_eabi (void)
{
  return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
}
#endif

#ifdef OBJ_ELF
/* Pre-defined "_GLOBAL_OFFSET_TABLE_"	*/
symbolS * GOT_symbol;
#endif

/* 0: assemble for ARM,
   1: assemble for Thumb,
   2: assemble for Thumb even though target CPU does not support thumb
      instructions.  */
static int thumb_mode = 0;
/* A value distinct from the possible values for thumb_mode that we
   can use to record whether thumb_mode has been copied into the
   tc_frag_data field of a frag.  */
#define MODE_RECORDED (1 << 4)

/* Specifies the intrinsic IT insn behavior mode.  */
enum implicit_it_mode
{
  IMPLICIT_IT_MODE_NEVER  = 0x00,
  IMPLICIT_IT_MODE_ARM    = 0x01,
  IMPLICIT_IT_MODE_THUMB  = 0x02,
  IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
};
static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;

/* If unified_syntax is true, we are processing the new unified
   ARM/Thumb syntax.  Important differences from the old ARM mode:

     - Immediate operands do not require a # prefix.
     - Conditional affixes always appear at the end of the
       instruction.  (For backward compatibility, those instructions
       that formerly had them in the middle, continue to accept them
       there.)
     - The IT instruction may appear, and if it does is validated
       against subsequent conditional affixes.  It does not generate
       machine code.

   Important differences from the old Thumb mode:

     - Immediate operands do not require a # prefix.
     - Most of the V6T2 instructions are only available in unified mode.
     - The .N and .W suffixes are recognized and honored (it is an error
       if they cannot be honored).
     - All instructions set the flags if and only if they have an 's' affix.
     - Conditional affixes may be used.  They are validated against
       preceding IT instructions.  Unlike ARM mode, you cannot use a
       conditional affix except in the scope of an IT instruction.  */
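/* Illustrative example (not exhaustive): in unified syntax an immediate may
   be written "mov r0, 1" as well as "mov r0, #1", and a conditional byte
   load spells the condition last ("ldrbeq r0, [r1]") where the old ARM
   syntax put it in the middle ("ldreqb r0, [r1]").  */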

static bfd_boolean unified_syntax = FALSE;

/* An immediate operand can start with #, and ld*, st*, pld operands
   can contain [ and ].  We need to tell APP not to elide whitespace
   before a [, which can appear as the first operand for pld.
   Likewise, a { can appear as the first operand for push, pop, vld*, etc.  */
const char arm_symbol_chars[] = "#[]{}";

enum neon_el_type
{
  NT_invtype,
  NT_untyped,
  NT_integer,
  NT_float,
  NT_poly,
  NT_signed,
  NT_unsigned
};

struct neon_type_el
{
  enum neon_el_type type;
  unsigned size;
};

#define NEON_MAX_TYPE_ELS 4

struct neon_type
{
  struct neon_type_el el[NEON_MAX_TYPE_ELS];
  unsigned elems;
};

enum it_instruction_type
{
   OUTSIDE_IT_INSN,
   INSIDE_IT_INSN,
   INSIDE_IT_LAST_INSN,
   IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
			      if inside, should be the last one.  */
   NEUTRAL_IT_INSN,        /* This could be either inside or outside,
			      i.e. BKPT and NOP.  */
   IT_INSN                 /* The IT insn has been parsed.  */
};

/* The maximum number of operands we need.  */
#define ARM_IT_MAX_OPERANDS 6

struct arm_it
{
  const char *	error;
  unsigned long instruction;
  int		size;
  int		size_req;
  int		cond;
  /* "uncond_value" is set to the value in place of the conditional field in
     unconditional versions of the instruction, or -1 if nothing is
     appropriate.  */
  int		uncond_value;
  struct neon_type vectype;
  /* This does not indicate an actual NEON instruction, only that
     the mnemonic accepts neon-style type suffixes.  */
  int		is_neon;
  /* Set to the opcode if the instruction needs relaxation.
     Zero if the instruction is not relaxed.  */
  unsigned long	relax;
  struct
  {
    bfd_reloc_code_real_type type;
    expressionS		     exp;
    int			     pc_rel;
  } reloc;

  enum it_instruction_type it_insn_type;

  struct
  {
    unsigned reg;
    signed int imm;
    struct neon_type_el vectype;
    unsigned present	: 1;  /* Operand present.  */
    unsigned isreg	: 1;  /* Operand was a register.  */
    unsigned immisreg	: 1;  /* .imm field is a second register.  */
    unsigned isscalar   : 1;  /* Operand is a (Neon) scalar.  */
    unsigned immisalign : 1;  /* Immediate is an alignment specifier.  */
    unsigned immisfloat : 1;  /* Immediate was parsed as a float.  */
    /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
       instructions. This allows us to disambiguate ARM <-> vector insns.  */
    unsigned regisimm   : 1;  /* 64-bit immediate, reg forms high 32 bits.  */
    unsigned isvec      : 1;  /* Is a single, double or quad VFP/Neon reg.  */
    unsigned isquad     : 1;  /* Operand is Neon quad-precision register.  */
    unsigned issingle   : 1;  /* Operand is VFP single-precision register.  */
    unsigned hasreloc	: 1;  /* Operand has relocation suffix.  */
    unsigned writeback	: 1;  /* Operand has trailing !  */
    unsigned preind	: 1;  /* Preindexed address.  */
    unsigned postind	: 1;  /* Postindexed address.  */
    unsigned negative	: 1;  /* Index register was negated.  */
    unsigned shifted	: 1;  /* Shift applied to operation.  */
    unsigned shift_kind : 3;  /* Shift operation (enum shift_kind).  */
  } operands[ARM_IT_MAX_OPERANDS];
};

static struct arm_it inst;

#define NUM_FLOAT_VALS 8

const char * fp_const[] =
{
  "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
};

/* Number of littlenums required to hold an extended precision number.	*/
#define MAX_LITTLENUMS 6

LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];

#define FAIL	(-1)
#define SUCCESS (0)

#define SUFF_S 1
#define SUFF_D 2
#define SUFF_E 3
#define SUFF_P 4

#define CP_T_X	 0x00008000
#define CP_T_Y	 0x00400000

#define CONDS_BIT	 0x00100000
#define LOAD_BIT	 0x00100000

#define DOUBLE_LOAD_FLAG 0x00000001

struct asm_cond
{
  const char *	 template_name;
  unsigned long  value;
};

#define COND_ALWAYS 0xE

struct asm_psr
{
  const char *   template_name;
  unsigned long  field;
};

struct asm_barrier_opt
{
  const char *    template_name;
  unsigned long   value;
  const arm_feature_set arch;
};

/* The bit that distinguishes CPSR and SPSR.  */
#define SPSR_BIT   (1 << 22)

/* The individual PSR flag bits.  */
#define PSR_c	(1 << 16)
#define PSR_x	(1 << 17)
#define PSR_s	(1 << 18)
#define PSR_f	(1 << 19)
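
/* These select which parts of the PSR an MSR instruction writes; for
   example, the field suffix "fc" (as in "msr CPSR_fc, r0") corresponds
   to PSR_f | PSR_c.  */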

struct reloc_entry
{
  const char *                    name;
  bfd_reloc_code_real_type  reloc;
};

enum vfp_reg_pos
{
  VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
  VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
};

enum vfp_ldstm_type
{
  VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
};

/* Bits for DEFINED field in neon_typed_alias.  */
#define NTA_HASTYPE  1
#define NTA_HASINDEX 2

struct neon_typed_alias
{
  unsigned char        defined;
  unsigned char        index;
  struct neon_type_el  eltype;
};

/* ARM register categories.  This includes coprocessor numbers and various
   architecture extensions' registers.	*/
enum arm_reg_type
{
  REG_TYPE_RN,
  REG_TYPE_CP,
  REG_TYPE_CN,
  REG_TYPE_FN,
  REG_TYPE_VFS,
  REG_TYPE_VFD,
  REG_TYPE_NQ,
  REG_TYPE_VFSD,
  REG_TYPE_NDQ,
  REG_TYPE_NSDQ,
  REG_TYPE_VFC,
  REG_TYPE_MVF,
  REG_TYPE_MVD,
  REG_TYPE_MVFX,
  REG_TYPE_MVDX,
  REG_TYPE_MVAX,
  REG_TYPE_DSPSC,
  REG_TYPE_MMXWR,
  REG_TYPE_MMXWC,
  REG_TYPE_MMXWCG,
  REG_TYPE_XSCALE,
  REG_TYPE_RNB
};

/* Structure for a hash table entry for a register.
   If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
   information which states whether a vector type or index is specified (for a
   register alias created with .dn or .qn). Otherwise NEON should be NULL.  */
struct reg_entry
{
  const char *               name;
  unsigned int               number;
  unsigned char              type;
  unsigned char              builtin;
  struct neon_typed_alias *  neon;
};

/* Diagnostics used when we don't get a register of the expected type.	*/
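/* The table is indexed by enum arm_reg_type, so the order of its entries
   must match that enum (see e.g. reg_expected_msgs[REG_TYPE_RN] below).  */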
const char * const reg_expected_msgs[] =
{
  N_("ARM register expected"),
  N_("bad or missing co-processor number"),
  N_("co-processor register expected"),
  N_("FPA register expected"),
  N_("VFP single precision register expected"),
  N_("VFP/Neon double precision register expected"),
  N_("Neon quad precision register expected"),
  N_("VFP single or double precision register expected"),
  N_("Neon double or quad precision register expected"),
  N_("VFP single, double or Neon quad precision register expected"),
  N_("VFP system register expected"),
  N_("Maverick MVF register expected"),
  N_("Maverick MVD register expected"),
  N_("Maverick MVFX register expected"),
  N_("Maverick MVDX register expected"),
  N_("Maverick MVAX register expected"),
  N_("Maverick DSPSC register expected"),
  N_("iWMMXt data register expected"),
  N_("iWMMXt control register expected"),
  N_("iWMMXt scalar register expected"),
  N_("XScale accumulator register expected"),
};

/* Some well known registers that we refer to directly elsewhere.  */
#define REG_R12	12
#define REG_SP	13
#define REG_LR	14
#define REG_PC	15

/* ARM instructions take 4 bytes in the object file, Thumb instructions
   take 2:  */
#define INSN_SIZE	4

struct asm_opcode
{
  /* Basic string to match.  */
  const char * template_name;

  /* Parameters to instruction.	 */
  unsigned int operands[8];

  /* Conditional tag - see opcode_lookup.  */
  unsigned int tag : 4;

  /* Basic instruction code.  */
  unsigned int avalue : 28;

  /* Thumb-format instruction code.  */
  unsigned int tvalue;

  /* Which architecture variant provides this instruction.  */
  const arm_feature_set * avariant;
  const arm_feature_set * tvariant;

  /* Function to call to encode instruction in ARM format.  */
  void (* aencode) (void);

  /* Function to call to encode instruction in Thumb format.  */
  void (* tencode) (void);
};

/* Defines for various bits that we will want to toggle.  */
#define INST_IMMEDIATE	0x02000000
#define OFFSET_REG	0x02000000
#define HWOFFSET_IMM	0x00400000
#define SHIFT_BY_REG	0x00000010
#define PRE_INDEX	0x01000000
#define INDEX_UP	0x00800000
#define WRITE_BACK	0x00200000
#define LDM_TYPE_2_OR_3	0x00400000
#define CPSI_MMOD	0x00020000

#define LITERAL_MASK	0xf000f000
#define OPCODE_MASK	0xfe1fffff
#define V4_STR_BIT	0x00000020
#define VLDR_VMOV_SAME	0x0040f000

#define T2_SUBS_PC_LR	0xf3de8f00

#define DATA_OP_SHIFT	21
#define SBIT_SHIFT	20

#define T2_OPCODE_MASK	0xfe1fffff
#define T2_DATA_OP_SHIFT 21
#define T2_SBIT_SHIFT	 20

#define A_COND_MASK         0xf0000000
#define A_PUSH_POP_OP_MASK  0x0fff0000

/* Opcodes for pushing/popping registers to/from the stack.  */
#define A1_OPCODE_PUSH    0x092d0000
#define A2_OPCODE_PUSH    0x052d0004
#define A2_OPCODE_POP     0x049d0004

/* Codes to distinguish the arithmetic instructions.  */
#define OPCODE_AND	0
#define OPCODE_EOR	1
#define OPCODE_SUB	2
#define OPCODE_RSB	3
#define OPCODE_ADD	4
#define OPCODE_ADC	5
#define OPCODE_SBC	6
#define OPCODE_RSC	7
#define OPCODE_TST	8
#define OPCODE_TEQ	9
#define OPCODE_CMP	10
#define OPCODE_CMN	11
#define OPCODE_ORR	12
#define OPCODE_MOV	13
#define OPCODE_BIC	14
#define OPCODE_MVN	15

#define T2_OPCODE_AND	0
#define T2_OPCODE_BIC	1
#define T2_OPCODE_ORR	2
#define T2_OPCODE_ORN	3
#define T2_OPCODE_EOR	4
#define T2_OPCODE_ADD	8
#define T2_OPCODE_ADC	10
#define T2_OPCODE_SBC	11
#define T2_OPCODE_SUB	13
#define T2_OPCODE_RSB	14

#define T_OPCODE_MUL 0x4340
#define T_OPCODE_TST 0x4200
#define T_OPCODE_CMN 0x42c0
#define T_OPCODE_NEG 0x4240
#define T_OPCODE_MVN 0x43c0

#define T_OPCODE_ADD_R3	0x1800
#define T_OPCODE_SUB_R3 0x1a00
#define T_OPCODE_ADD_HI 0x4400
#define T_OPCODE_ADD_ST 0xb000
#define T_OPCODE_SUB_ST 0xb080
#define T_OPCODE_ADD_SP 0xa800
#define T_OPCODE_ADD_PC 0xa000
#define T_OPCODE_ADD_I8 0x3000
#define T_OPCODE_SUB_I8 0x3800
#define T_OPCODE_ADD_I3 0x1c00
#define T_OPCODE_SUB_I3 0x1e00

#define T_OPCODE_ASR_R	0x4100
#define T_OPCODE_LSL_R	0x4080
#define T_OPCODE_LSR_R	0x40c0
#define T_OPCODE_ROR_R	0x41c0
#define T_OPCODE_ASR_I	0x1000
#define T_OPCODE_LSL_I	0x0000
#define T_OPCODE_LSR_I	0x0800

#define T_OPCODE_MOV_I8	0x2000
#define T_OPCODE_CMP_I8 0x2800
#define T_OPCODE_CMP_LR 0x4280
#define T_OPCODE_MOV_HR 0x4600
#define T_OPCODE_CMP_HR 0x4500

#define T_OPCODE_LDR_PC 0x4800
#define T_OPCODE_LDR_SP 0x9800
#define T_OPCODE_STR_SP 0x9000
#define T_OPCODE_LDR_IW 0x6800
#define T_OPCODE_STR_IW 0x6000
#define T_OPCODE_LDR_IH 0x8800
#define T_OPCODE_STR_IH 0x8000
#define T_OPCODE_LDR_IB 0x7800
#define T_OPCODE_STR_IB 0x7000
#define T_OPCODE_LDR_RW 0x5800
#define T_OPCODE_STR_RW 0x5000
#define T_OPCODE_LDR_RH 0x5a00
#define T_OPCODE_STR_RH 0x5200
#define T_OPCODE_LDR_RB 0x5c00
#define T_OPCODE_STR_RB 0x5400

#define T_OPCODE_PUSH	0xb400
#define T_OPCODE_POP	0xbc00

#define T_OPCODE_BRANCH 0xe000

#define THUMB_SIZE	2	/* Size of thumb instruction.  */
#define THUMB_PP_PC_LR 0x0100
#define THUMB_LOAD_BIT 0x0800
#define THUMB2_LOAD_BIT 0x00100000

#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_SP          _("r13 not allowed here")
#define BAD_PC		_("r15 not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
#define BAD_ADDR_MODE   _("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
#define BAD_FPU		_("selected FPU does not support instruction")
#define BAD_OUT_IT 	_("thumb conditional instruction should be in IT block")
#define BAD_IT_COND	_("incorrect condition in IT block")
#define BAD_IT_IT 	_("IT falling in the range of a previous IT block")
#define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
#define BAD_PC_ADDRESSING \
	_("cannot use register index with PC-relative addressing")
#define BAD_PC_WRITEBACK \
	_("cannot use writeback with PC-relative addressing")
#define BAD_RANGE	_("branch out of range")
#define BAD_FP16	_("selected processor does not support fp16 instruction")
#define UNPRED_REG(R)	_("using " R " results in unpredictable behaviour")
#define THUMB1_RELOC_ONLY  _("relocation valid in thumb1 code only")

static struct hash_control * arm_ops_hsh;
static struct hash_control * arm_cond_hsh;
static struct hash_control * arm_shift_hsh;
static struct hash_control * arm_psr_hsh;
static struct hash_control * arm_v7m_psr_hsh;
static struct hash_control * arm_reg_hsh;
static struct hash_control * arm_reloc_hsh;
static struct hash_control * arm_barrier_opt_hsh;

/* Stuff needed to resolve the label ambiguity
   As:
     ...
     label:   <insn>
   may differ from:
     ...
     label:
	      <insn>  */

symbolS *  last_label_seen;
static int label_is_thumb_function_name = FALSE;

/* Literal pool structure.  Held on a per-section
   and per-sub-section basis.  */

#define MAX_LITERAL_POOL_SIZE 1024
typedef struct literal_pool
{
  expressionS	         literals [MAX_LITERAL_POOL_SIZE];
  unsigned int	         next_free_entry;
  unsigned int	         id;
  symbolS *	         symbol;
  segT		         section;
  subsegT	         sub_section;
#ifdef OBJ_ELF
  struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
#endif
  struct literal_pool *  next;
  unsigned int		 alignment;
} literal_pool;

/* Pointer to a linked list of literal pools.  */
literal_pool * list_of_pools = NULL;

typedef enum asmfunc_states
{
  OUTSIDE_ASMFUNC,
  WAITING_ASMFUNC_NAME,
  WAITING_ENDASMFUNC
} asmfunc_states;

static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;

#ifdef OBJ_ELF
#  define now_it seg_info (now_seg)->tc_segment_info_data.current_it
#else
static struct current_it now_it;
#endif

static inline int
now_it_compatible (int cond)
{
  return (cond & ~1) == (now_it.cc & ~1);
}

static inline int
conditional_insn (void)
{
  return inst.cond != COND_ALWAYS;
}

static int in_it_block (void);

static int handle_it_state (void);

static void force_automatic_it_block_close (void);

static void it_fsm_post_encode (void);

#define set_it_insn_type(type)			\
  do						\
    {						\
      inst.it_insn_type = type;			\
      if (handle_it_state () == FAIL)		\
	return;					\
    }						\
  while (0)

#define set_it_insn_type_nonvoid(type, failret) \
  do						\
    {                                           \
      inst.it_insn_type = type;			\
      if (handle_it_state () == FAIL)		\
	return failret;				\
    }						\
  while(0)

#define set_it_insn_type_last()				\
  do							\
    {							\
      if (inst.cond == COND_ALWAYS)			\
	set_it_insn_type (IF_INSIDE_IT_LAST_INSN);	\
      else						\
	set_it_insn_type (INSIDE_IT_LAST_INSN);		\
    }							\
  while (0)

/* Pure syntax.	 */

/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.	 */
char arm_comment_chars[] = "@";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.	*/
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

char arm_line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.	*/
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
/* As in 0f12.456  */
/* or	 0d1.2345e12  */

const char FLT_CHARS[] = "rRsSfFdDxXeEpP";

/* Prefix characters that indicate the start of an immediate
   value.  */
#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')

/* Separator character handling.  */

#define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)

static inline int
skip_past_char (char ** str, char c)
{
  /* PR gas/14987: Allow for whitespace before the expected character.  */
  skip_whitespace (*str);

  if (**str == c)
    {
      (*str)++;
      return SUCCESS;
    }
  else
    return FAIL;
}

#define skip_past_comma(str) skip_past_char (str, ',')

/* Arithmetic expressions (possibly involving symbols).	 */

/* Return TRUE if anything in the expression is a bignum.  */

static int
walk_no_bignums (symbolS * sp)
{
  if (symbol_get_value_expression (sp)->X_op == O_big)
    return 1;

  if (symbol_get_value_expression (sp)->X_add_symbol)
    {
      return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
	      || (symbol_get_value_expression (sp)->X_op_symbol
		  && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
    }

  return 0;
}

static int in_my_get_expression = 0;

/* Third argument to my_get_expression.	 */
#define GE_NO_PREFIX 0
#define GE_IMM_PREFIX 1
#define GE_OPT_PREFIX 2
/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
#define GE_OPT_PREFIX_BIG 3
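
/* Roughly: GE_NO_PREFIX means no # (or $) prefix is expected, GE_IMM_PREFIX
   means the prefix is required, and GE_OPT_PREFIX makes it optional.  In
   unified syntax all prefixes are treated as optional (see below).  */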

static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;
  segT	 seg;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
		  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default: abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = 1;
  seg = expression (ep);
  in_my_get_expression = 0;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
    {
      /* We found a bad or missing expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));
      return 1;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      inst.error = _("bad segment");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }
#else
  (void) seg;
#endif

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.	 Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return 0;
}

/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.

   Note that fp constants aren't represented in the normal way on the ARM.
   In big endian mode, things are as expected.	However, in little endian
   mode fp constants are big-endian word-wise, and little-endian byte-wise
   within the words.  For example, (double) 1.1 in big endian mode is
   the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
   the byte sequence 99 99 f1 3f 9a 99 99 99.

   ??? The format of 12 byte floats is uncertain according to gcc's arm.h.  */

const char *
md_atof (int type, char * litP, int * sizeP)
{
  int prec;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *t;
  int i;

  switch (type)
    {
    case 'f':
    case 'F':
    case 's':
    case 'S':
      prec = 2;
      break;

    case 'd':
    case 'D':
    case 'r':
    case 'R':
      prec = 4;
      break;

    case 'x':
    case 'X':
      prec = 5;
      break;

    case 'p':
    case 'P':
      prec = 5;
      break;

    default:
      *sizeP = 0;
      return _("Unrecognized or unsupported floating point constant");
    }

  t = atof_ieee (input_line_pointer, type, words);
  if (t)
    input_line_pointer = t;
  *sizeP = prec * sizeof (LITTLENUM_TYPE);

  if (target_big_endian)
    {
      for (i = 0; i < prec; i++)
	{
	  md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	  litP += sizeof (LITTLENUM_TYPE);
	}
    }
  else
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	for (i = prec - 1; i >= 0; i--)
	  {
	    md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	    litP += sizeof (LITTLENUM_TYPE);
	  }
      else
	/* For a 4 byte float the order of elements in `words' is 1 0.
	   For an 8 byte float the order is 1 0 3 2.  */
	for (i = 0; i < prec; i += 2)
	  {
	    md_number_to_chars (litP, (valueT) words[i + 1],
				sizeof (LITTLENUM_TYPE));
	    md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
				(valueT) words[i], sizeof (LITTLENUM_TYPE));
	    litP += 2 * sizeof (LITTLENUM_TYPE);
	  }
    }

  return NULL;
}

/* We handle all bad expressions here, so that we can report the faulty
   instruction in the error message.  */
void
md_operand (expressionS * exp)
{
  if (in_my_get_expression)
    exp->X_op = O_illegal;
}

/* Immediate values.  */

/* Generic immediate-value read function for use in directives.
   Accepts anything that 'expression' can fold to a constant.
   *val receives the number.  */
#ifdef OBJ_ELF
static int
immediate_for_directive (int *val)
{
  expressionS exp;
  exp.X_op = O_illegal;

  if (is_immediate_prefix (*input_line_pointer))
    {
      input_line_pointer++;
      expression (&exp);
    }

  if (exp.X_op != O_constant)
    {
      as_bad (_("expected #constant"));
      ignore_rest_of_line ();
      return FAIL;
    }
  *val = exp.X_add_number;
  return SUCCESS;
}
#endif

/* Register parsing.  */

/* Generic register parser.  CCP points to what should be the
   beginning of a register name.  If it is indeed a valid register
   name, advance CCP over it and return the reg_entry structure;
   otherwise return NULL.  Does not issue diagnostics.	*/

static struct reg_entry *
arm_reg_parse_multi (char **ccp)
{
  char *start = *ccp;
  char *p;
  struct reg_entry *reg;

  skip_whitespace (start);

#ifdef REGISTER_PREFIX
  if (*start != REGISTER_PREFIX)
    return NULL;
  start++;
#endif
#ifdef OPTIONAL_REGISTER_PREFIX
  if (*start == OPTIONAL_REGISTER_PREFIX)
    start++;
#endif

  p = start;
  if (!ISALPHA (*p) || !is_name_beginner (*p))
    return NULL;

  do
    p++;
  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');

  reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);

  if (!reg)
    return NULL;

  *ccp = p;
  return reg;
}

static int
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
		    enum arm_reg_type type)
{
  /* Alternative syntaxes are accepted for a few register classes.  */
  switch (type)
    {
    case REG_TYPE_MVF:
    case REG_TYPE_MVD:
    case REG_TYPE_MVFX:
    case REG_TYPE_MVDX:
      /* Generic coprocessor register names are allowed for these.  */
      if (reg && reg->type == REG_TYPE_CN)
	return reg->number;
      break;

    case REG_TYPE_CP:
      /* For backward compatibility, a bare number is valid here.  */
      {
	unsigned long processor = strtoul (start, ccp, 10);
	if (*ccp != start && processor <= 15)
	  return processor;
      }
      /* Fall through.  */

    case REG_TYPE_MMXWC:
      /* WC includes WCG.  ??? I'm not sure this is true for all
	 instructions that take WC registers.  */
      if (reg && reg->type == REG_TYPE_MMXWCG)
	return reg->number;
      break;

    default:
      break;
    }

  return FAIL;
}

/* As arm_reg_parse_multi, but the register must be of type TYPE, and the
   return value is the register number or FAIL.  */

static int
arm_reg_parse (char **ccp, enum arm_reg_type type)
{
  char *start = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (ccp);
  int ret;

  /* Do not allow a scalar (reg+index) to parse as a register.  */
  if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
    return FAIL;

  if (reg && reg->type == type)
    return reg->number;

  if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
    return ret;

  *ccp = start;
  return FAIL;
}

/* Parse a Neon type specifier. *STR should point at the leading '.'
   character. Does no verification at this stage that the type fits the opcode
   properly. E.g.,

     .i32.i32.s16
     .s32.f32
     .u16

   Can all be legally parsed by this function.

   Fills in neon_type struct pointer with parsed information, and updates STR
   to point after the parsed type specifier. Returns SUCCESS if this was a legal
   type, FAIL if not.  */

static int
parse_neon_type (struct neon_type *type, char **str)
{
  char *ptr = *str;

  if (type)
    type->elems = 0;

  while (type->elems < NEON_MAX_TYPE_ELS)
    {
      enum neon_el_type thistype = NT_untyped;
      unsigned thissize = -1u;

      if (*ptr != '.')
	break;

      ptr++;

      /* Just a size without an explicit type.  */
      if (ISDIGIT (*ptr))
	goto parsesize;

      switch (TOLOWER (*ptr))
	{
	case 'i': thistype = NT_integer; break;
	case 'f': thistype = NT_float; break;
	case 'p': thistype = NT_poly; break;
	case 's': thistype = NT_signed; break;
	case 'u': thistype = NT_unsigned; break;
	case 'd':
	  thistype = NT_float;
	  thissize = 64;
	  ptr++;
	  goto done;
	default:
	  as_bad (_("unexpected character `%c' in type specifier"), *ptr);
	  return FAIL;
	}

      ptr++;

      /* .f is an abbreviation for .f32.  */
      if (thistype == NT_float && !ISDIGIT (*ptr))
	thissize = 32;
      else
	{
	parsesize:
	  thissize = strtoul (ptr, &ptr, 10);

	  if (thissize != 8 && thissize != 16 && thissize != 32
	      && thissize != 64)
	    {
	      as_bad (_("bad size %d in type specifier"), thissize);
	      return FAIL;
	    }
	}

      done:
      if (type)
	{
	  type->el[type->elems].type = thistype;
	  type->el[type->elems].size = thissize;
	  type->elems++;
	}
    }

  /* Empty/missing type is not a successful parse.  */
  if (type->elems == 0)
    return FAIL;

  *str = ptr;

  return SUCCESS;
}

/* Errors may be set multiple times during parsing or bit encoding
   (particularly in the Neon bits), but usually the earliest error which is set
   will be the most meaningful. Avoid overwriting it with later (cascading)
   errors by calling this function.  */

static void
first_error (const char *err)
{
  if (!inst.error)
    inst.error = err;
}

/* Parse a single type, e.g. ".s32", leading period included.  */
static int
parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
{
  char *str = *ccp;
  struct neon_type optype;

  if (*str == '.')
    {
      if (parse_neon_type (&optype, &str) == SUCCESS)
	{
	  if (optype.elems == 1)
	    *vectype = optype.el[0];
	  else
	    {
	      first_error (_("only one type should be specified for operand"));
	      return FAIL;
	    }
	}
      else
	{
	  first_error (_("vector type expected"));
	  return FAIL;
	}
    }
  else
    return FAIL;

  *ccp = str;

  return SUCCESS;
}

/* Special meanings for indices (which have a range of 0-7), which will fit into
   a 4-bit integer.  */

#define NEON_ALL_LANES		15
#define NEON_INTERLEAVE_LANES	14
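
/* For example, a scalar written "d0[]" (an empty index, meaning all lanes)
   is recorded with an index of NEON_ALL_LANES; see
   parse_typed_reg_or_scalar below.  */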

/* Parse either a register or a scalar, with an optional type. Return the
   register number, and optionally fill in the actual type of the register
   when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
   type/index information in *TYPEINFO.  */

static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions.  */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  if (type != reg->type)
    return FAIL;

  if (reg->neon)
    atype = *reg->neon;

  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD)
	{
	  first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}

/* Like arm_reg_parse, but allow the following extra features:
    - If RTYPE is non-zero, return the (possibly restricted) type of the
      register (e.g. Neon double or quad reg when either has been requested).
    - If this is a Neon vector type with additional type information, fill
      in the struct pointed to by VECTYPE (if non-NULL).
   This function will fault on encountering a scalar.  */

static int
arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
		     enum arm_reg_type *rtype, struct neon_type_el *vectype)
{
  struct neon_typed_alias atype;
  char *str = *ccp;
  int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);

  if (reg == FAIL)
    return FAIL;

  /* Do not allow regname(... to parse as a register.  */
  if (*str == '(')
    return FAIL;

  /* Do not allow a scalar (reg+index) to parse as a register.  */
  if ((atype.defined & NTA_HASINDEX) != 0)
    {
      first_error (_("register operand expected, but got scalar"));
      return FAIL;
    }

  if (vectype)
    *vectype = atype.eltype;

  *ccp = str;

  return reg;
}

#define NEON_SCALAR_REG(X)	((X) >> 4)
#define NEON_SCALAR_INDEX(X)	((X) & 15)
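
/* parse_scalar (below) packs a scalar as (register << 4) | lane, so these
   macros recover the register number and the lane index respectively.  */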

/* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
   have enough information to be able to do a good job bounds-checking. So, we
   just do easy checks here, and do further checks later.  */

static int
parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
{
  int reg;
  char *str = *ccp;
  struct neon_typed_alias atype;

  reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);

  if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
    return FAIL;

  if (atype.index == NEON_ALL_LANES)
    {
      first_error (_("scalar must have an index"));
      return FAIL;
    }
  else if (atype.index >= 64 / elsize)
    {
      first_error (_("scalar index out of range"));
      return FAIL;
    }

  if (type)
    *type = atype.eltype;

  *ccp = str;

  return reg * 16 + atype.index;
}

/* Parse an ARM register list.  Returns the bitmask, or FAIL.  */

static long
parse_reg_list (char ** strp)
{
  char * str = * strp;
  long	 range = 0;
  int	 another_range;

  /* We come back here if we get ranges concatenated by '+' or '|'.  */
  do
    {
      skip_whitespace (str);

      another_range = 0;

      if (*str == '{')
	{
	  int in_range = 0;
	  int cur_reg = -1;

	  str++;
	  do
	    {
	      int reg;

	      if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
		  return FAIL;
		}

	      if (in_range)
		{
		  int i;

		  if (reg <= cur_reg)
		    {
		      first_error (_("bad range in register list"));
		      return FAIL;
		    }

		  for (i = cur_reg + 1; i < reg; i++)
		    {
		      if (range & (1 << i))
			as_tsktsk
			  (_("Warning: duplicated register (r%d) in register list"),
			   i);
		      else
			range |= 1 << i;
		    }
		  in_range = 0;
		}

	      if (range & (1 << reg))
		as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
			   reg);
	      else if (reg <= cur_reg)
		as_tsktsk (_("Warning: register range not in ascending order"));

	      range |= 1 << reg;
	      cur_reg = reg;
	    }
	  while (skip_past_comma (&str) != FAIL
		 || (in_range = 1, *str++ == '-'));
	  str--;

	  if (skip_past_char (&str, '}') == FAIL)
	    {
	      first_error (_("missing `}'"));
	      return FAIL;
	    }
	}
      else
	{
	  expressionS exp;

	  if (my_get_expression (&exp, &str, GE_NO_PREFIX))
	    return FAIL;

	  if (exp.X_op == O_constant)
	    {
	      if (exp.X_add_number
		  != (exp.X_add_number & 0x0000ffff))
		{
		  inst.error = _("invalid register mask");
		  return FAIL;
		}

	      if ((range & exp.X_add_number) != 0)
		{
		  int regno = range & exp.X_add_number;

		  regno &= -regno;
		  regno = (1 << regno) - 1;
		  as_tsktsk
		    (_("Warning: duplicated register (r%d) in register list"),
		     regno);
		}

	      range |= exp.X_add_number;
	    }
	  else
	    {
	      if (inst.reloc.type != 0)
		{
		  inst.error = _("expression too complex");
		  return FAIL;
		}

	      memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
	      inst.reloc.type = BFD_RELOC_ARM_MULTI;
	      inst.reloc.pc_rel = 0;
	    }
	}

      if (*str == '|' || *str == '+')
	{
	  str++;
	  another_range = 1;
	}
    }
  while (another_range);

  *strp = str;
  return range;
}
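
/* For example, parsing "{r0-r3, r14}" with parse_reg_list yields the
   bitmask 0x400f (bits 0-3 and bit 14 set).  */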

/* Types of registers in a list.  */

enum reg_list_els
{
  REGLIST_VFP_S,
  REGLIST_VFP_D,
  REGLIST_NEON_D
};

/* Parse a VFP register list.  If the string is invalid return FAIL.
   Otherwise return the number of registers, and set PBASE to the first
   register.  Parses registers of type ETYPE.
   If REGLIST_NEON_D is used, several syntax enhancements are enabled:
     - Q registers can be used to specify pairs of D registers
     - { } can be omitted from around a singleton register list
	 FIXME: This is not implemented, as it would require backtracking in
	 some cases, e.g.:
	   vtbl.8 d3,d4,d5
	 This could be done (the meaning isn't really ambiguous), but doesn't
	 fit in well with the current parsing framework.
     - 32 D registers may be used (also true for VFPv3).
   FIXME: Types are ignored in these register lists, which is probably a
   bug.  */

static int
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
{
  char *str = *ccp;
  int base_reg;
  int new_base;
  enum arm_reg_type regtype = (enum arm_reg_type) 0;
  int max_regs = 0;
  int count = 0;
  int warned = 0;
  unsigned long mask = 0;
  int i;

  if (skip_past_char (&str, '{') == FAIL)
    {
      inst.error = _("expecting {");
      return FAIL;
    }

  switch (etype)
    {
    case REGLIST_VFP_S:
      regtype = REG_TYPE_VFS;
      max_regs = 32;
      break;

    case REGLIST_VFP_D:
      regtype = REG_TYPE_VFD;
      break;

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;
      break;
    }

  if (etype != REGLIST_VFP_S)
    {
      /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  max_regs = 32;
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	max_regs = 16;
    }

  base_reg = max_regs;

  do
    {
      int setmask = 1, addregs = 1;

      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);

      if (new_base == FAIL)
	{
	  first_error (_(reg_expected_msgs[regtype]));
	  return FAIL;
	}

      if (new_base >= max_regs)
	{
	  first_error (_("register out of range in list"));
	  return FAIL;
	}

      /* Note: a value of 2 * n is returned for the register Q<n>.  */
      if (regtype == REG_TYPE_NQ)
	{
	  setmask = 3;
	  addregs = 2;
	}

      if (new_base < base_reg)
	base_reg = new_base;

      if (mask & (setmask << new_base))
	{
	  first_error (_("invalid register list"));
	  return FAIL;
	}

      if ((mask >> new_base) != 0 && ! warned)
	{
	  as_tsktsk (_("register list not in ascending order"));
	  warned = 1;
	}

      mask |= setmask << new_base;
      count += addregs;

      if (*str == '-') /* We have the start of a range expression */
	{
	  int high_range;

	  str++;

	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
	      == FAIL)
	    {
	      inst.error = gettext (reg_expected_msgs[regtype]);
	      return FAIL;
	    }

	  if (high_range >= max_regs)
	    {
	      first_error (_("register out of range in list"));
	      return FAIL;
	    }

	  if (regtype == REG_TYPE_NQ)
	    high_range = high_range + 1;

	  if (high_range <= new_base)
	    {
	      inst.error = _("register range not in ascending order");
	      return FAIL;
	    }

	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
	    {
	      if (mask & (setmask << new_base))
		{
		  inst.error = _("invalid register list");
		  return FAIL;
		}

	      mask |= setmask << new_base;
	      count += addregs;
	    }
	}
    }
  while (skip_past_comma (&str) != FAIL);

  str++;

  /* Sanity check -- should have raised a parse error above.  */
  if (count == 0 || count > max_regs)
    abort ();

  *pbase = base_reg;

  /* Final test -- the registers must be consecutive.  */
  mask >>= base_reg;
  for (i = 0; i < count; i++)
    {
      if ((mask & (1u << i)) == 0)
	{
	  inst.error = _("non-contiguous register range");
	  return FAIL;
	}
    }
1957
1958  *ccp = str;
1959
1960  return count;
1961}
1962
1963/* True if two alias types are the same.  */
1964
1965static bfd_boolean
1966neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1967{
1968  if (!a && !b)
1969    return TRUE;
1970
1971  if (!a || !b)
1972    return FALSE;
1973
1974  if (a->defined != b->defined)
1975    return FALSE;
1976
1977  if ((a->defined & NTA_HASTYPE) != 0
1978      && (a->eltype.type != b->eltype.type
1979	  || a->eltype.size != b->eltype.size))
1980    return FALSE;
1981
1982  if ((a->defined & NTA_HASINDEX) != 0
1983      && (a->index != b->index))
1984    return FALSE;
1985
1986  return TRUE;
1987}
1988
1989/* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1990   The base register is put in *PBASE.
1991   The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1992   the return value.
1993   The register stride (minus one) is put in bit 4 of the return value.
1994   Bits [6:5] encode the list length (minus one).
1995   The type of the list elements is put in *ELTYPE, if non-NULL.  */
1996
1997#define NEON_LANE(X)		((X) & 0xf)
1998#define NEON_REG_STRIDE(X)	((((X) >> 4) & 1) + 1)
1999#define NEON_REGLIST_LENGTH(X)	((((X) >> 5) & 3) + 1)
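
/* For example (illustrative only): "{d0[2], d2[2]}" is a two-register
   list with lane 2 and stride 2, so the return value has lane = 2,
   bit 4 set (stride - 1) and bits [6:5] equal to 1 (length - 1);
   "{d0-d3}" instead yields NEON_INTERLEAVE_LANES, stride 1 and
   length 4.  */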
2000
2001static int
2002parse_neon_el_struct_list (char **str, unsigned *pbase,
2003			   struct neon_type_el *eltype)
2004{
2005  char *ptr = *str;
2006  int base_reg = -1;
2007  int reg_incr = -1;
2008  int count = 0;
2009  int lane = -1;
2010  int leading_brace = 0;
2011  enum arm_reg_type rtype = REG_TYPE_NDQ;
2012  const char *const incr_error = _("register stride must be 1 or 2");
2013  const char *const type_error = _("mismatched element/structure types in list");
2014  struct neon_typed_alias firsttype;
2015  firsttype.defined = 0;
2016  firsttype.eltype.type = NT_invtype;
2017  firsttype.eltype.size = -1;
2018  firsttype.index = -1;
2019
2020  if (skip_past_char (&ptr, '{') == SUCCESS)
2021    leading_brace = 1;
2022
2023  do
2024    {
2025      struct neon_typed_alias atype;
2026      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);
2027
2028      if (getreg == FAIL)
2029	{
2030	  first_error (_(reg_expected_msgs[rtype]));
2031	  return FAIL;
2032	}
2033
2034      if (base_reg == -1)
2035	{
2036	  base_reg = getreg;
2037	  if (rtype == REG_TYPE_NQ)
2038	    {
2039	      reg_incr = 1;
2040	    }
2041	  firsttype = atype;
2042	}
2043      else if (reg_incr == -1)
2044	{
2045	  reg_incr = getreg - base_reg;
2046	  if (reg_incr < 1 || reg_incr > 2)
2047	    {
2048	      first_error (_(incr_error));
2049	      return FAIL;
2050	    }
2051	}
2052      else if (getreg != base_reg + reg_incr * count)
2053	{
2054	  first_error (_(incr_error));
2055	  return FAIL;
2056	}
2057
2058      if (! neon_alias_types_same (&atype, &firsttype))
2059	{
2060	  first_error (_(type_error));
2061	  return FAIL;
2062	}
2063
2064      /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
2065	 modes.  */
2066      if (ptr[0] == '-')
2067	{
2068	  struct neon_typed_alias htype;
2069	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
2070	  if (lane == -1)
2071	    lane = NEON_INTERLEAVE_LANES;
2072	  else if (lane != NEON_INTERLEAVE_LANES)
2073	    {
2074	      first_error (_(type_error));
2075	      return FAIL;
2076	    }
2077	  if (reg_incr == -1)
2078	    reg_incr = 1;
2079	  else if (reg_incr != 1)
2080	    {
2081	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
2082	      return FAIL;
2083	    }
2084	  ptr++;
2085	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
2086	  if (hireg == FAIL)
2087	    {
2088	      first_error (_(reg_expected_msgs[rtype]));
2089	      return FAIL;
2090	    }
2091	  if (! neon_alias_types_same (&htype, &firsttype))
2092	    {
2093	      first_error (_(type_error));
2094	      return FAIL;
2095	    }
2096	  count += hireg + dregs - getreg;
2097	  continue;
2098	}
2099
2100      /* If we're using Q registers, we can't use [] or [n] syntax.  */
2101      if (rtype == REG_TYPE_NQ)
2102	{
2103	  count += 2;
2104	  continue;
2105	}
2106
2107      if ((atype.defined & NTA_HASINDEX) != 0)
2108	{
2109	  if (lane == -1)
2110	    lane = atype.index;
2111	  else if (lane != atype.index)
2112	    {
2113	      first_error (_(type_error));
2114	      return FAIL;
2115	    }
2116	}
2117      else if (lane == -1)
2118	lane = NEON_INTERLEAVE_LANES;
2119      else if (lane != NEON_INTERLEAVE_LANES)
2120	{
2121	  first_error (_(type_error));
2122	  return FAIL;
2123	}
2124      count++;
2125    }
2126  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);
2127
2128  /* No lane set by [x]. We must be interleaving structures.  */
2129  if (lane == -1)
2130    lane = NEON_INTERLEAVE_LANES;
2131
2132  /* Sanity check.  */
2133  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
2134      || (count > 1 && reg_incr == -1))
2135    {
2136      first_error (_("error parsing element/structure list"));
2137      return FAIL;
2138    }
2139
2140  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
2141    {
2142      first_error (_("expected }"));
2143      return FAIL;
2144    }
2145
2146  if (reg_incr == -1)
2147    reg_incr = 1;
2148
2149  if (eltype)
2150    *eltype = firsttype.eltype;
2151
2152  *pbase = base_reg;
2153  *str = ptr;
2154
2155  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
2156}
2157
2158/* Parse an explicit relocation suffix on an expression.  This is
2159   either nothing, or a word in parentheses.  Note that if !OBJ_ELF,
2160   arm_reloc_hsh contains no entries, so this function can only
2161   succeed if there is no () after the word.  Returns -1 on error,
2162   BFD_RELOC_UNUSED if there wasn't any suffix.	 */
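/* For example (illustrative only): for ".word sym(GOT)" the suffix
   "(GOT)" is consumed here and looked up in arm_reloc_hsh, yielding
   the corresponding BFD relocation code; a plain ".word sym" simply
   returns BFD_RELOC_UNUSED.  */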
2163
2164static int
2165parse_reloc (char **str)
2166{
2167  struct reloc_entry *r;
2168  char *p, *q;
2169
2170  if (**str != '(')
2171    return BFD_RELOC_UNUSED;
2172
2173  p = *str + 1;
2174  q = p;
2175
2176  while (*q && *q != ')' && *q != ',')
2177    q++;
2178  if (*q != ')')
2179    return -1;
2180
2181  if ((r = (struct reloc_entry *)
2182       hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2183    return -1;
2184
2185  *str = q + 1;
2186  return r->reloc;
2187}
2188
2189/* Directives: register aliases.  */
2190
2191static struct reg_entry *
2192insert_reg_alias (char *str, unsigned number, int type)
2193{
2194  struct reg_entry *new_reg;
2195  const char *name;
2196
2197  if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2198    {
2199      if (new_reg->builtin)
2200	as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2201
2202      /* Only warn about a redefinition if it's not defined as the
2203	 same register.	 */
2204      else if (new_reg->number != number || new_reg->type != type)
2205	as_warn (_("ignoring redefinition of register alias '%s'"), str);
2206
2207      return NULL;
2208    }
2209
2210  name = xstrdup (str);
2211  new_reg = XNEW (struct reg_entry);
2212
2213  new_reg->name = name;
2214  new_reg->number = number;
2215  new_reg->type = type;
2216  new_reg->builtin = FALSE;
2217  new_reg->neon = NULL;
2218
2219  if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2220    abort ();
2221
2222  return new_reg;
2223}
2224
2225static void
2226insert_neon_reg_alias (char *str, int number, int type,
2227		       struct neon_typed_alias *atype)
2228{
2229  struct reg_entry *reg = insert_reg_alias (str, number, type);
2230
2231  if (!reg)
2232    {
2233      first_error (_("attempt to redefine typed alias"));
2234      return;
2235    }
2236
2237  if (atype)
2238    {
2239      reg->neon = XNEW (struct neon_typed_alias);
2240      *reg->neon = *atype;
2241    }
2242}
2243
2244/* Look for the .req directive.	 This is of the form:
2245
2246	new_register_name .req existing_register_name
2247
2248   If we find one, or if it looks sufficiently like one that we want to
2249   handle any error here, return TRUE.  Otherwise return FALSE.  */
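/* For example (illustrative only): "acc .req r4" lets subsequent code
   write "mov acc, #0".  Aliases are also created for the all-uppercase
   and all-lowercase spellings of the new name, so "ACC" works too.  */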
2250
2251static bfd_boolean
2252create_register_alias (char * newname, char *p)
2253{
2254  struct reg_entry *old;
2255  char *oldname, *nbuf;
2256  size_t nlen;
2257
2258  /* The input scrubber ensures that whitespace after the mnemonic is
2259     collapsed to single spaces.  */
2260  oldname = p;
2261  if (strncmp (oldname, " .req ", 6) != 0)
2262    return FALSE;
2263
2264  oldname += 6;
2265  if (*oldname == '\0')
2266    return FALSE;
2267
2268  old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
2269  if (!old)
2270    {
2271      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
2272      return TRUE;
2273    }
2274
2275  /* If TC_CASE_SENSITIVE is defined, then newname already points to
2276     the desired alias name, and p points to its end.  If not, then
2277     the desired alias name is in the global original_case_string.  */
2278#ifdef TC_CASE_SENSITIVE
2279  nlen = p - newname;
2280#else
2281  newname = original_case_string;
2282  nlen = strlen (newname);
2283#endif
2284
2285  nbuf = xmemdup0 (newname, nlen);
2286
2287  /* Create aliases under the new name as stated; an all-lowercase
2288     version of the new name; and an all-uppercase version of the new
2289     name.  */
2290  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
2291    {
2292      for (p = nbuf; *p; p++)
2293	*p = TOUPPER (*p);
2294
2295      if (strncmp (nbuf, newname, nlen))
2296	{
2297	  /* If this attempt to create an additional alias fails, do not bother
2298	     trying to create the all-lower case alias.  We will fail and issue
2299	     a second, duplicate error message.  This situation arises when the
2300	     programmer does something like:
2301	       foo .req r0
2302	       Foo .req r1
2303	     The second .req creates the "Foo" alias but then fails to create
2304	     the artificial FOO alias because it has already been created by the
2305	     first .req.  */
2306	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
2307	    {
2308	      free (nbuf);
2309	      return TRUE;
2310	    }
2311	}
2312
2313      for (p = nbuf; *p; p++)
2314	*p = TOLOWER (*p);
2315
2316      if (strncmp (nbuf, newname, nlen))
2317	insert_reg_alias (nbuf, old->number, old->type);
2318    }
2319
2320  free (nbuf);
2321  return TRUE;
2322}
2323
2324/* Create a Neon typed/indexed register alias using directives, e.g.:
2325     X .dn d5.s32[1]
2326     Y .qn 6.s16
2327     Z .dn d7
2328     T .dn Z[0]
2329   These typed registers can be used instead of the types specified after the
2330   Neon mnemonic, so long as all operands given have types. Types can also be
2331   specified directly, e.g.:
2332     vadd d0.s32, d1.s32, d2.s32  */
2333
2334static bfd_boolean
2335create_neon_reg_alias (char *newname, char *p)
2336{
2337  enum arm_reg_type basetype;
2338  struct reg_entry *basereg;
2339  struct reg_entry mybasereg;
2340  struct neon_type ntype;
2341  struct neon_typed_alias typeinfo;
2342  char *namebuf, *nameend ATTRIBUTE_UNUSED;
2343  int namelen;
2344
2345  typeinfo.defined = 0;
2346  typeinfo.eltype.type = NT_invtype;
2347  typeinfo.eltype.size = -1;
2348  typeinfo.index = -1;
2349
2350  nameend = p;
2351
2352  if (strncmp (p, " .dn ", 5) == 0)
2353    basetype = REG_TYPE_VFD;
2354  else if (strncmp (p, " .qn ", 5) == 0)
2355    basetype = REG_TYPE_NQ;
2356  else
2357    return FALSE;
2358
2359  p += 5;
2360
2361  if (*p == '\0')
2362    return FALSE;
2363
2364  basereg = arm_reg_parse_multi (&p);
2365
2366  if (basereg && basereg->type != basetype)
2367    {
2368      as_bad (_("bad type for register"));
2369      return FALSE;
2370    }
2371
2372  if (basereg == NULL)
2373    {
2374      expressionS exp;
2375      /* Try parsing as an integer.  */
2376      my_get_expression (&exp, &p, GE_NO_PREFIX);
2377      if (exp.X_op != O_constant)
2378	{
2379	  as_bad (_("expression must be constant"));
2380	  return FALSE;
2381	}
2382      basereg = &mybasereg;
2383      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
2384						  : exp.X_add_number;
2385      basereg->neon = 0;
2386    }
2387
2388  if (basereg->neon)
2389    typeinfo = *basereg->neon;
2390
2391  if (parse_neon_type (&ntype, &p) == SUCCESS)
2392    {
2393      /* We got a type.  */
2394      if (typeinfo.defined & NTA_HASTYPE)
2395	{
2396	  as_bad (_("can't redefine the type of a register alias"));
2397	  return FALSE;
2398	}
2399
2400      typeinfo.defined |= NTA_HASTYPE;
2401      if (ntype.elems != 1)
2402	{
2403	  as_bad (_("you must specify a single type only"));
2404	  return FALSE;
2405	}
2406      typeinfo.eltype = ntype.el[0];
2407    }
2408
2409  if (skip_past_char (&p, '[') == SUCCESS)
2410    {
2411      expressionS exp;
2412      /* We got a scalar index.  */
2413
2414      if (typeinfo.defined & NTA_HASINDEX)
2415	{
2416	  as_bad (_("can't redefine the index of a scalar alias"));
2417	  return FALSE;
2418	}
2419
2420      my_get_expression (&exp, &p, GE_NO_PREFIX);
2421
2422      if (exp.X_op != O_constant)
2423	{
2424	  as_bad (_("scalar index must be constant"));
2425	  return FALSE;
2426	}
2427
2428      typeinfo.defined |= NTA_HASINDEX;
2429      typeinfo.index = exp.X_add_number;
2430
2431      if (skip_past_char (&p, ']') == FAIL)
2432	{
2433	  as_bad (_("expecting ]"));
2434	  return FALSE;
2435	}
2436    }
2437
2438  /* If TC_CASE_SENSITIVE is defined, then newname already points to
2439     the desired alias name, and p points to its end.  If not, then
2440     the desired alias name is in the global original_case_string.  */
2441#ifdef TC_CASE_SENSITIVE
2442  namelen = nameend - newname;
2443#else
2444  newname = original_case_string;
2445  namelen = strlen (newname);
2446#endif
2447
2448  namebuf = xmemdup0 (newname, namelen);
2449
2450  insert_neon_reg_alias (namebuf, basereg->number, basetype,
2451			 typeinfo.defined != 0 ? &typeinfo : NULL);
2452
2453  /* Insert name in all uppercase.  */
2454  for (p = namebuf; *p; p++)
2455    *p = TOUPPER (*p);
2456
2457  if (strncmp (namebuf, newname, namelen))
2458    insert_neon_reg_alias (namebuf, basereg->number, basetype,
2459			   typeinfo.defined != 0 ? &typeinfo : NULL);
2460
2461  /* Insert name in all lowercase.  */
2462  for (p = namebuf; *p; p++)
2463    *p = TOLOWER (*p);
2464
2465  if (strncmp (namebuf, newname, namelen))
2466    insert_neon_reg_alias (namebuf, basereg->number, basetype,
2467			   typeinfo.defined != 0 ? &typeinfo : NULL);
2468
2469  free (namebuf);
2470  return TRUE;
2471}
2472
2473/* Should never be called, as .req goes between the alias and the
2474   register name, not at the beginning of the line.  */
2475
2476static void
2477s_req (int a ATTRIBUTE_UNUSED)
2478{
2479  as_bad (_("invalid syntax for .req directive"));
2480}
2481
2482static void
2483s_dn (int a ATTRIBUTE_UNUSED)
2484{
2485  as_bad (_("invalid syntax for .dn directive"));
2486}
2487
2488static void
2489s_qn (int a ATTRIBUTE_UNUSED)
2490{
2491  as_bad (_("invalid syntax for .qn directive"));
2492}
2493
2494/* The .unreq directive deletes an alias which was previously defined
2495   by .req.  For example:
2496
2497       my_alias .req r11
2498       .unreq my_alias	  */
2499
2500static void
2501s_unreq (int a ATTRIBUTE_UNUSED)
2502{
2503  char * name;
2504  char saved_char;
2505
2506  name = input_line_pointer;
2507
2508  while (*input_line_pointer != 0
2509	 && *input_line_pointer != ' '
2510	 && *input_line_pointer != '\n')
2511    ++input_line_pointer;
2512
2513  saved_char = *input_line_pointer;
2514  *input_line_pointer = 0;
2515
2516  if (!*name)
2517    as_bad (_("invalid syntax for .unreq directive"));
2518  else
2519    {
2520      struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2521							      name);
2522
2523      if (!reg)
2524	as_bad (_("unknown register alias '%s'"), name);
2525      else if (reg->builtin)
2526	as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2527		 name);
2528      else
2529	{
2530	  char * p;
2531	  char * nbuf;
2532
2533	  hash_delete (arm_reg_hsh, name, FALSE);
2534	  free ((char *) reg->name);
2535	  if (reg->neon)
2536	    free (reg->neon);
2537	  free (reg);
2538
2539	  /* Also locate the all upper case and all lower case versions.
2540	     Do not complain if we cannot find one or the other as it
2541	     was probably deleted above.  */
2542
2543	  nbuf = strdup (name);
2544	  for (p = nbuf; *p; p++)
2545	    *p = TOUPPER (*p);
2546	  reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2547	  if (reg)
2548	    {
2549	      hash_delete (arm_reg_hsh, nbuf, FALSE);
2550	      free ((char *) reg->name);
2551	      if (reg->neon)
2552		free (reg->neon);
2553	      free (reg);
2554	    }
2555
2556	  for (p = nbuf; *p; p++)
2557	    *p = TOLOWER (*p);
2558	  reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2559	  if (reg)
2560	    {
2561	      hash_delete (arm_reg_hsh, nbuf, FALSE);
2562	      free ((char *) reg->name);
2563	      if (reg->neon)
2564		free (reg->neon);
2565	      free (reg);
2566	    }
2567
2568	  free (nbuf);
2569	}
2570    }
2571
2572  *input_line_pointer = saved_char;
2573  demand_empty_rest_of_line ();
2574}
2575
2576/* Directives: Instruction set selection.  */
2577
2578#ifdef OBJ_ELF
2579/* This code is to handle mapping symbols as defined in the ARM ELF spec.
2580   (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
   Note that previously, $a and $t had type STT_FUNC (BSF_FUNCTION flag)
   and $d had type STT_OBJECT (BSF_OBJECT flag).  Now all three are
   untyped.  */
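/* For example (illustrative only): a section containing an ARM
   instruction, then a literal word, then a Thumb instruction would be
   annotated with $a, $d and $t mapping symbols at the respective
   offsets, telling disassemblers and the linker how to interpret each
   region.  */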
2583
2584/* Create a new mapping symbol for the transition to STATE.  */
2585
2586static void
2587make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
2588{
2589  symbolS * symbolP;
2590  const char * symname;
2591  int type;
2592
2593  switch (state)
2594    {
2595    case MAP_DATA:
2596      symname = "$d";
2597      type = BSF_NO_FLAGS;
2598      break;
2599    case MAP_ARM:
2600      symname = "$a";
2601      type = BSF_NO_FLAGS;
2602      break;
2603    case MAP_THUMB:
2604      symname = "$t";
2605      type = BSF_NO_FLAGS;
2606      break;
2607    default:
2608      abort ();
2609    }
2610
2611  symbolP = symbol_new (symname, now_seg, value, frag);
2612  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
2613
2614  switch (state)
2615    {
2616    case MAP_ARM:
2617      THUMB_SET_FUNC (symbolP, 0);
2618      ARM_SET_THUMB (symbolP, 0);
2619      ARM_SET_INTERWORK (symbolP, support_interwork);
2620      break;
2621
2622    case MAP_THUMB:
2623      THUMB_SET_FUNC (symbolP, 1);
2624      ARM_SET_THUMB (symbolP, 1);
2625      ARM_SET_INTERWORK (symbolP, support_interwork);
2626      break;
2627
2628    case MAP_DATA:
2629    default:
2630      break;
2631    }
2632
2633  /* Save the mapping symbols for future reference.  Also check that
2634     we do not place two mapping symbols at the same offset within a
2635     frag.  We'll handle overlap between frags in
2636     check_mapping_symbols.
2637
2638     If .fill or other data filling directive generates zero sized data,
2639     the mapping symbol for the following code will have the same value
2640     as the one generated for the data filling directive.  In this case,
2641     we replace the old symbol with the new one at the same address.  */
2642  if (value == 0)
2643    {
2644      if (frag->tc_frag_data.first_map != NULL)
2645	{
2646	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
2647	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
2648	}
2649      frag->tc_frag_data.first_map = symbolP;
2650    }
2651  if (frag->tc_frag_data.last_map != NULL)
2652    {
2653      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
2654      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
2655	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
2656    }
2657  frag->tc_frag_data.last_map = symbolP;
2658}
2659
2660/* We must sometimes convert a region marked as code to data during
2661   code alignment, if an odd number of bytes have to be padded.  The
2662   code mapping symbol is pushed to an aligned address.  */
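/* For example (illustrative only): aligning to 4 bytes after a single
   2-byte Thumb instruction leaves 2 bytes of padding; the padding gets
   a $d mapping symbol and a new $t symbol is placed at the aligned
   address, so the pad bytes are not disassembled as code.  */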
2663
2664static void
2665insert_data_mapping_symbol (enum mstate state,
2666			    valueT value, fragS *frag, offsetT bytes)
2667{
2668  /* If there was already a mapping symbol, remove it.  */
2669  if (frag->tc_frag_data.last_map != NULL
2670      && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
2671    {
2672      symbolS *symp = frag->tc_frag_data.last_map;
2673
2674      if (value == 0)
2675	{
2676	  know (frag->tc_frag_data.first_map == symp);
2677	  frag->tc_frag_data.first_map = NULL;
2678	}
2679      frag->tc_frag_data.last_map = NULL;
2680      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
2681    }
2682
2683  make_mapping_symbol (MAP_DATA, value, frag);
2684  make_mapping_symbol (state, value + bytes, frag);
2685}
2686
2687static void mapping_state_2 (enum mstate state, int max_chars);
2688
2689/* Set the mapping state to STATE.  Only call this when about to
2690   emit some STATE bytes to the file.  */
2691
2692#define TRANSITION(from, to) (mapstate == (from) && state == (to))
2693void
2694mapping_state (enum mstate state)
2695{
2696  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2697
2698  if (mapstate == state)
2699    /* The mapping symbol has already been emitted.
2700       There is nothing else to do.  */
2701    return;
2702
2703  if (state == MAP_ARM || state == MAP_THUMB)
2704    /*  PR gas/12931
2705	All ARM instructions require 4-byte alignment.
2706	(Almost) all Thumb instructions require 2-byte alignment.
2707
2708	When emitting instructions into any section, mark the section
2709	appropriately.
2710
2711	Some Thumb instructions are alignment-sensitive modulo 4 bytes,
2712	but themselves require 2-byte alignment; this applies to some
	PC-relative forms.  However, these cases will involve implicit
	literal pool generation or an explicit .align >= 2, both of
	which will cause the section to be marked with sufficient
2716	alignment.  Thus, we don't handle those cases here.  */
2717    record_alignment (now_seg, state == MAP_ARM ? 2 : 1);
2718
2719  if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
2720    /* This case will be evaluated later.  */
2721    return;
2722
2723  mapping_state_2 (state, 0);
2724}
2725
2726/* Same as mapping_state, but MAX_CHARS bytes have already been
2727   allocated.  Put the mapping symbol that far back.  */
2728
2729static void
2730mapping_state_2 (enum mstate state, int max_chars)
2731{
2732  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2733
2734  if (!SEG_NORMAL (now_seg))
2735    return;
2736
2737  if (mapstate == state)
2738    /* The mapping symbol has already been emitted.
2739       There is nothing else to do.  */
2740    return;
2741
2742  if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
2743	  || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
2744    {
2745      struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
2746      const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);
2747
2748      if (add_symbol)
2749	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
2750    }
2751
2752  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2753  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
2754}
2755#undef TRANSITION
2756#else
2757#define mapping_state(x) ((void)0)
2758#define mapping_state_2(x, y) ((void)0)
2759#endif
2760
2761/* Find the real, Thumb encoded start of a Thumb function.  */
2762
2763#ifdef OBJ_COFF
2764static symbolS *
2765find_real_start (symbolS * symbolP)
2766{
2767  char *       real_start;
2768  const char * name = S_GET_NAME (symbolP);
2769  symbolS *    new_target;
2770
2771  /* This definition must agree with the one in gcc/config/arm/thumb.c.	 */
2772#define STUB_NAME ".real_start_of"
2773
2774  if (name == NULL)
2775    abort ();
2776
2777  /* The compiler may generate BL instructions to local labels because
2778     it needs to perform a branch to a far away location. These labels
2779     do not have a corresponding ".real_start_of" label.  We check
2780     both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2781     the ".real_start_of" convention for nonlocal branches.  */
2782  if (S_IS_LOCAL (symbolP) || name[0] == '.')
2783    return symbolP;
2784
2785  real_start = concat (STUB_NAME, name, NULL);
2786  new_target = symbol_find (real_start);
2787  free (real_start);
2788
2789  if (new_target == NULL)
2790    {
2791      as_warn (_("Failed to find real start of function: %s\n"), name);
2792      new_target = symbolP;
2793    }
2794
2795  return new_target;
2796}
2797#endif
2798
2799static void
2800opcode_select (int width)
2801{
2802  switch (width)
2803    {
2804    case 16:
2805      if (! thumb_mode)
2806	{
2807	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2808	    as_bad (_("selected processor does not support THUMB opcodes"));
2809
2810	  thumb_mode = 1;
2811	  /* No need to force the alignment, since we will have been
2812	     coming from ARM mode, which is word-aligned.  */
2813	  record_alignment (now_seg, 1);
2814	}
2815      break;
2816
2817    case 32:
2818      if (thumb_mode)
2819	{
2820	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2821	    as_bad (_("selected processor does not support ARM opcodes"));
2822
2823	  thumb_mode = 0;
2824
2825	  if (!need_pass_2)
2826	    frag_align (2, 0, 0);
2827
2828	  record_alignment (now_seg, 1);
2829	}
2830      break;
2831
2832    default:
2833      as_bad (_("invalid instruction size selected (%d)"), width);
2834    }
2835}
2836
2837static void
2838s_arm (int ignore ATTRIBUTE_UNUSED)
2839{
2840  opcode_select (32);
2841  demand_empty_rest_of_line ();
2842}
2843
2844static void
2845s_thumb (int ignore ATTRIBUTE_UNUSED)
2846{
2847  opcode_select (16);
2848  demand_empty_rest_of_line ();
2849}
2850
2851static void
2852s_code (int unused ATTRIBUTE_UNUSED)
2853{
2854  int temp;
2855
2856  temp = get_absolute_expression ();
2857  switch (temp)
2858    {
2859    case 16:
2860    case 32:
2861      opcode_select (temp);
2862      break;
2863
2864    default:
2865      as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2866    }
2867}
2868
2869static void
2870s_force_thumb (int ignore ATTRIBUTE_UNUSED)
2871{
  /* If we are not already in thumb mode, go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm, for example,
     to compile interworking support functions even if the
     target processor does not support interworking.  */
2877  if (! thumb_mode)
2878    {
2879      thumb_mode = 2;
2880      record_alignment (now_seg, 1);
2881    }
2882
2883  demand_empty_rest_of_line ();
2884}
2885
2886static void
2887s_thumb_func (int ignore ATTRIBUTE_UNUSED)
2888{
2889  s_thumb (0);
2890
2891  /* The following label is the name/address of the start of a Thumb function.
2892     We need to know this for the interworking support.	 */
2893  label_is_thumb_function_name = TRUE;
2894}
2895
2896/* Perform a .set directive, but also mark the alias as
2897   being a thumb function.  */
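/* For example (illustrative only):
       .thumb_set foo, bar
   behaves like ".set foo, bar" but additionally marks "foo" as a
   Thumb function for interworking purposes.  */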
2898
2899static void
2900s_thumb_set (int equiv)
2901{
2902  /* XXX the following is a duplicate of the code for s_set() in read.c
2903     We cannot just call that code as we need to get at the symbol that
2904     is created.  */
2905  char *    name;
2906  char	    delim;
2907  char *    end_name;
2908  symbolS * symbolP;
2909
2910  /* Especial apologies for the random logic:
2911     This just grew, and could be parsed much more simply!
2912     Dean - in haste.  */
2913  delim	    = get_symbol_name (& name);
2914  end_name  = input_line_pointer;
2915  (void) restore_line_pointer (delim);
2916
2917  if (*input_line_pointer != ',')
2918    {
2919      *end_name = 0;
2920      as_bad (_("expected comma after name \"%s\""), name);
2921      *end_name = delim;
2922      ignore_rest_of_line ();
2923      return;
2924    }
2925
2926  input_line_pointer++;
2927  *end_name = 0;
2928
2929  if (name[0] == '.' && name[1] == '\0')
2930    {
2931      /* XXX - this should not happen to .thumb_set.  */
2932      abort ();
2933    }
2934
2935  if ((symbolP = symbol_find (name)) == NULL
2936      && (symbolP = md_undefined_symbol (name)) == NULL)
2937    {
2938#ifndef NO_LISTING
2939      /* When doing symbol listings, play games with dummy fragments living
2940	 outside the normal fragment chain to record the file and line info
2941	 for this symbol.  */
2942      if (listing & LISTING_SYMBOLS)
2943	{
2944	  extern struct list_info_struct * listing_tail;
2945	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));
2946
2947	  memset (dummy_frag, 0, sizeof (fragS));
2948	  dummy_frag->fr_type = rs_fill;
2949	  dummy_frag->line = listing_tail;
2950	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
2951	  dummy_frag->fr_symbol = symbolP;
2952	}
2953      else
2954#endif
2955	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);
2956
2957#ifdef OBJ_COFF
2958      /* "set" symbols are local unless otherwise specified.  */
2959      SF_SET_LOCAL (symbolP);
2960#endif /* OBJ_COFF  */
2961    }				/* Make a new symbol.  */
2962
2963  symbol_table_insert (symbolP);
2964
2965  * end_name = delim;
2966
2967  if (equiv
2968      && S_IS_DEFINED (symbolP)
2969      && S_GET_SEGMENT (symbolP) != reg_section)
2970    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));
2971
2972  pseudo_set (symbolP);
2973
2974  demand_empty_rest_of_line ();
2975
2976  /* XXX Now we come to the Thumb specific bit of code.	 */
2977
2978  THUMB_SET_FUNC (symbolP, 1);
2979  ARM_SET_THUMB (symbolP, 1);
2980#if defined OBJ_ELF || defined OBJ_COFF
2981  ARM_SET_INTERWORK (symbolP, support_interwork);
2982#endif
2983}
2984
2985/* Directives: Mode selection.  */
2986
2987/* .syntax [unified|divided] - choose the new unified syntax
2988   (same for Arm and Thumb encoding, modulo slight differences in what
2989   can be represented) or the old divergent syntax for each mode.  */
2990static void
2991s_syntax (int unused ATTRIBUTE_UNUSED)
2992{
2993  char *name, delim;
2994
2995  delim = get_symbol_name (& name);
2996
2997  if (!strcasecmp (name, "unified"))
2998    unified_syntax = TRUE;
2999  else if (!strcasecmp (name, "divided"))
3000    unified_syntax = FALSE;
3001  else
3002    {
3003      as_bad (_("unrecognized syntax mode \"%s\""), name);
3004      return;
3005    }
3006  (void) restore_line_pointer (delim);
3007  demand_empty_rest_of_line ();
3008}
3009
3010/* Directives: sectioning and alignment.  */
3011
3012static void
3013s_bss (int ignore ATTRIBUTE_UNUSED)
3014{
  /* We don't support putting frags in the BSS segment; we fake it by
     marking in_bss, then looking at s_skip for clues.  */
3017  subseg_set (bss_section, 0);
3018  demand_empty_rest_of_line ();
3019
3020#ifdef md_elf_section_change_hook
3021  md_elf_section_change_hook ();
3022#endif
3023}
3024
3025static void
3026s_even (int ignore ATTRIBUTE_UNUSED)
3027{
  /* Never make a frag if we expect an extra pass.  */
3029  if (!need_pass_2)
3030    frag_align (1, 0, 0);
3031
3032  record_alignment (now_seg, 1);
3033
3034  demand_empty_rest_of_line ();
3035}
3036
3037/* Directives: CodeComposer Studio.  */
3038
3039/*  .ref  (for CodeComposer Studio syntax only).  */
3040static void
3041s_ccs_ref (int unused ATTRIBUTE_UNUSED)
3042{
3043  if (codecomposer_syntax)
3044    ignore_rest_of_line ();
3045  else
3046    as_bad (_(".ref pseudo-op only available with -mccs flag."));
3047}
3048
/*  If name is not NULL, it is used to mark the beginning of a
    function; if it is NULL, it marks the end of the function.  */
3051static void
3052asmfunc_debug (const char * name)
3053{
3054  static const char * last_name = NULL;
3055
3056  if (name != NULL)
3057    {
3058      gas_assert (last_name == NULL);
3059      last_name = name;
3060
3061      if (debug_type == DEBUG_STABS)
3062         stabs_generate_asm_func (name, name);
3063    }
3064  else
3065    {
3066      gas_assert (last_name != NULL);
3067
3068      if (debug_type == DEBUG_STABS)
3069        stabs_generate_asm_endfunc (last_name, last_name);
3070
3071      last_name = NULL;
3072    }
3073}
3074
3075static void
3076s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
3077{
3078  if (codecomposer_syntax)
3079    {
3080      switch (asmfunc_state)
3081	{
3082	case OUTSIDE_ASMFUNC:
3083	  asmfunc_state = WAITING_ASMFUNC_NAME;
3084	  break;
3085
3086	case WAITING_ASMFUNC_NAME:
3087	  as_bad (_(".asmfunc repeated."));
3088	  break;
3089
3090	case WAITING_ENDASMFUNC:
3091	  as_bad (_(".asmfunc without function."));
3092	  break;
3093	}
3094      demand_empty_rest_of_line ();
3095    }
3096  else
3097    as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3098}
3099
3100static void
3101s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
3102{
3103  if (codecomposer_syntax)
3104    {
3105      switch (asmfunc_state)
3106	{
3107	case OUTSIDE_ASMFUNC:
3108	  as_bad (_(".endasmfunc without a .asmfunc."));
3109	  break;
3110
3111	case WAITING_ASMFUNC_NAME:
3112	  as_bad (_(".endasmfunc without function."));
3113	  break;
3114
3115	case WAITING_ENDASMFUNC:
3116	  asmfunc_state = OUTSIDE_ASMFUNC;
3117	  asmfunc_debug (NULL);
3118	  break;
3119	}
3120      demand_empty_rest_of_line ();
3121    }
3122  else
3123    as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3124}
3125
3126static void
3127s_ccs_def (int name)
3128{
3129  if (codecomposer_syntax)
3130    s_globl (name);
3131  else
3132    as_bad (_(".def pseudo-op only available with -mccs flag."));
3133}
3134
3135/* Directives: Literal pools.  */
3136
3137static literal_pool *
3138find_literal_pool (void)
3139{
3140  literal_pool * pool;
3141
3142  for (pool = list_of_pools; pool != NULL; pool = pool->next)
3143    {
3144      if (pool->section == now_seg
3145	  && pool->sub_section == now_subseg)
3146	break;
3147    }
3148
3149  return pool;
3150}
3151
3152static literal_pool *
3153find_or_make_literal_pool (void)
3154{
3155  /* Next literal pool ID number.  */
3156  static unsigned int latest_pool_num = 1;
3157  literal_pool *      pool;
3158
3159  pool = find_literal_pool ();
3160
3161  if (pool == NULL)
3162    {
3163      /* Create a new pool.  */
3164      pool = XNEW (literal_pool);
3165      if (! pool)
3166	return NULL;
3167
3168      pool->next_free_entry = 0;
3169      pool->section	    = now_seg;
3170      pool->sub_section	    = now_subseg;
3171      pool->next	    = list_of_pools;
3172      pool->symbol	    = NULL;
3173      pool->alignment	    = 2;
3174
3175      /* Add it to the list.  */
3176      list_of_pools = pool;
3177    }
3178
3179  /* New pools, and emptied pools, will have a NULL symbol.  */
3180  if (pool->symbol == NULL)
3181    {
3182      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3183				    (valueT) 0, &zero_address_frag);
3184      pool->id = latest_pool_num ++;
3185    }
3186
3187  /* Done.  */
3188  return pool;
3189}
3190
3191/* Add the literal in the global 'inst'
3192   structure to the relevant literal pool.  */
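/* For example (illustrative only): "ldr r0, =0x12345678" reaches here
   with the constant in inst.reloc.exp; the value is appended to the
   pool (unless an identical entry already exists) and the expression
   is rewritten as pool symbol + offset, so the load becomes PC-relative
   once the pool is dumped (e.g. by .ltorg).  */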
3193
3194static int
3195add_to_lit_pool (unsigned int nbytes)
3196{
3197#define PADDING_SLOT 0x1
3198#define LIT_ENTRY_SIZE_MASK 0xFF
3199  literal_pool * pool;
3200  unsigned int entry, pool_size = 0;
3201  bfd_boolean padding_slot_p = FALSE;
3202  unsigned imm1 = 0;
3203  unsigned imm2 = 0;
3204
3205  if (nbytes == 8)
3206    {
3207      imm1 = inst.operands[1].imm;
3208      imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
3209	       : inst.reloc.exp.X_unsigned ? 0
3210	       : ((bfd_int64_t) inst.operands[1].imm) >> 32);
3211      if (target_big_endian)
3212	{
3213	  imm1 = imm2;
3214	  imm2 = inst.operands[1].imm;
3215	}
3216    }
3217
3218  pool = find_or_make_literal_pool ();
3219
3220  /* Check if this literal value is already in the pool.  */
3221  for (entry = 0; entry < pool->next_free_entry; entry ++)
3222    {
3223      if (nbytes == 4)
3224	{
3225	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
3226	      && (inst.reloc.exp.X_op == O_constant)
3227	      && (pool->literals[entry].X_add_number
3228		  == inst.reloc.exp.X_add_number)
3229	      && (pool->literals[entry].X_md == nbytes)
3230	      && (pool->literals[entry].X_unsigned
3231		  == inst.reloc.exp.X_unsigned))
3232	    break;
3233
3234	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
3235	      && (inst.reloc.exp.X_op == O_symbol)
3236	      && (pool->literals[entry].X_add_number
3237		  == inst.reloc.exp.X_add_number)
3238	      && (pool->literals[entry].X_add_symbol
3239		  == inst.reloc.exp.X_add_symbol)
3240	      && (pool->literals[entry].X_op_symbol
3241		  == inst.reloc.exp.X_op_symbol)
3242	      && (pool->literals[entry].X_md == nbytes))
3243	    break;
3244	}
3245      else if ((nbytes == 8)
3246	       && !(pool_size & 0x7)
3247	       && ((entry + 1) != pool->next_free_entry)
3248	       && (pool->literals[entry].X_op == O_constant)
3249	       && (pool->literals[entry].X_add_number == (offsetT) imm1)
3250	       && (pool->literals[entry].X_unsigned
3251		   == inst.reloc.exp.X_unsigned)
3252	       && (pool->literals[entry + 1].X_op == O_constant)
3253	       && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
3254	       && (pool->literals[entry + 1].X_unsigned
3255		   == inst.reloc.exp.X_unsigned))
3256	break;
3257
3258      padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
3259      if (padding_slot_p && (nbytes == 4))
3260	break;
3261
3262      pool_size += 4;
3263    }
3264
3265  /* Do we need to create a new entry?	*/
3266  if (entry == pool->next_free_entry)
3267    {
3268      if (entry >= MAX_LITERAL_POOL_SIZE)
3269	{
3270	  inst.error = _("literal pool overflow");
3271	  return FAIL;
3272	}
3273
3274      if (nbytes == 8)
3275	{
	  /* For 8-byte entries, we align to an 8-byte boundary and
	     split the value into two 4-byte entries, because on a
	     32-bit host 8-byte constants are treated as bignums and
	     stored in "generic_bignum", which will be overwritten by
	     later assignments.
3281
3282	     We also need to make sure there is enough space for
3283	     the split.
3284
3285	     We also check to make sure the literal operand is a
3286	     constant number.  */
3287	  if (!(inst.reloc.exp.X_op == O_constant
3288	        || inst.reloc.exp.X_op == O_big))
3289	    {
3290	      inst.error = _("invalid type for literal pool");
3291	      return FAIL;
3292	    }
3293	  else if (pool_size & 0x7)
3294	    {
3295	      if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
3296		{
3297		  inst.error = _("literal pool overflow");
3298		  return FAIL;
3299		}
3300
3301	      pool->literals[entry] = inst.reloc.exp;
3302	      pool->literals[entry].X_op = O_constant;
3303	      pool->literals[entry].X_add_number = 0;
3304	      pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
3305	      pool->next_free_entry += 1;
3306	      pool_size += 4;
3307	    }
3308	  else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
3309	    {
3310	      inst.error = _("literal pool overflow");
3311	      return FAIL;
3312	    }
3313
3314	  pool->literals[entry] = inst.reloc.exp;
3315	  pool->literals[entry].X_op = O_constant;
3316	  pool->literals[entry].X_add_number = imm1;
3317	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
3318	  pool->literals[entry++].X_md = 4;
3319	  pool->literals[entry] = inst.reloc.exp;
3320	  pool->literals[entry].X_op = O_constant;
3321	  pool->literals[entry].X_add_number = imm2;
3322	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
3323	  pool->literals[entry].X_md = 4;
3324	  pool->alignment = 3;
3325	  pool->next_free_entry += 1;
3326	}
3327      else
3328	{
3329	  pool->literals[entry] = inst.reloc.exp;
3330	  pool->literals[entry].X_md = 4;
3331	}
3332
3333#ifdef OBJ_ELF
3334      /* PR ld/12974: Record the location of the first source line to reference
3335	 this entry in the literal pool.  If it turns out during linking that the
3336	 symbol does not exist we will be able to give an accurate line number for
3337	 the (first use of the) missing reference.  */
3338      if (debug_type == DEBUG_DWARF2)
3339	dwarf2_where (pool->locs + entry);
3340#endif
3341      pool->next_free_entry += 1;
3342    }
3343  else if (padding_slot_p)
3344    {
3345      pool->literals[entry] = inst.reloc.exp;
3346      pool->literals[entry].X_md = nbytes;
3347    }
3348
3349  inst.reloc.exp.X_op	      = O_symbol;
3350  inst.reloc.exp.X_add_number = pool_size;
3351  inst.reloc.exp.X_add_symbol = pool->symbol;
3352
3353  return SUCCESS;
3354}
3355
3356bfd_boolean
3357tc_start_label_without_colon (void)
3358{
3359  bfd_boolean ret = TRUE;
3360
3361  if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
3362    {
3363      const char *label = input_line_pointer;
3364
3365      while (!is_end_of_line[(int) label[-1]])
3366	--label;
3367
3368      if (*label == '.')
3369	{
3370	  as_bad (_("Invalid label '%s'"), label);
3371	  ret = FALSE;
3372	}
3373
3374      asmfunc_debug (label);
3375
3376      asmfunc_state = WAITING_ENDASMFUNC;
3377    }
3378
3379  return ret;
3380}
3381
/* Can't use symbol_new here, so we have to create a symbol and then
   assign it a value later.  That's what these functions do.  */
3384
3385static void
3386symbol_locate (symbolS *    symbolP,
3387	       const char * name,	/* It is copied, the caller can modify.	 */
3388	       segT	    segment,	/* Segment identifier (SEG_<something>).  */
3389	       valueT	    valu,	/* Symbol value.  */
3390	       fragS *	    frag)	/* Associated fragment.	 */
3391{
3392  size_t name_length;
3393  char * preserved_copy_of_name;
3394
3395  name_length = strlen (name) + 1;   /* +1 for \0.  */
3396  obstack_grow (&notes, name, name_length);
3397  preserved_copy_of_name = (char *) obstack_finish (&notes);
3398
3399#ifdef tc_canonicalize_symbol_name
3400  preserved_copy_of_name =
3401    tc_canonicalize_symbol_name (preserved_copy_of_name);
3402#endif
3403
3404  S_SET_NAME (symbolP, preserved_copy_of_name);
3405
3406  S_SET_SEGMENT (symbolP, segment);
3407  S_SET_VALUE (symbolP, valu);
3408  symbol_clear_list_pointers (symbolP);
3409
3410  symbol_set_frag (symbolP, frag);
3411
3412  /* Link to end of symbol chain.  */
3413  {
3414    extern int symbol_table_frozen;
3415
3416    if (symbol_table_frozen)
3417      abort ();
3418  }
3419
3420  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
3421
3422  obj_symbol_new_hook (symbolP);
3423
3424#ifdef tc_symbol_new_hook
3425  tc_symbol_new_hook (symbolP);
3426#endif
3427
3428#ifdef DEBUG_SYMS
3429  verify_symbol_chain (symbol_rootP, symbol_lastP);
3430#endif /* DEBUG_SYMS  */
3431}
3432
3433static void
3434s_ltorg (int ignored ATTRIBUTE_UNUSED)
3435{
3436  unsigned int entry;
3437  literal_pool * pool;
3438  char sym_name[20];
3439
3440  pool = find_literal_pool ();
3441  if (pool == NULL
3442      || pool->symbol == NULL
3443      || pool->next_free_entry == 0)
3444    return;
3445
  /* Align the pool for word (or larger) accesses.
     Only make a frag if we have to.  */
3448  if (!need_pass_2)
3449    frag_align (pool->alignment, 0, 0);
3450
3451  record_alignment (now_seg, 2);
3452
3453#ifdef OBJ_ELF
3454  seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
3455  make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
3456#endif
3457  sprintf (sym_name, "$$lit_\002%x", pool->id);
3458
3459  symbol_locate (pool->symbol, sym_name, now_seg,
3460		 (valueT) frag_now_fix (), frag_now);
3461  symbol_table_insert (pool->symbol);
3462
3463  ARM_SET_THUMB (pool->symbol, thumb_mode);
3464
3465#if defined OBJ_COFF || defined OBJ_ELF
3466  ARM_SET_INTERWORK (pool->symbol, support_interwork);
3467#endif
3468
3469  for (entry = 0; entry < pool->next_free_entry; entry ++)
3470    {
3471#ifdef OBJ_ELF
3472      if (debug_type == DEBUG_DWARF2)
3473	dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
3474#endif
3475      /* First output the expression in the instruction to the pool.  */
3476      emit_expr (&(pool->literals[entry]),
3477		 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
3478    }
3479
3480  /* Mark the pool as empty.  */
3481  pool->next_free_entry = 0;
3482  pool->symbol = NULL;
3483}
3484
3485#ifdef OBJ_ELF
3486/* Forward declarations for functions below, in the MD interface
3487   section.  */
3488static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3489static valueT create_unwind_entry (int);
3490static void start_unwind_section (const segT, int);
3491static void add_unwind_opcode (valueT, int);
3492static void flush_pending_unwind (void);
3493
3494/* Directives: Data.  */
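
/* The data directives handled below (.word and friends) allow an
   optional relocation suffix on each expression.  For example
   (illustrative only), ".word foo(TARGET1)" emits a 32-bit value for
   "foo" with a TARGET1 relocation instead of the default one.  */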
3495
3496static void
3497s_arm_elf_cons (int nbytes)
3498{
3499  expressionS exp;
3500
3501#ifdef md_flush_pending_output
3502  md_flush_pending_output ();
3503#endif
3504
3505  if (is_it_end_of_statement ())
3506    {
3507      demand_empty_rest_of_line ();
3508      return;
3509    }
3510
3511#ifdef md_cons_align
3512  md_cons_align (nbytes);
3513#endif
3514
3515  mapping_state (MAP_DATA);
3516  do
3517    {
3518      int reloc;
3519      char *base = input_line_pointer;
3520
3521      expression (& exp);
3522
3523      if (exp.X_op != O_symbol)
3524	emit_expr (&exp, (unsigned int) nbytes);
3525      else
3526	{
3527	  char *before_reloc = input_line_pointer;
3528	  reloc = parse_reloc (&input_line_pointer);
3529	  if (reloc == -1)
3530	    {
3531	      as_bad (_("unrecognized relocation suffix"));
3532	      ignore_rest_of_line ();
3533	      return;
3534	    }
3535	  else if (reloc == BFD_RELOC_UNUSED)
3536	    emit_expr (&exp, (unsigned int) nbytes);
3537	  else
3538	    {
3539	      reloc_howto_type *howto = (reloc_howto_type *)
3540		  bfd_reloc_type_lookup (stdoutput,
3541					 (bfd_reloc_code_real_type) reloc);
3542	      int size = bfd_get_reloc_size (howto);
3543
3544	      if (reloc == BFD_RELOC_ARM_PLT32)
3545		{
3546		  as_bad (_("(plt) is only valid on branch targets"));
3547		  reloc = BFD_RELOC_UNUSED;
3548		  size = 0;
3549		}
3550
3551	      if (size > nbytes)
3552		as_bad (_("%s relocations do not fit in %d bytes"),
3553			howto->name, nbytes);
3554	      else
3555		{
3556		  /* We've parsed an expression stopping at O_symbol.
3557		     But there may be more expression left now that we
3558		     have parsed the relocation marker.  Parse it again.
3559		     XXX Surely there is a cleaner way to do this.  */
3560		  char *p = input_line_pointer;
3561		  int offset;
3562		  char *save_buf = XNEWVEC (char, input_line_pointer - base);
3563
3564		  memcpy (save_buf, base, input_line_pointer - base);
3565		  memmove (base + (input_line_pointer - before_reloc),
3566			   base, before_reloc - base);
3567
3568		  input_line_pointer = base + (input_line_pointer-before_reloc);
3569		  expression (&exp);
3570		  memcpy (base, save_buf, p - base);
3571
3572		  offset = nbytes - size;
3573		  p = frag_more (nbytes);
3574		  memset (p, 0, nbytes);
3575		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
3576			       size, &exp, 0, (enum bfd_reloc_code_real) reloc);
3577		  free (save_buf);
3578		}
3579	    }
3580	}
3581    }
3582  while (*input_line_pointer++ == ',');
3583
3584  /* Put terminator back into stream.  */
3585  input_line_pointer --;
3586  demand_empty_rest_of_line ();
3587}
3588
3589/* Emit an expression containing a 32-bit thumb instruction.
3590   Implementation based on put_thumb32_insn.  */
3591
3592static void
3593emit_thumb32_expr (expressionS * exp)
3594{
3595  expressionS exp_high = *exp;
3596
3597  exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3598  emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3599  exp->X_add_number &= 0xffff;
3600  emit_expr (exp, (unsigned int) THUMB_SIZE);
3601}
3602
3603/*  Guess the instruction size based on the opcode.  */
3604
3605static int
3606thumb_insn_size (int opcode)
3607{
3608  if ((unsigned int) opcode < 0xe800u)
3609    return 2;
3610  else if ((unsigned int) opcode >= 0xe8000000u)
3611    return 4;
3612  else
3613    return 0;
3614}
3615
3616static bfd_boolean
3617emit_insn (expressionS *exp, int nbytes)
3618{
3619  int size = 0;
3620
3621  if (exp->X_op == O_constant)
3622    {
3623      size = nbytes;
3624
3625      if (size == 0)
3626	size = thumb_insn_size (exp->X_add_number);
3627
3628      if (size != 0)
3629	{
3630	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
3631	    {
3632	      as_bad (_(".inst.n operand too big. "\
3633			"Use .inst.w instead"));
3634	      size = 0;
3635	    }
3636	  else
3637	    {
3638	      if (now_it.state == AUTOMATIC_IT_BLOCK)
3639		set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
3640	      else
3641		set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);
3642
3643	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
3644		emit_thumb32_expr (exp);
3645	      else
3646		emit_expr (exp, (unsigned int) size);
3647
3648	      it_fsm_post_encode ();
3649	    }
3650	}
3651      else
3652	as_bad (_("cannot determine Thumb instruction size. "	\
3653		  "Use .inst.n/.inst.w instead"));
3654    }
3655  else
3656    as_bad (_("constant expression required"));
3657
3658  return (size != 0);
3659}
3660
3661/* Like s_arm_elf_cons but do not use md_cons_align and
3662   set the mapping state to MAP_ARM/MAP_THUMB.  */
3663
3664static void
3665s_arm_elf_inst (int nbytes)
3666{
3667  if (is_it_end_of_statement ())
3668    {
3669      demand_empty_rest_of_line ();
3670      return;
3671    }
3672
  /* Calling mapping_state () here will not change ARM/THUMB,
     but will ensure we are not in DATA state.  */
3675
3676  if (thumb_mode)
3677    mapping_state (MAP_THUMB);
3678  else
3679    {
3680      if (nbytes != 0)
3681	{
3682	  as_bad (_("width suffixes are invalid in ARM mode"));
3683	  ignore_rest_of_line ();
3684	  return;
3685	}
3686
3687      nbytes = 4;
3688
3689      mapping_state (MAP_ARM);
3690    }
3691
3692  do
3693    {
3694      expressionS exp;
3695
3696      expression (& exp);
3697
3698      if (! emit_insn (& exp, nbytes))
3699	{
3700	  ignore_rest_of_line ();
3701	  return;
3702	}
3703    }
3704  while (*input_line_pointer++ == ',');
3705
3706  /* Put terminator back into stream.  */
3707  input_line_pointer --;
3708  demand_empty_rest_of_line ();
3709}
3710
3711/* Parse a .rel31 directive.  */
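/* For example (illustrative only): ".rel31 1, handler" emits a 32-bit
   word whose top bit is set and whose low 31 bits carry a
   self-relative (BFD_RELOC_ARM_PREL31) reference to "handler".  */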
3712
3713static void
3714s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
3715{
3716  expressionS exp;
3717  char *p;
3718  valueT highbit;
3719
3720  highbit = 0;
3721  if (*input_line_pointer == '1')
3722    highbit = 0x80000000;
3723  else if (*input_line_pointer != '0')
3724    as_bad (_("expected 0 or 1"));
3725
3726  input_line_pointer++;
3727  if (*input_line_pointer != ',')
3728    as_bad (_("missing comma"));
3729  input_line_pointer++;
3730
3731#ifdef md_flush_pending_output
3732  md_flush_pending_output ();
3733#endif
3734
3735#ifdef md_cons_align
3736  md_cons_align (4);
3737#endif
3738
3739  mapping_state (MAP_DATA);
3740
3741  expression (&exp);
3742
3743  p = frag_more (4);
3744  md_number_to_chars (p, highbit, 4);
3745  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
3746	       BFD_RELOC_ARM_PREL31);
3747
3748  demand_empty_rest_of_line ();
3749}
3750
3751/* Directives: AEABI stack-unwind tables.  */
3752
3753/* Parse an unwind_fnstart directive.  Simply records the current location.  */
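/* A typical sequence (illustrative only):
       .fnstart
       push  {r4, lr}
       .save {r4, lr}
       ...
       pop   {r4, pc}
       .fnend
   .fnstart records the start address in unwind.proc_start and resets
   the rest of the per-function unwind state.  */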
3754
3755static void
3756s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3757{
3758  demand_empty_rest_of_line ();
3759  if (unwind.proc_start)
3760    {
3761      as_bad (_("duplicate .fnstart directive"));
3762      return;
3763    }
3764
3765  /* Mark the start of the function.  */
3766  unwind.proc_start = expr_build_dot ();
3767
3768  /* Reset the rest of the unwind info.	 */
3769  unwind.opcode_count = 0;
3770  unwind.table_entry = NULL;
3771  unwind.personality_routine = NULL;
3772  unwind.personality_index = -1;
3773  unwind.frame_size = 0;
3774  unwind.fp_offset = 0;
3775  unwind.fp_reg = REG_SP;
3776  unwind.fp_used = 0;
3777  unwind.sp_restored = 0;
3778}
3779
3780
3781/* Parse a handlerdata directive.  Creates the exception handling table entry
3782   for the function.  */
3783
3784static void
3785s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3786{
3787  demand_empty_rest_of_line ();
3788  if (!unwind.proc_start)
3789    as_bad (MISSING_FNSTART);
3790
3791  if (unwind.table_entry)
3792    as_bad (_("duplicate .handlerdata directive"));
3793
3794  create_unwind_entry (1);
3795}
3796
3797/* Parse an unwind_fnend directive.  Generates the index table entry.  */
3798
3799static void
3800s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
3801{
3802  long where;
3803  char *ptr;
3804  valueT val;
3805  unsigned int marked_pr_dependency;
3806
3807  demand_empty_rest_of_line ();
3808
3809  if (!unwind.proc_start)
3810    {
3811      as_bad (_(".fnend directive without .fnstart"));
3812      return;
3813    }
3814
3815  /* Add eh table entry.  */
3816  if (unwind.table_entry == NULL)
3817    val = create_unwind_entry (0);
3818  else
3819    val = 0;
3820
3821  /* Add index table entry.  This is two words.	 */
3822  start_unwind_section (unwind.saved_seg, 1);
3823  frag_align (2, 0, 0);
3824  record_alignment (now_seg, 2);
3825
3826  ptr = frag_more (8);
3827  memset (ptr, 0, 8);
3828  where = frag_now_fix () - 8;
3829
3830  /* Self relative offset of the function start.  */
3831  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
3832	   BFD_RELOC_ARM_PREL31);
3833
3834  /* Indicate dependency on EHABI-defined personality routines to the
3835     linker, if it hasn't been done already.  */
3836  marked_pr_dependency
3837    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
3838  if (unwind.personality_index >= 0 && unwind.personality_index < 3
3839      && !(marked_pr_dependency & (1 << unwind.personality_index)))
3840    {
3841      static const char *const name[] =
3842	{
3843	  "__aeabi_unwind_cpp_pr0",
3844	  "__aeabi_unwind_cpp_pr1",
3845	  "__aeabi_unwind_cpp_pr2"
3846	};
3847      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
3848      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
3849      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
3850	|= 1 << unwind.personality_index;
3851    }
3852
3853  if (val)
3854    /* Inline exception table entry.  */
3855    md_number_to_chars (ptr + 4, val, 4);
3856  else
3857    /* Self relative offset of the table entry.	 */
3858    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
3859	     BFD_RELOC_ARM_PREL31);
3860
3861  /* Restore the original section.  */
3862  subseg_set (unwind.saved_seg, unwind.saved_subseg);
3863
3864  unwind.proc_start = NULL;
3865}
3866
3867
3868/* Parse an unwind_cantunwind directive.  */
3869
3870static void
3871s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3872{
3873  demand_empty_rest_of_line ();
3874  if (!unwind.proc_start)
3875    as_bad (MISSING_FNSTART);
3876
3877  if (unwind.personality_routine || unwind.personality_index != -1)
3878    as_bad (_("personality routine specified for cantunwind frame"));
3879
3880  unwind.personality_index = -2;
3881}
3882
3883
3884/* Parse a personalityindex directive.	*/
3885
3886static void
3887s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3888{
3889  expressionS exp;
3890
3891  if (!unwind.proc_start)
3892    as_bad (MISSING_FNSTART);
3893
3894  if (unwind.personality_routine || unwind.personality_index != -1)
3895    as_bad (_("duplicate .personalityindex directive"));
3896
3897  expression (&exp);
3898
3899  if (exp.X_op != O_constant
3900      || exp.X_add_number < 0 || exp.X_add_number > 15)
3901    {
3902      as_bad (_("bad personality routine number"));
3903      ignore_rest_of_line ();
3904      return;
3905    }
3906
3907  unwind.personality_index = exp.X_add_number;
3908
3909  demand_empty_rest_of_line ();
3910}
3911
3912
3913/* Parse a personality directive.  */
3914
3915static void
3916s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
3917{
3918  char *name, *p, c;
3919
3920  if (!unwind.proc_start)
3921    as_bad (MISSING_FNSTART);
3922
3923  if (unwind.personality_routine || unwind.personality_index != -1)
3924    as_bad (_("duplicate .personality directive"));
3925
3926  c = get_symbol_name (& name);
3927  p = input_line_pointer;
3928  if (c == '"')
3929    ++ input_line_pointer;
3930  unwind.personality_routine = symbol_find_or_make (name);
3931  *p = c;
3932  demand_empty_rest_of_line ();
3933}
3934
3935
3936/* Parse a directive saving core registers.  */
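/* A worked example of the opcode selection below (illustrative):
   ".save {r4-r7, lr}" sets bits 4-7 and 14 of the register mask; four
   consecutive registers starting at r4 and no other high registers means
   the short form applies, giving the single opcode byte
   0xa8 | (4 - 1) == 0xab, i.e. "pop r4-r7, r14".  */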
3937
3938static void
3939s_arm_unwind_save_core (void)
3940{
3941  valueT op;
3942  long range;
3943  int n;
3944
3945  range = parse_reg_list (&input_line_pointer);
3946  if (range == FAIL)
3947    {
3948      as_bad (_("expected register list"));
3949      ignore_rest_of_line ();
3950      return;
3951    }
3952
3953  demand_empty_rest_of_line ();
3954
  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp, ...}.  We aren't bothered about the value
     of ip because it is clobbered by calls.  */
3958  if (unwind.sp_restored && unwind.fp_reg == 12
3959      && (range & 0x3000) == 0x1000)
3960    {
3961      unwind.opcode_count--;
3962      unwind.sp_restored = 0;
3963      range = (range | 0x2000) & ~0x1000;
3964      unwind.pending_offset = 0;
3965    }
3966
3967  /* Pop r4-r15.  */
3968  if (range & 0xfff0)
3969    {
3970      /* See if we can use the short opcodes.  These pop a block of up to 8
3971	 registers starting with r4, plus maybe r14.  */
3972      for (n = 0; n < 8; n++)
3973	{
3974	  /* Break at the first non-saved register.	 */
3975	  if ((range & (1 << (n + 4))) == 0)
3976	    break;
3977	}
3978      /* See if there are any other bits set.  */
3979      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
3980	{
3981	  /* Use the long form.  */
3982	  op = 0x8000 | ((range >> 4) & 0xfff);
3983	  add_unwind_opcode (op, 2);
3984	}
3985      else
3986	{
3987	  /* Use the short form.  */
3988	  if (range & 0x4000)
3989	    op = 0xa8; /* Pop r14.	*/
3990	  else
3991	    op = 0xa0; /* Do not pop r14.  */
3992	  op |= (n - 1);
3993	  add_unwind_opcode (op, 1);
3994	}
3995    }
3996
3997  /* Pop r0-r3.	 */
3998  if (range & 0xf)
3999    {
4000      op = 0xb100 | (range & 0xf);
4001      add_unwind_opcode (op, 2);
4002    }
4003
4004  /* Record the number of bytes pushed.	 */
4005  for (n = 0; n < 16; n++)
4006    {
4007      if (range & (1 << n))
4008	unwind.frame_size += 4;
4009    }
4010}
4011
4012
4013/* Parse a directive saving FPA registers.  */
4014
4015static void
4016s_arm_unwind_save_fpa (int reg)
4017{
4018  expressionS exp;
4019  int num_regs;
4020  valueT op;
4021
  /* Get the number of registers to transfer.  */
4023  if (skip_past_comma (&input_line_pointer) != FAIL)
4024    expression (&exp);
4025  else
4026    exp.X_op = O_illegal;
4027
4028  if (exp.X_op != O_constant)
4029    {
4030      as_bad (_("expected , <constant>"));
4031      ignore_rest_of_line ();
4032      return;
4033    }
4034
4035  num_regs = exp.X_add_number;
4036
4037  if (num_regs < 1 || num_regs > 4)
4038    {
4039      as_bad (_("number of registers must be in the range [1:4]"));
4040      ignore_rest_of_line ();
4041      return;
4042    }
4043
4044  demand_empty_rest_of_line ();
4045
4046  if (reg == 4)
4047    {
4048      /* Short form.  */
4049      op = 0xb4 | (num_regs - 1);
4050      add_unwind_opcode (op, 1);
4051    }
4052  else
4053    {
4054      /* Long form.  */
4055      op = 0xc800 | (reg << 4) | (num_regs - 1);
4056      add_unwind_opcode (op, 2);
4057    }
4058  unwind.frame_size += num_regs * 12;
4059}
4060
4061
4062/* Parse a directive saving VFP registers for ARMv6 and above.  */
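/* A worked example (illustrative): ".vsave {d8-d11}" parses as a list
   starting at d8 with a count of 4; no register is d16 or above, so a
   single 0xc9-form opcode is emitted, 0xc900 | (8 << 4) | (4 - 1) == 0xc983,
   and the recorded frame size grows by 4 * 8 bytes.  */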
4063
4064static void
4065s_arm_unwind_save_vfp_armv6 (void)
4066{
4067  int count;
4068  unsigned int start;
4069  valueT op;
4070  int num_vfpv3_regs = 0;
4071  int num_regs_below_16;
4072
4073  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
4074  if (count == FAIL)
4075    {
4076      as_bad (_("expected register list"));
4077      ignore_rest_of_line ();
4078      return;
4079    }
4080
4081  demand_empty_rest_of_line ();
4082
4083  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
4084     than FSTMX/FLDMX-style ones).  */
4085
4086  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
4087  if (start >= 16)
4088    num_vfpv3_regs = count;
4089  else if (start + count > 16)
4090    num_vfpv3_regs = start + count - 16;
4091
4092  if (num_vfpv3_regs > 0)
4093    {
4094      int start_offset = start > 16 ? start - 16 : 0;
4095      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
4096      add_unwind_opcode (op, 2);
4097    }
4098
4099  /* Generate opcode for registers numbered in the range 0 .. 15.  */
4100  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
4101  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
4102  if (num_regs_below_16 > 0)
4103    {
4104      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
4105      add_unwind_opcode (op, 2);
4106    }
4107
4108  unwind.frame_size += count * 8;
4109}
4110
4111
4112/* Parse a directive saving VFP registers for pre-ARMv6.  */
4113
4114static void
4115s_arm_unwind_save_vfp (void)
4116{
4117  int count;
4118  unsigned int reg;
4119  valueT op;
4120
4121  count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
4122  if (count == FAIL)
4123    {
4124      as_bad (_("expected register list"));
4125      ignore_rest_of_line ();
4126      return;
4127    }
4128
4129  demand_empty_rest_of_line ();
4130
4131  if (reg == 8)
4132    {
4133      /* Short form.  */
4134      op = 0xb8 | (count - 1);
4135      add_unwind_opcode (op, 1);
4136    }
4137  else
4138    {
4139      /* Long form.  */
4140      op = 0xb300 | (reg << 4) | (count - 1);
4141      add_unwind_opcode (op, 2);
4142    }
4143  unwind.frame_size += count * 8 + 4;
4144}
4145
4146
4147/* Parse a directive saving iWMMXt data registers.  */
4148
4149static void
4150s_arm_unwind_save_mmxwr (void)
4151{
4152  int reg;
4153  int hi_reg;
4154  int i;
4155  unsigned mask = 0;
4156  valueT op;
4157
4158  if (*input_line_pointer == '{')
4159    input_line_pointer++;
4160
4161  do
4162    {
4163      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4164
4165      if (reg == FAIL)
4166	{
4167	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4168	  goto error;
4169	}
4170
4171      if (mask >> reg)
4172	as_tsktsk (_("register list not in ascending order"));
4173      mask |= 1 << reg;
4174
4175      if (*input_line_pointer == '-')
4176	{
4177	  input_line_pointer++;
4178	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4179	  if (hi_reg == FAIL)
4180	    {
4181	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4182	      goto error;
4183	    }
4184	  else if (reg >= hi_reg)
4185	    {
4186	      as_bad (_("bad register range"));
4187	      goto error;
4188	    }
4189	  for (; reg < hi_reg; reg++)
4190	    mask |= 1 << reg;
4191	}
4192    }
4193  while (skip_past_comma (&input_line_pointer) != FAIL);
4194
4195  skip_past_char (&input_line_pointer, '}');
4196
4197  demand_empty_rest_of_line ();
4198
4199  /* Generate any deferred opcodes because we're going to be looking at
4200     the list.	*/
4201  flush_pending_unwind ();
4202
4203  for (i = 0; i < 16; i++)
4204    {
4205      if (mask & (1 << i))
4206	unwind.frame_size += 8;
4207    }
4208
4209  /* Attempt to combine with a previous opcode.	 We do this because gcc
4210     likes to output separate unwind directives for a single block of
4211     registers.	 */
4212  if (unwind.opcode_count > 0)
4213    {
4214      i = unwind.opcodes[unwind.opcode_count - 1];
4215      if ((i & 0xf8) == 0xc0)
4216	{
4217	  i &= 7;
4218	  /* Only merge if the blocks are contiguous.  */
4219	  if (i < 6)
4220	    {
4221	      if ((mask & 0xfe00) == (1 << 9))
4222		{
4223		  mask |= ((1 << (i + 11)) - 1) & 0xfc00;
4224		  unwind.opcode_count--;
4225		}
4226	    }
4227	  else if (i == 6 && unwind.opcode_count >= 2)
4228	    {
4229	      i = unwind.opcodes[unwind.opcode_count - 2];
4230	      reg = i >> 4;
4231	      i &= 0xf;
4232
	      /* Check REG first; shifting by (reg - 1) would be undefined
		 behaviour for reg == 0.  */
	      if (reg > 0
		  && ((mask & (0xffffu << (reg - 1)))
		      == (1u << (reg - 1))))
4236		{
4237		  op = (1 << (reg + i + 1)) - 1;
4238		  op &= ~((1 << reg) - 1);
4239		  mask |= op;
4240		  unwind.opcode_count -= 2;
4241		}
4242	    }
4243	}
4244    }
4245
4246  hi_reg = 15;
  /* We want to generate opcodes in the order the registers have been
     saved, i.e. descending order.  */
4249  for (reg = 15; reg >= -1; reg--)
4250    {
4251      /* Save registers in blocks.  */
4252      if (reg < 0
4253	  || !(mask & (1 << reg)))
4254	{
4255	  /* We found an unsaved reg.  Generate opcodes to save the
4256	     preceding block.	*/
4257	  if (reg != hi_reg)
4258	    {
4259	      if (reg == 9)
4260		{
4261		  /* Short form.  */
4262		  op = 0xc0 | (hi_reg - 10);
4263		  add_unwind_opcode (op, 1);
4264		}
4265	      else
4266		{
4267		  /* Long form.	 */
4268		  op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
4269		  add_unwind_opcode (op, 2);
4270		}
4271	    }
4272	  hi_reg = reg - 1;
4273	}
4274    }
4275
4276  return;
4277error:
4278  ignore_rest_of_line ();
4279}
4280
4281static void
4282s_arm_unwind_save_mmxwcg (void)
4283{
4284  int reg;
4285  int hi_reg;
4286  unsigned mask = 0;
4287  valueT op;
4288
4289  if (*input_line_pointer == '{')
4290    input_line_pointer++;
4291
4292  skip_whitespace (input_line_pointer);
4293
4294  do
4295    {
4296      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4297
4298      if (reg == FAIL)
4299	{
4300	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4301	  goto error;
4302	}
4303
4304      reg -= 8;
4305      if (mask >> reg)
4306	as_tsktsk (_("register list not in ascending order"));
4307      mask |= 1 << reg;
4308
4309      if (*input_line_pointer == '-')
4310	{
4311	  input_line_pointer++;
4312	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4313	  if (hi_reg == FAIL)
4314	    {
4315	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4316	      goto error;
4317	    }
4318	  else if (reg >= hi_reg)
4319	    {
4320	      as_bad (_("bad register range"));
4321	      goto error;
4322	    }
4323	  for (; reg < hi_reg; reg++)
4324	    mask |= 1 << reg;
4325	}
4326    }
4327  while (skip_past_comma (&input_line_pointer) != FAIL);
4328
4329  skip_past_char (&input_line_pointer, '}');
4330
4331  demand_empty_rest_of_line ();
4332
4333  /* Generate any deferred opcodes because we're going to be looking at
4334     the list.	*/
4335  flush_pending_unwind ();
4336
4337  for (reg = 0; reg < 16; reg++)
4338    {
4339      if (mask & (1 << reg))
4340	unwind.frame_size += 4;
4341    }
4342  op = 0xc700 | mask;
4343  add_unwind_opcode (op, 2);
4344  return;
4345error:
4346  ignore_rest_of_line ();
4347}
4348
4349
4350/* Parse an unwind_save directive.
4351   If the argument is non-zero, this is a .vsave directive.  */
4352
4353static void
4354s_arm_unwind_save (int arch_v6)
4355{
4356  char *peek;
4357  struct reg_entry *reg;
4358  bfd_boolean had_brace = FALSE;
4359
4360  if (!unwind.proc_start)
4361    as_bad (MISSING_FNSTART);
4362
4363  /* Figure out what sort of save we have.  */
4364  peek = input_line_pointer;
4365
4366  if (*peek == '{')
4367    {
4368      had_brace = TRUE;
4369      peek++;
4370    }
4371
4372  reg = arm_reg_parse_multi (&peek);
4373
4374  if (!reg)
4375    {
4376      as_bad (_("register expected"));
4377      ignore_rest_of_line ();
4378      return;
4379    }
4380
4381  switch (reg->type)
4382    {
4383    case REG_TYPE_FN:
4384      if (had_brace)
4385	{
4386	  as_bad (_("FPA .unwind_save does not take a register list"));
4387	  ignore_rest_of_line ();
4388	  return;
4389	}
4390      input_line_pointer = peek;
4391      s_arm_unwind_save_fpa (reg->number);
4392      return;
4393
4394    case REG_TYPE_RN:
4395      s_arm_unwind_save_core ();
4396      return;
4397
4398    case REG_TYPE_VFD:
4399      if (arch_v6)
4400	s_arm_unwind_save_vfp_armv6 ();
4401      else
4402	s_arm_unwind_save_vfp ();
4403      return;
4404
4405    case REG_TYPE_MMXWR:
4406      s_arm_unwind_save_mmxwr ();
4407      return;
4408
4409    case REG_TYPE_MMXWCG:
4410      s_arm_unwind_save_mmxwcg ();
4411      return;
4412
4413    default:
4414      as_bad (_(".unwind_save does not support this kind of register"));
4415      ignore_rest_of_line ();
4416    }
4417}
4418
4419
4420/* Parse an unwind_movsp directive.  */
4421
4422static void
4423s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
4424{
4425  int reg;
4426  valueT op;
4427  int offset;
4428
4429  if (!unwind.proc_start)
4430    as_bad (MISSING_FNSTART);
4431
4432  reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4433  if (reg == FAIL)
4434    {
4435      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
4436      ignore_rest_of_line ();
4437      return;
4438    }
4439
4440  /* Optional constant.	 */
4441  if (skip_past_comma (&input_line_pointer) != FAIL)
4442    {
4443      if (immediate_for_directive (&offset) == FAIL)
4444	return;
4445    }
4446  else
4447    offset = 0;
4448
4449  demand_empty_rest_of_line ();
4450
4451  if (reg == REG_SP || reg == REG_PC)
4452    {
4453      as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4454      return;
4455    }
4456
4457  if (unwind.fp_reg != REG_SP)
4458    as_bad (_("unexpected .unwind_movsp directive"));
4459
4460  /* Generate opcode to restore the value.  */
4461  op = 0x90 | reg;
4462  add_unwind_opcode (op, 1);
4463
4464  /* Record the information for later.	*/
4465  unwind.fp_reg = reg;
4466  unwind.fp_offset = unwind.frame_size - offset;
4467  unwind.sp_restored = 1;
4468}
4469
4470/* Parse an unwind_pad directive.  */
4471
4472static void
4473s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4474{
4475  int offset;
4476
4477  if (!unwind.proc_start)
4478    as_bad (MISSING_FNSTART);
4479
4480  if (immediate_for_directive (&offset) == FAIL)
4481    return;
4482
4483  if (offset & 3)
4484    {
4485      as_bad (_("stack increment must be multiple of 4"));
4486      ignore_rest_of_line ();
4487      return;
4488    }
4489
4490  /* Don't generate any opcodes, just record the details for later.  */
4491  unwind.frame_size += offset;
4492  unwind.pending_offset += offset;
4493
4494  demand_empty_rest_of_line ();
4495}
4496
4497/* Parse an unwind_setfp directive.  */
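/* For example (illustrative): ".setfp fp, sp, #8" records fp (r11) as the
   frame pointer with an fp_offset of frame_size - 8; as noted below, no
   opcode is emitted at this point, the values are only recorded.  */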
4498
4499static void
4500s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4501{
4502  int sp_reg;
4503  int fp_reg;
4504  int offset;
4505
4506  if (!unwind.proc_start)
4507    as_bad (MISSING_FNSTART);
4508
4509  fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4510  if (skip_past_comma (&input_line_pointer) == FAIL)
4511    sp_reg = FAIL;
4512  else
4513    sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4514
4515  if (fp_reg == FAIL || sp_reg == FAIL)
4516    {
4517      as_bad (_("expected <reg>, <reg>"));
4518      ignore_rest_of_line ();
4519      return;
4520    }
4521
4522  /* Optional constant.	 */
4523  if (skip_past_comma (&input_line_pointer) != FAIL)
4524    {
4525      if (immediate_for_directive (&offset) == FAIL)
4526	return;
4527    }
4528  else
4529    offset = 0;
4530
4531  demand_empty_rest_of_line ();
4532
4533  if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4534    {
4535      as_bad (_("register must be either sp or set by a previous"
4536		"unwind_movsp directive"));
4537      return;
4538    }
4539
4540  /* Don't generate any opcodes, just record the information for later.	 */
4541  unwind.fp_reg = fp_reg;
4542  unwind.fp_used = 1;
4543  if (sp_reg == REG_SP)
4544    unwind.fp_offset = unwind.frame_size - offset;
4545  else
4546    unwind.fp_offset -= offset;
4547}
4548
4549/* Parse an unwind_raw directive.  */
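/* For example (illustrative, with hypothetical operand values):
   ".unwind_raw 4, 0xb1, 0x01" adds 4 bytes to the recorded frame size and
   records the raw EHABI opcode bytes 0xb1 0x01 (pop {r0}) for the current
   frame.  */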
4550
4551static void
4552s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4553{
4554  expressionS exp;
4555  /* This is an arbitrary limit.	 */
4556  unsigned char op[16];
4557  int count;
4558
4559  if (!unwind.proc_start)
4560    as_bad (MISSING_FNSTART);
4561
4562  expression (&exp);
4563  if (exp.X_op == O_constant
4564      && skip_past_comma (&input_line_pointer) != FAIL)
4565    {
4566      unwind.frame_size += exp.X_add_number;
4567      expression (&exp);
4568    }
4569  else
4570    exp.X_op = O_illegal;
4571
4572  if (exp.X_op != O_constant)
4573    {
4574      as_bad (_("expected <offset>, <opcode>"));
4575      ignore_rest_of_line ();
4576      return;
4577    }
4578
4579  count = 0;
4580
4581  /* Parse the opcode.	*/
4582  for (;;)
4583    {
      if (count >= 16)
	{
	  as_bad (_("unwind opcode too long"));
	  ignore_rest_of_line ();
	  return;
	}
4589      if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4590	{
4591	  as_bad (_("invalid unwind opcode"));
4592	  ignore_rest_of_line ();
4593	  return;
4594	}
4595      op[count++] = exp.X_add_number;
4596
4597      /* Parse the next byte.  */
4598      if (skip_past_comma (&input_line_pointer) == FAIL)
4599	break;
4600
4601      expression (&exp);
4602    }
4603
4604  /* Add the opcode bytes in reverse order.  */
4605  while (count--)
4606    add_unwind_opcode (op[count], 1);
4607
4608  demand_empty_rest_of_line ();
4609}
4610
4611
4612/* Parse a .eabi_attribute directive.  */
4613
4614static void
4615s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4616{
4617  int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
4618
4619  if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4620    attributes_set_explicitly[tag] = 1;
4621}
4622
4623/* Emit a tls fix for the symbol.  */
4624
4625static void
4626s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
4627{
4628  char *p;
4629  expressionS exp;
4630#ifdef md_flush_pending_output
4631  md_flush_pending_output ();
4632#endif
4633
4634#ifdef md_cons_align
4635  md_cons_align (4);
4636#endif
4637
4638  /* Since we're just labelling the code, there's no need to define a
4639     mapping symbol.  */
4640  expression (&exp);
4641  p = obstack_next_free (&frchain_now->frch_obstack);
4642  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
4643	       thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
4644	       : BFD_RELOC_ARM_TLS_DESCSEQ);
4645}
4646#endif /* OBJ_ELF */
4647
4648static void s_arm_arch (int);
4649static void s_arm_object_arch (int);
4650static void s_arm_cpu (int);
4651static void s_arm_fpu (int);
4652static void s_arm_arch_extension (int);
4653
4654#ifdef TE_PE
4655
4656static void
4657pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4658{
4659  expressionS exp;
4660
4661  do
4662    {
4663      expression (&exp);
4664      if (exp.X_op == O_symbol)
4665	exp.X_op = O_secrel;
4666
4667      emit_expr (&exp, 4);
4668    }
4669  while (*input_line_pointer++ == ',');
4670
4671  input_line_pointer--;
4672  demand_empty_rest_of_line ();
4673}
4674#endif /* TE_PE */
4675
4676/* This table describes all the machine specific pseudo-ops the assembler
4677   has to support.  The fields are:
4678     pseudo-op name without dot
4679     function to call to execute this pseudo-op
4680     Integer arg to pass to the function.  */
4681
4682const pseudo_typeS md_pseudo_table[] =
4683{
4684  /* Never called because '.req' does not start a line.	 */
4685  { "req",	   s_req,	  0 },
4686  /* Following two are likewise never called.  */
4687  { "dn",	   s_dn,          0 },
4688  { "qn",          s_qn,          0 },
4689  { "unreq",	   s_unreq,	  0 },
4690  { "bss",	   s_bss,	  0 },
4691  { "align",	   s_align_ptwo,  2 },
4692  { "arm",	   s_arm,	  0 },
4693  { "thumb",	   s_thumb,	  0 },
4694  { "code",	   s_code,	  0 },
4695  { "force_thumb", s_force_thumb, 0 },
4696  { "thumb_func",  s_thumb_func,  0 },
4697  { "thumb_set",   s_thumb_set,	  0 },
4698  { "even",	   s_even,	  0 },
4699  { "ltorg",	   s_ltorg,	  0 },
4700  { "pool",	   s_ltorg,	  0 },
4701  { "syntax",	   s_syntax,	  0 },
4702  { "cpu",	   s_arm_cpu,	  0 },
4703  { "arch",	   s_arm_arch,	  0 },
4704  { "object_arch", s_arm_object_arch,	0 },
4705  { "fpu",	   s_arm_fpu,	  0 },
4706  { "arch_extension", s_arm_arch_extension, 0 },
4707#ifdef OBJ_ELF
4708  { "word",	        s_arm_elf_cons, 4 },
4709  { "long",	        s_arm_elf_cons, 4 },
4710  { "inst.n",           s_arm_elf_inst, 2 },
4711  { "inst.w",           s_arm_elf_inst, 4 },
4712  { "inst",             s_arm_elf_inst, 0 },
4713  { "rel31",	        s_arm_rel31,	  0 },
4714  { "fnstart",		s_arm_unwind_fnstart,	0 },
4715  { "fnend",		s_arm_unwind_fnend,	0 },
4716  { "cantunwind",	s_arm_unwind_cantunwind, 0 },
4717  { "personality",	s_arm_unwind_personality, 0 },
4718  { "personalityindex",	s_arm_unwind_personalityindex, 0 },
4719  { "handlerdata",	s_arm_unwind_handlerdata, 0 },
4720  { "save",		s_arm_unwind_save,	0 },
4721  { "vsave",		s_arm_unwind_save,	1 },
4722  { "movsp",		s_arm_unwind_movsp,	0 },
4723  { "pad",		s_arm_unwind_pad,	0 },
4724  { "setfp",		s_arm_unwind_setfp,	0 },
4725  { "unwind_raw",	s_arm_unwind_raw,	0 },
4726  { "eabi_attribute",	s_arm_eabi_attribute,	0 },
4727  { "tlsdescseq",	s_arm_tls_descseq,      0 },
4728#else
4729  { "word",	   cons, 4},
4730
4731  /* These are used for dwarf.  */
4732  {"2byte", cons, 2},
4733  {"4byte", cons, 4},
4734  {"8byte", cons, 8},
4735  /* These are used for dwarf2.  */
4736  { "file", (void (*) (int)) dwarf2_directive_file, 0 },
4737  { "loc",  dwarf2_directive_loc,  0 },
4738  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
4739#endif
4740  { "extend",	   float_cons, 'x' },
4741  { "ldouble",	   float_cons, 'x' },
4742  { "packed",	   float_cons, 'p' },
4743#ifdef TE_PE
4744  {"secrel32", pe_directive_secrel, 0},
4745#endif
4746
4747  /* These are for compatibility with CodeComposer Studio.  */
4748  {"ref",          s_ccs_ref,        0},
4749  {"def",          s_ccs_def,        0},
4750  {"asmfunc",      s_ccs_asmfunc,    0},
4751  {"endasmfunc",   s_ccs_endasmfunc, 0},
4752
4753  { 0, 0, 0 }
4754};
4755
4756/* Parser functions used exclusively in instruction operands.  */
4757
4758/* Generic immediate-value read function for use in insn parsing.
4759   STR points to the beginning of the immediate (the leading #);
4760   VAL receives the value; if the value is outside [MIN, MAX]
4761   issue an error.  PREFIX_OPT is true if the immediate prefix is
4762   optional.  */
4763
4764static int
4765parse_immediate (char **str, int *val, int min, int max,
4766		 bfd_boolean prefix_opt)
4767{
4768  expressionS exp;
4769  my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4770  if (exp.X_op != O_constant)
4771    {
4772      inst.error = _("constant expression required");
4773      return FAIL;
4774    }
4775
4776  if (exp.X_add_number < min || exp.X_add_number > max)
4777    {
4778      inst.error = _("immediate value out of range");
4779      return FAIL;
4780    }
4781
4782  *val = exp.X_add_number;
4783  return SUCCESS;
4784}
4785
4786/* Less-generic immediate-value read function with the possibility of loading a
4787   big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4788   instructions. Puts the result directly in inst.operands[i].  */
4789
4790static int
4791parse_big_immediate (char **str, int i, expressionS *in_exp,
4792		     bfd_boolean allow_symbol_p)
4793{
4794  expressionS exp;
4795  expressionS *exp_p = in_exp ? in_exp : &exp;
4796  char *ptr = *str;
4797
4798  my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);
4799
4800  if (exp_p->X_op == O_constant)
4801    {
4802      inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
4803      /* If we're on a 64-bit host, then a 64-bit number can be returned using
4804	 O_constant.  We have to be careful not to break compilation for
4805	 32-bit X_add_number, though.  */
4806      if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
4807	{
4808	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.  */
4809	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
4810				  & 0xffffffff);
4811	  inst.operands[i].regisimm = 1;
4812	}
4813    }
4814  else if (exp_p->X_op == O_big
4815	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
4816    {
4817      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
4818
4819      /* Bignums have their least significant bits in
4820	 generic_bignum[0]. Make sure we put 32 bits in imm and
4821	 32 bits in reg,  in a (hopefully) portable way.  */
4822      gas_assert (parts != 0);
4823
4824      /* Make sure that the number is not too big.
4825	 PR 11972: Bignums can now be sign-extended to the
4826	 size of a .octa so check that the out of range bits
4827	 are all zero or all one.  */
4828      if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
4829	{
4830	  LITTLENUM_TYPE m = -1;
4831
4832	  if (generic_bignum[parts * 2] != 0
4833	      && generic_bignum[parts * 2] != m)
4834	    return FAIL;
4835
4836	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
4837	    if (generic_bignum[j] != generic_bignum[j-1])
4838	      return FAIL;
4839	}
4840
4841      inst.operands[i].imm = 0;
4842      for (j = 0; j < parts; j++, idx++)
4843	inst.operands[i].imm |= generic_bignum[idx]
4844				<< (LITTLENUM_NUMBER_OF_BITS * j);
4845      inst.operands[i].reg = 0;
4846      for (j = 0; j < parts; j++, idx++)
4847	inst.operands[i].reg |= generic_bignum[idx]
4848				<< (LITTLENUM_NUMBER_OF_BITS * j);
4849      inst.operands[i].regisimm = 1;
4850    }
4851  else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
4852    return FAIL;
4853
4854  *str = ptr;
4855
4856  return SUCCESS;
4857}
4858
4859/* Returns the pseudo-register number of an FPA immediate constant,
4860   or FAIL if there isn't a valid constant here.  */
4861
4862static int
4863parse_fpa_immediate (char ** str)
4864{
4865  LITTLENUM_TYPE words[MAX_LITTLENUMS];
4866  char *	 save_in;
4867  expressionS	 exp;
4868  int		 i;
4869  int		 j;
4870
  /* First try to match exact strings; this is to guarantee that some
     formats will work even for cross assembly.  */
4873
4874  for (i = 0; fp_const[i]; i++)
4875    {
4876      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
4877	{
4878	  char *start = *str;
4879
4880	  *str += strlen (fp_const[i]);
4881	  if (is_end_of_line[(unsigned char) **str])
4882	    return i + 8;
4883	  *str = start;
4884	}
4885    }
4886
4887  /* Just because we didn't get a match doesn't mean that the constant
4888     isn't valid, just that it is in a format that we don't
4889     automatically recognize.  Try parsing it with the standard
4890     expression routines.  */
4891
4892  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));
4893
4894  /* Look for a raw floating point number.  */
4895  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
4896      && is_end_of_line[(unsigned char) *save_in])
4897    {
4898      for (i = 0; i < NUM_FLOAT_VALS; i++)
4899	{
4900	  for (j = 0; j < MAX_LITTLENUMS; j++)
4901	    {
4902	      if (words[j] != fp_values[i][j])
4903		break;
4904	    }
4905
4906	  if (j == MAX_LITTLENUMS)
4907	    {
4908	      *str = save_in;
4909	      return i + 8;
4910	    }
4911	}
4912    }
4913
  /* Try to parse a more complex expression; this will probably fail
     unless the code uses a floating point prefix (e.g. "0f").  */
4916  save_in = input_line_pointer;
4917  input_line_pointer = *str;
4918  if (expression (&exp) == absolute_section
4919      && exp.X_op == O_big
4920      && exp.X_add_number < 0)
4921    {
4922      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4923	 Ditto for 15.	*/
4924#define X_PRECISION 5
4925#define E_PRECISION 15L
4926      if (gen_to_words (words, X_PRECISION, E_PRECISION) == 0)
4927	{
4928	  for (i = 0; i < NUM_FLOAT_VALS; i++)
4929	    {
4930	      for (j = 0; j < MAX_LITTLENUMS; j++)
4931		{
4932		  if (words[j] != fp_values[i][j])
4933		    break;
4934		}
4935
4936	      if (j == MAX_LITTLENUMS)
4937		{
4938		  *str = input_line_pointer;
4939		  input_line_pointer = save_in;
4940		  return i + 8;
4941		}
4942	    }
4943	}
4944    }
4945
4946  *str = input_line_pointer;
4947  input_line_pointer = save_in;
4948  inst.error = _("invalid FPA immediate expression");
4949  return FAIL;
4950}
4951
4952/* Returns 1 if a number has "quarter-precision" float format
4953   0baBbbbbbc defgh000 00000000 00000000.  */
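/* Worked examples (illustrative): 1.0f (0x3f800000), 2.0f (0x40000000)
   and -1.0f (0xbf800000) all satisfy the test below, whereas 100.0f
   (0x42c80000) does not, its exponent falling outside the 2^-3 .. 2^4
   range that the 8-bit format can express.  */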
4954
4955static int
4956is_quarter_float (unsigned imm)
4957{
4958  int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
4959  return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
4960}
4961
4962
4963/* Detect the presence of a floating point or integer zero constant,
4964   i.e. #0.0 or #0.  */
4965
4966static bfd_boolean
4967parse_ifimm_zero (char **in)
4968{
4969  int error_code;
4970
4971  if (!is_immediate_prefix (**in))
4972    return FALSE;
4973
4974  ++*in;
4975
4976  /* Accept #0x0 as a synonym for #0.  */
4977  if (strncmp (*in, "0x", 2) == 0)
4978    {
4979      int val;
4980      if (parse_immediate (in, &val, 0, 0, TRUE) == FAIL)
4981        return FALSE;
4982      return TRUE;
4983    }
4984
4985  error_code = atof_generic (in, ".", EXP_CHARS,
4986                             &generic_floating_point_number);
4987
4988  if (!error_code
4989      && generic_floating_point_number.sign == '+'
4990      && (generic_floating_point_number.low
4991          > generic_floating_point_number.leader))
4992    return TRUE;
4993
4994  return FALSE;
4995}
4996
4997/* Parse an 8-bit "quarter-precision" floating point number of the form:
4998   0baBbbbbbc defgh000 00000000 00000000.
4999   The zero and minus-zero cases need special handling, since they can't be
5000   encoded in the "quarter-precision" float format, but can nonetheless be
5001   loaded as integer constants.  */
5002
5003static unsigned
5004parse_qfloat_immediate (char **ccp, int *immed)
5005{
5006  char *str = *ccp;
5007  char *fpnum;
5008  LITTLENUM_TYPE words[MAX_LITTLENUMS];
5009  int found_fpchar = 0;
5010
5011  skip_past_char (&str, '#');
5012
5013  /* We must not accidentally parse an integer as a floating-point number. Make
5014     sure that the value we parse is not an integer by checking for special
5015     characters '.' or 'e'.
5016     FIXME: This is a horrible hack, but doing better is tricky because type
5017     information isn't in a very usable state at parse time.  */
5018  fpnum = str;
5019  skip_whitespace (fpnum);
5020
5021  if (strncmp (fpnum, "0x", 2) == 0)
5022    return FAIL;
5023  else
5024    {
5025      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
5026	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
5027	  {
5028	    found_fpchar = 1;
5029	    break;
5030	  }
5031
5032      if (!found_fpchar)
5033	return FAIL;
5034    }
5035
5036  if ((str = atof_ieee (str, 's', words)) != NULL)
5037    {
5038      unsigned fpword = 0;
5039      int i;
5040
5041      /* Our FP word must be 32 bits (single-precision FP).  */
5042      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
5043	{
5044	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
5045	  fpword |= words[i];
5046	}
5047
5048      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
5049	*immed = fpword;
5050      else
5051	return FAIL;
5052
5053      *ccp = str;
5054
5055      return SUCCESS;
5056    }
5057
5058  return FAIL;
5059}
5060
5061/* Shift operands.  */
5062enum shift_kind
5063{
5064  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
5065};
5066
5067struct asm_shift_name
5068{
5069  const char	  *name;
5070  enum shift_kind  kind;
5071};
5072
5073/* Third argument to parse_shift.  */
5074enum parse_shift_mode
5075{
5076  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
5077  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.	*/
5078  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.	*/
5079  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.	 */
5080  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.	 */
5081};
5082
5083/* Parse a <shift> specifier on an ARM data processing instruction.
5084   This has three forms:
5085
5086     (LSL|LSR|ASL|ASR|ROR) Rs
5087     (LSL|LSR|ASL|ASR|ROR) #imm
5088     RRX
5089
5090   Note that ASL is assimilated to LSL in the instruction encoding, and
5091   RRX to ROR #0 (which cannot be written as such).  */
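/* For example (illustrative): in "add r0, r1, r2, LSL #3" the operand
   parser hands "LSL #3" to this routine; "ROR r3" uses the register form,
   which is only accepted when MODE is NO_SHIFT_RESTRICT; and "RRX" takes
   no argument at all.  */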
5092
5093static int
5094parse_shift (char **str, int i, enum parse_shift_mode mode)
5095{
5096  const struct asm_shift_name *shift_name;
5097  enum shift_kind shift;
5098  char *s = *str;
5099  char *p = s;
5100  int reg;
5101
5102  for (p = *str; ISALPHA (*p); p++)
5103    ;
5104
5105  if (p == *str)
5106    {
5107      inst.error = _("shift expression expected");
5108      return FAIL;
5109    }
5110
5111  shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
5112							    p - *str);
5113
5114  if (shift_name == NULL)
5115    {
5116      inst.error = _("shift expression expected");
5117      return FAIL;
5118    }
5119
5120  shift = shift_name->kind;
5121
5122  switch (mode)
5123    {
5124    case NO_SHIFT_RESTRICT:
5125    case SHIFT_IMMEDIATE:   break;
5126
5127    case SHIFT_LSL_OR_ASR_IMMEDIATE:
5128      if (shift != SHIFT_LSL && shift != SHIFT_ASR)
5129	{
5130	  inst.error = _("'LSL' or 'ASR' required");
5131	  return FAIL;
5132	}
5133      break;
5134
5135    case SHIFT_LSL_IMMEDIATE:
5136      if (shift != SHIFT_LSL)
5137	{
5138	  inst.error = _("'LSL' required");
5139	  return FAIL;
5140	}
5141      break;
5142
5143    case SHIFT_ASR_IMMEDIATE:
5144      if (shift != SHIFT_ASR)
5145	{
5146	  inst.error = _("'ASR' required");
5147	  return FAIL;
5148	}
5149      break;
5150
5151    default: abort ();
5152    }
5153
5154  if (shift != SHIFT_RRX)
5155    {
5156      /* Whitespace can appear here if the next thing is a bare digit.	*/
5157      skip_whitespace (p);
5158
5159      if (mode == NO_SHIFT_RESTRICT
5160	  && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5161	{
5162	  inst.operands[i].imm = reg;
5163	  inst.operands[i].immisreg = 1;
5164	}
5165      else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5166	return FAIL;
5167    }
5168  inst.operands[i].shift_kind = shift;
5169  inst.operands[i].shifted = 1;
5170  *str = p;
5171  return SUCCESS;
5172}
5173
5174/* Parse a <shifter_operand> for an ARM data processing instruction:
5175
5176      #<immediate>
5177      #<immediate>, <rotate>
5178      <Rm>
5179      <Rm>, <shift>
5180
5181   where <shift> is defined by parse_shift above, and <rotate> is a
5182   multiple of 2 between 0 and 30.  Validation of immediate operands
5183   is deferred to md_apply_fix.  */
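/* A worked example (illustrative): "#255, 8" denotes the value 255
   rotated right by 8 bits, i.e. 0xff000000; the explicit form lets the
   programmer select a particular encoding of the immediate.  */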
5184
5185static int
5186parse_shifter_operand (char **str, int i)
5187{
5188  int value;
5189  expressionS exp;
5190
5191  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
5192    {
5193      inst.operands[i].reg = value;
5194      inst.operands[i].isreg = 1;
5195
5196      /* parse_shift will override this if appropriate */
5197      inst.reloc.exp.X_op = O_constant;
5198      inst.reloc.exp.X_add_number = 0;
5199
5200      if (skip_past_comma (str) == FAIL)
5201	return SUCCESS;
5202
5203      /* Shift operation on register.  */
5204      return parse_shift (str, i, NO_SHIFT_RESTRICT);
5205    }
5206
5207  if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
5208    return FAIL;
5209
5210  if (skip_past_comma (str) == SUCCESS)
5211    {
      /* #x, y -- i.e. explicit rotation by Y.  */
5213      if (my_get_expression (&exp, str, GE_NO_PREFIX))
5214	return FAIL;
5215
5216      if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
5217	{
5218	  inst.error = _("constant expression expected");
5219	  return FAIL;
5220	}
5221
5222      value = exp.X_add_number;
5223      if (value < 0 || value > 30 || value % 2 != 0)
5224	{
5225	  inst.error = _("invalid rotation");
5226	  return FAIL;
5227	}
5228      if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
5229	{
5230	  inst.error = _("invalid constant");
5231	  return FAIL;
5232	}
5233
5234      /* Encode as specified.  */
5235      inst.operands[i].imm = inst.reloc.exp.X_add_number | value << 7;
5236      return SUCCESS;
5237    }
5238
5239  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
5240  inst.reloc.pc_rel = 0;
5241  return SUCCESS;
5242}
5243
/* Group relocation information.  Each entry in the table contains the
   textual name of the relocation, as it may appear in assembler source
   (where it must be followed by a colon).
   Along with this textual name are the relocation codes to be used if
   the corresponding instruction is an ALU instruction (ADD or SUB only),
   an LDR, an LDRS, or an LDC.  */
5250
5251struct group_reloc_table_entry
5252{
5253  const char *name;
5254  int alu_code;
5255  int ldr_code;
5256  int ldrs_code;
5257  int ldc_code;
5258};
5259
5260typedef enum
5261{
5262  /* Varieties of non-ALU group relocation.  */
5263
5264  GROUP_LDR,
5265  GROUP_LDRS,
5266  GROUP_LDC
5267} group_reloc_type;
5268
5269static struct group_reloc_table_entry group_reloc_table[] =
5270  { /* Program counter relative: */
5271    { "pc_g0_nc",
5272      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
5273      0,				/* LDR */
5274      0,				/* LDRS */
5275      0 },				/* LDC */
5276    { "pc_g0",
5277      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
5278      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
5279      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
5280      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
5281    { "pc_g1_nc",
5282      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
5283      0,				/* LDR */
5284      0,				/* LDRS */
5285      0 },				/* LDC */
5286    { "pc_g1",
5287      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
5288      BFD_RELOC_ARM_LDR_PC_G1, 		/* LDR */
5289      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
5290      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
5291    { "pc_g2",
5292      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
5293      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
5294      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
5295      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
5296    /* Section base relative */
5297    { "sb_g0_nc",
5298      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
5299      0,				/* LDR */
5300      0,				/* LDRS */
5301      0 },				/* LDC */
5302    { "sb_g0",
5303      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
5304      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
5305      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
5306      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
5307    { "sb_g1_nc",
5308      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
5309      0,				/* LDR */
5310      0,				/* LDRS */
5311      0 },				/* LDC */
5312    { "sb_g1",
5313      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
5314      BFD_RELOC_ARM_LDR_SB_G1, 		/* LDR */
5315      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
5316      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
5317    { "sb_g2",
5318      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
5319      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
5320      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
5321      BFD_RELOC_ARM_LDC_SB_G2 },	/* LDC */
5322    /* Absolute thumb alu relocations.  */
5323    { "lower0_7",
5324      BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC,/* ALU.  */
5325      0,				/* LDR.  */
5326      0,				/* LDRS.  */
5327      0 },				/* LDC.  */
5328    { "lower8_15",
5329      BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC,/* ALU.  */
5330      0,				/* LDR.  */
5331      0,				/* LDRS.  */
5332      0 },				/* LDC.  */
5333    { "upper0_7",
5334      BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC,/* ALU.  */
5335      0,				/* LDR.  */
5336      0,				/* LDRS.  */
5337      0 },				/* LDC.  */
5338    { "upper8_15",
5339      BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,/* ALU.  */
5340      0,				/* LDR.  */
5341      0,				/* LDRS.  */
5342      0 } };				/* LDC.  */
5343
/* Given the address of a pointer pointing to the textual name of a group
   relocation as it may appear in assembler source, attempt to find its
   details in group_reloc_table.  The pointer will be updated to the
   character after the trailing colon.  On failure, FAIL will be returned;
   SUCCESS otherwise.  On success, *OUT will be updated to point at the
   relevant group_reloc_table entry.  */
5350
5351static int
5352find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5353{
5354  unsigned int i;
5355  for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5356    {
5357      int length = strlen (group_reloc_table[i].name);
5358
5359      if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5360	  && (*str)[length] == ':')
5361	{
5362	  *out = &group_reloc_table[i];
5363	  *str += (length + 1);
5364	  return SUCCESS;
5365	}
5366    }
5367
5368  return FAIL;
5369}
5370
5371/* Parse a <shifter_operand> for an ARM data processing instruction
5372   (as for parse_shifter_operand) where group relocations are allowed:
5373
5374      #<immediate>
5375      #<immediate>, <rotate>
5376      #:<group_reloc>:<expression>
5377      <Rm>
5378      <Rm>, <shift>
5379
5380   where <group_reloc> is one of the strings defined in group_reloc_table.
5381   The hashes are optional.
5382
5383   Everything else is as for parse_shifter_operand.  */
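/* For example (illustrative): "add r0, r0, #:sb_g0_nc:(some_symbol)"
   would be handled here, selecting the sb_g0_nc entry from
   group_reloc_table and recording its ALU relocation against the
   expression in parentheses.  */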
5384
5385static parse_operand_result
5386parse_shifter_operand_group_reloc (char **str, int i)
5387{
5388  /* Determine if we have the sequence of characters #: or just :
5389     coming next.  If we do, then we check for a group relocation.
5390     If we don't, punt the whole lot to parse_shifter_operand.  */
5391
5392  if (((*str)[0] == '#' && (*str)[1] == ':')
5393      || (*str)[0] == ':')
5394    {
5395      struct group_reloc_table_entry *entry;
5396
5397      if ((*str)[0] == '#')
5398	(*str) += 2;
5399      else
5400	(*str)++;
5401
5402      /* Try to parse a group relocation.  Anything else is an error.  */
5403      if (find_group_reloc_table_entry (str, &entry) == FAIL)
5404	{
5405	  inst.error = _("unknown group relocation");
5406	  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5407	}
5408
5409      /* We now have the group relocation table entry corresponding to
5410	 the name in the assembler source.  Next, we parse the expression.  */
5411      if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
5412	return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5413
5414      /* Record the relocation type (always the ALU variant here).  */
5415      inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
5416      gas_assert (inst.reloc.type != 0);
5417
5418      return PARSE_OPERAND_SUCCESS;
5419    }
5420  else
5421    return parse_shifter_operand (str, i) == SUCCESS
5422	   ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
5423
5424  /* Never reached.  */
5425}
5426
5427/* Parse a Neon alignment expression.  Information is written to
5428   inst.operands[i].  We assume the initial ':' has been skipped.
5429
5430   align	.imm = align << 8, .immisalign=1, .preind=0  */
5431static parse_operand_result
5432parse_neon_alignment (char **str, int i)
5433{
5434  char *p = *str;
5435  expressionS exp;
5436
5437  my_get_expression (&exp, &p, GE_NO_PREFIX);
5438
5439  if (exp.X_op != O_constant)
5440    {
5441      inst.error = _("alignment must be constant");
5442      return PARSE_OPERAND_FAIL;
5443    }
5444
5445  inst.operands[i].imm = exp.X_add_number << 8;
5446  inst.operands[i].immisalign = 1;
5447  /* Alignments are not pre-indexes.  */
5448  inst.operands[i].preind = 0;
5449
5450  *str = p;
5451  return PARSE_OPERAND_SUCCESS;
5452}
5453
5454/* Parse all forms of an ARM address expression.  Information is written
5455   to inst.operands[i] and/or inst.reloc.
5456
5457   Preindexed addressing (.preind=1):
5458
5459   [Rn, #offset]       .reg=Rn .reloc.exp=offset
5460   [Rn, +/-Rm]	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5461   [Rn, +/-Rm, shift]  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5462		       .shift_kind=shift .reloc.exp=shift_imm
5463
5464   These three may have a trailing ! which causes .writeback to be set also.
5465
5466   Postindexed addressing (.postind=1, .writeback=1):
5467
5468   [Rn], #offset       .reg=Rn .reloc.exp=offset
5469   [Rn], +/-Rm	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5470   [Rn], +/-Rm, shift  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5471		       .shift_kind=shift .reloc.exp=shift_imm
5472
5473   Unindexed addressing (.preind=0, .postind=0):
5474
5475   [Rn], {option}      .reg=Rn .imm=option .immisreg=0
5476
5477   Other:
5478
5479   [Rn]{!}	       shorthand for [Rn,#0]{!}
5480   =immediate	       .isreg=0 .reloc.exp=immediate
5481   label	       .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5482
5483  It is the caller's responsibility to check for addressing modes not
5484  supported by the instruction, and to set inst.reloc.type.  */
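/* Illustrative examples of the forms above: "[r0, #4]" (preindexed),
   "[r0, r1, lsl #2]!" (preindexed with writeback), "[r0], #-4"
   (postindexed), "[r0], {8}" (unindexed with option), "=0x12345678"
   (the =immediate form) and a bare label (PC-relative).  */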
5485
5486static parse_operand_result
5487parse_address_main (char **str, int i, int group_relocations,
5488		    group_reloc_type group_type)
5489{
5490  char *p = *str;
5491  int reg;
5492
5493  if (skip_past_char (&p, '[') == FAIL)
5494    {
5495      if (skip_past_char (&p, '=') == FAIL)
5496	{
5497	  /* Bare address - translate to PC-relative offset.  */
5498	  inst.reloc.pc_rel = 1;
5499	  inst.operands[i].reg = REG_PC;
5500	  inst.operands[i].isreg = 1;
5501	  inst.operands[i].preind = 1;
5502
5503	  if (my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX_BIG))
5504	    return PARSE_OPERAND_FAIL;
5505	}
5506      else if (parse_big_immediate (&p, i, &inst.reloc.exp,
5507				    /*allow_symbol_p=*/TRUE))
5508	return PARSE_OPERAND_FAIL;
5509
5510      *str = p;
5511      return PARSE_OPERAND_SUCCESS;
5512    }
5513
5514  /* PR gas/14887: Allow for whitespace after the opening bracket.  */
5515  skip_whitespace (p);
5516
5517  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5518    {
5519      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5520      return PARSE_OPERAND_FAIL;
5521    }
5522  inst.operands[i].reg = reg;
5523  inst.operands[i].isreg = 1;
5524
5525  if (skip_past_comma (&p) == SUCCESS)
5526    {
5527      inst.operands[i].preind = 1;
5528
5529      if (*p == '+') p++;
5530      else if (*p == '-') p++, inst.operands[i].negative = 1;
5531
5532      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5533	{
5534	  inst.operands[i].imm = reg;
      as_bad (_("register must be either sp or set by a previous"
		" unwind_movsp directive"));
5537	  if (skip_past_comma (&p) == SUCCESS)
5538	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
5539	      return PARSE_OPERAND_FAIL;
5540	}
5541      else if (skip_past_char (&p, ':') == SUCCESS)
5542	{
5543	  /* FIXME: '@' should be used here, but it's filtered out by generic
5544	     code before we get to see it here. This may be subject to
5545	     change.  */
5546	  parse_operand_result result = parse_neon_alignment (&p, i);
5547
5548	  if (result != PARSE_OPERAND_SUCCESS)
5549	    return result;
5550	}
5551      else
5552	{
5553	  if (inst.operands[i].negative)
5554	    {
5555	      inst.operands[i].negative = 0;
5556	      p--;
5557	    }
5558
5559	  if (group_relocations
5560	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
5561	    {
5562	      struct group_reloc_table_entry *entry;
5563
5564	      /* Skip over the #: or : sequence.  */
5565	      if (*p == '#')
5566		p += 2;
5567	      else
5568		p++;
5569
5570	      /* Try to parse a group relocation.  Anything else is an
5571		 error.  */
5572	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
5573		{
5574		  inst.error = _("unknown group relocation");
5575		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5576		}
5577
5578	      /* We now have the group relocation table entry corresponding to
5579		 the name in the assembler source.  Next, we parse the
5580		 expression.  */
5581	      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5582		return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5583
5584	      /* Record the relocation type.  */
5585	      switch (group_type)
5586		{
5587		  case GROUP_LDR:
5588		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
5589		    break;
5590
5591		  case GROUP_LDRS:
5592		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
5593		    break;
5594
5595		  case GROUP_LDC:
5596		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
5597		    break;
5598
5599		  default:
5600		    gas_assert (0);
5601		}
5602
5603	      if (inst.reloc.type == 0)
5604		{
5605		  inst.error = _("this group relocation is not allowed on this instruction");
5606		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5607		}
5608	    }
5609	  else
5610	    {
5611	      char *q = p;
5612	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5613		return PARSE_OPERAND_FAIL;
5614	      /* If the offset is 0, find out if it's a +0 or -0.  */
5615	      if (inst.reloc.exp.X_op == O_constant
5616		  && inst.reloc.exp.X_add_number == 0)
5617		{
5618		  skip_whitespace (q);
5619		  if (*q == '#')
5620		    {
5621		      q++;
5622		      skip_whitespace (q);
5623		    }
5624		  if (*q == '-')
5625		    inst.operands[i].negative = 1;
5626		}
5627	    }
5628	}
5629    }
5630  else if (skip_past_char (&p, ':') == SUCCESS)
5631    {
5632      /* FIXME: '@' should be used here, but it's filtered out by generic code
5633	 before we get to see it here. This may be subject to change.  */
5634      parse_operand_result result = parse_neon_alignment (&p, i);
5635
5636      if (result != PARSE_OPERAND_SUCCESS)
5637	return result;
5638    }
5639
5640  if (skip_past_char (&p, ']') == FAIL)
5641    {
5642      inst.error = _("']' expected");
5643      return PARSE_OPERAND_FAIL;
5644    }
5645
5646  if (skip_past_char (&p, '!') == SUCCESS)
5647    inst.operands[i].writeback = 1;
5648
5649  else if (skip_past_comma (&p) == SUCCESS)
5650    {
5651      if (skip_past_char (&p, '{') == SUCCESS)
5652	{
5653	  /* [Rn], {expr} - unindexed, with option */
5654	  if (parse_immediate (&p, &inst.operands[i].imm,
5655			       0, 255, TRUE) == FAIL)
5656	    return PARSE_OPERAND_FAIL;
5657
5658	  if (skip_past_char (&p, '}') == FAIL)
5659	    {
5660	      inst.error = _("'}' expected at end of 'option' field");
5661	      return PARSE_OPERAND_FAIL;
5662	    }
5663	  if (inst.operands[i].preind)
5664	    {
5665	      inst.error = _("cannot combine index with option");
5666	      return PARSE_OPERAND_FAIL;
5667	    }
5668	  *str = p;
5669	  return PARSE_OPERAND_SUCCESS;
5670	}
5671      else
5672	{
5673	  inst.operands[i].postind = 1;
5674	  inst.operands[i].writeback = 1;
5675
5676	  if (inst.operands[i].preind)
5677	    {
5678	      inst.error = _("cannot combine pre- and post-indexing");
5679	      return PARSE_OPERAND_FAIL;
5680	    }
5681
5682	  if (*p == '+') p++;
5683	  else if (*p == '-') p++, inst.operands[i].negative = 1;
5684
5685	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5686	    {
5687	      /* We might be using the immediate for alignment already. If we
5688		 are, OR the register number into the low-order bits.  */
5689	      if (inst.operands[i].immisalign)
5690		inst.operands[i].imm |= reg;
5691	      else
5692		inst.operands[i].imm = reg;
5693	      inst.operands[i].immisreg = 1;
5694
5695	      if (skip_past_comma (&p) == SUCCESS)
5696		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
5697		  return PARSE_OPERAND_FAIL;
5698	    }
5699	  else
5700	    {
5701	      char *q = p;
5702	      if (inst.operands[i].negative)
5703		{
5704		  inst.operands[i].negative = 0;
5705		  p--;
5706		}
5707	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5708		return PARSE_OPERAND_FAIL;
5709	      /* If the offset is 0, find out if it's a +0 or -0.  */
5710	      if (inst.reloc.exp.X_op == O_constant
5711		  && inst.reloc.exp.X_add_number == 0)
5712		{
5713		  skip_whitespace (q);
5714		  if (*q == '#')
5715		    {
5716		      q++;
5717		      skip_whitespace (q);
5718		    }
5719		  if (*q == '-')
5720		    inst.operands[i].negative = 1;
5721		}
5722	    }
5723	}
5724    }
5725
5726  /* If at this point neither .preind nor .postind is set, we have a
5727     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
5728  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
5729    {
5730      inst.operands[i].preind = 1;
5731      inst.reloc.exp.X_op = O_constant;
5732      inst.reloc.exp.X_add_number = 0;
5733    }
5734  *str = p;
5735  return PARSE_OPERAND_SUCCESS;
5736}
5737
5738static int
5739parse_address (char **str, int i)
5740{
5741  return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
5742	 ? SUCCESS : FAIL;
5743}
5744
5745static parse_operand_result
5746parse_address_group_reloc (char **str, int i, group_reloc_type type)
5747{
5748  return parse_address_main (str, i, 1, type);
5749}
5750
5751/* Parse an operand for a MOVW or MOVT instruction.  */
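/* For example (illustrative): "movw r0, #:lower16:sym" and
   "movt r0, #:upper16:sym" reach this routine with prefixes that select
   BFD_RELOC_ARM_MOVW and BFD_RELOC_ARM_MOVT respectively; a plain
   "#0x1234" operand must be a constant in the range 0 .. 0xffff.  */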
5752static int
5753parse_half (char **str)
5754{
5755  char * p;
5756
5757  p = *str;
5758  skip_past_char (&p, '#');
5759  if (strncasecmp (p, ":lower16:", 9) == 0)
5760    inst.reloc.type = BFD_RELOC_ARM_MOVW;
5761  else if (strncasecmp (p, ":upper16:", 9) == 0)
5762    inst.reloc.type = BFD_RELOC_ARM_MOVT;
5763
5764  if (inst.reloc.type != BFD_RELOC_UNUSED)
5765    {
5766      p += 9;
5767      skip_whitespace (p);
5768    }
5769
5770  if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5771    return FAIL;
5772
5773  if (inst.reloc.type == BFD_RELOC_UNUSED)
5774    {
5775      if (inst.reloc.exp.X_op != O_constant)
5776	{
5777	  inst.error = _("constant expression expected");
5778	  return FAIL;
5779	}
5780      if (inst.reloc.exp.X_add_number < 0
5781	  || inst.reloc.exp.X_add_number > 0xffff)
5782	{
5783	  inst.error = _("immediate value out of range");
5784	  return FAIL;
5785	}
5786    }
5787  *str = p;
5788  return SUCCESS;
5789}
5790
5791/* Miscellaneous. */
5792
5793/* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
5794   or a bitmask suitable to be or-ed into the ARM msr instruction.  */
5795static int
5796parse_psr (char **str, bfd_boolean lhs)
5797{
5798  char *p;
5799  unsigned long psr_field;
5800  const struct asm_psr *psr;
5801  char *start;
5802  bfd_boolean is_apsr = FALSE;
5803  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);
5804
5805  /* PR gas/12698:  If the user has specified -march=all then m_profile will
5806     be TRUE, but we want to ignore it in this case as we are building for any
5807     CPU type, including non-m variants.  */
5808  if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
5809    m_profile = FALSE;
5810
  /* CPSRs and SPSRs can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
5813  p = *str;
5814  if (strncasecmp (p, "SPSR", 4) == 0)
5815    {
5816      if (m_profile)
5817	goto unsupported_psr;
5818
5819      psr_field = SPSR_BIT;
5820    }
5821  else if (strncasecmp (p, "CPSR", 4) == 0)
5822    {
5823      if (m_profile)
5824	goto unsupported_psr;
5825
5826      psr_field = 0;
5827    }
5828  else if (strncasecmp (p, "APSR", 4) == 0)
5829    {
5830      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
5831	 and ARMv7-R architecture CPUs.  */
5832      is_apsr = TRUE;
5833      psr_field = 0;
5834    }
5835  else if (m_profile)
5836    {
5837      start = p;
5838      do
5839	p++;
5840      while (ISALNUM (*p) || *p == '_');
5841
5842      if (strncasecmp (start, "iapsr", 5) == 0
5843	  || strncasecmp (start, "eapsr", 5) == 0
5844	  || strncasecmp (start, "xpsr", 4) == 0
5845	  || strncasecmp (start, "psr", 3) == 0)
5846	p = start + strcspn (start, "rR") + 1;
5847
5848      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
5849						  p - start);
5850
5851      if (!psr)
5852	return FAIL;
5853
5854      /* If APSR is being written, a bitfield may be specified.  Note that
5855	 APSR itself is handled above.  */
5856      if (psr->field <= 3)
5857	{
5858	  psr_field = psr->field;
5859	  is_apsr = TRUE;
5860	  goto check_suffix;
5861	}
5862
5863      *str = p;
5864      /* M-profile MSR instructions have the mask field set to "10", except
5865	 *PSR variants which modify APSR, which may use a different mask (and
5866	 have been handled already).  Do that by setting the PSR_f field
5867	 here.  */
5868      return psr->field | (lhs ? PSR_f : 0);
5869    }
5870  else
5871    goto unsupported_psr;
5872
5873  p += 4;
5874check_suffix:
5875  if (*p == '_')
5876    {
5877      /* A suffix follows.  */
5878      p++;
5879      start = p;
5880
5881      do
5882	p++;
5883      while (ISALNUM (*p) || *p == '_');
5884
5885      if (is_apsr)
5886	{
5887	  /* APSR uses a notation for bits, rather than fields.  */
5888	  unsigned int nzcvq_bits = 0;
5889	  unsigned int g_bit = 0;
5890	  char *bit;
5891
5892	  for (bit = start; bit != p; bit++)
5893	    {
5894	      switch (TOLOWER (*bit))
5895		{
5896		case 'n':
5897		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
5898		  break;
5899
5900		case 'z':
5901		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
5902		  break;
5903
5904		case 'c':
5905		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
5906		  break;
5907
5908		case 'v':
5909		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
5910		  break;
5911
5912		case 'q':
5913		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
5914		  break;
5915
5916		case 'g':
5917		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
5918		  break;
5919
5920		default:
5921		  inst.error = _("unexpected bit specified after APSR");
5922		  return FAIL;
5923		}
5924	    }
5925
5926	  if (nzcvq_bits == 0x1f)
5927	    psr_field |= PSR_f;
5928
5929	  if (g_bit == 0x1)
5930	    {
5931	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
5932		{
5933		  inst.error = _("selected processor does not "
5934				 "support DSP extension");
5935		  return FAIL;
5936		}
5937
5938	      psr_field |= PSR_s;
5939	    }
5940
5941	  if ((nzcvq_bits & 0x20) != 0
5942	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
5943	      || (g_bit & 0x2) != 0)
5944	    {
5945	      inst.error = _("bad bitmask specified after APSR");
5946	      return FAIL;
5947	    }
5948	}
5949      else
5950	{
5951	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
5952						      p - start);
5953	  if (!psr)
5954	    goto error;
5955
5956	  psr_field |= psr->field;
5957	}
5958    }
5959  else
5960    {
5961      if (ISALNUM (*p))
5962	goto error;    /* Garbage after "[CS]PSR".  */
5963
5964      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
5965	 is deprecated, but allow it anyway.  */
5966      if (is_apsr && lhs)
5967	{
5968	  psr_field |= PSR_f;
5969	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
5970		       "deprecated"));
5971	}
5972      else if (!m_profile)
5973	/* These bits are never right for M-profile devices: don't set them
5974	   (only code paths which read/write APSR reach here).  */
5975	psr_field |= (PSR_c | PSR_f);
5976    }
5977  *str = p;
5978  return psr_field;
5979
5980 unsupported_psr:
5981  inst.error = _("selected processor does not support requested special "
5982		 "purpose register");
5983  return FAIL;
5984
5985 error:
5986  inst.error = _("flag for {c}psr instruction expected");
5987  return FAIL;
5988}
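
/* Examples of operands accepted here (illustrative registers, and assuming
   the usual entries in arm_psr_hsh / arm_v7m_psr_hsh):

       msr	APSR_nzcvq, r0	@ psr_field = PSR_f
       msr	APSR_g, r1	@ psr_field = PSR_s (requires the DSP extension)
       msr	APSR_nzcvqg, r2	@ psr_field = PSR_f | PSR_s
       msr	CPSR_cf, r3	@ suffix "cf" is looked up in arm_psr_hsh  */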
5989
5990/* Parse the flags argument to CPSI[ED].  Returns FAIL on error, or a
5991   value suitable for splatting into the AIF field of the instruction.	*/
5992
5993static int
5994parse_cps_flags (char **str)
5995{
5996  int val = 0;
5997  int saw_a_flag = 0;
5998  char *s = *str;
5999
6000  for (;;)
6001    switch (*s++)
6002      {
6003      case '\0': case ',':
6004	goto done;
6005
6006      case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
6007      case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
6008      case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
6009
6010      default:
6011	inst.error = _("unrecognized CPS flag");
6012	return FAIL;
6013      }
6014
6015 done:
6016  if (saw_a_flag == 0)
6017    {
6018      inst.error = _("missing CPS flags");
6019      return FAIL;
6020    }
6021
6022  *str = s - 1;
6023  return val;
6024}
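
/* For example, "cpsie if" parses the flag string "if" and yields
   val = 0x2 | 0x1 = 0x3, while "cpsid aif" yields 0x7.  An empty flag
   string is rejected with "missing CPS flags".  */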
6025
6026/* Parse an endian specifier ("BE" or "LE", case insensitive);
6027   returns 0 for big-endian, 1 for little-endian, FAIL for an error.  */
6028
6029static int
6030parse_endian_specifier (char **str)
6031{
6032  int little_endian;
6033  char *s = *str;
6034
6035  if (strncasecmp (s, "BE", 2))
6036    little_endian = 0;
6037  else if (strncasecmp (s, "LE", 2))
6038    little_endian = 1;
6039  else
6040    {
6041      inst.error = _("valid endian specifiers are be or le");
6042      return FAIL;
6043    }
6044
6045  if (ISALNUM (s[2]) || s[2] == '_')
6046    {
6047      inst.error = _("valid endian specifiers are be or le");
6048      return FAIL;
6049    }
6050
6051  *str = s + 2;
6052  return little_endian;
6053}
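
/* For example, this parses the operand of SETEND in either case:

       setend	be
       setend	le

   A specifier followed by further name characters (e.g. "be2") is
   rejected.  */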
6054
/* Parse a rotation specifier: ROR #0, #8, #16, #24.  Returns a value
   suitable for poking into the rotate field of an sxt or sxta
   instruction, or FAIL on error.  */
6058
6059static int
6060parse_ror (char **str)
6061{
6062  int rot;
6063  char *s = *str;
6064
6065  if (strncasecmp (s, "ROR", 3) == 0)
6066    s += 3;
6067  else
6068    {
6069      inst.error = _("missing rotation field after comma");
6070      return FAIL;
6071    }
6072
6073  if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
6074    return FAIL;
6075
6076  switch (rot)
6077    {
6078    case  0: *str = s; return 0x0;
6079    case  8: *str = s; return 0x1;
6080    case 16: *str = s; return 0x2;
6081    case 24: *str = s; return 0x3;
6082
6083    default:
6084      inst.error = _("rotation can only be 0, 8, 16, or 24");
6085      return FAIL;
6086    }
6087}
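
/* For example, in "sxtab r0, r1, r2, ROR #16" (registers illustrative)
   the rotation operand parsed here is "ROR #16", which returns 0x2 for
   the two-bit rotate field.  */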
6088
6089/* Parse a conditional code (from conds[] below).  The value returned is in the
6090   range 0 .. 14, or FAIL.  */
6091static int
6092parse_cond (char **str)
6093{
6094  char *q;
6095  const struct asm_cond *c;
6096  int n;
6097  /* Condition codes are always 2 characters, so matching up to
6098     3 characters is sufficient.  */
6099  char cond[3];
6100
6101  q = *str;
6102  n = 0;
6103  while (ISALPHA (*q) && n < 3)
6104    {
6105      cond[n] = TOLOWER (*q);
6106      q++;
6107      n++;
6108    }
6109
6110  c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
6111  if (!c)
6112    {
6113      inst.error = _("condition required");
6114      return FAIL;
6115    }
6116
6117  *str = q;
6118  return c->value;
6119}
6120
6121/* Record a use of the given feature.  */
6122static void
6123record_feature_use (const arm_feature_set *feature)
6124{
6125  if (thumb_mode)
6126    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
6127  else
6128    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
6129}
6130
/* If the given feature is available in the selected CPU, mark it as used.
   Returns TRUE iff the feature is available.  */
6133static bfd_boolean
6134mark_feature_used (const arm_feature_set *feature)
6135{
6136  /* Ensure the option is valid on the current architecture.  */
6137  if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
6138    return FALSE;
6139
  /* Add the appropriate architecture feature for the barrier option used.  */
6142  record_feature_use (feature);
6143
6144  return TRUE;
6145}
6146
6147/* Parse an option for a barrier instruction.  Returns the encoding for the
6148   option, or FAIL.  */
6149static int
6150parse_barrier (char **str)
6151{
6152  char *p, *q;
6153  const struct asm_barrier_opt *o;
6154
6155  p = q = *str;
6156  while (ISALPHA (*q))
6157    q++;
6158
6159  o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
6160						    q - p);
6161  if (!o)
6162    return FAIL;
6163
6164  if (!mark_feature_used (&o->arch))
6165    return FAIL;
6166
6167  *str = q;
6168  return o->value;
6169}
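
/* Examples (assuming the usual entries in arm_barrier_opt_hsh):

       dmb	ish
       dsb	sy
       isb	sy

   The option name is looked up in arm_barrier_opt_hsh and must also be
   valid for the selected architecture (see mark_feature_used above).  */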
6170
6171/* Parse the operands of a table branch instruction.  Similar to a memory
6172   operand.  */
6173static int
6174parse_tb (char **str)
6175{
6176  char * p = *str;
6177  int reg;
6178
6179  if (skip_past_char (&p, '[') == FAIL)
6180    {
6181      inst.error = _("'[' expected");
6182      return FAIL;
6183    }
6184
6185  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6186    {
6187      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6188      return FAIL;
6189    }
6190  inst.operands[0].reg = reg;
6191
6192  if (skip_past_comma (&p) == FAIL)
6193    {
6194      inst.error = _("',' expected");
6195      return FAIL;
6196    }
6197
6198  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6199    {
6200      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6201      return FAIL;
6202    }
6203  inst.operands[0].imm = reg;
6204
6205  if (skip_past_comma (&p) == SUCCESS)
6206    {
6207      if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
6208	return FAIL;
6209      if (inst.reloc.exp.X_add_number != 1)
6210	{
6211	  inst.error = _("invalid shift");
6212	  return FAIL;
6213	}
6214      inst.operands[0].shifted = 1;
6215    }
6216
6217  if (skip_past_char (&p, ']') == FAIL)
6218    {
6219      inst.error = _("']' expected");
6220      return FAIL;
6221    }
6222  *str = p;
6223  return SUCCESS;
6224}
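
/* For example (registers illustrative):

       tbb	[r0, r1]		@ no shift
       tbh	[r0, r1, lsl #1]	@ shift must be LSL #1

   Any shift amount other than 1 is rejected above as "invalid shift".  */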
6225
6226/* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
6227   information on the types the operands can take and how they are encoded.
6228   Up to four operands may be read; this function handles setting the
6229   ".present" field for each read operand itself.
6230   Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
6231   else returns FAIL.  */
6232
6233static int
6234parse_neon_mov (char **str, int *which_operand)
6235{
6236  int i = *which_operand, val;
6237  enum arm_reg_type rtype;
6238  char *ptr = *str;
6239  struct neon_type_el optype;
6240
6241  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
6242    {
6243      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
6244      inst.operands[i].reg = val;
6245      inst.operands[i].isscalar = 1;
6246      inst.operands[i].vectype = optype;
6247      inst.operands[i++].present = 1;
6248
6249      if (skip_past_comma (&ptr) == FAIL)
6250	goto wanted_comma;
6251
6252      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6253	goto wanted_arm;
6254
6255      inst.operands[i].reg = val;
6256      inst.operands[i].isreg = 1;
6257      inst.operands[i].present = 1;
6258    }
6259  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
6260	   != FAIL)
6261    {
6262      /* Cases 0, 1, 2, 3, 5 (D only).  */
6263      if (skip_past_comma (&ptr) == FAIL)
6264	goto wanted_comma;
6265
6266      inst.operands[i].reg = val;
6267      inst.operands[i].isreg = 1;
6268      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
6269      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
6270      inst.operands[i].isvec = 1;
6271      inst.operands[i].vectype = optype;
6272      inst.operands[i++].present = 1;
6273
6274      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
6275	{
6276	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
6277	     Case 13: VMOV <Sd>, <Rm>  */
6278	  inst.operands[i].reg = val;
6279	  inst.operands[i].isreg = 1;
6280	  inst.operands[i].present = 1;
6281
6282	  if (rtype == REG_TYPE_NQ)
6283	    {
6284	      first_error (_("can't use Neon quad register here"));
6285	      return FAIL;
6286	    }
6287	  else if (rtype != REG_TYPE_VFS)
6288	    {
6289	      i++;
6290	      if (skip_past_comma (&ptr) == FAIL)
6291		goto wanted_comma;
6292	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6293		goto wanted_arm;
6294	      inst.operands[i].reg = val;
6295	      inst.operands[i].isreg = 1;
6296	      inst.operands[i].present = 1;
6297	    }
6298	}
6299      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
6300					   &optype)) != FAIL)
6301	{
6302	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
6303	     Case 1: VMOV<c><q> <Dd>, <Dm>
6304	     Case 8: VMOV.F32 <Sd>, <Sm>
6305	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */
6306
6307	  inst.operands[i].reg = val;
6308	  inst.operands[i].isreg = 1;
6309	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
6310	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
6311	  inst.operands[i].isvec = 1;
6312	  inst.operands[i].vectype = optype;
6313	  inst.operands[i].present = 1;
6314
6315	  if (skip_past_comma (&ptr) == SUCCESS)
6316	    {
6317	      /* Case 15.  */
6318	      i++;
6319
6320	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6321		goto wanted_arm;
6322
6323	      inst.operands[i].reg = val;
6324	      inst.operands[i].isreg = 1;
6325	      inst.operands[i++].present = 1;
6326
6327	      if (skip_past_comma (&ptr) == FAIL)
6328		goto wanted_comma;
6329
6330	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6331		goto wanted_arm;
6332
6333	      inst.operands[i].reg = val;
6334	      inst.operands[i].isreg = 1;
6335	      inst.operands[i].present = 1;
6336	    }
6337	}
6338      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
6339	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
6340	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
6341	     Case 10: VMOV.F32 <Sd>, #<imm>
6342	     Case 11: VMOV.F64 <Dd>, #<imm>  */
6343	inst.operands[i].immisfloat = 1;
6344      else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
6345	       == SUCCESS)
6346	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
6347	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
6348	;
6349      else
6350	{
6351	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
6352	  return FAIL;
6353	}
6354    }
6355  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
6356    {
6357      /* Cases 6, 7.  */
6358      inst.operands[i].reg = val;
6359      inst.operands[i].isreg = 1;
6360      inst.operands[i++].present = 1;
6361
6362      if (skip_past_comma (&ptr) == FAIL)
6363	goto wanted_comma;
6364
6365      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
6366	{
6367	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
6368	  inst.operands[i].reg = val;
6369	  inst.operands[i].isscalar = 1;
6370	  inst.operands[i].present = 1;
6371	  inst.operands[i].vectype = optype;
6372	}
6373      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
6374	{
6375	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
6376	  inst.operands[i].reg = val;
6377	  inst.operands[i].isreg = 1;
6378	  inst.operands[i++].present = 1;
6379
6380	  if (skip_past_comma (&ptr) == FAIL)
6381	    goto wanted_comma;
6382
6383	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
6384	      == FAIL)
6385	    {
6386	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
6387	      return FAIL;
6388	    }
6389
6390	  inst.operands[i].reg = val;
6391	  inst.operands[i].isreg = 1;
6392	  inst.operands[i].isvec = 1;
6393	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
6394	  inst.operands[i].vectype = optype;
6395	  inst.operands[i].present = 1;
6396
6397	  if (rtype == REG_TYPE_VFS)
6398	    {
6399	      /* Case 14.  */
6400	      i++;
6401	      if (skip_past_comma (&ptr) == FAIL)
6402		goto wanted_comma;
6403	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
6404					      &optype)) == FAIL)
6405		{
6406		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
6407		  return FAIL;
6408		}
6409	      inst.operands[i].reg = val;
6410	      inst.operands[i].isreg = 1;
6411	      inst.operands[i].isvec = 1;
6412	      inst.operands[i].issingle = 1;
6413	      inst.operands[i].vectype = optype;
6414	      inst.operands[i].present = 1;
6415	    }
6416	}
6417      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
6418	       != FAIL)
6419	{
6420	  /* Case 13.  */
6421	  inst.operands[i].reg = val;
6422	  inst.operands[i].isreg = 1;
6423	  inst.operands[i].isvec = 1;
6424	  inst.operands[i].issingle = 1;
6425	  inst.operands[i].vectype = optype;
6426	  inst.operands[i].present = 1;
6427	}
6428    }
6429  else
6430    {
6431      first_error (_("parse error"));
6432      return FAIL;
6433    }
6434
6435  /* Successfully parsed the operands. Update args.  */
6436  *which_operand = i;
6437  *str = ptr;
6438  return SUCCESS;
6439
6440 wanted_comma:
6441  first_error (_("expected comma"));
6442  return FAIL;
6443
6444 wanted_arm:
6445  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
6446  return FAIL;
6447}
6448
6449/* Use this macro when the operand constraints are different
6450   for ARM and THUMB (e.g. ldrd).  */
6451#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
6452	((arm_operand) | ((thumb_operand) << 16))
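
/* For example, OP_RRnpc_npcsp below expands to
   OP_RRnpc | (OP_RRnpcsp << 16): the ARM constraint lives in the low
   16 bits and the Thumb constraint in the high 16 bits, and
   parse_operands picks one or the other based on its THUMB argument.  */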
6453
6454/* Matcher codes for parse_operands.  */
6455enum operand_parse_code
6456{
6457  OP_stop,	/* end of line */
6458
6459  OP_RR,	/* ARM register */
6460  OP_RRnpc,	/* ARM register, not r15 */
6461  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
6462  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
6463  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
6464		   optional trailing ! */
6465  OP_RRw,	/* ARM register, not r15, optional trailing ! */
6466  OP_RCP,	/* Coprocessor number */
6467  OP_RCN,	/* Coprocessor register */
6468  OP_RF,	/* FPA register */
6469  OP_RVS,	/* VFP single precision register */
6470  OP_RVD,	/* VFP double precision register (0..15) */
6471  OP_RND,       /* Neon double precision register (0..31) */
6472  OP_RNQ,	/* Neon quad precision register */
6473  OP_RVSD,	/* VFP single or double precision register */
6474  OP_RNDQ,      /* Neon double or quad precision register */
6475  OP_RNSDQ,	/* Neon single, double or quad precision register */
6476  OP_RNSC,      /* Neon scalar D[X] */
6477  OP_RVC,	/* VFP control register */
6478  OP_RMF,	/* Maverick F register */
6479  OP_RMD,	/* Maverick D register */
6480  OP_RMFX,	/* Maverick FX register */
6481  OP_RMDX,	/* Maverick DX register */
6482  OP_RMAX,	/* Maverick AX register */
6483  OP_RMDS,	/* Maverick DSPSC register */
6484  OP_RIWR,	/* iWMMXt wR register */
6485  OP_RIWC,	/* iWMMXt wC register */
6486  OP_RIWG,	/* iWMMXt wCG register */
6487  OP_RXA,	/* XScale accumulator register */
6488
6489  OP_REGLST,	/* ARM register list */
6490  OP_VRSLST,	/* VFP single-precision register list */
6491  OP_VRDLST,	/* VFP double-precision register list */
6492  OP_VRSDLST,   /* VFP single or double-precision register list (& quad) */
6493  OP_NRDLST,    /* Neon double-precision register list (d0-d31, qN aliases) */
6494  OP_NSTRLST,   /* Neon element/structure list */
6495
6496  OP_RNDQ_I0,   /* Neon D or Q reg, or immediate zero.  */
6497  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
6498  OP_RSVD_FI0, /* VFP S or D reg, or floating point immediate zero.  */
6499  OP_RR_RNSC,   /* ARM reg or Neon scalar.  */
6500  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
6501  OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar.  */
6502  OP_RND_RNSC,  /* Neon D reg, or Neon scalar.  */
6503  OP_VMOV,      /* Neon VMOV operands.  */
6504  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
6505  OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift.  */
6506  OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */
6507
6508  OP_I0,        /* immediate zero */
6509  OP_I7,	/* immediate value 0 .. 7 */
6510  OP_I15,	/*		   0 .. 15 */
6511  OP_I16,	/*		   1 .. 16 */
6512  OP_I16z,      /*                 0 .. 16 */
6513  OP_I31,	/*		   0 .. 31 */
6514  OP_I31w,	/*		   0 .. 31, optional trailing ! */
6515  OP_I32,	/*		   1 .. 32 */
6516  OP_I32z,	/*		   0 .. 32 */
6517  OP_I63,	/*		   0 .. 63 */
6518  OP_I63s,	/*		 -64 .. 63 */
6519  OP_I64,	/*		   1 .. 64 */
6520  OP_I64z,	/*		   0 .. 64 */
6521  OP_I255,	/*		   0 .. 255 */
6522
6523  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
6524  OP_I7b,	/*			       0 .. 7 */
6525  OP_I15b,	/*			       0 .. 15 */
6526  OP_I31b,	/*			       0 .. 31 */
6527
6528  OP_SH,	/* shifter operand */
6529  OP_SHG,	/* shifter operand with possible group relocation */
6530  OP_ADDR,	/* Memory address expression (any mode) */
6531  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
6532  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
6533  OP_ADDRGLDC,  /* Mem addr expr (any mode) with possible LDC group reloc */
6534  OP_EXP,	/* arbitrary expression */
6535  OP_EXPi,	/* same, with optional immediate prefix */
6536  OP_EXPr,	/* same, with optional relocation suffix */
6537  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */
6538  OP_IROT1,	/* VCADD rotate immediate: 90, 270.  */
6539  OP_IROT2,	/* VCMLA rotate immediate: 0, 90, 180, 270.  */
6540
6541  OP_CPSF,	/* CPS flags */
6542  OP_ENDI,	/* Endianness specifier */
6543  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
6544  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
6545  OP_COND,	/* conditional code */
6546  OP_TB,	/* Table branch.  */
6547
6548  OP_APSR_RR,   /* ARM register or "APSR_nzcv".  */
6549
6550  OP_RRnpc_I0,	/* ARM register or literal 0 */
6551  OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
6552  OP_RR_EXi,	/* ARM register or expression with imm prefix */
6553  OP_RF_IF,	/* FPA register or immediate */
6554  OP_RIWR_RIWC, /* iWMMXt R or C reg */
6555  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */
6556
6557  /* Optional operands.	 */
6558  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
6559  OP_oI31b,	 /*				0 .. 31 */
6560  OP_oI32b,      /*                             1 .. 32 */
6561  OP_oI32z,      /*                             0 .. 32 */
6562  OP_oIffffb,	 /*				0 .. 65535 */
6563  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */
6564
6565  OP_oRR,	 /* ARM register */
6566  OP_oRRnpc,	 /* ARM register, not the PC */
6567  OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
6568  OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
6569  OP_oRND,       /* Optional Neon double precision register */
6570  OP_oRNQ,       /* Optional Neon quad precision register */
6571  OP_oRNDQ,      /* Optional Neon double or quad precision register */
6572  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
6573  OP_oSHll,	 /* LSL immediate */
6574  OP_oSHar,	 /* ASR immediate */
6575  OP_oSHllar,	 /* LSL or ASR immediate */
6576  OP_oROR,	 /* ROR 0/8/16/24 */
6577  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */
6578
6579  /* Some pre-defined mixed (ARM/THUMB) operands.  */
6580  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
6581  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
6582  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),
6583
6584  OP_FIRST_OPTIONAL = OP_oI7b
6585};
6586
6587/* Generic instruction operand parser.	This does no encoding and no
6588   semantic validation; it merely squirrels values away in the inst
6589   structure.  Returns SUCCESS or FAIL depending on whether the
6590   specified grammar matched.  */
6591static int
6592parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
6593{
6594  unsigned const int *upat = pattern;
6595  char *backtrack_pos = 0;
6596  const char *backtrack_error = 0;
6597  int i, val = 0, backtrack_index = 0;
6598  enum arm_reg_type rtype;
6599  parse_operand_result result;
6600  unsigned int op_parse_code;
6601
6602#define po_char_or_fail(chr)			\
6603  do						\
6604    {						\
6605      if (skip_past_char (&str, chr) == FAIL)	\
6606	goto bad_args;				\
6607    }						\
6608  while (0)
6609
6610#define po_reg_or_fail(regtype)					\
6611  do								\
6612    {								\
6613      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
6614				 & inst.operands[i].vectype);	\
6615      if (val == FAIL)						\
6616	{							\
6617	  first_error (_(reg_expected_msgs[regtype]));		\
6618	  goto failure;						\
6619	}							\
6620      inst.operands[i].reg = val;				\
6621      inst.operands[i].isreg = 1;				\
6622      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
6623      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
6624      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
6625			     || rtype == REG_TYPE_VFD		\
6626			     || rtype == REG_TYPE_NQ);		\
6627    }								\
6628  while (0)
6629
6630#define po_reg_or_goto(regtype, label)				\
6631  do								\
6632    {								\
6633      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
6634				 & inst.operands[i].vectype);	\
6635      if (val == FAIL)						\
6636	goto label;						\
6637								\
6638      inst.operands[i].reg = val;				\
6639      inst.operands[i].isreg = 1;				\
6640      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
6641      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
6642      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
6643			     || rtype == REG_TYPE_VFD		\
6644			     || rtype == REG_TYPE_NQ);		\
6645    }								\
6646  while (0)
6647
6648#define po_imm_or_fail(min, max, popt)				\
6649  do								\
6650    {								\
6651      if (parse_immediate (&str, &val, min, max, popt) == FAIL)	\
6652	goto failure;						\
6653      inst.operands[i].imm = val;				\
6654    }								\
6655  while (0)
6656
6657#define po_scalar_or_goto(elsz, label)					\
6658  do									\
6659    {									\
6660      val = parse_scalar (& str, elsz, & inst.operands[i].vectype);	\
6661      if (val == FAIL)							\
6662	goto label;							\
6663      inst.operands[i].reg = val;					\
6664      inst.operands[i].isscalar = 1;					\
6665    }									\
6666  while (0)
6667
6668#define po_misc_or_fail(expr)			\
6669  do						\
6670    {						\
6671      if (expr)					\
6672	goto failure;				\
6673    }						\
6674  while (0)
6675
6676#define po_misc_or_fail_no_backtrack(expr)		\
6677  do							\
6678    {							\
6679      result = expr;					\
6680      if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)	\
6681	backtrack_pos = 0;				\
6682      if (result != PARSE_OPERAND_SUCCESS)		\
6683	goto failure;					\
6684    }							\
6685  while (0)
6686
6687#define po_barrier_or_imm(str)				   \
6688  do							   \
6689    {						 	   \
6690      val = parse_barrier (&str);			   \
6691      if (val == FAIL && ! ISALPHA (*str))		   \
6692	goto immediate;					   \
6693      if (val == FAIL					   \
6694	  /* ISB can only take SY as an option.  */	   \
6695	  || ((inst.instruction & 0xf0) == 0x60		   \
6696	       && val != 0xf))				   \
6697	{						   \
6698	   inst.error = _("invalid barrier type");	   \
6699	   backtrack_pos = 0;				   \
6700	   goto failure;				   \
6701	}						   \
6702    }							   \
6703  while (0)
6704
6705  skip_whitespace (str);
6706
6707  for (i = 0; upat[i] != OP_stop; i++)
6708    {
6709      op_parse_code = upat[i];
6710      if (op_parse_code >= 1<<16)
6711	op_parse_code = thumb ? (op_parse_code >> 16)
6712				: (op_parse_code & ((1<<16)-1));
6713
6714      if (op_parse_code >= OP_FIRST_OPTIONAL)
6715	{
6716	  /* Remember where we are in case we need to backtrack.  */
6717	  gas_assert (!backtrack_pos);
6718	  backtrack_pos = str;
6719	  backtrack_error = inst.error;
6720	  backtrack_index = i;
6721	}
6722
6723      if (i > 0 && (i > 1 || inst.operands[0].present))
6724	po_char_or_fail (',');
6725
6726      switch (op_parse_code)
6727	{
6728	  /* Registers */
6729	case OP_oRRnpc:
6730	case OP_oRRnpcsp:
6731	case OP_RRnpc:
6732	case OP_RRnpcsp:
6733	case OP_oRR:
6734	case OP_RR:    po_reg_or_fail (REG_TYPE_RN);	  break;
6735	case OP_RCP:   po_reg_or_fail (REG_TYPE_CP);	  break;
6736	case OP_RCN:   po_reg_or_fail (REG_TYPE_CN);	  break;
6737	case OP_RF:    po_reg_or_fail (REG_TYPE_FN);	  break;
6738	case OP_RVS:   po_reg_or_fail (REG_TYPE_VFS);	  break;
6739	case OP_RVD:   po_reg_or_fail (REG_TYPE_VFD);	  break;
6740	case OP_oRND:
6741	case OP_RND:   po_reg_or_fail (REG_TYPE_VFD);	  break;
6742	case OP_RVC:
6743	  po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
6744	  break;
6745	  /* Also accept generic coprocessor regs for unknown registers.  */
6746	  coproc_reg:
6747	  po_reg_or_fail (REG_TYPE_CN);
6748	  break;
6749	case OP_RMF:   po_reg_or_fail (REG_TYPE_MVF);	  break;
6750	case OP_RMD:   po_reg_or_fail (REG_TYPE_MVD);	  break;
6751	case OP_RMFX:  po_reg_or_fail (REG_TYPE_MVFX);	  break;
6752	case OP_RMDX:  po_reg_or_fail (REG_TYPE_MVDX);	  break;
6753	case OP_RMAX:  po_reg_or_fail (REG_TYPE_MVAX);	  break;
6754	case OP_RMDS:  po_reg_or_fail (REG_TYPE_DSPSC);	  break;
6755	case OP_RIWR:  po_reg_or_fail (REG_TYPE_MMXWR);	  break;
6756	case OP_RIWC:  po_reg_or_fail (REG_TYPE_MMXWC);	  break;
6757	case OP_RIWG:  po_reg_or_fail (REG_TYPE_MMXWCG);  break;
6758	case OP_RXA:   po_reg_or_fail (REG_TYPE_XSCALE);  break;
6759	case OP_oRNQ:
6760	case OP_RNQ:   po_reg_or_fail (REG_TYPE_NQ);      break;
6761	case OP_oRNDQ:
6762	case OP_RNDQ:  po_reg_or_fail (REG_TYPE_NDQ);     break;
6763	case OP_RVSD:  po_reg_or_fail (REG_TYPE_VFSD);    break;
6764	case OP_oRNSDQ:
6765	case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ);    break;
6766
6767	/* Neon scalar. Using an element size of 8 means that some invalid
6768	   scalars are accepted here, so deal with those in later code.  */
6769	case OP_RNSC:  po_scalar_or_goto (8, failure);    break;
6770
6771	case OP_RNDQ_I0:
6772	  {
6773	    po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
6774	    break;
6775	    try_imm0:
6776	    po_imm_or_fail (0, 0, TRUE);
6777	  }
6778	  break;
6779
6780	case OP_RVSD_I0:
6781	  po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
6782	  break;
6783
6784	case OP_RSVD_FI0:
6785	  {
6786	    po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
6787	    break;
6788	    try_ifimm0:
6789	    if (parse_ifimm_zero (&str))
6790	      inst.operands[i].imm = 0;
6791	    else
6792	    {
6793	      inst.error
6794	        = _("only floating point zero is allowed as immediate value");
6795	      goto failure;
6796	    }
6797	  }
6798	  break;
6799
6800	case OP_RR_RNSC:
6801	  {
6802	    po_scalar_or_goto (8, try_rr);
6803	    break;
6804	    try_rr:
6805	    po_reg_or_fail (REG_TYPE_RN);
6806	  }
6807	  break;
6808
6809	case OP_RNSDQ_RNSC:
6810	  {
6811	    po_scalar_or_goto (8, try_nsdq);
6812	    break;
6813	    try_nsdq:
6814	    po_reg_or_fail (REG_TYPE_NSDQ);
6815	  }
6816	  break;
6817
6818	case OP_RNDQ_RNSC:
6819	  {
6820	    po_scalar_or_goto (8, try_ndq);
6821	    break;
6822	    try_ndq:
6823	    po_reg_or_fail (REG_TYPE_NDQ);
6824	  }
6825	  break;
6826
6827	case OP_RND_RNSC:
6828	  {
6829	    po_scalar_or_goto (8, try_vfd);
6830	    break;
6831	    try_vfd:
6832	    po_reg_or_fail (REG_TYPE_VFD);
6833	  }
6834	  break;
6835
6836	case OP_VMOV:
	  /* WARNING: parse_neon_mov can advance the operand counter, i.  Any
	     code below that indexes inst.operands must use the updated
	     value.  */
6839	  po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
6840	  break;
6841
6842	case OP_RNDQ_Ibig:
6843	  {
6844	    po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
6845	    break;
6846	    try_immbig:
6847	    /* There's a possibility of getting a 64-bit immediate here, so
6848	       we need special handling.  */
6849	    if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
6850		== FAIL)
6851	      {
6852		inst.error = _("immediate value is out of range");
6853		goto failure;
6854	      }
6855	  }
6856	  break;
6857
6858	case OP_RNDQ_I63b:
6859	  {
6860	    po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
6861	    break;
6862	    try_shimm:
6863	    po_imm_or_fail (0, 63, TRUE);
6864	  }
6865	  break;
6866
6867	case OP_RRnpcb:
6868	  po_char_or_fail ('[');
6869	  po_reg_or_fail  (REG_TYPE_RN);
6870	  po_char_or_fail (']');
6871	  break;
6872
6873	case OP_RRnpctw:
6874	case OP_RRw:
6875	case OP_oRRw:
6876	  po_reg_or_fail (REG_TYPE_RN);
6877	  if (skip_past_char (&str, '!') == SUCCESS)
6878	    inst.operands[i].writeback = 1;
6879	  break;
6880
6881	  /* Immediates */
6882	case OP_I7:	 po_imm_or_fail (  0,	   7, FALSE);	break;
6883	case OP_I15:	 po_imm_or_fail (  0,	  15, FALSE);	break;
6884	case OP_I16:	 po_imm_or_fail (  1,	  16, FALSE);	break;
6885	case OP_I16z:	 po_imm_or_fail (  0,     16, FALSE);   break;
6886	case OP_I31:	 po_imm_or_fail (  0,	  31, FALSE);	break;
6887	case OP_I32:	 po_imm_or_fail (  1,	  32, FALSE);	break;
6888	case OP_I32z:	 po_imm_or_fail (  0,     32, FALSE);   break;
6889	case OP_I63s:	 po_imm_or_fail (-64,	  63, FALSE);	break;
6890	case OP_I63:	 po_imm_or_fail (  0,     63, FALSE);   break;
6891	case OP_I64:	 po_imm_or_fail (  1,     64, FALSE);   break;
6892	case OP_I64z:	 po_imm_or_fail (  0,     64, FALSE);   break;
6893	case OP_I255:	 po_imm_or_fail (  0,	 255, FALSE);	break;
6894
6895	case OP_I4b:	 po_imm_or_fail (  1,	   4, TRUE);	break;
6896	case OP_oI7b:
6897	case OP_I7b:	 po_imm_or_fail (  0,	   7, TRUE);	break;
6898	case OP_I15b:	 po_imm_or_fail (  0,	  15, TRUE);	break;
6899	case OP_oI31b:
6900	case OP_I31b:	 po_imm_or_fail (  0,	  31, TRUE);	break;
6901	case OP_oI32b:   po_imm_or_fail (  1,     32, TRUE);    break;
6902	case OP_oI32z:   po_imm_or_fail (  0,     32, TRUE);    break;
6903	case OP_oIffffb: po_imm_or_fail (  0, 0xffff, TRUE);	break;
6904
6905	  /* Immediate variants */
6906	case OP_oI255c:
6907	  po_char_or_fail ('{');
6908	  po_imm_or_fail (0, 255, TRUE);
6909	  po_char_or_fail ('}');
6910	  break;
6911
6912	case OP_I31w:
6913	  /* The expression parser chokes on a trailing !, so we have
6914	     to find it first and zap it.  */
6915	  {
6916	    char *s = str;
6917	    while (*s && *s != ',')
6918	      s++;
6919	    if (s[-1] == '!')
6920	      {
6921		s[-1] = '\0';
6922		inst.operands[i].writeback = 1;
6923	      }
6924	    po_imm_or_fail (0, 31, TRUE);
6925	    if (str == s - 1)
6926	      str = s;
6927	  }
6928	  break;
6929
6930	  /* Expressions */
6931	case OP_EXPi:	EXPi:
6932	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6933					      GE_OPT_PREFIX));
6934	  break;
6935
6936	case OP_EXP:
6937	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6938					      GE_NO_PREFIX));
6939	  break;
6940
6941	case OP_EXPr:	EXPr:
6942	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6943					      GE_NO_PREFIX));
6944	  if (inst.reloc.exp.X_op == O_symbol)
6945	    {
6946	      val = parse_reloc (&str);
6947	      if (val == -1)
6948		{
6949		  inst.error = _("unrecognized relocation suffix");
6950		  goto failure;
6951		}
6952	      else if (val != BFD_RELOC_UNUSED)
6953		{
6954		  inst.operands[i].imm = val;
6955		  inst.operands[i].hasreloc = 1;
6956		}
6957	    }
6958	  break;
6959
6960	  /* Operand for MOVW or MOVT.  */
6961	case OP_HALF:
6962	  po_misc_or_fail (parse_half (&str));
6963	  break;
6964
6965	  /* Register or expression.  */
6966	case OP_RR_EXr:	  po_reg_or_goto (REG_TYPE_RN, EXPr); break;
6967	case OP_RR_EXi:	  po_reg_or_goto (REG_TYPE_RN, EXPi); break;
6968
6969	  /* Register or immediate.  */
6970	case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0);   break;
6971	I0:		  po_imm_or_fail (0, 0, FALSE);	      break;
6972
6973	case OP_RF_IF:    po_reg_or_goto (REG_TYPE_FN, IF);   break;
6974	IF:
6975	  if (!is_immediate_prefix (*str))
6976	    goto bad_args;
6977	  str++;
6978	  val = parse_fpa_immediate (&str);
6979	  if (val == FAIL)
6980	    goto failure;
6981	  /* FPA immediates are encoded as registers 8-15.
6982	     parse_fpa_immediate has already applied the offset.  */
6983	  inst.operands[i].reg = val;
6984	  inst.operands[i].isreg = 1;
6985	  break;
6986
6987	case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
6988	I32z:		  po_imm_or_fail (0, 32, FALSE);	  break;
6989
6990	  /* Two kinds of register.  */
6991	case OP_RIWR_RIWC:
6992	  {
6993	    struct reg_entry *rege = arm_reg_parse_multi (&str);
6994	    if (!rege
6995		|| (rege->type != REG_TYPE_MMXWR
6996		    && rege->type != REG_TYPE_MMXWC
6997		    && rege->type != REG_TYPE_MMXWCG))
6998	      {
6999		inst.error = _("iWMMXt data or control register expected");
7000		goto failure;
7001	      }
7002	    inst.operands[i].reg = rege->number;
7003	    inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
7004	  }
7005	  break;
7006
7007	case OP_RIWC_RIWG:
7008	  {
7009	    struct reg_entry *rege = arm_reg_parse_multi (&str);
7010	    if (!rege
7011		|| (rege->type != REG_TYPE_MMXWC
7012		    && rege->type != REG_TYPE_MMXWCG))
7013	      {
7014		inst.error = _("iWMMXt control register expected");
7015		goto failure;
7016	      }
7017	    inst.operands[i].reg = rege->number;
7018	    inst.operands[i].isreg = 1;
7019	  }
7020	  break;
7021
7022	  /* Misc */
7023	case OP_CPSF:	 val = parse_cps_flags (&str);		break;
7024	case OP_ENDI:	 val = parse_endian_specifier (&str);	break;
7025	case OP_oROR:	 val = parse_ror (&str);		break;
7026	case OP_COND:	 val = parse_cond (&str);		break;
7027	case OP_oBARRIER_I15:
7028	  po_barrier_or_imm (str); break;
7029	  immediate:
7030	  if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
7031	    goto failure;
7032	  break;
7033
7034	case OP_wPSR:
7035	case OP_rPSR:
7036	  po_reg_or_goto (REG_TYPE_RNB, try_psr);
7037	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
7038	    {
7039	      inst.error = _("Banked registers are not available with this "
7040			     "architecture.");
7041	      goto failure;
7042	    }
7043	  break;
7044	  try_psr:
7045	  val = parse_psr (&str, op_parse_code == OP_wPSR);
7046	  break;
7047
7048	case OP_APSR_RR:
7049	  po_reg_or_goto (REG_TYPE_RN, try_apsr);
7050	  break;
7051	  try_apsr:
	  /* Parse "APSR_nzcv" operand (for FMSTAT-equivalent MRS
	     instruction).  */
7054	  if (strncasecmp (str, "APSR_", 5) == 0)
7055	    {
7056	      unsigned found = 0;
7057	      str += 5;
7058	      while (found < 15)
7059		switch (*str++)
7060		  {
7061		  case 'c': found = (found & 1) ? 16 : found | 1; break;
7062		  case 'n': found = (found & 2) ? 16 : found | 2; break;
7063		  case 'z': found = (found & 4) ? 16 : found | 4; break;
7064		  case 'v': found = (found & 8) ? 16 : found | 8; break;
7065		  default: found = 16;
7066		  }
7067	      if (found != 15)
7068		goto failure;
7069	      inst.operands[i].isvec = 1;
7070	      /* APSR_nzcv is encoded in instructions as if it were the REG_PC.  */
7071	      inst.operands[i].reg = REG_PC;
7072	    }
7073	  else
7074	    goto failure;
7075	  break;
7076
7077	case OP_TB:
7078	  po_misc_or_fail (parse_tb (&str));
7079	  break;
7080
7081	  /* Register lists.  */
7082	case OP_REGLST:
7083	  val = parse_reg_list (&str);
7084	  if (*str == '^')
7085	    {
7086	      inst.operands[i].writeback = 1;
7087	      str++;
7088	    }
7089	  break;
7090
7091	case OP_VRSLST:
7092	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
7093	  break;
7094
7095	case OP_VRDLST:
7096	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
7097	  break;
7098
7099	case OP_VRSDLST:
7100	  /* Allow Q registers too.  */
7101	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7102				    REGLIST_NEON_D);
7103	  if (val == FAIL)
7104	    {
7105	      inst.error = NULL;
7106	      val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7107					REGLIST_VFP_S);
7108	      inst.operands[i].issingle = 1;
7109	    }
7110	  break;
7111
7112	case OP_NRDLST:
7113	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7114				    REGLIST_NEON_D);
7115	  break;
7116
7117	case OP_NSTRLST:
7118	  val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7119					   &inst.operands[i].vectype);
7120	  break;
7121
7122	  /* Addressing modes */
7123	case OP_ADDR:
7124	  po_misc_or_fail (parse_address (&str, i));
7125	  break;
7126
7127	case OP_ADDRGLDR:
7128	  po_misc_or_fail_no_backtrack (
7129	    parse_address_group_reloc (&str, i, GROUP_LDR));
7130	  break;
7131
7132	case OP_ADDRGLDRS:
7133	  po_misc_or_fail_no_backtrack (
7134	    parse_address_group_reloc (&str, i, GROUP_LDRS));
7135	  break;
7136
7137	case OP_ADDRGLDC:
7138	  po_misc_or_fail_no_backtrack (
7139	    parse_address_group_reloc (&str, i, GROUP_LDC));
7140	  break;
7141
7142	case OP_SH:
7143	  po_misc_or_fail (parse_shifter_operand (&str, i));
7144	  break;
7145
7146	case OP_SHG:
7147	  po_misc_or_fail_no_backtrack (
7148	    parse_shifter_operand_group_reloc (&str, i));
7149	  break;
7150
7151	case OP_oSHll:
7152	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
7153	  break;
7154
7155	case OP_oSHar:
7156	  po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
7157	  break;
7158
7159	case OP_oSHllar:
7160	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
7161	  break;
7162
7163	default:
7164	  as_fatal (_("unhandled operand code %d"), op_parse_code);
7165	}
7166
7167      /* Various value-based sanity checks and shared operations.  We
7168	 do not signal immediate failures for the register constraints;
7169	 this allows a syntax error to take precedence.	 */
7170      switch (op_parse_code)
7171	{
7172	case OP_oRRnpc:
7173	case OP_RRnpc:
7174	case OP_RRnpcb:
7175	case OP_RRw:
7176	case OP_oRRw:
7177	case OP_RRnpc_I0:
7178	  if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
7179	    inst.error = BAD_PC;
7180	  break;
7181
7182	case OP_oRRnpcsp:
7183	case OP_RRnpcsp:
7184	  if (inst.operands[i].isreg)
7185	    {
7186	      if (inst.operands[i].reg == REG_PC)
7187		inst.error = BAD_PC;
7188	      else if (inst.operands[i].reg == REG_SP
7189		       /* The restriction on Rd/Rt/Rt2 on Thumb mode has been
7190			  relaxed since ARMv8-A.  */
7191		       && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
7192		{
7193		  gas_assert (thumb);
7194		  inst.error = BAD_SP;
7195		}
7196	    }
7197	  break;
7198
7199	case OP_RRnpctw:
7200	  if (inst.operands[i].isreg
7201	      && inst.operands[i].reg == REG_PC
7202	      && (inst.operands[i].writeback || thumb))
7203	    inst.error = BAD_PC;
7204	  break;
7205
7206	case OP_CPSF:
7207	case OP_ENDI:
7208	case OP_oROR:
7209	case OP_wPSR:
7210	case OP_rPSR:
7211	case OP_COND:
7212	case OP_oBARRIER_I15:
7213	case OP_REGLST:
7214	case OP_VRSLST:
7215	case OP_VRDLST:
7216	case OP_VRSDLST:
7217	case OP_NRDLST:
7218	case OP_NSTRLST:
7219	  if (val == FAIL)
7220	    goto failure;
7221	  inst.operands[i].imm = val;
7222	  break;
7223
7224	default:
7225	  break;
7226	}
7227
7228      /* If we get here, this operand was successfully parsed.	*/
7229      inst.operands[i].present = 1;
7230      continue;
7231
7232    bad_args:
7233      inst.error = BAD_ARGS;
7234
7235    failure:
7236      if (!backtrack_pos)
7237	{
7238	  /* The parse routine should already have set inst.error, but set a
7239	     default here just in case.  */
7240	  if (!inst.error)
7241	    inst.error = _("syntax error");
7242	  return FAIL;
7243	}
7244
7245      /* Do not backtrack over a trailing optional argument that
7246	 absorbed some text.  We will only fail again, with the
7247	 'garbage following instruction' error message, which is
7248	 probably less helpful than the current one.  */
7249      if (backtrack_index == i && backtrack_pos != str
7250	  && upat[i+1] == OP_stop)
7251	{
7252	  if (!inst.error)
7253	    inst.error = _("syntax error");
7254	  return FAIL;
7255	}
7256
7257      /* Try again, skipping the optional argument at backtrack_pos.  */
7258      str = backtrack_pos;
7259      inst.error = backtrack_error;
7260      inst.operands[backtrack_index].present = 0;
7261      i = backtrack_index;
7262      backtrack_pos = 0;
7263    }
7264
7265  /* Check that we have parsed all the arguments.  */
7266  if (*str != '\0' && !inst.error)
7267    inst.error = _("garbage following instruction");
7268
7269  return inst.error ? FAIL : SUCCESS;
7270}
7271
#undef po_char_or_fail
#undef po_reg_or_fail
#undef po_reg_or_goto
#undef po_imm_or_fail
#undef po_scalar_or_goto
#undef po_misc_or_fail
#undef po_misc_or_fail_no_backtrack
#undef po_barrier_or_imm
7278
7279/* Shorthand macro for instruction encoding functions issuing errors.  */
7280#define constraint(expr, err)			\
7281  do						\
7282    {						\
7283      if (expr)					\
7284	{					\
7285	  inst.error = err;			\
7286	  return;				\
7287	}					\
7288    }						\
7289  while (0)
7290
7291/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
7292   instructions are unpredictable if these registers are used.  This
7293   is the BadReg predicate in ARM's Thumb-2 documentation.
7294
7295   Before ARMv8-A, REG_PC and REG_SP were not allowed in quite a few
7296   places, while the restriction on REG_SP was relaxed since ARMv8-A.  */
7297#define reject_bad_reg(reg)					\
7298  do								\
7299   if (reg == REG_PC)						\
7300     {								\
7301       inst.error = BAD_PC;					\
7302       return;							\
7303     }								\
7304   else if (reg == REG_SP					\
7305	    && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))	\
7306     {								\
7307       inst.error = BAD_SP;					\
7308       return;							\
7309     }								\
7310  while (0)
7311
7312/* If REG is R13 (the stack pointer), warn that its use is
7313   deprecated.  */
7314#define warn_deprecated_sp(reg)			\
7315  do						\
7316    if (warn_on_deprecated && reg == REG_SP)	\
7317       as_tsktsk (_("use of r13 is deprecated"));	\
7318  while (0)
7319
7320/* Functions for operand encoding.  ARM, then Thumb.  */
7321
7322#define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
7323
/* If the current inst is a scalar ARMv8.2 fp16 instruction, do the special encoding.
7325
7326   The only binary encoding difference is the Coprocessor number.  Coprocessor
7327   9 is used for half-precision calculations or conversions.  The format of the
7328   instruction is the same as the equivalent Coprocessor 10 instruction that
7329   exists for Single-Precision operation.  */
7330
7331static void
7332do_scalar_fp16_v82_encode (void)
7333{
7334  if (inst.cond != COND_ALWAYS)
7335    as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
7336	       " the behaviour is UNPREDICTABLE"));
7337  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
7338	      _(BAD_FP16));
7339
7340  inst.instruction = (inst.instruction & 0xfffff0ff) | 0x900;
7341  mark_feature_used (&arm_ext_fp16);
7342}
7343
7344/* If VAL can be encoded in the immediate field of an ARM instruction,
7345   return the encoded form.  Otherwise, return FAIL.  */
7346
7347static unsigned int
7348encode_arm_immediate (unsigned int val)
7349{
7350  unsigned int a, i;
7351
7352  if (val <= 0xff)
7353    return val;
7354
7355  for (i = 2; i < 32; i += 2)
7356    if ((a = rotate_left (val, i)) <= 0xff)
7357      return a | (i << 7); /* 12-bit pack: [shift-cnt,const].  */
7358
7359  return FAIL;
7360}
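
/* For example, encode_arm_immediate (0xff000000) finds that rotating the
   value left by 8 gives 0xff, so it returns 0xff | (8 << 7) = 0x4ff:
   rotate field 4 (i.e. rotate right by 8) with an 8-bit constant of 0xff.
   A value such as 0x101 has no such representation and yields FAIL.  */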
7361
7362/* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7363   return the encoded form.  Otherwise, return FAIL.  */
7364static unsigned int
7365encode_thumb32_immediate (unsigned int val)
7366{
7367  unsigned int a, i;
7368
7369  if (val <= 0xff)
7370    return val;
7371
7372  for (i = 1; i <= 24; i++)
7373    {
7374      a = val >> i;
7375      if ((val & ~(0xff << i)) == 0)
7376	return ((val >> i) & 0x7f) | ((32 - i) << 7);
7377    }
7378
7379  a = val & 0xff;
7380  if (val == ((a << 16) | a))
7381    return 0x100 | a;
7382  if (val == ((a << 24) | (a << 16) | (a << 8) | a))
7383    return 0x300 | a;
7384
7385  a = val & 0xff00;
7386  if (val == ((a << 16) | a))
7387    return 0x200 | (a >> 8);
7388
7389  return FAIL;
7390}
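
/* For example (Thumb-2 modified immediates):
     0x000000ab -> 0x0ab   (plain 8-bit value)
     0x00ab00ab -> 0x1ab   (byte replicated in both halfwords)
     0xab00ab00 -> 0x2ab   (byte replicated in the high byte of each halfword)
     0xabababab -> 0x3ab   (byte replicated in every byte)
   Anything not expressible as one of these forms, or as a shifted 8-bit
   value, yields FAIL.  */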
7391/* Encode a VFP SP or DP register number into inst.instruction.  */
7392
7393static void
7394encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
7395{
7396  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
7397      && reg > 15)
7398    {
7399      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
7400	{
7401	  if (thumb_mode)
7402	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
7403				    fpu_vfp_ext_d32);
7404	  else
7405	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
7406				    fpu_vfp_ext_d32);
7407	}
7408      else
7409	{
7410	  first_error (_("D register out of range for selected VFP version"));
7411	  return;
7412	}
7413    }
7414
7415  switch (pos)
7416    {
7417    case VFP_REG_Sd:
7418      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
7419      break;
7420
7421    case VFP_REG_Sn:
7422      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
7423      break;
7424
7425    case VFP_REG_Sm:
7426      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
7427      break;
7428
7429    case VFP_REG_Dd:
7430      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
7431      break;
7432
7433    case VFP_REG_Dn:
7434      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
7435      break;
7436
7437    case VFP_REG_Dm:
7438      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
7439      break;
7440
7441    default:
7442      abort ();
7443    }
7444}
7445
7446/* Encode a <shift> in an ARM-format instruction.  The immediate,
7447   if any, is handled by md_apply_fix.	 */
7448static void
7449encode_arm_shift (int i)
7450{
7451  /* register-shifted register.  */
7452  if (inst.operands[i].immisreg)
7453    {
7454      int op_index;
7455      for (op_index = 0; op_index <= i; ++op_index)
7456	{
	  /* Check the operand only when it is present.  In pre-UAL syntax,
	     if the destination register is the same as the first operand, the
	     two-register form of the instruction can be used.  */
7460	  if (inst.operands[op_index].present && inst.operands[op_index].isreg
7461	      && inst.operands[op_index].reg == REG_PC)
7462	    as_warn (UNPRED_REG ("r15"));
7463	}
7464
7465      if (inst.operands[i].imm == REG_PC)
7466	as_warn (UNPRED_REG ("r15"));
7467    }
7468
7469  if (inst.operands[i].shift_kind == SHIFT_RRX)
7470    inst.instruction |= SHIFT_ROR << 5;
7471  else
7472    {
7473      inst.instruction |= inst.operands[i].shift_kind << 5;
7474      if (inst.operands[i].immisreg)
7475	{
7476	  inst.instruction |= SHIFT_BY_REG;
7477	  inst.instruction |= inst.operands[i].imm << 8;
7478	}
7479      else
7480	inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7481    }
7482}
7483
7484static void
7485encode_arm_shifter_operand (int i)
7486{
7487  if (inst.operands[i].isreg)
7488    {
7489      inst.instruction |= inst.operands[i].reg;
7490      encode_arm_shift (i);
7491    }
7492  else
7493    {
7494      inst.instruction |= INST_IMMEDIATE;
7495      if (inst.reloc.type != BFD_RELOC_ARM_IMMEDIATE)
7496	inst.instruction |= inst.operands[i].imm;
7497    }
7498}
7499
7500/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.  */
7501static void
7502encode_arm_addr_mode_common (int i, bfd_boolean is_t)
7503{
7504  /* PR 14260:
7505     Generate an error if the operand is not a register.  */
7506  constraint (!inst.operands[i].isreg,
7507	      _("Instruction does not support =N addresses"));
7508
7509  inst.instruction |= inst.operands[i].reg << 16;
7510
7511  if (inst.operands[i].preind)
7512    {
7513      if (is_t)
7514	{
7515	  inst.error = _("instruction does not accept preindexed addressing");
7516	  return;
7517	}
7518      inst.instruction |= PRE_INDEX;
7519      if (inst.operands[i].writeback)
7520	inst.instruction |= WRITE_BACK;
7521
7522    }
7523  else if (inst.operands[i].postind)
7524    {
7525      gas_assert (inst.operands[i].writeback);
7526      if (is_t)
7527	inst.instruction |= WRITE_BACK;
7528    }
7529  else /* unindexed - only for coprocessor */
7530    {
7531      inst.error = _("instruction does not accept unindexed addressing");
7532      return;
7533    }
7534
7535  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
7536      && (((inst.instruction & 0x000f0000) >> 16)
7537	  == ((inst.instruction & 0x0000f000) >> 12)))
7538    as_warn ((inst.instruction & LOAD_BIT)
7539	     ? _("destination register same as write-back base")
7540	     : _("source register same as write-back base"));
7541}
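
/* For reference, the operand flags set up by parse_address map onto the
   classic addressing forms like this (register and offset illustrative):

       [r0, #4]		preind, no writeback
       [r0, #4]!	preind + writeback	(PRE_INDEX | WRITE_BACK)
       [r0], #4		postind + writeback
       [r0]		shorthand for [r0, #0]	(preind, offset 0)  */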
7542
7543/* inst.operands[i] was set up by parse_address.  Encode it into an
7544   ARM-format mode 2 load or store instruction.	 If is_t is true,
7545   reject forms that cannot be used with a T instruction (i.e. not
7546   post-indexed).  */
7547static void
7548encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
7549{
7550  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
7551
7552  encode_arm_addr_mode_common (i, is_t);
7553
7554  if (inst.operands[i].immisreg)
7555    {
7556      constraint ((inst.operands[i].imm == REG_PC
7557		   || (is_pc && inst.operands[i].writeback)),
7558		  BAD_PC_ADDRESSING);
7559      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
7560      inst.instruction |= inst.operands[i].imm;
7561      if (!inst.operands[i].negative)
7562	inst.instruction |= INDEX_UP;
7563      if (inst.operands[i].shifted)
7564	{
7565	  if (inst.operands[i].shift_kind == SHIFT_RRX)
7566	    inst.instruction |= SHIFT_ROR << 5;
7567	  else
7568	    {
7569	      inst.instruction |= inst.operands[i].shift_kind << 5;
7570	      inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7571	    }
7572	}
7573    }
7574  else /* immediate offset in inst.reloc */
7575    {
7576      if (is_pc && !inst.reloc.pc_rel)
7577	{
7578	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);
7579
7580	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
7581	     cannot use PC in addressing.
7582	     PC cannot be used in writeback addressing, either.  */
7583	  constraint ((is_t || inst.operands[i].writeback),
7584		      BAD_PC_ADDRESSING);
7585
7586	  /* Use of PC in str is deprecated for ARMv7.  */
7587	  if (warn_on_deprecated
7588	      && !is_load
7589	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
7590	    as_tsktsk (_("use of PC in this instruction is deprecated"));
7591	}
7592
7593      if (inst.reloc.type == BFD_RELOC_UNUSED)
7594	{
7595	  /* Prefer + for zero encoded value.  */
7596	  if (!inst.operands[i].negative)
7597	    inst.instruction |= INDEX_UP;
7598	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
7599	}
7600    }
7601}
7602
7603/* inst.operands[i] was set up by parse_address.  Encode it into an
7604   ARM-format mode 3 load or store instruction.	 Reject forms that
7605   cannot be used with such instructions.  If is_t is true, reject
7606   forms that cannot be used with a T instruction (i.e. not
7607   post-indexed).  */
7608static void
7609encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
7610{
7611  if (inst.operands[i].immisreg && inst.operands[i].shifted)
7612    {
7613      inst.error = _("instruction does not accept scaled register index");
7614      return;
7615    }
7616
7617  encode_arm_addr_mode_common (i, is_t);
7618
7619  if (inst.operands[i].immisreg)
7620    {
7621      constraint ((inst.operands[i].imm == REG_PC
7622		   || (is_t && inst.operands[i].reg == REG_PC)),
7623		  BAD_PC_ADDRESSING);
7624      constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
7625		  BAD_PC_WRITEBACK);
7626      inst.instruction |= inst.operands[i].imm;
7627      if (!inst.operands[i].negative)
7628	inst.instruction |= INDEX_UP;
7629    }
7630  else /* immediate offset in inst.reloc */
7631    {
7632      constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
7633		   && inst.operands[i].writeback),
7634		  BAD_PC_WRITEBACK);
7635      inst.instruction |= HWOFFSET_IMM;
7636      if (inst.reloc.type == BFD_RELOC_UNUSED)
7637	{
7638	  /* Prefer + for zero encoded value.  */
7639	  if (!inst.operands[i].negative)
7640	    inst.instruction |= INDEX_UP;
7641
7642	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
7643	}
7644    }
7645}
7646
7647/* Write immediate bits [7:0] to the following locations:
7648
7649  |28/24|23     19|18 16|15                    4|3     0|
7650  |  a  |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7651
7652  This function is used by VMOV/VMVN/VORR/VBIC.  */
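/* For example, IMMBITS == 0xa5 puts e f g h == 0101 into bits 3:0,
   b c d == 010 into bits 18:16, and a == 1 into bit 24 (bit 28 in Thumb
   mode).  */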
7653
7654static void
7655neon_write_immbits (unsigned immbits)
7656{
7657  inst.instruction |= immbits & 0xf;
7658  inst.instruction |= ((immbits >> 4) & 0x7) << 16;
7659  inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
7660}
7661
7662/* Invert low-order SIZE bits of XHI:XLO.  */
7663
7664static void
7665neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
7666{
7667  unsigned immlo = xlo ? *xlo : 0;
7668  unsigned immhi = xhi ? *xhi : 0;
7669
7670  switch (size)
7671    {
7672    case 8:
7673      immlo = (~immlo) & 0xff;
7674      break;
7675
7676    case 16:
7677      immlo = (~immlo) & 0xffff;
7678      break;
7679
7680    case 64:
7681      immhi = (~immhi) & 0xffffffff;
7682      /* fall through.  */
7683
7684    case 32:
7685      immlo = (~immlo) & 0xffffffff;
7686      break;
7687
7688    default:
7689      abort ();
7690    }
7691
7692  if (xlo)
7693    *xlo = immlo;
7694
7695  if (xhi)
7696    *xhi = immhi;
7697}
7698
7699/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
7700   A, B, C, D.  */
7701
7702static int
7703neon_bits_same_in_bytes (unsigned imm)
7704{
7705  return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
7706	 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
7707	 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
7708	 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
7709}
7710
7711/* For immediate of above form, return 0bABCD.  */
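/* For example, IMM == 0x00ff00ff has A B C D == 0 1 0 1 and so squashes
   to 0b0101.  */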
7712
7713static unsigned
7714neon_squash_bits (unsigned imm)
7715{
7716  return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
7717	 | ((imm & 0x01000000) >> 21);
7718}
7719
7720/* Compress quarter-float representation to 0b...000 abcdefgh.  */
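/* For example, the single-precision bit pattern for 1.0 (0x3f800000)
   compresses to 0x70.  */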
7721
7722static unsigned
7723neon_qfloat_bits (unsigned imm)
7724{
7725  return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
7726}
7727
7728/* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
7729   the instruction. *OP is passed as the initial value of the op field, and
7730   may be set to a different value depending on the constant (i.e.
7731   "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
7732   MVN).  If the immediate looks like a repeated pattern then also
7733   try smaller element sizes.  */
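/* For example, with SIZE == 32 an immediate of 0x00004000 yields CMODE 0x2
   with *IMMBITS == 0x40, while 0x00550055 is reduced to the repeated 16-bit
   pattern 0x55 and yields CMODE 0x8.  */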
7734
7735static int
7736neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
7737			 unsigned *immbits, int *op, int size,
7738			 enum neon_el_type type)
7739{
7740  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
7741     float.  */
7742  if (type == NT_float && !float_p)
7743    return FAIL;
7744
7745  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
7746    {
7747      if (size != 32 || *op == 1)
7748	return FAIL;
7749      *immbits = neon_qfloat_bits (immlo);
7750      return 0xf;
7751    }
7752
7753  if (size == 64)
7754    {
7755      if (neon_bits_same_in_bytes (immhi)
7756	  && neon_bits_same_in_bytes (immlo))
7757	{
7758	  if (*op == 1)
7759	    return FAIL;
7760	  *immbits = (neon_squash_bits (immhi) << 4)
7761		     | neon_squash_bits (immlo);
7762	  *op = 1;
7763	  return 0xe;
7764	}
7765
7766      if (immhi != immlo)
7767	return FAIL;
7768    }
7769
7770  if (size >= 32)
7771    {
7772      if (immlo == (immlo & 0x000000ff))
7773	{
7774	  *immbits = immlo;
7775	  return 0x0;
7776	}
7777      else if (immlo == (immlo & 0x0000ff00))
7778	{
7779	  *immbits = immlo >> 8;
7780	  return 0x2;
7781	}
7782      else if (immlo == (immlo & 0x00ff0000))
7783	{
7784	  *immbits = immlo >> 16;
7785	  return 0x4;
7786	}
7787      else if (immlo == (immlo & 0xff000000))
7788	{
7789	  *immbits = immlo >> 24;
7790	  return 0x6;
7791	}
7792      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
7793	{
7794	  *immbits = (immlo >> 8) & 0xff;
7795	  return 0xc;
7796	}
7797      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
7798	{
7799	  *immbits = (immlo >> 16) & 0xff;
7800	  return 0xd;
7801	}
7802
7803      if ((immlo & 0xffff) != (immlo >> 16))
7804	return FAIL;
7805      immlo &= 0xffff;
7806    }
7807
7808  if (size >= 16)
7809    {
7810      if (immlo == (immlo & 0x000000ff))
7811	{
7812	  *immbits = immlo;
7813	  return 0x8;
7814	}
7815      else if (immlo == (immlo & 0x0000ff00))
7816	{
7817	  *immbits = immlo >> 8;
7818	  return 0xa;
7819	}
7820
7821      if ((immlo & 0xff) != (immlo >> 8))
7822	return FAIL;
7823      immlo &= 0xff;
7824    }
7825
7826  if (immlo == (immlo & 0x000000ff))
7827    {
7828      /* Don't allow MVN with 8-bit immediate.  */
7829      if (*op == 1)
7830	return FAIL;
7831      *immbits = immlo;
7832      return 0xe;
7833    }
7834
7835  return FAIL;
7836}
7837
7838#if defined BFD_HOST_64_BIT
7839/* Returns TRUE if double precision value V may be cast
7840   to single precision without loss of accuracy.  */
7841
7842static bfd_boolean
7843is_double_a_single (bfd_int64_t v)
7844{
7845  int exp = (int)((v >> 52) & 0x7FF);
7846  bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
7847
7848  return (exp == 0 || exp == 0x7FF
7849	  || (exp >= 1023 - 126 && exp <= 1023 + 127))
7850    && (mantissa & 0x1FFFFFFFl) == 0;
7851}
7852
7853/* Returns a double precision value cast to single precision
7854   (ignoring the least significant bits in exponent and mantissa).  */
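/* For example, 2.5 has the double-precision bit pattern 0x4004000000000000:
   the low 29 mantissa bits are zero and the exponent is in range, so
   is_double_a_single returns TRUE and double_to_single returns 0x40200000,
   the single-precision pattern for 2.5.  */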
7855
7856static int
7857double_to_single (bfd_int64_t v)
7858{
7859  int sign = (int) ((v >> 63) & 1l);
7860  int exp = (int) ((v >> 52) & 0x7FF);
7861  bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
7862
7863  if (exp == 0x7FF)
7864    exp = 0xFF;
7865  else
7866    {
7867      exp = exp - 1023 + 127;
7868      if (exp >= 0xFF)
7869	{
7870	  /* Infinity.  */
7871	  exp = 0x7F;
7872	  mantissa = 0;
7873	}
7874      else if (exp < 0)
7875	{
7876	  /* No denormalized numbers.  */
7877	  exp = 0;
7878	  mantissa = 0;
7879	}
7880    }
7881  mantissa >>= 29;
7882  return (sign << 31) | (exp << 23) | mantissa;
7883}
7884#endif /* BFD_HOST_64_BIT */
7885
7886enum lit_type
7887{
7888  CONST_THUMB,
7889  CONST_ARM,
7890  CONST_VEC
7891};
7892
7893static void do_vfp_nsyn_opcode (const char *);
7894
7895/* inst.reloc.exp describes an "=expr" load pseudo-operation.
7896   Determine whether it can be performed with a move instruction; if
7897   it can, convert inst.instruction to that move instruction and
7898   return TRUE; if it can't, convert inst.instruction to a literal-pool
7899   load and return FALSE.  If this is not a valid thing to do in the
7900   current context, set inst.error and return TRUE.
7901
7902   inst.operands[i] describes the destination register.	 */
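/* For example, in ARM state "ldr r0, =0xff" can be converted to
   "mov r0, #0xff" and "ldr r0, =0xffffff00" to "mvn r0, #0xff", whereas
   "ldr r0, =0x12345678" falls back to a PC-relative literal-pool load.  */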
7903
7904static bfd_boolean
7905move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
7906{
7907  unsigned long tbit;
7908  bfd_boolean thumb_p = (t == CONST_THUMB);
7909  bfd_boolean arm_p   = (t == CONST_ARM);
7910
7911  if (thumb_p)
7912    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
7913  else
7914    tbit = LOAD_BIT;
7915
7916  if ((inst.instruction & tbit) == 0)
7917    {
7918      inst.error = _("invalid pseudo operation");
7919      return TRUE;
7920    }
7921
7922  if (inst.reloc.exp.X_op != O_constant
7923      && inst.reloc.exp.X_op != O_symbol
7924      && inst.reloc.exp.X_op != O_big)
7925    {
7926      inst.error = _("constant expression expected");
7927      return TRUE;
7928    }
7929
7930  if (inst.reloc.exp.X_op == O_constant
7931      || inst.reloc.exp.X_op == O_big)
7932    {
7933#if defined BFD_HOST_64_BIT
7934      bfd_int64_t v;
7935#else
7936      offsetT v;
7937#endif
7938      if (inst.reloc.exp.X_op == O_big)
7939	{
7940	  LITTLENUM_TYPE w[X_PRECISION];
7941	  LITTLENUM_TYPE * l;
7942
7943	  if (inst.reloc.exp.X_add_number == -1)
7944	    {
7945	      gen_to_words (w, X_PRECISION, E_PRECISION);
7946	      l = w;
7947	      /* FIXME: Should we check words w[2..5] ?  */
7948	    }
7949	  else
7950	    l = generic_bignum;
7951
7952#if defined BFD_HOST_64_BIT
7953	  v =
7954	    ((((((((bfd_int64_t) l[3] & LITTLENUM_MASK)
7955		  << LITTLENUM_NUMBER_OF_BITS)
7956		 | ((bfd_int64_t) l[2] & LITTLENUM_MASK))
7957		<< LITTLENUM_NUMBER_OF_BITS)
7958	       | ((bfd_int64_t) l[1] & LITTLENUM_MASK))
7959	      << LITTLENUM_NUMBER_OF_BITS)
7960	     | ((bfd_int64_t) l[0] & LITTLENUM_MASK));
7961#else
7962	  v = ((l[1] & LITTLENUM_MASK) << LITTLENUM_NUMBER_OF_BITS)
7963	    |  (l[0] & LITTLENUM_MASK);
7964#endif
7965	}
7966      else
7967	v = inst.reloc.exp.X_add_number;
7968
7969      if (!inst.operands[i].issingle)
7970	{
7971	  if (thumb_p)
7972	    {
7973	      /* LDR should not lead to a flag-setting instruction being
7974		 chosen, so we do not check whether MOVS can be used.  */
7975
7976	      if ((ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
7977		  || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
7978		  && inst.operands[i].reg != 13
7979		  && inst.operands[i].reg != 15)
7980		{
7981		  /* Check if on thumb2 it can be done with a mov.w, mvn or
7982		     movw instruction.  */
7983		  unsigned int newimm;
7984		  bfd_boolean isNegated;
7985
7986		  newimm = encode_thumb32_immediate (v);
7987		  if (newimm != (unsigned int) FAIL)
7988		    isNegated = FALSE;
7989		  else
7990		    {
7991		      newimm = encode_thumb32_immediate (~v);
7992		      if (newimm != (unsigned int) FAIL)
7993			isNegated = TRUE;
7994		    }
7995
7996		  /* The number can be loaded with a mov.w or mvn
7997		     instruction.  */
7998		  if (newimm != (unsigned int) FAIL
7999		      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
8000		    {
8001		      inst.instruction = (0xf04f0000  /*  MOV.W.  */
8002					  | (inst.operands[i].reg << 8));
8003		      /* Change to MOVN.  */
8004		      inst.instruction |= (isNegated ? 0x200000 : 0);
8005		      inst.instruction |= (newimm & 0x800) << 15;
8006		      inst.instruction |= (newimm & 0x700) << 4;
8007		      inst.instruction |= (newimm & 0x0ff);
8008		      return TRUE;
8009		    }
8010		  /* The number can be loaded with a movw instruction.  */
8011		  else if ((v & ~0xFFFF) == 0
8012			   && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
8013		    {
8014		      int imm = v & 0xFFFF;
8015
8016		      inst.instruction = 0xf2400000;  /* MOVW.  */
8017		      inst.instruction |= (inst.operands[i].reg << 8);
8018		      inst.instruction |= (imm & 0xf000) << 4;
8019		      inst.instruction |= (imm & 0x0800) << 15;
8020		      inst.instruction |= (imm & 0x0700) << 4;
8021		      inst.instruction |= (imm & 0x00ff);
8022		      return TRUE;
8023		    }
8024		}
8025	    }
8026	  else if (arm_p)
8027	    {
8028	      int value = encode_arm_immediate (v);
8029
8030	      if (value != FAIL)
8031		{
8032		  /* This can be done with a mov instruction.  */
8033		  inst.instruction &= LITERAL_MASK;
8034		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
8035		  inst.instruction |= value & 0xfff;
8036		  return TRUE;
8037		}
8038
8039	      value = encode_arm_immediate (~ v);
8040	      if (value != FAIL)
8041		{
8042		  /* This can be done with a mvn instruction.  */
8043		  inst.instruction &= LITERAL_MASK;
8044		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
8045		  inst.instruction |= value & 0xfff;
8046		  return TRUE;
8047		}
8048	    }
8049	  else if (t == CONST_VEC && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
8050	    {
8051	      int op = 0;
8052	      unsigned immbits = 0;
8053	      unsigned immlo = inst.operands[1].imm;
8054	      unsigned immhi = inst.operands[1].regisimm
8055		? inst.operands[1].reg
8056		: inst.reloc.exp.X_unsigned
8057		? 0
8058		: ((bfd_int64_t)((int) immlo)) >> 32;
8059	      int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
8060						   &op, 64, NT_invtype);
8061
8062	      if (cmode == FAIL)
8063		{
8064		  neon_invert_size (&immlo, &immhi, 64);
8065		  op = !op;
8066		  cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
8067						   &op, 64, NT_invtype);
8068		}
8069
8070	      if (cmode != FAIL)
8071		{
8072		  inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
8073		    | (1 << 23)
8074		    | (cmode << 8)
8075		    | (op << 5)
8076		    | (1 << 4);
8077
8078		  /* Fill other bits in vmov encoding for both thumb and arm.  */
8079		  if (thumb_mode)
8080		    inst.instruction |= (0x7U << 29) | (0xF << 24);
8081		  else
8082		    inst.instruction |= (0xFU << 28) | (0x1 << 25);
8083		  neon_write_immbits (immbits);
8084		  return TRUE;
8085		}
8086	    }
8087	}
8088
8089      if (t == CONST_VEC)
8090	{
8091	  /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant.  */
8092	  if (inst.operands[i].issingle
8093	      && is_quarter_float (inst.operands[1].imm)
8094	      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3xd))
8095	    {
8096	      inst.operands[1].imm =
8097		neon_qfloat_bits (v);
8098	      do_vfp_nsyn_opcode ("fconsts");
8099	      return TRUE;
8100	    }
8101
8102	  /* If our host does not support a 64-bit type then we cannot perform
8103	     the following optimization.  This means that there will be a
8104	     discrepancy between the output produced by an assembler built for
8105	     a 32-bit-only host and the output produced on a 64-bit host, but
8106	     this cannot be helped.  */
8107#if defined BFD_HOST_64_BIT
8108	  else if (!inst.operands[1].issingle
8109		   && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
8110	    {
8111	      if (is_double_a_single (v)
8112		  && is_quarter_float (double_to_single (v)))
8113		{
8114		  inst.operands[1].imm =
8115		    neon_qfloat_bits (double_to_single (v));
8116		  do_vfp_nsyn_opcode ("fconstd");
8117		  return TRUE;
8118		}
8119	    }
8120#endif
8121	}
8122    }
8123
8124  if (add_to_lit_pool ((!inst.operands[i].isvec
8125			|| inst.operands[i].issingle) ? 4 : 8) == FAIL)
8126    return TRUE;
8127
8128  inst.operands[1].reg = REG_PC;
8129  inst.operands[1].isreg = 1;
8130  inst.operands[1].preind = 1;
8131  inst.reloc.pc_rel = 1;
8132  inst.reloc.type = (thumb_p
8133		     ? BFD_RELOC_ARM_THUMB_OFFSET
8134		     : (mode_3
8135			? BFD_RELOC_ARM_HWLITERAL
8136			: BFD_RELOC_ARM_LITERAL));
8137  return FALSE;
8138}
8139
8140/* inst.operands[i] was set up by parse_address.  Encode it into an
8141   ARM-format instruction.  Reject all forms which cannot be encoded
8142   into a coprocessor load/store instruction.  If wb_ok is false,
8143   reject use of writeback; if unind_ok is false, reject use of
8144   unindexed addressing.  If reloc_override is not 0, use it instead
8145   of BFD_RELOC_ARM_CP_OFF_IMM, unless the initial relocation is a group one
8146   (in which case it is preserved).  */
8147
8148static int
8149encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
8150{
8151  if (!inst.operands[i].isreg)
8152    {
8153      /* PR 18256 */
8154      if (! inst.operands[0].isvec)
8155	{
8156	  inst.error = _("invalid co-processor operand");
8157	  return FAIL;
8158	}
8159      if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
8160	return SUCCESS;
8161    }
8162
8163  inst.instruction |= inst.operands[i].reg << 16;
8164
8165  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));
8166
8167  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
8168    {
8169      gas_assert (!inst.operands[i].writeback);
8170      if (!unind_ok)
8171	{
8172	  inst.error = _("instruction does not support unindexed addressing");
8173	  return FAIL;
8174	}
8175      inst.instruction |= inst.operands[i].imm;
8176      inst.instruction |= INDEX_UP;
8177      return SUCCESS;
8178    }
8179
8180  if (inst.operands[i].preind)
8181    inst.instruction |= PRE_INDEX;
8182
8183  if (inst.operands[i].writeback)
8184    {
8185      if (inst.operands[i].reg == REG_PC)
8186	{
8187	  inst.error = _("pc may not be used with write-back");
8188	  return FAIL;
8189	}
8190      if (!wb_ok)
8191	{
8192	  inst.error = _("instruction does not support writeback");
8193	  return FAIL;
8194	}
8195      inst.instruction |= WRITE_BACK;
8196    }
8197
8198  if (reloc_override)
8199    inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
8200  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
8201	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
8202	   && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
8203    {
8204      if (thumb_mode)
8205	inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
8206      else
8207	inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
8208    }
8209
8210  /* Prefer + for zero encoded value.  */
8211  if (!inst.operands[i].negative)
8212    inst.instruction |= INDEX_UP;
8213
8214  return SUCCESS;
8215}
8216
8217/* Functions for instruction encoding, sorted by sub-architecture.
8218   First some generics; their names are taken from the conventional
8219   bit positions for register arguments in ARM format instructions.  */
8220
8221static void
8222do_noargs (void)
8223{
8224}
8225
8226static void
8227do_rd (void)
8228{
8229  inst.instruction |= inst.operands[0].reg << 12;
8230}
8231
8232static void
8233do_rn (void)
8234{
8235  inst.instruction |= inst.operands[0].reg << 16;
8236}
8237
8238static void
8239do_rd_rm (void)
8240{
8241  inst.instruction |= inst.operands[0].reg << 12;
8242  inst.instruction |= inst.operands[1].reg;
8243}
8244
8245static void
8246do_rm_rn (void)
8247{
8248  inst.instruction |= inst.operands[0].reg;
8249  inst.instruction |= inst.operands[1].reg << 16;
8250}
8251
8252static void
8253do_rd_rn (void)
8254{
8255  inst.instruction |= inst.operands[0].reg << 12;
8256  inst.instruction |= inst.operands[1].reg << 16;
8257}
8258
8259static void
8260do_rn_rd (void)
8261{
8262  inst.instruction |= inst.operands[0].reg << 16;
8263  inst.instruction |= inst.operands[1].reg << 12;
8264}
8265
8266static void
8267do_tt (void)
8268{
8269  inst.instruction |= inst.operands[0].reg << 8;
8270  inst.instruction |= inst.operands[1].reg << 16;
8271}
8272
8273static bfd_boolean
8274check_obsolete (const arm_feature_set *feature, const char *msg)
8275{
8276  if (ARM_CPU_IS_ANY (cpu_variant))
8277    {
8278      as_tsktsk ("%s", msg);
8279      return TRUE;
8280    }
8281  else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
8282    {
8283      as_bad ("%s", msg);
8284      return TRUE;
8285    }
8286
8287  return FALSE;
8288}
8289
8290static void
8291do_rd_rm_rn (void)
8292{
8293  unsigned Rn = inst.operands[2].reg;
8294  /* Enforce restrictions on SWP instruction.  */
8295  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
8296    {
8297      constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
8298		  _("Rn must not overlap other operands"));
8299
8300      /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
8301       */
8302      if (!check_obsolete (&arm_ext_v8,
8303			   _("swp{b} use is obsoleted for ARMv8 and later"))
8304	  && warn_on_deprecated
8305	  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
8306	as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
8307    }
8308
8309  inst.instruction |= inst.operands[0].reg << 12;
8310  inst.instruction |= inst.operands[1].reg;
8311  inst.instruction |= Rn << 16;
8312}
8313
8314static void
8315do_rd_rn_rm (void)
8316{
8317  inst.instruction |= inst.operands[0].reg << 12;
8318  inst.instruction |= inst.operands[1].reg << 16;
8319  inst.instruction |= inst.operands[2].reg;
8320}
8321
8322static void
8323do_rm_rd_rn (void)
8324{
8325  constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
8326  constraint (((inst.reloc.exp.X_op != O_constant
8327		&& inst.reloc.exp.X_op != O_illegal)
8328	       || inst.reloc.exp.X_add_number != 0),
8329	      BAD_ADDR_MODE);
8330  inst.instruction |= inst.operands[0].reg;
8331  inst.instruction |= inst.operands[1].reg << 12;
8332  inst.instruction |= inst.operands[2].reg << 16;
8333}
8334
8335static void
8336do_imm0 (void)
8337{
8338  inst.instruction |= inst.operands[0].imm;
8339}
8340
8341static void
8342do_rd_cpaddr (void)
8343{
8344  inst.instruction |= inst.operands[0].reg << 12;
8345  encode_arm_cp_address (1, TRUE, TRUE, 0);
8346}
8347
8348/* ARM instructions, in alphabetical order by function name (except
8349   that wrapper functions appear immediately after the function they
8350   wrap).  */
8351
8352/* This is a pseudo-op of the form "adr rd, label" to be converted
8353   into a relative address of the form "add rd, pc, #label-.-8".  */
8354
8355static void
8356do_adr (void)
8357{
8358  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */
8359
8360  /* Frag hacking will turn this into a sub instruction if the offset turns
8361     out to be negative.  */
8362  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
8363  inst.reloc.pc_rel = 1;
8364  inst.reloc.exp.X_add_number -= 8;
8365}
8366
8367/* This is a pseudo-op of the form "adrl rd, label" to be converted
8368   into a relative address of the form:
8369   add rd, pc, #low(label-.-8)
8370   add rd, rd, #high(label-.-8)  */
8371
8372static void
8373do_adrl (void)
8374{
8375  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */
8376
8377  /* Frag hacking will turn this into a sub instruction if the offset turns
8378     out to be negative.  */
8379  inst.reloc.type	       = BFD_RELOC_ARM_ADRL_IMMEDIATE;
8380  inst.reloc.pc_rel	       = 1;
8381  inst.size		       = INSN_SIZE * 2;
8382  inst.reloc.exp.X_add_number -= 8;
8383}
8384
8385static void
8386do_arit (void)
8387{
8388  constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
8389	      && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
8390	      THUMB1_RELOC_ONLY);
8391  if (!inst.operands[1].present)
8392    inst.operands[1].reg = inst.operands[0].reg;
8393  inst.instruction |= inst.operands[0].reg << 12;
8394  inst.instruction |= inst.operands[1].reg << 16;
8395  encode_arm_shifter_operand (2);
8396}
8397
8398static void
8399do_barrier (void)
8400{
8401  if (inst.operands[0].present)
8402    inst.instruction |= inst.operands[0].imm;
8403  else
8404    inst.instruction |= 0xf;
8405}
8406
8407static void
8408do_bfc (void)
8409{
8410  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8411  constraint (msb > 32, _("bit-field extends past end of register"));
8412  /* The instruction encoding stores the LSB and MSB,
8413     not the LSB and width.  */
8414  inst.instruction |= inst.operands[0].reg << 12;
8415  inst.instruction |= inst.operands[1].imm << 7;
8416  inst.instruction |= (msb - 1) << 16;
8417}
8418
8419static void
8420do_bfi (void)
8421{
8422  unsigned int msb;
8423
8424  /* #0 in second position is alternative syntax for bfc, which is
8425     the same instruction but with REG_PC in the Rm field.  */
8426  if (!inst.operands[1].isreg)
8427    inst.operands[1].reg = REG_PC;
8428
8429  msb = inst.operands[2].imm + inst.operands[3].imm;
8430  constraint (msb > 32, _("bit-field extends past end of register"));
8431  /* The instruction encoding stores the LSB and MSB,
8432     not the LSB and width.  */
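  /* For example, "bfi r0, r1, #8, #4" has lsb == 8 and width == 4; the
     encoding therefore stores lsb == 8 and msb == 11 (lsb + width - 1),
     not the width.  */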
8433  inst.instruction |= inst.operands[0].reg << 12;
8434  inst.instruction |= inst.operands[1].reg;
8435  inst.instruction |= inst.operands[2].imm << 7;
8436  inst.instruction |= (msb - 1) << 16;
8437}
8438
8439static void
8440do_bfx (void)
8441{
8442  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8443	      _("bit-field extends past end of register"));
8444  inst.instruction |= inst.operands[0].reg << 12;
8445  inst.instruction |= inst.operands[1].reg;
8446  inst.instruction |= inst.operands[2].imm << 7;
8447  inst.instruction |= (inst.operands[3].imm - 1) << 16;
8448}
8449
8450/* ARM V5 breakpoint instruction (argument parse)
8451     BKPT <16 bit unsigned immediate>
8452     Instruction is not conditional.
8453	The bit pattern given in insns[] has the COND_ALWAYS condition,
8454	and it is an error if the caller tried to override that.  */
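/* For example, "bkpt 0x1234" places 0x123 in bits 19:8 and 0x4 in
   bits 3:0 of the encoding.  */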
8455
8456static void
8457do_bkpt (void)
8458{
8459  /* Top 12 of 16 bits to bits 19:8.  */
8460  inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
8461
8462  /* Bottom 4 of 16 bits to bits 3:0.  */
8463  inst.instruction |= inst.operands[0].imm & 0xf;
8464}
8465
8466static void
8467encode_branch (int default_reloc)
8468{
8469  if (inst.operands[0].hasreloc)
8470    {
8471      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
8472		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
8473		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
8474      inst.reloc.type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
8475	? BFD_RELOC_ARM_PLT32
8476	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
8477    }
8478  else
8479    inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
8480  inst.reloc.pc_rel = 1;
8481}
8482
8483static void
8484do_branch (void)
8485{
8486#ifdef OBJ_ELF
8487  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
8488    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
8489  else
8490#endif
8491    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
8492}
8493
8494static void
8495do_bl (void)
8496{
8497#ifdef OBJ_ELF
8498  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
8499    {
8500      if (inst.cond == COND_ALWAYS)
8501	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
8502      else
8503	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
8504    }
8505  else
8506#endif
8507    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
8508}
8509
8510/* ARM V5 branch-link-exchange instruction (argument parse)
8511     BLX <target_addr>		ie BLX(1)
8512     BLX{<condition>} <Rm>	ie BLX(2)
8513   Unfortunately, there are two different opcodes for this mnemonic.
8514   So, the insns[].value is not used, and the code here zaps values
8515	into inst.instruction.
8516   Also, the <target_addr> can be 25 bits, hence has its own reloc.  */
8517
8518static void
8519do_blx (void)
8520{
8521  if (inst.operands[0].isreg)
8522    {
8523      /* Arg is a register; the opcode provided by insns[] is correct.
8524	 It is not illegal to do "blx pc", just useless.  */
8525      if (inst.operands[0].reg == REG_PC)
8526	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
8527
8528      inst.instruction |= inst.operands[0].reg;
8529    }
8530  else
8531    {
8532      /* Arg is an address; this instruction cannot be executed
8533	 conditionally, and the opcode must be adjusted.
8534	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
8535	 where we generate a BFD_RELOC_ARM_PCREL_CALL instead.  */
8536      constraint (inst.cond != COND_ALWAYS, BAD_COND);
8537      inst.instruction = 0xfa000000;
8538      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
8539    }
8540}
8541
8542static void
8543do_bx (void)
8544{
8545  bfd_boolean want_reloc;
8546
8547  if (inst.operands[0].reg == REG_PC)
8548    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
8549
8550  inst.instruction |= inst.operands[0].reg;
8551  /* Output R_ARM_V4BX relocations if this is an EABI object that looks
8552     like it is for ARMv4t or earlier.  */
8553  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
8554  if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
8555      want_reloc = TRUE;
8556
8557#ifdef OBJ_ELF
8558  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
8559#endif
8560    want_reloc = FALSE;
8561
8562  if (want_reloc)
8563    inst.reloc.type = BFD_RELOC_ARM_V4BX;
8564}
8565
8566
8567/* ARM v5TEJ.  Jump to Jazelle code.  */
8568
8569static void
8570do_bxj (void)
8571{
8572  if (inst.operands[0].reg == REG_PC)
8573    as_tsktsk (_("use of r15 in bxj is not really useful"));
8574
8575  inst.instruction |= inst.operands[0].reg;
8576}
8577
8578/* Co-processor data operation:
8579      CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8580      CDP2	<coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}	 */
8581static void
8582do_cdp (void)
8583{
8584  inst.instruction |= inst.operands[0].reg << 8;
8585  inst.instruction |= inst.operands[1].imm << 20;
8586  inst.instruction |= inst.operands[2].reg << 12;
8587  inst.instruction |= inst.operands[3].reg << 16;
8588  inst.instruction |= inst.operands[4].reg;
8589  inst.instruction |= inst.operands[5].imm << 5;
8590}
8591
8592static void
8593do_cmp (void)
8594{
8595  inst.instruction |= inst.operands[0].reg << 16;
8596  encode_arm_shifter_operand (1);
8597}
8598
8599/* Transfer between coprocessor and ARM registers.
8600   MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8601   MRC2
8602   MCR{cond}
8603   MCR2
8604
8605   No special properties.  */
8606
8607struct deprecated_coproc_regs_s
8608{
8609  unsigned cp;
8610  int opc1;
8611  unsigned crn;
8612  unsigned crm;
8613  int opc2;
8614  arm_feature_set deprecated;
8615  arm_feature_set obsoleted;
8616  const char *dep_msg;
8617  const char *obs_msg;
8618};
8619
8620#define DEPR_ACCESS_V8 \
8621  N_("This coprocessor register access is deprecated in ARMv8")
8622
8623/* Table of all deprecated coprocessor registers.  */
8624static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
8625{
8626    {15, 0, 7, 10, 5,					/* CP15DMB.  */
8627     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
8628     DEPR_ACCESS_V8, NULL},
8629    {15, 0, 7, 10, 4,					/* CP15DSB.  */
8630     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
8631     DEPR_ACCESS_V8, NULL},
8632    {15, 0, 7,  5, 4,					/* CP15ISB.  */
8633     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
8634     DEPR_ACCESS_V8, NULL},
8635    {14, 6, 1,  0, 0,					/* TEEHBR.  */
8636     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
8637     DEPR_ACCESS_V8, NULL},
8638    {14, 6, 0,  0, 0,					/* TEECR.  */
8639     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
8640     DEPR_ACCESS_V8, NULL},
8641};
8642
8643#undef DEPR_ACCESS_V8
8644
8645static const size_t deprecated_coproc_reg_count =
8646  sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
8647
8648static void
8649do_co_reg (void)
8650{
8651  unsigned Rd;
8652  size_t i;
8653
8654  Rd = inst.operands[2].reg;
8655  if (thumb_mode)
8656    {
8657      if (inst.instruction == 0xee000010
8658	  || inst.instruction == 0xfe000010)
8659	/* MCR, MCR2  */
8660	reject_bad_reg (Rd);
8661      else if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
8662	/* MRC, MRC2  */
8663	constraint (Rd == REG_SP, BAD_SP);
8664    }
8665  else
8666    {
8667      /* MCR */
8668      if (inst.instruction == 0xe000010)
8669	constraint (Rd == REG_PC, BAD_PC);
8670    }
8671
8672    for (i = 0; i < deprecated_coproc_reg_count; ++i)
8673      {
8674	const struct deprecated_coproc_regs_s *r =
8675	  deprecated_coproc_regs + i;
8676
8677	if (inst.operands[0].reg == r->cp
8678	    && inst.operands[1].imm == r->opc1
8679	    && inst.operands[3].reg == r->crn
8680	    && inst.operands[4].reg == r->crm
8681	    && inst.operands[5].imm == r->opc2)
8682	  {
8683	    if (! ARM_CPU_IS_ANY (cpu_variant)
8684		&& warn_on_deprecated
8685		&& ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
8686	      as_tsktsk ("%s", r->dep_msg);
8687	  }
8688      }
8689
8690  inst.instruction |= inst.operands[0].reg << 8;
8691  inst.instruction |= inst.operands[1].imm << 21;
8692  inst.instruction |= Rd << 12;
8693  inst.instruction |= inst.operands[3].reg << 16;
8694  inst.instruction |= inst.operands[4].reg;
8695  inst.instruction |= inst.operands[5].imm << 5;
8696}
8697
8698/* Transfer between coprocessor register and pair of ARM registers.
8699   MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8700   MCRR2
8701   MRRC{cond}
8702   MRRC2
8703
8704   Two XScale instructions are special cases of these:
8705
8706     MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8707     MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8708
8709   Result unpredictable if Rd or Rn is R15.  */
8710
8711static void
8712do_co_reg2c (void)
8713{
8714  unsigned Rd, Rn;
8715
8716  Rd = inst.operands[2].reg;
8717  Rn = inst.operands[3].reg;
8718
8719  if (thumb_mode)
8720    {
8721      reject_bad_reg (Rd);
8722      reject_bad_reg (Rn);
8723    }
8724  else
8725    {
8726      constraint (Rd == REG_PC, BAD_PC);
8727      constraint (Rn == REG_PC, BAD_PC);
8728    }
8729
8730  /* Only check the MRRC{2} variants.  */
8731  if ((inst.instruction & 0x0FF00000) == 0x0C500000)
8732    {
8733       /* If Rd == Rn, error that the operation is
8734	  unpredictable (example MRRC p3,#1,r1,r1,c4).  */
8735       constraint (Rd == Rn, BAD_OVERLAP);
8736    }
8737
8738  inst.instruction |= inst.operands[0].reg << 8;
8739  inst.instruction |= inst.operands[1].imm << 4;
8740  inst.instruction |= Rd << 12;
8741  inst.instruction |= Rn << 16;
8742  inst.instruction |= inst.operands[4].reg;
8743}
8744
8745static void
8746do_cpsi (void)
8747{
8748  inst.instruction |= inst.operands[0].imm << 6;
8749  if (inst.operands[1].present)
8750    {
8751      inst.instruction |= CPSI_MMOD;
8752      inst.instruction |= inst.operands[1].imm;
8753    }
8754}
8755
8756static void
8757do_dbg (void)
8758{
8759  inst.instruction |= inst.operands[0].imm;
8760}
8761
8762static void
8763do_div (void)
8764{
8765  unsigned Rd, Rn, Rm;
8766
8767  Rd = inst.operands[0].reg;
8768  Rn = (inst.operands[1].present
8769	? inst.operands[1].reg : Rd);
8770  Rm = inst.operands[2].reg;
8771
8772  constraint ((Rd == REG_PC), BAD_PC);
8773  constraint ((Rn == REG_PC), BAD_PC);
8774  constraint ((Rm == REG_PC), BAD_PC);
8775
8776  inst.instruction |= Rd << 16;
8777  inst.instruction |= Rn << 0;
8778  inst.instruction |= Rm << 8;
8779}
8780
8781static void
8782do_it (void)
8783{
8784  /* There is no IT instruction in ARM mode.  We
8785     process it to do the validation as if in
8786     thumb mode, just in case the code gets
8787     assembled for thumb using the unified syntax.  */
8788
8789  inst.size = 0;
8790  if (unified_syntax)
8791    {
8792      set_it_insn_type (IT_INSN);
8793      now_it.mask = (inst.instruction & 0xf) | 0x10;
8794      now_it.cc = inst.operands[0].imm;
8795    }
8796}
8797
8798/* If there is only one register in the register list,
8799   then return its register number.  Otherwise return -1.  */
8800static int
8801only_one_reg_in_list (int range)
8802{
8803  int i = ffs (range) - 1;
8804  return (i > 15 || range != (1 << i)) ? -1 : i;
8805}
8806
8807static void
8808encode_ldmstm(int from_push_pop_mnem)
8809{
8810  int base_reg = inst.operands[0].reg;
8811  int range = inst.operands[1].imm;
8812  int one_reg;
8813
8814  inst.instruction |= base_reg << 16;
8815  inst.instruction |= range;
8816
8817  if (inst.operands[1].writeback)
8818    inst.instruction |= LDM_TYPE_2_OR_3;
8819
8820  if (inst.operands[0].writeback)
8821    {
8822      inst.instruction |= WRITE_BACK;
8823      /* Check for unpredictable uses of writeback.  */
8824      if (inst.instruction & LOAD_BIT)
8825	{
8826	  /* Not allowed in LDM type 2.	 */
8827	  if ((inst.instruction & LDM_TYPE_2_OR_3)
8828	      && ((range & (1 << REG_PC)) == 0))
8829	    as_warn (_("writeback of base register is UNPREDICTABLE"));
8830	  /* Only allowed if base reg not in list for other types.  */
8831	  else if (range & (1 << base_reg))
8832	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
8833	}
8834      else /* STM.  */
8835	{
8836	  /* Not allowed for type 2.  */
8837	  if (inst.instruction & LDM_TYPE_2_OR_3)
8838	    as_warn (_("writeback of base register is UNPREDICTABLE"));
8839	  /* Only allowed if base reg not in list, or first in list.  */
8840	  else if ((range & (1 << base_reg))
8841		   && (range & ((1 << base_reg) - 1)))
8842	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
8843	}
8844    }
8845
8846  /* If PUSH/POP has only one register, then use the A2 encoding.  */
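  /* For example, "push {r3}" and "pop {r3}" use the single-register A2
     encodings, which are equivalent to "str r3, [sp, #-4]!" and
     "ldr r3, [sp], #4" respectively.  */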
8847  one_reg = only_one_reg_in_list (range);
8848  if (from_push_pop_mnem && one_reg >= 0)
8849    {
8850      int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;
8851
8852      inst.instruction &= A_COND_MASK;
8853      inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
8854      inst.instruction |= one_reg << 12;
8855    }
8856}
8857
8858static void
8859do_ldmstm (void)
8860{
8861  encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
8862}
8863
8864/* ARMv5TE load-consecutive (argument parse)
8865   Mode is like LDRH.
8866
8867     LDRccD R, mode
8868     STRccD R, mode.  */
8869
8870static void
8871do_ldrd (void)
8872{
8873  constraint (inst.operands[0].reg % 2 != 0,
8874	      _("first transfer register must be even"));
8875  constraint (inst.operands[1].present
8876	      && inst.operands[1].reg != inst.operands[0].reg + 1,
8877	      _("can only transfer two consecutive registers"));
8878  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
8879  constraint (!inst.operands[2].isreg, _("'[' expected"));
8880
8881  if (!inst.operands[1].present)
8882    inst.operands[1].reg = inst.operands[0].reg + 1;
8883
8884  /* encode_arm_addr_mode_3 will diagnose overlap between the base
8885     register and the first register written; we have to diagnose
8886     overlap between the base and the second register written here.  */
8887
8888  if (inst.operands[2].reg == inst.operands[1].reg
8889      && (inst.operands[2].writeback || inst.operands[2].postind))
8890    as_warn (_("base register written back, and overlaps "
8891	       "second transfer register"));
8892
8893  if (!(inst.instruction & V4_STR_BIT))
8894    {
8895      /* For an index-register load, the index register must not overlap the
8896	destination (even if not write-back).  */
8897      if (inst.operands[2].immisreg
8898	      && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
8899	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
8900	as_warn (_("index register overlaps transfer register"));
8901    }
8902  inst.instruction |= inst.operands[0].reg << 12;
8903  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
8904}
8905
8906static void
8907do_ldrex (void)
8908{
8909  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
8910	      || inst.operands[1].postind || inst.operands[1].writeback
8911	      || inst.operands[1].immisreg || inst.operands[1].shifted
8912	      || inst.operands[1].negative
8913	      /* This can arise if the programmer has written
8914		   strex rN, rM, foo
8915		 or if they have mistakenly used a register name as the last
8916		 operand, e.g.:
8917		   strex rN, rM, rX
8918		 It is very difficult to distinguish between these two cases
8919		 because "rX" might actually be a label, i.e. the register
8920		 name has been occluded by a symbol of the same name. So we
8921		 just generate a general 'bad addressing mode' type error
8922		 message and leave it up to the programmer to discover the
8923		 true cause and fix their mistake.  */
8924	      || (inst.operands[1].reg == REG_PC),
8925	      BAD_ADDR_MODE);
8926
8927  constraint (inst.reloc.exp.X_op != O_constant
8928	      || inst.reloc.exp.X_add_number != 0,
8929	      _("offset must be zero in ARM encoding"));
8930
8931  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
8932
8933  inst.instruction |= inst.operands[0].reg << 12;
8934  inst.instruction |= inst.operands[1].reg << 16;
8935  inst.reloc.type = BFD_RELOC_UNUSED;
8936}
8937
8938static void
8939do_ldrexd (void)
8940{
8941  constraint (inst.operands[0].reg % 2 != 0,
8942	      _("even register required"));
8943  constraint (inst.operands[1].present
8944	      && inst.operands[1].reg != inst.operands[0].reg + 1,
8945	      _("can only load two consecutive registers"));
8946  /* If op 1 were present and equal to PC, this function wouldn't
8947     have been called in the first place.  */
8948  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
8949
8950  inst.instruction |= inst.operands[0].reg << 12;
8951  inst.instruction |= inst.operands[2].reg << 16;
8952}
8953
8954/* In both ARM and thumb state 'ldr pc, #imm'  with an immediate
8955   which is not a multiple of four is UNPREDICTABLE.  */
8956static void
8957check_ldr_r15_aligned (void)
8958{
8959  constraint (!(inst.operands[1].immisreg)
8960	      && (inst.operands[0].reg == REG_PC
8961	      && inst.operands[1].reg == REG_PC
8962	      && (inst.reloc.exp.X_add_number & 0x3)),
8963	      _("ldr to register 15 must be 4-byte aligned"));
8964}
8965
8966static void
8967do_ldst (void)
8968{
8969  inst.instruction |= inst.operands[0].reg << 12;
8970  if (!inst.operands[1].isreg)
8971    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
8972      return;
8973  encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
8974  check_ldr_r15_aligned ();
8975}
8976
8977static void
8978do_ldstt (void)
8979{
8980  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
8981     reject [Rn,...].  */
8982  if (inst.operands[1].preind)
8983    {
8984      constraint (inst.reloc.exp.X_op != O_constant
8985		  || inst.reloc.exp.X_add_number != 0,
8986		  _("this instruction requires a post-indexed address"));
8987
8988      inst.operands[1].preind = 0;
8989      inst.operands[1].postind = 1;
8990      inst.operands[1].writeback = 1;
8991    }
8992  inst.instruction |= inst.operands[0].reg << 12;
8993  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
8994}
8995
8996/* Halfword and signed-byte load/store operations.  */
8997
8998static void
8999do_ldstv4 (void)
9000{
9001  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
9002  inst.instruction |= inst.operands[0].reg << 12;
9003  if (!inst.operands[1].isreg)
9004    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
9005      return;
9006  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
9007}
9008
9009static void
9010do_ldsttv4 (void)
9011{
9012  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
9013     reject [Rn,...].  */
9014  if (inst.operands[1].preind)
9015    {
9016      constraint (inst.reloc.exp.X_op != O_constant
9017		  || inst.reloc.exp.X_add_number != 0,
9018		  _("this instruction requires a post-indexed address"));
9019
9020      inst.operands[1].preind = 0;
9021      inst.operands[1].postind = 1;
9022      inst.operands[1].writeback = 1;
9023    }
9024  inst.instruction |= inst.operands[0].reg << 12;
9025  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
9026}
9027
9028/* Co-processor register load/store.
9029   Format: <LDC|STC>{cond}[L] CP#,CRd,<address>	 */
9030static void
9031do_lstc (void)
9032{
9033  inst.instruction |= inst.operands[0].reg << 8;
9034  inst.instruction |= inst.operands[1].reg << 12;
9035  encode_arm_cp_address (2, TRUE, TRUE, 0);
9036}
9037
9038static void
9039do_mlas (void)
9040{
9041  /* This restriction does not apply to mls (nor to mla in v6 or later).  */
9042  if (inst.operands[0].reg == inst.operands[1].reg
9043      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
9044      && !(inst.instruction & 0x00400000))
9045    as_tsktsk (_("Rd and Rm should be different in mla"));
9046
9047  inst.instruction |= inst.operands[0].reg << 16;
9048  inst.instruction |= inst.operands[1].reg;
9049  inst.instruction |= inst.operands[2].reg << 8;
9050  inst.instruction |= inst.operands[3].reg << 12;
9051}
9052
9053static void
9054do_mov (void)
9055{
9056  constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
9057	      && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
9058	      THUMB1_RELOC_ONLY);
9059  inst.instruction |= inst.operands[0].reg << 12;
9060  encode_arm_shifter_operand (1);
9061}
9062
9063/* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>.	 */
9064static void
9065do_mov16 (void)
9066{
9067  bfd_vma imm;
9068  bfd_boolean top;
9069
9070  top = (inst.instruction & 0x00400000) != 0;
9071  constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
9072	      _(":lower16: not allowed in this instruction"));
9073  constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
9074	      _(":upper16: not allowed in this instruction"));
9075  inst.instruction |= inst.operands[0].reg << 12;
9076  if (inst.reloc.type == BFD_RELOC_UNUSED)
9077    {
9078      imm = inst.reloc.exp.X_add_number;
9079      /* The value is in two pieces: 0:11, 16:19.  */
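      /* For example, #0xabcd is stored as 0xbcd in bits 11:0 and 0xa in
	 bits 19:16.  */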
9080      inst.instruction |= (imm & 0x00000fff);
9081      inst.instruction |= (imm & 0x0000f000) << 4;
9082    }
9083}
9084
9085static int
9086do_vfp_nsyn_mrs (void)
9087{
9088  if (inst.operands[0].isvec)
9089    {
9090      if (inst.operands[1].reg != 1)
9091	first_error (_("operand 1 must be FPSCR"));
9092      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
9093      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
9094      do_vfp_nsyn_opcode ("fmstat");
9095    }
9096  else if (inst.operands[1].isvec)
9097    do_vfp_nsyn_opcode ("fmrx");
9098  else
9099    return FAIL;
9100
9101  return SUCCESS;
9102}
9103
9104static int
9105do_vfp_nsyn_msr (void)
9106{
9107  if (inst.operands[0].isvec)
9108    do_vfp_nsyn_opcode ("fmxr");
9109  else
9110    return FAIL;
9111
9112  return SUCCESS;
9113}
9114
9115static void
9116do_vmrs (void)
9117{
9118  unsigned Rt = inst.operands[0].reg;
9119
9120  if (thumb_mode && Rt == REG_SP)
9121    {
9122      inst.error = BAD_SP;
9123      return;
9124    }
9125
9126  /* MVFR2 is only valid for ARMv8-A.  */
9127  if (inst.operands[1].reg == 5)
9128    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
9129		_(BAD_FPU));
9130
9131  /* APSR_ sets isvec. All other refs to PC are illegal.  */
9132  if (!inst.operands[0].isvec && Rt == REG_PC)
9133    {
9134      inst.error = BAD_PC;
9135      return;
9136    }
9137
9138  /* If we get through parsing the register name, we just insert the number
9139     generated into the instruction without further validation.  */
9140  inst.instruction |= (inst.operands[1].reg << 16);
9141  inst.instruction |= (Rt << 12);
9142}
9143
9144static void
9145do_vmsr (void)
9146{
9147  unsigned Rt = inst.operands[1].reg;
9148
9149  if (thumb_mode)
9150    reject_bad_reg (Rt);
9151  else if (Rt == REG_PC)
9152    {
9153      inst.error = BAD_PC;
9154      return;
9155    }
9156
9157  /* MVFR2 is only valid for ARMv8-A.  */
9158  if (inst.operands[0].reg == 5)
9159    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
9160		_(BAD_FPU));
9161
9162  /* If we get through parsing the register name, we just insert the number
9163     generated into the instruction without further validation.  */
9164  inst.instruction |= (inst.operands[0].reg << 16);
9165  inst.instruction |= (Rt << 12);
9166}
9167
9168static void
9169do_mrs (void)
9170{
9171  unsigned br;
9172
9173  if (do_vfp_nsyn_mrs () == SUCCESS)
9174    return;
9175
9176  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
9177  inst.instruction |= inst.operands[0].reg << 12;
9178
9179  if (inst.operands[1].isreg)
9180    {
9181      br = inst.operands[1].reg;
9182      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf000))
9183	as_bad (_("bad register for mrs"));
9184    }
9185  else
9186    {
9187      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
9188      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
9189		  != (PSR_c|PSR_f),
9190		  _("'APSR', 'CPSR' or 'SPSR' expected"));
9191      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
9192    }
9193
9194  inst.instruction |= br;
9195}
9196
9197/* Two possible forms:
9198      "{C|S}PSR_<field>, Rm",
9199      "{C|S}PSR_f, #expression".  */
9200
9201static void
9202do_msr (void)
9203{
9204  if (do_vfp_nsyn_msr () == SUCCESS)
9205    return;
9206
9207  inst.instruction |= inst.operands[0].imm;
9208  if (inst.operands[1].isreg)
9209    inst.instruction |= inst.operands[1].reg;
9210  else
9211    {
9212      inst.instruction |= INST_IMMEDIATE;
9213      inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
9214      inst.reloc.pc_rel = 0;
9215    }
9216}
9217
9218static void
9219do_mul (void)
9220{
9221  constraint (inst.operands[2].reg == REG_PC, BAD_PC);
9222
9223  if (!inst.operands[2].present)
9224    inst.operands[2].reg = inst.operands[0].reg;
9225  inst.instruction |= inst.operands[0].reg << 16;
9226  inst.instruction |= inst.operands[1].reg;
9227  inst.instruction |= inst.operands[2].reg << 8;
9228
9229  if (inst.operands[0].reg == inst.operands[1].reg
9230      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
9231    as_tsktsk (_("Rd and Rm should be different in mul"));
9232}
9233
9234/* Long Multiply Parser
9235   UMULL RdLo, RdHi, Rm, Rs
9236   SMULL RdLo, RdHi, Rm, Rs
9237   UMLAL RdLo, RdHi, Rm, Rs
9238   SMLAL RdLo, RdHi, Rm, Rs.  */
9239
9240static void
9241do_mull (void)
9242{
9243  inst.instruction |= inst.operands[0].reg << 12;
9244  inst.instruction |= inst.operands[1].reg << 16;
9245  inst.instruction |= inst.operands[2].reg;
9246  inst.instruction |= inst.operands[3].reg << 8;
9247
9248  /* rdhi and rdlo must be different.  */
9249  if (inst.operands[0].reg == inst.operands[1].reg)
9250    as_tsktsk (_("rdhi and rdlo must be different"));
9251
9252  /* rdhi, rdlo and rm must all be different before armv6.  */
9253  if ((inst.operands[0].reg == inst.operands[2].reg
9254      || inst.operands[1].reg == inst.operands[2].reg)
9255      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
9256    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
9257}
9258
9259static void
9260do_nop (void)
9261{
9262  if (inst.operands[0].present
9263      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
9264    {
9265      /* Architectural NOP hints are CPSR sets with no bits selected.  */
9266      inst.instruction &= 0xf0000000;
9267      inst.instruction |= 0x0320f000;
9268      if (inst.operands[0].present)
9269	inst.instruction |= inst.operands[0].imm;
9270    }
9271}
9272
9273/* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
9274   PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
9275   Condition defaults to COND_ALWAYS.
9276   Error if Rd, Rn or Rm are R15.  */
9277
9278static void
9279do_pkhbt (void)
9280{
9281  inst.instruction |= inst.operands[0].reg << 12;
9282  inst.instruction |= inst.operands[1].reg << 16;
9283  inst.instruction |= inst.operands[2].reg;
9284  if (inst.operands[3].present)
9285    encode_arm_shift (3);
9286}
9287
9288/* ARM V6 PKHTB (Argument Parse).  */
9289
9290static void
9291do_pkhtb (void)
9292{
9293  if (!inst.operands[3].present)
9294    {
9295      /* If the shift specifier is omitted, turn the instruction
9296	 into pkhbt rd, rm, rn. */
9297      inst.instruction &= 0xfff00010;
9298      inst.instruction |= inst.operands[0].reg << 12;
9299      inst.instruction |= inst.operands[1].reg;
9300      inst.instruction |= inst.operands[2].reg << 16;
9301    }
9302  else
9303    {
9304      inst.instruction |= inst.operands[0].reg << 12;
9305      inst.instruction |= inst.operands[1].reg << 16;
9306      inst.instruction |= inst.operands[2].reg;
9307      encode_arm_shift (3);
9308    }
9309}
9310
9311/* ARMv5TE: Preload-Cache
9312   MP Extensions: Preload for write
9313
9314    PLD(W) <addr_mode>
9315
9316  Syntactically, like LDR with B=1, W=0, L=1.  */
9317
9318static void
9319do_pld (void)
9320{
9321  constraint (!inst.operands[0].isreg,
9322	      _("'[' expected after PLD mnemonic"));
9323  constraint (inst.operands[0].postind,
9324	      _("post-indexed expression used in preload instruction"));
9325  constraint (inst.operands[0].writeback,
9326	      _("writeback used in preload instruction"));
9327  constraint (!inst.operands[0].preind,
9328	      _("unindexed addressing used in preload instruction"));
9329  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
9330}
9331
9332/* ARMv7: PLI <addr_mode>  */
9333static void
9334do_pli (void)
9335{
9336  constraint (!inst.operands[0].isreg,
9337	      _("'[' expected after PLI mnemonic"));
9338  constraint (inst.operands[0].postind,
9339	      _("post-indexed expression used in preload instruction"));
9340  constraint (inst.operands[0].writeback,
9341	      _("writeback used in preload instruction"));
9342  constraint (!inst.operands[0].preind,
9343	      _("unindexed addressing used in preload instruction"));
9344  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
9345  inst.instruction &= ~PRE_INDEX;
9346}
9347
9348static void
9349do_push_pop (void)
9350{
9351  constraint (inst.operands[0].writeback,
9352	      _("push/pop do not support {reglist}^"));
9353  inst.operands[1] = inst.operands[0];
9354  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
9355  inst.operands[0].isreg = 1;
9356  inst.operands[0].writeback = 1;
9357  inst.operands[0].reg = REG_SP;
9358  encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
9359}
9360
9361/* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9362   word at the specified address and the following word
9363   respectively.
9364   Unconditionally executed.
9365   Error if Rn is R15.	*/
9366
9367static void
9368do_rfe (void)
9369{
9370  inst.instruction |= inst.operands[0].reg << 16;
9371  if (inst.operands[0].writeback)
9372    inst.instruction |= WRITE_BACK;
9373}
9374
9375/* ARM V6 ssat (argument parse).  */
9376
9377static void
9378do_ssat (void)
9379{
9380  inst.instruction |= inst.operands[0].reg << 12;
9381  inst.instruction |= (inst.operands[1].imm - 1) << 16;
9382  inst.instruction |= inst.operands[2].reg;
9383
9384  if (inst.operands[3].present)
9385    encode_arm_shift (3);
9386}
9387
9388/* ARM V6 usat (argument parse).  */
9389
9390static void
9391do_usat (void)
9392{
9393  inst.instruction |= inst.operands[0].reg << 12;
9394  inst.instruction |= inst.operands[1].imm << 16;
9395  inst.instruction |= inst.operands[2].reg;
9396
9397  if (inst.operands[3].present)
9398    encode_arm_shift (3);
9399}
9400
9401/* ARM V6 ssat16 (argument parse).  */
9402
9403static void
9404do_ssat16 (void)
9405{
9406  inst.instruction |= inst.operands[0].reg << 12;
9407  inst.instruction |= ((inst.operands[1].imm - 1) << 16);
9408  inst.instruction |= inst.operands[2].reg;
9409}
9410
9411static void
9412do_usat16 (void)
9413{
9414  inst.instruction |= inst.operands[0].reg << 12;
9415  inst.instruction |= inst.operands[1].imm << 16;
9416  inst.instruction |= inst.operands[2].reg;
9417}
9418
9419/* ARM V6 SETEND (argument parse).  Sets the E bit in the CPSR while
9420   preserving the other bits.
9421
9422   setend <endian_specifier>, where <endian_specifier> is either
9423   BE or LE.  */
9424
9425static void
9426do_setend (void)
9427{
9428  if (warn_on_deprecated
9429      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
9430      as_tsktsk (_("setend use is deprecated for ARMv8"));
9431
9432  if (inst.operands[0].imm)
9433    inst.instruction |= 0x200;
9434}
9435
9436static void
9437do_shift (void)
9438{
9439  unsigned int Rm = (inst.operands[1].present
9440		     ? inst.operands[1].reg
9441		     : inst.operands[0].reg);
9442
9443  inst.instruction |= inst.operands[0].reg << 12;
9444  inst.instruction |= Rm;
9445  if (inst.operands[2].isreg)  /* Rd, {Rm,} Rs */
9446    {
9447      inst.instruction |= inst.operands[2].reg << 8;
9448      inst.instruction |= SHIFT_BY_REG;
9449      /* PR 12854: Error on extraneous shifts.  */
9450      constraint (inst.operands[2].shifted,
9451		  _("extraneous shift as part of operand to shift insn"));
9452    }
9453  else
9454    inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
9455}
9456
9457static void
9458do_smc (void)
9459{
9460  inst.reloc.type = BFD_RELOC_ARM_SMC;
9461  inst.reloc.pc_rel = 0;
9462}
9463
9464static void
9465do_hvc (void)
9466{
9467  inst.reloc.type = BFD_RELOC_ARM_HVC;
9468  inst.reloc.pc_rel = 0;
9469}
9470
9471static void
9472do_swi (void)
9473{
9474  inst.reloc.type = BFD_RELOC_ARM_SWI;
9475  inst.reloc.pc_rel = 0;
9476}
9477
9478static void
9479do_setpan (void)
9480{
9481  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
9482	      _("selected processor does not support SETPAN instruction"));
9483
9484  inst.instruction |= ((inst.operands[0].imm & 1) << 9);
9485}
9486
9487static void
9488do_t_setpan (void)
9489{
9490  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
9491	      _("selected processor does not support SETPAN instruction"));
9492
9493  inst.instruction |= (inst.operands[0].imm << 3);
9494}
9495
9496/* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9497   SMLAxy{cond} Rd,Rm,Rs,Rn
9498   SMLAWy{cond} Rd,Rm,Rs,Rn
9499   Error if any register is R15.  */
9500
9501static void
9502do_smla (void)
9503{
9504  inst.instruction |= inst.operands[0].reg << 16;
9505  inst.instruction |= inst.operands[1].reg;
9506  inst.instruction |= inst.operands[2].reg << 8;
9507  inst.instruction |= inst.operands[3].reg << 12;
9508}
9509
9510/* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9511   SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9512   Error if any register is R15.
9513   Warning if Rdlo == Rdhi.  */
9514
9515static void
9516do_smlal (void)
9517{
9518  inst.instruction |= inst.operands[0].reg << 12;
9519  inst.instruction |= inst.operands[1].reg << 16;
9520  inst.instruction |= inst.operands[2].reg;
9521  inst.instruction |= inst.operands[3].reg << 8;
9522
9523  if (inst.operands[0].reg == inst.operands[1].reg)
9524    as_tsktsk (_("rdhi and rdlo must be different"));
9525}
9526
9527/* ARM V5E (El Segundo) signed-multiply (argument parse)
9528   SMULxy{cond} Rd,Rm,Rs
9529   Error if any register is R15.  */
9530
9531static void
9532do_smul (void)
9533{
9534  inst.instruction |= inst.operands[0].reg << 16;
9535  inst.instruction |= inst.operands[1].reg;
9536  inst.instruction |= inst.operands[2].reg << 8;
9537}
9538
9539/* ARM V6 srs (argument parse).  The variable fields in the encoding are
9540   the same for both ARM and Thumb-2.  */
9541
9542static void
9543do_srs (void)
9544{
9545  int reg;
9546
9547  if (inst.operands[0].present)
9548    {
9549      reg = inst.operands[0].reg;
9550      constraint (reg != REG_SP, _("SRS base register must be r13"));
9551    }
9552  else
9553    reg = REG_SP;
9554
9555  inst.instruction |= reg << 16;
9556  inst.instruction |= inst.operands[1].imm;
9557  if (inst.operands[0].writeback || inst.operands[1].writeback)
9558    inst.instruction |= WRITE_BACK;
9559}
9560
9561/* ARM V6 strex (argument parse).  */
9562
9563static void
9564do_strex (void)
9565{
9566  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
9567	      || inst.operands[2].postind || inst.operands[2].writeback
9568	      || inst.operands[2].immisreg || inst.operands[2].shifted
9569	      || inst.operands[2].negative
9570	      /* See comment in do_ldrex().  */
9571	      || (inst.operands[2].reg == REG_PC),
9572	      BAD_ADDR_MODE);
9573
9574  constraint (inst.operands[0].reg == inst.operands[1].reg
9575	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
9576
9577  constraint (inst.reloc.exp.X_op != O_constant
9578	      || inst.reloc.exp.X_add_number != 0,
9579	      _("offset must be zero in ARM encoding"));
9580
9581  inst.instruction |= inst.operands[0].reg << 12;
9582  inst.instruction |= inst.operands[1].reg;
9583  inst.instruction |= inst.operands[2].reg << 16;
9584  inst.reloc.type = BFD_RELOC_UNUSED;
9585}
9586
9587static void
9588do_t_strexbh (void)
9589{
9590  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
9591	      || inst.operands[2].postind || inst.operands[2].writeback
9592	      || inst.operands[2].immisreg || inst.operands[2].shifted
9593	      || inst.operands[2].negative,
9594	      BAD_ADDR_MODE);
9595
9596  constraint (inst.operands[0].reg == inst.operands[1].reg
9597	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
9598
9599  do_rm_rd_rn ();
9600}
9601
9602static void
9603do_strexd (void)
9604{
9605  constraint (inst.operands[1].reg % 2 != 0,
9606	      _("even register required"));
9607  constraint (inst.operands[2].present
9608	      && inst.operands[2].reg != inst.operands[1].reg + 1,
9609	      _("can only store two consecutive registers"));
9610  /* If op 2 were present and equal to PC, this function wouldn't
9611     have been called in the first place.  */
9612  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));
9613
9614  constraint (inst.operands[0].reg == inst.operands[1].reg
9615	      || inst.operands[0].reg == inst.operands[1].reg + 1
9616	      || inst.operands[0].reg == inst.operands[3].reg,
9617	      BAD_OVERLAP);
9618
9619  inst.instruction |= inst.operands[0].reg << 12;
9620  inst.instruction |= inst.operands[1].reg;
9621  inst.instruction |= inst.operands[3].reg << 16;
9622}
9623
/* ARM V8 STLEX.  */
9625static void
9626do_stlex (void)
9627{
9628  constraint (inst.operands[0].reg == inst.operands[1].reg
9629	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
9630
9631  do_rd_rm_rn ();
9632}
9633
9634static void
9635do_t_stlex (void)
9636{
9637  constraint (inst.operands[0].reg == inst.operands[1].reg
9638	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
9639
9640  do_rm_rd_rn ();
9641}
9642
/* ARM V6 SXTAH extracts a 16-bit value from a register, sign-extends
   it to 32 bits, and adds the result to a value in another register.
   You can specify a rotation by 0, 8, 16, or 24 bits before extracting
   the 16-bit value.
   SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
   Condition defaults to COND_ALWAYS.
   Error if any register is R15.  */
9650
9651static void
9652do_sxtah (void)
9653{
9654  inst.instruction |= inst.operands[0].reg << 12;
9655  inst.instruction |= inst.operands[1].reg << 16;
9656  inst.instruction |= inst.operands[2].reg;
9657  inst.instruction |= inst.operands[3].imm << 10;
9658}
9659
/* ARM V6 SXTH.

   SXTH{<cond>} <Rd>, <Rm>{, <rotation>}
   Condition defaults to COND_ALWAYS.
   Error if any register is R15.  */
9665
9666static void
9667do_sxth (void)
9668{
9669  inst.instruction |= inst.operands[0].reg << 12;
9670  inst.instruction |= inst.operands[1].reg;
9671  inst.instruction |= inst.operands[2].imm << 10;
9672}
9673
9674/* VFP instructions.  In a logical order: SP variant first, monad
9675   before dyad, arithmetic then move then load/store.  */
9676
9677static void
9678do_vfp_sp_monadic (void)
9679{
9680  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9681  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
9682}
9683
9684static void
9685do_vfp_sp_dyadic (void)
9686{
9687  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9688  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
9689  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
9690}
9691
9692static void
9693do_vfp_sp_compare_z (void)
9694{
9695  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9696}
9697
9698static void
9699do_vfp_dp_sp_cvt (void)
9700{
9701  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9702  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
9703}
9704
9705static void
9706do_vfp_sp_dp_cvt (void)
9707{
9708  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9709  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
9710}
9711
9712static void
9713do_vfp_reg_from_sp (void)
9714{
9715  inst.instruction |= inst.operands[0].reg << 12;
9716  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
9717}
9718
9719static void
9720do_vfp_reg2_from_sp2 (void)
9721{
9722  constraint (inst.operands[2].imm != 2,
9723	      _("only two consecutive VFP SP registers allowed here"));
9724  inst.instruction |= inst.operands[0].reg << 12;
9725  inst.instruction |= inst.operands[1].reg << 16;
9726  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
9727}
9728
9729static void
9730do_vfp_sp_from_reg (void)
9731{
9732  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
9733  inst.instruction |= inst.operands[1].reg << 12;
9734}
9735
9736static void
9737do_vfp_sp2_from_reg2 (void)
9738{
9739  constraint (inst.operands[0].imm != 2,
9740	      _("only two consecutive VFP SP registers allowed here"));
9741  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
9742  inst.instruction |= inst.operands[1].reg << 12;
9743  inst.instruction |= inst.operands[2].reg << 16;
9744}
9745
9746static void
9747do_vfp_sp_ldst (void)
9748{
9749  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9750  encode_arm_cp_address (1, FALSE, TRUE, 0);
9751}
9752
9753static void
9754do_vfp_dp_ldst (void)
9755{
9756  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9757  encode_arm_cp_address (1, FALSE, TRUE, 0);
9758}
9759
9760
9761static void
9762vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
9763{
9764  if (inst.operands[0].writeback)
9765    inst.instruction |= WRITE_BACK;
9766  else
9767    constraint (ldstm_type != VFP_LDSTMIA,
9768		_("this addressing mode requires base-register writeback"));
9769  inst.instruction |= inst.operands[0].reg << 16;
9770  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
9771  inst.instruction |= inst.operands[1].imm;
9772}
9773
9774static void
9775vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
9776{
9777  int count;
9778
9779  if (inst.operands[0].writeback)
9780    inst.instruction |= WRITE_BACK;
9781  else
9782    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
9783		_("this addressing mode requires base-register writeback"));
9784
9785  inst.instruction |= inst.operands[0].reg << 16;
9786  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
9787
9788  count = inst.operands[1].imm << 1;
9789  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
9790    count += 1;
9791
9792  inst.instruction |= count;
9793}
9794
9795static void
9796do_vfp_sp_ldstmia (void)
9797{
9798  vfp_sp_ldstm (VFP_LDSTMIA);
9799}
9800
9801static void
9802do_vfp_sp_ldstmdb (void)
9803{
9804  vfp_sp_ldstm (VFP_LDSTMDB);
9805}
9806
9807static void
9808do_vfp_dp_ldstmia (void)
9809{
9810  vfp_dp_ldstm (VFP_LDSTMIA);
9811}
9812
9813static void
9814do_vfp_dp_ldstmdb (void)
9815{
9816  vfp_dp_ldstm (VFP_LDSTMDB);
9817}
9818
9819static void
9820do_vfp_xp_ldstmia (void)
9821{
9822  vfp_dp_ldstm (VFP_LDSTMIAX);
9823}
9824
9825static void
9826do_vfp_xp_ldstmdb (void)
9827{
9828  vfp_dp_ldstm (VFP_LDSTMDBX);
9829}
9830
9831static void
9832do_vfp_dp_rd_rm (void)
9833{
9834  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9835  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
9836}
9837
9838static void
9839do_vfp_dp_rn_rd (void)
9840{
9841  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
9842  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
9843}
9844
9845static void
9846do_vfp_dp_rd_rn (void)
9847{
9848  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9849  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
9850}
9851
9852static void
9853do_vfp_dp_rd_rn_rm (void)
9854{
9855  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9856  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
9857  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
9858}
9859
9860static void
9861do_vfp_dp_rd (void)
9862{
9863  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9864}
9865
9866static void
9867do_vfp_dp_rm_rd_rn (void)
9868{
9869  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
9870  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
9871  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
9872}
9873
9874/* VFPv3 instructions.  */
9875static void
9876do_vfp_sp_const (void)
9877{
9878  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9879  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
9880  inst.instruction |= (inst.operands[1].imm & 0x0f);
9881}
9882
9883static void
9884do_vfp_dp_const (void)
9885{
9886  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9887  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
9888  inst.instruction |= (inst.operands[1].imm & 0x0f);
9889}
9890
9891static void
9892vfp_conv (int srcsize)
9893{
9894  int immbits = srcsize - inst.operands[1].imm;
9895
9896  if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
9897    {
9898      /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
9899	 i.e. immbits must be in range 0 - 16.  */
9900      inst.error = _("immediate value out of range, expected range [0, 16]");
9901      return;
9902    }
9903  else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
9904    {
9905      /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
9906	 i.e. immbits must be in range 0 - 31.  */
9907      inst.error = _("immediate value out of range, expected range [1, 32]");
9908      return;
9909    }
9910
9911  inst.instruction |= (immbits & 1) << 5;
9912  inst.instruction |= (immbits >> 1);
9913}
9914
9915static void
9916do_vfp_sp_conv_16 (void)
9917{
9918  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9919  vfp_conv (16);
9920}
9921
9922static void
9923do_vfp_dp_conv_16 (void)
9924{
9925  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9926  vfp_conv (16);
9927}
9928
9929static void
9930do_vfp_sp_conv_32 (void)
9931{
9932  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9933  vfp_conv (32);
9934}
9935
9936static void
9937do_vfp_dp_conv_32 (void)
9938{
9939  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9940  vfp_conv (32);
9941}
9942
9943/* FPA instructions.  Also in a logical order.	*/
9944
9945static void
9946do_fpa_cmp (void)
9947{
9948  inst.instruction |= inst.operands[0].reg << 16;
9949  inst.instruction |= inst.operands[1].reg;
9950}
9951
9952static void
9953do_fpa_ldmstm (void)
9954{
9955  inst.instruction |= inst.operands[0].reg << 12;
9956  switch (inst.operands[1].imm)
9957    {
9958    case 1: inst.instruction |= CP_T_X;		 break;
9959    case 2: inst.instruction |= CP_T_Y;		 break;
9960    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
9961    case 4:					 break;
9962    default: abort ();
9963    }
9964
9965  if (inst.instruction & (PRE_INDEX | INDEX_UP))
9966    {
9967      /* The instruction specified "ea" or "fd", so we can only accept
9968	 [Rn]{!}.  The instruction does not really support stacking or
9969	 unstacking, so we have to emulate these by setting appropriate
9970	 bits and offsets.  */
9971      constraint (inst.reloc.exp.X_op != O_constant
9972		  || inst.reloc.exp.X_add_number != 0,
9973		  _("this instruction does not support indexing"));
9974
9975      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
9976	inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;
9977
9978      if (!(inst.instruction & INDEX_UP))
9979	inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;
9980
9981      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
9982	{
9983	  inst.operands[2].preind = 0;
9984	  inst.operands[2].postind = 1;
9985	}
9986    }
9987
9988  encode_arm_cp_address (2, TRUE, TRUE, 0);
9989}
9990
9991/* iWMMXt instructions: strictly in alphabetical order.	 */
9992
9993static void
9994do_iwmmxt_tandorc (void)
9995{
9996  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
9997}
9998
9999static void
10000do_iwmmxt_textrc (void)
10001{
10002  inst.instruction |= inst.operands[0].reg << 12;
10003  inst.instruction |= inst.operands[1].imm;
10004}
10005
10006static void
10007do_iwmmxt_textrm (void)
10008{
10009  inst.instruction |= inst.operands[0].reg << 12;
10010  inst.instruction |= inst.operands[1].reg << 16;
10011  inst.instruction |= inst.operands[2].imm;
10012}
10013
10014static void
10015do_iwmmxt_tinsr (void)
10016{
10017  inst.instruction |= inst.operands[0].reg << 16;
10018  inst.instruction |= inst.operands[1].reg << 12;
10019  inst.instruction |= inst.operands[2].imm;
10020}
10021
10022static void
10023do_iwmmxt_tmia (void)
10024{
10025  inst.instruction |= inst.operands[0].reg << 5;
10026  inst.instruction |= inst.operands[1].reg;
10027  inst.instruction |= inst.operands[2].reg << 12;
10028}
10029
10030static void
10031do_iwmmxt_waligni (void)
10032{
10033  inst.instruction |= inst.operands[0].reg << 12;
10034  inst.instruction |= inst.operands[1].reg << 16;
10035  inst.instruction |= inst.operands[2].reg;
10036  inst.instruction |= inst.operands[3].imm << 20;
10037}
10038
10039static void
10040do_iwmmxt_wmerge (void)
10041{
10042  inst.instruction |= inst.operands[0].reg << 12;
10043  inst.instruction |= inst.operands[1].reg << 16;
10044  inst.instruction |= inst.operands[2].reg;
10045  inst.instruction |= inst.operands[3].imm << 21;
10046}
10047
10048static void
10049do_iwmmxt_wmov (void)
10050{
10051  /* WMOV rD, rN is an alias for WOR rD, rN, rN.  */
10052  inst.instruction |= inst.operands[0].reg << 12;
10053  inst.instruction |= inst.operands[1].reg << 16;
10054  inst.instruction |= inst.operands[1].reg;
10055}
10056
10057static void
10058do_iwmmxt_wldstbh (void)
10059{
10060  int reloc;
10061  inst.instruction |= inst.operands[0].reg << 12;
10062  if (thumb_mode)
10063    reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
10064  else
10065    reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
10066  encode_arm_cp_address (1, TRUE, FALSE, reloc);
10067}
10068
10069static void
10070do_iwmmxt_wldstw (void)
10071{
10072  /* RIWR_RIWC clears .isreg for a control register.  */
10073  if (!inst.operands[0].isreg)
10074    {
10075      constraint (inst.cond != COND_ALWAYS, BAD_COND);
10076      inst.instruction |= 0xf0000000;
10077    }
10078
10079  inst.instruction |= inst.operands[0].reg << 12;
10080  encode_arm_cp_address (1, TRUE, TRUE, 0);
10081}
10082
10083static void
10084do_iwmmxt_wldstd (void)
10085{
10086  inst.instruction |= inst.operands[0].reg << 12;
10087  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
10088      && inst.operands[1].immisreg)
10089    {
10090      inst.instruction &= ~0x1a000ff;
10091      inst.instruction |= (0xfU << 28);
10092      if (inst.operands[1].preind)
10093	inst.instruction |= PRE_INDEX;
10094      if (!inst.operands[1].negative)
10095	inst.instruction |= INDEX_UP;
10096      if (inst.operands[1].writeback)
10097	inst.instruction |= WRITE_BACK;
10098      inst.instruction |= inst.operands[1].reg << 16;
10099      inst.instruction |= inst.reloc.exp.X_add_number << 4;
10100      inst.instruction |= inst.operands[1].imm;
10101    }
10102  else
10103    encode_arm_cp_address (1, TRUE, FALSE, 0);
10104}
10105
10106static void
10107do_iwmmxt_wshufh (void)
10108{
10109  inst.instruction |= inst.operands[0].reg << 12;
10110  inst.instruction |= inst.operands[1].reg << 16;
10111  inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
10112  inst.instruction |= (inst.operands[2].imm & 0x0f);
10113}
10114
10115static void
10116do_iwmmxt_wzero (void)
10117{
10118  /* WZERO reg is an alias for WANDN reg, reg, reg.  */
10119  inst.instruction |= inst.operands[0].reg;
10120  inst.instruction |= inst.operands[0].reg << 12;
10121  inst.instruction |= inst.operands[0].reg << 16;
10122}
10123
10124static void
10125do_iwmmxt_wrwrwr_or_imm5 (void)
10126{
10127  if (inst.operands[2].isreg)
10128    do_rd_rn_rm ();
10129  else {
10130    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
10131		_("immediate operand requires iWMMXt2"));
10132    do_rd_rn ();
10133    if (inst.operands[2].imm == 0)
10134      {
10135	switch ((inst.instruction >> 20) & 0xf)
10136	  {
10137	  case 4:
10138	  case 5:
10139	  case 6:
10140	  case 7:
10141	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
10142	    inst.operands[2].imm = 16;
10143	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
10144	    break;
10145	  case 8:
10146	  case 9:
10147	  case 10:
10148	  case 11:
10149	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
10150	    inst.operands[2].imm = 32;
10151	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
10152	    break;
10153	  case 12:
10154	  case 13:
10155	  case 14:
10156	  case 15:
10157	    {
10158	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
10159	      unsigned long wrn;
10160	      wrn = (inst.instruction >> 16) & 0xf;
10161	      inst.instruction &= 0xff0fff0f;
10162	      inst.instruction |= wrn;
10163	      /* Bail out here; the instruction is now assembled.  */
10164	      return;
10165	    }
10166	  }
10167      }
10168    /* Map 32 -> 0, etc.  */
10169    inst.operands[2].imm &= 0x1f;
    inst.instruction |= (0xfU << 28)
			| ((inst.operands[2].imm & 0x10) << 4)
			| (inst.operands[2].imm & 0xf);
10171  }
10172}
10173
10174/* Cirrus Maverick instructions.  Simple 2-, 3-, and 4-register
10175   operations first, then control, shift, and load/store.  */
10176
10177/* Insns like "foo X,Y,Z".  */
10178
10179static void
10180do_mav_triple (void)
10181{
10182  inst.instruction |= inst.operands[0].reg << 16;
10183  inst.instruction |= inst.operands[1].reg;
10184  inst.instruction |= inst.operands[2].reg << 12;
10185}
10186
10187/* Insns like "foo W,X,Y,Z".
10188    where W=MVAX[0:3] and X,Y,Z=MVFX[0:15].  */
10189
10190static void
10191do_mav_quad (void)
10192{
10193  inst.instruction |= inst.operands[0].reg << 5;
10194  inst.instruction |= inst.operands[1].reg << 12;
10195  inst.instruction |= inst.operands[2].reg << 16;
10196  inst.instruction |= inst.operands[3].reg;
10197}
10198
10199/* cfmvsc32<cond> DSPSC,MVDX[15:0].  */
10200static void
10201do_mav_dspsc (void)
10202{
10203  inst.instruction |= inst.operands[1].reg << 12;
10204}
10205
10206/* Maverick shift immediate instructions.
10207   cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
10208   cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0].  */
10209
10210static void
10211do_mav_shift (void)
10212{
10213  int imm = inst.operands[2].imm;
10214
10215  inst.instruction |= inst.operands[0].reg << 12;
10216  inst.instruction |= inst.operands[1].reg << 16;
10217
10218  /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
10219     Bits 5-7 of the insn should have bits 4-6 of the immediate.
10220     Bit 4 should be 0.	 */
10221  imm = (imm & 0xf) | ((imm & 0x70) << 1);
10222
10223  inst.instruction |= imm;
10224}
10225
10226/* XScale instructions.	 Also sorted arithmetic before move.  */
10227
10228/* Xscale multiply-accumulate (argument parse)
10229     MIAcc   acc0,Rm,Rs
10230     MIAPHcc acc0,Rm,Rs
10231     MIAxycc acc0,Rm,Rs.  */
10232
10233static void
10234do_xsc_mia (void)
10235{
10236  inst.instruction |= inst.operands[1].reg;
10237  inst.instruction |= inst.operands[2].reg << 12;
10238}
10239
10240/* Xscale move-accumulator-register (argument parse)
10241
10242     MARcc   acc0,RdLo,RdHi.  */
10243
10244static void
10245do_xsc_mar (void)
10246{
10247  inst.instruction |= inst.operands[1].reg << 12;
10248  inst.instruction |= inst.operands[2].reg << 16;
10249}
10250
10251/* Xscale move-register-accumulator (argument parse)
10252
10253     MRAcc   RdLo,RdHi,acc0.  */
10254
10255static void
10256do_xsc_mra (void)
10257{
10258  constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
10259  inst.instruction |= inst.operands[0].reg << 12;
10260  inst.instruction |= inst.operands[1].reg << 16;
10261}
10262
10263/* Encoding functions relevant only to Thumb.  */
10264
10265/* inst.operands[i] is a shifted-register operand; encode
10266   it into inst.instruction in the format used by Thumb32.  */
10267
10268static void
10269encode_thumb32_shifted_operand (int i)
10270{
10271  unsigned int value = inst.reloc.exp.X_add_number;
10272  unsigned int shift = inst.operands[i].shift_kind;
10273
10274  constraint (inst.operands[i].immisreg,
10275	      _("shift by register not allowed in thumb mode"));
10276  inst.instruction |= inst.operands[i].reg;
10277  if (shift == SHIFT_RRX)
10278    inst.instruction |= SHIFT_ROR << 4;
10279  else
10280    {
10281      constraint (inst.reloc.exp.X_op != O_constant,
10282		  _("expression too complex"));
10283
10284      constraint (value > 32
10285		  || (value == 32 && (shift == SHIFT_LSL
10286				      || shift == SHIFT_ROR)),
10287		  _("shift expression is too large"));
10288
10289      if (value == 0)
10290	shift = SHIFT_LSL;
10291      else if (value == 32)
10292	value = 0;
10293
10294      inst.instruction |= shift << 4;
10295      inst.instruction |= (value & 0x1c) << 10;
10296      inst.instruction |= (value & 0x03) << 6;
10297    }
10298}
10299
10300
10301/* inst.operands[i] was set up by parse_address.  Encode it into a
10302   Thumb32 format load or store instruction.  Reject forms that cannot
10303   be used with such instructions.  If is_t is true, reject forms that
10304   cannot be used with a T instruction; if is_d is true, reject forms
10305   that cannot be used with a D instruction.  If it is a store insn,
10306   reject PC in Rn.  */
10307
10308static void
10309encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
10310{
10311  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
10312
10313  constraint (!inst.operands[i].isreg,
10314	      _("Instruction does not support =N addresses"));
10315
10316  inst.instruction |= inst.operands[i].reg << 16;
10317  if (inst.operands[i].immisreg)
10318    {
10319      constraint (is_pc, BAD_PC_ADDRESSING);
      constraint (is_t || is_d,
		  _("cannot use register index with this instruction"));
10321      constraint (inst.operands[i].negative,
10322		  _("Thumb does not support negative register indexing"));
10323      constraint (inst.operands[i].postind,
10324		  _("Thumb does not support register post-indexing"));
10325      constraint (inst.operands[i].writeback,
10326		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted
		  && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));
10329
10330      inst.instruction |= inst.operands[i].imm;
10331      if (inst.operands[i].shifted)
10332	{
10333	  constraint (inst.reloc.exp.X_op != O_constant,
10334		      _("expression too complex"));
10335	  constraint (inst.reloc.exp.X_add_number < 0
10336		      || inst.reloc.exp.X_add_number > 3,
10337		      _("shift out of range"));
10338	  inst.instruction |= inst.reloc.exp.X_add_number << 4;
10339	}
10340      inst.reloc.type = BFD_RELOC_UNUSED;
10341    }
10342  else if (inst.operands[i].preind)
10343    {
10344      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
10345      constraint (is_t && inst.operands[i].writeback,
10346		  _("cannot use writeback with this instruction"));
10347      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
10348		  BAD_PC_ADDRESSING);
10349
10350      if (is_d)
10351	{
10352	  inst.instruction |= 0x01000000;
10353	  if (inst.operands[i].writeback)
10354	    inst.instruction |= 0x00200000;
10355	}
10356      else
10357	{
10358	  inst.instruction |= 0x00000c00;
10359	  if (inst.operands[i].writeback)
10360	    inst.instruction |= 0x00000100;
10361	}
10362      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
10363    }
10364  else if (inst.operands[i].postind)
10365    {
10366      gas_assert (inst.operands[i].writeback);
10367      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
10368      constraint (is_t, _("cannot use post-indexing with this instruction"));
10369
10370      if (is_d)
10371	inst.instruction |= 0x00200000;
10372      else
10373	inst.instruction |= 0x00000900;
10374      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
10375    }
10376  else /* unindexed - only for coprocessor */
10377    inst.error = _("instruction does not accept unindexed addressing");
10378}
10379
10380/* Table of Thumb instructions which exist in both 16- and 32-bit
10381   encodings (the latter only in post-V6T2 cores).  The index is the
10382   value used in the insns table below.  When there is more than one
10383   possible 16-bit encoding for the instruction, this table always
10384   holds variant (1).
10385   Also contains several pseudo-instructions used during relaxation.  */
10386#define T16_32_TAB				\
10387  X(_adc,   4140, eb400000),			\
10388  X(_adcs,  4140, eb500000),			\
10389  X(_add,   1c00, eb000000),			\
10390  X(_adds,  1c00, eb100000),			\
10391  X(_addi,  0000, f1000000),			\
10392  X(_addis, 0000, f1100000),			\
10393  X(_add_pc,000f, f20f0000),			\
10394  X(_add_sp,000d, f10d0000),			\
10395  X(_adr,   000f, f20f0000),			\
10396  X(_and,   4000, ea000000),			\
10397  X(_ands,  4000, ea100000),			\
10398  X(_asr,   1000, fa40f000),			\
10399  X(_asrs,  1000, fa50f000),			\
10400  X(_b,     e000, f000b000),			\
10401  X(_bcond, d000, f0008000),			\
10402  X(_bic,   4380, ea200000),			\
10403  X(_bics,  4380, ea300000),			\
10404  X(_cmn,   42c0, eb100f00),			\
10405  X(_cmp,   2800, ebb00f00),			\
10406  X(_cpsie, b660, f3af8400),			\
10407  X(_cpsid, b670, f3af8600),			\
10408  X(_cpy,   4600, ea4f0000),			\
10409  X(_dec_sp,80dd, f1ad0d00),			\
10410  X(_eor,   4040, ea800000),			\
10411  X(_eors,  4040, ea900000),			\
10412  X(_inc_sp,00dd, f10d0d00),			\
10413  X(_ldmia, c800, e8900000),			\
10414  X(_ldr,   6800, f8500000),			\
10415  X(_ldrb,  7800, f8100000),			\
10416  X(_ldrh,  8800, f8300000),			\
10417  X(_ldrsb, 5600, f9100000),			\
10418  X(_ldrsh, 5e00, f9300000),			\
10419  X(_ldr_pc,4800, f85f0000),			\
10420  X(_ldr_pc2,4800, f85f0000),			\
10421  X(_ldr_sp,9800, f85d0000),			\
10422  X(_lsl,   0000, fa00f000),			\
10423  X(_lsls,  0000, fa10f000),			\
10424  X(_lsr,   0800, fa20f000),			\
10425  X(_lsrs,  0800, fa30f000),			\
10426  X(_mov,   2000, ea4f0000),			\
10427  X(_movs,  2000, ea5f0000),			\
10428  X(_mul,   4340, fb00f000),                     \
10429  X(_muls,  4340, ffffffff), /* no 32b muls */	\
10430  X(_mvn,   43c0, ea6f0000),			\
10431  X(_mvns,  43c0, ea7f0000),			\
10432  X(_neg,   4240, f1c00000), /* rsb #0 */	\
10433  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
10434  X(_orr,   4300, ea400000),			\
10435  X(_orrs,  4300, ea500000),			\
10436  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
10437  X(_push,  b400, e92d0000), /* stmdb sp!,... */	\
10438  X(_rev,   ba00, fa90f080),			\
10439  X(_rev16, ba40, fa90f090),			\
10440  X(_revsh, bac0, fa90f0b0),			\
10441  X(_ror,   41c0, fa60f000),			\
10442  X(_rors,  41c0, fa70f000),			\
10443  X(_sbc,   4180, eb600000),			\
10444  X(_sbcs,  4180, eb700000),			\
10445  X(_stmia, c000, e8800000),			\
10446  X(_str,   6000, f8400000),			\
10447  X(_strb,  7000, f8000000),			\
10448  X(_strh,  8000, f8200000),			\
10449  X(_str_sp,9000, f84d0000),			\
10450  X(_sub,   1e00, eba00000),			\
10451  X(_subs,  1e00, ebb00000),			\
10452  X(_subi,  8000, f1a00000),			\
10453  X(_subis, 8000, f1b00000),			\
10454  X(_sxtb,  b240, fa4ff080),			\
10455  X(_sxth,  b200, fa0ff080),			\
10456  X(_tst,   4200, ea100f00),			\
10457  X(_uxtb,  b2c0, fa5ff080),			\
10458  X(_uxth,  b280, fa1ff080),			\
10459  X(_nop,   bf00, f3af8000),			\
10460  X(_yield, bf10, f3af8001),			\
10461  X(_wfe,   bf20, f3af8002),			\
10462  X(_wfi,   bf30, f3af8003),			\
10463  X(_sev,   bf40, f3af8004),                    \
10464  X(_sevl,  bf50, f3af8005),			\
10465  X(_udf,   de00, f7f0a000)
10466
10467/* To catch errors in encoding functions, the codes are all offset by
10468   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
10469   as 16-bit instructions.  */
10470#define X(a,b,c) T_MNEM##a
10471enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
10472#undef X
10473
10474#define X(a,b,c) 0x##b
10475static const unsigned short thumb_op16[] = { T16_32_TAB };
10476#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
10477#undef X
10478
10479#define X(a,b,c) 0x##c
10480static const unsigned int thumb_op32[] = { T16_32_TAB };
10481#define THUMB_OP32(n)        (thumb_op32[(n) - (T16_32_OFFSET + 1)])
10482#define THUMB_SETS_FLAGS(n)  (THUMB_OP32 (n) & 0x00100000)
10483#undef X
10484#undef T16_32_TAB
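
/* For example, THUMB_OP16 (T_MNEM_add) is 0x1c00 and THUMB_OP32 (T_MNEM_add)
   is 0xeb000000, taken from the _add entry of T16_32_TAB above.  */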
10485
10486/* Thumb instruction encoders, in alphabetical order.  */
10487
10488/* ADDW or SUBW.  */
10489
10490static void
10491do_t_add_sub_w (void)
10492{
10493  int Rd, Rn;
10494
10495  Rd = inst.operands[0].reg;
10496  Rn = inst.operands[1].reg;
10497
10498  /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10499     is the SP-{plus,minus}-immediate form of the instruction.  */
10500  if (Rn == REG_SP)
10501    constraint (Rd == REG_PC, BAD_PC);
10502  else
10503    reject_bad_reg (Rd);
10504
10505  inst.instruction |= (Rn << 16) | (Rd << 8);
10506  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
10507}
10508
/* Parse an add or subtract instruction.  We get here with inst.instruction
   equalling any of T_MNEM_add, adds, sub, or subs.  */
10511
10512static void
10513do_t_add_sub (void)
10514{
10515  int Rd, Rs, Rn;
10516
10517  Rd = inst.operands[0].reg;
10518  Rs = (inst.operands[1].present
10519	? inst.operands[1].reg    /* Rd, Rs, foo */
10520	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
10521
10522  if (Rd == REG_PC)
10523    set_it_insn_type_last ();
10524
10525  if (unified_syntax)
10526    {
10527      bfd_boolean flags;
10528      bfd_boolean narrow;
10529      int opcode;
10530
10531      flags = (inst.instruction == T_MNEM_adds
10532	       || inst.instruction == T_MNEM_subs);
10533      if (flags)
10534	narrow = !in_it_block ();
10535      else
10536	narrow = in_it_block ();
10537      if (!inst.operands[2].isreg)
10538	{
10539	  int add;
10540
10541	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
10542	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
10543
10544	  add = (inst.instruction == T_MNEM_add
10545		 || inst.instruction == T_MNEM_adds);
10546	  opcode = 0;
10547	  if (inst.size_req != 4)
10548	    {
10549	      /* Attempt to use a narrow opcode, with relaxation if
10550		 appropriate.  */
10551	      if (Rd == REG_SP && Rs == REG_SP && !flags)
10552		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
10553	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
10554		opcode = T_MNEM_add_sp;
10555	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
10556		opcode = T_MNEM_add_pc;
10557	      else if (Rd <= 7 && Rs <= 7 && narrow)
10558		{
10559		  if (flags)
10560		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
10561		  else
10562		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
10563		}
10564	      if (opcode)
10565		{
10566		  inst.instruction = THUMB_OP16(opcode);
10567		  inst.instruction |= (Rd << 4) | Rs;
10568		  if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
10569		      || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
10570		  {
10571		    if (inst.size_req == 2)
10572		      inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
10573		    else
10574		      inst.relax = opcode;
10575		  }
10576		}
10577	      else
10578		constraint (inst.size_req == 2, BAD_HIREG);
10579	    }
10580	  if (inst.size_req == 4
10581	      || (inst.size_req != 2 && !opcode))
10582	    {
10583	      constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
			  && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,
10585			  THUMB1_RELOC_ONLY);
10586	      if (Rd == REG_PC)
10587		{
10588		  constraint (add, BAD_PC);
10589		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
10590			     _("only SUBS PC, LR, #const allowed"));
10591		  constraint (inst.reloc.exp.X_op != O_constant,
10592			      _("expression too complex"));
10593		  constraint (inst.reloc.exp.X_add_number < 0
10594			      || inst.reloc.exp.X_add_number > 0xff,
10595			     _("immediate value out of range"));
10596		  inst.instruction = T2_SUBS_PC_LR
10597				     | inst.reloc.exp.X_add_number;
10598		  inst.reloc.type = BFD_RELOC_UNUSED;
10599		  return;
10600		}
10601	      else if (Rs == REG_PC)
10602		{
10603		  /* Always use addw/subw.  */
10604		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
10605		  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
10606		}
10607	      else
10608		{
10609		  inst.instruction = THUMB_OP32 (inst.instruction);
10610		  inst.instruction = (inst.instruction & 0xe1ffffff)
10611				     | 0x10000000;
10612		  if (flags)
10613		    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10614		  else
10615		    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
10616		}
10617	      inst.instruction |= Rd << 8;
10618	      inst.instruction |= Rs << 16;
10619	    }
10620	}
10621      else
10622	{
10623	  unsigned int value = inst.reloc.exp.X_add_number;
10624	  unsigned int shift = inst.operands[2].shift_kind;
10625
10626	  Rn = inst.operands[2].reg;
10627	  /* See if we can do this with a 16-bit instruction.  */
10628	  if (!inst.operands[2].shifted && inst.size_req != 4)
10629	    {
10630	      if (Rd > 7 || Rs > 7 || Rn > 7)
10631		narrow = FALSE;
10632
10633	      if (narrow)
10634		{
10635		  inst.instruction = ((inst.instruction == T_MNEM_adds
10636				       || inst.instruction == T_MNEM_add)
10637				      ? T_OPCODE_ADD_R3
10638				      : T_OPCODE_SUB_R3);
10639		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
10640		  return;
10641		}
10642
10643	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
10644		{
10645		  /* Thumb-1 cores (except v6-M) require at least one high
10646		     register in a narrow non flag setting add.  */
10647		  if (Rd > 7 || Rn > 7
10648		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
10649		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
10650		    {
10651		      if (Rd == Rn)
10652			{
10653			  Rn = Rs;
10654			  Rs = Rd;
10655			}
10656		      inst.instruction = T_OPCODE_ADD_HI;
10657		      inst.instruction |= (Rd & 8) << 4;
10658		      inst.instruction |= (Rd & 7);
10659		      inst.instruction |= Rn << 3;
10660		      return;
10661		    }
10662		}
10663	    }
10664
10665	  constraint (Rd == REG_PC, BAD_PC);
10666	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
10667	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
10668	  constraint (Rs == REG_PC, BAD_PC);
10669	  reject_bad_reg (Rn);
10670
10671	  /* If we get here, it can't be done in 16 bits.  */
10672	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
10673		      _("shift must be constant"));
10674	  inst.instruction = THUMB_OP32 (inst.instruction);
10675	  inst.instruction |= Rd << 8;
10676	  inst.instruction |= Rs << 16;
10677	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
10678		      _("shift value over 3 not allowed in thumb mode"));
10679	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
10680		      _("only LSL shift allowed in thumb mode"));
10681	  encode_thumb32_shifted_operand (2);
10682	}
10683    }
10684  else
10685    {
10686      constraint (inst.instruction == T_MNEM_adds
10687		  || inst.instruction == T_MNEM_subs,
10688		  BAD_THUMB32);
10689
10690      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
10691	{
10692	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
10693		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
10694		      BAD_HIREG);
10695
10696	  inst.instruction = (inst.instruction == T_MNEM_add
10697			      ? 0x0000 : 0x8000);
10698	  inst.instruction |= (Rd << 4) | Rs;
10699	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
10700	  return;
10701	}
10702
10703      Rn = inst.operands[2].reg;
10704      constraint (inst.operands[2].shifted, _("unshifted register required"));
10705
10706      /* We now have Rd, Rs, and Rn set to registers.  */
10707      if (Rd > 7 || Rs > 7 || Rn > 7)
10708	{
10709	  /* Can't do this for SUB.	 */
10710	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
10711	  inst.instruction = T_OPCODE_ADD_HI;
10712	  inst.instruction |= (Rd & 8) << 4;
10713	  inst.instruction |= (Rd & 7);
10714	  if (Rs == Rd)
10715	    inst.instruction |= Rn << 3;
10716	  else if (Rn == Rd)
10717	    inst.instruction |= Rs << 3;
10718	  else
10719	    constraint (1, _("dest must overlap one source register"));
10720	}
10721      else
10722	{
10723	  inst.instruction = (inst.instruction == T_MNEM_add
10724			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
10725	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
10726	}
10727    }
10728}
10729
10730static void
10731do_t_adr (void)
10732{
10733  unsigned Rd;
10734
10735  Rd = inst.operands[0].reg;
10736  reject_bad_reg (Rd);
10737
10738  if (unified_syntax && inst.size_req == 0 && Rd <= 7)
10739    {
10740      /* Defer to section relaxation.  */
10741      inst.relax = inst.instruction;
10742      inst.instruction = THUMB_OP16 (inst.instruction);
10743      inst.instruction |= Rd << 4;
10744    }
10745  else if (unified_syntax && inst.size_req != 2)
10746    {
10747      /* Generate a 32-bit opcode.  */
10748      inst.instruction = THUMB_OP32 (inst.instruction);
10749      inst.instruction |= Rd << 8;
10750      inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
10751      inst.reloc.pc_rel = 1;
10752    }
10753  else
10754    {
10755      /* Generate a 16-bit opcode.  */
10756      inst.instruction = THUMB_OP16 (inst.instruction);
10757      inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
10758      inst.reloc.exp.X_add_number -= 4; /* PC relative adjust.  */
10759      inst.reloc.pc_rel = 1;
10760
10761      inst.instruction |= Rd << 4;
10762    }
10763}
10764
10765/* Arithmetic instructions for which there is just one 16-bit
10766   instruction encoding, and it allows only two low registers.
10767   For maximal compatibility with ARM syntax, we allow three register
10768   operands even when Thumb-32 instructions are not available, as long
10769   as the first two are identical.  For instance, both "sbc r0,r1" and
10770   "sbc r0,r0,r1" are allowed.  */
10771static void
10772do_t_arit3 (void)
10773{
10774  int Rd, Rs, Rn;
10775
10776  Rd = inst.operands[0].reg;
10777  Rs = (inst.operands[1].present
10778	? inst.operands[1].reg    /* Rd, Rs, foo */
10779	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
10780  Rn = inst.operands[2].reg;
10781
10782  reject_bad_reg (Rd);
10783  reject_bad_reg (Rs);
10784  if (inst.operands[2].isreg)
10785    reject_bad_reg (Rn);
10786
10787  if (unified_syntax)
10788    {
10789      if (!inst.operands[2].isreg)
10790	{
10791	  /* For an immediate, we always generate a 32-bit opcode;
10792	     section relaxation will shrink it later if possible.  */
10793	  inst.instruction = THUMB_OP32 (inst.instruction);
10794	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
10795	  inst.instruction |= Rd << 8;
10796	  inst.instruction |= Rs << 16;
10797	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10798	}
10799      else
10800	{
10801	  bfd_boolean narrow;
10802
10803	  /* See if we can do this with a 16-bit instruction.  */
10804	  if (THUMB_SETS_FLAGS (inst.instruction))
10805	    narrow = !in_it_block ();
10806	  else
10807	    narrow = in_it_block ();
10808
10809	  if (Rd > 7 || Rn > 7 || Rs > 7)
10810	    narrow = FALSE;
10811	  if (inst.operands[2].shifted)
10812	    narrow = FALSE;
10813	  if (inst.size_req == 4)
10814	    narrow = FALSE;
10815
10816	  if (narrow
10817	      && Rd == Rs)
10818	    {
10819	      inst.instruction = THUMB_OP16 (inst.instruction);
10820	      inst.instruction |= Rd;
10821	      inst.instruction |= Rn << 3;
10822	      return;
10823	    }
10824
10825	  /* If we get here, it can't be done in 16 bits.  */
10826	  constraint (inst.operands[2].shifted
10827		      && inst.operands[2].immisreg,
10828		      _("shift must be constant"));
10829	  inst.instruction = THUMB_OP32 (inst.instruction);
10830	  inst.instruction |= Rd << 8;
10831	  inst.instruction |= Rs << 16;
10832	  encode_thumb32_shifted_operand (2);
10833	}
10834    }
10835  else
10836    {
10837      /* On its face this is a lie - the instruction does set the
10838	 flags.  However, the only supported mnemonic in this mode
10839	 says it doesn't.  */
10840      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
10841
10842      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
10843		  _("unshifted register required"));
10844      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
10845      constraint (Rd != Rs,
10846		  _("dest and source1 must be the same register"));
10847
10848      inst.instruction = THUMB_OP16 (inst.instruction);
10849      inst.instruction |= Rd;
10850      inst.instruction |= Rn << 3;
10851    }
10852}
10853
10854/* Similarly, but for instructions where the arithmetic operation is
10855   commutative, so we can allow either of them to be different from
10856   the destination operand in a 16-bit instruction.  For instance, all
10857   three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
10858   accepted.  */
10859static void
10860do_t_arit3c (void)
10861{
10862  int Rd, Rs, Rn;
10863
10864  Rd = inst.operands[0].reg;
10865  Rs = (inst.operands[1].present
10866	? inst.operands[1].reg    /* Rd, Rs, foo */
10867	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
10868  Rn = inst.operands[2].reg;
10869
10870  reject_bad_reg (Rd);
10871  reject_bad_reg (Rs);
10872  if (inst.operands[2].isreg)
10873    reject_bad_reg (Rn);
10874
10875  if (unified_syntax)
10876    {
10877      if (!inst.operands[2].isreg)
10878	{
10879	  /* For an immediate, we always generate a 32-bit opcode;
10880	     section relaxation will shrink it later if possible.  */
10881	  inst.instruction = THUMB_OP32 (inst.instruction);
10882	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
10883	  inst.instruction |= Rd << 8;
10884	  inst.instruction |= Rs << 16;
10885	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10886	}
10887      else
10888	{
10889	  bfd_boolean narrow;
10890
10891	  /* See if we can do this with a 16-bit instruction.  */
10892	  if (THUMB_SETS_FLAGS (inst.instruction))
10893	    narrow = !in_it_block ();
10894	  else
10895	    narrow = in_it_block ();
10896
10897	  if (Rd > 7 || Rn > 7 || Rs > 7)
10898	    narrow = FALSE;
10899	  if (inst.operands[2].shifted)
10900	    narrow = FALSE;
10901	  if (inst.size_req == 4)
10902	    narrow = FALSE;
10903
10904	  if (narrow)
10905	    {
10906	      if (Rd == Rs)
10907		{
10908		  inst.instruction = THUMB_OP16 (inst.instruction);
10909		  inst.instruction |= Rd;
10910		  inst.instruction |= Rn << 3;
10911		  return;
10912		}
10913	      if (Rd == Rn)
10914		{
10915		  inst.instruction = THUMB_OP16 (inst.instruction);
10916		  inst.instruction |= Rd;
10917		  inst.instruction |= Rs << 3;
10918		  return;
10919		}
10920	    }
10921
10922	  /* If we get here, it can't be done in 16 bits.  */
10923	  constraint (inst.operands[2].shifted
10924		      && inst.operands[2].immisreg,
10925		      _("shift must be constant"));
10926	  inst.instruction = THUMB_OP32 (inst.instruction);
10927	  inst.instruction |= Rd << 8;
10928	  inst.instruction |= Rs << 16;
10929	  encode_thumb32_shifted_operand (2);
10930	}
10931    }
10932  else
10933    {
10934      /* On its face this is a lie - the instruction does set the
10935	 flags.  However, the only supported mnemonic in this mode
10936	 says it doesn't.  */
10937      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
10938
10939      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
10940		  _("unshifted register required"));
10941      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
10942
10943      inst.instruction = THUMB_OP16 (inst.instruction);
10944      inst.instruction |= Rd;
10945
10946      if (Rd == Rs)
10947	inst.instruction |= Rn << 3;
10948      else if (Rd == Rn)
10949	inst.instruction |= Rs << 3;
10950      else
10951	constraint (1, _("dest must overlap one source register"));
10952    }
10953}
10954
10955static void
10956do_t_bfc (void)
10957{
10958  unsigned Rd;
10959  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
10960  constraint (msb > 32, _("bit-field extends past end of register"));
10961  /* The instruction encoding stores the LSB and MSB,
10962     not the LSB and width.  */
10963  Rd = inst.operands[0].reg;
10964  reject_bad_reg (Rd);
10965  inst.instruction |= Rd << 8;
10966  inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
10967  inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
10968  inst.instruction |= msb - 1;
10969}
10970
10971static void
10972do_t_bfi (void)
10973{
10974  int Rd, Rn;
10975  unsigned int msb;
10976
10977  Rd = inst.operands[0].reg;
10978  reject_bad_reg (Rd);
10979
10980  /* #0 in second position is alternative syntax for bfc, which is
10981     the same instruction but with REG_PC in the Rm field.  */
10982  if (!inst.operands[1].isreg)
10983    Rn = REG_PC;
10984  else
10985    {
10986      Rn = inst.operands[1].reg;
10987      reject_bad_reg (Rn);
10988    }
10989
10990  msb = inst.operands[2].imm + inst.operands[3].imm;
10991  constraint (msb > 32, _("bit-field extends past end of register"));
10992  /* The instruction encoding stores the LSB and MSB,
10993     not the LSB and width.  */
10994  inst.instruction |= Rd << 8;
10995  inst.instruction |= Rn << 16;
10996  inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10997  inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10998  inst.instruction |= msb - 1;
10999}
11000
11001static void
11002do_t_bfx (void)
11003{
11004  unsigned Rd, Rn;
11005
11006  Rd = inst.operands[0].reg;
11007  Rn = inst.operands[1].reg;
11008
11009  reject_bad_reg (Rd);
11010  reject_bad_reg (Rn);
11011
11012  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
11013	      _("bit-field extends past end of register"));
11014  inst.instruction |= Rd << 8;
11015  inst.instruction |= Rn << 16;
11016  inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
11017  inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
11018  inst.instruction |= inst.operands[3].imm - 1;
11019}
11020
11021/* ARM V5 Thumb BLX (argument parse)
11022	BLX <target_addr>	which is BLX(1)
11023	BLX <Rm>		which is BLX(2)
11024   Unfortunately, there are two different opcodes for this mnemonic.
11025   So, the insns[].value is not used, and the code here zaps values
   into inst.instruction.
11027
11028   ??? How to take advantage of the additional two bits of displacement
11029   available in Thumb32 mode?  Need new relocation?  */
11030
11031static void
11032do_t_blx (void)
11033{
11034  set_it_insn_type_last ();
11035
11036  if (inst.operands[0].isreg)
11037    {
11038      constraint (inst.operands[0].reg == REG_PC, BAD_PC);
11039      /* We have a register, so this is BLX(2).  */
11040      inst.instruction |= inst.operands[0].reg << 3;
11041    }
11042  else
11043    {
11044      /* No register.  This must be BLX(1).  */
11045      inst.instruction = 0xf000e800;
11046      encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
11047    }
11048}
11049
11050static void
11051do_t_branch (void)
11052{
11053  int opcode;
11054  int cond;
11055  bfd_reloc_code_real_type reloc;
11056
11057  cond = inst.cond;
11058  set_it_insn_type (IF_INSIDE_IT_LAST_INSN);
11059
11060  if (in_it_block ())
11061    {
11062      /* Conditional branches inside IT blocks are encoded as unconditional
11063	 branches.  */
11064      cond = COND_ALWAYS;
11065    }
11066  else
11067    cond = inst.cond;
11068
11069  if (cond != COND_ALWAYS)
11070    opcode = T_MNEM_bcond;
11071  else
11072    opcode = inst.instruction;
11073
11074  if (unified_syntax
11075      && (inst.size_req == 4
11076	  || (inst.size_req != 2
11077	      && (inst.operands[0].hasreloc
11078		  || inst.reloc.exp.X_op == O_constant))))
11079    {
11080      inst.instruction = THUMB_OP32(opcode);
11081      if (cond == COND_ALWAYS)
11082	reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
11083      else
11084	{
11085	  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2),
11086		      _("selected architecture does not support "
11087			"wide conditional branch instruction"));
11088
11089	  gas_assert (cond != 0xF);
11090	  inst.instruction |= cond << 22;
11091	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
11092	}
11093    }
11094  else
11095    {
11096      inst.instruction = THUMB_OP16(opcode);
11097      if (cond == COND_ALWAYS)
11098	reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
11099      else
11100	{
11101	  inst.instruction |= cond << 8;
11102	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
11103	}
11104      /* Allow section relaxation.  */
11105      if (unified_syntax && inst.size_req != 2)
11106	inst.relax = opcode;
11107    }
11108  inst.reloc.type = reloc;
11109  inst.reloc.pc_rel = 1;
11110}
11111
11112/* Actually do the work for Thumb state bkpt and hlt.  The only difference
11113   between the two is the maximum immediate allowed - which is passed in
11114   RANGE.  */
11115static void
11116do_t_bkpt_hlt1 (int range)
11117{
11118  constraint (inst.cond != COND_ALWAYS,
11119	      _("instruction is always unconditional"));
11120  if (inst.operands[0].present)
11121    {
11122      constraint (inst.operands[0].imm > range,
11123		  _("immediate value out of range"));
11124      inst.instruction |= inst.operands[0].imm;
11125    }
11126
11127  set_it_insn_type (NEUTRAL_IT_INSN);
11128}
11129
11130static void
11131do_t_hlt (void)
11132{
11133  do_t_bkpt_hlt1 (63);
11134}
11135
11136static void
11137do_t_bkpt (void)
11138{
11139  do_t_bkpt_hlt1 (255);
11140}
11141
11142static void
11143do_t_branch23 (void)
11144{
11145  set_it_insn_type_last ();
11146  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);
11147
11148  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
11149     this file.  We used to simply ignore the PLT reloc type here --
11150     the branch encoding is now needed to deal with TLSCALL relocs.
11151     So if we see a PLT reloc now, put it back to how it used to be to
11152     keep the preexisting behaviour.  */
11153  if (inst.reloc.type == BFD_RELOC_ARM_PLT32)
11154    inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
11155
11156#if defined(OBJ_COFF)
11157  /* If the destination of the branch is a defined symbol which does not have
11158     the THUMB_FUNC attribute, then we must be calling a function which has
11159     the (interfacearm) attribute.  We look for the Thumb entry point to that
11160     function and change the branch to refer to that function instead.	*/
11161  if (	 inst.reloc.exp.X_op == O_symbol
11162      && inst.reloc.exp.X_add_symbol != NULL
11163      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
11164      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
11165    inst.reloc.exp.X_add_symbol =
11166      find_real_start (inst.reloc.exp.X_add_symbol);
11167#endif
11168}
11169
11170static void
11171do_t_bx (void)
11172{
11173  set_it_insn_type_last ();
11174  inst.instruction |= inst.operands[0].reg << 3;
11175  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.	 The reloc
11176     should cause the alignment to be checked once it is known.	 This is
11177     because BX PC only works if the instruction is word aligned.  */
11178}
11179
11180static void
11181do_t_bxj (void)
11182{
11183  int Rm;
11184
11185  set_it_insn_type_last ();
11186  Rm = inst.operands[0].reg;
11187  reject_bad_reg (Rm);
11188  inst.instruction |= Rm << 16;
11189}
11190
11191static void
11192do_t_clz (void)
11193{
11194  unsigned Rd;
11195  unsigned Rm;
11196
11197  Rd = inst.operands[0].reg;
11198  Rm = inst.operands[1].reg;
11199
11200  reject_bad_reg (Rd);
11201  reject_bad_reg (Rm);
11202
11203  inst.instruction |= Rd << 8;
11204  inst.instruction |= Rm << 16;
11205  inst.instruction |= Rm;
11206}
11207
11208static void
11209do_t_cps (void)
11210{
11211  set_it_insn_type (OUTSIDE_IT_INSN);
11212  inst.instruction |= inst.operands[0].imm;
11213}
11214
11215static void
11216do_t_cpsi (void)
11217{
11218  set_it_insn_type (OUTSIDE_IT_INSN);
11219  if (unified_syntax
11220      && (inst.operands[1].present || inst.size_req == 4)
11221      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
11222    {
11223      unsigned int imod = (inst.instruction & 0x0030) >> 4;
11224      inst.instruction = 0xf3af8000;
11225      inst.instruction |= imod << 9;
11226      inst.instruction |= inst.operands[0].imm << 5;
11227      if (inst.operands[1].present)
11228	inst.instruction |= 0x100 | inst.operands[1].imm;
11229    }
11230  else
11231    {
11232      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
11233		  && (inst.operands[0].imm & 4),
11234		  _("selected processor does not support 'A' form "
11235		    "of this instruction"));
11236      constraint (inst.operands[1].present || inst.size_req == 4,
11237		  _("Thumb does not support the 2-argument "
11238		    "form of this instruction"));
11239      inst.instruction |= inst.operands[0].imm;
11240    }
11241}
11242
11243/* THUMB CPY instruction (argument parse).  */
11244
11245static void
11246do_t_cpy (void)
11247{
11248  if (inst.size_req == 4)
11249    {
11250      inst.instruction = THUMB_OP32 (T_MNEM_mov);
11251      inst.instruction |= inst.operands[0].reg << 8;
11252      inst.instruction |= inst.operands[1].reg;
11253    }
11254  else
11255    {
11256      inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
11257      inst.instruction |= (inst.operands[0].reg & 0x7);
11258      inst.instruction |= inst.operands[1].reg << 3;
11259    }
11260}
11261
11262static void
11263do_t_cbz (void)
11264{
11265  set_it_insn_type (OUTSIDE_IT_INSN);
11266  constraint (inst.operands[0].reg > 7, BAD_HIREG);
11267  inst.instruction |= inst.operands[0].reg;
11268  inst.reloc.pc_rel = 1;
11269  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
11270}
11271
11272static void
11273do_t_dbg (void)
11274{
11275  inst.instruction |= inst.operands[0].imm;
11276}
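
/* THUMB SDIV/UDIV instruction (argument parse).  */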
11277
11278static void
11279do_t_div (void)
11280{
11281  unsigned Rd, Rn, Rm;
11282
11283  Rd = inst.operands[0].reg;
11284  Rn = (inst.operands[1].present
11285	? inst.operands[1].reg : Rd);
11286  Rm = inst.operands[2].reg;
11287
11288  reject_bad_reg (Rd);
11289  reject_bad_reg (Rn);
11290  reject_bad_reg (Rm);
11291
11292  inst.instruction |= Rd << 8;
11293  inst.instruction |= Rn << 16;
11294  inst.instruction |= Rm;
11295}
11296
11297static void
11298do_t_hint (void)
11299{
11300  if (unified_syntax && inst.size_req == 4)
11301    inst.instruction = THUMB_OP32 (inst.instruction);
11302  else
11303    inst.instruction = THUMB_OP16 (inst.instruction);
11304}
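
/* THUMB IT instruction (argument parse).  The operand parser builds the
   mask as if the low bit of the condition were set; if it is clear, the
   x/y/z bits are inverted below, leaving the terminating '1' alone.  */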
11305
11306static void
11307do_t_it (void)
11308{
11309  unsigned int cond = inst.operands[0].imm;
11310
11311  set_it_insn_type (IT_INSN);
11312  now_it.mask = (inst.instruction & 0xf) | 0x10;
11313  now_it.cc = cond;
11314  now_it.warn_deprecated = FALSE;
11315
11316  /* If the condition is a negative condition, invert the mask.  */
11317  if ((cond & 0x1) == 0x0)
11318    {
11319      unsigned int mask = inst.instruction & 0x000f;
11320
11321      if ((mask & 0x7) == 0)
11322	{
11323	  /* No conversion needed.  */
11324	  now_it.block_length = 1;
11325	}
11326      else if ((mask & 0x3) == 0)
11327	{
11328	  mask ^= 0x8;
11329	  now_it.block_length = 2;
11330	}
11331      else if ((mask & 0x1) == 0)
11332	{
11333	  mask ^= 0xC;
11334	  now_it.block_length = 3;
11335	}
11336      else
11337	{
11338	  mask ^= 0xE;
11339	  now_it.block_length = 4;
11340	}
11341
11342      inst.instruction &= 0xfff0;
11343      inst.instruction |= mask;
11344    }
11345
11346  inst.instruction |= cond << 4;
11347}
11348
11349/* Helper function used for both push/pop and ldm/stm.  */
11350static void
11351encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
11352{
11353  bfd_boolean load;
11354
11355  load = (inst.instruction & (1 << 20)) != 0;
11356
11357  if (mask & (1 << 13))
11358    inst.error =  _("SP not allowed in register list");
11359
11360  if ((mask & (1 << base)) != 0
11361      && writeback)
11362    inst.error = _("having the base register in the register list when "
11363		   "using write back is UNPREDICTABLE");
11364
11365  if (load)
11366    {
11367      if (mask & (1 << 15))
11368	{
11369	  if (mask & (1 << 14))
11370	    inst.error = _("LR and PC should not both be in register list");
11371	  else
11372	    set_it_insn_type_last ();
11373	}
11374    }
11375  else
11376    {
11377      if (mask & (1 << 15))
11378	inst.error = _("PC not allowed in register list");
11379    }
11380
11381  if ((mask & (mask - 1)) == 0)
11382    {
11383      /* Single register transfers implemented as str/ldr.  */
11384      if (writeback)
11385	{
11386	  if (inst.instruction & (1 << 23))
11387	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
11388	  else
11389	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
11390	}
11391      else
11392	{
11393	  if (inst.instruction & (1 << 23))
11394	    inst.instruction = 0x00800000; /* ia -> [base] */
11395	  else
11396	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
11397	}
11398
11399      inst.instruction |= 0xf8400000;
11400      if (load)
11401	inst.instruction |= 0x00100000;
11402
11403      mask = ffs (mask) - 1;
11404      mask <<= 12;
11405    }
11406  else if (writeback)
11407    inst.instruction |= WRITE_BACK;
11408
11409  inst.instruction |= mask;
11410  inst.instruction |= base << 16;
11411}
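
/* THUMB LDM/STM instruction (argument parse).  */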
11412
11413static void
11414do_t_ldmstm (void)
11415{
11416  /* This really doesn't seem worth it.  */
11417  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
11418	      _("expression too complex"));
11419  constraint (inst.operands[1].writeback,
11420	      _("Thumb load/store multiple does not support {reglist}^"));
11421
11422  if (unified_syntax)
11423    {
11424      bfd_boolean narrow;
11425      unsigned mask;
11426
11427      narrow = FALSE;
11428      /* See if we can use a 16-bit instruction.  */
11429      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
11430	  && inst.size_req != 4
11431	  && !(inst.operands[1].imm & ~0xff))
11432	{
11433	  mask = 1 << inst.operands[0].reg;
11434
11435	  if (inst.operands[0].reg <= 7)
11436	    {
11437	      if (inst.instruction == T_MNEM_stmia
11438		  ? inst.operands[0].writeback
11439		  : (inst.operands[0].writeback
11440		     == !(inst.operands[1].imm & mask)))
11441		{
11442		  if (inst.instruction == T_MNEM_stmia
11443		      && (inst.operands[1].imm & mask)
11444		      && (inst.operands[1].imm & (mask - 1)))
11445		    as_warn (_("value stored for r%d is UNKNOWN"),
11446			     inst.operands[0].reg);
11447
11448		  inst.instruction = THUMB_OP16 (inst.instruction);
11449		  inst.instruction |= inst.operands[0].reg << 8;
11450		  inst.instruction |= inst.operands[1].imm;
11451		  narrow = TRUE;
11452		}
11453	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
11454		{
		  /* This means there is 1 register in the register list,
		     in one of 3 situations:
		     1. Instruction is stmia, but without writeback.
		     2. ldmia without writeback, but with Rn not in
			reglist.
		     3. ldmia with writeback, but with Rn in reglist.
		     Case 3 is UNPREDICTABLE behaviour, so we handle
		     cases 1 and 2, which can be converted into a 16-bit
		     str or ldr.  The SP cases are handled below.  */
11463		  unsigned long opcode;
11464		  /* First, record an error for Case 3.  */
11465		  if (inst.operands[1].imm & mask
11466		      && inst.operands[0].writeback)
11467		    inst.error =
11468			_("having the base register in the register list when "
11469			  "using write back is UNPREDICTABLE");
11470
11471		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
11472							     : T_MNEM_ldr);
11473		  inst.instruction = THUMB_OP16 (opcode);
11474		  inst.instruction |= inst.operands[0].reg << 3;
11475		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
11476		  narrow = TRUE;
11477		}
11478	    }
	  else if (inst.operands[0].reg == REG_SP)
11480	    {
11481	      if (inst.operands[0].writeback)
11482		{
11483		  inst.instruction =
11484			THUMB_OP16 (inst.instruction == T_MNEM_stmia
11485				    ? T_MNEM_push : T_MNEM_pop);
11486		  inst.instruction |= inst.operands[1].imm;
11487		  narrow = TRUE;
11488		}
11489	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
11490		{
11491		  inst.instruction =
11492			THUMB_OP16 (inst.instruction == T_MNEM_stmia
11493				    ? T_MNEM_str_sp : T_MNEM_ldr_sp);
11494		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
11495		  narrow = TRUE;
11496		}
11497	    }
11498	}
11499
11500      if (!narrow)
11501	{
11502	  if (inst.instruction < 0xffff)
11503	    inst.instruction = THUMB_OP32 (inst.instruction);
11504
11505	  encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
11506				inst.operands[0].writeback);
11507	}
11508    }
11509  else
11510    {
11511      constraint (inst.operands[0].reg > 7
11512		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
11513      constraint (inst.instruction != T_MNEM_ldmia
11514		  && inst.instruction != T_MNEM_stmia,
11515		  _("Thumb-2 instruction only valid in unified syntax"));
11516      if (inst.instruction == T_MNEM_stmia)
11517	{
11518	  if (!inst.operands[0].writeback)
11519	    as_warn (_("this instruction will write back the base register"));
11520	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
11521	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
11522	    as_warn (_("value stored for r%d is UNKNOWN"),
11523		     inst.operands[0].reg);
11524	}
11525      else
11526	{
11527	  if (!inst.operands[0].writeback
11528	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
11529	    as_warn (_("this instruction will write back the base register"));
11530	  else if (inst.operands[0].writeback
11531		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
11532	    as_warn (_("this instruction will not write back the base register"));
11533	}
11534
11535      inst.instruction = THUMB_OP16 (inst.instruction);
11536      inst.instruction |= inst.operands[0].reg << 8;
11537      inst.instruction |= inst.operands[1].imm;
11538    }
11539}
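
/* THUMB LDREX instruction (argument parse).  */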
11540
11541static void
11542do_t_ldrex (void)
11543{
11544  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
11545	      || inst.operands[1].postind || inst.operands[1].writeback
11546	      || inst.operands[1].immisreg || inst.operands[1].shifted
11547	      || inst.operands[1].negative,
11548	      BAD_ADDR_MODE);
11549
11550  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
11551
11552  inst.instruction |= inst.operands[0].reg << 12;
11553  inst.instruction |= inst.operands[1].reg << 16;
11554  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
11555}
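
/* THUMB LDREXD instruction (argument parse).  */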
11556
11557static void
11558do_t_ldrexd (void)
11559{
11560  if (!inst.operands[1].present)
11561    {
11562      constraint (inst.operands[0].reg == REG_LR,
11563		  _("r14 not allowed as first register "
11564		    "when second register is omitted"));
11565      inst.operands[1].reg = inst.operands[0].reg + 1;
11566    }
11567  constraint (inst.operands[0].reg == inst.operands[1].reg,
11568	      BAD_OVERLAP);
11569
11570  inst.instruction |= inst.operands[0].reg << 12;
11571  inst.instruction |= inst.operands[1].reg << 8;
11572  inst.instruction |= inst.operands[2].reg << 16;
11573}
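
/* THUMB single data transfer instructions: LDR/STR and their byte,
   halfword and signed variants (argument parse).  */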
11574
11575static void
11576do_t_ldst (void)
11577{
11578  unsigned long opcode;
11579  int Rn;
11580
11581  if (inst.operands[0].isreg
11582      && !inst.operands[0].preind
11583      && inst.operands[0].reg == REG_PC)
11584    set_it_insn_type_last ();
11585
11586  opcode = inst.instruction;
11587  if (unified_syntax)
11588    {
11589      if (!inst.operands[1].isreg)
11590	{
11591	  if (opcode <= 0xffff)
11592	    inst.instruction = THUMB_OP32 (opcode);
11593	  if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
11594	    return;
11595	}
11596      if (inst.operands[1].isreg
11597	  && !inst.operands[1].writeback
11598	  && !inst.operands[1].shifted && !inst.operands[1].postind
11599	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
11600	  && opcode <= 0xffff
11601	  && inst.size_req != 4)
11602	{
11603	  /* Insn may have a 16-bit form.  */
11604	  Rn = inst.operands[1].reg;
11605	  if (inst.operands[1].immisreg)
11606	    {
11607	      inst.instruction = THUMB_OP16 (opcode);
	      /* [Rn, Rm] */
11609	      if (Rn <= 7 && inst.operands[1].imm <= 7)
11610		goto op16;
11611	      else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
11612		reject_bad_reg (inst.operands[1].imm);
11613	    }
11614	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
11615		    && opcode != T_MNEM_ldrsb)
11616		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
11617		   || (Rn == REG_SP && opcode == T_MNEM_str))
11618	    {
11619	      /* [Rn, #const] */
11620	      if (Rn > 7)
11621		{
11622		  if (Rn == REG_PC)
11623		    {
11624		      if (inst.reloc.pc_rel)
11625			opcode = T_MNEM_ldr_pc2;
11626		      else
11627			opcode = T_MNEM_ldr_pc;
11628		    }
11629		  else
11630		    {
11631		      if (opcode == T_MNEM_ldr)
11632			opcode = T_MNEM_ldr_sp;
11633		      else
11634			opcode = T_MNEM_str_sp;
11635		    }
11636		  inst.instruction = inst.operands[0].reg << 8;
11637		}
11638	      else
11639		{
11640		  inst.instruction = inst.operands[0].reg;
11641		  inst.instruction |= inst.operands[1].reg << 3;
11642		}
11643	      inst.instruction |= THUMB_OP16 (opcode);
11644	      if (inst.size_req == 2)
11645		inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
11646	      else
11647		inst.relax = opcode;
11648	      return;
11649	    }
11650	}
11651      /* Definitely a 32-bit variant.  */
11652
11653      /* Warning for Erratum 752419.  */
11654      if (opcode == T_MNEM_ldr
11655	  && inst.operands[0].reg == REG_SP
11656	  && inst.operands[1].writeback == 1
11657	  && !inst.operands[1].immisreg)
11658	{
11659	  if (no_cpu_selected ()
11660	      || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
11661		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
11662		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
11663	    as_warn (_("This instruction may be unpredictable "
11664		       "if executed on M-profile cores "
11665		       "with interrupts enabled."));
11666	}
11667
11668      /* Do some validations regarding addressing modes.  */
11669      if (inst.operands[1].immisreg)
11670	reject_bad_reg (inst.operands[1].imm);
11671
11672      constraint (inst.operands[1].writeback == 1
11673		  && inst.operands[0].reg == inst.operands[1].reg,
11674		  BAD_OVERLAP);
11675
11676      inst.instruction = THUMB_OP32 (opcode);
11677      inst.instruction |= inst.operands[0].reg << 12;
11678      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
11679      check_ldr_r15_aligned ();
11680      return;
11681    }
11682
11683  constraint (inst.operands[0].reg > 7, BAD_HIREG);
11684
11685  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
11686    {
11687      /* Only [Rn,Rm] is acceptable.  */
11688      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
11689      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
11690		  || inst.operands[1].postind || inst.operands[1].shifted
11691		  || inst.operands[1].negative,
11692		  _("Thumb does not support this addressing mode"));
11693      inst.instruction = THUMB_OP16 (inst.instruction);
11694      goto op16;
11695    }
11696
11697  inst.instruction = THUMB_OP16 (inst.instruction);
11698  if (!inst.operands[1].isreg)
11699    if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
11700      return;
11701
11702  constraint (!inst.operands[1].preind
11703	      || inst.operands[1].shifted
11704	      || inst.operands[1].writeback,
11705	      _("Thumb does not support this addressing mode"));
11706  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
11707    {
11708      constraint (inst.instruction & 0x0600,
11709		  _("byte or halfword not valid for base register"));
11710      constraint (inst.operands[1].reg == REG_PC
11711		  && !(inst.instruction & THUMB_LOAD_BIT),
11712		  _("r15 based store not allowed"));
11713      constraint (inst.operands[1].immisreg,
11714		  _("invalid base register for register offset"));
11715
11716      if (inst.operands[1].reg == REG_PC)
11717	inst.instruction = T_OPCODE_LDR_PC;
11718      else if (inst.instruction & THUMB_LOAD_BIT)
11719	inst.instruction = T_OPCODE_LDR_SP;
11720      else
11721	inst.instruction = T_OPCODE_STR_SP;
11722
11723      inst.instruction |= inst.operands[0].reg << 8;
11724      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
11725      return;
11726    }
11727
11728  constraint (inst.operands[1].reg > 7, BAD_HIREG);
11729  if (!inst.operands[1].immisreg)
11730    {
11731      /* Immediate offset.  */
11732      inst.instruction |= inst.operands[0].reg;
11733      inst.instruction |= inst.operands[1].reg << 3;
11734      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
11735      return;
11736    }
11737
11738  /* Register offset.  */
11739  constraint (inst.operands[1].imm > 7, BAD_HIREG);
11740  constraint (inst.operands[1].negative,
11741	      _("Thumb does not support this addressing mode"));
11742
11743 op16:
11744  switch (inst.instruction)
11745    {
11746    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
11747    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
11748    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
11749    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
11750    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
11751    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
11752    case 0x5600 /* ldrsb */:
11753    case 0x5e00 /* ldrsh */: break;
11754    default: abort ();
11755    }
11756
11757  inst.instruction |= inst.operands[0].reg;
11758  inst.instruction |= inst.operands[1].reg << 3;
11759  inst.instruction |= inst.operands[1].imm << 6;
11760}
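
/* THUMB LDRD/STRD instruction (argument parse).  */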
11761
11762static void
11763do_t_ldstd (void)
11764{
11765  if (!inst.operands[1].present)
11766    {
11767      inst.operands[1].reg = inst.operands[0].reg + 1;
11768      constraint (inst.operands[0].reg == REG_LR,
11769		  _("r14 not allowed here"));
11770      constraint (inst.operands[0].reg == REG_R12,
11771		  _("r12 not allowed here"));
11772    }
11773
11774  if (inst.operands[2].writeback
11775      && (inst.operands[0].reg == inst.operands[2].reg
11776      || inst.operands[1].reg == inst.operands[2].reg))
11777    as_warn (_("base register written back, and overlaps "
11778	       "one of transfer registers"));
11779
11780  inst.instruction |= inst.operands[0].reg << 12;
11781  inst.instruction |= inst.operands[1].reg << 8;
11782  encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
11783}
11784
11785static void
11786do_t_ldstt (void)
11787{
11788  inst.instruction |= inst.operands[0].reg << 12;
11789  encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
11790}
11791
11792static void
11793do_t_mla (void)
11794{
11795  unsigned Rd, Rn, Rm, Ra;
11796
11797  Rd = inst.operands[0].reg;
11798  Rn = inst.operands[1].reg;
11799  Rm = inst.operands[2].reg;
11800  Ra = inst.operands[3].reg;
11801
11802  reject_bad_reg (Rd);
11803  reject_bad_reg (Rn);
11804  reject_bad_reg (Rm);
11805  reject_bad_reg (Ra);
11806
11807  inst.instruction |= Rd << 8;
11808  inst.instruction |= Rn << 16;
11809  inst.instruction |= Rm;
11810  inst.instruction |= Ra << 12;
11811}
11812
11813static void
11814do_t_mlal (void)
11815{
11816  unsigned RdLo, RdHi, Rn, Rm;
11817
11818  RdLo = inst.operands[0].reg;
11819  RdHi = inst.operands[1].reg;
11820  Rn = inst.operands[2].reg;
11821  Rm = inst.operands[3].reg;
11822
11823  reject_bad_reg (RdLo);
11824  reject_bad_reg (RdHi);
11825  reject_bad_reg (Rn);
11826  reject_bad_reg (Rm);
11827
11828  inst.instruction |= RdLo << 12;
11829  inst.instruction |= RdHi << 8;
11830  inst.instruction |= Rn << 16;
11831  inst.instruction |= Rm;
11832}
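
/* THUMB MOV/MOVS/CMP instructions (argument parse).  */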
11833
11834static void
11835do_t_mov_cmp (void)
11836{
11837  unsigned Rn, Rm;
11838
11839  Rn = inst.operands[0].reg;
11840  Rm = inst.operands[1].reg;
11841
11842  if (Rn == REG_PC)
11843    set_it_insn_type_last ();
11844
11845  if (unified_syntax)
11846    {
11847      int r0off = (inst.instruction == T_MNEM_mov
11848		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
11849      unsigned long opcode;
11850      bfd_boolean narrow;
11851      bfd_boolean low_regs;
11852
11853      low_regs = (Rn <= 7 && Rm <= 7);
11854      opcode = inst.instruction;
11855      if (in_it_block ())
11856	narrow = opcode != T_MNEM_movs;
11857      else
11858	narrow = opcode != T_MNEM_movs || low_regs;
11859      if (inst.size_req == 4
11860	  || inst.operands[1].shifted)
11861	narrow = FALSE;
11862
11863      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
11864      if (opcode == T_MNEM_movs && inst.operands[1].isreg
11865	  && !inst.operands[1].shifted
11866	  && Rn == REG_PC
11867	  && Rm == REG_LR)
11868	{
11869	  inst.instruction = T2_SUBS_PC_LR;
11870	  return;
11871	}
11872
11873      if (opcode == T_MNEM_cmp)
11874	{
11875	  constraint (Rn == REG_PC, BAD_PC);
11876	  if (narrow)
11877	    {
11878	      /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
11879		 but valid.  */
11880	      warn_deprecated_sp (Rm);
11881	      /* R15 was documented as a valid choice for Rm in ARMv6,
11882		 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
11883		 tools reject R15, so we do too.  */
11884	      constraint (Rm == REG_PC, BAD_PC);
11885	    }
11886	  else
11887	    reject_bad_reg (Rm);
11888	}
11889      else if (opcode == T_MNEM_mov
11890	       || opcode == T_MNEM_movs)
11891	{
11892	  if (inst.operands[1].isreg)
11893	    {
11894	      if (opcode == T_MNEM_movs)
11895		{
11896		  reject_bad_reg (Rn);
11897		  reject_bad_reg (Rm);
11898		}
11899	      else if (narrow)
11900		{
11901		  /* This is mov.n.  */
11902		  if ((Rn == REG_SP || Rn == REG_PC)
11903		      && (Rm == REG_SP || Rm == REG_PC))
11904		    {
11905		      as_tsktsk (_("Use of r%u as a source register is "
11906				 "deprecated when r%u is the destination "
11907				 "register."), Rm, Rn);
11908		    }
11909		}
11910	      else
11911		{
11912		  /* This is mov.w.  */
11913		  constraint (Rn == REG_PC, BAD_PC);
11914		  constraint (Rm == REG_PC, BAD_PC);
11915		  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
11916		    constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
11917		}
11918	    }
11919	  else
11920	    reject_bad_reg (Rn);
11921	}
11922
11923      if (!inst.operands[1].isreg)
11924	{
11925	  /* Immediate operand.  */
11926	  if (!in_it_block () && opcode == T_MNEM_mov)
11927	    narrow = 0;
11928	  if (low_regs && narrow)
11929	    {
11930	      inst.instruction = THUMB_OP16 (opcode);
11931	      inst.instruction |= Rn << 8;
11932	      if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
11933		  || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
11934		{
11935		  if (inst.size_req == 2)
11936		    inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
11937		  else
11938		    inst.relax = opcode;
11939		}
11940	    }
11941	  else
11942	    {
11943	      constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
			  && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,
11945			  THUMB1_RELOC_ONLY);
11946
11947	      inst.instruction = THUMB_OP32 (inst.instruction);
11948	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
11949	      inst.instruction |= Rn << r0off;
11950	      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
11951	    }
11952	}
11953      else if (inst.operands[1].shifted && inst.operands[1].immisreg
11954	       && (inst.instruction == T_MNEM_mov
11955		   || inst.instruction == T_MNEM_movs))
11956	{
11957	  /* Register shifts are encoded as separate shift instructions.  */
11958	  bfd_boolean flags = (inst.instruction == T_MNEM_movs);
11959
11960	  if (in_it_block ())
11961	    narrow = !flags;
11962	  else
11963	    narrow = flags;
11964
11965	  if (inst.size_req == 4)
11966	    narrow = FALSE;
11967
11968	  if (!low_regs || inst.operands[1].imm > 7)
11969	    narrow = FALSE;
11970
11971	  if (Rn != Rm)
11972	    narrow = FALSE;
11973
11974	  switch (inst.operands[1].shift_kind)
11975	    {
11976	    case SHIFT_LSL:
11977	      opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
11978	      break;
11979	    case SHIFT_ASR:
11980	      opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
11981	      break;
11982	    case SHIFT_LSR:
11983	      opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
11984	      break;
11985	    case SHIFT_ROR:
11986	      opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
11987	      break;
11988	    default:
11989	      abort ();
11990	    }
11991
11992	  inst.instruction = opcode;
11993	  if (narrow)
11994	    {
11995	      inst.instruction |= Rn;
11996	      inst.instruction |= inst.operands[1].imm << 3;
11997	    }
11998	  else
11999	    {
12000	      if (flags)
12001		inst.instruction |= CONDS_BIT;
12002
12003	      inst.instruction |= Rn << 8;
12004	      inst.instruction |= Rm << 16;
12005	      inst.instruction |= inst.operands[1].imm;
12006	    }
12007	}
12008      else if (!narrow)
12009	{
	  /* Some MOV instructions with an immediate shift have narrow
	     variants.  Register shifts are handled above.  */
12012	  if (low_regs && inst.operands[1].shifted
12013	      && (inst.instruction == T_MNEM_mov
12014		  || inst.instruction == T_MNEM_movs))
12015	    {
12016	      if (in_it_block ())
12017		narrow = (inst.instruction == T_MNEM_mov);
12018	      else
12019		narrow = (inst.instruction == T_MNEM_movs);
12020	    }
12021
12022	  if (narrow)
12023	    {
12024	      switch (inst.operands[1].shift_kind)
12025		{
12026		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
12027		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
12028		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
12029		default: narrow = FALSE; break;
12030		}
12031	    }
12032
12033	  if (narrow)
12034	    {
12035	      inst.instruction |= Rn;
12036	      inst.instruction |= Rm << 3;
12037	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
12038	    }
12039	  else
12040	    {
12041	      inst.instruction = THUMB_OP32 (inst.instruction);
12042	      inst.instruction |= Rn << r0off;
12043	      encode_thumb32_shifted_operand (1);
12044	    }
12045	}
12046      else
12047	switch (inst.instruction)
12048	  {
12049	  case T_MNEM_mov:
12050	    /* In v4t or v5t a move of two lowregs produces unpredictable
12051	       results. Don't allow this.  */
12052	    if (low_regs)
12053	      {
		constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
			    _("MOV Rd, Rs with two low registers is not "
			      "permitted on this architecture"));
12057		ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
12058					arm_ext_v6);
12059	      }
12060
12061	    inst.instruction = T_OPCODE_MOV_HR;
12062	    inst.instruction |= (Rn & 0x8) << 4;
12063	    inst.instruction |= (Rn & 0x7);
12064	    inst.instruction |= Rm << 3;
12065	    break;
12066
12067	  case T_MNEM_movs:
12068	    /* We know we have low registers at this point.
12069	       Generate LSLS Rd, Rs, #0.  */
12070	    inst.instruction = T_OPCODE_LSL_I;
12071	    inst.instruction |= Rn;
12072	    inst.instruction |= Rm << 3;
12073	    break;
12074
12075	  case T_MNEM_cmp:
12076	    if (low_regs)
12077	      {
12078		inst.instruction = T_OPCODE_CMP_LR;
12079		inst.instruction |= Rn;
12080		inst.instruction |= Rm << 3;
12081	      }
12082	    else
12083	      {
12084		inst.instruction = T_OPCODE_CMP_HR;
12085		inst.instruction |= (Rn & 0x8) << 4;
12086		inst.instruction |= (Rn & 0x7);
12087		inst.instruction |= Rm << 3;
12088	      }
12089	    break;
12090	  }
12091      return;
12092    }
12093
12094  inst.instruction = THUMB_OP16 (inst.instruction);
12095
12096  /* PR 10443: Do not silently ignore shifted operands.  */
12097  constraint (inst.operands[1].shifted,
12098	      _("shifts in CMP/MOV instructions are only supported in unified syntax"));
12099
12100  if (inst.operands[1].isreg)
12101    {
12102      if (Rn < 8 && Rm < 8)
12103	{
12104	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
12105	     since a MOV instruction produces unpredictable results.  */
12106	  if (inst.instruction == T_OPCODE_MOV_I8)
12107	    inst.instruction = T_OPCODE_ADD_I3;
12108	  else
12109	    inst.instruction = T_OPCODE_CMP_LR;
12110
12111	  inst.instruction |= Rn;
12112	  inst.instruction |= Rm << 3;
12113	}
12114      else
12115	{
12116	  if (inst.instruction == T_OPCODE_MOV_I8)
12117	    inst.instruction = T_OPCODE_MOV_HR;
12118	  else
12119	    inst.instruction = T_OPCODE_CMP_HR;
12120	  do_t_cpy ();
12121	}
12122    }
12123  else
12124    {
12125      constraint (Rn > 7,
12126		  _("only lo regs allowed with immediate"));
12127      inst.instruction |= Rn << 8;
12128      inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
12129    }
12130}
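
/* THUMB MOVW/MOVT instruction (argument parse).  */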
12131
12132static void
12133do_t_mov16 (void)
12134{
12135  unsigned Rd;
12136  bfd_vma imm;
12137  bfd_boolean top;
12138
12139  top = (inst.instruction & 0x00800000) != 0;
12140  if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
12141    {
      constraint (top, _(":lower16: not allowed in this instruction"));
12143      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
12144    }
12145  else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
12146    {
      constraint (!top, _(":upper16: not allowed in this instruction"));
12148      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
12149    }
12150
12151  Rd = inst.operands[0].reg;
12152  reject_bad_reg (Rd);
12153
12154  inst.instruction |= Rd << 8;
12155  if (inst.reloc.type == BFD_RELOC_UNUSED)
12156    {
12157      imm = inst.reloc.exp.X_add_number;
12158      inst.instruction |= (imm & 0xf000) << 4;
12159      inst.instruction |= (imm & 0x0800) << 15;
12160      inst.instruction |= (imm & 0x0700) << 4;
12161      inst.instruction |= (imm & 0x00ff);
12162    }
12163}
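
/* THUMB MVN/TST-style two-operand instructions (argument parse).  */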
12164
12165static void
12166do_t_mvn_tst (void)
12167{
12168  unsigned Rn, Rm;
12169
12170  Rn = inst.operands[0].reg;
12171  Rm = inst.operands[1].reg;
12172
12173  if (inst.instruction == T_MNEM_cmp
12174      || inst.instruction == T_MNEM_cmn)
12175    constraint (Rn == REG_PC, BAD_PC);
12176  else
12177    reject_bad_reg (Rn);
12178  reject_bad_reg (Rm);
12179
12180  if (unified_syntax)
12181    {
12182      int r0off = (inst.instruction == T_MNEM_mvn
12183		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
12184      bfd_boolean narrow;
12185
12186      if (inst.size_req == 4
12187	  || inst.instruction > 0xffff
12188	  || inst.operands[1].shifted
12189	  || Rn > 7 || Rm > 7)
12190	narrow = FALSE;
12191      else if (inst.instruction == T_MNEM_cmn
12192	       || inst.instruction == T_MNEM_tst)
12193	narrow = TRUE;
12194      else if (THUMB_SETS_FLAGS (inst.instruction))
12195	narrow = !in_it_block ();
12196      else
12197	narrow = in_it_block ();
12198
12199      if (!inst.operands[1].isreg)
12200	{
12201	  /* For an immediate, we always generate a 32-bit opcode;
12202	     section relaxation will shrink it later if possible.  */
12203	  if (inst.instruction < 0xffff)
12204	    inst.instruction = THUMB_OP32 (inst.instruction);
12205	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
12206	  inst.instruction |= Rn << r0off;
12207	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
12208	}
12209      else
12210	{
12211	  /* See if we can do this with a 16-bit instruction.  */
12212	  if (narrow)
12213	    {
12214	      inst.instruction = THUMB_OP16 (inst.instruction);
12215	      inst.instruction |= Rn;
12216	      inst.instruction |= Rm << 3;
12217	    }
12218	  else
12219	    {
12220	      constraint (inst.operands[1].shifted
12221			  && inst.operands[1].immisreg,
12222			  _("shift must be constant"));
12223	      if (inst.instruction < 0xffff)
12224		inst.instruction = THUMB_OP32 (inst.instruction);
12225	      inst.instruction |= Rn << r0off;
12226	      encode_thumb32_shifted_operand (1);
12227	    }
12228	}
12229    }
12230  else
12231    {
12232      constraint (inst.instruction > 0xffff
12233		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
12234      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
12235		  _("unshifted register required"));
12236      constraint (Rn > 7 || Rm > 7,
12237		  BAD_HIREG);
12238
12239      inst.instruction = THUMB_OP16 (inst.instruction);
12240      inst.instruction |= Rn;
12241      inst.instruction |= Rm << 3;
12242    }
12243}
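
/* THUMB MRS instruction (argument parse).  */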
12244
12245static void
12246do_t_mrs (void)
12247{
12248  unsigned Rd;
12249
12250  if (do_vfp_nsyn_mrs () == SUCCESS)
12251    return;
12252
12253  Rd = inst.operands[0].reg;
12254  reject_bad_reg (Rd);
12255  inst.instruction |= Rd << 8;
12256
12257  if (inst.operands[1].isreg)
12258    {
12259      unsigned br = inst.operands[1].reg;
12260      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
12261	as_bad (_("bad register for mrs"));
12262
12263      inst.instruction |= br & (0xf << 16);
12264      inst.instruction |= (br & 0x300) >> 4;
12265      inst.instruction |= (br & SPSR_BIT) >> 2;
12266    }
12267  else
12268    {
12269      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
12270
12271      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
12272	{
12273	  /* PR gas/12698:  The constraint is only applied for m_profile.
12274	     If the user has specified -march=all, we want to ignore it as
12275	     we are building for any CPU type, including non-m variants.  */
12276	  bfd_boolean m_profile =
12277	    !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
12278	  constraint ((flags != 0) && m_profile, _("selected processor does "
12279						   "not support requested special purpose register"));
12280	}
12281      else
12282	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
12283	   devices).  */
12284	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
12285		    _("'APSR', 'CPSR' or 'SPSR' expected"));
12286
12287      inst.instruction |= (flags & SPSR_BIT) >> 2;
12288      inst.instruction |= inst.operands[1].imm & 0xff;
12289      inst.instruction |= 0xf0000;
12290    }
12291}
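
/* THUMB MSR instruction (argument parse).  */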
12292
12293static void
12294do_t_msr (void)
12295{
12296  int flags;
12297  unsigned Rn;
12298
12299  if (do_vfp_nsyn_msr () == SUCCESS)
12300    return;
12301
12302  constraint (!inst.operands[1].isreg,
12303	      _("Thumb encoding does not support an immediate here"));
12304
12305  if (inst.operands[0].isreg)
12306    flags = (int)(inst.operands[0].reg);
12307  else
12308    flags = inst.operands[0].imm;
12309
12310  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
12311    {
12312      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
12313
12314      /* PR gas/12698:  The constraint is only applied for m_profile.
12315	 If the user has specified -march=all, we want to ignore it as
12316	 we are building for any CPU type, including non-m variants.  */
12317      bfd_boolean m_profile =
12318	!ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
12319      constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
12320	   && (bits & ~(PSR_s | PSR_f)) != 0)
12321	  || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
12322	      && bits != PSR_f)) && m_profile,
12323	  _("selected processor does not support requested special "
12324	    "purpose register"));
12325    }
12326  else
12327     constraint ((flags & 0xff) != 0, _("selected processor does not support "
12328		 "requested special purpose register"));
12329
12330  Rn = inst.operands[1].reg;
12331  reject_bad_reg (Rn);
12332
12333  inst.instruction |= (flags & SPSR_BIT) >> 2;
12334  inst.instruction |= (flags & 0xf0000) >> 8;
12335  inst.instruction |= (flags & 0x300) >> 4;
12336  inst.instruction |= (flags & 0xff);
12337  inst.instruction |= Rn << 16;
12338}
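
/* THUMB MUL/MULS instruction (argument parse).  */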
12339
12340static void
12341do_t_mul (void)
12342{
12343  bfd_boolean narrow;
12344  unsigned Rd, Rn, Rm;
12345
12346  if (!inst.operands[2].present)
12347    inst.operands[2].reg = inst.operands[0].reg;
12348
12349  Rd = inst.operands[0].reg;
12350  Rn = inst.operands[1].reg;
12351  Rm = inst.operands[2].reg;
12352
12353  if (unified_syntax)
12354    {
12355      if (inst.size_req == 4
12356	  || (Rd != Rn
12357	      && Rd != Rm)
12358	  || Rn > 7
12359	  || Rm > 7)
12360	narrow = FALSE;
12361      else if (inst.instruction == T_MNEM_muls)
12362	narrow = !in_it_block ();
12363      else
12364	narrow = in_it_block ();
12365    }
12366  else
12367    {
12368      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
12369      constraint (Rn > 7 || Rm > 7,
12370		  BAD_HIREG);
12371      narrow = TRUE;
12372    }
12373
12374  if (narrow)
12375    {
12376      /* 16-bit MULS/Conditional MUL.  */
12377      inst.instruction = THUMB_OP16 (inst.instruction);
12378      inst.instruction |= Rd;
12379
12380      if (Rd == Rn)
12381	inst.instruction |= Rm << 3;
12382      else if (Rd == Rm)
12383	inst.instruction |= Rn << 3;
12384      else
12385	constraint (1, _("dest must overlap one source register"));
12386    }
12387  else
12388    {
12389      constraint (inst.instruction != T_MNEM_mul,
12390		  _("Thumb-2 MUL must not set flags"));
12391      /* 32-bit MUL.  */
12392      inst.instruction = THUMB_OP32 (inst.instruction);
12393      inst.instruction |= Rd << 8;
12394      inst.instruction |= Rn << 16;
12395      inst.instruction |= Rm << 0;
12396
12397      reject_bad_reg (Rd);
12398      reject_bad_reg (Rn);
12399      reject_bad_reg (Rm);
12400    }
12401}
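
/* THUMB long multiply instructions with an RdLo/RdHi destination pair
   (argument parse).  */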
12402
12403static void
12404do_t_mull (void)
12405{
12406  unsigned RdLo, RdHi, Rn, Rm;
12407
12408  RdLo = inst.operands[0].reg;
12409  RdHi = inst.operands[1].reg;
12410  Rn = inst.operands[2].reg;
12411  Rm = inst.operands[3].reg;
12412
12413  reject_bad_reg (RdLo);
12414  reject_bad_reg (RdHi);
12415  reject_bad_reg (Rn);
12416  reject_bad_reg (Rm);
12417
12418  inst.instruction |= RdLo << 12;
12419  inst.instruction |= RdHi << 8;
12420  inst.instruction |= Rn << 16;
12421  inst.instruction |= Rm;
12422
  if (RdLo == RdHi)
12424    as_tsktsk (_("rdhi and rdlo must be different"));
12425}
12426
12427static void
12428do_t_nop (void)
12429{
12430  set_it_insn_type (NEUTRAL_IT_INSN);
12431
12432  if (unified_syntax)
12433    {
12434      if (inst.size_req == 4 || inst.operands[0].imm > 15)
12435	{
12436	  inst.instruction = THUMB_OP32 (inst.instruction);
12437	  inst.instruction |= inst.operands[0].imm;
12438	}
12439      else
12440	{
12441	  /* PR9722: Check for Thumb2 availability before
12442	     generating a thumb2 nop instruction.  */
12443	  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
12444	    {
12445	      inst.instruction = THUMB_OP16 (inst.instruction);
12446	      inst.instruction |= inst.operands[0].imm << 4;
12447	    }
12448	  else
12449	    inst.instruction = 0x46c0;
12450	}
12451    }
12452  else
12453    {
12454      constraint (inst.operands[0].present,
12455		  _("Thumb does not support NOP with hints"));
12456      inst.instruction = 0x46c0;
12457    }
12458}
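
/* THUMB NEG instruction (argument parse).  */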
12459
12460static void
12461do_t_neg (void)
12462{
12463  if (unified_syntax)
12464    {
12465      bfd_boolean narrow;
12466
12467      if (THUMB_SETS_FLAGS (inst.instruction))
12468	narrow = !in_it_block ();
12469      else
12470	narrow = in_it_block ();
12471      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
12472	narrow = FALSE;
12473      if (inst.size_req == 4)
12474	narrow = FALSE;
12475
12476      if (!narrow)
12477	{
12478	  inst.instruction = THUMB_OP32 (inst.instruction);
12479	  inst.instruction |= inst.operands[0].reg << 8;
12480	  inst.instruction |= inst.operands[1].reg << 16;
12481	}
12482      else
12483	{
12484	  inst.instruction = THUMB_OP16 (inst.instruction);
12485	  inst.instruction |= inst.operands[0].reg;
12486	  inst.instruction |= inst.operands[1].reg << 3;
12487	}
12488    }
12489  else
12490    {
12491      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
12492		  BAD_HIREG);
12493      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
12494
12495      inst.instruction = THUMB_OP16 (inst.instruction);
12496      inst.instruction |= inst.operands[0].reg;
12497      inst.instruction |= inst.operands[1].reg << 3;
12498    }
12499}
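
/* THUMB ORN instruction (argument parse).  */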
12500
12501static void
12502do_t_orn (void)
12503{
12504  unsigned Rd, Rn;
12505
12506  Rd = inst.operands[0].reg;
12507  Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
12508
12509  reject_bad_reg (Rd);
12510  /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN.  */
12511  reject_bad_reg (Rn);
12512
12513  inst.instruction |= Rd << 8;
12514  inst.instruction |= Rn << 16;
12515
12516  if (!inst.operands[2].isreg)
12517    {
12518      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
12519      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
12520    }
12521  else
12522    {
12523      unsigned Rm;
12524
12525      Rm = inst.operands[2].reg;
12526      reject_bad_reg (Rm);
12527
12528      constraint (inst.operands[2].shifted
12529		  && inst.operands[2].immisreg,
12530		  _("shift must be constant"));
12531      encode_thumb32_shifted_operand (2);
12532    }
12533}
12534
12535static void
12536do_t_pkhbt (void)
12537{
12538  unsigned Rd, Rn, Rm;
12539
12540  Rd = inst.operands[0].reg;
12541  Rn = inst.operands[1].reg;
12542  Rm = inst.operands[2].reg;
12543
12544  reject_bad_reg (Rd);
12545  reject_bad_reg (Rn);
12546  reject_bad_reg (Rm);
12547
12548  inst.instruction |= Rd << 8;
12549  inst.instruction |= Rn << 16;
12550  inst.instruction |= Rm;
12551  if (inst.operands[3].present)
12552    {
12553      unsigned int val = inst.reloc.exp.X_add_number;
12554      constraint (inst.reloc.exp.X_op != O_constant,
12555		  _("expression too complex"));
12556      inst.instruction |= (val & 0x1c) << 10;
12557      inst.instruction |= (val & 0x03) << 6;
12558    }
12559}
12560
12561static void
12562do_t_pkhtb (void)
12563{
12564  if (!inst.operands[3].present)
12565    {
12566      unsigned Rtmp;
12567
12568      inst.instruction &= ~0x00000020;
12569
12570      /* PR 10168.  Swap the Rm and Rn registers.  */
12571      Rtmp = inst.operands[1].reg;
12572      inst.operands[1].reg = inst.operands[2].reg;
12573      inst.operands[2].reg = Rtmp;
12574    }
12575  do_t_pkhbt ();
12576}
12577
12578static void
12579do_t_pld (void)
12580{
12581  if (inst.operands[0].immisreg)
12582    reject_bad_reg (inst.operands[0].imm);
12583
12584  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
12585}
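
/* THUMB PUSH/POP instruction (argument parse).  */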
12586
12587static void
12588do_t_push_pop (void)
12589{
12590  unsigned mask;
12591
12592  constraint (inst.operands[0].writeback,
12593	      _("push/pop do not support {reglist}^"));
12594  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
12595	      _("expression too complex"));
12596
12597  mask = inst.operands[0].imm;
12598  if (inst.size_req != 4 && (mask & ~0xff) == 0)
12599    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
12600  else if (inst.size_req != 4
12601	   && (mask & ~0xff) == (1U << (inst.instruction == T_MNEM_push
12602				       ? REG_LR : REG_PC)))
12603    {
12604      inst.instruction = THUMB_OP16 (inst.instruction);
12605      inst.instruction |= THUMB_PP_PC_LR;
12606      inst.instruction |= mask & 0xff;
12607    }
12608  else if (unified_syntax)
12609    {
12610      inst.instruction = THUMB_OP32 (inst.instruction);
12611      encode_thumb2_ldmstm (13, mask, TRUE);
12612    }
12613  else
12614    {
12615      inst.error = _("invalid register list to push/pop instruction");
12616      return;
12617    }
12618}
12619
12620static void
12621do_t_rbit (void)
12622{
12623  unsigned Rd, Rm;
12624
12625  Rd = inst.operands[0].reg;
12626  Rm = inst.operands[1].reg;
12627
12628  reject_bad_reg (Rd);
12629  reject_bad_reg (Rm);
12630
12631  inst.instruction |= Rd << 8;
12632  inst.instruction |= Rm << 16;
12633  inst.instruction |= Rm;
12634}
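
/* THUMB byte-reversal (REV-family) instructions (argument parse).  */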
12635
12636static void
12637do_t_rev (void)
12638{
12639  unsigned Rd, Rm;
12640
12641  Rd = inst.operands[0].reg;
12642  Rm = inst.operands[1].reg;
12643
12644  reject_bad_reg (Rd);
12645  reject_bad_reg (Rm);
12646
12647  if (Rd <= 7 && Rm <= 7
12648      && inst.size_req != 4)
12649    {
12650      inst.instruction = THUMB_OP16 (inst.instruction);
12651      inst.instruction |= Rd;
12652      inst.instruction |= Rm << 3;
12653    }
12654  else if (unified_syntax)
12655    {
12656      inst.instruction = THUMB_OP32 (inst.instruction);
12657      inst.instruction |= Rd << 8;
12658      inst.instruction |= Rm << 16;
12659      inst.instruction |= Rm;
12660    }
12661  else
12662    inst.error = BAD_HIREG;
12663}
12664
12665static void
12666do_t_rrx (void)
12667{
12668  unsigned Rd, Rm;
12669
12670  Rd = inst.operands[0].reg;
12671  Rm = inst.operands[1].reg;
12672
12673  reject_bad_reg (Rd);
12674  reject_bad_reg (Rm);
12675
12676  inst.instruction |= Rd << 8;
12677  inst.instruction |= Rm;
12678}
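
/* THUMB RSB instruction (argument parse).  */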
12679
12680static void
12681do_t_rsb (void)
12682{
12683  unsigned Rd, Rs;
12684
12685  Rd = inst.operands[0].reg;
12686  Rs = (inst.operands[1].present
12687	? inst.operands[1].reg    /* Rd, Rs, foo */
12688	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
12689
12690  reject_bad_reg (Rd);
12691  reject_bad_reg (Rs);
12692  if (inst.operands[2].isreg)
12693    reject_bad_reg (inst.operands[2].reg);
12694
12695  inst.instruction |= Rd << 8;
12696  inst.instruction |= Rs << 16;
12697  if (!inst.operands[2].isreg)
12698    {
12699      bfd_boolean narrow;
12700
12701      if ((inst.instruction & 0x00100000) != 0)
12702	narrow = !in_it_block ();
12703      else
12704	narrow = in_it_block ();
12705
12706      if (Rd > 7 || Rs > 7)
12707	narrow = FALSE;
12708
12709      if (inst.size_req == 4 || !unified_syntax)
12710	narrow = FALSE;
12711
12712      if (inst.reloc.exp.X_op != O_constant
12713	  || inst.reloc.exp.X_add_number != 0)
12714	narrow = FALSE;
12715
12716      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
12717	 relaxation, but it doesn't seem worth the hassle.  */
12718      if (narrow)
12719	{
12720	  inst.reloc.type = BFD_RELOC_UNUSED;
12721	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
12722	  inst.instruction |= Rs << 3;
12723	  inst.instruction |= Rd;
12724	}
12725      else
12726	{
12727	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
12728	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
12729	}
12730    }
12731  else
12732    encode_thumb32_shifted_operand (2);
12733}
12734
12735static void
12736do_t_setend (void)
12737{
12738  if (warn_on_deprecated
12739      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    as_tsktsk (_("setend use is deprecated for ARMv8"));
12741
12742  set_it_insn_type (OUTSIDE_IT_INSN);
12743  if (inst.operands[0].imm)
12744    inst.instruction |= 0x8;
12745}
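
/* THUMB shift instructions: ASR, LSL, LSR and ROR (argument parse).  */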
12746
12747static void
12748do_t_shift (void)
12749{
12750  if (!inst.operands[1].present)
12751    inst.operands[1].reg = inst.operands[0].reg;
12752
12753  if (unified_syntax)
12754    {
12755      bfd_boolean narrow;
12756      int shift_kind;
12757
12758      switch (inst.instruction)
12759	{
12760	case T_MNEM_asr:
12761	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
12762	case T_MNEM_lsl:
12763	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
12764	case T_MNEM_lsr:
12765	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
12766	case T_MNEM_ror:
12767	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
12768	default: abort ();
12769	}
12770
12771      if (THUMB_SETS_FLAGS (inst.instruction))
12772	narrow = !in_it_block ();
12773      else
12774	narrow = in_it_block ();
12775      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
12776	narrow = FALSE;
12777      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
12778	narrow = FALSE;
12779      if (inst.operands[2].isreg
12780	  && (inst.operands[1].reg != inst.operands[0].reg
12781	      || inst.operands[2].reg > 7))
12782	narrow = FALSE;
12783      if (inst.size_req == 4)
12784	narrow = FALSE;
12785
12786      reject_bad_reg (inst.operands[0].reg);
12787      reject_bad_reg (inst.operands[1].reg);
12788
12789      if (!narrow)
12790	{
12791	  if (inst.operands[2].isreg)
12792	    {
12793	      reject_bad_reg (inst.operands[2].reg);
12794	      inst.instruction = THUMB_OP32 (inst.instruction);
12795	      inst.instruction |= inst.operands[0].reg << 8;
12796	      inst.instruction |= inst.operands[1].reg << 16;
12797	      inst.instruction |= inst.operands[2].reg;
12798
12799	      /* PR 12854: Error on extraneous shifts.  */
12800	      constraint (inst.operands[2].shifted,
12801			  _("extraneous shift as part of operand to shift insn"));
12802	    }
12803	  else
12804	    {
12805	      inst.operands[1].shifted = 1;
12806	      inst.operands[1].shift_kind = shift_kind;
12807	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
12808					     ? T_MNEM_movs : T_MNEM_mov);
12809	      inst.instruction |= inst.operands[0].reg << 8;
12810	      encode_thumb32_shifted_operand (1);
12811	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
12812	      inst.reloc.type = BFD_RELOC_UNUSED;
12813	    }
12814	}
12815      else
12816	{
12817	  if (inst.operands[2].isreg)
12818	    {
12819	      switch (shift_kind)
12820		{
12821		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
12822		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
12823		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
12824		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
12825		default: abort ();
12826		}
12827
12828	      inst.instruction |= inst.operands[0].reg;
12829	      inst.instruction |= inst.operands[2].reg << 3;
12830
12831	      /* PR 12854: Error on extraneous shifts.  */
12832	      constraint (inst.operands[2].shifted,
12833			  _("extraneous shift as part of operand to shift insn"));
12834	    }
12835	  else
12836	    {
12837	      switch (shift_kind)
12838		{
12839		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
12840		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
12841		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
12842		default: abort ();
12843		}
12844	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
12845	      inst.instruction |= inst.operands[0].reg;
12846	      inst.instruction |= inst.operands[1].reg << 3;
12847	    }
12848	}
12849    }
12850  else
12851    {
12852      constraint (inst.operands[0].reg > 7
12853		  || inst.operands[1].reg > 7, BAD_HIREG);
12854      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
12855
12856      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
12857	{
12858	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
12859	  constraint (inst.operands[0].reg != inst.operands[1].reg,
12860		      _("source1 and dest must be same register"));
12861
12862	  switch (inst.instruction)
12863	    {
12864	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
12865	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
12866	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
12867	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
12868	    default: abort ();
12869	    }
12870
12871	  inst.instruction |= inst.operands[0].reg;
12872	  inst.instruction |= inst.operands[2].reg << 3;
12873
12874	  /* PR 12854: Error on extraneous shifts.  */
12875	  constraint (inst.operands[2].shifted,
12876		      _("extraneous shift as part of operand to shift insn"));
12877	}
12878      else
12879	{
12880	  switch (inst.instruction)
12881	    {
12882	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
12883	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
12884	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
12885	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
12886	    default: abort ();
12887	    }
12888	  inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
12889	  inst.instruction |= inst.operands[0].reg;
12890	  inst.instruction |= inst.operands[1].reg << 3;
12891	}
12892    }
12893}
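
/* Generic THUMB three-register SIMD instruction; the operands supply
   Rd, Rn, Rm in that order (argument parse).  */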
12894
12895static void
12896do_t_simd (void)
12897{
12898  unsigned Rd, Rn, Rm;
12899
12900  Rd = inst.operands[0].reg;
12901  Rn = inst.operands[1].reg;
12902  Rm = inst.operands[2].reg;
12903
12904  reject_bad_reg (Rd);
12905  reject_bad_reg (Rn);
12906  reject_bad_reg (Rm);
12907
12908  inst.instruction |= Rd << 8;
12909  inst.instruction |= Rn << 16;
12910  inst.instruction |= Rm;
12911}
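
/* As do_t_simd, but the second and third operands supply Rm and Rn
   respectively (sources swapped).  */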
12912
12913static void
12914do_t_simd2 (void)
12915{
12916  unsigned Rd, Rn, Rm;
12917
12918  Rd = inst.operands[0].reg;
12919  Rm = inst.operands[1].reg;
12920  Rn = inst.operands[2].reg;
12921
12922  reject_bad_reg (Rd);
12923  reject_bad_reg (Rn);
12924  reject_bad_reg (Rm);
12925
12926  inst.instruction |= Rd << 8;
12927  inst.instruction |= Rn << 16;
12928  inst.instruction |= Rm;
12929}
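
/* THUMB SMC instruction (argument parse).  */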
12930
12931static void
12932do_t_smc (void)
12933{
12934  unsigned int value = inst.reloc.exp.X_add_number;
12935  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
12936	      _("SMC is not permitted on this architecture"));
12937  constraint (inst.reloc.exp.X_op != O_constant,
12938	      _("expression too complex"));
12939  inst.reloc.type = BFD_RELOC_UNUSED;
12940  inst.instruction |= (value & 0xf000) >> 12;
12941  inst.instruction |= (value & 0x0ff0);
12942  inst.instruction |= (value & 0x000f) << 16;
12943  /* PR gas/15623: SMC instructions must be last in an IT block.  */
12944  set_it_insn_type_last ();
12945}
12946
12947static void
12948do_t_hvc (void)
12949{
12950  unsigned int value = inst.reloc.exp.X_add_number;
12951
12952  inst.reloc.type = BFD_RELOC_UNUSED;
12953  inst.instruction |= (value & 0x0fff);
12954  inst.instruction |= (value & 0xf000) << 4;
12955}
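
/* Common encoding for SSAT and USAT.  BIAS is subtracted from the
   saturation immediate: 1 for SSAT, 0 for USAT.  */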
12956
12957static void
12958do_t_ssat_usat (int bias)
12959{
12960  unsigned Rd, Rn;
12961
12962  Rd = inst.operands[0].reg;
12963  Rn = inst.operands[2].reg;
12964
12965  reject_bad_reg (Rd);
12966  reject_bad_reg (Rn);
12967
12968  inst.instruction |= Rd << 8;
12969  inst.instruction |= inst.operands[1].imm - bias;
12970  inst.instruction |= Rn << 16;
12971
12972  if (inst.operands[3].present)
12973    {
12974      offsetT shift_amount = inst.reloc.exp.X_add_number;
12975
12976      inst.reloc.type = BFD_RELOC_UNUSED;
12977
12978      constraint (inst.reloc.exp.X_op != O_constant,
12979		  _("expression too complex"));
12980
12981      if (shift_amount != 0)
12982	{
12983	  constraint (shift_amount > 31,
12984		      _("shift expression is too large"));
12985
12986	  if (inst.operands[3].shift_kind == SHIFT_ASR)
12987	    inst.instruction |= 0x00200000;  /* sh bit.  */
12988
12989	  inst.instruction |= (shift_amount & 0x1c) << 10;
12990	  inst.instruction |= (shift_amount & 0x03) << 6;
12991	}
12992    }
12993}
12994
12995static void
12996do_t_ssat (void)
12997{
12998  do_t_ssat_usat (1);
12999}
13000
13001static void
13002do_t_ssat16 (void)
13003{
13004  unsigned Rd, Rn;
13005
13006  Rd = inst.operands[0].reg;
13007  Rn = inst.operands[2].reg;
13008
13009  reject_bad_reg (Rd);
13010  reject_bad_reg (Rn);
13011
13012  inst.instruction |= Rd << 8;
13013  inst.instruction |= inst.operands[1].imm - 1;
13014  inst.instruction |= Rn << 16;
13015}
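
/* THUMB STREX instruction (argument parse).  */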
13016
13017static void
13018do_t_strex (void)
13019{
13020  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
13021	      || inst.operands[2].postind || inst.operands[2].writeback
13022	      || inst.operands[2].immisreg || inst.operands[2].shifted
13023	      || inst.operands[2].negative,
13024	      BAD_ADDR_MODE);
13025
13026  constraint (inst.operands[2].reg == REG_PC, BAD_PC);
13027
13028  inst.instruction |= inst.operands[0].reg << 8;
13029  inst.instruction |= inst.operands[1].reg << 12;
13030  inst.instruction |= inst.operands[2].reg << 16;
13031  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
13032}
13033
13034static void
13035do_t_strexd (void)
13036{
13037  if (!inst.operands[2].present)
13038    inst.operands[2].reg = inst.operands[1].reg + 1;
13039
13040  constraint (inst.operands[0].reg == inst.operands[1].reg
13041	      || inst.operands[0].reg == inst.operands[2].reg
13042	      || inst.operands[0].reg == inst.operands[3].reg,
13043	      BAD_OVERLAP);
13044
13045  inst.instruction |= inst.operands[0].reg;
13046  inst.instruction |= inst.operands[1].reg << 12;
13047  inst.instruction |= inst.operands[2].reg << 8;
13048  inst.instruction |= inst.operands[3].reg << 16;
13049}
13050
13051static void
13052do_t_sxtah (void)
13053{
13054  unsigned Rd, Rn, Rm;
13055
13056  Rd = inst.operands[0].reg;
13057  Rn = inst.operands[1].reg;
13058  Rm = inst.operands[2].reg;
13059
13060  reject_bad_reg (Rd);
13061  reject_bad_reg (Rn);
13062  reject_bad_reg (Rm);
13063
13064  inst.instruction |= Rd << 8;
13065  inst.instruction |= Rn << 16;
13066  inst.instruction |= Rm;
13067  inst.instruction |= inst.operands[3].imm << 4;
13068}
13069
13070static void
13071do_t_sxth (void)
13072{
13073  unsigned Rd, Rm;
13074
13075  Rd = inst.operands[0].reg;
13076  Rm = inst.operands[1].reg;
13077
13078  reject_bad_reg (Rd);
13079  reject_bad_reg (Rm);
13080
13081  if (inst.instruction <= 0xffff
13082      && inst.size_req != 4
13083      && Rd <= 7 && Rm <= 7
13084      && (!inst.operands[2].present || inst.operands[2].imm == 0))
13085    {
13086      inst.instruction = THUMB_OP16 (inst.instruction);
13087      inst.instruction |= Rd;
13088      inst.instruction |= Rm << 3;
13089    }
13090  else if (unified_syntax)
13091    {
13092      if (inst.instruction <= 0xffff)
13093	inst.instruction = THUMB_OP32 (inst.instruction);
13094      inst.instruction |= Rd << 8;
13095      inst.instruction |= Rm;
13096      inst.instruction |= inst.operands[2].imm << 4;
13097    }
13098  else
13099    {
13100      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
13101		  _("Thumb encoding does not support rotation"));
13102      constraint (1, BAD_HIREG);
13103    }
13104}
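
/* THUMB SVC (SWI) instruction (argument parse).  */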
13105
13106static void
13107do_t_swi (void)
13108{
13109  /* We have to do the following check manually as ARM_EXT_OS only applies
13110     to ARM_EXT_V6M.  */
13111  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6m))
13112    {
13113      if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_os)
	  /* This only applies to v6-M, however, not to later architectures.  */
13115	  && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7))
13116	as_bad (_("SVC is not permitted on this architecture"));
13117      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, arm_ext_os);
13118    }
13119
13120  inst.reloc.type = BFD_RELOC_ARM_SWI;
13121}
13122
13123static void
13124do_t_tb (void)
13125{
13126  unsigned Rn, Rm;
13127  int half;
13128
13129  half = (inst.instruction & 0x10) != 0;
13130  set_it_insn_type_last ();
13131  constraint (inst.operands[0].immisreg,
13132	      _("instruction requires register index"));
13133
13134  Rn = inst.operands[0].reg;
13135  Rm = inst.operands[0].imm;
13136
13137  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
13138    constraint (Rn == REG_SP, BAD_SP);
13139  reject_bad_reg (Rm);
13140
13141  constraint (!half && inst.operands[0].shifted,
13142	      _("instruction does not allow shifted index"));
13143  inst.instruction |= (Rn << 16) | Rm;
13144}
13145
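/* UDF (permanently undefined) instruction.  The 16-bit encoding is used for
   immediates up to 255 unless a 32-bit encoding was explicitly requested.  */
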
13146static void
13147do_t_udf (void)
13148{
13149  if (!inst.operands[0].present)
13150    inst.operands[0].imm = 0;
13151
13152  if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
13153    {
13154      constraint (inst.size_req == 2,
13155                  _("immediate value out of range"));
13156      inst.instruction = THUMB_OP32 (inst.instruction);
13157      inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
13158      inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
13159    }
13160  else
13161    {
13162      inst.instruction = THUMB_OP16 (inst.instruction);
13163      inst.instruction |= inst.operands[0].imm;
13164    }
13165
13166  set_it_insn_type (NEUTRAL_IT_INSN);
13167}
13168
13169
13170static void
13171do_t_usat (void)
13172{
13173  do_t_ssat_usat (0);
13174}
13175
13176static void
13177do_t_usat16 (void)
13178{
13179  unsigned Rd, Rn;
13180
13181  Rd = inst.operands[0].reg;
13182  Rn = inst.operands[2].reg;
13183
13184  reject_bad_reg (Rd);
13185  reject_bad_reg (Rn);
13186
13187  inst.instruction |= Rd << 8;
13188  inst.instruction |= inst.operands[1].imm;
13189  inst.instruction |= Rn << 16;
13190}
13191
13192/* Neon instruction encoder helpers.  */
13193
13194/* Encodings for the different types for various Neon opcodes.  */
13195
13196/* An "invalid" code for the following tables.  */
13197#define N_INV -1u
13198
13199struct neon_tab_entry
13200{
13201  unsigned integer;
13202  unsigned float_or_poly;
13203  unsigned scalar_or_imm;
13204};
13205
13206/* Map overloaded Neon opcodes to their respective encodings.  */
13207#define NEON_ENC_TAB					\
13208  X(vabd,	0x0000700, 0x1200d00, N_INV),		\
13209  X(vmax,	0x0000600, 0x0000f00, N_INV),		\
13210  X(vmin,	0x0000610, 0x0200f00, N_INV),		\
13211  X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
13212  X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
13213  X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
13214  X(vadd,	0x0000800, 0x0000d00, N_INV),		\
13215  X(vsub,	0x1000800, 0x0200d00, N_INV),		\
13216  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
13217  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
13218  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
13219  /* Register variants of the following two instructions are encoded as
13220     vcge / vcgt with the operands reversed.  */  	\
13221  X(vclt,	0x0000300, 0x1200e00, 0x1b10200),	\
13222  X(vcle,	0x0000310, 0x1000e00, 0x1b10180),	\
13223  X(vfma,	N_INV, 0x0000c10, N_INV),		\
13224  X(vfms,	N_INV, 0x0200c10, N_INV),		\
13225  X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
13226  X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
13227  X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
13228  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
13229  X(vmlal,	0x0800800, N_INV,     0x0800240),	\
13230  X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
13231  X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
13232  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
13233  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
13234  X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),	\
13235  X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),	\
13236  X(vqrdmlah,   0x3000b10, N_INV,     0x0800e40),	\
13237  X(vqrdmlsh,   0x3000c10, N_INV,     0x0800f40),	\
13238  X(vshl,	0x0000400, N_INV,     0x0800510),	\
13239  X(vqshl,	0x0000410, N_INV,     0x0800710),	\
13240  X(vand,	0x0000110, N_INV,     0x0800030),	\
13241  X(vbic,	0x0100110, N_INV,     0x0800030),	\
13242  X(veor,	0x1000110, N_INV,     N_INV),		\
13243  X(vorn,	0x0300110, N_INV,     0x0800010),	\
13244  X(vorr,	0x0200110, N_INV,     0x0800010),	\
13245  X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
13246  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
13247  X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
13248  X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
13249  X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
13250  X(vst1,	0x0000000, 0x0800000, N_INV),		\
13251  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
13252  X(vst2,	0x0000100, 0x0800100, N_INV),		\
13253  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
13254  X(vst3,	0x0000200, 0x0800200, N_INV),		\
13255  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
13256  X(vst4,	0x0000300, 0x0800300, N_INV),		\
13257  X(vmovn,	0x1b20200, N_INV,     N_INV),		\
13258  X(vtrn,	0x1b20080, N_INV,     N_INV),		\
13259  X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
13260  X(vqmovun,	0x1b20240, N_INV,     N_INV),		\
13261  X(vnmul,      0xe200a40, 0xe200b40, N_INV),		\
13262  X(vnmla,      0xe100a40, 0xe100b40, N_INV),		\
13263  X(vnmls,      0xe100a00, 0xe100b00, N_INV),		\
13264  X(vfnma,      0xe900a40, 0xe900b40, N_INV),		\
13265  X(vfnms,      0xe900a00, 0xe900b00, N_INV),		\
13266  X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),		\
13267  X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),		\
13268  X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),		\
13269  X(vcmpez,     0xeb50ac0, 0xeb50bc0, N_INV),		\
13270  X(vseleq,	0xe000a00, N_INV,     N_INV),		\
13271  X(vselvs,	0xe100a00, N_INV,     N_INV),		\
13272  X(vselge,	0xe200a00, N_INV,     N_INV),		\
13273  X(vselgt,	0xe300a00, N_INV,     N_INV),		\
13274  X(vmaxnm,	0xe800a00, 0x3000f10, N_INV),		\
13275  X(vminnm,	0xe800a40, 0x3200f10, N_INV),		\
13276  X(vcvta,	0xebc0a40, 0x3bb0000, N_INV),		\
13277  X(vrintr,	0xeb60a40, 0x3ba0400, N_INV),		\
13278  X(vrinta,	0xeb80a40, 0x3ba0400, N_INV),		\
13279  X(aes,	0x3b00300, N_INV,     N_INV),		\
13280  X(sha3op,	0x2000c00, N_INV,     N_INV),		\
13281  X(sha1h,	0x3b902c0, N_INV,     N_INV),           \
13282  X(sha2op,     0x3ba0380, N_INV,     N_INV)
13283
13284enum neon_opc
13285{
13286#define X(OPC,I,F,S) N_MNEM_##OPC
13287NEON_ENC_TAB
13288#undef X
13289};
13290
13291static const struct neon_tab_entry neon_enc_tab[] =
13292{
13293#define X(OPC,I,F,S) { (I), (F), (S) }
13294NEON_ENC_TAB
13295#undef X
13296};
13297
13298/* Do not use these macros; instead, use NEON_ENCODE defined below.  */
13299#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13300#define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
13301#define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13302#define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13303#define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13304#define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13305#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13306#define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13307#define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13308#define NEON_ENC_SINGLE_(X) \
13309  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
13310#define NEON_ENC_DOUBLE_(X) \
13311  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
13312#define NEON_ENC_FPV8_(X) \
13313  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))
13314
13315#define NEON_ENCODE(type, inst)					\
13316  do								\
13317    {								\
13318      inst.instruction = NEON_ENC_##type##_ (inst.instruction);	\
13319      inst.is_neon = 1;						\
13320    }								\
13321  while (0)
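
/* For example, NEON_ENCODE (INTEGER, inst) replaces inst.instruction with the
   .integer encoding from the neon_enc_tab entry selected by the low 28 bits
   of inst.instruction, and marks the instruction as Neon.  */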
13322
13323#define check_neon_suffixes						\
13324  do									\
13325    {									\
13326      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon)	\
13327	{								\
13328	  as_bad (_("invalid neon suffix for non neon instruction"));	\
13329	  return;							\
13330	}								\
13331    }									\
13332  while (0)
13333
13334/* Define shapes for instruction operands. The following mnemonic characters
13335   are used in this table:
13336
     H - VFP half-precision S<n> register
     F - VFP S<n> register
13338     D - Neon D<n> register
13339     Q - Neon Q<n> register
13340     I - Immediate
13341     S - Scalar
13342     R - ARM register
13343     L - D<n> register list
13344
13345   This table is used to generate various data:
13346     - enumerations of the form NS_DDR to be used as arguments to
13347       neon_select_shape.
13348     - a table classifying shapes into single, double, quad, mixed.
13349     - a table used to drive neon_select_shape.  */
13350
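/* For instance, the entry X(3, (D, D, D), DOUBLE) below expands, via the S3
   and X macro definitions that follow, to the enumerator NS_DDD, the class
   SC_DOUBLE and the shape-info element { 3, { SE_D, SE_D, SE_D } }.  */
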
13351#define NEON_SHAPE_DEF			\
13352  X(3, (D, D, D), DOUBLE),		\
13353  X(3, (Q, Q, Q), QUAD),		\
13354  X(3, (D, D, I), DOUBLE),		\
13355  X(3, (Q, Q, I), QUAD),		\
13356  X(3, (D, D, S), DOUBLE),		\
13357  X(3, (Q, Q, S), QUAD),		\
13358  X(2, (D, D), DOUBLE),			\
13359  X(2, (Q, Q), QUAD),			\
13360  X(2, (D, S), DOUBLE),			\
13361  X(2, (Q, S), QUAD),			\
13362  X(2, (D, R), DOUBLE),			\
13363  X(2, (Q, R), QUAD),			\
13364  X(2, (D, I), DOUBLE),			\
13365  X(2, (Q, I), QUAD),			\
13366  X(3, (D, L, D), DOUBLE),		\
13367  X(2, (D, Q), MIXED),			\
13368  X(2, (Q, D), MIXED),			\
13369  X(3, (D, Q, I), MIXED),		\
13370  X(3, (Q, D, I), MIXED),		\
13371  X(3, (Q, D, D), MIXED),		\
13372  X(3, (D, Q, Q), MIXED),		\
13373  X(3, (Q, Q, D), MIXED),		\
13374  X(3, (Q, D, S), MIXED),		\
13375  X(3, (D, Q, S), MIXED),		\
13376  X(4, (D, D, D, I), DOUBLE),		\
13377  X(4, (Q, Q, Q, I), QUAD),		\
13378  X(4, (D, D, S, I), DOUBLE),		\
13379  X(4, (Q, Q, S, I), QUAD),		\
13380  X(2, (F, F), SINGLE),			\
13381  X(3, (F, F, F), SINGLE),		\
13382  X(2, (F, I), SINGLE),			\
13383  X(2, (F, D), MIXED),			\
13384  X(2, (D, F), MIXED),			\
13385  X(3, (F, F, I), MIXED),		\
13386  X(4, (R, R, F, F), SINGLE),		\
13387  X(4, (F, F, R, R), SINGLE),		\
13388  X(3, (D, R, R), DOUBLE),		\
13389  X(3, (R, R, D), DOUBLE),		\
13390  X(2, (S, R), SINGLE),			\
13391  X(2, (R, S), SINGLE),			\
13392  X(2, (F, R), SINGLE),			\
13393  X(2, (R, F), SINGLE),			\
13394/* Half float shape supported so far.  */\
13395  X (2, (H, D), MIXED),			\
13396  X (2, (D, H), MIXED),			\
13397  X (2, (H, F), MIXED),			\
13398  X (2, (F, H), MIXED),			\
13399  X (2, (H, H), HALF),			\
13400  X (2, (H, R), HALF),			\
13401  X (2, (R, H), HALF),			\
13402  X (2, (H, I), HALF),			\
13403  X (3, (H, H, H), HALF),		\
13404  X (3, (H, F, I), MIXED),		\
13405  X (3, (F, H, I), MIXED)
13406
13407#define S2(A,B)		NS_##A##B
13408#define S3(A,B,C)	NS_##A##B##C
13409#define S4(A,B,C,D)	NS_##A##B##C##D
13410
13411#define X(N, L, C) S##N L
13412
13413enum neon_shape
13414{
13415  NEON_SHAPE_DEF,
13416  NS_NULL
13417};
13418
13419#undef X
13420#undef S2
13421#undef S3
13422#undef S4
13423
13424enum neon_shape_class
13425{
13426  SC_HALF,
13427  SC_SINGLE,
13428  SC_DOUBLE,
13429  SC_QUAD,
13430  SC_MIXED
13431};
13432
13433#define X(N, L, C) SC_##C
13434
13435static enum neon_shape_class neon_shape_class[] =
13436{
13437  NEON_SHAPE_DEF
13438};
13439
13440#undef X
13441
13442enum neon_shape_el
13443{
13444  SE_H,
13445  SE_F,
13446  SE_D,
13447  SE_Q,
13448  SE_I,
13449  SE_S,
13450  SE_R,
13451  SE_L
13452};
13453
13454/* Register widths of above.  */
13455static unsigned neon_shape_el_size[] =
13456{
13457  16,
13458  32,
13459  64,
13460  128,
13461  0,
13462  32,
13463  32,
13464  0
13465};
13466
13467struct neon_shape_info
13468{
13469  unsigned els;
13470  enum neon_shape_el el[NEON_MAX_TYPE_ELS];
13471};
13472
13473#define S2(A,B)		{ SE_##A, SE_##B }
13474#define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
13475#define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }
13476
13477#define X(N, L, C) { N, S##N L }
13478
13479static struct neon_shape_info neon_shape_tab[] =
13480{
13481  NEON_SHAPE_DEF
13482};
13483
13484#undef X
13485#undef S2
13486#undef S3
13487#undef S4
13488
13489/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based in some way on) the key
13491   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
13492   set, various other bits can be set as well in order to modify the meaning of
13493   the type constraint.  */
13494
13495enum neon_type_mask
13496{
13497  N_S8   = 0x0000001,
13498  N_S16  = 0x0000002,
13499  N_S32  = 0x0000004,
13500  N_S64  = 0x0000008,
13501  N_U8   = 0x0000010,
13502  N_U16  = 0x0000020,
13503  N_U32  = 0x0000040,
13504  N_U64  = 0x0000080,
13505  N_I8   = 0x0000100,
13506  N_I16  = 0x0000200,
13507  N_I32  = 0x0000400,
13508  N_I64  = 0x0000800,
13509  N_8    = 0x0001000,
13510  N_16   = 0x0002000,
13511  N_32   = 0x0004000,
13512  N_64   = 0x0008000,
13513  N_P8   = 0x0010000,
13514  N_P16  = 0x0020000,
13515  N_F16  = 0x0040000,
13516  N_F32  = 0x0080000,
13517  N_F64  = 0x0100000,
13518  N_P64	 = 0x0200000,
13519  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
13520  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
13521  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
13522  N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
13523  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
13524  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
13525  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
13526  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
13527  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
13528  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
13529  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
13530  N_UTYP = 0,
13531  N_MAX_NONSPECIAL = N_P64
13532};
13533
13534#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
13535
13536#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
13537#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
13538#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
13539#define N_S_32     (N_S8 | N_S16 | N_S32)
13540#define N_F_16_32  (N_F16 | N_F32)
13541#define N_SUF_32   (N_SU_32 | N_F_16_32)
13542#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
13543#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
13544#define N_F_ALL    (N_F16 | N_F32 | N_F64)
13545
13546/* Pass this as the first type argument to neon_check_type to ignore types
13547   altogether.  */
13548#define N_IGNORE_TYPE (N_KEY | N_EQK)
13549
13550/* Select a "shape" for the current instruction (describing register types or
13551   sizes) from a list of alternatives. Return NS_NULL if the current instruction
13552   doesn't fit. For non-polymorphic shapes, checking is usually done as a
13553   function of operand parsing, so this function doesn't need to be called.
13554   Shapes should be listed in order of decreasing length.  */
13555
13556static enum neon_shape
13557neon_select_shape (enum neon_shape shape, ...)
13558{
13559  va_list ap;
13560  enum neon_shape first_shape = shape;
13561
13562  /* Fix missing optional operands. FIXME: we don't know at this point how
13563     many arguments we should have, so this makes the assumption that we have
13564     > 1. This is true of all current Neon opcodes, I think, but may not be
13565     true in the future.  */
13566  if (!inst.operands[1].present)
13567    inst.operands[1] = inst.operands[0];
13568
13569  va_start (ap, shape);
13570
13571  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
13572    {
13573      unsigned j;
13574      int matches = 1;
13575
13576      for (j = 0; j < neon_shape_tab[shape].els; j++)
13577	{
13578	  if (!inst.operands[j].present)
13579	    {
13580	      matches = 0;
13581	      break;
13582	    }
13583
13584	  switch (neon_shape_tab[shape].el[j])
13585	    {
	      /* If a  .f16,  .16,  .u16,  .s16 type specifier is given over
		 a VFP single precision register operand, it essentially
		 means that only half of the register is used.

		 If the type specifier is given after the mnemonic, the
		 information is stored in inst.vectype.  If the type specifier
		 is given after a register operand, the information is stored
		 in inst.operands[].vectype.
13594
13595		 When there is only one type specifier, and all the register
13596		 operands are the same type of hardware register, the type
13597		 specifier applies to all register operands.
13598
13599		 If no type specifier is given, the shape is inferred from
13600		 operand information.
13601
13602		 for example:
13603		 vadd.f16 s0, s1, s2:		NS_HHH
13604		 vabs.f16 s0, s1:		NS_HH
13605		 vmov.f16 s0, r1:		NS_HR
13606		 vmov.f16 r0, s1:		NS_RH
13607		 vcvt.f16 r0, s1:		NS_RH
13608		 vcvt.f16.s32	s2, s2, #29:	NS_HFI
13609		 vcvt.f16.s32	s2, s2:		NS_HF
13610	      */
13611	    case SE_H:
13612	      if (!(inst.operands[j].isreg
13613		    && inst.operands[j].isvec
13614		    && inst.operands[j].issingle
13615		    && !inst.operands[j].isquad
13616		    && ((inst.vectype.elems == 1
13617			 && inst.vectype.el[0].size == 16)
13618			|| (inst.vectype.elems > 1
13619			    && inst.vectype.el[j].size == 16)
13620			|| (inst.vectype.elems == 0
13621			    && inst.operands[j].vectype.type != NT_invtype
13622			    && inst.operands[j].vectype.size == 16))))
13623		matches = 0;
13624	      break;
13625
13626	    case SE_F:
13627	      if (!(inst.operands[j].isreg
13628		    && inst.operands[j].isvec
13629		    && inst.operands[j].issingle
13630		    && !inst.operands[j].isquad
13631		    && ((inst.vectype.elems == 1 && inst.vectype.el[0].size == 32)
13632			|| (inst.vectype.elems > 1 && inst.vectype.el[j].size == 32)
13633			|| (inst.vectype.elems == 0
13634			    && (inst.operands[j].vectype.size == 32
13635				|| inst.operands[j].vectype.type == NT_invtype)))))
13636		matches = 0;
13637	      break;
13638
13639	    case SE_D:
13640	      if (!(inst.operands[j].isreg
13641		    && inst.operands[j].isvec
13642		    && !inst.operands[j].isquad
13643		    && !inst.operands[j].issingle))
13644		matches = 0;
13645	      break;
13646
13647	    case SE_R:
13648	      if (!(inst.operands[j].isreg
13649		    && !inst.operands[j].isvec))
13650		matches = 0;
13651	      break;
13652
13653	    case SE_Q:
13654	      if (!(inst.operands[j].isreg
13655		    && inst.operands[j].isvec
13656		    && inst.operands[j].isquad
13657		    && !inst.operands[j].issingle))
13658		matches = 0;
13659	      break;
13660
13661	    case SE_I:
13662	      if (!(!inst.operands[j].isreg
13663		    && !inst.operands[j].isscalar))
13664		matches = 0;
13665	      break;
13666
13667	    case SE_S:
13668	      if (!(!inst.operands[j].isreg
13669		    && inst.operands[j].isscalar))
13670		matches = 0;
13671	      break;
13672
13673	    case SE_L:
13674	      break;
13675	    }
13676	  if (!matches)
13677	    break;
13678	}
13679      if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
13680	/* We've matched all the entries in the shape table, and we don't
13681	   have any left over operands which have not been matched.  */
13682	break;
13683    }
13684
13685  va_end (ap);
13686
13687  if (shape == NS_NULL && first_shape != NS_NULL)
13688    first_error (_("invalid instruction shape"));
13689
13690  return shape;
13691}
13692
13693/* True if SHAPE is predominantly a quadword operation (most of the time, this
13694   means the Q bit should be set).  */
13695
13696static int
13697neon_quad (enum neon_shape shape)
13698{
13699  return neon_shape_class[shape] == SC_QUAD;
13700}
13701
13702static void
13703neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
13704		       unsigned *g_size)
13705{
13706  /* Allow modification to be made to types which are constrained to be
13707     based on the key element, based on bits set alongside N_EQK.  */
13708  if ((typebits & N_EQK) != 0)
13709    {
13710      if ((typebits & N_HLF) != 0)
13711	*g_size /= 2;
13712      else if ((typebits & N_DBL) != 0)
13713	*g_size *= 2;
13714      if ((typebits & N_SGN) != 0)
13715	*g_type = NT_signed;
13716      else if ((typebits & N_UNS) != 0)
13717	*g_type = NT_unsigned;
13718      else if ((typebits & N_INT) != 0)
13719	*g_type = NT_integer;
13720      else if ((typebits & N_FLT) != 0)
13721	*g_type = NT_float;
13722      else if ((typebits & N_SIZ) != 0)
13723	*g_type = NT_untyped;
13724    }
13725}
13726
/* Return a copy of KEY with its type and size adjusted according to the bits
   set in THISARG.  KEY should be the "key" operand type, i.e. the single type
   specified in a Neon instruction when it is the only one given.  */
13730
13731static struct neon_type_el
13732neon_type_promote (struct neon_type_el *key, unsigned thisarg)
13733{
13734  struct neon_type_el dest = *key;
13735
13736  gas_assert ((thisarg & N_EQK) != 0);
13737
13738  neon_modify_type_size (thisarg, &dest.type, &dest.size);
13739
13740  return dest;
13741}
13742
13743/* Convert Neon type and size into compact bitmask representation.  */
13744
13745static enum neon_type_mask
13746type_chk_of_el_type (enum neon_el_type type, unsigned size)
13747{
13748  switch (type)
13749    {
13750    case NT_untyped:
13751      switch (size)
13752	{
13753	case 8:  return N_8;
13754	case 16: return N_16;
13755	case 32: return N_32;
13756	case 64: return N_64;
13757	default: ;
13758	}
13759      break;
13760
13761    case NT_integer:
13762      switch (size)
13763	{
13764	case 8:  return N_I8;
13765	case 16: return N_I16;
13766	case 32: return N_I32;
13767	case 64: return N_I64;
13768	default: ;
13769	}
13770      break;
13771
13772    case NT_float:
13773      switch (size)
13774	{
13775	case 16: return N_F16;
13776	case 32: return N_F32;
13777	case 64: return N_F64;
13778	default: ;
13779	}
13780      break;
13781
13782    case NT_poly:
13783      switch (size)
13784	{
13785	case 8:  return N_P8;
13786	case 16: return N_P16;
13787	case 64: return N_P64;
13788	default: ;
13789	}
13790      break;
13791
13792    case NT_signed:
13793      switch (size)
13794	{
13795	case 8:  return N_S8;
13796	case 16: return N_S16;
13797	case 32: return N_S32;
13798	case 64: return N_S64;
13799	default: ;
13800	}
13801      break;
13802
13803    case NT_unsigned:
13804      switch (size)
13805	{
13806	case 8:  return N_U8;
13807	case 16: return N_U16;
13808	case 32: return N_U32;
13809	case 64: return N_U64;
13810	default: ;
13811	}
13812      break;
13813
13814    default: ;
13815    }
13816
13817  return N_UTYP;
13818}
13819
13820/* Convert compact Neon bitmask type representation to a type and size. Only
13821   handles the case where a single bit is set in the mask.  */
13822
13823static int
13824el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
13825		     enum neon_type_mask mask)
13826{
13827  if ((mask & N_EQK) != 0)
13828    return FAIL;
13829
13830  if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
13831    *size = 8;
13832  else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16)) != 0)
13833    *size = 16;
13834  else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
13835    *size = 32;
13836  else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
13837    *size = 64;
13838  else
13839    return FAIL;
13840
13841  if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
13842    *type = NT_signed;
13843  else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
13844    *type = NT_unsigned;
13845  else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
13846    *type = NT_integer;
13847  else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
13848    *type = NT_untyped;
13849  else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
13850    *type = NT_poly;
13851  else if ((mask & (N_F_ALL)) != 0)
13852    *type = NT_float;
13853  else
13854    return FAIL;
13855
13856  return SUCCESS;
13857}
13858
13859/* Modify a bitmask of allowed types. This is only needed for type
13860   relaxation.  */
13861
13862static unsigned
13863modify_types_allowed (unsigned allowed, unsigned mods)
13864{
13865  unsigned size;
13866  enum neon_el_type type;
13867  unsigned destmask;
13868  int i;
13869
13870  destmask = 0;
13871
13872  for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
13873    {
13874      if (el_type_of_type_chk (&type, &size,
13875			       (enum neon_type_mask) (allowed & i)) == SUCCESS)
13876	{
13877	  neon_modify_type_size (mods, &type, &size);
13878	  destmask |= type_chk_of_el_type (type, size);
13879	}
13880    }
13881
13882  return destmask;
13883}
13884
13885/* Check type and return type classification.
13886   The manual states (paraphrase): If one datatype is given, it indicates the
13887   type given in:
13888    - the second operand, if there is one
13889    - the operand, if there is no second operand
13890    - the result, if there are no operands.
   This isn't quite good enough though, so we use the concept of a "key"
   datatype, set on a per-instruction basis, which is the one that matters
   when only one data type is written.
13894   Note: this function has side-effects (e.g. filling in missing operands). All
13895   Neon instructions should call it before performing bit encoding.  */
13896
13897static struct neon_type_el
13898neon_check_type (unsigned els, enum neon_shape ns, ...)
13899{
13900  va_list ap;
13901  unsigned i, pass, key_el = 0;
13902  unsigned types[NEON_MAX_TYPE_ELS];
13903  enum neon_el_type k_type = NT_invtype;
13904  unsigned k_size = -1u;
13905  struct neon_type_el badtype = {NT_invtype, -1};
13906  unsigned key_allowed = 0;
13907
  /* The optional register operand in a Neon instruction, when omitted, is
     always operand 1.  Fill it in here, from operand 0, if it was omitted.  */
13910  if (els > 1 && !inst.operands[1].present)
13911    inst.operands[1] = inst.operands[0];
13912
13913  /* Suck up all the varargs.  */
13914  va_start (ap, ns);
13915  for (i = 0; i < els; i++)
13916    {
13917      unsigned thisarg = va_arg (ap, unsigned);
13918      if (thisarg == N_IGNORE_TYPE)
13919	{
13920	  va_end (ap);
13921	  return badtype;
13922	}
13923      types[i] = thisarg;
13924      if ((thisarg & N_KEY) != 0)
13925	key_el = i;
13926    }
13927  va_end (ap);
13928
13929  if (inst.vectype.elems > 0)
13930    for (i = 0; i < els; i++)
13931      if (inst.operands[i].vectype.type != NT_invtype)
13932	{
13933	  first_error (_("types specified in both the mnemonic and operands"));
13934	  return badtype;
13935	}
13936
13937  /* Duplicate inst.vectype elements here as necessary.
13938     FIXME: No idea if this is exactly the same as the ARM assembler,
13939     particularly when an insn takes one register and one non-register
13940     operand. */
13941  if (inst.vectype.elems == 1 && els > 1)
13942    {
13943      unsigned j;
13944      inst.vectype.elems = els;
13945      inst.vectype.el[key_el] = inst.vectype.el[0];
13946      for (j = 0; j < els; j++)
13947	if (j != key_el)
13948	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
13949						  types[j]);
13950    }
13951  else if (inst.vectype.elems == 0 && els > 0)
13952    {
13953      unsigned j;
13954      /* No types were given after the mnemonic, so look for types specified
13955	 after each operand. We allow some flexibility here; as long as the
13956	 "key" operand has a type, we can infer the others.  */
13957      for (j = 0; j < els; j++)
13958	if (inst.operands[j].vectype.type != NT_invtype)
13959	  inst.vectype.el[j] = inst.operands[j].vectype;
13960
13961      if (inst.operands[key_el].vectype.type != NT_invtype)
13962	{
13963	  for (j = 0; j < els; j++)
13964	    if (inst.operands[j].vectype.type == NT_invtype)
13965	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
13966						      types[j]);
13967	}
13968      else
13969	{
13970	  first_error (_("operand types can't be inferred"));
13971	  return badtype;
13972	}
13973    }
13974  else if (inst.vectype.elems != els)
13975    {
13976      first_error (_("type specifier has the wrong number of parts"));
13977      return badtype;
13978    }
13979
13980  for (pass = 0; pass < 2; pass++)
13981    {
13982      for (i = 0; i < els; i++)
13983	{
13984	  unsigned thisarg = types[i];
13985	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
13986	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
13987	  enum neon_el_type g_type = inst.vectype.el[i].type;
13988	  unsigned g_size = inst.vectype.el[i].size;
13989
13990	  /* Decay more-specific signed & unsigned types to sign-insensitive
13991	     integer types if sign-specific variants are unavailable.  */
13992	  if ((g_type == NT_signed || g_type == NT_unsigned)
13993	      && (types_allowed & N_SU_ALL) == 0)
13994	    g_type = NT_integer;
13995
13996	  /* If only untyped args are allowed, decay any more specific types to
13997	     them. Some instructions only care about signs for some element
13998	     sizes, so handle that properly.  */
13999	  if (((types_allowed & N_UNT) == 0)
14000	      && ((g_size == 8 && (types_allowed & N_8) != 0)
14001		  || (g_size == 16 && (types_allowed & N_16) != 0)
14002		  || (g_size == 32 && (types_allowed & N_32) != 0)
14003		  || (g_size == 64 && (types_allowed & N_64) != 0)))
14004	    g_type = NT_untyped;
14005
14006	  if (pass == 0)
14007	    {
14008	      if ((thisarg & N_KEY) != 0)
14009		{
14010		  k_type = g_type;
14011		  k_size = g_size;
14012		  key_allowed = thisarg & ~N_KEY;
14013
14014		  /* Check architecture constraint on FP16 extension.  */
14015		  if (k_size == 16
14016		      && k_type == NT_float
14017		      && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
14018		    {
14019		      inst.error = _(BAD_FP16);
14020		      return badtype;
14021		    }
14022		}
14023	    }
14024	  else
14025	    {
14026	      if ((thisarg & N_VFP) != 0)
14027		{
14028		  enum neon_shape_el regshape;
14029		  unsigned regwidth, match;
14030
14031		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
14032		  if (ns == NS_NULL)
14033		    {
14034		      first_error (_("invalid instruction shape"));
14035		      return badtype;
14036		    }
14037		  regshape = neon_shape_tab[ns].el[i];
14038		  regwidth = neon_shape_el_size[regshape];
14039
14040		  /* In VFP mode, operands must match register widths. If we
14041		     have a key operand, use its width, else use the width of
14042		     the current operand.  */
14043		  if (k_size != -1u)
14044		    match = k_size;
14045		  else
14046		    match = g_size;
14047
14048		  /* FP16 will use a single precision register.  */
14049		  if (regwidth == 32 && match == 16)
14050		    {
14051		      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
14052			match = regwidth;
14053		      else
14054			{
14055			  inst.error = _(BAD_FP16);
14056			  return badtype;
14057			}
14058		    }
14059
14060		  if (regwidth != match)
14061		    {
14062		      first_error (_("operand size must match register width"));
14063		      return badtype;
14064		    }
14065		}
14066
14067	      if ((thisarg & N_EQK) == 0)
14068		{
14069		  unsigned given_type = type_chk_of_el_type (g_type, g_size);
14070
14071		  if ((given_type & types_allowed) == 0)
14072		    {
14073		      first_error (_("bad type in Neon instruction"));
14074		      return badtype;
14075		    }
14076		}
14077	      else
14078		{
14079		  enum neon_el_type mod_k_type = k_type;
14080		  unsigned mod_k_size = k_size;
14081		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
14082		  if (g_type != mod_k_type || g_size != mod_k_size)
14083		    {
14084		      first_error (_("inconsistent types in Neon instruction"));
14085		      return badtype;
14086		    }
14087		}
14088	    }
14089	}
14090    }
14091
14092  return inst.vectype.el[key_el];
14093}
14094
14095/* Neon-style VFP instruction forwarding.  */
14096
14097/* Thumb VFP instructions have 0xE in the condition field.  */
14098
14099static void
14100do_vfp_cond_or_thumb (void)
14101{
14102  inst.is_neon = 1;
14103
14104  if (thumb_mode)
14105    inst.instruction |= 0xe0000000;
14106  else
14107    inst.instruction |= inst.cond << 28;
14108}
14109
14110/* Look up and encode a simple mnemonic, for use as a helper function for the
14111   Neon-style VFP syntax.  This avoids duplication of bits of the insns table,
14112   etc.  It is assumed that operand parsing has already been done, and that the
14113   operands are in the form expected by the given opcode (this isn't necessarily
14114   the same as the form in which they were parsed, hence some massaging must
14115   take place before this function is called).
14116   Checks current arch version against that in the looked-up opcode.  */
14117
14118static void
14119do_vfp_nsyn_opcode (const char *opname)
14120{
14121  const struct asm_opcode *opcode;
14122
14123  opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
14124
14125  if (!opcode)
14126    abort ();
14127
14128  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
14129		thumb_mode ? *opcode->tvariant : *opcode->avariant),
14130	      _(BAD_FPU));
14131
14132  inst.is_neon = 1;
14133
14134  if (thumb_mode)
14135    {
14136      inst.instruction = opcode->tvalue;
14137      opcode->tencode ();
14138    }
14139  else
14140    {
14141      inst.instruction = (inst.cond << 28) | opcode->avalue;
14142      opcode->aencode ();
14143    }
14144}
14145
14146static void
14147do_vfp_nsyn_add_sub (enum neon_shape rs)
14148{
14149  int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
14150
14151  if (rs == NS_FFF || rs == NS_HHH)
14152    {
14153      if (is_add)
14154	do_vfp_nsyn_opcode ("fadds");
14155      else
14156	do_vfp_nsyn_opcode ("fsubs");
14157
14158      /* ARMv8.2 fp16 instruction.  */
14159      if (rs == NS_HHH)
14160	do_scalar_fp16_v82_encode ();
14161    }
14162  else
14163    {
14164      if (is_add)
14165	do_vfp_nsyn_opcode ("faddd");
14166      else
14167	do_vfp_nsyn_opcode ("fsubd");
14168    }
14169}
14170
14171/* Check operand types to see if this is a VFP instruction, and if so call
14172   PFN ().  */
14173
14174static int
14175try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
14176{
14177  enum neon_shape rs;
14178  struct neon_type_el et;
14179
14180  switch (args)
14181    {
14182    case 2:
14183      rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
14184      et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
14185      break;
14186
14187    case 3:
14188      rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14189      et = neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14190			    N_F_ALL | N_KEY | N_VFP);
14191      break;
14192
14193    default:
14194      abort ();
14195    }
14196
14197  if (et.type != NT_invtype)
14198    {
14199      pfn (rs);
14200      return SUCCESS;
14201    }
14202
14203  inst.error = NULL;
14204  return FAIL;
14205}
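
/* For example, do_neon_addsub_if_i below calls try_vfp_nsyn (3,
   do_vfp_nsyn_add_sub) first and only falls back to the Neon encoding when
   the operands do not match one of the VFP shapes and types above.  */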
14206
14207static void
14208do_vfp_nsyn_mla_mls (enum neon_shape rs)
14209{
14210  int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
14211
14212  if (rs == NS_FFF || rs == NS_HHH)
14213    {
14214      if (is_mla)
14215	do_vfp_nsyn_opcode ("fmacs");
14216      else
14217	do_vfp_nsyn_opcode ("fnmacs");
14218
14219      /* ARMv8.2 fp16 instruction.  */
14220      if (rs == NS_HHH)
14221	do_scalar_fp16_v82_encode ();
14222    }
14223  else
14224    {
14225      if (is_mla)
14226	do_vfp_nsyn_opcode ("fmacd");
14227      else
14228	do_vfp_nsyn_opcode ("fnmacd");
14229    }
14230}
14231
14232static void
14233do_vfp_nsyn_fma_fms (enum neon_shape rs)
14234{
14235  int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
14236
14237  if (rs == NS_FFF || rs == NS_HHH)
14238    {
14239      if (is_fma)
14240	do_vfp_nsyn_opcode ("ffmas");
14241      else
14242	do_vfp_nsyn_opcode ("ffnmas");
14243
14244      /* ARMv8.2 fp16 instruction.  */
14245      if (rs == NS_HHH)
14246	do_scalar_fp16_v82_encode ();
14247    }
14248  else
14249    {
14250      if (is_fma)
14251	do_vfp_nsyn_opcode ("ffmad");
14252      else
14253	do_vfp_nsyn_opcode ("ffnmad");
14254    }
14255}
14256
14257static void
14258do_vfp_nsyn_mul (enum neon_shape rs)
14259{
14260  if (rs == NS_FFF || rs == NS_HHH)
14261    {
14262      do_vfp_nsyn_opcode ("fmuls");
14263
14264      /* ARMv8.2 fp16 instruction.  */
14265      if (rs == NS_HHH)
14266	do_scalar_fp16_v82_encode ();
14267    }
14268  else
14269    do_vfp_nsyn_opcode ("fmuld");
14270}
14271
14272static void
14273do_vfp_nsyn_abs_neg (enum neon_shape rs)
14274{
14275  int is_neg = (inst.instruction & 0x80) != 0;
14276  neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_VFP | N_KEY);
14277
14278  if (rs == NS_FF || rs == NS_HH)
14279    {
14280      if (is_neg)
14281	do_vfp_nsyn_opcode ("fnegs");
14282      else
14283	do_vfp_nsyn_opcode ("fabss");
14284
14285      /* ARMv8.2 fp16 instruction.  */
14286      if (rs == NS_HH)
14287	do_scalar_fp16_v82_encode ();
14288    }
14289  else
14290    {
14291      if (is_neg)
14292	do_vfp_nsyn_opcode ("fnegd");
14293      else
14294	do_vfp_nsyn_opcode ("fabsd");
14295    }
14296}
14297
14298/* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
14299   insns belong to Neon, and are handled elsewhere.  */
14300
14301static void
14302do_vfp_nsyn_ldm_stm (int is_dbmode)
14303{
14304  int is_ldm = (inst.instruction & (1 << 20)) != 0;
14305  if (is_ldm)
14306    {
14307      if (is_dbmode)
14308	do_vfp_nsyn_opcode ("fldmdbs");
14309      else
14310	do_vfp_nsyn_opcode ("fldmias");
14311    }
14312  else
14313    {
14314      if (is_dbmode)
14315	do_vfp_nsyn_opcode ("fstmdbs");
14316      else
14317	do_vfp_nsyn_opcode ("fstmias");
14318    }
14319}
14320
14321static void
14322do_vfp_nsyn_sqrt (void)
14323{
14324  enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
14325  neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
14326
14327  if (rs == NS_FF || rs == NS_HH)
14328    {
14329      do_vfp_nsyn_opcode ("fsqrts");
14330
14331      /* ARMv8.2 fp16 instruction.  */
14332      if (rs == NS_HH)
14333	do_scalar_fp16_v82_encode ();
14334    }
14335  else
14336    do_vfp_nsyn_opcode ("fsqrtd");
14337}
14338
14339static void
14340do_vfp_nsyn_div (void)
14341{
14342  enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14343  neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14344		   N_F_ALL | N_KEY | N_VFP);
14345
14346  if (rs == NS_FFF || rs == NS_HHH)
14347    {
14348      do_vfp_nsyn_opcode ("fdivs");
14349
14350      /* ARMv8.2 fp16 instruction.  */
14351      if (rs == NS_HHH)
14352	do_scalar_fp16_v82_encode ();
14353    }
14354  else
14355    do_vfp_nsyn_opcode ("fdivd");
14356}
14357
14358static void
14359do_vfp_nsyn_nmul (void)
14360{
14361  enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14362  neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14363		   N_F_ALL | N_KEY | N_VFP);
14364
14365  if (rs == NS_FFF || rs == NS_HHH)
14366    {
14367      NEON_ENCODE (SINGLE, inst);
14368      do_vfp_sp_dyadic ();
14369
14370      /* ARMv8.2 fp16 instruction.  */
14371      if (rs == NS_HHH)
14372	do_scalar_fp16_v82_encode ();
14373    }
14374  else
14375    {
14376      NEON_ENCODE (DOUBLE, inst);
14377      do_vfp_dp_rd_rn_rm ();
14378    }
14379  do_vfp_cond_or_thumb ();
14380
14381}
14382
14383static void
14384do_vfp_nsyn_cmp (void)
14385{
14386  enum neon_shape rs;
14387  if (inst.operands[1].isreg)
14388    {
14389      rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
14390      neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
14391
14392      if (rs == NS_FF || rs == NS_HH)
14393	{
14394	  NEON_ENCODE (SINGLE, inst);
14395	  do_vfp_sp_monadic ();
14396	}
14397      else
14398	{
14399	  NEON_ENCODE (DOUBLE, inst);
14400	  do_vfp_dp_rd_rm ();
14401	}
14402    }
14403  else
14404    {
14405      rs = neon_select_shape (NS_HI, NS_FI, NS_DI, NS_NULL);
14406      neon_check_type (2, rs, N_F_ALL | N_KEY | N_VFP, N_EQK);
14407
14408      switch (inst.instruction & 0x0fffffff)
14409	{
14410	case N_MNEM_vcmp:
14411	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
14412	  break;
14413	case N_MNEM_vcmpe:
14414	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
14415	  break;
14416	default:
14417	  abort ();
14418	}
14419
14420      if (rs == NS_FI || rs == NS_HI)
14421	{
14422	  NEON_ENCODE (SINGLE, inst);
14423	  do_vfp_sp_compare_z ();
14424	}
14425      else
14426	{
14427	  NEON_ENCODE (DOUBLE, inst);
14428	  do_vfp_dp_rd ();
14429	}
14430    }
14431  do_vfp_cond_or_thumb ();
14432
14433  /* ARMv8.2 fp16 instruction.  */
14434  if (rs == NS_HI || rs == NS_HH)
14435    do_scalar_fp16_v82_encode ();
14436}
14437
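/* Insert an implicit "sp!" base operand so that the VFP push/pop
   pseudo-instructions below can be encoded as multiple load/store
   instructions that write back to the stack pointer.  */
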
14438static void
14439nsyn_insert_sp (void)
14440{
14441  inst.operands[1] = inst.operands[0];
14442  memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
14443  inst.operands[0].reg = REG_SP;
14444  inst.operands[0].isreg = 1;
14445  inst.operands[0].writeback = 1;
14446  inst.operands[0].present = 1;
14447}
14448
14449static void
14450do_vfp_nsyn_push (void)
14451{
14452  nsyn_insert_sp ();
14453
14454  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
14455	      _("register list must contain at least 1 and at most 16 "
14456		"registers"));
14457
14458  if (inst.operands[1].issingle)
14459    do_vfp_nsyn_opcode ("fstmdbs");
14460  else
14461    do_vfp_nsyn_opcode ("fstmdbd");
14462}
14463
14464static void
14465do_vfp_nsyn_pop (void)
14466{
14467  nsyn_insert_sp ();
14468
14469  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
14470	      _("register list must contain at least 1 and at most 16 "
14471		"registers"));
14472
14473  if (inst.operands[1].issingle)
14474    do_vfp_nsyn_opcode ("fldmias");
14475  else
14476    do_vfp_nsyn_opcode ("fldmiad");
14477}
14478
14479/* Fix up Neon data-processing instructions, ORing in the correct bits for
14480   ARM mode or Thumb mode and moving the encoded bit 24 to bit 28.  */
14481
14482static void
14483neon_dp_fixup (struct arm_it* insn)
14484{
14485  unsigned int i = insn->instruction;
14486  insn->is_neon = 1;
14487
14488  if (thumb_mode)
14489    {
14490      /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode.  */
14491      if (i & (1 << 24))
14492	i |= 1 << 28;
14493
14494      i &= ~(1 << 24);
14495
14496      i |= 0xef000000;
14497    }
14498  else
14499    i |= 0xf2000000;
14500
14501  insn->instruction = i;
14502}
14503
14504/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
14505   (0, 1, 2, 3).  */
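/* For example, neon_logbits (8) == 0 and neon_logbits (64) == 3.  */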
14506
14507static unsigned
14508neon_logbits (unsigned x)
14509{
14510  return ffs (x) - 4;
14511}
14512
14513#define LOW4(R) ((R) & 0xf)
14514#define HI1(R) (((R) >> 4) & 1)
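
/* LOW4 and HI1 split a 5-bit register number into its low four bits and its
   top bit, as used by the Neon encodings that follow.  */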
14515
14516/* Encode insns with bit pattern:
14517
14518  |28/24|23|22 |21 20|19 16|15 12|11    8|7|6|5|4|3  0|
14519  |  U  |x |D  |size | Rn  | Rd  |x x x x|N|Q|M|x| Rm |
14520
14521  SIZE is passed in bits. -1 means size field isn't changed, in case it has a
14522  different meaning for some instruction.  */
14523
14524static void
14525neon_three_same (int isquad, int ubit, int size)
14526{
14527  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14528  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14529  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14530  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14531  inst.instruction |= LOW4 (inst.operands[2].reg);
14532  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
14533  inst.instruction |= (isquad != 0) << 6;
14534  inst.instruction |= (ubit != 0) << 24;
14535  if (size != -1)
14536    inst.instruction |= neon_logbits (size) << 20;
14537
14538  neon_dp_fixup (&inst);
14539}
14540
14541/* Encode instructions of the form:
14542
14543  |28/24|23|22|21 20|19 18|17 16|15 12|11      7|6|5|4|3  0|
14544  |  U  |x |D |x  x |size |x  x | Rd  |x x x x x|Q|M|x| Rm |
14545
14546  Don't write size if SIZE == -1.  */
14547
14548static void
14549neon_two_same (int qbit, int ubit, int size)
14550{
14551  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14552  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14553  inst.instruction |= LOW4 (inst.operands[1].reg);
14554  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14555  inst.instruction |= (qbit != 0) << 6;
14556  inst.instruction |= (ubit != 0) << 24;
14557
14558  if (size != -1)
14559    inst.instruction |= neon_logbits (size) << 18;
14560
14561  neon_dp_fixup (&inst);
14562}
14563
14564/* Neon instruction encoders, in approximate order of appearance.  */
14565
14566static void
14567do_neon_dyadic_i_su (void)
14568{
14569  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14570  struct neon_type_el et = neon_check_type (3, rs,
14571    N_EQK, N_EQK, N_SU_32 | N_KEY);
14572  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14573}
14574
14575static void
14576do_neon_dyadic_i64_su (void)
14577{
14578  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14579  struct neon_type_el et = neon_check_type (3, rs,
14580    N_EQK, N_EQK, N_SU_ALL | N_KEY);
14581  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14582}
14583
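/* Write out the common fields of a Neon shift-by-immediate instruction:
   the registers, the Q bit, the shift amount IMMBITS (the imm6 field) and
   an element-size marker derived from ET; the U bit is written only when
   WRITE_UBIT is set.  */
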
14584static void
14585neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
14586		unsigned immbits)
14587{
14588  unsigned size = et.size >> 3;
14589  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14590  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14591  inst.instruction |= LOW4 (inst.operands[1].reg);
14592  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14593  inst.instruction |= (isquad != 0) << 6;
14594  inst.instruction |= immbits << 16;
14595  inst.instruction |= (size >> 3) << 7;
14596  inst.instruction |= (size & 0x7) << 19;
14597  if (write_ubit)
14598    inst.instruction |= (uval != 0) << 24;
14599
14600  neon_dp_fixup (&inst);
14601}
14602
14603static void
14604do_neon_shl_imm (void)
14605{
14606  if (!inst.operands[2].isreg)
14607    {
14608      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14609      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
14610      int imm = inst.operands[2].imm;
14611
14612      constraint (imm < 0 || (unsigned)imm >= et.size,
14613		  _("immediate out of range for shift"));
14614      NEON_ENCODE (IMMED, inst);
14615      neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
14616    }
14617  else
14618    {
14619      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14620      struct neon_type_el et = neon_check_type (3, rs,
14621	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
14622      unsigned int tmp;
14623
14624      /* VSHL/VQSHL 3-register variants have syntax such as:
14625	   vshl.xx Dd, Dm, Dn
14626	 whereas other 3-register operations encoded by neon_three_same have
14627	 syntax like:
14628	   vadd.xx Dd, Dn, Dm
14629	 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
14630	 here.  */
14631      tmp = inst.operands[2].reg;
14632      inst.operands[2].reg = inst.operands[1].reg;
14633      inst.operands[1].reg = tmp;
14634      NEON_ENCODE (INTEGER, inst);
14635      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14636    }
14637}
14638
14639static void
14640do_neon_qshl_imm (void)
14641{
14642  if (!inst.operands[2].isreg)
14643    {
14644      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14645      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
14646      int imm = inst.operands[2].imm;
14647
14648      constraint (imm < 0 || (unsigned)imm >= et.size,
14649		  _("immediate out of range for shift"));
14650      NEON_ENCODE (IMMED, inst);
14651      neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, imm);
14652    }
14653  else
14654    {
14655      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14656      struct neon_type_el et = neon_check_type (3, rs,
14657	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
14658      unsigned int tmp;
14659
14660      /* See note in do_neon_shl_imm.  */
14661      tmp = inst.operands[2].reg;
14662      inst.operands[2].reg = inst.operands[1].reg;
14663      inst.operands[1].reg = tmp;
14664      NEON_ENCODE (INTEGER, inst);
14665      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14666    }
14667}
14668
14669static void
14670do_neon_rshl (void)
14671{
14672  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14673  struct neon_type_el et = neon_check_type (3, rs,
14674    N_EQK, N_EQK, N_SU_ALL | N_KEY);
14675  unsigned int tmp;
14676
14677  tmp = inst.operands[2].reg;
14678  inst.operands[2].reg = inst.operands[1].reg;
14679  inst.operands[1].reg = tmp;
14680  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14681}
14682
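/* Choose a "cmode" encoding for a Neon logic-immediate (VBIC/VORR style)
   operation with element size SIZE.  On success the 8-bit payload is stored
   in *IMMBITS and the cmode value is returned; otherwise an error is reported
   and FAIL is returned.  */
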
14683static int
14684neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
14685{
14686  /* Handle .I8 pseudo-instructions.  */
14687  if (size == 8)
14688    {
14689      /* Unfortunately, this will make everything apart from zero out-of-range.
14690	 FIXME is this the intended semantics? There doesn't seem much point in
14691	 accepting .I8 if so.  */
14692      immediate |= immediate << 8;
14693      size = 16;
14694    }
14695
14696  if (size >= 32)
14697    {
14698      if (immediate == (immediate & 0x000000ff))
14699	{
14700	  *immbits = immediate;
14701	  return 0x1;
14702	}
14703      else if (immediate == (immediate & 0x0000ff00))
14704	{
14705	  *immbits = immediate >> 8;
14706	  return 0x3;
14707	}
14708      else if (immediate == (immediate & 0x00ff0000))
14709	{
14710	  *immbits = immediate >> 16;
14711	  return 0x5;
14712	}
14713      else if (immediate == (immediate & 0xff000000))
14714	{
14715	  *immbits = immediate >> 24;
14716	  return 0x7;
14717	}
14718      if ((immediate & 0xffff) != (immediate >> 16))
14719	goto bad_immediate;
14720      immediate &= 0xffff;
14721    }
14722
14723  if (immediate == (immediate & 0x000000ff))
14724    {
14725      *immbits = immediate;
14726      return 0x9;
14727    }
14728  else if (immediate == (immediate & 0x0000ff00))
14729    {
14730      *immbits = immediate >> 8;
14731      return 0xb;
14732    }
14733
14734  bad_immediate:
14735  first_error (_("immediate value out of range"));
14736  return FAIL;
14737}
14738
14739static void
14740do_neon_logic (void)
14741{
14742  if (inst.operands[2].present && inst.operands[2].isreg)
14743    {
14744      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14745      neon_check_type (3, rs, N_IGNORE_TYPE);
14746      /* U bit and size field were set as part of the bitmask.  */
14747      NEON_ENCODE (INTEGER, inst);
14748      neon_three_same (neon_quad (rs), 0, -1);
14749    }
14750  else
14751    {
14752      const int three_ops_form = (inst.operands[2].present
14753				  && !inst.operands[2].isreg);
14754      const int immoperand = (three_ops_form ? 2 : 1);
14755      enum neon_shape rs = (three_ops_form
14756			    ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
14757			    : neon_select_shape (NS_DI, NS_QI, NS_NULL));
14758      struct neon_type_el et = neon_check_type (2, rs,
14759	N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
14760      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
14761      unsigned immbits;
14762      int cmode;
14763
14764      if (et.type == NT_invtype)
14765	return;
14766
14767      if (three_ops_form)
14768	constraint (inst.operands[0].reg != inst.operands[1].reg,
14769		    _("first and second operands shall be the same register"));
14770
14771      NEON_ENCODE (IMMED, inst);
14772
14773      immbits = inst.operands[immoperand].imm;
14774      if (et.size == 64)
14775	{
14776	  /* .i64 is a pseudo-op, so the immediate must be a repeating
14777	     pattern.  */
14778	  if (immbits != (inst.operands[immoperand].regisimm ?
14779			  inst.operands[immoperand].reg : 0))
14780	    {
14781	      /* Set immbits to an invalid constant.  */
14782	      immbits = 0xdeadbeef;
14783	    }
14784	}
14785
14786      switch (opcode)
14787	{
14788	case N_MNEM_vbic:
14789	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
14790	  break;
14791
14792	case N_MNEM_vorr:
14793	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
14794	  break;
14795
14796	case N_MNEM_vand:
14797	  /* Pseudo-instruction for VBIC.  */
14798	  neon_invert_size (&immbits, 0, et.size);
14799	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
14800	  break;
14801
14802	case N_MNEM_vorn:
14803	  /* Pseudo-instruction for VORR.  */
14804	  neon_invert_size (&immbits, 0, et.size);
14805	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
14806	  break;
14807
14808	default:
14809	  abort ();
14810	}
14811
14812      if (cmode == FAIL)
14813	return;
14814
14815      inst.instruction |= neon_quad (rs) << 6;
14816      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14817      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14818      inst.instruction |= cmode << 8;
14819      neon_write_immbits (immbits);
14820
14821      neon_dp_fixup (&inst);
14822    }
14823}
14824
14825static void
14826do_neon_bitfield (void)
14827{
14828  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14829  neon_check_type (3, rs, N_IGNORE_TYPE);
14830  neon_three_same (neon_quad (rs), 0, -1);
14831}
14832
14833static void
14834neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
14835		  unsigned destbits)
14836{
14837  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14838  struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
14839					    types | N_KEY);
14840  if (et.type == NT_float)
14841    {
14842      NEON_ENCODE (FLOAT, inst);
14843      neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
14844    }
14845  else
14846    {
14847      NEON_ENCODE (INTEGER, inst);
14848      neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
14849    }
14850}
14851
14852static void
14853do_neon_dyadic_if_su (void)
14854{
14855  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
14856}
14857
14858static void
14859do_neon_dyadic_if_su_d (void)
14860{
  /* This version only allows D registers, but that constraint is enforced
     during operand parsing, so we don't need to do anything extra here.  */
14863  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
14864}
14865
14866static void
14867do_neon_dyadic_if_i_d (void)
14868{
14869  /* The "untyped" case can't happen. Do this to stop the "U" bit being
14870     affected if we specify unsigned args.  */
14871  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
14872}
14873
14874enum vfp_or_neon_is_neon_bits
14875{
14876  NEON_CHECK_CC = 1,
14877  NEON_CHECK_ARCH = 2,
14878  NEON_CHECK_ARCH8 = 4
14879};
14880
/* Call this function for an instruction which may have belonged to either the
   VFP or Neon instruction sets, but turned out to be a Neon instruction (due
   to the operand types involved, etc.).  We have to check and/or fix up a
   couple of things:
14885
14886     - Make sure the user hasn't attempted to make a Neon instruction
14887       conditional.
14888     - Alter the value in the condition code field if necessary.
14889     - Make sure that the arch supports Neon instructions.
14890
14891   Which of these operations take place depends on bits from enum
14892   vfp_or_neon_is_neon_bits.
14893
14894   WARNING: This function has side effects! If NEON_CHECK_CC is used and the
14895   current instruction's condition is COND_ALWAYS, the condition field is
14896   changed to inst.uncond_value. This is necessary because instructions shared
14897   between VFP and Neon may be conditional for the VFP variants only, and the
14898   unconditional Neon version must have, e.g., 0xF in the condition field.  */
14899
14900static int
14901vfp_or_neon_is_neon (unsigned check)
14902{
14903  /* Conditions are always legal in Thumb mode (IT blocks).  */
14904  if (!thumb_mode && (check & NEON_CHECK_CC))
14905    {
14906      if (inst.cond != COND_ALWAYS)
14907	{
14908	  first_error (_(BAD_COND));
14909	  return FAIL;
14910	}
14911      if (inst.uncond_value != -1)
14912	inst.instruction |= inst.uncond_value << 28;
14913    }
14914
14915  if ((check & NEON_CHECK_ARCH)
14916      && !mark_feature_used (&fpu_neon_ext_v1))
14917    {
14918      first_error (_(BAD_FPU));
14919      return FAIL;
14920    }
14921
14922  if ((check & NEON_CHECK_ARCH8)
14923      && !mark_feature_used (&fpu_neon_ext_armv8))
14924    {
14925      first_error (_(BAD_FPU));
14926      return FAIL;
14927    }
14928
14929  return SUCCESS;
14930}
14931
14932static void
14933do_neon_addsub_if_i (void)
14934{
14935  if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
14936    return;
14937
14938  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14939    return;
14940
14941  /* The "untyped" case can't happen. Do this to stop the "U" bit being
14942     affected if we specify unsigned args.  */
14943  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
14944}
14945
14946/* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
14947   result to be:
14948     V<op> A,B     (A is operand 0, B is operand 2)
14949   to mean:
14950     V<op> A,B,A
14951   not:
14952     V<op> A,B,B
14953   so handle that case specially.  */
14954
14955static void
14956neon_exchange_operands (void)
14957{
14958  if (inst.operands[1].present)
14959    {
14960      void *scratch = xmalloc (sizeof (inst.operands[0]));
14961
14962      /* Swap operands[1] and operands[2].  */
14963      memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
14964      inst.operands[1] = inst.operands[2];
14965      memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
14966      free (scratch);
14967    }
14968  else
14969    {
14970      inst.operands[1] = inst.operands[2];
14971      inst.operands[2] = inst.operands[0];
14972    }
14973}
14974
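/* Worker for the Neon compare instructions.  REGTYPES is the set of element
   types accepted for the three-register forms and IMMTYPES the set accepted
   for the forms that compare against an immediate (#0).  If INVERT is set,
   the register operands are swapped first, so that (for instance) a "less
   than" comparison can be assembled using the "greater than" encoding with
   reversed operands.  */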
14975static void
14976neon_compare (unsigned regtypes, unsigned immtypes, int invert)
14977{
14978  if (inst.operands[2].isreg)
14979    {
14980      if (invert)
14981	neon_exchange_operands ();
14982      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
14983    }
14984  else
14985    {
14986      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14987      struct neon_type_el et = neon_check_type (2, rs,
14988	N_EQK | N_SIZ, immtypes | N_KEY);
14989
14990      NEON_ENCODE (IMMED, inst);
14991      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14992      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14993      inst.instruction |= LOW4 (inst.operands[1].reg);
14994      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14995      inst.instruction |= neon_quad (rs) << 6;
14996      inst.instruction |= (et.type == NT_float) << 10;
14997      inst.instruction |= neon_logbits (et.size) << 18;
14998
14999      neon_dp_fixup (&inst);
15000    }
15001}
15002
15003static void
15004do_neon_cmp (void)
15005{
15006  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, FALSE);
15007}
15008
15009static void
15010do_neon_cmp_inv (void)
15011{
15012  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, TRUE);
15013}
15014
15015static void
15016do_neon_ceq (void)
15017{
15018  neon_compare (N_IF_32, N_IF_32, FALSE);
15019}
15020
15021/* For multiply instructions, we have the possibility of 16-bit or 32-bit
15022   scalars, which are encoded in 5 bits, M : Rm.
15023   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
15024   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
15025   index in M.  */
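/* As a worked example of the scheme above: a 16-bit scalar D5[2] has regno 5
   and elno 2, so neon_scalar_for_mul returns 5 | (2 << 3) == 0b10101, i.e.
   Rm[2:0] = 5 and M:Rm[3] = 2; a 32-bit scalar D9[1] returns 9 | (1 << 4)
   == 0b11001, i.e. Rm[3:0] = 9 and M = 1.  */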
15026
15027static unsigned
15028neon_scalar_for_mul (unsigned scalar, unsigned elsize)
15029{
15030  unsigned regno = NEON_SCALAR_REG (scalar);
15031  unsigned elno = NEON_SCALAR_INDEX (scalar);
15032
15033  switch (elsize)
15034    {
15035    case 16:
15036      if (regno > 7 || elno > 3)
15037	goto bad_scalar;
15038      return regno | (elno << 3);
15039
15040    case 32:
15041      if (regno > 15 || elno > 1)
15042	goto bad_scalar;
15043      return regno | (elno << 4);
15044
15045    default:
15046    bad_scalar:
15047      first_error (_("scalar out of range for multiply instruction"));
15048    }
15049
15050  return 0;
15051}
15052
15053/* Encode multiply / multiply-accumulate scalar instructions.  */
15054
15055static void
15056neon_mul_mac (struct neon_type_el et, int ubit)
15057{
15058  unsigned scalar;
15059
15060  /* Give a more helpful error message if we have an invalid type.  */
15061  if (et.type == NT_invtype)
15062    return;
15063
15064  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
15065  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15066  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15067  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
15068  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
15069  inst.instruction |= LOW4 (scalar);
15070  inst.instruction |= HI1 (scalar) << 5;
15071  inst.instruction |= (et.type == NT_float) << 8;
15072  inst.instruction |= neon_logbits (et.size) << 20;
15073  inst.instruction |= (ubit != 0) << 24;
15074
15075  neon_dp_fixup (&inst);
15076}
15077
15078static void
15079do_neon_mac_maybe_scalar (void)
15080{
15081  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
15082    return;
15083
15084  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15085    return;
15086
15087  if (inst.operands[2].isscalar)
15088    {
15089      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
15090      struct neon_type_el et = neon_check_type (3, rs,
15091	N_EQK, N_EQK, N_I16 | N_I32 | N_F_16_32 | N_KEY);
15092      NEON_ENCODE (SCALAR, inst);
15093      neon_mul_mac (et, neon_quad (rs));
15094    }
15095  else
15096    {
15097      /* The "untyped" case can't happen.  Do this to stop the "U" bit being
15098	 affected if we specify unsigned args.  */
15099      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
15100    }
15101}
15102
15103static void
15104do_neon_fmac (void)
15105{
15106  if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
15107    return;
15108
15109  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15110    return;
15111
15112  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
15113}
15114
15115static void
15116do_neon_tst (void)
15117{
15118  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15119  struct neon_type_el et = neon_check_type (3, rs,
15120    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
15121  neon_three_same (neon_quad (rs), 0, et.size);
15122}
15123
15124/* VMUL with 3 registers allows the P8 type. The scalar version supports the
15125   same types as the MAC equivalents. The polynomial type for this instruction
15126   is encoded the same as the integer type.  */
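/* For example, "vmul.p8 d0, d1, d2" is accepted by the three-register form
   below, whereas the scalar form (e.g. "vmul.f32 d0, d1, d2[0]") is routed
   through do_neon_mac_maybe_scalar and has no polynomial variant.  */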
15127
15128static void
15129do_neon_mul (void)
15130{
15131  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
15132    return;
15133
15134  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15135    return;
15136
15137  if (inst.operands[2].isscalar)
15138    do_neon_mac_maybe_scalar ();
15139  else
15140    neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F16 | N_F32 | N_P8, 0);
15141}
15142
15143static void
15144do_neon_qdmulh (void)
15145{
15146  if (inst.operands[2].isscalar)
15147    {
15148      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
15149      struct neon_type_el et = neon_check_type (3, rs,
15150	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
15151      NEON_ENCODE (SCALAR, inst);
15152      neon_mul_mac (et, neon_quad (rs));
15153    }
15154  else
15155    {
15156      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15157      struct neon_type_el et = neon_check_type (3, rs,
15158	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
15159      NEON_ENCODE (INTEGER, inst);
15160      /* The U bit (rounding) comes from the bit mask.  */
15161      neon_three_same (neon_quad (rs), 0, et.size);
15162    }
15163}
15164
15165static void
15166do_neon_qrdmlah (void)
15167{
15168  /* Check we're on the correct architecture.  */
15169  if (!mark_feature_used (&fpu_neon_ext_armv8))
15170    inst.error =
15171      _("instruction form not available on this architecture.");
15172  else if (!mark_feature_used (&fpu_neon_ext_v8_1))
15173    {
15174      as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
15175      record_feature_use (&fpu_neon_ext_v8_1);
15176    }
15177
15178  if (inst.operands[2].isscalar)
15179    {
15180      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
15181      struct neon_type_el et = neon_check_type (3, rs,
15182	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
15183      NEON_ENCODE (SCALAR, inst);
15184      neon_mul_mac (et, neon_quad (rs));
15185    }
15186  else
15187    {
15188      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15189      struct neon_type_el et = neon_check_type (3, rs,
15190	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
15191      NEON_ENCODE (INTEGER, inst);
15192      /* The U bit (rounding) comes from the bit mask.  */
15193      neon_three_same (neon_quad (rs), 0, et.size);
15194    }
15195}
15196
15197static void
15198do_neon_fcmp_absolute (void)
15199{
15200  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15201  struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
15202					    N_F_16_32 | N_KEY);
15203  /* The size field comes from the bit mask.  */
15204  neon_three_same (neon_quad (rs), 1, et.size == 16 ? (int) et.size : -1);
15205}
15206
15207static void
15208do_neon_fcmp_absolute_inv (void)
15209{
15210  neon_exchange_operands ();
15211  do_neon_fcmp_absolute ();
15212}
15213
15214static void
15215do_neon_step (void)
15216{
15217  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15218  struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
15219					    N_F_16_32 | N_KEY);
15220  neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
15221}
15222
15223static void
15224do_neon_abs_neg (void)
15225{
15226  enum neon_shape rs;
15227  struct neon_type_el et;
15228
15229  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
15230    return;
15231
15232  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15233    return;
15234
15235  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15236  et = neon_check_type (2, rs, N_EQK, N_S_32 | N_F_16_32 | N_KEY);
15237
15238  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15239  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15240  inst.instruction |= LOW4 (inst.operands[1].reg);
15241  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15242  inst.instruction |= neon_quad (rs) << 6;
15243  inst.instruction |= (et.type == NT_float) << 10;
15244  inst.instruction |= neon_logbits (et.size) << 18;
15245
15246  neon_dp_fixup (&inst);
15247}
15248
15249static void
15250do_neon_sli (void)
15251{
15252  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15253  struct neon_type_el et = neon_check_type (2, rs,
15254    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
15255  int imm = inst.operands[2].imm;
15256  constraint (imm < 0 || (unsigned)imm >= et.size,
15257	      _("immediate out of range for insert"));
15258  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
15259}
15260
15261static void
15262do_neon_sri (void)
15263{
15264  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15265  struct neon_type_el et = neon_check_type (2, rs,
15266    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
15267  int imm = inst.operands[2].imm;
15268  constraint (imm < 1 || (unsigned)imm > et.size,
15269	      _("immediate out of range for insert"));
15270  neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
15271}
15272
15273static void
15274do_neon_qshlu_imm (void)
15275{
15276  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15277  struct neon_type_el et = neon_check_type (2, rs,
15278    N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
15279  int imm = inst.operands[2].imm;
15280  constraint (imm < 0 || (unsigned)imm >= et.size,
15281	      _("immediate out of range for shift"));
15282  /* Only encodes the 'U present' variant of the instruction.
15283     In this case, signed types have OP (bit 8) set to 0.
15284     Unsigned types have OP set to 1.  */
15285  inst.instruction |= (et.type == NT_unsigned) << 8;
15286  /* The rest of the bits are the same as other immediate shifts.  */
15287  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
15288}
15289
15290static void
15291do_neon_qmovn (void)
15292{
15293  struct neon_type_el et = neon_check_type (2, NS_DQ,
15294    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
15295  /* Saturating move where operands can be signed or unsigned, and the
15296     destination has the same signedness.  */
15297  NEON_ENCODE (INTEGER, inst);
15298  if (et.type == NT_unsigned)
15299    inst.instruction |= 0xc0;
15300  else
15301    inst.instruction |= 0x80;
15302  neon_two_same (0, 1, et.size / 2);
15303}
15304
15305static void
15306do_neon_qmovun (void)
15307{
15308  struct neon_type_el et = neon_check_type (2, NS_DQ,
15309    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
15310  /* Saturating move with unsigned results. Operands must be signed.  */
15311  NEON_ENCODE (INTEGER, inst);
15312  neon_two_same (0, 1, et.size / 2);
15313}
15314
15315static void
15316do_neon_rshift_sat_narrow (void)
15317{
15318  /* FIXME: Types for narrowing. If operands are signed, results can be signed
15319     or unsigned. If operands are unsigned, results must also be unsigned.  */
15320  struct neon_type_el et = neon_check_type (2, NS_DQI,
15321    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
15322  int imm = inst.operands[2].imm;
15323  /* This gets the bounds check, size encoding and immediate bits calculation
15324     right.  */
15325  et.size /= 2;
15326
15327  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
15328     VQMOVN.I<size> <Dd>, <Qm>.  */
15329  if (imm == 0)
15330    {
15331      inst.operands[2].present = 0;
15332      inst.instruction = N_MNEM_vqmovn;
15333      do_neon_qmovn ();
15334      return;
15335    }
15336
15337  constraint (imm < 1 || (unsigned)imm > et.size,
15338	      _("immediate out of range"));
15339  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
15340}
15341
15342static void
15343do_neon_rshift_sat_narrow_u (void)
15344{
15345  /* FIXME: Types for narrowing. If operands are signed, results can be signed
15346     or unsigned. If operands are unsigned, results must also be unsigned.  */
15347  struct neon_type_el et = neon_check_type (2, NS_DQI,
15348    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
15349  int imm = inst.operands[2].imm;
15350  /* This gets the bounds check, size encoding and immediate bits calculation
15351     right.  */
15352  et.size /= 2;
15353
15354  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
15355     VQMOVUN.I<size> <Dd>, <Qm>.  */
15356  if (imm == 0)
15357    {
15358      inst.operands[2].present = 0;
15359      inst.instruction = N_MNEM_vqmovun;
15360      do_neon_qmovun ();
15361      return;
15362    }
15363
15364  constraint (imm < 1 || (unsigned)imm > et.size,
15365	      _("immediate out of range"));
15366  /* FIXME: The manual is kind of unclear about what value U should have in
15367     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
15368     must be 1.  */
15369  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
15370}
15371
15372static void
15373do_neon_movn (void)
15374{
15375  struct neon_type_el et = neon_check_type (2, NS_DQ,
15376    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
15377  NEON_ENCODE (INTEGER, inst);
15378  neon_two_same (0, 1, et.size / 2);
15379}
15380
15381static void
15382do_neon_rshift_narrow (void)
15383{
15384  struct neon_type_el et = neon_check_type (2, NS_DQI,
15385    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
15386  int imm = inst.operands[2].imm;
15387  /* This gets the bounds check, size encoding and immediate bits calculation
15388     right.  */
15389  et.size /= 2;
15390
15391  /* If the immediate is zero then this is a pseudo-instruction for
15392     VMOVN.I<size> <Dd>, <Qm>.  */
15393  if (imm == 0)
15394    {
15395      inst.operands[2].present = 0;
15396      inst.instruction = N_MNEM_vmovn;
15397      do_neon_movn ();
15398      return;
15399    }
15400
15401  constraint (imm < 1 || (unsigned)imm > et.size,
15402	      _("immediate out of range for narrowing operation"));
15403  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
15404}
15405
15406static void
15407do_neon_shll (void)
15408{
15409  /* FIXME: Type checking when lengthening.  */
15410  struct neon_type_el et = neon_check_type (2, NS_QDI,
15411    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
15412  unsigned imm = inst.operands[2].imm;
15413
15414  if (imm == et.size)
15415    {
15416      /* Maximum shift variant.  */
15417      NEON_ENCODE (INTEGER, inst);
15418      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15419      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15420      inst.instruction |= LOW4 (inst.operands[1].reg);
15421      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15422      inst.instruction |= neon_logbits (et.size) << 18;
15423
15424      neon_dp_fixup (&inst);
15425    }
15426  else
15427    {
15428      /* A more-specific type check for non-max versions.  */
15429      et = neon_check_type (2, NS_QDI,
15430	N_EQK | N_DBL, N_SU_32 | N_KEY);
15431      NEON_ENCODE (IMMED, inst);
15432      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
15433    }
15434}
15435
15436/* Check the various types for the VCVT instruction, and return which flavour
15437   of conversion the current instruction is.  */
15438
15439#define CVT_FLAVOUR_VAR							      \
15440  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg,   "ftosls", "ftosis", "ftosizs") \
15441  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg,   "ftouls", "ftouis", "ftouizs") \
15442  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg,   "fsltos", "fsitos", NULL)      \
15443  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg,   "fultos", "fuitos", NULL)      \
15444  /* Half-precision conversions.  */					      \
15445  CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
15446  CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
15447  CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL)	      \
15448  CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL)	      \
15449  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg,   NULL,     NULL,     NULL)      \
15450  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg,   NULL,     NULL,     NULL)      \
15451  /* New VCVT instructions introduced by ARMv8.2 fp16 extension.	      \
15452     Compared with single/double precision variants, only the co-processor    \
15453     field is different, so the encoding flow is reused here.  */	      \
15454  CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL)    \
15455  CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL)    \
15456  CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
15457  CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
15458  /* VFP instructions.  */						      \
15459  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP,       NULL,     "fcvtsd", NULL)      \
15460  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP,       NULL,     "fcvtds", NULL)      \
15461  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
15462  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
15463  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL)      \
15464  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL)      \
15465  /* VFP instructions with bitshift.  */				      \
15466  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL,     NULL)      \
15467  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL,     NULL)      \
15468  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL,     NULL)      \
15469  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL,     NULL)      \
15470  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL,     NULL)      \
15471  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL,     NULL)      \
15472  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL,     NULL)      \
15473  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL,     NULL)
15474
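/* Each CVT_VAR row above describes one conversion "flavour": the suffix used
   to name its enumerator, the destination type bits, the source type bits, a
   third mask OR'd into both of those during type checking (either the
   "whole_reg" local of get_neon_cvt_flavour below or N_VFP), and the three
   VFP-style opcode names used for the bitshift-immediate, plain and
   round-towards-zero forms (NULL where no such VFP mnemonic exists).
   CVT_VAR is re-defined before each expansion of CVT_FLAVOUR_VAR so that the
   same table yields the enumeration, the type checks and the opcode-name
   arrays.  */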
15475#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
15476  neon_cvt_flavour_##C,
15477
15478/* The different types of conversions we can do.  */
15479enum neon_cvt_flavour
15480{
15481  CVT_FLAVOUR_VAR
15482  neon_cvt_flavour_invalid,
15483  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
15484};
15485
15486#undef CVT_VAR
15487
15488static enum neon_cvt_flavour
15489get_neon_cvt_flavour (enum neon_shape rs)
15490{
15491#define CVT_VAR(C,X,Y,R,BSN,CN,ZN)			\
15492  et = neon_check_type (2, rs, (R) | (X), (R) | (Y));	\
15493  if (et.type != NT_invtype)				\
15494    {							\
15495      inst.error = NULL;				\
15496      return (neon_cvt_flavour_##C);			\
15497    }
15498
15499  struct neon_type_el et;
15500  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
15501			|| rs == NS_FF) ? N_VFP : 0;
15502  /* The instruction versions which take an immediate take one register
15503     argument, which is extended to the width of the full register. Thus the
15504     "source" and "destination" registers must have the same width.  Hack that
15505     here by making the size equal to the key (wider, in this case) operand.  */
15506  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;
15507
15508  CVT_FLAVOUR_VAR;
15509
15510  return neon_cvt_flavour_invalid;
15511#undef CVT_VAR
15512}
15513
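/* The rounding behaviour requested by a conversion or rounding instruction:
   a/n/p/m correspond to the ARMv8 VCVT{A,N,P,M} and VRINT{A,N,P,M} modes
   (to nearest with ties away from zero, to nearest even, towards +Inf,
   towards -Inf), z is round towards zero, and x and r select the forms which
   round according to the FPSCR rounding mode (e.g. VCVTR, VRINTX, VRINTR).  */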
15514enum neon_cvt_mode
15515{
15516  neon_cvt_mode_a,
15517  neon_cvt_mode_n,
15518  neon_cvt_mode_p,
15519  neon_cvt_mode_m,
15520  neon_cvt_mode_z,
15521  neon_cvt_mode_x,
15522  neon_cvt_mode_r
15523};
15524
15525/* Neon-syntax VFP conversions.  */
15526
15527static void
15528do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
15529{
15530  const char *opname = 0;
15531
15532  if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI
15533      || rs == NS_FHI || rs == NS_HFI)
15534    {
15535      /* Conversions with immediate bitshift.  */
15536      const char *enc[] =
15537	{
15538#define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
15539	  CVT_FLAVOUR_VAR
15540	  NULL
15541#undef CVT_VAR
15542	};
15543
15544      if (flavour < (int) ARRAY_SIZE (enc))
15545	{
15546	  opname = enc[flavour];
15547	  constraint (inst.operands[0].reg != inst.operands[1].reg,
15548		      _("operands 0 and 1 must be the same register"));
15549	  inst.operands[1] = inst.operands[2];
15550	  memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
15551	}
15552    }
15553  else
15554    {
15555      /* Conversions without bitshift.  */
15556      const char *enc[] =
15557	{
15558#define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
15559	  CVT_FLAVOUR_VAR
15560	  NULL
15561#undef CVT_VAR
15562	};
15563
15564      if (flavour < (int) ARRAY_SIZE (enc))
15565	opname = enc[flavour];
15566    }
15567
15568  if (opname)
15569    do_vfp_nsyn_opcode (opname);
15570
15571  /* ARMv8.2 fp16 VCVT instruction.  */
15572  if (flavour == neon_cvt_flavour_s32_f16
15573      || flavour == neon_cvt_flavour_u32_f16
15574      || flavour == neon_cvt_flavour_f16_u32
15575      || flavour == neon_cvt_flavour_f16_s32)
15576    do_scalar_fp16_v82_encode ();
15577}
15578
15579static void
15580do_vfp_nsyn_cvtz (void)
15581{
15582  enum neon_shape rs = neon_select_shape (NS_FH, NS_FF, NS_FD, NS_NULL);
15583  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
15584  const char *enc[] =
15585    {
15586#define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
15587      CVT_FLAVOUR_VAR
15588      NULL
15589#undef CVT_VAR
15590    };
15591
15592  if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
15593    do_vfp_nsyn_opcode (enc[flavour]);
15594}
15595
15596static void
15597do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
15598		      enum neon_cvt_mode mode)
15599{
15600  int sz, op;
15601  int rm;
15602
15603  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
15604     D register operands.  */
15605  if (flavour == neon_cvt_flavour_s32_f64
15606      || flavour == neon_cvt_flavour_u32_f64)
15607    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
15608		_(BAD_FPU));
15609
15610  if (flavour == neon_cvt_flavour_s32_f16
15611      || flavour == neon_cvt_flavour_u32_f16)
15612    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
15613		_(BAD_FP16));
15614
15615  set_it_insn_type (OUTSIDE_IT_INSN);
15616
15617  switch (flavour)
15618    {
15619    case neon_cvt_flavour_s32_f64:
15620      sz = 1;
15621      op = 1;
15622      break;
15623    case neon_cvt_flavour_s32_f32:
15624      sz = 0;
15625      op = 1;
15626      break;
15627    case neon_cvt_flavour_s32_f16:
15628      sz = 0;
15629      op = 1;
15630      break;
15631    case neon_cvt_flavour_u32_f64:
15632      sz = 1;
15633      op = 0;
15634      break;
15635    case neon_cvt_flavour_u32_f32:
15636      sz = 0;
15637      op = 0;
15638      break;
15639    case neon_cvt_flavour_u32_f16:
15640      sz = 0;
15641      op = 0;
15642      break;
15643    default:
15644      first_error (_("invalid instruction shape"));
15645      return;
15646    }
15647
15648  switch (mode)
15649    {
15650    case neon_cvt_mode_a: rm = 0; break;
15651    case neon_cvt_mode_n: rm = 1; break;
15652    case neon_cvt_mode_p: rm = 2; break;
15653    case neon_cvt_mode_m: rm = 3; break;
15654    default: first_error (_("invalid rounding mode")); return;
15655    }
15656
15657  NEON_ENCODE (FPV8, inst);
15658  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
15659  encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
15660  inst.instruction |= sz << 8;
15661
15662  /* ARMv8.2 fp16 VCVT instruction.  */
15663  if (flavour == neon_cvt_flavour_s32_f16
15664      || flavour == neon_cvt_flavour_u32_f16)
15665    do_scalar_fp16_v82_encode ();
15666  inst.instruction |= op << 7;
15667  inst.instruction |= rm << 16;
15668  inst.instruction |= 0xf0000000;
15669  inst.is_neon = TRUE;
15670}
15671
15672static void
15673do_neon_cvt_1 (enum neon_cvt_mode mode)
15674{
15675  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
15676					  NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ,
15677					  NS_FH, NS_HF, NS_FHI, NS_HFI,
15678					  NS_NULL);
15679  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
15680
15681  if (flavour == neon_cvt_flavour_invalid)
15682    return;
15683
15684  /* PR11109: Handle round-to-zero for VCVT conversions.  */
15685  if (mode == neon_cvt_mode_z
15686      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
15687      && (flavour == neon_cvt_flavour_s16_f16
15688	  || flavour == neon_cvt_flavour_u16_f16
15689	  || flavour == neon_cvt_flavour_s32_f32
15690	  || flavour == neon_cvt_flavour_u32_f32
15691	  || flavour == neon_cvt_flavour_s32_f64
15692	  || flavour == neon_cvt_flavour_u32_f64)
15693      && (rs == NS_FD || rs == NS_FF))
15694    {
15695      do_vfp_nsyn_cvtz ();
15696      return;
15697    }
15698
15699  /* ARMv8.2 fp16 VCVT conversions.  */
15700  if (mode == neon_cvt_mode_z
15701      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16)
15702      && (flavour == neon_cvt_flavour_s32_f16
15703	  || flavour == neon_cvt_flavour_u32_f16)
15704      && (rs == NS_FH))
15705    {
15706      do_vfp_nsyn_cvtz ();
15707      do_scalar_fp16_v82_encode ();
15708      return;
15709    }
15710
15711  /* VFP rather than Neon conversions.  */
15712  if (flavour >= neon_cvt_flavour_first_fp)
15713    {
15714      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
15715	do_vfp_nsyn_cvt (rs, flavour);
15716      else
15717	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
15718
15719      return;
15720    }
15721
15722  switch (rs)
15723    {
15724    case NS_DDI:
15725    case NS_QQI:
15726      {
15727	unsigned immbits;
15728	unsigned enctab[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
15729			     0x0000100, 0x1000100, 0x0, 0x1000000};
15730
15731	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15732	  return;
15733
15734	/* Fixed-point conversion with #0 immediate is encoded as an
15735	   integer conversion.  */
15736	if (inst.operands[2].present && inst.operands[2].imm == 0)
15737	  goto int_encode;
15738	NEON_ENCODE (IMMED, inst);
15739	if (flavour != neon_cvt_flavour_invalid)
15740	  inst.instruction |= enctab[flavour];
15741	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15742	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15743	inst.instruction |= LOW4 (inst.operands[1].reg);
15744	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15745	inst.instruction |= neon_quad (rs) << 6;
15746	inst.instruction |= 1 << 21;
15747	if (flavour < neon_cvt_flavour_s16_f16)
15748	  {
15749	    inst.instruction |= 1 << 21;
15750	    immbits = 32 - inst.operands[2].imm;
15751	    inst.instruction |= immbits << 16;
15752	  }
15753	else
15754	  {
15755	    inst.instruction |= 3 << 20;
15756	    immbits = 16 - inst.operands[2].imm;
15757	    inst.instruction |= immbits << 16;
15758	    inst.instruction &= ~(1 << 9);
15759	  }
15760
15761	neon_dp_fixup (&inst);
15762      }
15763      break;
15764
15765    case NS_DD:
15766    case NS_QQ:
15767      if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
15768	{
15769	  NEON_ENCODE (FLOAT, inst);
15770	  set_it_insn_type (OUTSIDE_IT_INSN);
15771
15772	  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
15773	    return;
15774
15775	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15776	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15777	  inst.instruction |= LOW4 (inst.operands[1].reg);
15778	  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15779	  inst.instruction |= neon_quad (rs) << 6;
15780	  inst.instruction |= (flavour == neon_cvt_flavour_u16_f16
15781			       || flavour == neon_cvt_flavour_u32_f32) << 7;
15782	  inst.instruction |= mode << 8;
15783	  if (flavour == neon_cvt_flavour_u16_f16
15784	      || flavour == neon_cvt_flavour_s16_f16)
15785	    /* Mask off the original size bits and reencode them.  */
15786	    inst.instruction = ((inst.instruction & 0xfff3ffff) | (1 << 18));
15787
15788	  if (thumb_mode)
15789	    inst.instruction |= 0xfc000000;
15790	  else
15791	    inst.instruction |= 0xf0000000;
15792	}
15793      else
15794	{
15795    int_encode:
15796	  {
15797	    unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080,
15798				  0x100, 0x180, 0x0, 0x080};
15799
15800	    NEON_ENCODE (INTEGER, inst);
15801
15802	    if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15803	      return;
15804
15805	    if (flavour != neon_cvt_flavour_invalid)
15806	      inst.instruction |= enctab[flavour];
15807
15808	    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15809	    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15810	    inst.instruction |= LOW4 (inst.operands[1].reg);
15811	    inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15812	    inst.instruction |= neon_quad (rs) << 6;
15813	    if (flavour >= neon_cvt_flavour_s16_f16
15814		&& flavour <= neon_cvt_flavour_f16_u16)
15815	      /* Half precision.  */
15816	      inst.instruction |= 1 << 18;
15817	    else
15818	      inst.instruction |= 2 << 18;
15819
15820	    neon_dp_fixup (&inst);
15821	  }
15822	}
15823      break;
15824
15825    /* Half-precision conversions for Advanced SIMD -- Neon.  */
15826    case NS_QD:
15827    case NS_DQ:
15828
15829      if (rs == NS_DQ
15830	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
15831	{
15832	  as_bad (_("operand size must match register width"));
15833	  break;
15834	}
15835
15836      if (rs == NS_QD
15837	  && (inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16))
15838	{
15839	  as_bad (_("operand size must match register width"));
15840	  break;
15841	}
15842
15843      if (rs == NS_DQ)
15844	inst.instruction = 0x3b60600;
15845      else
15846	inst.instruction = 0x3b60700;
15847
15848      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15849      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15850      inst.instruction |= LOW4 (inst.operands[1].reg);
15851      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15852      neon_dp_fixup (&inst);
15853      break;
15854
15855    default:
15856      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
15857      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
15858	do_vfp_nsyn_cvt (rs, flavour);
15859      else
15860	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
15861    }
15862}
15863
15864static void
15865do_neon_cvtr (void)
15866{
15867  do_neon_cvt_1 (neon_cvt_mode_x);
15868}
15869
15870static void
15871do_neon_cvt (void)
15872{
15873  do_neon_cvt_1 (neon_cvt_mode_z);
15874}
15875
15876static void
15877do_neon_cvta (void)
15878{
15879  do_neon_cvt_1 (neon_cvt_mode_a);
15880}
15881
15882static void
15883do_neon_cvtn (void)
15884{
15885  do_neon_cvt_1 (neon_cvt_mode_n);
15886}
15887
15888static void
15889do_neon_cvtp (void)
15890{
15891  do_neon_cvt_1 (neon_cvt_mode_p);
15892}
15893
15894static void
15895do_neon_cvtm (void)
15896{
15897  do_neon_cvt_1 (neon_cvt_mode_m);
15898}
15899
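/* Worker for VCVTB/VCVTT.  T selects the top (VCVTT) rather than the bottom
   (VCVTB) half of the half-precision register, TO is set when converting to
   half precision, and IS_DOUBLE selects the double-precision forms, which
   additionally require the ARMv8 VFP extension.  */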
15900static void
15901do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
15902{
15903  if (is_double)
15904    mark_feature_used (&fpu_vfp_ext_armv8);
15905
15906  encode_arm_vfp_reg (inst.operands[0].reg,
15907		      (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
15908  encode_arm_vfp_reg (inst.operands[1].reg,
15909		      (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
15910  inst.instruction |= to ? 0x10000 : 0;
15911  inst.instruction |= t ? 0x80 : 0;
15912  inst.instruction |= is_double ? 0x100 : 0;
15913  do_vfp_cond_or_thumb ();
15914}
15915
15916static void
15917do_neon_cvttb_1 (bfd_boolean t)
15918{
15919  enum neon_shape rs = neon_select_shape (NS_HF, NS_HD, NS_FH, NS_FF, NS_FD,
15920					  NS_DF, NS_DH, NS_NULL);
15921
15922  if (rs == NS_NULL)
15923    return;
15924  else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
15925    {
15926      inst.error = NULL;
15927      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
15928    }
15929  else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
15930    {
15931      inst.error = NULL;
15932      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
15933    }
15934  else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
15935    {
15936      /* The VCVTB and VCVTT instructions with D-register operands
15937         don't work for SP only targets.  */
15938      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
15939		  _(BAD_FPU));
15940
15941      inst.error = NULL;
15942      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
15943    }
15944  else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
15945    {
15946      /* The VCVTB and VCVTT instructions with D-register operands
15947         don't work for SP only targets.  */
15948      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
15949		  _(BAD_FPU));
15950
15951      inst.error = NULL;
15952      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
15953    }
15954  else
15955    return;
15956}
15957
15958static void
15959do_neon_cvtb (void)
15960{
15961  do_neon_cvttb_1 (FALSE);
15962}
15963
15964
15965static void
15966do_neon_cvtt (void)
15967{
15968  do_neon_cvttb_1 (TRUE);
15969}
15970
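/* Encode the immediate forms of VMOV/VMVN.  neon_cmode_for_move_imm chooses a
   cmode/op encoding for the requested immediate; if none exists, the
   immediate is bitwise inverted and the instruction flipped between VMOV and
   VMVN before retrying, since some immediates are only representable by one
   of the two.  */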
15971static void
15972neon_move_immediate (void)
15973{
15974  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
15975  struct neon_type_el et = neon_check_type (2, rs,
15976    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
15977  unsigned immlo, immhi = 0, immbits;
15978  int op, cmode, float_p;
15979
15980  constraint (et.type == NT_invtype,
15981	      _("operand size must be specified for immediate VMOV"));
15982
15983  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
15984  op = (inst.instruction & (1 << 5)) != 0;
15985
15986  immlo = inst.operands[1].imm;
15987  if (inst.operands[1].regisimm)
15988    immhi = inst.operands[1].reg;
15989
15990  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
15991	      _("immediate has bits set outside the operand size"));
15992
15993  float_p = inst.operands[1].immisfloat;
15994
15995  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
15996					et.size, et.type)) == FAIL)
15997    {
15998      /* Invert relevant bits only.  */
15999      neon_invert_size (&immlo, &immhi, et.size);
16000      /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
16001	 with one or the other; those cases are caught by
16002	 neon_cmode_for_move_imm.  */
16003      op = !op;
16004      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
16005					    &op, et.size, et.type)) == FAIL)
16006	{
16007	  first_error (_("immediate out of range"));
16008	  return;
16009	}
16010    }
16011
16012  inst.instruction &= ~(1 << 5);
16013  inst.instruction |= op << 5;
16014
16015  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16016  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16017  inst.instruction |= neon_quad (rs) << 6;
16018  inst.instruction |= cmode << 8;
16019
16020  neon_write_immbits (immbits);
16021}
16022
16023static void
16024do_neon_mvn (void)
16025{
16026  if (inst.operands[1].isreg)
16027    {
16028      enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16029
16030      NEON_ENCODE (INTEGER, inst);
16031      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16032      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16033      inst.instruction |= LOW4 (inst.operands[1].reg);
16034      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16035      inst.instruction |= neon_quad (rs) << 6;
16036    }
16037  else
16038    {
16039      NEON_ENCODE (IMMED, inst);
16040      neon_move_immediate ();
16041    }
16042
16043  neon_dp_fixup (&inst);
16044}
16045
16046/* Encode instructions of form:
16047
16048  |28/24|23|22|21 20|19 16|15 12|11    8|7|6|5|4|3  0|
16049  |  U  |x |D |size | Rn  | Rd  |x x x x|N|x|M|x| Rm |  */
16050
16051static void
16052neon_mixed_length (struct neon_type_el et, unsigned size)
16053{
16054  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16055  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16056  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16057  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16058  inst.instruction |= LOW4 (inst.operands[2].reg);
16059  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16060  inst.instruction |= (et.type == NT_unsigned) << 24;
16061  inst.instruction |= neon_logbits (size) << 20;
16062
16063  neon_dp_fixup (&inst);
16064}
16065
16066static void
16067do_neon_dyadic_long (void)
16068{
16069  /* FIXME: Type checking for lengthening op.  */
16070  struct neon_type_el et = neon_check_type (3, NS_QDD,
16071    N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
16072  neon_mixed_length (et, et.size);
16073}
16074
16075static void
16076do_neon_abal (void)
16077{
16078  struct neon_type_el et = neon_check_type (3, NS_QDD,
16079    N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
16080  neon_mixed_length (et, et.size);
16081}
16082
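/* Worker for long multiply and multiply-accumulate instructions whose third
   operand may be either a scalar or a D register.  Note that REGTYPES is the
   type mask checked when operand 2 is a scalar and SCALARTYPES the mask
   checked when it is a register.  */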
16083static void
16084neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
16085{
16086  if (inst.operands[2].isscalar)
16087    {
16088      struct neon_type_el et = neon_check_type (3, NS_QDS,
16089	N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
16090      NEON_ENCODE (SCALAR, inst);
16091      neon_mul_mac (et, et.type == NT_unsigned);
16092    }
16093  else
16094    {
16095      struct neon_type_el et = neon_check_type (3, NS_QDD,
16096	N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
16097      NEON_ENCODE (INTEGER, inst);
16098      neon_mixed_length (et, et.size);
16099    }
16100}
16101
16102static void
16103do_neon_mac_maybe_scalar_long (void)
16104{
16105  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
16106}
16107
16108static void
16109do_neon_dyadic_wide (void)
16110{
16111  struct neon_type_el et = neon_check_type (3, NS_QQD,
16112    N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
16113  neon_mixed_length (et, et.size);
16114}
16115
16116static void
16117do_neon_dyadic_narrow (void)
16118{
16119  struct neon_type_el et = neon_check_type (3, NS_QDD,
16120    N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
16121  /* Operand sign is unimportant, and the U bit is part of the opcode,
16122     so force the operand type to integer.  */
16123  et.type = NT_integer;
16124  neon_mixed_length (et, et.size / 2);
16125}
16126
16127static void
16128do_neon_mul_sat_scalar_long (void)
16129{
16130  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
16131}
16132
16133static void
16134do_neon_vmull (void)
16135{
16136  if (inst.operands[2].isscalar)
16137    do_neon_mac_maybe_scalar_long ();
16138  else
16139    {
16140      struct neon_type_el et = neon_check_type (3, NS_QDD,
16141	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);
16142
16143      if (et.type == NT_poly)
16144	NEON_ENCODE (POLY, inst);
16145      else
16146	NEON_ENCODE (INTEGER, inst);
16147
16148      /* For the polynomial encoding the U bit must be zero, and the size must
16149	 be 8 (encoded as 0b00) or, on ARMv8 or later, 64 (encoded,
16150	 non-obviously, as 0b10).  */
16151      if (et.size == 64)
16152	{
16153	  /* Check we're on the correct architecture.  */
16154	  if (!mark_feature_used (&fpu_crypto_ext_armv8))
16155	    inst.error =
16156	      _("instruction form not available on this architecture.");
16157
16158	  et.size = 32;
16159	}
16160
16161      neon_mixed_length (et, et.size);
16162    }
16163}
16164
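/* VEXT.  The #imm operand counts elements of the given type, so it is scaled
   to a byte offset here; for example (illustrative), "vext.32 d0, d1, d2, #1"
   gives a byte offset of 4.  The offset must fit within the width of the
   source registers (8 bytes for D, 16 for Q).  */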
16165static void
16166do_neon_ext (void)
16167{
16168  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
16169  struct neon_type_el et = neon_check_type (3, rs,
16170    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
16171  unsigned imm = (inst.operands[3].imm * et.size) / 8;
16172
16173  constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
16174	      _("shift out of range"));
16175  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16176  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16177  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16178  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16179  inst.instruction |= LOW4 (inst.operands[2].reg);
16180  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16181  inst.instruction |= neon_quad (rs) << 6;
16182  inst.instruction |= imm << 8;
16183
16184  neon_dp_fixup (&inst);
16185}
16186
16187static void
16188do_neon_rev (void)
16189{
16190  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16191  struct neon_type_el et = neon_check_type (2, rs,
16192    N_EQK, N_8 | N_16 | N_32 | N_KEY);
16193  unsigned op = (inst.instruction >> 7) & 3;
16194  /* N (the width of the reversed regions) is encoded as part of the bitmask.
16195     We extract it here to check that the elements to be reversed are smaller
16196     than the reversal region; otherwise we'd get a reserved instruction.  */
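  /* For example, "vrev32.32 d0, d1" is rejected here because a 32-bit element
     cannot be reversed within a 32-bit region, whereas "vrev32.16" and
     "vrev32.8" are fine.  */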
16197  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
16198  gas_assert (elsize != 0);
16199  constraint (et.size >= elsize,
16200	      _("elements must be smaller than reversal region"));
16201  neon_two_same (neon_quad (rs), 1, et.size);
16202}
16203
16204static void
16205do_neon_dup (void)
16206{
16207  if (inst.operands[1].isscalar)
16208    {
16209      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
16210      struct neon_type_el et = neon_check_type (2, rs,
16211	N_EQK, N_8 | N_16 | N_32 | N_KEY);
16212      unsigned sizebits = et.size >> 3;
16213      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
16214      int logsize = neon_logbits (et.size);
16215      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;
16216
16217      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
16218	return;
16219
16220      NEON_ENCODE (SCALAR, inst);
16221      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16222      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16223      inst.instruction |= LOW4 (dm);
16224      inst.instruction |= HI1 (dm) << 5;
16225      inst.instruction |= neon_quad (rs) << 6;
16226      inst.instruction |= x << 17;
16227      inst.instruction |= sizebits << 16;
16228
16229      neon_dp_fixup (&inst);
16230    }
16231  else
16232    {
16233      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
16234      struct neon_type_el et = neon_check_type (2, rs,
16235	N_8 | N_16 | N_32 | N_KEY, N_EQK);
16236      /* Duplicate ARM register to lanes of vector.  */
16237      NEON_ENCODE (ARMREG, inst);
16238      switch (et.size)
16239	{
16240	case 8:  inst.instruction |= 0x400000; break;
16241	case 16: inst.instruction |= 0x000020; break;
16242	case 32: inst.instruction |= 0x000000; break;
16243	default: break;
16244	}
16245      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
16246      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
16247      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
16248      inst.instruction |= neon_quad (rs) << 21;
16249      /* The encoding for this instruction is identical for the ARM and Thumb
16250	 variants, except for the condition field.  */
16251      do_vfp_cond_or_thumb ();
16252    }
16253}
16254
16255/* VMOV has particularly many variations. It can be one of:
16256     0. VMOV<c><q> <Qd>, <Qm>
16257     1. VMOV<c><q> <Dd>, <Dm>
16258   (Register operations, which are VORR with Rm = Rn.)
16259     2. VMOV<c><q>.<dt> <Qd>, #<imm>
16260     3. VMOV<c><q>.<dt> <Dd>, #<imm>
16261   (Immediate loads.)
16262     4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
16263   (ARM register to scalar.)
16264     5. VMOV<c><q> <Dm>, <Rd>, <Rn>
16265   (Two ARM registers to vector.)
16266     6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
16267   (Scalar to ARM register.)
16268     7. VMOV<c><q> <Rd>, <Rn>, <Dm>
16269   (Vector to two ARM registers.)
16270     8. VMOV.F32 <Sd>, <Sm>
16271     9. VMOV.F64 <Dd>, <Dm>
16272   (VFP register moves.)
16273    10. VMOV.F32 <Sd>, #imm
16274    11. VMOV.F64 <Dd>, #imm
16275   (VFP float immediate load.)
16276    12. VMOV <Rd>, <Sm>
16277   (VFP single to ARM reg.)
16278    13. VMOV <Sd>, <Rm>
16279   (ARM reg to VFP single.)
16280    14. VMOV <Rd>, <Re>, <Sn>, <Sm>
16281   (Two ARM regs to two VFP singles.)
16282    15. VMOV <Sd>, <Se>, <Rn>, <Rm>
16283   (Two VFP singles to two ARM regs.)
16284
16285   These cases can be disambiguated using neon_select_shape, except cases 1/9
16286   and 3/11 which depend on the operand type too.
16287
16288   All the encoded bits are hardcoded by this function.
16289
16290   Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
16291   Cases 5, 7 may be used with VFPv2 and above.
16292
16293   FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
16294   can specify a type where it doesn't make sense to, and is ignored).  */
16295
16296static void
16297do_neon_mov (void)
16298{
16299  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
16300					  NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR,
16301					  NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
16302					  NS_HR, NS_RH, NS_HI, NS_NULL);
16303  struct neon_type_el et;
16304  const char *ldconst = 0;
16305
16306  switch (rs)
16307    {
16308    case NS_DD:  /* case 1/9.  */
16309      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
16310      /* It is not an error here if no type is given.  */
16311      inst.error = NULL;
16312      if (et.type == NT_float && et.size == 64)
16313	{
16314	  do_vfp_nsyn_opcode ("fcpyd");
16315	  break;
16316	}
16317      /* fall through.  */
16318
16319    case NS_QQ:  /* case 0/1.  */
16320      {
16321	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
16322	  return;
16323	/* The architecture manual I have doesn't explicitly state which
16324	   value the U bit should have for register->register moves, but
16325	   the equivalent VORR instruction has U = 0, so do that.  */
16326	inst.instruction = 0x0200110;
16327	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16328	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16329	inst.instruction |= LOW4 (inst.operands[1].reg);
16330	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16331	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16332	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16333	inst.instruction |= neon_quad (rs) << 6;
16334
16335	neon_dp_fixup (&inst);
16336      }
16337      break;
16338
16339    case NS_DI:  /* case 3/11.  */
16340      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
16341      inst.error = NULL;
16342      if (et.type == NT_float && et.size == 64)
16343	{
16344	  /* case 11 (fconstd).  */
16345	  ldconst = "fconstd";
16346	  goto encode_fconstd;
16347	}
16348      /* fall through.  */
16349
16350    case NS_QI:  /* case 2/3.  */
16351      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
16352	return;
16353      inst.instruction = 0x0800010;
16354      neon_move_immediate ();
16355      neon_dp_fixup (&inst);
16356      break;
16357
16358    case NS_SR:  /* case 4.  */
16359      {
16360	unsigned bcdebits = 0;
16361	int logsize;
16362	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
16363	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);
16364
16365	/* .<size> is optional here, defaulting to .32. */
16366	if (inst.vectype.elems == 0
16367	    && inst.operands[0].vectype.type == NT_invtype
16368	    && inst.operands[1].vectype.type == NT_invtype)
16369	  {
16370	    inst.vectype.el[0].type = NT_untyped;
16371	    inst.vectype.el[0].size = 32;
16372	    inst.vectype.elems = 1;
16373	  }
16374
16375	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
16376	logsize = neon_logbits (et.size);
16377
16378	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
16379		    _(BAD_FPU));
16380	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
16381		    && et.size != 32, _(BAD_FPU));
16382	constraint (et.type == NT_invtype, _("bad type for scalar"));
16383	constraint (x >= 64 / et.size, _("scalar index out of range"));
16384
16385	switch (et.size)
16386	  {
16387	  case 8:  bcdebits = 0x8; break;
16388	  case 16: bcdebits = 0x1; break;
16389	  case 32: bcdebits = 0x0; break;
16390	  default: ;
16391	  }
16392
16393	bcdebits |= x << logsize;
16394
16395	inst.instruction = 0xe000b10;
16396	do_vfp_cond_or_thumb ();
16397	inst.instruction |= LOW4 (dn) << 16;
16398	inst.instruction |= HI1 (dn) << 7;
16399	inst.instruction |= inst.operands[1].reg << 12;
16400	inst.instruction |= (bcdebits & 3) << 5;
16401	inst.instruction |= (bcdebits >> 2) << 21;
16402      }
16403      break;
16404
16405    case NS_DRR:  /* case 5 (fmdrr).  */
16406      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
16407		  _(BAD_FPU));
16408
16409      inst.instruction = 0xc400b10;
16410      do_vfp_cond_or_thumb ();
16411      inst.instruction |= LOW4 (inst.operands[0].reg);
16412      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
16413      inst.instruction |= inst.operands[1].reg << 12;
16414      inst.instruction |= inst.operands[2].reg << 16;
16415      break;
16416
16417    case NS_RS:  /* case 6.  */
16418      {
16419	unsigned logsize;
16420	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
16421	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
16422	unsigned abcdebits = 0;
16423
16424	/* .<dt> is optional here, defaulting to .32. */
16425	if (inst.vectype.elems == 0
16426	    && inst.operands[0].vectype.type == NT_invtype
16427	    && inst.operands[1].vectype.type == NT_invtype)
16428	  {
16429	    inst.vectype.el[0].type = NT_untyped;
16430	    inst.vectype.el[0].size = 32;
16431	    inst.vectype.elems = 1;
16432	  }
16433
16434	et = neon_check_type (2, NS_NULL,
16435			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
16436	logsize = neon_logbits (et.size);
16437
16438	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
16439		    _(BAD_FPU));
16440	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
16441		    && et.size != 32, _(BAD_FPU));
16442	constraint (et.type == NT_invtype, _("bad type for scalar"));
16443	constraint (x >= 64 / et.size, _("scalar index out of range"));
16444
16445	switch (et.size)
16446	  {
16447	  case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
16448	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
16449	  case 32: abcdebits = 0x00; break;
16450	  default: ;
16451	  }
16452
16453	abcdebits |= x << logsize;
16454	inst.instruction = 0xe100b10;
16455	do_vfp_cond_or_thumb ();
16456	inst.instruction |= LOW4 (dn) << 16;
16457	inst.instruction |= HI1 (dn) << 7;
16458	inst.instruction |= inst.operands[0].reg << 12;
16459	inst.instruction |= (abcdebits & 3) << 5;
16460	inst.instruction |= (abcdebits >> 2) << 21;
16461      }
16462      break;
16463
16464    case NS_RRD:  /* case 7 (fmrrd).  */
16465      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
16466		  _(BAD_FPU));
16467
16468      inst.instruction = 0xc500b10;
16469      do_vfp_cond_or_thumb ();
16470      inst.instruction |= inst.operands[0].reg << 12;
16471      inst.instruction |= inst.operands[1].reg << 16;
16472      inst.instruction |= LOW4 (inst.operands[2].reg);
16473      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16474      break;
16475
16476    case NS_FF:  /* case 8 (fcpys).  */
16477      do_vfp_nsyn_opcode ("fcpys");
16478      break;
16479
16480    case NS_HI:
16481    case NS_FI:  /* case 10 (fconsts).  */
16482      ldconst = "fconsts";
16483      encode_fconstd:
16484      if (is_quarter_float (inst.operands[1].imm))
16485	{
16486	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
16487	  do_vfp_nsyn_opcode (ldconst);
16488
16489	  /* ARMv8.2 fp16 vmov.f16 instruction.  */
16490	  if (rs == NS_HI)
16491	    do_scalar_fp16_v82_encode ();
16492	}
16493      else
16494	first_error (_("immediate out of range"));
16495      break;
16496
16497    case NS_RH:
16498    case NS_RF:  /* case 12 (fmrs).  */
16499      do_vfp_nsyn_opcode ("fmrs");
16500      /* ARMv8.2 fp16 vmov.f16 instruction.  */
16501      if (rs == NS_RH)
16502	do_scalar_fp16_v82_encode ();
16503      break;
16504
16505    case NS_HR:
16506    case NS_FR:  /* case 13 (fmsr).  */
16507      do_vfp_nsyn_opcode ("fmsr");
16508      /* ARMv8.2 fp16 vmov.f16 instruction.  */
16509      if (rs == NS_HR)
16510	do_scalar_fp16_v82_encode ();
16511      break;
16512
16513    /* The encoders for the fmrrs and fmsrr instructions expect three operands
16514       (one of which is a list), but we have parsed four.  Do some fiddling to
16515       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
16516       expect.  */
16517    case NS_RRFF:  /* case 14 (fmrrs).  */
16518      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
16519		  _("VFP registers must be adjacent"));
16520      inst.operands[2].imm = 2;
16521      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
16522      do_vfp_nsyn_opcode ("fmrrs");
16523      break;
16524
16525    case NS_FFRR:  /* case 15 (fmsrr).  */
16526      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
16527		  _("VFP registers must be adjacent"));
16528      inst.operands[1] = inst.operands[2];
16529      inst.operands[2] = inst.operands[3];
16530      inst.operands[0].imm = 2;
16531      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
16532      do_vfp_nsyn_opcode ("fmsrr");
16533      break;
16534
16535    case NS_NULL:
16536      /* neon_select_shape has determined that the instruction
16537	 shape is wrong and has already set the error message.  */
16538      break;
16539
16540    default:
16541      abort ();
16542    }
16543}
16544
16545static void
16546do_neon_rshift_round_imm (void)
16547{
16548  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
16549  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
16550  int imm = inst.operands[2].imm;
16551
16552  /* imm == 0 case is encoded as VMOV for V{R}SHR.  */
16553  if (imm == 0)
16554    {
16555      inst.operands[2].present = 0;
16556      do_neon_mov ();
16557      return;
16558    }
16559
16560  constraint (imm < 1 || (unsigned)imm > et.size,
16561	      _("immediate out of range for shift"));
16562  neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
16563		  et.size - imm);
16564}
16565
16566static void
16567do_neon_movhf (void)
16568{
16569  enum neon_shape rs = neon_select_shape (NS_HH, NS_NULL);
16570  constraint (rs != NS_HH, _("invalid suffix"));
16571
16572  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
16573	      _(BAD_FPU));
16574
16575  do_vfp_sp_monadic ();
16576
16577  inst.is_neon = 1;
16578  inst.instruction |= 0xf0000000;
16579}
16580
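/* Encode VMOVL (lengthening move), e.g. (illustrative) "vmovl.u8 q0, d1",
   which widens each source element to twice its original size.  */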
16581static void
16582do_neon_movl (void)
16583{
16584  struct neon_type_el et = neon_check_type (2, NS_QD,
16585    N_EQK | N_DBL, N_SU_32 | N_KEY);
16586  unsigned sizebits = et.size >> 3;
16587  inst.instruction |= sizebits << 19;
16588  neon_two_same (0, et.type == NT_unsigned, -1);
16589}
16590
16591static void
16592do_neon_trn (void)
16593{
16594  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16595  struct neon_type_el et = neon_check_type (2, rs,
16596    N_EQK, N_8 | N_16 | N_32 | N_KEY);
16597  NEON_ENCODE (INTEGER, inst);
16598  neon_two_same (neon_quad (rs), 1, et.size);
16599}
16600
16601static void
16602do_neon_zip_uzp (void)
16603{
16604  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16605  struct neon_type_el et = neon_check_type (2, rs,
16606    N_EQK, N_8 | N_16 | N_32 | N_KEY);
16607  if (rs == NS_DD && et.size == 32)
16608    {
16609      /* Special case: encode as VTRN.32 <Dd>, <Dm>.  */
16610      inst.instruction = N_MNEM_vtrn;
16611      do_neon_trn ();
16612      return;
16613    }
16614  neon_two_same (neon_quad (rs), 1, et.size);
16615}
16616
16617static void
16618do_neon_sat_abs_neg (void)
16619{
16620  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16621  struct neon_type_el et = neon_check_type (2, rs,
16622    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
16623  neon_two_same (neon_quad (rs), 1, et.size);
16624}
16625
16626static void
16627do_neon_pair_long (void)
16628{
16629  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16630  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
  /* Unsigned is encoded in OP field (bit 7) for these instructions.  */
16632  inst.instruction |= (et.type == NT_unsigned) << 7;
16633  neon_two_same (neon_quad (rs), 1, et.size);
16634}
16635
16636static void
16637do_neon_recip_est (void)
16638{
16639  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16640  struct neon_type_el et = neon_check_type (2, rs,
16641    N_EQK | N_FLT, N_F_16_32 | N_U32 | N_KEY);
16642  inst.instruction |= (et.type == NT_float) << 8;
16643  neon_two_same (neon_quad (rs), 1, et.size);
16644}
16645
16646static void
16647do_neon_cls (void)
16648{
16649  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16650  struct neon_type_el et = neon_check_type (2, rs,
16651    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
16652  neon_two_same (neon_quad (rs), 1, et.size);
16653}
16654
16655static void
16656do_neon_clz (void)
16657{
16658  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16659  struct neon_type_el et = neon_check_type (2, rs,
16660    N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
16661  neon_two_same (neon_quad (rs), 1, et.size);
16662}
16663
16664static void
16665do_neon_cnt (void)
16666{
16667  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16668  struct neon_type_el et = neon_check_type (2, rs,
16669    N_EQK | N_INT, N_8 | N_KEY);
16670  neon_two_same (neon_quad (rs), 1, et.size);
16671}
16672
16673static void
16674do_neon_swp (void)
16675{
16676  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16677  neon_two_same (neon_quad (rs), 1, -1);
16678}
16679
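/* Encode VTBL/VTBX table lookups, e.g. (illustrative)
   "vtbl.8 d0, {d2-d3}, d4".  The table may span one to four D registers;
   its length (minus one) goes into bits [9:8].  */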
16680static void
16681do_neon_tbl_tbx (void)
16682{
16683  unsigned listlenbits;
16684  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
16685
16686  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
16687    {
16688      first_error (_("bad list length for table lookup"));
16689      return;
16690    }
16691
16692  listlenbits = inst.operands[1].imm - 1;
16693  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16694  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16695  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16696  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16697  inst.instruction |= LOW4 (inst.operands[2].reg);
16698  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16699  inst.instruction |= listlenbits << 8;
16700
16701  neon_dp_fixup (&inst);
16702}
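/* Encode VLDM/VSTM, e.g. (illustrative) "vldmia r0!, {d0-d3}".  Each D
   register in the list occupies two words, hence the imm * 2 offset below;
   single-precision register lists are handed off to
   do_vfp_nsyn_ldm_stm ().  */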
16703
16704static void
16705do_neon_ldm_stm (void)
16706{
16707  /* P, U and L bits are part of bitmask.  */
16708  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
16709  unsigned offsetbits = inst.operands[1].imm * 2;
16710
16711  if (inst.operands[1].issingle)
16712    {
16713      do_vfp_nsyn_ldm_stm (is_dbmode);
16714      return;
16715    }
16716
16717  constraint (is_dbmode && !inst.operands[0].writeback,
16718	      _("writeback (!) must be used for VLDMDB and VSTMDB"));
16719
16720  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
16721	      _("register list must contain at least 1 and at most 16 "
16722		"registers"));
16723
16724  inst.instruction |= inst.operands[0].reg << 16;
16725  inst.instruction |= inst.operands[0].writeback << 21;
16726  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
16727  inst.instruction |= HI1 (inst.operands[1].reg) << 22;
16728
16729  inst.instruction |= offsetbits;
16730
16731  do_vfp_cond_or_thumb ();
16732}
16733
16734static void
16735do_neon_ldr_str (void)
16736{
16737  int is_ldr = (inst.instruction & (1 << 20)) != 0;
16738
  /* Use of PC in VSTR in ARM mode is deprecated in ARMv7,
     and is UNPREDICTABLE in Thumb mode.  */
16741  if (!is_ldr
16742      && inst.operands[1].reg == REG_PC
16743      && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
16744    {
16745      if (thumb_mode)
16746	inst.error = _("Use of PC here is UNPREDICTABLE");
16747      else if (warn_on_deprecated)
16748	as_tsktsk (_("Use of PC here is deprecated"));
16749    }
16750
16751  if (inst.operands[0].issingle)
16752    {
16753      if (is_ldr)
16754	do_vfp_nsyn_opcode ("flds");
16755      else
16756	do_vfp_nsyn_opcode ("fsts");
16757
16758      /* ARMv8.2 vldr.16/vstr.16 instruction.  */
16759      if (inst.vectype.el[0].size == 16)
16760	do_scalar_fp16_v82_encode ();
16761    }
16762  else
16763    {
16764      if (is_ldr)
16765	do_vfp_nsyn_opcode ("fldd");
16766      else
16767	do_vfp_nsyn_opcode ("fstd");
16768    }
16769}
16770
16771/* "interleave" version also handles non-interleaving register VLD1/VST1
16772   instructions.  */
16773
16774static void
16775do_neon_ld_st_interleave (void)
16776{
16777  struct neon_type_el et = neon_check_type (1, NS_NULL,
16778					    N_8 | N_16 | N_32 | N_64);
16779  unsigned alignbits = 0;
16780  unsigned idx;
16781  /* The bits in this table go:
16782     0: register stride of one (0) or two (1)
16783     1,2: register list length, minus one (1, 2, 3, 4).
16784     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
16785     We use -1 for invalid entries.  */
16786  const int typetable[] =
16787    {
16788      0x7,  -1, 0xa,  -1, 0x6,  -1, 0x2,  -1, /* VLD1 / VST1.  */
16789       -1,  -1, 0x8, 0x9,  -1,  -1, 0x3,  -1, /* VLD2 / VST2.  */
16790       -1,  -1,  -1,  -1, 0x4, 0x5,  -1,  -1, /* VLD3 / VST3.  */
16791       -1,  -1,  -1,  -1,  -1,  -1, 0x0, 0x1  /* VLD4 / VST4.  */
16792    };
16793  int typebits;
16794
16795  if (et.type == NT_invtype)
16796    return;
16797
16798  if (inst.operands[1].immisalign)
16799    switch (inst.operands[1].imm >> 8)
16800      {
16801      case 64: alignbits = 1; break;
16802      case 128:
16803	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
16804	    && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
16805	  goto bad_alignment;
16806	alignbits = 2;
16807	break;
16808      case 256:
16809	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
16810	  goto bad_alignment;
16811	alignbits = 3;
16812	break;
16813      default:
16814      bad_alignment:
16815	first_error (_("bad alignment"));
16816	return;
16817      }
16818
16819  inst.instruction |= alignbits << 4;
16820  inst.instruction |= neon_logbits (et.size) << 6;
16821
16822  /* Bits [4:6] of the immediate in a list specifier encode register stride
16823     (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
16824     VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
16825     up the right value for "type" in a table based on this value and the given
16826     list style, then stick it back.  */
16827  idx = ((inst.operands[0].imm >> 4) & 7)
16828	| (((inst.instruction >> 8) & 3) << 3);
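  /* For example (illustrative), "vld2.16 {d0,d2}, [r0]" has a register
     stride of two and a list length of two, giving idx == 0xb and hence
     typebits == 0x9 from the VLD2 / VST2 row above.  */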
16829
16830  typebits = typetable[idx];
16831
16832  constraint (typebits == -1, _("bad list type for instruction"));
16833  constraint (((inst.instruction >> 8) & 3) && et.size == 64,
16834	      _("bad element type for instruction"));
16835
16836  inst.instruction &= ~0xf00;
16837  inst.instruction |= typebits << 8;
16838}
16839
/* Check that the alignment is valid for do_neon_ld_st_lane and
   do_neon_ld_dup.  *DO_ALIGNMENT is set to 1 if the relevant alignment bit
   should be set, 0 otherwise.  The variable arguments are a list of pairs of
   legal (size, align) values, terminated with -1.  */
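/* For example (illustrative), a call such as
     neon_alignment_bit (et.size, align, &do_alignment, 16, 16, 32, 32, -1)
   accepts an explicit ":16" alignment for 16-bit elements and ":32" for
   32-bit elements; any other explicit alignment is rejected, while the
   absence of an alignment specifier always succeeds.  */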
16844
16845static int
16846neon_alignment_bit (int size, int align, int *do_alignment, ...)
16847{
16848  va_list ap;
16849  int result = FAIL, thissize, thisalign;
16850
16851  if (!inst.operands[1].immisalign)
16852    {
16853      *do_alignment = 0;
16854      return SUCCESS;
16855    }
16856
16857  va_start (ap, do_alignment);
16858
16859  do
16860    {
16861      thissize = va_arg (ap, int);
16862      if (thissize == -1)
16863	break;
16864      thisalign = va_arg (ap, int);
16865
16866      if (size == thissize && align == thisalign)
16867	result = SUCCESS;
16868    }
16869  while (result != SUCCESS);
16870
16871  va_end (ap);
16872
16873  if (result == SUCCESS)
16874    *do_alignment = 1;
16875  else
16876    first_error (_("unsupported alignment for instruction"));
16877
16878  return result;
16879}
16880
16881static void
16882do_neon_ld_st_lane (void)
16883{
16884  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
16885  int align_good, do_alignment = 0;
16886  int logsize = neon_logbits (et.size);
16887  int align = inst.operands[1].imm >> 8;
16888  int n = (inst.instruction >> 8) & 3;
16889  int max_el = 64 / et.size;
16890
16891  if (et.type == NT_invtype)
16892    return;
16893
16894  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
16895	      _("bad list length"));
16896  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
16897	      _("scalar index out of range"));
16898  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
16899	      && et.size == 8,
16900	      _("stride of 2 unavailable when element size is 8"));
16901
16902  switch (n)
16903    {
16904    case 0:  /* VLD1 / VST1.  */
16905      align_good = neon_alignment_bit (et.size, align, &do_alignment, 16, 16,
16906				       32, 32, -1);
16907      if (align_good == FAIL)
16908	return;
16909      if (do_alignment)
16910	{
16911	  unsigned alignbits = 0;
16912	  switch (et.size)
16913	    {
16914	    case 16: alignbits = 0x1; break;
16915	    case 32: alignbits = 0x3; break;
16916	    default: ;
16917	    }
16918	  inst.instruction |= alignbits << 4;
16919	}
16920      break;
16921
16922    case 1:  /* VLD2 / VST2.  */
16923      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 16,
16924		      16, 32, 32, 64, -1);
16925      if (align_good == FAIL)
16926	return;
16927      if (do_alignment)
16928	inst.instruction |= 1 << 4;
16929      break;
16930
16931    case 2:  /* VLD3 / VST3.  */
16932      constraint (inst.operands[1].immisalign,
16933		  _("can't use alignment with this instruction"));
16934      break;
16935
16936    case 3:  /* VLD4 / VST4.  */
16937      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
16938				       16, 64, 32, 64, 32, 128, -1);
16939      if (align_good == FAIL)
16940	return;
16941      if (do_alignment)
16942	{
16943	  unsigned alignbits = 0;
16944	  switch (et.size)
16945	    {
16946	    case 8:  alignbits = 0x1; break;
16947	    case 16: alignbits = 0x1; break;
16948	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
16949	    default: ;
16950	    }
16951	  inst.instruction |= alignbits << 4;
16952	}
16953      break;
16954
16955    default: ;
16956    }
16957
16958  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
16959  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
16960    inst.instruction |= 1 << (4 + logsize);
16961
16962  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
16963  inst.instruction |= logsize << 10;
16964}
16965
16966/* Encode single n-element structure to all lanes VLD<n> instructions.  */
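/* For example (illustrative), "vld1.8 {d0[]}, [r0]" loads a single byte and
   replicates it to every lane of d0.  */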
16967
16968static void
16969do_neon_ld_dup (void)
16970{
16971  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
16972  int align_good, do_alignment = 0;
16973
16974  if (et.type == NT_invtype)
16975    return;
16976
16977  switch ((inst.instruction >> 8) & 3)
16978    {
16979    case 0:  /* VLD1.  */
16980      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
16981      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
16982				       &do_alignment, 16, 16, 32, 32, -1);
16983      if (align_good == FAIL)
16984	return;
16985      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
16986	{
16987	case 1: break;
16988	case 2: inst.instruction |= 1 << 5; break;
16989	default: first_error (_("bad list length")); return;
16990	}
16991      inst.instruction |= neon_logbits (et.size) << 6;
16992      break;
16993
16994    case 1:  /* VLD2.  */
16995      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
16996				       &do_alignment, 8, 16, 16, 32, 32, 64,
16997				       -1);
16998      if (align_good == FAIL)
16999	return;
17000      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
17001		  _("bad list length"));
17002      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
17003	inst.instruction |= 1 << 5;
17004      inst.instruction |= neon_logbits (et.size) << 6;
17005      break;
17006
17007    case 2:  /* VLD3.  */
17008      constraint (inst.operands[1].immisalign,
17009		  _("can't use alignment with this instruction"));
17010      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
17011		  _("bad list length"));
17012      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
17013	inst.instruction |= 1 << 5;
17014      inst.instruction |= neon_logbits (et.size) << 6;
17015      break;
17016
17017    case 3:  /* VLD4.  */
17018      {
17019	int align = inst.operands[1].imm >> 8;
17020	align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
17021					 16, 64, 32, 64, 32, 128, -1);
17022	if (align_good == FAIL)
17023	  return;
17024	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
17025		    _("bad list length"));
17026	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
17027	  inst.instruction |= 1 << 5;
17028	if (et.size == 32 && align == 128)
17029	  inst.instruction |= 0x3 << 6;
17030	else
17031	  inst.instruction |= neon_logbits (et.size) << 6;
17032      }
17033      break;
17034
17035    default: ;
17036    }
17037
17038  inst.instruction |= do_alignment << 4;
17039}
17040
17041/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
   apart from bits [11:4]).  */
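/* For example (illustrative), the three forms dispatched below are:
     vld1.32 {d0-d1}, [r0]    interleave / whole-register form
     vld1.32 {d0[]}, [r1]     load-to-all-lanes form
     vld1.32 {d0[1]}, [r2]!   single-lane form with writeback  */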
17043
17044static void
17045do_neon_ldx_stx (void)
17046{
17047  if (inst.operands[1].isreg)
17048    constraint (inst.operands[1].reg == REG_PC, BAD_PC);
17049
17050  switch (NEON_LANE (inst.operands[0].imm))
17051    {
17052    case NEON_INTERLEAVE_LANES:
17053      NEON_ENCODE (INTERLV, inst);
17054      do_neon_ld_st_interleave ();
17055      break;
17056
17057    case NEON_ALL_LANES:
17058      NEON_ENCODE (DUP, inst);
17059      if (inst.instruction == N_INV)
17060	{
	  first_error (_("only loads support such operands"));
17062	  break;
17063	}
17064      do_neon_ld_dup ();
17065      break;
17066
17067    default:
17068      NEON_ENCODE (LANE, inst);
17069      do_neon_ld_st_lane ();
17070    }
17071
17072  /* L bit comes from bit mask.  */
17073  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17074  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17075  inst.instruction |= inst.operands[1].reg << 16;
17076
17077  if (inst.operands[1].postind)
17078    {
17079      int postreg = inst.operands[1].imm & 0xf;
17080      constraint (!inst.operands[1].immisreg,
17081		  _("post-index must be a register"));
17082      constraint (postreg == 0xd || postreg == 0xf,
17083		  _("bad register for post-index"));
17084      inst.instruction |= postreg;
17085    }
17086  else
17087    {
17088      constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
17089      constraint (inst.reloc.exp.X_op != O_constant
17090		  || inst.reloc.exp.X_add_number != 0,
17091		  BAD_ADDR_MODE);
17092
17093      if (inst.operands[1].writeback)
17094	{
17095	  inst.instruction |= 0xd;
17096	}
17097      else
17098	inst.instruction |= 0xf;
17099    }
17100
17101  if (thumb_mode)
17102    inst.instruction |= 0xf9000000;
17103  else
17104    inst.instruction |= 0xf4000000;
17105}
17106
17107/* FP v8.  */
17108static void
17109do_vfp_nsyn_fpv8 (enum neon_shape rs)
17110{
17111  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
17112     D register operands.  */
17113  if (neon_shape_class[rs] == SC_DOUBLE)
17114    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
17115		_(BAD_FPU));
17116
17117  NEON_ENCODE (FPV8, inst);
17118
17119  if (rs == NS_FFF || rs == NS_HHH)
17120    {
17121      do_vfp_sp_dyadic ();
17122
17123      /* ARMv8.2 fp16 instruction.  */
17124      if (rs == NS_HHH)
17125	do_scalar_fp16_v82_encode ();
17126    }
17127  else
17128    do_vfp_dp_rd_rn_rm ();
17129
17130  if (rs == NS_DDD)
17131    inst.instruction |= 0x100;
17132
17133  inst.instruction |= 0xf0000000;
17134}
17135
17136static void
17137do_vsel (void)
17138{
17139  set_it_insn_type (OUTSIDE_IT_INSN);
17140
17141  if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
17142    first_error (_("invalid instruction shape"));
17143}
17144
17145static void
17146do_vmaxnm (void)
17147{
17148  set_it_insn_type (OUTSIDE_IT_INSN);
17149
17150  if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
17151    return;
17152
17153  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
17154    return;
17155
17156  neon_dyadic_misc (NT_untyped, N_F_16_32, 0);
17157}
17158
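/* Common encoder for the VRINT{X,Z,R,A,N,P,M} family, e.g. (illustrative)
   the VFP form "vrinta.f32 s0, s1".  MODE selects the rounding behaviour;
   both the VFP scalar and the Neon vector forms are handled here.  */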
17159static void
17160do_vrint_1 (enum neon_cvt_mode mode)
17161{
17162  enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_QQ, NS_NULL);
17163  struct neon_type_el et;
17164
17165  if (rs == NS_NULL)
17166    return;
17167
17168  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
17169     D register operands.  */
17170  if (neon_shape_class[rs] == SC_DOUBLE)
17171    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
17172		_(BAD_FPU));
17173
17174  et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY
17175			| N_VFP);
17176  if (et.type != NT_invtype)
17177    {
17178      /* VFP encodings.  */
17179      if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
17180	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
17181	set_it_insn_type (OUTSIDE_IT_INSN);
17182
17183      NEON_ENCODE (FPV8, inst);
17184      if (rs == NS_FF || rs == NS_HH)
17185	do_vfp_sp_monadic ();
17186      else
17187	do_vfp_dp_rd_rm ();
17188
17189      switch (mode)
17190	{
17191	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
17192	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
17193	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
17194	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
17195	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
17196	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
17197	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
17198	default: abort ();
17199	}
17200
17201      inst.instruction |= (rs == NS_DD) << 8;
17202      do_vfp_cond_or_thumb ();
17203
17204      /* ARMv8.2 fp16 vrint instruction.  */
17205      if (rs == NS_HH)
	do_scalar_fp16_v82_encode ();
17207    }
17208  else
17209    {
17210      /* Neon encodings (or something broken...).  */
17211      inst.error = NULL;
17212      et = neon_check_type (2, rs, N_EQK, N_F_16_32 | N_KEY);
17213
17214      if (et.type == NT_invtype)
17215	return;
17216
17217      set_it_insn_type (OUTSIDE_IT_INSN);
17218      NEON_ENCODE (FLOAT, inst);
17219
17220      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
17221	return;
17222
17223      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17224      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17225      inst.instruction |= LOW4 (inst.operands[1].reg);
17226      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
17227      inst.instruction |= neon_quad (rs) << 6;
17228      /* Mask off the original size bits and reencode them.  */
17229      inst.instruction = ((inst.instruction & 0xfff3ffff)
17230			  | neon_logbits (et.size) << 18);
17231
17232      switch (mode)
17233	{
17234	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
17235	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
17236	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
17237	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
17238	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
17239	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
17240	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
17241	default: abort ();
17242	}
17243
17244      if (thumb_mode)
17245	inst.instruction |= 0xfc000000;
17246      else
17247	inst.instruction |= 0xf0000000;
17248    }
17249}
17250
17251static void
17252do_vrintx (void)
17253{
17254  do_vrint_1 (neon_cvt_mode_x);
17255}
17256
17257static void
17258do_vrintz (void)
17259{
17260  do_vrint_1 (neon_cvt_mode_z);
17261}
17262
17263static void
17264do_vrintr (void)
17265{
17266  do_vrint_1 (neon_cvt_mode_r);
17267}
17268
17269static void
17270do_vrinta (void)
17271{
17272  do_vrint_1 (neon_cvt_mode_a);
17273}
17274
17275static void
17276do_vrintn (void)
17277{
17278  do_vrint_1 (neon_cvt_mode_n);
17279}
17280
17281static void
17282do_vrintp (void)
17283{
17284  do_vrint_1 (neon_cvt_mode_p);
17285}
17286
17287static void
17288do_vrintm (void)
17289{
17290  do_vrint_1 (neon_cvt_mode_m);
17291}
17292
17293static unsigned
17294neon_scalar_for_vcmla (unsigned opnd, unsigned elsize)
17295{
17296  unsigned regno = NEON_SCALAR_REG (opnd);
17297  unsigned elno = NEON_SCALAR_INDEX (opnd);
17298
17299  if (elsize == 16 && elno < 2 && regno < 16)
17300    return regno | (elno << 4);
17301  else if (elsize == 32 && elno == 0)
17302    return regno;
17303
17304  first_error (_("scalar out of range"));
17305  return 0;
17306}
17307
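/* Encode VCMLA (complex multiply-accumulate with rotation), e.g.
   (illustrative) "vcmla.f32 q0, q1, q2, #90" or the indexed form
   "vcmla.f16 d0, d1, d2[0], #180".  The rotation must be 0, 90, 180
   or 270.  */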
17308static void
17309do_vcmla (void)
17310{
17311  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
17312	      _(BAD_FPU));
17313  constraint (inst.reloc.exp.X_op != O_constant, _("expression too complex"));
17314  unsigned rot = inst.reloc.exp.X_add_number;
17315  constraint (rot != 0 && rot != 90 && rot != 180 && rot != 270,
17316	      _("immediate out of range"));
17317  rot /= 90;
17318  if (inst.operands[2].isscalar)
17319    {
17320      enum neon_shape rs = neon_select_shape (NS_DDSI, NS_QQSI, NS_NULL);
17321      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
17322				       N_KEY | N_F16 | N_F32).size;
17323      unsigned m = neon_scalar_for_vcmla (inst.operands[2].reg, size);
17324      inst.is_neon = 1;
17325      inst.instruction = 0xfe000800;
17326      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17327      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17328      inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
17329      inst.instruction |= HI1 (inst.operands[1].reg) << 7;
17330      inst.instruction |= LOW4 (m);
17331      inst.instruction |= HI1 (m) << 5;
17332      inst.instruction |= neon_quad (rs) << 6;
17333      inst.instruction |= rot << 20;
17334      inst.instruction |= (size == 32) << 23;
17335    }
17336  else
17337    {
17338      enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
17339      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
17340				       N_KEY | N_F16 | N_F32).size;
17341      neon_three_same (neon_quad (rs), 0, -1);
17342      inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
17343      inst.instruction |= 0xfc200800;
17344      inst.instruction |= rot << 23;
17345      inst.instruction |= (size == 32) << 20;
17346    }
17347}
17348
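/* Encode VCADD (complex add with rotation), e.g. (illustrative)
   "vcadd.f16 d0, d1, d2, #90".  Only rotations of 90 and 270 are valid.  */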
17349static void
17350do_vcadd (void)
17351{
17352  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
17353	      _(BAD_FPU));
17354  constraint (inst.reloc.exp.X_op != O_constant, _("expression too complex"));
17355  unsigned rot = inst.reloc.exp.X_add_number;
17356  constraint (rot != 90 && rot != 270, _("immediate out of range"));
17357  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
17358  unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
17359				   N_KEY | N_F16 | N_F32).size;
17360  neon_three_same (neon_quad (rs), 0, -1);
17361  inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
17362  inst.instruction |= 0xfc800800;
17363  inst.instruction |= (rot == 270) << 24;
17364  inst.instruction |= (size == 32) << 20;
17365}
17366
17367/* Crypto v1 instructions.  */
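/* do_crypto_2op_1 below encodes the two-operand forms (AESE, AESD, AESMC,
   AESIMC, SHA1H, SHA1SU1, SHA256SU0) and do_crypto_3op_1 the three-operand
   SHA forms; both operate on Q registers only.  */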
17368static void
17369do_crypto_2op_1 (unsigned elttype, int op)
17370{
17371  set_it_insn_type (OUTSIDE_IT_INSN);
17372
17373  if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
17374      == NT_invtype)
17375    return;
17376
17377  inst.error = NULL;
17378
17379  NEON_ENCODE (INTEGER, inst);
17380  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17381  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17382  inst.instruction |= LOW4 (inst.operands[1].reg);
17383  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
17384  if (op != -1)
17385    inst.instruction |= op << 6;
17386
17387  if (thumb_mode)
17388    inst.instruction |= 0xfc000000;
17389  else
17390    inst.instruction |= 0xf0000000;
17391}
17392
17393static void
17394do_crypto_3op_1 (int u, int op)
17395{
17396  set_it_insn_type (OUTSIDE_IT_INSN);
17397
17398  if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
17399		       N_32 | N_UNT | N_KEY).type == NT_invtype)
17400    return;
17401
17402  inst.error = NULL;
17403
17404  NEON_ENCODE (INTEGER, inst);
17405  neon_three_same (1, u, 8 << op);
17406}
17407
17408static void
17409do_aese (void)
17410{
17411  do_crypto_2op_1 (N_8, 0);
17412}
17413
17414static void
17415do_aesd (void)
17416{
17417  do_crypto_2op_1 (N_8, 1);
17418}
17419
17420static void
17421do_aesmc (void)
17422{
17423  do_crypto_2op_1 (N_8, 2);
17424}
17425
17426static void
17427do_aesimc (void)
17428{
17429  do_crypto_2op_1 (N_8, 3);
17430}
17431
17432static void
17433do_sha1c (void)
17434{
17435  do_crypto_3op_1 (0, 0);
17436}
17437
17438static void
17439do_sha1p (void)
17440{
17441  do_crypto_3op_1 (0, 1);
17442}
17443
17444static void
17445do_sha1m (void)
17446{
17447  do_crypto_3op_1 (0, 2);
17448}
17449
17450static void
17451do_sha1su0 (void)
17452{
17453  do_crypto_3op_1 (0, 3);
17454}
17455
17456static void
17457do_sha256h (void)
17458{
17459  do_crypto_3op_1 (1, 0);
17460}
17461
17462static void
17463do_sha256h2 (void)
17464{
17465  do_crypto_3op_1 (1, 1);
17466}
17467
17468static void
17469do_sha256su1 (void)
17470{
17471  do_crypto_3op_1 (1, 2);
17472}
17473
17474static void
17475do_sha1h (void)
17476{
17477  do_crypto_2op_1 (N_32, -1);
17478}
17479
17480static void
17481do_sha1su1 (void)
17482{
17483  do_crypto_2op_1 (N_32, 0);
17484}
17485
17486static void
17487do_sha256su0 (void)
17488{
17489  do_crypto_2op_1 (N_32, 1);
17490}
17491
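/* Common encoder for the CRC32{,C}{B,H,W} instructions, e.g. (illustrative)
   "crc32cw r0, r1, r2".  POLY selects the CRC32C variant and SZ the operand
   size (0 = byte, 1 = halfword, 2 = word).  */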
17492static void
17493do_crc32_1 (unsigned int poly, unsigned int sz)
17494{
17495  unsigned int Rd = inst.operands[0].reg;
17496  unsigned int Rn = inst.operands[1].reg;
17497  unsigned int Rm = inst.operands[2].reg;
17498
17499  set_it_insn_type (OUTSIDE_IT_INSN);
17500  inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
17501  inst.instruction |= LOW4 (Rn) << 16;
17502  inst.instruction |= LOW4 (Rm);
17503  inst.instruction |= sz << (thumb_mode ? 4 : 21);
17504  inst.instruction |= poly << (thumb_mode ? 20 : 9);
17505
17506  if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
17507    as_warn (UNPRED_REG ("r15"));
17508  if (thumb_mode && (Rd == REG_SP || Rn == REG_SP || Rm == REG_SP))
17509    as_warn (UNPRED_REG ("r13"));
17510}
17511
17512static void
17513do_crc32b (void)
17514{
17515  do_crc32_1 (0, 0);
17516}
17517
17518static void
17519do_crc32h (void)
17520{
17521  do_crc32_1 (0, 1);
17522}
17523
17524static void
17525do_crc32w (void)
17526{
17527  do_crc32_1 (0, 2);
17528}
17529
17530static void
17531do_crc32cb (void)
17532{
17533  do_crc32_1 (1, 0);
17534}
17535
17536static void
17537do_crc32ch (void)
17538{
17539  do_crc32_1 (1, 1);
17540}
17541
17542static void
17543do_crc32cw (void)
17544{
17545  do_crc32_1 (1, 2);
17546}
17547
17548static void
17549do_vjcvt (void)
17550{
17551  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
17552	      _(BAD_FPU));
17553  neon_check_type (2, NS_FD, N_S32, N_F64);
17554  do_vfp_sp_dp_cvt ();
17555  do_vfp_cond_or_thumb ();
17556}
17557
17558
17559/* Overall per-instruction processing.	*/
17560
17561/* We need to be able to fix up arbitrary expressions in some statements.
17562   This is so that we can handle symbols that are an arbitrary distance from
17563   the pc.  The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
17564   which returns part of an address in a form which will be valid for
17565   a data instruction.	We do this by pushing the expression into a symbol
17566   in the expr_section, and creating a fix for that.  */
17567
17568static void
17569fix_new_arm (fragS *	   frag,
17570	     int	   where,
17571	     short int	   size,
17572	     expressionS * exp,
17573	     int	   pc_rel,
17574	     int	   reloc)
17575{
17576  fixS *	   new_fix;
17577
17578  switch (exp->X_op)
17579    {
17580    case O_constant:
17581      if (pc_rel)
17582	{
17583	  /* Create an absolute valued symbol, so we have something to
17584	     refer to in the object file.  Unfortunately for us, gas's
17585	     generic expression parsing will already have folded out
17586	     any use of .set foo/.type foo %function that may have
17587	     been used to set type information of the target location,
17588	     that's being specified symbolically.  We have to presume
17589	     the user knows what they are doing.  */
17590	  char name[16 + 8];
17591	  symbolS *symbol;
17592
17593	  sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);
17594
17595	  symbol = symbol_find_or_make (name);
17596	  S_SET_SEGMENT (symbol, absolute_section);
17597	  symbol_set_frag (symbol, &zero_address_frag);
17598	  S_SET_VALUE (symbol, exp->X_add_number);
17599	  exp->X_op = O_symbol;
17600	  exp->X_add_symbol = symbol;
17601	  exp->X_add_number = 0;
17602	}
17603      /* FALLTHROUGH */
17604    case O_symbol:
17605    case O_add:
17606    case O_subtract:
17607      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
17608			     (enum bfd_reloc_code_real) reloc);
17609      break;
17610
17611    default:
17612      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
17613				  pc_rel, (enum bfd_reloc_code_real) reloc);
17614      break;
17615    }
17616
17617  /* Mark whether the fix is to a THUMB instruction, or an ARM
17618     instruction.  */
17619  new_fix->tc_fix_data = thumb_mode;
17620}
17621
/* Create a frag for an instruction requiring relaxation.  */
17623static void
17624output_relax_insn (void)
17625{
17626  char * to;
17627  symbolS *sym;
17628  int offset;
17629
17630  /* The size of the instruction is unknown, so tie the debug info to the
17631     start of the instruction.  */
17632  dwarf2_emit_insn (0);
17633
17634  switch (inst.reloc.exp.X_op)
17635    {
17636    case O_symbol:
17637      sym = inst.reloc.exp.X_add_symbol;
17638      offset = inst.reloc.exp.X_add_number;
17639      break;
17640    case O_constant:
17641      sym = NULL;
17642      offset = inst.reloc.exp.X_add_number;
17643      break;
17644    default:
17645      sym = make_expr_symbol (&inst.reloc.exp);
17646      offset = 0;
17647      break;
    }
17649  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
17650		 inst.relax, sym, offset, NULL/*offset, opcode*/);
17651  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
17652}
17653
17654/* Write a 32-bit thumb instruction to buf.  */
17655static void
17656put_thumb32_insn (char * buf, unsigned long insn)
17657{
17658  md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
17659  md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
17660}
17661
17662static void
17663output_inst (const char * str)
17664{
17665  char * to = NULL;
17666
17667  if (inst.error)
17668    {
17669      as_bad ("%s -- `%s'", inst.error, str);
17670      return;
17671    }
17672  if (inst.relax)
17673    {
17674      output_relax_insn ();
17675      return;
17676    }
17677  if (inst.size == 0)
17678    return;
17679
17680  to = frag_more (inst.size);
17681  /* PR 9814: Record the thumb mode into the current frag so that we know
17682     what type of NOP padding to use, if necessary.  We override any previous
17683     setting so that if the mode has changed then the NOPS that we use will
17684     match the encoding of the last instruction in the frag.  */
17685  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
17686
17687  if (thumb_mode && (inst.size > THUMB_SIZE))
17688    {
17689      gas_assert (inst.size == (2 * THUMB_SIZE));
17690      put_thumb32_insn (to, inst.instruction);
17691    }
17692  else if (inst.size > INSN_SIZE)
17693    {
17694      gas_assert (inst.size == (2 * INSN_SIZE));
17695      md_number_to_chars (to, inst.instruction, INSN_SIZE);
17696      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
17697    }
17698  else
17699    md_number_to_chars (to, inst.instruction, inst.size);
17700
17701  if (inst.reloc.type != BFD_RELOC_UNUSED)
17702    fix_new_arm (frag_now, to - frag_now->fr_literal,
17703		 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
17704		 inst.reloc.type);
17705
17706  dwarf2_emit_insn (inst.size);
17707}
17708
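/* Emit a Thumb IT instruction with condition COND and mask MASK.  If TO is
   NULL, a new two-byte slot is allocated in the current frag; otherwise the
   IT instruction already at TO is rewritten in place.  Returns the location
   of the instruction so that its mask can be updated later.  */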
17709static char *
17710output_it_inst (int cond, int mask, char * to)
17711{
17712  unsigned long instruction = 0xbf00;
17713
17714  mask &= 0xf;
17715  instruction |= mask;
17716  instruction |= cond << 4;
17717
17718  if (to == NULL)
17719    {
17720      to = frag_more (2);
17721#ifdef OBJ_ELF
17722      dwarf2_emit_insn (2);
17723#endif
17724    }
17725
17726  md_number_to_chars (to, instruction, 2);
17727
17728  return to;
17729}
17730
17731/* Tag values used in struct asm_opcode's tag field.  */
17732enum opcode_tag
17733{
17734  OT_unconditional,	/* Instruction cannot be conditionalized.
17735			   The ARM condition field is still 0xE.  */
17736  OT_unconditionalF,	/* Instruction cannot be conditionalized
17737			   and carries 0xF in its ARM condition field.  */
17738  OT_csuffix,		/* Instruction takes a conditional suffix.  */
17739  OT_csuffixF,		/* Some forms of the instruction take a conditional
17740			   suffix, others place 0xF where the condition field
17741			   would be.  */
17742  OT_cinfix3,		/* Instruction takes a conditional infix,
17743			   beginning at character index 3.  (In
17744			   unified mode, it becomes a suffix.)  */
17745  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
17746			    tsts, cmps, cmns, and teqs. */
17747  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
17748			   character index 3, even in unified mode.  Used for
17749			   legacy instructions where suffix and infix forms
17750			   may be ambiguous.  */
17751  OT_csuf_or_in3,	/* Instruction takes either a conditional
17752			   suffix or an infix at character index 3.  */
17753  OT_odd_infix_unc,	/* This is the unconditional variant of an
17754			   instruction that takes a conditional infix
17755			   at an unusual position.  In unified mode,
17756			   this variant will accept a suffix.  */
17757  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
17758			   are the conditional variants of instructions that
17759			   take conditional infixes in unusual positions.
17760			   The infix appears at character index
17761			   (tag - OT_odd_infix_0).  These are not accepted
17762			   in unified mode.  */
17763};
17764
17765/* Subroutine of md_assemble, responsible for looking up the primary
17766   opcode from the mnemonic the user wrote.  STR points to the
17767   beginning of the mnemonic.
17768
17769   This is not simply a hash table lookup, because of conditional
17770   variants.  Most instructions have conditional variants, which are
17771   expressed with a _conditional affix_ to the mnemonic.  If we were
17772   to encode each conditional variant as a literal string in the opcode
17773   table, it would have approximately 20,000 entries.
17774
17775   Most mnemonics take this affix as a suffix, and in unified syntax,
17776   'most' is upgraded to 'all'.  However, in the divided syntax, some
17777   instructions take the affix as an infix, notably the s-variants of
17778   the arithmetic instructions.  Of those instructions, all but six
17779   have the infix appear after the third character of the mnemonic.
17780
17781   Accordingly, the algorithm for looking up primary opcodes given
17782   an identifier is:
17783
17784   1. Look up the identifier in the opcode table.
17785      If we find a match, go to step U.
17786
17787   2. Look up the last two characters of the identifier in the
17788      conditions table.  If we find a match, look up the first N-2
17789      characters of the identifier in the opcode table.  If we
17790      find a match, go to step CE.
17791
17792   3. Look up the fourth and fifth characters of the identifier in
17793      the conditions table.  If we find a match, extract those
17794      characters from the identifier, and look up the remaining
17795      characters in the opcode table.  If we find a match, go
17796      to step CM.
17797
17798   4. Fail.
17799
17800   U. Examine the tag field of the opcode structure, in case this is
17801      one of the six instructions with its conditional infix in an
17802      unusual place.  If it is, the tag tells us where to find the
17803      infix; look it up in the conditions table and set inst.cond
17804      accordingly.  Otherwise, this is an unconditional instruction.
17805      Again set inst.cond accordingly.  Return the opcode structure.
17806
17807  CE. Examine the tag field to make sure this is an instruction that
17808      should receive a conditional suffix.  If it is not, fail.
17809      Otherwise, set inst.cond from the suffix we already looked up,
17810      and return the opcode structure.
17811
17812  CM. Examine the tag field to make sure this is an instruction that
17813      should receive a conditional infix after the third character.
17814      If it is not, fail.  Otherwise, undo the edits to the current
17815      line of input and proceed as for case CE.  */
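/* For example (illustrative): the divided-syntax form "addeqs" is resolved
   by step 3 / CM above (remove "eq" at character index 3 and look up
   "adds"), whereas the unified-syntax form "addseq" is resolved by step
   2 / CE (match the trailing "eq" and look up "adds").  */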
17816
17817static const struct asm_opcode *
17818opcode_lookup (char **str)
17819{
17820  char *end, *base;
17821  char *affix;
17822  const struct asm_opcode *opcode;
17823  const struct asm_cond *cond;
17824  char save[2];
17825
17826  /* Scan up to the end of the mnemonic, which must end in white space,
17827     '.' (in unified mode, or for Neon/VFP instructions), or end of string.  */
17828  for (base = end = *str; *end != '\0'; end++)
17829    if (*end == ' ' || *end == '.')
17830      break;
17831
17832  if (end == base)
17833    return NULL;
17834
17835  /* Handle a possible width suffix and/or Neon type suffix.  */
17836  if (end[0] == '.')
17837    {
17838      int offset = 2;
17839
17840      /* The .w and .n suffixes are only valid if the unified syntax is in
17841	 use.  */
17842      if (unified_syntax && end[1] == 'w')
17843	inst.size_req = 4;
17844      else if (unified_syntax && end[1] == 'n')
17845	inst.size_req = 2;
17846      else
17847	offset = 0;
17848
17849      inst.vectype.elems = 0;
17850
17851      *str = end + offset;
17852
17853      if (end[offset] == '.')
17854	{
17855	  /* See if we have a Neon type suffix (possible in either unified or
17856	     non-unified ARM syntax mode).  */
17857	  if (parse_neon_type (&inst.vectype, str) == FAIL)
17858	    return NULL;
17859	}
17860      else if (end[offset] != '\0' && end[offset] != ' ')
17861	return NULL;
17862    }
17863  else
17864    *str = end;
17865
17866  /* Look for unaffixed or special-case affixed mnemonic.  */
17867  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17868						    end - base);
17869  if (opcode)
17870    {
17871      /* step U */
17872      if (opcode->tag < OT_odd_infix_0)
17873	{
17874	  inst.cond = COND_ALWAYS;
17875	  return opcode;
17876	}
17877
17878      if (warn_on_deprecated && unified_syntax)
17879	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17880      affix = base + (opcode->tag - OT_odd_infix_0);
17881      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17882      gas_assert (cond);
17883
17884      inst.cond = cond->value;
17885      return opcode;
17886    }
17887
  /* Cannot have a conditional suffix on a mnemonic of less than three
     characters: the suffix itself is two characters long.  */
17890  if (end - base < 3)
17891    return NULL;
17892
17893  /* Look for suffixed mnemonic.  */
17894  affix = end - 2;
17895  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17896  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17897						    affix - base);
17898  if (opcode && cond)
17899    {
17900      /* step CE */
17901      switch (opcode->tag)
17902	{
17903	case OT_cinfix3_legacy:
17904	  /* Ignore conditional suffixes matched on infix only mnemonics.  */
17905	  break;
17906
17907	case OT_cinfix3:
17908	case OT_cinfix3_deprecated:
17909	case OT_odd_infix_unc:
17910	  if (!unified_syntax)
17911	    return 0;
17912	  /* Fall through.  */
17913
17914	case OT_csuffix:
17915	case OT_csuffixF:
17916	case OT_csuf_or_in3:
17917	  inst.cond = cond->value;
17918	  return opcode;
17919
17920	case OT_unconditional:
17921	case OT_unconditionalF:
17922	  if (thumb_mode)
17923	    inst.cond = cond->value;
17924	  else
17925	    {
17926	      /* Delayed diagnostic.  */
17927	      inst.error = BAD_COND;
17928	      inst.cond = COND_ALWAYS;
17929	    }
17930	  return opcode;
17931
17932	default:
17933	  return NULL;
17934	}
17935    }
17936
17937  /* Cannot have a usual-position infix on a mnemonic of less than
17938     six characters (five would be a suffix).  */
17939  if (end - base < 6)
17940    return NULL;
17941
17942  /* Look for infixed mnemonic in the usual position.  */
17943  affix = base + 3;
17944  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17945  if (!cond)
17946    return NULL;
17947
17948  memcpy (save, affix, 2);
17949  memmove (affix, affix + 2, (end - affix) - 2);
17950  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17951						    (end - base) - 2);
17952  memmove (affix + 2, affix, (end - affix) - 2);
17953  memcpy (affix, save, 2);
17954
17955  if (opcode
17956      && (opcode->tag == OT_cinfix3
17957	  || opcode->tag == OT_cinfix3_deprecated
17958	  || opcode->tag == OT_csuf_or_in3
17959	  || opcode->tag == OT_cinfix3_legacy))
17960    {
17961      /* Step CM.  */
17962      if (warn_on_deprecated && unified_syntax
17963	  && (opcode->tag == OT_cinfix3
17964	      || opcode->tag == OT_cinfix3_deprecated))
17965	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17966
17967      inst.cond = cond->value;
17968      return opcode;
17969    }
17970
17971  return NULL;
17972}
17973
17974/* This function generates an initial IT instruction, leaving its block
17975   virtually open for the new instructions. Eventually,
17976   the mask will be updated by now_it_add_mask () each time
17977   a new instruction needs to be included in the IT block.
17978   Finally, the block is closed with close_automatic_it_block ().
   The block closure can be requested from md_assemble (), from a
   tencode () routine, or by a label hook.  */
17981
17982static void
17983new_automatic_it_block (int cond)
17984{
17985  now_it.state = AUTOMATIC_IT_BLOCK;
17986  now_it.mask = 0x18;
17987  now_it.cc = cond;
17988  now_it.block_length = 1;
17989  mapping_state (MAP_THUMB);
17990  now_it.insn = output_it_inst (cond, now_it.mask, NULL);
17991  now_it.warn_deprecated = FALSE;
17992  now_it.insn_cond = TRUE;
17993}
17994
17995/* Close an automatic IT block.
17996   See comments in new_automatic_it_block ().  */
17997
17998static void
17999close_automatic_it_block (void)
18000{
18001  now_it.mask = 0x10;
18002  now_it.block_length = 0;
18003}
18004
18005/* Update the mask of the current automatically-generated IT
18006   instruction. See comments in new_automatic_it_block ().  */
18007
18008static void
18009now_it_add_mask (int cond)
18010{
18011#define CLEAR_BIT(value, nbit)  ((value) & ~(1 << (nbit)))
18012#define SET_BIT_VALUE(value, bitvalue, nbit)  (CLEAR_BIT (value, nbit) \
18013					      | ((bitvalue) << (nbit)))
18014  const int resulting_bit = (cond & 1);
18015
18016  now_it.mask &= 0xf;
18017  now_it.mask = SET_BIT_VALUE (now_it.mask,
18018				   resulting_bit,
18019				  (5 - now_it.block_length));
18020  now_it.mask = SET_BIT_VALUE (now_it.mask,
18021				   1,
18022				   ((5 - now_it.block_length) - 1) );
18023  output_it_inst (now_it.cc, now_it.mask, now_it.insn);
18024
18025#undef CLEAR_BIT
18026#undef SET_BIT_VALUE
18027}
18028
/* The IT block handling machinery is accessed through these functions:
18030     it_fsm_pre_encode ()               from md_assemble ()
18031     set_it_insn_type ()                optional, from the tencode functions
18032     set_it_insn_type_last ()           ditto
18033     in_it_block ()                     ditto
18034     it_fsm_post_encode ()              from md_assemble ()
     force_automatic_it_block_close ()  from label handling functions
18036
18037   Rationale:
18038     1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
18039	initializing the IT insn type with a generic initial value depending
	on inst.cond.
18041     2) During the tencode function, two things may happen:
18042	a) The tencode function overrides the IT insn type by
18043	   calling either set_it_insn_type (type) or set_it_insn_type_last ().
18044	b) The tencode function queries the IT block state by
18045	   calling in_it_block () (i.e. to determine narrow/not narrow mode).
18046
18047	Both set_it_insn_type and in_it_block run the internal FSM state
18048	handling function (handle_it_state), because: a) setting the IT insn
	type may result in an invalid state (exiting the function),
18050	and b) querying the state requires the FSM to be updated.
18051	Specifically we want to avoid creating an IT block for conditional
18052	branches, so it_fsm_pre_encode is actually a guess and we can't
18053	determine whether an IT block is required until the tencode () routine
	has decided what type of instruction this actually is.
18055	Because of this, if set_it_insn_type and in_it_block have to be used,
18056	set_it_insn_type has to be called first.
18057
18058	set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
18059	determines the insn IT type depending on the inst.cond code.
18060	When a tencode () routine encodes an instruction that can be
18061	either outside an IT block, or, in the case of being inside, has to be
18062	the last one, set_it_insn_type_last () will determine the proper
18063	IT instruction type based on the inst.cond code. Otherwise,
18064	set_it_insn_type can be called for overriding that logic or
18065	for covering other cases.
18066
18067	Calling handle_it_state () may not transition the IT block state to
	OUTSIDE_IT_BLOCK immediately, since the (current) state could still
	be queried.  Instead, if the FSM determines that the state should
	be transitioned to OUTSIDE_IT_BLOCK, the transition is deferred until
	after the tencode () function: that's what it_fsm_post_encode () does.
18072
18073	Since in_it_block () calls the state handling function to get an
	updated state, an error may occur (due to an invalid insn combination).
18075	In that case, inst.error is set.
18076	Therefore, inst.error has to be checked after the execution of
18077	the tencode () routine.
18078
18079     3) Back in md_assemble(), it_fsm_post_encode () is called to commit
18080	any pending state change (if any) that didn't take place in
18081	handle_it_state () as explained above.  */
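/* For example (illustrative), with implicit IT generation enabled for Thumb
   (e.g. -mimplicit-it=thumb), a bare "moveq r0, #1" assembled outside an IT
   block causes new_automatic_it_block () to emit an "it eq" in front of
   it.  */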
18082
18083static void
18084it_fsm_pre_encode (void)
18085{
18086  if (inst.cond != COND_ALWAYS)
18087    inst.it_insn_type = INSIDE_IT_INSN;
18088  else
18089    inst.it_insn_type = OUTSIDE_IT_INSN;
18090
18091  now_it.state_handled = 0;
18092}
18093
18094/* IT state FSM handling function.  */
18095
18096static int
18097handle_it_state (void)
18098{
18099  now_it.state_handled = 1;
18100  now_it.insn_cond = FALSE;
18101
18102  switch (now_it.state)
18103    {
18104    case OUTSIDE_IT_BLOCK:
18105      switch (inst.it_insn_type)
18106	{
18107	case OUTSIDE_IT_INSN:
18108	  break;
18109
18110	case INSIDE_IT_INSN:
18111	case INSIDE_IT_LAST_INSN:
18112	  if (thumb_mode == 0)
18113	    {
18114	      if (unified_syntax
18115		  && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
18116		as_tsktsk (_("Warning: conditional outside an IT block"\
18117			     " for Thumb."));
18118	    }
18119	  else
18120	    {
18121	      if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
18122		  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
18123		{
18124		  /* Automatically generate the IT instruction.  */
18125		  new_automatic_it_block (inst.cond);
18126		  if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
18127		    close_automatic_it_block ();
18128		}
18129	      else
18130		{
18131		  inst.error = BAD_OUT_IT;
18132		  return FAIL;
18133		}
18134	    }
18135	  break;
18136
18137	case IF_INSIDE_IT_LAST_INSN:
18138	case NEUTRAL_IT_INSN:
18139	  break;
18140
18141	case IT_INSN:
18142	  now_it.state = MANUAL_IT_BLOCK;
18143	  now_it.block_length = 0;
18144	  break;
18145	}
18146      break;
18147
18148    case AUTOMATIC_IT_BLOCK:
18149      /* Three things may happen now:
18150	 a) We should increment current it block size;
18151	 b) We should close current it block (closing insn or 4 insns);
18152	 c) We should close current it block and start a new one (due
18153	 to incompatible conditions or
18154	 4 insns-length block reached).  */
18155
18156      switch (inst.it_insn_type)
18157	{
18158	case OUTSIDE_IT_INSN:
18159	  /* The closure of the block shall happen immediately,
18160	     so any in_it_block () call reports the block as closed.  */
18161	  force_automatic_it_block_close ();
18162	  break;
18163
18164	case INSIDE_IT_INSN:
18165	case INSIDE_IT_LAST_INSN:
18166	case IF_INSIDE_IT_LAST_INSN:
18167	  now_it.block_length++;
18168
18169	  if (now_it.block_length > 4
18170	      || !now_it_compatible (inst.cond))
18171	    {
18172	      force_automatic_it_block_close ();
18173	      if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
18174		new_automatic_it_block (inst.cond);
18175	    }
18176	  else
18177	    {
18178	      now_it.insn_cond = TRUE;
18179	      now_it_add_mask (inst.cond);
18180	    }
18181
18182	  if (now_it.state == AUTOMATIC_IT_BLOCK
18183	      && (inst.it_insn_type == INSIDE_IT_LAST_INSN
18184		  || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
18185	    close_automatic_it_block ();
18186	  break;
18187
18188	case NEUTRAL_IT_INSN:
18189	  now_it.block_length++;
18190	  now_it.insn_cond = TRUE;
18191
18192	  if (now_it.block_length > 4)
18193	    force_automatic_it_block_close ();
18194	  else
18195	    now_it_add_mask (now_it.cc & 1);
18196	  break;
18197
18198	case IT_INSN:
18199	  close_automatic_it_block ();
18200	  now_it.state = MANUAL_IT_BLOCK;
18201	  break;
18202	}
18203      break;
18204
18205    case MANUAL_IT_BLOCK:
18206      {
18207	/* Check conditional suffixes.  */
18208	const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
18209	int is_last;
18210	now_it.mask <<= 1;
18211	now_it.mask &= 0x1f;
18212	is_last = (now_it.mask == 0x10);
18213	now_it.insn_cond = TRUE;
18214
18215	switch (inst.it_insn_type)
18216	  {
18217	  case OUTSIDE_IT_INSN:
18218	    inst.error = BAD_NOT_IT;
18219	    return FAIL;
18220
18221	  case INSIDE_IT_INSN:
18222	    if (cond != inst.cond)
18223	      {
18224		inst.error = BAD_IT_COND;
18225		return FAIL;
18226	      }
18227	    break;
18228
18229	  case INSIDE_IT_LAST_INSN:
18230	  case IF_INSIDE_IT_LAST_INSN:
18231	    if (cond != inst.cond)
18232	      {
18233		inst.error = BAD_IT_COND;
18234		return FAIL;
18235	      }
18236	    if (!is_last)
18237	      {
18238		inst.error = BAD_BRANCH;
18239		return FAIL;
18240	      }
18241	    break;
18242
18243	  case NEUTRAL_IT_INSN:
18244	    /* The BKPT instruction is unconditional even in an IT block.  */
18245	    break;
18246
18247	  case IT_INSN:
18248	    inst.error = BAD_IT_IT;
18249	    return FAIL;
18250	  }
18251      }
18252      break;
18253    }
18254
18255  return SUCCESS;
18256}
18257
18258struct depr_insn_mask
18259{
18260  unsigned long pattern;
18261  unsigned long mask;
18262  const char* description;
18263};
18264
18265/* List of 16-bit instruction patterns deprecated in an IT block in
18266   ARMv8.  */
18267static const struct depr_insn_mask depr_it_insns[] = {
18268  { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
18269  { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
18270  { 0xa000, 0xb800, N_("ADR") },
18271  { 0x4800, 0xf800, N_("Literal loads") },
18272  { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
18273  { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
  /* NOTE: 0x00dd is not the real encoding; instead, it is the 'tvalue'
     field in asm_opcode.  'tvalue' is used at the stage this check happens.  */
18276  { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
18277  { 0, 0, NULL }
18278};
18279
18280static void
18281it_fsm_post_encode (void)
18282{
18283  int is_last;
18284
18285  if (!now_it.state_handled)
18286    handle_it_state ();
18287
18288  if (now_it.insn_cond
18289      && !now_it.warn_deprecated
18290      && warn_on_deprecated
18291      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
18292    {
18293      if (inst.instruction >= 0x10000)
18294	{
18295	  as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
18296		     "deprecated in ARMv8"));
18297	  now_it.warn_deprecated = TRUE;
18298	}
18299      else
18300	{
18301	  const struct depr_insn_mask *p = depr_it_insns;
18302
18303	  while (p->mask != 0)
18304	    {
18305	      if ((inst.instruction & p->mask) == p->pattern)
18306		{
18307		  as_tsktsk (_("IT blocks containing 16-bit Thumb instructions "
18308			     "of the following class are deprecated in ARMv8: "
18309			     "%s"), p->description);
18310		  now_it.warn_deprecated = TRUE;
18311		  break;
18312		}
18313
18314	      ++p;
18315	    }
18316	}
18317
18318      if (now_it.block_length > 1)
18319	{
18320	  as_tsktsk (_("IT blocks containing more than one conditional "
18321		     "instruction are deprecated in ARMv8"));
18322	  now_it.warn_deprecated = TRUE;
18323	}
18324    }
18325
18326  is_last = (now_it.mask == 0x10);
18327  if (is_last)
18328    {
18329      now_it.state = OUTSIDE_IT_BLOCK;
18330      now_it.mask = 0;
18331    }
18332}
18333
18334static void
18335force_automatic_it_block_close (void)
18336{
18337  if (now_it.state == AUTOMATIC_IT_BLOCK)
18338    {
18339      close_automatic_it_block ();
18340      now_it.state = OUTSIDE_IT_BLOCK;
18341      now_it.mask = 0;
18342    }
18343}
18344
18345static int
18346in_it_block (void)
18347{
18348  if (!now_it.state_handled)
18349    handle_it_state ();
18350
18351  return now_it.state != OUTSIDE_IT_BLOCK;
18352}
18353
/* Whether OPCODE only has a T32 encoding.  Since this function is only used
   by t32_insn_ok, opcodes enabled by the v6t2 extension bit do not need to be
   listed here, hence the "known" in the function name.  */
18357
18358static bfd_boolean
18359known_t32_only_insn (const struct asm_opcode *opcode)
18360{
18361  /* Original Thumb-1 wide instruction.  */
18362  if (opcode->tencode == do_t_blx
18363      || opcode->tencode == do_t_branch23
18364      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
18365      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier))
18366    return TRUE;
18367
  /* Wide-only instructions added to ARMv8-M Baseline.  */
18369  if (ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v8m_m_only)
18370      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_atomics)
18371      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v6t2_v8m)
18372      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_div))
18373    return TRUE;
18374
18375  return FALSE;
18376}
18377
/* Whether the wide instruction variant of a valid OPCODE, if available, can
   be used in ARCH.  */
18380
18381static bfd_boolean
18382t32_insn_ok (arm_feature_set arch, const struct asm_opcode *opcode)
18383{
18384  if (known_t32_only_insn (opcode))
18385    return TRUE;
18386
18387  /* Instruction with narrow and wide encoding added to ARMv8-M.  Availability
18388     of variant T3 of B.W is checked in do_t_branch.  */
18389  if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
18390      && opcode->tencode == do_t_branch)
18391    return TRUE;
18392
  /* MOV accepts T1/T3 encodings under ARMv8-M Baseline; the T3 encoding is
     32-bit.  */
18394  if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
18395      && opcode->tencode == do_t_mov_cmp
18396      /* Make sure CMP instruction is not affected.  */
18397      && opcode->aencode == do_mov)
18398    return TRUE;
18399
18400  /* Wide instruction variants of all instructions with narrow *and* wide
18401     variants become available with ARMv6t2.  Other opcodes are either
18402     narrow-only or wide-only and are thus available if OPCODE is valid.  */
18403  if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v6t2))
18404    return TRUE;
18405
  /* OPCODE has only a narrow instruction variant, or its wide variant is not
     available.  */
18408  return FALSE;
18409}
18410
18411void
18412md_assemble (char *str)
18413{
18414  char *p = str;
18415  const struct asm_opcode * opcode;
18416
18417  /* Align the previous label if needed.  */
18418  if (last_label_seen != NULL)
18419    {
18420      symbol_set_frag (last_label_seen, frag_now);
18421      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
18422      S_SET_SEGMENT (last_label_seen, now_seg);
18423    }
18424
18425  memset (&inst, '\0', sizeof (inst));
18426  inst.reloc.type = BFD_RELOC_UNUSED;
18427
18428  opcode = opcode_lookup (&p);
18429  if (!opcode)
18430    {
18431      /* It wasn't an instruction, but it might be a register alias of
18432	 the form alias .req reg, or a Neon .dn/.qn directive.  */
18433      if (! create_register_alias (str, p)
18434	  && ! create_neon_reg_alias (str, p))
18435	as_bad (_("bad instruction `%s'"), str);
18436
18437      return;
18438    }
18439
18440  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
18441    as_tsktsk (_("s suffix on comparison instruction is deprecated"));
18442
18443  /* The value which unconditional instructions should have in place of the
18444     condition field.  */
18445  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;
18446
18447  if (thumb_mode)
18448    {
18449      arm_feature_set variant;
18450
18451      variant = cpu_variant;
18452      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
18453      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
18454	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
18455      /* Check that this instruction is supported for this CPU.  */
18456      if (!opcode->tvariant
18457	  || (thumb_mode == 1
18458	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
18459	{
18460	  as_bad (_("selected processor does not support `%s' in Thumb mode"), str);
18461	  return;
18462	}
18463      if (inst.cond != COND_ALWAYS && !unified_syntax
18464	  && opcode->tencode != do_t_branch)
18465	{
18466	  as_bad (_("Thumb does not support conditional execution"));
18467	  return;
18468	}
18469
      /* Two things are addressed here:
	 1) Implicitly require narrow instructions on Thumb-1.
	    This avoids relaxation accidentally introducing Thumb-2
	    instructions.
	 2) Reject wide instructions on non-Thumb-2 cores.

	 Only instructions with both narrow and wide variants need to be
	 handled, but selecting all non-wide-only instructions is easier.  */
18478      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2)
18479	  && !t32_insn_ok (variant, opcode))
18480	{
18481	  if (inst.size_req == 0)
18482	    inst.size_req = 2;
18483	  else if (inst.size_req == 4)
18484	    {
18485	      if (ARM_CPU_HAS_FEATURE (variant, arm_ext_v8m))
18486		as_bad (_("selected processor does not support 32bit wide "
18487			  "variant of instruction `%s'"), str);
18488	      else
18489		as_bad (_("selected processor does not support `%s' in "
18490			  "Thumb-2 mode"), str);
18491	      return;
18492	    }
18493	}
18494
18495      inst.instruction = opcode->tvalue;
18496
18497      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
18498	{
18499	  /* Prepare the it_insn_type for those encodings that don't set
18500	     it.  */
18501	  it_fsm_pre_encode ();
18502
18503	  opcode->tencode ();
18504
18505	  it_fsm_post_encode ();
18506	}
18507
18508      if (!(inst.error || inst.relax))
18509	{
18510	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
18511	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
18512	  if (inst.size_req && inst.size_req != inst.size)
18513	    {
18514	      as_bad (_("cannot honor width suffix -- `%s'"), str);
18515	      return;
18516	    }
18517	}
18518
18519      /* Something has gone badly wrong if we try to relax a fixed size
18520	 instruction.  */
18521      gas_assert (inst.size_req == 0 || !inst.relax);
18522
18523      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
18524			      *opcode->tvariant);
18525      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
18526	 set those bits when Thumb-2 32-bit instructions are seen.  The impact
18527	 of relaxable instructions will be considered later after we finish all
18528	 relaxation.  */
18529      if (ARM_FEATURE_CORE_EQUAL (cpu_variant, arm_arch_any))
18530	variant = arm_arch_none;
18531      else
18532	variant = cpu_variant;
18533      if (inst.size == 4 && !t32_insn_ok (variant, opcode))
18534	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
18535				arm_ext_v6t2);
18536
18537      check_neon_suffixes;
18538
18539      if (!inst.error)
18540	{
18541	  mapping_state (MAP_THUMB);
18542	}
18543    }
18544  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
18545    {
18546      bfd_boolean is_bx;
18547
18548      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
18549      is_bx = (opcode->aencode == do_bx);
18550
18551      /* Check that this instruction is supported for this CPU.  */
18552      if (!(is_bx && fix_v4bx)
18553	  && !(opcode->avariant &&
18554	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
18555	{
18556	  as_bad (_("selected processor does not support `%s' in ARM mode"), str);
18557	  return;
18558	}
18559      if (inst.size_req)
18560	{
18561	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
18562	  return;
18563	}
18564
18565      inst.instruction = opcode->avalue;
18566      if (opcode->tag == OT_unconditionalF)
18567	inst.instruction |= 0xFU << 28;
18568      else
18569	inst.instruction |= inst.cond << 28;
18570      inst.size = INSN_SIZE;
18571      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
18572	{
18573	  it_fsm_pre_encode ();
18574	  opcode->aencode ();
18575	  it_fsm_post_encode ();
18576	}
18577      /* Arm mode bx is marked as both v4T and v5 because it's still required
18578	 on a hypothetical non-thumb v5 core.  */
18579      if (is_bx)
18580	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
18581      else
18582	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
18583				*opcode->avariant);
18584
18585      check_neon_suffixes;
18586
18587      if (!inst.error)
18588	{
18589	  mapping_state (MAP_ARM);
18590	}
18591    }
18592  else
18593    {
18594      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
18595		"-- `%s'"), str);
18596      return;
18597    }
18598  output_inst (str);
18599}
18600
18601static void
18602check_it_blocks_finished (void)
18603{
18604#ifdef OBJ_ELF
18605  asection *sect;
18606
18607  for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
18608    if (seg_info (sect)->tc_segment_info_data.current_it.state
18609	== MANUAL_IT_BLOCK)
18610      {
18611	as_warn (_("section '%s' finished with an open IT block."),
18612		 sect->name);
18613      }
18614#else
18615  if (now_it.state == MANUAL_IT_BLOCK)
18616    as_warn (_("file finished with an open IT block."));
18617#endif
18618}
18619
18620/* Various frobbings of labels and their addresses.  */
18621
18622void
18623arm_start_line_hook (void)
18624{
18625  last_label_seen = NULL;
18626}
18627
18628void
18629arm_frob_label (symbolS * sym)
18630{
18631  last_label_seen = sym;
18632
18633  ARM_SET_THUMB (sym, thumb_mode);
18634
18635#if defined OBJ_COFF || defined OBJ_ELF
18636  ARM_SET_INTERWORK (sym, support_interwork);
18637#endif
18638
18639  force_automatic_it_block_close ();
18640
18641  /* Note - do not allow local symbols (.Lxxx) to be labelled
18642     as Thumb functions.  This is because these labels, whilst
18643     they exist inside Thumb code, are not the entry points for
18644     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  E.g. gcc
     can generate code that looks like this:
18647
18648		ldr  r2, [pc, .Laaa]
18649		lsl  r3, r3, #2
18650		ldr  r2, [r3, r2]
18651		mov  pc, r2
18652
18653       .Lbbb:  .word .Lxxx
18654       .Lccc:  .word .Lyyy
18655       ..etc...
       .Laaa:	.word .Lbbb
18657
18658     The first instruction loads the address of the jump table.
18659     The second instruction converts a table index into a byte offset.
18660     The third instruction gets the jump address out of the table.
18661     The fourth instruction performs the jump.
18662
18663     If the address stored at .Laaa is that of a symbol which has the
18664     Thumb_Func bit set, then the linker will arrange for this address
18665     to have the bottom bit set, which in turn would mean that the
18666     address computation performed by the third instruction would end
18667     up with the bottom bit set.  Since the ARM is capable of unaligned
18668     word loads, the instruction would then load the incorrect address
18669     out of the jump table, and chaos would ensue.  */
18670  if (label_is_thumb_function_name
18671      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
18672      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
18673    {
18674      /* When the address of a Thumb function is taken the bottom
18675	 bit of that address should be set.  This will allow
18676	 interworking between Arm and Thumb functions to work
18677	 correctly.  */
18678
18679      THUMB_SET_FUNC (sym, 1);
18680
18681      label_is_thumb_function_name = FALSE;
18682    }
18683
18684  dwarf2_emit_label (sym);
18685}
18686
18687bfd_boolean
18688arm_data_in_code (void)
18689{
18690  if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
18691    {
18692      *input_line_pointer = '/';
18693      input_line_pointer += 5;
18694      *input_line_pointer = 0;
18695      return TRUE;
18696    }
18697
18698  return FALSE;
18699}
18700
18701char *
18702arm_canonicalize_symbol_name (char * name)
18703{
18704  int len;
18705
18706  if (thumb_mode && (len = strlen (name)) > 5
18707      && streq (name + len - 5, "/data"))
18708    *(name + len - 5) = 0;
18709
18710  return name;
18711}
18712
18713/* Table of all register names defined by default.  The user can
18714   define additional names with .req.  Note that all register names
18715   should appear in both upper and lowercase variants.	Some registers
18716   also have mixed-case names.	*/
18717
18718#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
18719#define REGNUM(p,n,t) REGDEF(p##n, n, t)
18720#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
18721#define REGSET(p,t) \
18722  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
18723  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
18724  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
18725  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
18726#define REGSETH(p,t) \
18727  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
18728  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
18729  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
18730  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
18731#define REGSET2(p,t) \
18732  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
18733  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
18734  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
18735  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
18736#define SPLRBANK(base,bank,t) \
18737  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
18738  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
18739  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
18740  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
18741  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
18742  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
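
/* For illustration, derived from the macros above: REGNUM (r, 3, RN) expands
   to REGDEF (r3, 3, RN), i.e. the entry { "r3", 3, REG_TYPE_RN, TRUE, 0 },
   and REGSET (r, RN) emits the sixteen entries r0..r15 of type RN.  */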
18743
18744static const struct reg_entry reg_names[] =
18745{
18746  /* ARM integer registers.  */
18747  REGSET(r, RN), REGSET(R, RN),
18748
18749  /* ATPCS synonyms.  */
18750  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
18751  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
18752  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
18753
18754  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
18755  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
18756  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
18757
18758  /* Well-known aliases.  */
18759  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
18760  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
18761
18762  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
18763  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
18764
18765  /* Coprocessor numbers.  */
18766  REGSET(p, CP), REGSET(P, CP),
18767
18768  /* Coprocessor register numbers.  The "cr" variants are for backward
18769     compatibility.  */
18770  REGSET(c,  CN), REGSET(C, CN),
18771  REGSET(cr, CN), REGSET(CR, CN),
18772
18773  /* ARM banked registers.  */
18774  REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
18775  REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
18776  REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
18777  REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
18778  REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
18779  REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
18780  REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),
18781
18782  REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
18783  REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
18784  REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
18785  REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
18786  REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
18787  REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
18788  REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
18789  REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),
18790
18791  SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
18792  SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
18793  SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
18794  SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
18795  SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
18796  REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
18797  REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
18798  REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
18799  REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),
18800
18801  /* FPA registers.  */
18802  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
18803  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
18804
18805  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
18806  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
18807
18808  /* VFP SP registers.	*/
18809  REGSET(s,VFS),  REGSET(S,VFS),
18810  REGSETH(s,VFS), REGSETH(S,VFS),
18811
18812  /* VFP DP Registers.	*/
18813  REGSET(d,VFD),  REGSET(D,VFD),
18814  /* Extra Neon DP registers.  */
18815  REGSETH(d,VFD), REGSETH(D,VFD),
18816
18817  /* Neon QP registers.  */
18818  REGSET2(q,NQ),  REGSET2(Q,NQ),
18819
18820  /* VFP control registers.  */
18821  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
18822  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
18823  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
18824  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
18825  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
18826  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
18827  REGDEF(mvfr2,5,VFC), REGDEF(MVFR2,5,VFC),
18828
18829  /* Maverick DSP coprocessor registers.  */
18830  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
18831  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),
18832
18833  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
18834  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
18835  REGDEF(dspsc,0,DSPSC),
18836
18837  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
18838  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
18839  REGDEF(DSPSC,0,DSPSC),
18840
18841  /* iWMMXt data registers - p0, c0-15.	 */
18842  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
18843
18844  /* iWMMXt control registers - p1, c0-3.  */
18845  REGDEF(wcid,	0,MMXWC),  REGDEF(wCID,	 0,MMXWC),  REGDEF(WCID,  0,MMXWC),
18846  REGDEF(wcon,	1,MMXWC),  REGDEF(wCon,	 1,MMXWC),  REGDEF(WCON,  1,MMXWC),
18847  REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
18848  REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),
18849
18850  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
18851  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
18852  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
18853  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
18854  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),
18855
18856  /* XScale accumulator registers.  */
18857  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
18858};
18859#undef REGDEF
18860#undef REGNUM
18861#undef REGSET
18862
18863/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
18864   within psr_required_here.  */
18865static const struct asm_psr psrs[] =
18866{
18867  /* Backward compatibility notation.  Note that "all" is no longer
18868     truly all possible PSR bits.  */
18869  {"all",  PSR_c | PSR_f},
18870  {"flg",  PSR_f},
18871  {"ctl",  PSR_c},
18872
18873  /* Individual flags.	*/
18874  {"f",	   PSR_f},
18875  {"c",	   PSR_c},
18876  {"x",	   PSR_x},
18877  {"s",	   PSR_s},
18878
18879  /* Combinations of flags.  */
18880  {"fs",   PSR_f | PSR_s},
18881  {"fx",   PSR_f | PSR_x},
18882  {"fc",   PSR_f | PSR_c},
18883  {"sf",   PSR_s | PSR_f},
18884  {"sx",   PSR_s | PSR_x},
18885  {"sc",   PSR_s | PSR_c},
18886  {"xf",   PSR_x | PSR_f},
18887  {"xs",   PSR_x | PSR_s},
18888  {"xc",   PSR_x | PSR_c},
18889  {"cf",   PSR_c | PSR_f},
18890  {"cs",   PSR_c | PSR_s},
18891  {"cx",   PSR_c | PSR_x},
18892  {"fsx",  PSR_f | PSR_s | PSR_x},
18893  {"fsc",  PSR_f | PSR_s | PSR_c},
18894  {"fxs",  PSR_f | PSR_x | PSR_s},
18895  {"fxc",  PSR_f | PSR_x | PSR_c},
18896  {"fcs",  PSR_f | PSR_c | PSR_s},
18897  {"fcx",  PSR_f | PSR_c | PSR_x},
18898  {"sfx",  PSR_s | PSR_f | PSR_x},
18899  {"sfc",  PSR_s | PSR_f | PSR_c},
18900  {"sxf",  PSR_s | PSR_x | PSR_f},
18901  {"sxc",  PSR_s | PSR_x | PSR_c},
18902  {"scf",  PSR_s | PSR_c | PSR_f},
18903  {"scx",  PSR_s | PSR_c | PSR_x},
18904  {"xfs",  PSR_x | PSR_f | PSR_s},
18905  {"xfc",  PSR_x | PSR_f | PSR_c},
18906  {"xsf",  PSR_x | PSR_s | PSR_f},
18907  {"xsc",  PSR_x | PSR_s | PSR_c},
18908  {"xcf",  PSR_x | PSR_c | PSR_f},
18909  {"xcs",  PSR_x | PSR_c | PSR_s},
18910  {"cfs",  PSR_c | PSR_f | PSR_s},
18911  {"cfx",  PSR_c | PSR_f | PSR_x},
18912  {"csf",  PSR_c | PSR_s | PSR_f},
18913  {"csx",  PSR_c | PSR_s | PSR_x},
18914  {"cxf",  PSR_c | PSR_x | PSR_f},
18915  {"cxs",  PSR_c | PSR_x | PSR_s},
18916  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
18917  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
18918  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
18919  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
18920  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
18921  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
18922  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
18923  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
18924  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
18925  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
18926  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
18927  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
18928  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
18929  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
18930  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
18931  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
18932  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
18933  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
18934  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
18935  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
18936  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
18937  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
18938  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
18939  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
18940};
18941
18942/* Table of V7M psr names.  */
18943static const struct asm_psr v7m_psrs[] =
18944{
18945  {"apsr",	   0x0 }, {"APSR",	   0x0 },
18946  {"iapsr",	   0x1 }, {"IAPSR",	   0x1 },
18947  {"eapsr",	   0x2 }, {"EAPSR",	   0x2 },
18948  {"psr",	   0x3 }, {"PSR",	   0x3 },
18949  {"xpsr",	   0x3 }, {"XPSR",	   0x3 }, {"xPSR",	  3 },
18950  {"ipsr",	   0x5 }, {"IPSR",	   0x5 },
18951  {"epsr",	   0x6 }, {"EPSR",	   0x6 },
18952  {"iepsr",	   0x7 }, {"IEPSR",	   0x7 },
18953  {"msp",	   0x8 }, {"MSP",	   0x8 },
18954  {"psp",	   0x9 }, {"PSP",	   0x9 },
18955  {"msplim",	   0xa }, {"MSPLIM",	   0xa },
18956  {"psplim",	   0xb }, {"PSPLIM",	   0xb },
18957  {"primask",	   0x10}, {"PRIMASK",	   0x10},
18958  {"basepri",	   0x11}, {"BASEPRI",	   0x11},
18959  {"basepri_max",  0x12}, {"BASEPRI_MAX",  0x12},
18960  {"faultmask",	   0x13}, {"FAULTMASK",	   0x13},
18961  {"control",	   0x14}, {"CONTROL",	   0x14},
18962  {"msp_ns",	   0x88}, {"MSP_NS",	   0x88},
18963  {"psp_ns",	   0x89}, {"PSP_NS",	   0x89},
18964  {"msplim_ns",	   0x8a}, {"MSPLIM_NS",	   0x8a},
18965  {"psplim_ns",	   0x8b}, {"PSPLIM_NS",	   0x8b},
18966  {"primask_ns",   0x90}, {"PRIMASK_NS",   0x90},
18967  {"basepri_ns",   0x91}, {"BASEPRI_NS",   0x91},
18968  {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
18969  {"control_ns",   0x94}, {"CONTROL_NS",   0x94},
18970  {"sp_ns",	   0x98}, {"SP_NS",	   0x98 }
18971};
18972
18973/* Table of all shift-in-operand names.	 */
18974static const struct asm_shift_name shift_names [] =
18975{
18976  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
18977  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
18978  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
18979  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
18980  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
18981  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX }
18982};
18983
18984/* Table of all explicit relocation names.  */
18985#ifdef OBJ_ELF
18986static struct reloc_entry reloc_names[] =
18987{
18988  { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
18989  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
18990  { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
18991  { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
18992  { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
18993  { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
18994  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},  { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
18995  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
18996  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
18997  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},  { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
18998  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},  { "TPOFF",   BFD_RELOC_ARM_TLS_LE32},
18999  { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
19000  { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
19001	{ "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
19002  { "tlscall", BFD_RELOC_ARM_TLS_CALL},
19003	{ "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
19004  { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
19005	{ "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ}
19006};
19007#endif
19008
19009/* Table of all conditional affixes.  0xF is not defined as a condition code.  */
19010static const struct asm_cond conds[] =
19011{
19012  {"eq", 0x0},
19013  {"ne", 0x1},
19014  {"cs", 0x2}, {"hs", 0x2},
19015  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
19016  {"mi", 0x4},
19017  {"pl", 0x5},
19018  {"vs", 0x6},
19019  {"vc", 0x7},
19020  {"hi", 0x8},
19021  {"ls", 0x9},
19022  {"ge", 0xa},
19023  {"lt", 0xb},
19024  {"gt", 0xc},
19025  {"le", 0xd},
19026  {"al", 0xe}
19027};
19028
19029#define UL_BARRIER(L,U,CODE,FEAT) \
19030  { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
19031  { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }
19032
19033static struct asm_barrier_opt barrier_opt_names[] =
19034{
19035  UL_BARRIER ("sy",	"SY",	 0xf, ARM_EXT_BARRIER),
19036  UL_BARRIER ("st",	"ST",	 0xe, ARM_EXT_BARRIER),
19037  UL_BARRIER ("ld",	"LD",	 0xd, ARM_EXT_V8),
19038  UL_BARRIER ("ish",	"ISH",	 0xb, ARM_EXT_BARRIER),
19039  UL_BARRIER ("sh",	"SH",	 0xb, ARM_EXT_BARRIER),
19040  UL_BARRIER ("ishst",	"ISHST", 0xa, ARM_EXT_BARRIER),
19041  UL_BARRIER ("shst",	"SHST",	 0xa, ARM_EXT_BARRIER),
19042  UL_BARRIER ("ishld",	"ISHLD", 0x9, ARM_EXT_V8),
19043  UL_BARRIER ("un",	"UN",	 0x7, ARM_EXT_BARRIER),
19044  UL_BARRIER ("nsh",	"NSH",	 0x7, ARM_EXT_BARRIER),
19045  UL_BARRIER ("unst",	"UNST",	 0x6, ARM_EXT_BARRIER),
19046  UL_BARRIER ("nshst",	"NSHST", 0x6, ARM_EXT_BARRIER),
19047  UL_BARRIER ("nshld",	"NSHLD", 0x5, ARM_EXT_V8),
19048  UL_BARRIER ("osh",	"OSH",	 0x3, ARM_EXT_BARRIER),
19049  UL_BARRIER ("oshst",	"OSHST", 0x2, ARM_EXT_BARRIER),
19050  UL_BARRIER ("oshld",	"OSHLD", 0x1, ARM_EXT_V8)
19051};
19052
19053#undef UL_BARRIER
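
/* For illustration: in "dsb ish" the option "ish" is looked up in the table
   above and supplies barrier option value 0xb (inner shareable, reads and
   writes).  */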
19054
19055/* Table of ARM-format instructions.	*/
19056
19057/* Macros for gluing together operand strings.  N.B. In all cases
19058   other than OPS0, the trailing OP_stop comes from default
19059   zero-initialization of the unspecified elements of the array.  */
19060#define OPS0()		  { OP_stop, }
19061#define OPS1(a)		  { OP_##a, }
19062#define OPS2(a,b)	  { OP_##a,OP_##b, }
19063#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
19064#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
19065#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
19066#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
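
/* For illustration: OPS3 (RR, oRR, SH) expands to { OP_RR, OP_oRR, OP_SH, },
   with the trailing OP_stop (value zero) supplied implicitly by the
   zero-initialization noted above.  */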
19067
19068/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
19069   This is useful when mixing operands for ARM and THUMB, i.e. using the
19070   MIX_ARM_THUMB_OPERANDS macro.
19071   In order to use these macros, prefix the number of operands with _
19072   e.g. _3.  */
19073#define OPS_1(a)	   { a, }
19074#define OPS_2(a,b)	   { a,b, }
19075#define OPS_3(a,b,c)	   { a,b,c, }
19076#define OPS_4(a,b,c,d)	   { a,b,c,d, }
19077#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
19078#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
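
/* For illustration: the "str" entry in the table below passes _2 as its
   operand count, so OPS_2 is selected and its operands, which are written
   with explicit OP_ prefixes and MIX_ARM_THUMB_OPERANDS, are inserted
   without further prefixing.  */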
19079
19080/* These macros abstract out the exact format of the mnemonic table and
19081   save some repeated characters.  */
19082
19083/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
19084#define TxCE(mnem, op, top, nops, ops, ae, te) \
19085  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
19086    THUMB_VARIANT, do_##ae, do_##te }
19087
19088/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
19089   a T_MNEM_xyz enumerator.  */
19090#define TCE(mnem, aop, top, nops, ops, ae, te) \
19091      TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
19092#define tCE(mnem, aop, top, nops, ops, ae, te) \
19093      TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)
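
/* For illustration: TCE ("swi", f000000, df00, 1, (EXPi), swi, t_swi) in the
   table below produces an entry with ARM opcode 0xf000000, Thumb opcode
   0xdf00 and encoders do_swi/do_t_swi, whereas tCE ("b", a000000, _b, ...)
   stores the enumerator T_MNEM_b in the Thumb opcode field.  */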
19094
19095/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
19096   infix after the third character.  */
19097#define TxC3(mnem, op, top, nops, ops, ae, te) \
19098  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
19099    THUMB_VARIANT, do_##ae, do_##te }
19100#define TxC3w(mnem, op, top, nops, ops, ae, te) \
19101  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
19102    THUMB_VARIANT, do_##ae, do_##te }
19103#define TC3(mnem, aop, top, nops, ops, ae, te) \
19104      TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
19105#define TC3w(mnem, aop, top, nops, ops, ae, te) \
19106      TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
19107#define tC3(mnem, aop, top, nops, ops, ae, te) \
19108      TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
19109#define tC3w(mnem, aop, top, nops, ops, ae, te) \
19110      TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)
19111
19112/* Mnemonic that cannot be conditionalized.  The ARM condition-code
19113   field is still 0xE.  Many of the Thumb variants can be executed
19114   conditionally, so this is checked separately.  */
19115#define TUE(mnem, op, top, nops, ops, ae, te)				\
19116  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
19117    THUMB_VARIANT, do_##ae, do_##te }
19118
19119/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
19120   Used by mnemonics that have very minimal differences in the encoding for
19121   ARM and Thumb variants and can be handled in a common function.  */
19122#define TUEc(mnem, op, top, nops, ops, en) \
19123  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
19124    THUMB_VARIANT, do_##en, do_##en }
19125
19126/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
19127   condition code field.  */
19128#define TUF(mnem, op, top, nops, ops, ae, te)				\
19129  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
19130    THUMB_VARIANT, do_##ae, do_##te }
19131
19132/* ARM-only variants of all the above.  */
19133#define CE(mnem,  op, nops, ops, ae)	\
19134  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
19135
19136#define C3(mnem, op, nops, ops, ae)	\
19137  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
19138
19139/* Legacy mnemonics that always have conditional infix after the third
19140   character.  */
19141#define CL(mnem, op, nops, ops, ae)	\
19142  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
19143    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
19144
19145/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
19146#define cCE(mnem,  op, nops, ops, ae)	\
19147  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
19148
19149/* Legacy coprocessor instructions where conditional infix and conditional
19150   suffix are ambiguous.  For consistency this includes all FPA instructions,
19151   not just the potentially ambiguous ones.  */
19152#define cCL(mnem, op, nops, ops, ae)	\
19153  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
19154    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
19155
19156/* Coprocessor, takes either a suffix or a position-3 infix
19157   (for an FPA corner case). */
19158#define C3E(mnem, op, nops, ops, ae) \
19159  { mnem, OPS##nops ops, OT_csuf_or_in3, \
19160    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
19161
19162#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
19163  { m1 #m2 m3, OPS##nops ops, \
19164    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
19165    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
19166
19167#define CM(m1, m2, op, nops, ops, ae)	\
19168  xCM_ (m1,   , m2, op, nops, ops, ae),	\
19169  xCM_ (m1, eq, m2, op, nops, ops, ae),	\
19170  xCM_ (m1, ne, m2, op, nops, ops, ae),	\
19171  xCM_ (m1, cs, m2, op, nops, ops, ae),	\
19172  xCM_ (m1, hs, m2, op, nops, ops, ae),	\
19173  xCM_ (m1, cc, m2, op, nops, ops, ae),	\
19174  xCM_ (m1, ul, m2, op, nops, ops, ae),	\
19175  xCM_ (m1, lo, m2, op, nops, ops, ae),	\
19176  xCM_ (m1, mi, m2, op, nops, ops, ae),	\
19177  xCM_ (m1, pl, m2, op, nops, ops, ae),	\
19178  xCM_ (m1, vs, m2, op, nops, ops, ae),	\
19179  xCM_ (m1, vc, m2, op, nops, ops, ae),	\
19180  xCM_ (m1, hi, m2, op, nops, ops, ae),	\
19181  xCM_ (m1, ls, m2, op, nops, ops, ae),	\
19182  xCM_ (m1, ge, m2, op, nops, ops, ae),	\
19183  xCM_ (m1, lt, m2, op, nops, ops, ae),	\
19184  xCM_ (m1, gt, m2, op, nops, ops, ae),	\
19185  xCM_ (m1, le, m2, op, nops, ops, ae),	\
19186  xCM_ (m1, al, m2, op, nops, ops, ae)
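
/* For illustration: CM ("smull", "s", 0d00090, ...) in the table below
   generates nineteen entries, "smulls" plus one per condition infix
   ("smulleqs", "smullnes", ..., "smullals"), all sharing the same ARM-only
   encoding.  */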
19187
19188#define UE(mnem, op, nops, ops, ae)	\
19189  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
19190
19191#define UF(mnem, op, nops, ops, ae)	\
19192  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
19193
19194/* Neon data-processing. ARM versions are unconditional with cond=0xf.
19195   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
19196   use the same encoding function for each.  */
19197#define NUF(mnem, op, nops, ops, enc)					\
19198  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
19199    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
19200
19201/* Neon data processing, version which indirects through neon_enc_tab for
19202   the various overloaded versions of opcodes.  */
19203#define nUF(mnem, op, nops, ops, enc)					\
19204  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
19205    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
19206
19207/* Neon insn with conditional suffix for the ARM version, non-overloaded
19208   version.  */
19209#define NCE_tag(mnem, op, nops, ops, enc, tag)				\
19210  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,		\
19211    THUMB_VARIANT, do_##enc, do_##enc }
19212
19213#define NCE(mnem, op, nops, ops, enc)					\
19214   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
19215
19216#define NCEF(mnem, op, nops, ops, enc)					\
19217    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
19218
19219/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
19220#define nCE_tag(mnem, op, nops, ops, enc, tag)				\
19221  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,		\
19222    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
19223
19224#define nCE(mnem, op, nops, ops, enc)					\
19225   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
19226
19227#define nCEF(mnem, op, nops, ops, enc)					\
19228    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
19229
19230#define do_0 0
19231
19232static const struct asm_opcode insns[] =
19233{
19234#define ARM_VARIANT    & arm_ext_v1 /* Core ARM Instructions.  */
19235#define THUMB_VARIANT  & arm_ext_v4t
19236 tCE("and",	0000000, _and,     3, (RR, oRR, SH), arit, t_arit3c),
19237 tC3("ands",	0100000, _ands,	   3, (RR, oRR, SH), arit, t_arit3c),
19238 tCE("eor",	0200000, _eor,	   3, (RR, oRR, SH), arit, t_arit3c),
19239 tC3("eors",	0300000, _eors,	   3, (RR, oRR, SH), arit, t_arit3c),
19240 tCE("sub",	0400000, _sub,	   3, (RR, oRR, SH), arit, t_add_sub),
19241 tC3("subs",	0500000, _subs,	   3, (RR, oRR, SH), arit, t_add_sub),
19242 tCE("add",	0800000, _add,	   3, (RR, oRR, SHG), arit, t_add_sub),
19243 tC3("adds",	0900000, _adds,	   3, (RR, oRR, SHG), arit, t_add_sub),
19244 tCE("adc",	0a00000, _adc,	   3, (RR, oRR, SH), arit, t_arit3c),
19245 tC3("adcs",	0b00000, _adcs,	   3, (RR, oRR, SH), arit, t_arit3c),
19246 tCE("sbc",	0c00000, _sbc,	   3, (RR, oRR, SH), arit, t_arit3),
19247 tC3("sbcs",	0d00000, _sbcs,	   3, (RR, oRR, SH), arit, t_arit3),
19248 tCE("orr",	1800000, _orr,	   3, (RR, oRR, SH), arit, t_arit3c),
19249 tC3("orrs",	1900000, _orrs,	   3, (RR, oRR, SH), arit, t_arit3c),
19250 tCE("bic",	1c00000, _bic,	   3, (RR, oRR, SH), arit, t_arit3),
19251 tC3("bics",	1d00000, _bics,	   3, (RR, oRR, SH), arit, t_arit3),
19252
19253 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
19254    for setting PSR flag bits.  They are obsolete in V6 and do not
19255    have Thumb equivalents. */
19256 tCE("tst",	1100000, _tst,	   2, (RR, SH),      cmp,  t_mvn_tst),
19257 tC3w("tsts",	1100000, _tst,	   2, (RR, SH),      cmp,  t_mvn_tst),
19258  CL("tstp",	110f000,     	   2, (RR, SH),      cmp),
19259 tCE("cmp",	1500000, _cmp,	   2, (RR, SH),      cmp,  t_mov_cmp),
19260 tC3w("cmps",	1500000, _cmp,	   2, (RR, SH),      cmp,  t_mov_cmp),
19261  CL("cmpp",	150f000,     	   2, (RR, SH),      cmp),
19262 tCE("cmn",	1700000, _cmn,	   2, (RR, SH),      cmp,  t_mvn_tst),
19263 tC3w("cmns",	1700000, _cmn,	   2, (RR, SH),      cmp,  t_mvn_tst),
19264  CL("cmnp",	170f000,     	   2, (RR, SH),      cmp),
19265
19266 tCE("mov",	1a00000, _mov,	   2, (RR, SH),      mov,  t_mov_cmp),
19267 tC3("movs",	1b00000, _movs,	   2, (RR, SHG),     mov,  t_mov_cmp),
19268 tCE("mvn",	1e00000, _mvn,	   2, (RR, SH),      mov,  t_mvn_tst),
19269 tC3("mvns",	1f00000, _mvns,	   2, (RR, SH),      mov,  t_mvn_tst),
19270
19271 tCE("ldr",	4100000, _ldr,	   2, (RR, ADDRGLDR),ldst, t_ldst),
19272 tC3("ldrb",	4500000, _ldrb,	   2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
19273 tCE("str",	4000000, _str,	   _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
19274								OP_RRnpc),
19275					OP_ADDRGLDR),ldst, t_ldst),
19276 tC3("strb",	4400000, _strb,	   2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
19277
19278 tCE("stm",	8800000, _stmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
19279 tC3("stmia",	8800000, _stmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
19280 tC3("stmea",	8800000, _stmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
19281 tCE("ldm",	8900000, _ldmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
19282 tC3("ldmia",	8900000, _ldmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
19283 tC3("ldmfd",	8900000, _ldmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
19284
19285 TCE("swi",	f000000, df00,     1, (EXPi),        swi, t_swi),
19286 TCE("svc",	f000000, df00,     1, (EXPi),        swi, t_swi),
19287 tCE("b",	a000000, _b,	   1, (EXPr),	     branch, t_branch),
19288 TCE("bl",	b000000, f000f800, 1, (EXPr),	     bl, t_branch23),
19289
19290  /* Pseudo ops.  */
19291 tCE("adr",	28f0000, _adr,	   2, (RR, EXP),     adr,  t_adr),
19292  C3(adrl,	28f0000,           2, (RR, EXP),     adrl),
19293 tCE("nop",	1a00000, _nop,	   1, (oI255c),	     nop,  t_nop),
19294 tCE("udf",	7f000f0, _udf,     1, (oIffffb),     bkpt, t_udf),
19295
19296  /* Thumb-compatibility pseudo ops.  */
19297 tCE("lsl",	1a00000, _lsl,	   3, (RR, oRR, SH), shift, t_shift),
19298 tC3("lsls",	1b00000, _lsls,	   3, (RR, oRR, SH), shift, t_shift),
19299 tCE("lsr",	1a00020, _lsr,	   3, (RR, oRR, SH), shift, t_shift),
19300 tC3("lsrs",	1b00020, _lsrs,	   3, (RR, oRR, SH), shift, t_shift),
19301 tCE("asr",	1a00040, _asr,	   3, (RR, oRR, SH), shift, t_shift),
19302 tC3("asrs",      1b00040, _asrs,     3, (RR, oRR, SH), shift, t_shift),
19303 tCE("ror",	1a00060, _ror,	   3, (RR, oRR, SH), shift, t_shift),
19304 tC3("rors",	1b00060, _rors,	   3, (RR, oRR, SH), shift, t_shift),
19305 tCE("neg",	2600000, _neg,	   2, (RR, RR),      rd_rn, t_neg),
19306 tC3("negs",	2700000, _negs,	   2, (RR, RR),      rd_rn, t_neg),
19307 tCE("push",	92d0000, _push,     1, (REGLST),	     push_pop, t_push_pop),
19308 tCE("pop",	8bd0000, _pop,	   1, (REGLST),	     push_pop, t_push_pop),
19309
19310 /* These may simplify to neg.  */
19311 TCE("rsb",	0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
19312 TC3("rsbs",	0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
19313
19314#undef  THUMB_VARIANT
19315#define THUMB_VARIANT  & arm_ext_v6
19316
19317 TCE("cpy",       1a00000, 4600,     2, (RR, RR),      rd_rm, t_cpy),
19318
19319 /* V1 instructions with no Thumb analogue prior to V6T2.  */
19320#undef  THUMB_VARIANT
19321#define THUMB_VARIANT  & arm_ext_v6t2
19322
19323 TCE("teq",	1300000, ea900f00, 2, (RR, SH),      cmp,  t_mvn_tst),
19324 TC3w("teqs",	1300000, ea900f00, 2, (RR, SH),      cmp,  t_mvn_tst),
19325  CL("teqp",	130f000,           2, (RR, SH),      cmp),
19326
19327 TC3("ldrt",	4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19328 TC3("ldrbt",	4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19329 TC3("strt",	4200000, f8400e00, 2, (RR_npcsp, ADDR),   ldstt, t_ldstt),
19330 TC3("strbt",	4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19331
19332 TC3("stmdb",	9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19333 TC3("stmfd",     9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19334
19335 TC3("ldmdb",	9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19336 TC3("ldmea",	9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19337
19338 /* V1 instructions with no Thumb analogue at all.  */
19339  CE("rsc",	0e00000,	   3, (RR, oRR, SH), arit),
19340  C3(rscs,	0f00000,	   3, (RR, oRR, SH), arit),
19341
19342  C3(stmib,	9800000,	   2, (RRw, REGLST), ldmstm),
19343  C3(stmfa,	9800000,	   2, (RRw, REGLST), ldmstm),
19344  C3(stmda,	8000000,	   2, (RRw, REGLST), ldmstm),
19345  C3(stmed,	8000000,	   2, (RRw, REGLST), ldmstm),
19346  C3(ldmib,	9900000,	   2, (RRw, REGLST), ldmstm),
19347  C3(ldmed,	9900000,	   2, (RRw, REGLST), ldmstm),
19348  C3(ldmda,	8100000,	   2, (RRw, REGLST), ldmstm),
19349  C3(ldmfa,	8100000,	   2, (RRw, REGLST), ldmstm),
19350
19351#undef  ARM_VARIANT
19352#define ARM_VARIANT    & arm_ext_v2	/* ARM 2 - multiplies.	*/
19353#undef  THUMB_VARIANT
19354#define THUMB_VARIANT  & arm_ext_v4t
19355
19356 tCE("mul",	0000090, _mul,	   3, (RRnpc, RRnpc, oRR), mul, t_mul),
19357 tC3("muls",	0100090, _muls,	   3, (RRnpc, RRnpc, oRR), mul, t_mul),
19358
19359#undef  THUMB_VARIANT
19360#define THUMB_VARIANT  & arm_ext_v6t2
19361
19362 TCE("mla",	0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
19363  C3(mlas,	0300090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
19364
19365  /* Generic coprocessor instructions.	*/
19366 TCE("cdp",	e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp,    cdp),
19367 TCE("ldc",	c100000, ec100000, 3, (RCP, RCN, ADDRGLDC),	        lstc,   lstc),
19368 TC3("ldcl",	c500000, ec500000, 3, (RCP, RCN, ADDRGLDC),	        lstc,   lstc),
19369 TCE("stc",	c000000, ec000000, 3, (RCP, RCN, ADDRGLDC),	        lstc,   lstc),
19370 TC3("stcl",	c400000, ec400000, 3, (RCP, RCN, ADDRGLDC),	        lstc,   lstc),
19371 TCE("mcr",	e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b),   co_reg, co_reg),
19372 TCE("mrc",	e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b),   co_reg, co_reg),
19373
19374#undef  ARM_VARIANT
19375#define ARM_VARIANT  & arm_ext_v2s /* ARM 3 - swp instructions.  */
19376
19377  CE("swp",	1000090,           3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
19378  C3(swpb,	1400090,           3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
19379
19380#undef  ARM_VARIANT
19381#define ARM_VARIANT    & arm_ext_v3	/* ARM 6 Status register instructions.	*/
19382#undef  THUMB_VARIANT
19383#define THUMB_VARIANT  & arm_ext_msr
19384
19385 TCE("mrs",	1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
19386 TCE("msr",	120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
19387
19388#undef  ARM_VARIANT
19389#define ARM_VARIANT    & arm_ext_v3m	 /* ARM 7M long multiplies.  */
19390#undef  THUMB_VARIANT
19391#define THUMB_VARIANT  & arm_ext_v6t2
19392
19393 TCE("smull",	0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19394  CM("smull","s",	0d00090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19395 TCE("umull",	0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19396  CM("umull","s",	0900090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19397 TCE("smlal",	0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19398  CM("smlal","s",	0f00090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19399 TCE("umlal",	0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19400  CM("umlal","s",	0b00090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19401
19402#undef  ARM_VARIANT
19403#define ARM_VARIANT    & arm_ext_v4	/* ARM Architecture 4.	*/
19404#undef  THUMB_VARIANT
19405#define THUMB_VARIANT  & arm_ext_v4t
19406
19407 tC3("ldrh",	01000b0, _ldrh,     2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19408 tC3("strh",	00000b0, _strh,     2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19409 tC3("ldrsh",	01000f0, _ldrsh,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19410 tC3("ldrsb",	01000d0, _ldrsb,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19411 tC3("ldsh",	01000f0, _ldrsh,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19412 tC3("ldsb",	01000d0, _ldrsb,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19413
19414#undef  ARM_VARIANT
19415#define ARM_VARIANT  & arm_ext_v4t_5
19416
19417  /* ARM Architecture 4T.  */
19418  /* Note: bx (and blx) are required on V5, even if the processor does
19419     not support Thumb.	 */
19420 TCE("bx",	12fff10, 4700, 1, (RR),	bx, t_bx),
19421
19422#undef  ARM_VARIANT
19423#define ARM_VARIANT    & arm_ext_v5 /*  ARM Architecture 5T.	 */
19424#undef  THUMB_VARIANT
19425#define THUMB_VARIANT  & arm_ext_v5t
19426
19427  /* Note: blx has 2 variants; the .value coded here is for
19428     BLX(2).  Only this variant has conditional execution.  */
19429 TCE("blx",	12fff30, 4780, 1, (RR_EXr),			    blx,  t_blx),
19430 TUE("bkpt",	1200070, be00, 1, (oIffffb),			    bkpt, t_bkpt),
19431
19432#undef  THUMB_VARIANT
19433#define THUMB_VARIANT  & arm_ext_v6t2
19434
19435 TCE("clz",	16f0f10, fab0f080, 2, (RRnpc, RRnpc),		        rd_rm,  t_clz),
19436 TUF("ldc2",	c100000, fc100000, 3, (RCP, RCN, ADDRGLDC),	        lstc,	lstc),
19437 TUF("ldc2l",	c500000, fc500000, 3, (RCP, RCN, ADDRGLDC),		        lstc,	lstc),
19438 TUF("stc2",	c000000, fc000000, 3, (RCP, RCN, ADDRGLDC),	        lstc,	lstc),
19439 TUF("stc2l",	c400000, fc400000, 3, (RCP, RCN, ADDRGLDC),		        lstc,	lstc),
19440 TUF("cdp2",	e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp,    cdp),
19441 TUF("mcr2",	e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b),   co_reg, co_reg),
19442 TUF("mrc2",	e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b),   co_reg, co_reg),
19443
19444#undef  ARM_VARIANT
19445#define ARM_VARIANT    & arm_ext_v5exp /*  ARM Architecture 5TExP.  */
19446#undef  THUMB_VARIANT
19447#define THUMB_VARIANT  & arm_ext_v5exp
19448
19449 TCE("smlabb",	1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
19450 TCE("smlatb",	10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
19451 TCE("smlabt",	10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
19452 TCE("smlatt",	10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
19453
19454 TCE("smlawb",	1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
19455 TCE("smlawt",	12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
19456
19457 TCE("smlalbb",	1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
19458 TCE("smlaltb",	14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
19459 TCE("smlalbt",	14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
19460 TCE("smlaltt",	14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
19461
19462 TCE("smulbb",	1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
19463 TCE("smultb",	16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
19464 TCE("smulbt",	16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
19465 TCE("smultt",	16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
19466
19467 TCE("smulwb",	12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
19468 TCE("smulwt",	12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
19469
19470 TCE("qadd",	1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc),	    rd_rm_rn, t_simd2),
19471 TCE("qdadd",	1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc),	    rd_rm_rn, t_simd2),
19472 TCE("qsub",	1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc),	    rd_rm_rn, t_simd2),
19473 TCE("qdsub",	1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc),	    rd_rm_rn, t_simd2),
19474
19475#undef  ARM_VARIANT
19476#define ARM_VARIANT    & arm_ext_v5e /*  ARM Architecture 5TE.  */
19477#undef  THUMB_VARIANT
19478#define THUMB_VARIANT  & arm_ext_v6t2
19479
19480 TUF("pld",	450f000, f810f000, 1, (ADDR),		     pld,  t_pld),
19481 TC3("ldrd",	00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
19482     ldrd, t_ldstd),
19483 TC3("strd",	00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
19484				       ADDRGLDRS), ldrd, t_ldstd),
19485
19486 TCE("mcrr",	c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19487 TCE("mrrc",	c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19488
19489#undef  ARM_VARIANT
19490#define ARM_VARIANT  & arm_ext_v5j /*  ARM Architecture 5TEJ.  */
19491
19492 TCE("bxj",	12fff20, f3c08f00, 1, (RR),			  bxj, t_bxj),
19493
19494#undef  ARM_VARIANT
19495#define ARM_VARIANT    & arm_ext_v6 /*  ARM V6.  */
19496#undef  THUMB_VARIANT
19497#define THUMB_VARIANT  & arm_ext_v6
19498
19499 TUF("cpsie",     1080000, b660,     2, (CPSF, oI31b),              cpsi,   t_cpsi),
19500 TUF("cpsid",     10c0000, b670,     2, (CPSF, oI31b),              cpsi,   t_cpsi),
19501 tCE("rev",       6bf0f30, _rev,      2, (RRnpc, RRnpc),             rd_rm,  t_rev),
19502 tCE("rev16",     6bf0fb0, _rev16,    2, (RRnpc, RRnpc),             rd_rm,  t_rev),
19503 tCE("revsh",     6ff0fb0, _revsh,    2, (RRnpc, RRnpc),             rd_rm,  t_rev),
19504 tCE("sxth",      6bf0070, _sxth,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
19505 tCE("uxth",      6ff0070, _uxth,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
19506 tCE("sxtb",      6af0070, _sxtb,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
19507 tCE("uxtb",      6ef0070, _uxtb,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
19508 TUF("setend",    1010000, b650,     1, (ENDI),                     setend, t_setend),
19509
19510#undef  THUMB_VARIANT
19511#define THUMB_VARIANT  & arm_ext_v6t2_v8m
19512
19513 TCE("ldrex",	1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR),	  ldrex, t_ldrex),
19514 TCE("strex",	1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19515				      strex,  t_strex),
19516#undef  THUMB_VARIANT
19517#define THUMB_VARIANT  & arm_ext_v6t2
19518
19519 TUF("mcrr2",	c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19520 TUF("mrrc2",	c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19521
19522 TCE("ssat",	6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat,   t_ssat),
19523 TCE("usat",	6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat,   t_usat),
19524
19525/*  ARM V6 not included in V7M.  */
19526#undef  THUMB_VARIANT
19527#define THUMB_VARIANT  & arm_ext_v6_notm
19528 TUF("rfeia",	8900a00, e990c000, 1, (RRw),			   rfe, rfe),
19529 TUF("rfe",	8900a00, e990c000, 1, (RRw),			   rfe, rfe),
19530  UF(rfeib,	9900a00,           1, (RRw),			   rfe),
19531  UF(rfeda,	8100a00,           1, (RRw),			   rfe),
19532 TUF("rfedb",	9100a00, e810c000, 1, (RRw),			   rfe, rfe),
19533 TUF("rfefd",	8900a00, e990c000, 1, (RRw),			   rfe, rfe),
19534  UF(rfefa,	8100a00,           1, (RRw),			   rfe),
19535 TUF("rfeea",	9100a00, e810c000, 1, (RRw),			   rfe, rfe),
19536  UF(rfeed,	9900a00,           1, (RRw),			   rfe),
19537 TUF("srsia",	8c00500, e980c000, 2, (oRRw, I31w),		   srs,  srs),
19538 TUF("srs",	8c00500, e980c000, 2, (oRRw, I31w),		   srs,  srs),
19539 TUF("srsea",	8c00500, e980c000, 2, (oRRw, I31w),		   srs,  srs),
19540  UF(srsib,	9c00500,           2, (oRRw, I31w),		   srs),
19541  UF(srsfa,	9c00500,           2, (oRRw, I31w),		   srs),
19542  UF(srsda,	8400500,	   2, (oRRw, I31w),		   srs),
19543  UF(srsed,	8400500,	   2, (oRRw, I31w),		   srs),
19544 TUF("srsdb",	9400500, e800c000, 2, (oRRw, I31w),		   srs,  srs),
19545 TUF("srsfd",	9400500, e800c000, 2, (oRRw, I31w),		   srs,  srs),
19546 TUF("cps",	1020000, f3af8100, 1, (I31b),			  imm0, t_cps),
19547
/*  ARM V6 not included in V7M (e.g. integer SIMD).  */
19549#undef  THUMB_VARIANT
19550#define THUMB_VARIANT  & arm_ext_v6_dsp
19551 TCE("pkhbt",	6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll),   pkhbt, t_pkhbt),
19552 TCE("pkhtb",	6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar),   pkhtb, t_pkhtb),
19553 TCE("qadd16",	6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19554 TCE("qadd8",	6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19555 TCE("qasx",	6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19556 /* Old name for QASX.  */
19557 TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19558 TCE("qsax",	6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19559 /* Old name for QSAX.  */
19560 TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19561 TCE("qsub16",	6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19562 TCE("qsub8",	6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19563 TCE("sadd16",	6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19564 TCE("sadd8",	6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19565 TCE("sasx",	6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19566 /* Old name for SASX.  */
19567 TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19568 TCE("shadd16",	6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19569 TCE("shadd8",	6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19570 TCE("shasx",   6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19571 /* Old name for SHASX.  */
19572 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19573 TCE("shsax",     6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19574 /* Old name for SHSAX.  */
19575 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19576 TCE("shsub16",	6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19577 TCE("shsub8",	6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19578 TCE("ssax",	6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19579 /* Old name for SSAX.  */
19580 TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19581 TCE("ssub16",	6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19582 TCE("ssub8",	6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19583 TCE("uadd16",	6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19584 TCE("uadd8",	6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19585 TCE("uasx",	6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19586 /* Old name for UASX.  */
19587 TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19588 TCE("uhadd16",	6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19589 TCE("uhadd8",	6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19590 TCE("uhasx",   6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19591 /* Old name for UHASX.  */
19592 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19593 TCE("uhsax",     6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19594 /* Old name for UHSAX.  */
19595 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19596 TCE("uhsub16",	6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19597 TCE("uhsub8",	6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19598 TCE("uqadd16",	6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19599 TCE("uqadd8",	6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19600 TCE("uqasx",   6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19601 /* Old name for UQASX.  */
19602 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19603 TCE("uqsax",     6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19604 /* Old name for UQSAX.  */
19605 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19606 TCE("uqsub16",	6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19607 TCE("uqsub8",	6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19608 TCE("usub16",	6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19609 TCE("usax",	6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19610 /* Old name for USAX.  */
19611 TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19612 TCE("usub8",	6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19613 TCE("sxtah",	6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19614 TCE("sxtab16",	6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19615 TCE("sxtab",	6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19616 TCE("sxtb16",	68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR),	   sxth,  t_sxth),
19617 TCE("uxtah",	6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19618 TCE("uxtab16",	6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19619 TCE("uxtab",	6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19620 TCE("uxtb16",	6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR),	   sxth,  t_sxth),
19621 TCE("sel",	6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
19622 TCE("smlad",	7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19623 TCE("smladx",	7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19624 TCE("smlald",	7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19625 TCE("smlaldx",	7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19626 TCE("smlsd",	7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19627 TCE("smlsdx",	7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19628 TCE("smlsld",	7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19629 TCE("smlsldx",	7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19630 TCE("smmla",	7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19631 TCE("smmlar",	7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19632 TCE("smmls",	75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19633 TCE("smmlsr",	75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19634 TCE("smmul",	750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
19635 TCE("smmulr",	750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
19636 TCE("smuad",	700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
19637 TCE("smuadx",	700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
19638 TCE("smusd",	700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
19639 TCE("smusdx",	700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
19640 TCE("ssat16",	6a00f30, f3200000, 3, (RRnpc, I16, RRnpc),	   ssat16, t_ssat16),
19641 TCE("umaal",	0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,  t_mlal),
19642 TCE("usad8",	780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc),	   smul,   t_simd),
19643 TCE("usada8",	7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla,   t_mla),
19644 TCE("usat16",	6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc),	   usat16, t_usat16),
19645
19646#undef  ARM_VARIANT
19647#define ARM_VARIANT   & arm_ext_v6k
19648#undef  THUMB_VARIANT
19649#define THUMB_VARIANT & arm_ext_v6k
19650
19651 tCE("yield",	320f001, _yield,    0, (), noargs, t_hint),
19652 tCE("wfe",	320f002, _wfe,      0, (), noargs, t_hint),
19653 tCE("wfi",	320f003, _wfi,      0, (), noargs, t_hint),
19654 tCE("sev",	320f004, _sev,      0, (), noargs, t_hint),
19655
19656#undef  THUMB_VARIANT
19657#define THUMB_VARIANT  & arm_ext_v6_notm
19658 TCE("ldrexd",	1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
19659				      ldrexd, t_ldrexd),
19660 TCE("strexd",	1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
19661				       RRnpcb), strexd, t_strexd),
19662
19663#undef  THUMB_VARIANT
19664#define THUMB_VARIANT  & arm_ext_v6t2_v8m
19665 TCE("ldrexb",	1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
19666     rd_rn,  rd_rn),
19667 TCE("ldrexh",	1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
19668     rd_rn,  rd_rn),
19669 TCE("strexb",	1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19670     strex, t_strexbh),
19671 TCE("strexh",	1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19672     strex, t_strexbh),
19673 TUF("clrex",	57ff01f, f3bf8f2f, 0, (),			      noargs, noargs),
19674
19675#undef  ARM_VARIANT
19676#define ARM_VARIANT    & arm_ext_sec
19677#undef  THUMB_VARIANT
19678#define THUMB_VARIANT  & arm_ext_sec
19679
19680 TCE("smc",	1600070, f7f08000, 1, (EXPi), smc, t_smc),
19681
19682#undef	ARM_VARIANT
19683#define	ARM_VARIANT    & arm_ext_virt
19684#undef	THUMB_VARIANT
19685#define	THUMB_VARIANT    & arm_ext_virt
19686
19687 TCE("hvc",	1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
19688 TCE("eret",	160006e, f3de8f00, 0, (), noargs, noargs),
19689
19690#undef	ARM_VARIANT
19691#define	ARM_VARIANT    & arm_ext_pan
19692#undef	THUMB_VARIANT
19693#define	THUMB_VARIANT  & arm_ext_pan
19694
19695 TUF("setpan",	1100000, b610, 1, (I7), setpan, t_setpan),
19696
19697#undef  ARM_VARIANT
19698#define ARM_VARIANT    & arm_ext_v6t2
19699#undef  THUMB_VARIANT
19700#define THUMB_VARIANT  & arm_ext_v6t2
19701
19702 TCE("bfc",	7c0001f, f36f0000, 3, (RRnpc, I31, I32),	   bfc, t_bfc),
19703 TCE("bfi",	7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
19704 TCE("sbfx",	7a00050, f3400000, 4, (RR, RR, I31, I32),	   bfx, t_bfx),
19705 TCE("ubfx",	7e00050, f3c00000, 4, (RR, RR, I31, I32),	   bfx, t_bfx),
19706
19707 TCE("mls",	0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
19708 TCE("rbit",	6ff0f30, fa90f0a0, 2, (RR, RR),			    rd_rm, t_rbit),
19709
19710 TC3("ldrht",	03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19711 TC3("ldrsht",	03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19712 TC3("ldrsbt",	03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19713 TC3("strht",	02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19714
19715#undef  THUMB_VARIANT
19716#define THUMB_VARIANT  & arm_ext_v6t2_v8m
19717 TCE("movw",	3000000, f2400000, 2, (RRnpc, HALF),		    mov16, t_mov16),
19718 TCE("movt",	3400000, f2c00000, 2, (RRnpc, HALF),		    mov16, t_mov16),
19719
19720 /* Thumb-only instructions.  */
19721#undef  ARM_VARIANT
19722#define ARM_VARIANT NULL
19723  TUE("cbnz",     0,           b900,     2, (RR, EXP), 0, t_cbz),
19724  TUE("cbz",      0,           b100,     2, (RR, EXP), 0, t_cbz),
19725
19726 /* ARM does not really have an IT instruction, so always allow it.
19727    The opcode is copied from Thumb in order to allow warnings in
19728    -mimplicit-it=[never | arm] modes.  */
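 /* A hedged illustration of the behaviour described above (registers and
    operands chosen arbitrarily): with these entries, a sequence such as

	it	eq
	addeq	r0, r0, #1

    is accepted when assembling ARM code as well as Thumb code, which is
    what allows the -mimplicit-it=[never | arm] diagnostics to be applied
    consistently.  */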
19729#undef  ARM_VARIANT
19730#define ARM_VARIANT  & arm_ext_v1
19731#undef  THUMB_VARIANT
19732#define THUMB_VARIANT  & arm_ext_v6t2
19733
19734 TUE("it",        bf08,        bf08,     1, (COND),   it,    t_it),
19735 TUE("itt",       bf0c,        bf0c,     1, (COND),   it,    t_it),
19736 TUE("ite",       bf04,        bf04,     1, (COND),   it,    t_it),
19737 TUE("ittt",      bf0e,        bf0e,     1, (COND),   it,    t_it),
19738 TUE("itet",      bf06,        bf06,     1, (COND),   it,    t_it),
19739 TUE("itte",      bf0a,        bf0a,     1, (COND),   it,    t_it),
19740 TUE("itee",      bf02,        bf02,     1, (COND),   it,    t_it),
19741 TUE("itttt",     bf0f,        bf0f,     1, (COND),   it,    t_it),
19742 TUE("itett",     bf07,        bf07,     1, (COND),   it,    t_it),
19743 TUE("ittet",     bf0b,        bf0b,     1, (COND),   it,    t_it),
19744 TUE("iteet",     bf03,        bf03,     1, (COND),   it,    t_it),
19745 TUE("ittte",     bf0d,        bf0d,     1, (COND),   it,    t_it),
19746 TUE("itete",     bf05,        bf05,     1, (COND),   it,    t_it),
19747 TUE("ittee",     bf09,        bf09,     1, (COND),   it,    t_it),
19748 TUE("iteee",     bf01,        bf01,     1, (COND),   it,    t_it),
19749 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent.  */
19750 TC3("rrx",       01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
19751 TC3("rrxs",      01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
19752
19753 /* Thumb-2 only instructions.  */
19754#undef  ARM_VARIANT
19755#define ARM_VARIANT  NULL
19756
19757 TCE("addw",	0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
19758 TCE("subw",	0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
19759 TCE("orn",       0, ea600000, 3, (RR, oRR, SH),  0, t_orn),
19760 TCE("orns",      0, ea700000, 3, (RR, oRR, SH),  0, t_orn),
19761 TCE("tbb",       0, e8d0f000, 1, (TB), 0, t_tb),
19762 TCE("tbh",       0, e8d0f010, 1, (TB), 0, t_tb),
19763
19764 /* Hardware division instructions.  */
19765#undef  ARM_VARIANT
19766#define ARM_VARIANT    & arm_ext_adiv
19767#undef  THUMB_VARIANT
19768#define THUMB_VARIANT  & arm_ext_div
19769
19770 TCE("sdiv",	710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
19771 TCE("udiv",	730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
19772
19773 /* ARM V6M/V7 instructions.  */
19774#undef  ARM_VARIANT
19775#define ARM_VARIANT    & arm_ext_barrier
19776#undef  THUMB_VARIANT
19777#define THUMB_VARIANT  & arm_ext_barrier
19778
19779 TUF("dmb",	57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
19780 TUF("dsb",	57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
19781 TUF("isb",	57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
19782
19783 /* ARM V7 instructions.  */
19784#undef  ARM_VARIANT
19785#define ARM_VARIANT    & arm_ext_v7
19786#undef  THUMB_VARIANT
19787#define THUMB_VARIANT  & arm_ext_v7
19788
19789 TUF("pli",	450f000, f910f000, 1, (ADDR),	  pli,	    t_pld),
19790 TCE("dbg",	320f0f0, f3af80f0, 1, (I15),	  dbg,	    t_dbg),
19791
19792#undef  ARM_VARIANT
19793#define ARM_VARIANT    & arm_ext_mp
19794#undef  THUMB_VARIANT
19795#define THUMB_VARIANT  & arm_ext_mp
19796
19797 TUF("pldw",	410f000, f830f000, 1, (ADDR),	pld,	t_pld),
19798
19799 /* ARMv8 instructions.  */
19800#undef  ARM_VARIANT
19801#define ARM_VARIANT   & arm_ext_v8
19802
19803/* Instructions shared between armv8-a and armv8-m.  */
19804#undef  THUMB_VARIANT
19805#define THUMB_VARIANT & arm_ext_atomics
19806
19807 TCE("lda",	1900c9f, e8d00faf, 2, (RRnpc, RRnpcb),	rd_rn,	rd_rn),
19808 TCE("ldab",	1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb),	rd_rn,  rd_rn),
19809 TCE("ldah",	1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb),	rd_rn,  rd_rn),
19810 TCE("stl",	180fc90, e8c00faf, 2, (RRnpc, RRnpcb),	rm_rn,  rd_rn),
19811 TCE("stlb",	1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb),	rm_rn,  rd_rn),
19812 TCE("stlh",	1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb),	rm_rn,  rd_rn),
19813 TCE("ldaex",	1900e9f, e8d00fef, 2, (RRnpc, RRnpcb),	rd_rn,	rd_rn),
19814 TCE("ldaexb",	1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb),	rd_rn,  rd_rn),
19815 TCE("ldaexh",	1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb),	rd_rn,  rd_rn),
19816 TCE("stlex",	1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
19817							stlex,  t_stlex),
19818 TCE("stlexb",	1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
19819							stlex, t_stlex),
19820 TCE("stlexh",	1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
19821							stlex, t_stlex),
19822#undef  THUMB_VARIANT
19823#define THUMB_VARIANT & arm_ext_v8
19824
19825 tCE("sevl",	320f005, _sevl,    0, (),		noargs,	t_hint),
19826 TUE("hlt",	1000070, ba80,     1, (oIffffb),	bkpt,	t_hlt),
19827 TCE("ldaexd",	1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
19828							ldrexd, t_ldrexd),
19829 TCE("stlexd",	1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
19830							strexd, t_strexd),
19831 /* ARMv8 T32 only.  */
19832#undef  ARM_VARIANT
19833#define ARM_VARIANT  NULL
19834 TUF("dcps1",	0,	 f78f8001, 0, (),	noargs, noargs),
19835 TUF("dcps2",	0,	 f78f8002, 0, (),	noargs, noargs),
19836 TUF("dcps3",	0,	 f78f8003, 0, (),	noargs, noargs),
19837
19838  /* FP for ARMv8.  */
19839#undef  ARM_VARIANT
19840#define ARM_VARIANT   & fpu_vfp_ext_armv8xd
19841#undef  THUMB_VARIANT
19842#define THUMB_VARIANT & fpu_vfp_ext_armv8xd
19843
19844  nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD),		vsel),
19845  nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD),		vsel),
19846  nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD),		vsel),
19847  nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD),		vsel),
19848  nUF(vmaxnm, _vmaxnm, 3, (RNSDQ, oRNSDQ, RNSDQ),	vmaxnm),
19849  nUF(vminnm, _vminnm, 3, (RNSDQ, oRNSDQ, RNSDQ),	vmaxnm),
19850  nUF(vcvta,  _vcvta,  2, (RNSDQ, oRNSDQ),		neon_cvta),
19851  nUF(vcvtn,  _vcvta,  2, (RNSDQ, oRNSDQ),		neon_cvtn),
19852  nUF(vcvtp,  _vcvta,  2, (RNSDQ, oRNSDQ),		neon_cvtp),
19853  nUF(vcvtm,  _vcvta,  2, (RNSDQ, oRNSDQ),		neon_cvtm),
19854  nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ),		vrintr),
19855  nCE(vrintz, _vrintr, 2, (RNSDQ, oRNSDQ),		vrintz),
19856  nCE(vrintx, _vrintr, 2, (RNSDQ, oRNSDQ),		vrintx),
19857  nUF(vrinta, _vrinta, 2, (RNSDQ, oRNSDQ),		vrinta),
19858  nUF(vrintn, _vrinta, 2, (RNSDQ, oRNSDQ),		vrintn),
19859  nUF(vrintp, _vrinta, 2, (RNSDQ, oRNSDQ),		vrintp),
19860  nUF(vrintm, _vrinta, 2, (RNSDQ, oRNSDQ),		vrintm),
19861
19862  /* Crypto v1 extensions.  */
19863#undef  ARM_VARIANT
19864#define ARM_VARIANT & fpu_crypto_ext_armv8
19865#undef  THUMB_VARIANT
19866#define THUMB_VARIANT & fpu_crypto_ext_armv8
19867
19868  nUF(aese, _aes, 2, (RNQ, RNQ), aese),
19869  nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
19870  nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
19871  nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
19872  nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
19873  nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
19874  nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
19875  nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
19876  nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
19877  nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
19878  nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
19879  nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
19880  nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
19881  nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
19882
19883#undef  ARM_VARIANT
19884#define ARM_VARIANT   & crc_ext_armv8
19885#undef  THUMB_VARIANT
19886#define THUMB_VARIANT & crc_ext_armv8
19887  TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
19888  TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
19889  TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
19890  TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
19891  TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
19892  TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
19893
19894 /* ARMv8.2 RAS extension.  */
19895#undef  ARM_VARIANT
19896#define ARM_VARIANT   & arm_ext_ras
19897#undef  THUMB_VARIANT
19898#define THUMB_VARIANT & arm_ext_ras
19899 TUE ("esb", 320f010, f3af8010, 0, (), noargs,  noargs),
19900
19901#undef  ARM_VARIANT
19902#define ARM_VARIANT   & arm_ext_v8_3
19903#undef  THUMB_VARIANT
19904#define THUMB_VARIANT & arm_ext_v8_3
19905 NCE (vjcvt, eb90bc0, 2, (RVS, RVD), vjcvt),
19906 NUF (vcmla, 0, 4, (RNDQ, RNDQ, RNDQ_RNSC, EXPi), vcmla),
19907 NUF (vcadd, 0, 4, (RNDQ, RNDQ, RNDQ, EXPi), vcadd),
19908
19909#undef  ARM_VARIANT
19910#define ARM_VARIANT  & fpu_fpa_ext_v1  /* Core FPA instruction set (V1).  */
19911#undef  THUMB_VARIANT
19912#define THUMB_VARIANT NULL
19913
19914 cCE("wfs",	e200110, 1, (RR),	     rd),
19915 cCE("rfs",	e300110, 1, (RR),	     rd),
19916 cCE("wfc",	e400110, 1, (RR),	     rd),
19917 cCE("rfc",	e500110, 1, (RR),	     rd),
19918
19919 cCL("ldfs",	c100100, 2, (RF, ADDRGLDC),  rd_cpaddr),
19920 cCL("ldfd",	c108100, 2, (RF, ADDRGLDC),  rd_cpaddr),
19921 cCL("ldfe",	c500100, 2, (RF, ADDRGLDC),  rd_cpaddr),
19922 cCL("ldfp",	c508100, 2, (RF, ADDRGLDC),  rd_cpaddr),
19923
19924 cCL("stfs",	c000100, 2, (RF, ADDRGLDC),  rd_cpaddr),
19925 cCL("stfd",	c008100, 2, (RF, ADDRGLDC),  rd_cpaddr),
19926 cCL("stfe",	c400100, 2, (RF, ADDRGLDC),  rd_cpaddr),
19927 cCL("stfp",	c408100, 2, (RF, ADDRGLDC),  rd_cpaddr),
19928
19929 cCL("mvfs",	e008100, 2, (RF, RF_IF),     rd_rm),
19930 cCL("mvfsp",	e008120, 2, (RF, RF_IF),     rd_rm),
19931 cCL("mvfsm",	e008140, 2, (RF, RF_IF),     rd_rm),
19932 cCL("mvfsz",	e008160, 2, (RF, RF_IF),     rd_rm),
19933 cCL("mvfd",	e008180, 2, (RF, RF_IF),     rd_rm),
19934 cCL("mvfdp",	e0081a0, 2, (RF, RF_IF),     rd_rm),
19935 cCL("mvfdm",	e0081c0, 2, (RF, RF_IF),     rd_rm),
19936 cCL("mvfdz",	e0081e0, 2, (RF, RF_IF),     rd_rm),
19937 cCL("mvfe",	e088100, 2, (RF, RF_IF),     rd_rm),
19938 cCL("mvfep",	e088120, 2, (RF, RF_IF),     rd_rm),
19939 cCL("mvfem",	e088140, 2, (RF, RF_IF),     rd_rm),
19940 cCL("mvfez",	e088160, 2, (RF, RF_IF),     rd_rm),
19941
19942 cCL("mnfs",	e108100, 2, (RF, RF_IF),     rd_rm),
19943 cCL("mnfsp",	e108120, 2, (RF, RF_IF),     rd_rm),
19944 cCL("mnfsm",	e108140, 2, (RF, RF_IF),     rd_rm),
19945 cCL("mnfsz",	e108160, 2, (RF, RF_IF),     rd_rm),
19946 cCL("mnfd",	e108180, 2, (RF, RF_IF),     rd_rm),
19947 cCL("mnfdp",	e1081a0, 2, (RF, RF_IF),     rd_rm),
19948 cCL("mnfdm",	e1081c0, 2, (RF, RF_IF),     rd_rm),
19949 cCL("mnfdz",	e1081e0, 2, (RF, RF_IF),     rd_rm),
19950 cCL("mnfe",	e188100, 2, (RF, RF_IF),     rd_rm),
19951 cCL("mnfep",	e188120, 2, (RF, RF_IF),     rd_rm),
19952 cCL("mnfem",	e188140, 2, (RF, RF_IF),     rd_rm),
19953 cCL("mnfez",	e188160, 2, (RF, RF_IF),     rd_rm),
19954
19955 cCL("abss",	e208100, 2, (RF, RF_IF),     rd_rm),
19956 cCL("abssp",	e208120, 2, (RF, RF_IF),     rd_rm),
19957 cCL("abssm",	e208140, 2, (RF, RF_IF),     rd_rm),
19958 cCL("abssz",	e208160, 2, (RF, RF_IF),     rd_rm),
19959 cCL("absd",	e208180, 2, (RF, RF_IF),     rd_rm),
19960 cCL("absdp",	e2081a0, 2, (RF, RF_IF),     rd_rm),
19961 cCL("absdm",	e2081c0, 2, (RF, RF_IF),     rd_rm),
19962 cCL("absdz",	e2081e0, 2, (RF, RF_IF),     rd_rm),
19963 cCL("abse",	e288100, 2, (RF, RF_IF),     rd_rm),
19964 cCL("absep",	e288120, 2, (RF, RF_IF),     rd_rm),
19965 cCL("absem",	e288140, 2, (RF, RF_IF),     rd_rm),
19966 cCL("absez",	e288160, 2, (RF, RF_IF),     rd_rm),
19967
19968 cCL("rnds",	e308100, 2, (RF, RF_IF),     rd_rm),
19969 cCL("rndsp",	e308120, 2, (RF, RF_IF),     rd_rm),
19970 cCL("rndsm",	e308140, 2, (RF, RF_IF),     rd_rm),
19971 cCL("rndsz",	e308160, 2, (RF, RF_IF),     rd_rm),
19972 cCL("rndd",	e308180, 2, (RF, RF_IF),     rd_rm),
19973 cCL("rnddp",	e3081a0, 2, (RF, RF_IF),     rd_rm),
19974 cCL("rnddm",	e3081c0, 2, (RF, RF_IF),     rd_rm),
19975 cCL("rnddz",	e3081e0, 2, (RF, RF_IF),     rd_rm),
19976 cCL("rnde",	e388100, 2, (RF, RF_IF),     rd_rm),
19977 cCL("rndep",	e388120, 2, (RF, RF_IF),     rd_rm),
19978 cCL("rndem",	e388140, 2, (RF, RF_IF),     rd_rm),
19979 cCL("rndez",	e388160, 2, (RF, RF_IF),     rd_rm),
19980
19981 cCL("sqts",	e408100, 2, (RF, RF_IF),     rd_rm),
19982 cCL("sqtsp",	e408120, 2, (RF, RF_IF),     rd_rm),
19983 cCL("sqtsm",	e408140, 2, (RF, RF_IF),     rd_rm),
19984 cCL("sqtsz",	e408160, 2, (RF, RF_IF),     rd_rm),
19985 cCL("sqtd",	e408180, 2, (RF, RF_IF),     rd_rm),
19986 cCL("sqtdp",	e4081a0, 2, (RF, RF_IF),     rd_rm),
19987 cCL("sqtdm",	e4081c0, 2, (RF, RF_IF),     rd_rm),
19988 cCL("sqtdz",	e4081e0, 2, (RF, RF_IF),     rd_rm),
19989 cCL("sqte",	e488100, 2, (RF, RF_IF),     rd_rm),
19990 cCL("sqtep",	e488120, 2, (RF, RF_IF),     rd_rm),
19991 cCL("sqtem",	e488140, 2, (RF, RF_IF),     rd_rm),
19992 cCL("sqtez",	e488160, 2, (RF, RF_IF),     rd_rm),
19993
19994 cCL("logs",	e508100, 2, (RF, RF_IF),     rd_rm),
19995 cCL("logsp",	e508120, 2, (RF, RF_IF),     rd_rm),
19996 cCL("logsm",	e508140, 2, (RF, RF_IF),     rd_rm),
19997 cCL("logsz",	e508160, 2, (RF, RF_IF),     rd_rm),
19998 cCL("logd",	e508180, 2, (RF, RF_IF),     rd_rm),
19999 cCL("logdp",	e5081a0, 2, (RF, RF_IF),     rd_rm),
20000 cCL("logdm",	e5081c0, 2, (RF, RF_IF),     rd_rm),
20001 cCL("logdz",	e5081e0, 2, (RF, RF_IF),     rd_rm),
20002 cCL("loge",	e588100, 2, (RF, RF_IF),     rd_rm),
20003 cCL("logep",	e588120, 2, (RF, RF_IF),     rd_rm),
20004 cCL("logem",	e588140, 2, (RF, RF_IF),     rd_rm),
20005 cCL("logez",	e588160, 2, (RF, RF_IF),     rd_rm),
20006
20007 cCL("lgns",	e608100, 2, (RF, RF_IF),     rd_rm),
20008 cCL("lgnsp",	e608120, 2, (RF, RF_IF),     rd_rm),
20009 cCL("lgnsm",	e608140, 2, (RF, RF_IF),     rd_rm),
20010 cCL("lgnsz",	e608160, 2, (RF, RF_IF),     rd_rm),
20011 cCL("lgnd",	e608180, 2, (RF, RF_IF),     rd_rm),
20012 cCL("lgndp",	e6081a0, 2, (RF, RF_IF),     rd_rm),
20013 cCL("lgndm",	e6081c0, 2, (RF, RF_IF),     rd_rm),
20014 cCL("lgndz",	e6081e0, 2, (RF, RF_IF),     rd_rm),
20015 cCL("lgne",	e688100, 2, (RF, RF_IF),     rd_rm),
20016 cCL("lgnep",	e688120, 2, (RF, RF_IF),     rd_rm),
20017 cCL("lgnem",	e688140, 2, (RF, RF_IF),     rd_rm),
20018 cCL("lgnez",	e688160, 2, (RF, RF_IF),     rd_rm),
20019
20020 cCL("exps",	e708100, 2, (RF, RF_IF),     rd_rm),
20021 cCL("expsp",	e708120, 2, (RF, RF_IF),     rd_rm),
20022 cCL("expsm",	e708140, 2, (RF, RF_IF),     rd_rm),
20023 cCL("expsz",	e708160, 2, (RF, RF_IF),     rd_rm),
20024 cCL("expd",	e708180, 2, (RF, RF_IF),     rd_rm),
20025 cCL("expdp",	e7081a0, 2, (RF, RF_IF),     rd_rm),
20026 cCL("expdm",	e7081c0, 2, (RF, RF_IF),     rd_rm),
20027 cCL("expdz",	e7081e0, 2, (RF, RF_IF),     rd_rm),
20028 cCL("expe",	e788100, 2, (RF, RF_IF),     rd_rm),
20029 cCL("expep",	e788120, 2, (RF, RF_IF),     rd_rm),
20030 cCL("expem",	e788140, 2, (RF, RF_IF),     rd_rm),
20031 cCL("expez",	e788160, 2, (RF, RF_IF),     rd_rm),
20032
20033 cCL("sins",	e808100, 2, (RF, RF_IF),     rd_rm),
20034 cCL("sinsp",	e808120, 2, (RF, RF_IF),     rd_rm),
20035 cCL("sinsm",	e808140, 2, (RF, RF_IF),     rd_rm),
20036 cCL("sinsz",	e808160, 2, (RF, RF_IF),     rd_rm),
20037 cCL("sind",	e808180, 2, (RF, RF_IF),     rd_rm),
20038 cCL("sindp",	e8081a0, 2, (RF, RF_IF),     rd_rm),
20039 cCL("sindm",	e8081c0, 2, (RF, RF_IF),     rd_rm),
20040 cCL("sindz",	e8081e0, 2, (RF, RF_IF),     rd_rm),
20041 cCL("sine",	e888100, 2, (RF, RF_IF),     rd_rm),
20042 cCL("sinep",	e888120, 2, (RF, RF_IF),     rd_rm),
20043 cCL("sinem",	e888140, 2, (RF, RF_IF),     rd_rm),
20044 cCL("sinez",	e888160, 2, (RF, RF_IF),     rd_rm),
20045
20046 cCL("coss",	e908100, 2, (RF, RF_IF),     rd_rm),
20047 cCL("cossp",	e908120, 2, (RF, RF_IF),     rd_rm),
20048 cCL("cossm",	e908140, 2, (RF, RF_IF),     rd_rm),
20049 cCL("cossz",	e908160, 2, (RF, RF_IF),     rd_rm),
20050 cCL("cosd",	e908180, 2, (RF, RF_IF),     rd_rm),
20051 cCL("cosdp",	e9081a0, 2, (RF, RF_IF),     rd_rm),
20052 cCL("cosdm",	e9081c0, 2, (RF, RF_IF),     rd_rm),
20053 cCL("cosdz",	e9081e0, 2, (RF, RF_IF),     rd_rm),
20054 cCL("cose",	e988100, 2, (RF, RF_IF),     rd_rm),
20055 cCL("cosep",	e988120, 2, (RF, RF_IF),     rd_rm),
20056 cCL("cosem",	e988140, 2, (RF, RF_IF),     rd_rm),
20057 cCL("cosez",	e988160, 2, (RF, RF_IF),     rd_rm),
20058
20059 cCL("tans",	ea08100, 2, (RF, RF_IF),     rd_rm),
20060 cCL("tansp",	ea08120, 2, (RF, RF_IF),     rd_rm),
20061 cCL("tansm",	ea08140, 2, (RF, RF_IF),     rd_rm),
20062 cCL("tansz",	ea08160, 2, (RF, RF_IF),     rd_rm),
20063 cCL("tand",	ea08180, 2, (RF, RF_IF),     rd_rm),
20064 cCL("tandp",	ea081a0, 2, (RF, RF_IF),     rd_rm),
20065 cCL("tandm",	ea081c0, 2, (RF, RF_IF),     rd_rm),
20066 cCL("tandz",	ea081e0, 2, (RF, RF_IF),     rd_rm),
20067 cCL("tane",	ea88100, 2, (RF, RF_IF),     rd_rm),
20068 cCL("tanep",	ea88120, 2, (RF, RF_IF),     rd_rm),
20069 cCL("tanem",	ea88140, 2, (RF, RF_IF),     rd_rm),
20070 cCL("tanez",	ea88160, 2, (RF, RF_IF),     rd_rm),
20071
20072 cCL("asns",	eb08100, 2, (RF, RF_IF),     rd_rm),
20073 cCL("asnsp",	eb08120, 2, (RF, RF_IF),     rd_rm),
20074 cCL("asnsm",	eb08140, 2, (RF, RF_IF),     rd_rm),
20075 cCL("asnsz",	eb08160, 2, (RF, RF_IF),     rd_rm),
20076 cCL("asnd",	eb08180, 2, (RF, RF_IF),     rd_rm),
20077 cCL("asndp",	eb081a0, 2, (RF, RF_IF),     rd_rm),
20078 cCL("asndm",	eb081c0, 2, (RF, RF_IF),     rd_rm),
20079 cCL("asndz",	eb081e0, 2, (RF, RF_IF),     rd_rm),
20080 cCL("asne",	eb88100, 2, (RF, RF_IF),     rd_rm),
20081 cCL("asnep",	eb88120, 2, (RF, RF_IF),     rd_rm),
20082 cCL("asnem",	eb88140, 2, (RF, RF_IF),     rd_rm),
20083 cCL("asnez",	eb88160, 2, (RF, RF_IF),     rd_rm),
20084
20085 cCL("acss",	ec08100, 2, (RF, RF_IF),     rd_rm),
20086 cCL("acssp",	ec08120, 2, (RF, RF_IF),     rd_rm),
20087 cCL("acssm",	ec08140, 2, (RF, RF_IF),     rd_rm),
20088 cCL("acssz",	ec08160, 2, (RF, RF_IF),     rd_rm),
20089 cCL("acsd",	ec08180, 2, (RF, RF_IF),     rd_rm),
20090 cCL("acsdp",	ec081a0, 2, (RF, RF_IF),     rd_rm),
20091 cCL("acsdm",	ec081c0, 2, (RF, RF_IF),     rd_rm),
20092 cCL("acsdz",	ec081e0, 2, (RF, RF_IF),     rd_rm),
20093 cCL("acse",	ec88100, 2, (RF, RF_IF),     rd_rm),
20094 cCL("acsep",	ec88120, 2, (RF, RF_IF),     rd_rm),
20095 cCL("acsem",	ec88140, 2, (RF, RF_IF),     rd_rm),
20096 cCL("acsez",	ec88160, 2, (RF, RF_IF),     rd_rm),
20097
20098 cCL("atns",	ed08100, 2, (RF, RF_IF),     rd_rm),
20099 cCL("atnsp",	ed08120, 2, (RF, RF_IF),     rd_rm),
20100 cCL("atnsm",	ed08140, 2, (RF, RF_IF),     rd_rm),
20101 cCL("atnsz",	ed08160, 2, (RF, RF_IF),     rd_rm),
20102 cCL("atnd",	ed08180, 2, (RF, RF_IF),     rd_rm),
20103 cCL("atndp",	ed081a0, 2, (RF, RF_IF),     rd_rm),
20104 cCL("atndm",	ed081c0, 2, (RF, RF_IF),     rd_rm),
20105 cCL("atndz",	ed081e0, 2, (RF, RF_IF),     rd_rm),
20106 cCL("atne",	ed88100, 2, (RF, RF_IF),     rd_rm),
20107 cCL("atnep",	ed88120, 2, (RF, RF_IF),     rd_rm),
20108 cCL("atnem",	ed88140, 2, (RF, RF_IF),     rd_rm),
20109 cCL("atnez",	ed88160, 2, (RF, RF_IF),     rd_rm),
20110
20111 cCL("urds",	ee08100, 2, (RF, RF_IF),     rd_rm),
20112 cCL("urdsp",	ee08120, 2, (RF, RF_IF),     rd_rm),
20113 cCL("urdsm",	ee08140, 2, (RF, RF_IF),     rd_rm),
20114 cCL("urdsz",	ee08160, 2, (RF, RF_IF),     rd_rm),
20115 cCL("urdd",	ee08180, 2, (RF, RF_IF),     rd_rm),
20116 cCL("urddp",	ee081a0, 2, (RF, RF_IF),     rd_rm),
20117 cCL("urddm",	ee081c0, 2, (RF, RF_IF),     rd_rm),
20118 cCL("urddz",	ee081e0, 2, (RF, RF_IF),     rd_rm),
20119 cCL("urde",	ee88100, 2, (RF, RF_IF),     rd_rm),
20120 cCL("urdep",	ee88120, 2, (RF, RF_IF),     rd_rm),
20121 cCL("urdem",	ee88140, 2, (RF, RF_IF),     rd_rm),
20122 cCL("urdez",	ee88160, 2, (RF, RF_IF),     rd_rm),
20123
20124 cCL("nrms",	ef08100, 2, (RF, RF_IF),     rd_rm),
20125 cCL("nrmsp",	ef08120, 2, (RF, RF_IF),     rd_rm),
20126 cCL("nrmsm",	ef08140, 2, (RF, RF_IF),     rd_rm),
20127 cCL("nrmsz",	ef08160, 2, (RF, RF_IF),     rd_rm),
20128 cCL("nrmd",	ef08180, 2, (RF, RF_IF),     rd_rm),
20129 cCL("nrmdp",	ef081a0, 2, (RF, RF_IF),     rd_rm),
20130 cCL("nrmdm",	ef081c0, 2, (RF, RF_IF),     rd_rm),
20131 cCL("nrmdz",	ef081e0, 2, (RF, RF_IF),     rd_rm),
20132 cCL("nrme",	ef88100, 2, (RF, RF_IF),     rd_rm),
20133 cCL("nrmep",	ef88120, 2, (RF, RF_IF),     rd_rm),
20134 cCL("nrmem",	ef88140, 2, (RF, RF_IF),     rd_rm),
20135 cCL("nrmez",	ef88160, 2, (RF, RF_IF),     rd_rm),
20136
20137 cCL("adfs",	e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
20138 cCL("adfsp",	e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
20139 cCL("adfsm",	e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
20140 cCL("adfsz",	e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
20141 cCL("adfd",	e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
20142 cCL("adfdp",	e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20143 cCL("adfdm",	e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20144 cCL("adfdz",	e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20145 cCL("adfe",	e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
20146 cCL("adfep",	e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
20147 cCL("adfem",	e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
20148 cCL("adfez",	e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
20149
20150 cCL("sufs",	e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
20151 cCL("sufsp",	e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
20152 cCL("sufsm",	e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
20153 cCL("sufsz",	e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
20154 cCL("sufd",	e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
20155 cCL("sufdp",	e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20156 cCL("sufdm",	e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20157 cCL("sufdz",	e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20158 cCL("sufe",	e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
20159 cCL("sufep",	e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
20160 cCL("sufem",	e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
20161 cCL("sufez",	e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
20162
20163 cCL("rsfs",	e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
20164 cCL("rsfsp",	e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
20165 cCL("rsfsm",	e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
20166 cCL("rsfsz",	e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
20167 cCL("rsfd",	e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
20168 cCL("rsfdp",	e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20169 cCL("rsfdm",	e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20170 cCL("rsfdz",	e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20171 cCL("rsfe",	e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
20172 cCL("rsfep",	e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
20173 cCL("rsfem",	e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
20174 cCL("rsfez",	e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
20175
20176 cCL("mufs",	e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
20177 cCL("mufsp",	e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
20178 cCL("mufsm",	e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
20179 cCL("mufsz",	e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
20180 cCL("mufd",	e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
20181 cCL("mufdp",	e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20182 cCL("mufdm",	e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20183 cCL("mufdz",	e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20184 cCL("mufe",	e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
20185 cCL("mufep",	e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
20186 cCL("mufem",	e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
20187 cCL("mufez",	e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
20188
20189 cCL("dvfs",	e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
20190 cCL("dvfsp",	e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
20191 cCL("dvfsm",	e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
20192 cCL("dvfsz",	e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
20193 cCL("dvfd",	e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
20194 cCL("dvfdp",	e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20195 cCL("dvfdm",	e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20196 cCL("dvfdz",	e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20197 cCL("dvfe",	e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
20198 cCL("dvfep",	e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
20199 cCL("dvfem",	e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
20200 cCL("dvfez",	e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
20201
20202 cCL("rdfs",	e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
20203 cCL("rdfsp",	e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
20204 cCL("rdfsm",	e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
20205 cCL("rdfsz",	e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
20206 cCL("rdfd",	e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
20207 cCL("rdfdp",	e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20208 cCL("rdfdm",	e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20209 cCL("rdfdz",	e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20210 cCL("rdfe",	e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
20211 cCL("rdfep",	e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
20212 cCL("rdfem",	e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
20213 cCL("rdfez",	e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
20214
20215 cCL("pows",	e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
20216 cCL("powsp",	e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
20217 cCL("powsm",	e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
20218 cCL("powsz",	e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
20219 cCL("powd",	e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
20220 cCL("powdp",	e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20221 cCL("powdm",	e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20222 cCL("powdz",	e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20223 cCL("powe",	e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
20224 cCL("powep",	e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
20225 cCL("powem",	e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
20226 cCL("powez",	e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
20227
20228 cCL("rpws",	e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
20229 cCL("rpwsp",	e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
20230 cCL("rpwsm",	e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
20231 cCL("rpwsz",	e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
20232 cCL("rpwd",	e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
20233 cCL("rpwdp",	e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20234 cCL("rpwdm",	e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20235 cCL("rpwdz",	e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20236 cCL("rpwe",	e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
20237 cCL("rpwep",	e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
20238 cCL("rpwem",	e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
20239 cCL("rpwez",	e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
20240
20241 cCL("rmfs",	e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
20242 cCL("rmfsp",	e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
20243 cCL("rmfsm",	e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
20244 cCL("rmfsz",	e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
20245 cCL("rmfd",	e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
20246 cCL("rmfdp",	e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20247 cCL("rmfdm",	e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20248 cCL("rmfdz",	e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20249 cCL("rmfe",	e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
20250 cCL("rmfep",	e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
20251 cCL("rmfem",	e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
20252 cCL("rmfez",	e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
20253
20254 cCL("fmls",	e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
20255 cCL("fmlsp",	e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
20256 cCL("fmlsm",	e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
20257 cCL("fmlsz",	e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
20258 cCL("fmld",	e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
20259 cCL("fmldp",	e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20260 cCL("fmldm",	e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20261 cCL("fmldz",	e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20262 cCL("fmle",	e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
20263 cCL("fmlep",	e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
20264 cCL("fmlem",	e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
20265 cCL("fmlez",	e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
20266
20267 cCL("fdvs",	ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20268 cCL("fdvsp",	ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20269 cCL("fdvsm",	ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20270 cCL("fdvsz",	ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20271 cCL("fdvd",	ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20272 cCL("fdvdp",	ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20273 cCL("fdvdm",	ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20274 cCL("fdvdz",	ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20275 cCL("fdve",	ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20276 cCL("fdvep",	ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20277 cCL("fdvem",	ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20278 cCL("fdvez",	ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20279
20280 cCL("frds",	eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20281 cCL("frdsp",	eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20282 cCL("frdsm",	eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20283 cCL("frdsz",	eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20284 cCL("frdd",	eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20285 cCL("frddp",	eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20286 cCL("frddm",	eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20287 cCL("frddz",	eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20288 cCL("frde",	eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20289 cCL("frdep",	eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20290 cCL("frdem",	eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20291 cCL("frdez",	eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20292
20293 cCL("pols",	ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20294 cCL("polsp",	ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20295 cCL("polsm",	ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20296 cCL("polsz",	ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20297 cCL("pold",	ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20298 cCL("poldp",	ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20299 cCL("poldm",	ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20300 cCL("poldz",	ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20301 cCL("pole",	ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20302 cCL("polep",	ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20303 cCL("polem",	ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20304 cCL("polez",	ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20305
20306 cCE("cmf",	e90f110, 2, (RF, RF_IF),     fpa_cmp),
20307 C3E("cmfe",	ed0f110, 2, (RF, RF_IF),     fpa_cmp),
20308 cCE("cnf",	eb0f110, 2, (RF, RF_IF),     fpa_cmp),
20309 C3E("cnfe",	ef0f110, 2, (RF, RF_IF),     fpa_cmp),
20310
20311 cCL("flts",	e000110, 2, (RF, RR),	     rn_rd),
20312 cCL("fltsp",	e000130, 2, (RF, RR),	     rn_rd),
20313 cCL("fltsm",	e000150, 2, (RF, RR),	     rn_rd),
20314 cCL("fltsz",	e000170, 2, (RF, RR),	     rn_rd),
20315 cCL("fltd",	e000190, 2, (RF, RR),	     rn_rd),
20316 cCL("fltdp",	e0001b0, 2, (RF, RR),	     rn_rd),
20317 cCL("fltdm",	e0001d0, 2, (RF, RR),	     rn_rd),
20318 cCL("fltdz",	e0001f0, 2, (RF, RR),	     rn_rd),
20319 cCL("flte",	e080110, 2, (RF, RR),	     rn_rd),
20320 cCL("fltep",	e080130, 2, (RF, RR),	     rn_rd),
20321 cCL("fltem",	e080150, 2, (RF, RR),	     rn_rd),
20322 cCL("fltez",	e080170, 2, (RF, RR),	     rn_rd),
20323
20324  /* The implementation of the FIX instruction is broken on some
20325     assemblers, in that it accepts a precision specifier as well as a
20326     rounding specifier, despite the fact that this is meaningless.
20327     To be more compatible, we accept it as well, though of course it
20328     does not set any bits.  */
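 /* For example, "fixsz" below shares the encoding e100170 with "fixz", so

	fixz	r0, f1
	fixsz	r0, f1

    should assemble to identical instructions; the precision letter is
    parsed but, as noted above, contributes no bits.  */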
20329 cCE("fix",	e100110, 2, (RR, RF),	     rd_rm),
20330 cCL("fixp",	e100130, 2, (RR, RF),	     rd_rm),
20331 cCL("fixm",	e100150, 2, (RR, RF),	     rd_rm),
20332 cCL("fixz",	e100170, 2, (RR, RF),	     rd_rm),
20333 cCL("fixsp",	e100130, 2, (RR, RF),	     rd_rm),
20334 cCL("fixsm",	e100150, 2, (RR, RF),	     rd_rm),
20335 cCL("fixsz",	e100170, 2, (RR, RF),	     rd_rm),
20336 cCL("fixdp",	e100130, 2, (RR, RF),	     rd_rm),
20337 cCL("fixdm",	e100150, 2, (RR, RF),	     rd_rm),
20338 cCL("fixdz",	e100170, 2, (RR, RF),	     rd_rm),
20339 cCL("fixep",	e100130, 2, (RR, RF),	     rd_rm),
20340 cCL("fixem",	e100150, 2, (RR, RF),	     rd_rm),
20341 cCL("fixez",	e100170, 2, (RR, RF),	     rd_rm),
20342
20343  /* Instructions that were new with the real FPA; call them V2.  */
20344#undef  ARM_VARIANT
20345#define ARM_VARIANT  & fpu_fpa_ext_v2
20346
20347 cCE("lfm",	c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20348 cCL("lfmfd",	c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20349 cCL("lfmea",	d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20350 cCE("sfm",	c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20351 cCL("sfmfd",	d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20352 cCL("sfmea",	c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20353
20354#undef  ARM_VARIANT
20355#define ARM_VARIANT  & fpu_vfp_ext_v1xd  /* VFP V1xD (single precision).  */
20356
20357  /* Moves and type conversions.  */
20358 cCE("fcpys",	eb00a40, 2, (RVS, RVS),	      vfp_sp_monadic),
20359 cCE("fmrs",	e100a10, 2, (RR, RVS),	      vfp_reg_from_sp),
20360 cCE("fmsr",	e000a10, 2, (RVS, RR),	      vfp_sp_from_reg),
20361 cCE("fmstat",	ef1fa10, 0, (),		      noargs),
20362 cCE("vmrs",	ef00a10, 2, (APSR_RR, RVC),   vmrs),
20363 cCE("vmsr",	ee00a10, 2, (RVC, RR),        vmsr),
20364 cCE("fsitos",	eb80ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
20365 cCE("fuitos",	eb80a40, 2, (RVS, RVS),	      vfp_sp_monadic),
20366 cCE("ftosis",	ebd0a40, 2, (RVS, RVS),	      vfp_sp_monadic),
20367 cCE("ftosizs",	ebd0ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
20368 cCE("ftouis",	ebc0a40, 2, (RVS, RVS),	      vfp_sp_monadic),
20369 cCE("ftouizs",	ebc0ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
20370 cCE("fmrx",	ef00a10, 2, (RR, RVC),	      rd_rn),
20371 cCE("fmxr",	ee00a10, 2, (RVC, RR),	      rn_rd),
20372
20373  /* Memory operations.	 */
20374 cCE("flds",	d100a00, 2, (RVS, ADDRGLDC),  vfp_sp_ldst),
20375 cCE("fsts",	d000a00, 2, (RVS, ADDRGLDC),  vfp_sp_ldst),
20376 cCE("fldmias",	c900a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
20377 cCE("fldmfds",	c900a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
20378 cCE("fldmdbs",	d300a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
20379 cCE("fldmeas",	d300a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
20380 cCE("fldmiax",	c900b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
20381 cCE("fldmfdx",	c900b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
20382 cCE("fldmdbx",	d300b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
20383 cCE("fldmeax",	d300b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
20384 cCE("fstmias",	c800a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
20385 cCE("fstmeas",	c800a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
20386 cCE("fstmdbs",	d200a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
20387 cCE("fstmfds",	d200a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
20388 cCE("fstmiax",	c800b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
20389 cCE("fstmeax",	c800b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
20390 cCE("fstmdbx",	d200b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
20391 cCE("fstmfdx",	d200b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
20392
20393  /* Monadic operations.  */
20394 cCE("fabss",	eb00ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
20395 cCE("fnegs",	eb10a40, 2, (RVS, RVS),	      vfp_sp_monadic),
20396 cCE("fsqrts",	eb10ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
20397
20398  /* Dyadic operations.	 */
20399 cCE("fadds",	e300a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
20400 cCE("fsubs",	e300a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
20401 cCE("fmuls",	e200a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
20402 cCE("fdivs",	e800a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
20403 cCE("fmacs",	e000a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
20404 cCE("fmscs",	e100a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
20405 cCE("fnmuls",	e200a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
20406 cCE("fnmacs",	e000a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
20407 cCE("fnmscs",	e100a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
20408
20409  /* Comparisons.  */
20410 cCE("fcmps",	eb40a40, 2, (RVS, RVS),	      vfp_sp_monadic),
20411 cCE("fcmpzs",	eb50a40, 1, (RVS),	      vfp_sp_compare_z),
20412 cCE("fcmpes",	eb40ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
20413 cCE("fcmpezs",	eb50ac0, 1, (RVS),	      vfp_sp_compare_z),
20414
20415 /* Double-precision load/store instructions are still present on
20416    single-precision implementations.  */
20417 cCE("fldd",	d100b00, 2, (RVD, ADDRGLDC),  vfp_dp_ldst),
20418 cCE("fstd",	d000b00, 2, (RVD, ADDRGLDC),  vfp_dp_ldst),
20419 cCE("fldmiad",	c900b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
20420 cCE("fldmfdd",	c900b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
20421 cCE("fldmdbd",	d300b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
20422 cCE("fldmead",	d300b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
20423 cCE("fstmiad",	c800b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
20424 cCE("fstmead",	c800b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
20425 cCE("fstmdbd",	d200b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
20426 cCE("fstmfdd",	d200b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
20427
20428#undef  ARM_VARIANT
20429#define ARM_VARIANT  & fpu_vfp_ext_v1 /* VFP V1 (Double precision).  */
20430
20431  /* Moves and type conversions.  */
20432 cCE("fcpyd",	eb00b40, 2, (RVD, RVD),	      vfp_dp_rd_rm),
20433 cCE("fcvtds",	eb70ac0, 2, (RVD, RVS),	      vfp_dp_sp_cvt),
20434 cCE("fcvtsd",	eb70bc0, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
20435 cCE("fmdhr",	e200b10, 2, (RVD, RR),	      vfp_dp_rn_rd),
20436 cCE("fmdlr",	e000b10, 2, (RVD, RR),	      vfp_dp_rn_rd),
20437 cCE("fmrdh",	e300b10, 2, (RR, RVD),	      vfp_dp_rd_rn),
20438 cCE("fmrdl",	e100b10, 2, (RR, RVD),	      vfp_dp_rd_rn),
20439 cCE("fsitod",	eb80bc0, 2, (RVD, RVS),	      vfp_dp_sp_cvt),
20440 cCE("fuitod",	eb80b40, 2, (RVD, RVS),	      vfp_dp_sp_cvt),
20441 cCE("ftosid",	ebd0b40, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
20442 cCE("ftosizd",	ebd0bc0, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
20443 cCE("ftouid",	ebc0b40, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
20444 cCE("ftouizd",	ebc0bc0, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
20445
20446  /* Monadic operations.  */
20447 cCE("fabsd",	eb00bc0, 2, (RVD, RVD),	      vfp_dp_rd_rm),
20448 cCE("fnegd",	eb10b40, 2, (RVD, RVD),	      vfp_dp_rd_rm),
20449 cCE("fsqrtd",	eb10bc0, 2, (RVD, RVD),	      vfp_dp_rd_rm),
20450
20451  /* Dyadic operations.	 */
20452 cCE("faddd",	e300b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
20453 cCE("fsubd",	e300b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
20454 cCE("fmuld",	e200b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
20455 cCE("fdivd",	e800b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
20456 cCE("fmacd",	e000b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
20457 cCE("fmscd",	e100b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
20458 cCE("fnmuld",	e200b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
20459 cCE("fnmacd",	e000b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
20460 cCE("fnmscd",	e100b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
20461
20462  /* Comparisons.  */
20463 cCE("fcmpd",	eb40b40, 2, (RVD, RVD),	      vfp_dp_rd_rm),
20464 cCE("fcmpzd",	eb50b40, 1, (RVD),	      vfp_dp_rd),
20465 cCE("fcmped",	eb40bc0, 2, (RVD, RVD),	      vfp_dp_rd_rm),
20466 cCE("fcmpezd",	eb50bc0, 1, (RVD),	      vfp_dp_rd),
20467
20468#undef  ARM_VARIANT
20469#define ARM_VARIANT  & fpu_vfp_ext_v2
20470
20471 cCE("fmsrr",	c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
20472 cCE("fmrrs",	c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
20473 cCE("fmdrr",	c400b10, 3, (RVD, RR, RR),    vfp_dp_rm_rd_rn),
20474 cCE("fmrrd",	c500b10, 3, (RR, RR, RVD),    vfp_dp_rd_rn_rm),
20475
20476/* Instructions which may belong to either the Neon or VFP instruction sets.
20477   Individual encoder functions perform additional architecture checks.  */
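 /* As a rough illustration (operands chosen arbitrarily): the "vadd" entry
    below covers both the VFP form "vadd.f32 s0, s1, s2" and the Neon form
    "vadd.i32 q0, q1, q2"; the encoder function selects the encoding and
    performs the corresponding architecture check.  */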
20478#undef  ARM_VARIANT
20479#define ARM_VARIANT    & fpu_vfp_ext_v1xd
20480#undef  THUMB_VARIANT
20481#define THUMB_VARIANT  & fpu_vfp_ext_v1xd
20482
20483  /* These mnemonics are unique to VFP.  */
20484 NCE(vsqrt,     0,       2, (RVSD, RVSD),       vfp_nsyn_sqrt),
20485 NCE(vdiv,      0,       3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
20486 nCE(vnmul,     _vnmul,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20487 nCE(vnmla,     _vnmla,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20488 nCE(vnmls,     _vnmls,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20489 nCE(vcmp,      _vcmp,    2, (RVSD, RSVD_FI0),    vfp_nsyn_cmp),
20490 nCE(vcmpe,     _vcmpe,   2, (RVSD, RSVD_FI0),    vfp_nsyn_cmp),
20491 NCE(vpush,     0,       1, (VRSDLST),          vfp_nsyn_push),
20492 NCE(vpop,      0,       1, (VRSDLST),          vfp_nsyn_pop),
20493 NCE(vcvtz,     0,       2, (RVSD, RVSD),       vfp_nsyn_cvtz),
20494
20495  /* Mnemonics shared by Neon and VFP.  */
20496 nCEF(vmul,     _vmul,    3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
20497 nCEF(vmla,     _vmla,    3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
20498 nCEF(vmls,     _vmls,    3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
20499
20500 nCEF(vadd,     _vadd,    3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
20501 nCEF(vsub,     _vsub,    3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
20502
20503 NCEF(vabs,     1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
20504 NCEF(vneg,     1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
20505
20506 NCE(vldm,      c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20507 NCE(vldmia,    c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20508 NCE(vldmdb,    d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20509 NCE(vstm,      c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20510 NCE(vstmia,    c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20511 NCE(vstmdb,    d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20512 NCE(vldr,      d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
20513 NCE(vstr,      d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
20514
20515 nCEF(vcvt,     _vcvt,   3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
20516 nCEF(vcvtr,    _vcvt,   2, (RNSDQ, RNSDQ), neon_cvtr),
20517 NCEF(vcvtb,	eb20a40, 2, (RVSD, RVSD), neon_cvtb),
20518 NCEF(vcvtt,	eb20a40, 2, (RVSD, RVSD), neon_cvtt),
20519
20520
20521  /* NOTE: All VMOV encoding is special-cased!  */
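 /* A non-exhaustive sketch of the forms the VMOV parser has to tell apart,
    which is why a single (VMOV) operand class is used below:

	vmov	r0, s0		@ core register <-> single-precision register
	vmov	d0, r0, r1	@ core register pair <-> double-precision register
	vmov.i32 q0, #0		@ Neon vector immediate  */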
20522 NCE(vmov,      0,       1, (VMOV), neon_mov),
20523 NCE(vmovq,     0,       1, (VMOV), neon_mov),
20524
20525#undef  ARM_VARIANT
20526#define ARM_VARIANT    & arm_ext_fp16
20527#undef  THUMB_VARIANT
20528#define THUMB_VARIANT  & arm_ext_fp16
20529 /* New instructions added from v8.2, allowing the extraction and insertion of
20530    the upper 16 bits of a 32-bit vector register.  */
20531 NCE (vmovx,     eb00a40,       2, (RVS, RVS), neon_movhf),
20532 NCE (vins,      eb00ac0,       2, (RVS, RVS), neon_movhf),
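 /* Illustrative usage only (registers chosen arbitrarily):

	vmovx.f16 s0, s1	@ extract the upper half of s1 into s0
	vins.f16  s0, s1	@ insert the lower half of s1 into the upper half of s0

    i.e. the half-precision extract and insert forms described above.  */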
20533
20534#undef  THUMB_VARIANT
20535#define THUMB_VARIANT  & fpu_neon_ext_v1
20536#undef  ARM_VARIANT
20537#define ARM_VARIANT    & fpu_neon_ext_v1
20538
20539  /* Data processing with three registers of the same length.  */
20540  /* Integer ops, valid types S8 S16 S32 U8 U16 U32.  */
20541 NUF(vaba,      0000710, 3, (RNDQ, RNDQ,  RNDQ), neon_dyadic_i_su),
20542 NUF(vabaq,     0000710, 3, (RNQ,  RNQ,   RNQ),  neon_dyadic_i_su),
20543 NUF(vhadd,     0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20544 NUF(vhaddq,    0000000, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i_su),
20545 NUF(vrhadd,    0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20546 NUF(vrhaddq,   0000100, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i_su),
20547 NUF(vhsub,     0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20548 NUF(vhsubq,    0000200, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i_su),
20549  /* Integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64.  */
20550 NUF(vqadd,     0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
20551 NUF(vqaddq,    0000010, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i64_su),
20552 NUF(vqsub,     0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
20553 NUF(vqsubq,    0000210, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i64_su),
20554 NUF(vrshl,     0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
20555 NUF(vrshlq,    0000500, 3, (RNQ,  oRNQ,  RNQ),  neon_rshl),
20556 NUF(vqrshl,    0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
20557 NUF(vqrshlq,   0000510, 3, (RNQ,  oRNQ,  RNQ),  neon_rshl),
20558  /* If not immediate, fall back to neon_dyadic_i64_su.
20559     shl_imm should accept I8 I16 I32 I64,
20560     qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64.  */
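 /* Illustrative only: "vshl.i32 d0, d1, #3" takes the immediate path
    handled here, whereas "vshl.s32 d0, d1, d2" has no immediate and falls
    back to neon_dyadic_i64_su as described above.  */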
20561 nUF(vshl,      _vshl,    3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
20562 nUF(vshlq,     _vshl,    3, (RNQ,  oRNQ,  RNDQ_I63b), neon_shl_imm),
20563 nUF(vqshl,     _vqshl,   3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
20564 nUF(vqshlq,    _vqshl,   3, (RNQ,  oRNQ,  RNDQ_I63b), neon_qshl_imm),
20565  /* Logic ops, types optional & ignored.  */
20566 nUF(vand,      _vand,    3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20567 nUF(vandq,     _vand,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
20568 nUF(vbic,      _vbic,    3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20569 nUF(vbicq,     _vbic,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
20570 nUF(vorr,      _vorr,    3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20571 nUF(vorrq,     _vorr,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
20572 nUF(vorn,      _vorn,    3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20573 nUF(vornq,     _vorn,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
20574 nUF(veor,      _veor,    3, (RNDQ, oRNDQ, RNDQ),      neon_logic),
20575 nUF(veorq,     _veor,    3, (RNQ,  oRNQ,  RNQ),       neon_logic),
20576  /* Bitfield ops, untyped.  */
20577 NUF(vbsl,      1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20578 NUF(vbslq,     1100110, 3, (RNQ,  RNQ,  RNQ),  neon_bitfield),
20579 NUF(vbit,      1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20580 NUF(vbitq,     1200110, 3, (RNQ,  RNQ,  RNQ),  neon_bitfield),
20581 NUF(vbif,      1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20582 NUF(vbifq,     1300110, 3, (RNQ,  RNQ,  RNQ),  neon_bitfield),
20583  /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32.  */
20584 nUF(vabd,      _vabd,    3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20585 nUF(vabdq,     _vabd,    3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_if_su),
20586 nUF(vmax,      _vmax,    3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20587 nUF(vmaxq,     _vmax,    3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_if_su),
20588 nUF(vmin,      _vmin,    3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20589 nUF(vminq,     _vmin,    3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_if_su),
20590  /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
20591     back to neon_dyadic_if_su.  */
20592 nUF(vcge,      _vcge,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
20593 nUF(vcgeq,     _vcge,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp),
20594 nUF(vcgt,      _vcgt,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
20595 nUF(vcgtq,     _vcgt,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp),
20596 nUF(vclt,      _vclt,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
20597 nUF(vcltq,     _vclt,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp_inv),
20598 nUF(vcle,      _vcle,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
20599 nUF(vcleq,     _vcle,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp_inv),
20600  /* Comparison. Type I8 I16 I32 F32.  */
20601 nUF(vceq,      _vceq,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
20602 nUF(vceqq,     _vceq,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_ceq),
20603  /* As above, D registers only.  */
20604 nUF(vpmax,     _vpmax,   3, (RND, oRND, RND), neon_dyadic_if_su_d),
20605 nUF(vpmin,     _vpmin,   3, (RND, oRND, RND), neon_dyadic_if_su_d),
20606  /* Int and float variants, signedness unimportant.  */
20607 nUF(vmlaq,     _vmla,    3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_mac_maybe_scalar),
20608 nUF(vmlsq,     _vmls,    3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_mac_maybe_scalar),
20609 nUF(vpadd,     _vpadd,   3, (RND,  oRND,  RND),       neon_dyadic_if_i_d),
20610  /* Add/sub take types I8 I16 I32 I64 F32.  */
20611 nUF(vaddq,     _vadd,    3, (RNQ,  oRNQ,  RNQ),  neon_addsub_if_i),
20612 nUF(vsubq,     _vsub,    3, (RNQ,  oRNQ,  RNQ),  neon_addsub_if_i),
20613  /* vtst takes sizes 8, 16, 32.  */
20614 NUF(vtst,      0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
20615 NUF(vtstq,     0000810, 3, (RNQ,  oRNQ,  RNQ),  neon_tst),
20616  /* VMUL takes I8 I16 I32 F32 P8.  */
20617 nUF(vmulq,     _vmul,     3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_mul),
20618  /* VQD{R}MULH takes S16 S32.  */
20619 nUF(vqdmulh,   _vqdmulh,  3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20620 nUF(vqdmulhq,  _vqdmulh,  3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_qdmulh),
20621 nUF(vqrdmulh,  _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20622 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_qdmulh),
20623 NUF(vacge,     0000e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
20624 NUF(vacgeq,    0000e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute),
20625 NUF(vacgt,     0200e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
20626 NUF(vacgtq,    0200e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute),
20627 NUF(vaclt,     0200e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
20628 NUF(vacltq,    0200e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute_inv),
20629 NUF(vacle,     0000e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
20630 NUF(vacleq,    0000e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute_inv),
20631 NUF(vrecps,    0000f10,  3, (RNDQ, oRNDQ, RNDQ), neon_step),
20632 NUF(vrecpsq,   0000f10,  3, (RNQ,  oRNQ,  RNQ),  neon_step),
20633 NUF(vrsqrts,   0200f10,  3, (RNDQ, oRNDQ, RNDQ), neon_step),
20634 NUF(vrsqrtsq,  0200f10,  3, (RNQ,  oRNQ,  RNQ),  neon_step),
20635 /* ARM v8.1 extension.  */
20636 nUF (vqrdmlah,  _vqrdmlah, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
20637 nUF (vqrdmlahq, _vqrdmlah, 3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_qrdmlah),
20638 nUF (vqrdmlsh,  _vqrdmlsh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
20639 nUF (vqrdmlshq, _vqrdmlsh, 3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_qrdmlah),
20640
20641  /* Two address, int/float. Types S8 S16 S32 F32.  */
20642 NUF(vabsq,     1b10300, 2, (RNQ,  RNQ),      neon_abs_neg),
20643 NUF(vnegq,     1b10380, 2, (RNQ,  RNQ),      neon_abs_neg),
20644
20645  /* Data processing with two registers and a shift amount.  */
20646  /* Right shifts, and variants with rounding.
20647     Types accepted S8 S16 S32 S64 U8 U16 U32 U64.  */
20648 NUF(vshr,      0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
20649 NUF(vshrq,     0800010, 3, (RNQ,  oRNQ,  I64z), neon_rshift_round_imm),
20650 NUF(vrshr,     0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
20651 NUF(vrshrq,    0800210, 3, (RNQ,  oRNQ,  I64z), neon_rshift_round_imm),
20652 NUF(vsra,      0800110, 3, (RNDQ, oRNDQ, I64),  neon_rshift_round_imm),
20653 NUF(vsraq,     0800110, 3, (RNQ,  oRNQ,  I64),  neon_rshift_round_imm),
20654 NUF(vrsra,     0800310, 3, (RNDQ, oRNDQ, I64),  neon_rshift_round_imm),
20655 NUF(vrsraq,    0800310, 3, (RNQ,  oRNQ,  I64),  neon_rshift_round_imm),
20656  /* Shift and insert. Sizes accepted 8 16 32 64.  */
20657 NUF(vsli,      1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
20658 NUF(vsliq,     1800510, 3, (RNQ,  oRNQ,  I63), neon_sli),
20659 NUF(vsri,      1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
20660 NUF(vsriq,     1800410, 3, (RNQ,  oRNQ,  I64), neon_sri),
20661  /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64.  */
20662 NUF(vqshlu,    1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
20663 NUF(vqshluq,   1800610, 3, (RNQ,  oRNQ,  I63), neon_qshlu_imm),
20664  /* Right shift immediate, saturating & narrowing, with rounding variants.
20665     Types accepted S16 S32 S64 U16 U32 U64.  */
20666 NUF(vqshrn,    0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
20667 NUF(vqrshrn,   0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
20668  /* As above, unsigned. Types accepted S16 S32 S64.  */
20669 NUF(vqshrun,   0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
20670 NUF(vqrshrun,  0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
20671  /* Right shift narrowing. Types accepted I16 I32 I64.  */
20672 NUF(vshrn,     0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
20673 NUF(vrshrn,    0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
20674  /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant.  */
20675 nUF(vshll,     _vshll,   3, (RNQ, RND, I32),  neon_shll),
20676  /* CVT with optional immediate for fixed-point variant.  */
20677 nUF(vcvtq,     _vcvt,    3, (RNQ, RNQ, oI32b), neon_cvt),
20678
20679 nUF(vmvn,      _vmvn,    2, (RNDQ, RNDQ_Ibig), neon_mvn),
20680 nUF(vmvnq,     _vmvn,    2, (RNQ,  RNDQ_Ibig), neon_mvn),
20681
20682  /* Data processing, three registers of different lengths.  */
20683  /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32.  */
20684 NUF(vabal,     0800500, 3, (RNQ, RND, RND),  neon_abal),
20685 NUF(vabdl,     0800700, 3, (RNQ, RND, RND),  neon_dyadic_long),
20686 NUF(vaddl,     0800000, 3, (RNQ, RND, RND),  neon_dyadic_long),
20687 NUF(vsubl,     0800200, 3, (RNQ, RND, RND),  neon_dyadic_long),
20688  /* If not scalar, fall back to neon_dyadic_long.
20689     Vector types as above, scalar types S16 S32 U16 U32.  */
20690 nUF(vmlal,     _vmlal,   3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
20691 nUF(vmlsl,     _vmlsl,   3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
20692  /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32.  */
20693 NUF(vaddw,     0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
20694 NUF(vsubw,     0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
20695  /* Dyadic, narrowing insns. Types I16 I32 I64.  */
20696 NUF(vaddhn,    0800400, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
20697 NUF(vraddhn,   1800400, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
20698 NUF(vsubhn,    0800600, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
20699 NUF(vrsubhn,   1800600, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
20700  /* Saturating doubling multiplies. Types S16 S32.  */
20701 nUF(vqdmlal,   _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20702 nUF(vqdmlsl,   _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20703 nUF(vqdmull,   _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20704  /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
20705     S16 S32 U16 U32.  */
20706 nUF(vmull,     _vmull,   3, (RNQ, RND, RND_RNSC), neon_vmull),
20707
20708  /* Extract. Size 8.  */
20709 NUF(vext,      0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
20710 NUF(vextq,     0b00000, 4, (RNQ,  oRNQ,  RNQ,  I15), neon_ext),
20711
20712  /* Two registers, miscellaneous.  */
20713  /* Reverse. Sizes 8 16 32 (must be < size in opcode).  */
20714 NUF(vrev64,    1b00000, 2, (RNDQ, RNDQ),     neon_rev),
20715 NUF(vrev64q,   1b00000, 2, (RNQ,  RNQ),      neon_rev),
20716 NUF(vrev32,    1b00080, 2, (RNDQ, RNDQ),     neon_rev),
20717 NUF(vrev32q,   1b00080, 2, (RNQ,  RNQ),      neon_rev),
20718 NUF(vrev16,    1b00100, 2, (RNDQ, RNDQ),     neon_rev),
20719 NUF(vrev16q,   1b00100, 2, (RNQ,  RNQ),      neon_rev),
20720  /* Vector replicate. Sizes 8 16 32.  */
20721 nCE(vdup,      _vdup,    2, (RNDQ, RR_RNSC),  neon_dup),
20722 nCE(vdupq,     _vdup,    2, (RNQ,  RR_RNSC),  neon_dup),
20723  /* VMOVL. Types S8 S16 S32 U8 U16 U32.  */
20724 NUF(vmovl,     0800a10, 2, (RNQ, RND),       neon_movl),
20725  /* VMOVN. Types I16 I32 I64.  */
20726 nUF(vmovn,     _vmovn,   2, (RND, RNQ),       neon_movn),
20727  /* VQMOVN. Types S16 S32 S64 U16 U32 U64.  */
20728 nUF(vqmovn,    _vqmovn,  2, (RND, RNQ),       neon_qmovn),
20729  /* VQMOVUN. Types S16 S32 S64.  */
20730 nUF(vqmovun,   _vqmovun, 2, (RND, RNQ),       neon_qmovun),
20731  /* VZIP / VUZP. Sizes 8 16 32.  */
20732 NUF(vzip,      1b20180, 2, (RNDQ, RNDQ),     neon_zip_uzp),
20733 NUF(vzipq,     1b20180, 2, (RNQ,  RNQ),      neon_zip_uzp),
20734 NUF(vuzp,      1b20100, 2, (RNDQ, RNDQ),     neon_zip_uzp),
20735 NUF(vuzpq,     1b20100, 2, (RNQ,  RNQ),      neon_zip_uzp),
20736  /* VQABS / VQNEG. Types S8 S16 S32.  */
20737 NUF(vqabs,     1b00700, 2, (RNDQ, RNDQ),     neon_sat_abs_neg),
20738 NUF(vqabsq,    1b00700, 2, (RNQ,  RNQ),      neon_sat_abs_neg),
20739 NUF(vqneg,     1b00780, 2, (RNDQ, RNDQ),     neon_sat_abs_neg),
20740 NUF(vqnegq,    1b00780, 2, (RNQ,  RNQ),      neon_sat_abs_neg),
20741  /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32.  */
20742 NUF(vpadal,    1b00600, 2, (RNDQ, RNDQ),     neon_pair_long),
20743 NUF(vpadalq,   1b00600, 2, (RNQ,  RNQ),      neon_pair_long),
20744 NUF(vpaddl,    1b00200, 2, (RNDQ, RNDQ),     neon_pair_long),
20745 NUF(vpaddlq,   1b00200, 2, (RNQ,  RNQ),      neon_pair_long),
20746  /* Reciprocal estimates.  Types U32 F16 F32.  */
20747 NUF(vrecpe,    1b30400, 2, (RNDQ, RNDQ),     neon_recip_est),
20748 NUF(vrecpeq,   1b30400, 2, (RNQ,  RNQ),      neon_recip_est),
20749 NUF(vrsqrte,   1b30480, 2, (RNDQ, RNDQ),     neon_recip_est),
20750 NUF(vrsqrteq,  1b30480, 2, (RNQ,  RNQ),      neon_recip_est),
20751  /* VCLS. Types S8 S16 S32.  */
20752 NUF(vcls,      1b00400, 2, (RNDQ, RNDQ),     neon_cls),
20753 NUF(vclsq,     1b00400, 2, (RNQ,  RNQ),      neon_cls),
20754  /* VCLZ. Types I8 I16 I32.  */
20755 NUF(vclz,      1b00480, 2, (RNDQ, RNDQ),     neon_clz),
20756 NUF(vclzq,     1b00480, 2, (RNQ,  RNQ),      neon_clz),
20757  /* VCNT. Size 8.  */
20758 NUF(vcnt,      1b00500, 2, (RNDQ, RNDQ),     neon_cnt),
20759 NUF(vcntq,     1b00500, 2, (RNQ,  RNQ),      neon_cnt),
20760  /* Two address, untyped.  */
20761 NUF(vswp,      1b20000, 2, (RNDQ, RNDQ),     neon_swp),
20762 NUF(vswpq,     1b20000, 2, (RNQ,  RNQ),      neon_swp),
20763  /* VTRN. Sizes 8 16 32.  */
20764 nUF(vtrn,      _vtrn,    2, (RNDQ, RNDQ),     neon_trn),
20765 nUF(vtrnq,     _vtrn,    2, (RNQ,  RNQ),      neon_trn),
20766
20767  /* Table lookup. Size 8.  */
20768 NUF(vtbl,      1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
20769 NUF(vtbx,      1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
20770
20771#undef  THUMB_VARIANT
20772#define THUMB_VARIANT  & fpu_vfp_v3_or_neon_ext
20773#undef  ARM_VARIANT
20774#define ARM_VARIANT    & fpu_vfp_v3_or_neon_ext
20775
20776  /* Neon element/structure load/store.  */
20777 nUF(vld1,      _vld1,    2, (NSTRLST, ADDR),  neon_ldx_stx),
20778 nUF(vst1,      _vst1,    2, (NSTRLST, ADDR),  neon_ldx_stx),
20779 nUF(vld2,      _vld2,    2, (NSTRLST, ADDR),  neon_ldx_stx),
20780 nUF(vst2,      _vst2,    2, (NSTRLST, ADDR),  neon_ldx_stx),
20781 nUF(vld3,      _vld3,    2, (NSTRLST, ADDR),  neon_ldx_stx),
20782 nUF(vst3,      _vst3,    2, (NSTRLST, ADDR),  neon_ldx_stx),
20783 nUF(vld4,      _vld4,    2, (NSTRLST, ADDR),  neon_ldx_stx),
20784 nUF(vst4,      _vst4,    2, (NSTRLST, ADDR),  neon_ldx_stx),
20785
20786#undef  THUMB_VARIANT
20787#define THUMB_VARIANT & fpu_vfp_ext_v3xd
20788#undef  ARM_VARIANT
20789#define ARM_VARIANT   & fpu_vfp_ext_v3xd
20790 cCE("fconsts",   eb00a00, 2, (RVS, I255),      vfp_sp_const),
20791 cCE("fshtos",    eba0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
20792 cCE("fsltos",    eba0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
20793 cCE("fuhtos",    ebb0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
20794 cCE("fultos",    ebb0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
20795 cCE("ftoshs",    ebe0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
20796 cCE("ftosls",    ebe0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
20797 cCE("ftouhs",    ebf0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
20798 cCE("ftouls",    ebf0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
20799
20800#undef  THUMB_VARIANT
20801#define THUMB_VARIANT  & fpu_vfp_ext_v3
20802#undef  ARM_VARIANT
20803#define ARM_VARIANT    & fpu_vfp_ext_v3
20804
20805 cCE("fconstd",   eb00b00, 2, (RVD, I255),      vfp_dp_const),
20806 cCE("fshtod",    eba0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
20807 cCE("fsltod",    eba0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
20808 cCE("fuhtod",    ebb0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
20809 cCE("fultod",    ebb0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
20810 cCE("ftoshd",    ebe0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
20811 cCE("ftosld",    ebe0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
20812 cCE("ftouhd",    ebf0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
20813 cCE("ftould",    ebf0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
20814
20815#undef  ARM_VARIANT
20816#define ARM_VARIANT    & fpu_vfp_ext_fma
20817#undef  THUMB_VARIANT
20818#define THUMB_VARIANT  & fpu_vfp_ext_fma
20819 /* Mnemonics shared by Neon and VFP.  These are included in the
20820    VFP FMA variant; NEON and VFP FMA always includes the NEON
20821    FMA instructions.  */
20822 nCEF(vfma,     _vfma,    3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
20823 nCEF(vfms,     _vfms,    3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
 /* ffmas/ffnmas/ffmad/ffnmad are dummy mnemonics to satisfy gas;
    the v form should always be used.  */
20826 cCE("ffmas",	ea00a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
20827 cCE("ffnmas",	ea00a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
20828 cCE("ffmad",	ea00b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
20829 cCE("ffnmad",	ea00b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
20830 nCE(vfnma,     _vfnma,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20831 nCE(vfnms,     _vfnms,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20832
20833#undef THUMB_VARIANT
20834#undef  ARM_VARIANT
20835#define ARM_VARIANT  & arm_cext_xscale /* Intel XScale extensions.  */
20836
20837 cCE("mia",	e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20838 cCE("miaph",	e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20839 cCE("miabb",	e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20840 cCE("miabt",	e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20841 cCE("miatb",	e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20842 cCE("miatt",	e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20843 cCE("mar",	c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
20844 cCE("mra",	c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
20845
20846#undef  ARM_VARIANT
20847#define ARM_VARIANT  & arm_cext_iwmmxt /* Intel Wireless MMX technology.  */
20848
20849 cCE("tandcb",	e13f130, 1, (RR),		    iwmmxt_tandorc),
20850 cCE("tandch",	e53f130, 1, (RR),		    iwmmxt_tandorc),
20851 cCE("tandcw",	e93f130, 1, (RR),		    iwmmxt_tandorc),
20852 cCE("tbcstb",	e400010, 2, (RIWR, RR),		    rn_rd),
20853 cCE("tbcsth",	e400050, 2, (RIWR, RR),		    rn_rd),
20854 cCE("tbcstw",	e400090, 2, (RIWR, RR),		    rn_rd),
20855 cCE("textrcb",	e130170, 2, (RR, I7),		    iwmmxt_textrc),
20856 cCE("textrch",	e530170, 2, (RR, I7),		    iwmmxt_textrc),
20857 cCE("textrcw",	e930170, 2, (RR, I7),		    iwmmxt_textrc),
20858 cCE("textrmub",e100070, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
20859 cCE("textrmuh",e500070, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
20860 cCE("textrmuw",e900070, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
20861 cCE("textrmsb",e100078, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
20862 cCE("textrmsh",e500078, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
20863 cCE("textrmsw",e900078, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
20864 cCE("tinsrb",	e600010, 3, (RIWR, RR, I7),	    iwmmxt_tinsr),
20865 cCE("tinsrh",	e600050, 3, (RIWR, RR, I7),	    iwmmxt_tinsr),
20866 cCE("tinsrw",	e600090, 3, (RIWR, RR, I7),	    iwmmxt_tinsr),
20867 cCE("tmcr",	e000110, 2, (RIWC_RIWG, RR),	    rn_rd),
20868 cCE("tmcrr",	c400000, 3, (RIWR, RR, RR),	    rm_rd_rn),
20869 cCE("tmia",	e200010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
20870 cCE("tmiaph",	e280010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
20871 cCE("tmiabb",	e2c0010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
20872 cCE("tmiabt",	e2d0010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
20873 cCE("tmiatb",	e2e0010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
20874 cCE("tmiatt",	e2f0010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
20875 cCE("tmovmskb",e100030, 2, (RR, RIWR),		    rd_rn),
20876 cCE("tmovmskh",e500030, 2, (RR, RIWR),		    rd_rn),
20877 cCE("tmovmskw",e900030, 2, (RR, RIWR),		    rd_rn),
20878 cCE("tmrc",	e100110, 2, (RR, RIWC_RIWG),	    rd_rn),
20879 cCE("tmrrc",	c500000, 3, (RR, RR, RIWR),	    rd_rn_rm),
20880 cCE("torcb",	e13f150, 1, (RR),		    iwmmxt_tandorc),
20881 cCE("torch",	e53f150, 1, (RR),		    iwmmxt_tandorc),
20882 cCE("torcw",	e93f150, 1, (RR),		    iwmmxt_tandorc),
20883 cCE("waccb",	e0001c0, 2, (RIWR, RIWR),	    rd_rn),
20884 cCE("wacch",	e4001c0, 2, (RIWR, RIWR),	    rd_rn),
20885 cCE("waccw",	e8001c0, 2, (RIWR, RIWR),	    rd_rn),
20886 cCE("waddbss",	e300180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20887 cCE("waddb",	e000180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20888 cCE("waddbus",	e100180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20889 cCE("waddhss",	e700180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20890 cCE("waddh",	e400180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20891 cCE("waddhus",	e500180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20892 cCE("waddwss",	eb00180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20893 cCE("waddw",	e800180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20894 cCE("waddwus",	e900180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20895 cCE("waligni",	e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
20896 cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20897 cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20898 cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20899 cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20900 cCE("wand",	e200000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20901 cCE("wandn",	e300000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20902 cCE("wavg2b",	e800000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20903 cCE("wavg2br",	e900000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20904 cCE("wavg2h",	ec00000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20905 cCE("wavg2hr",	ed00000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20906 cCE("wcmpeqb",	e000060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20907 cCE("wcmpeqh",	e400060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20908 cCE("wcmpeqw",	e800060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20909 cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20910 cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20911 cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20912 cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20913 cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20914 cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20915 cCE("wldrb",	c100000, 2, (RIWR, ADDR),	    iwmmxt_wldstbh),
20916 cCE("wldrh",	c500000, 2, (RIWR, ADDR),	    iwmmxt_wldstbh),
20917 cCE("wldrw",	c100100, 2, (RIWR_RIWC, ADDR),	    iwmmxt_wldstw),
20918 cCE("wldrd",	c500100, 2, (RIWR, ADDR),	    iwmmxt_wldstd),
20919 cCE("wmacs",	e600100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20920 cCE("wmacsz",	e700100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20921 cCE("wmacu",	e400100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20922 cCE("wmacuz",	e500100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20923 cCE("wmadds",	ea00100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20924 cCE("wmaddu",	e800100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20925 cCE("wmaxsb",	e200160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20926 cCE("wmaxsh",	e600160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20927 cCE("wmaxsw",	ea00160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20928 cCE("wmaxub",	e000160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20929 cCE("wmaxuh",	e400160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20930 cCE("wmaxuw",	e800160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20931 cCE("wminsb",	e300160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20932 cCE("wminsh",	e700160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20933 cCE("wminsw",	eb00160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20934 cCE("wminub",	e100160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20935 cCE("wminuh",	e500160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20936 cCE("wminuw",	e900160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20937 cCE("wmov",	e000000, 2, (RIWR, RIWR),	    iwmmxt_wmov),
20938 cCE("wmulsm",	e300100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20939 cCE("wmulsl",	e200100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20940 cCE("wmulum",	e100100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20941 cCE("wmulul",	e000100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20942 cCE("wor",	e000000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20943 cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20944 cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20945 cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20946 cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20947 cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20948 cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20949 cCE("wrorh",	e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20950 cCE("wrorhg",	e700148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
20951 cCE("wrorw",	eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20952 cCE("wrorwg",	eb00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
20953 cCE("wrord",	ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20954 cCE("wrordg",	ef00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
20955 cCE("wsadb",	e000120, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20956 cCE("wsadbz",	e100120, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20957 cCE("wsadh",	e400120, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20958 cCE("wsadhz",	e500120, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20959 cCE("wshufh",	e0001e0, 3, (RIWR, RIWR, I255),	    iwmmxt_wshufh),
20960 cCE("wsllh",	e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20961 cCE("wsllhg",	e500148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
20962 cCE("wsllw",	e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20963 cCE("wsllwg",	e900148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
20964 cCE("wslld",	ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20965 cCE("wslldg",	ed00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
20966 cCE("wsrah",	e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20967 cCE("wsrahg",	e400148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
20968 cCE("wsraw",	e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20969 cCE("wsrawg",	e800148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
20970 cCE("wsrad",	ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20971 cCE("wsradg",	ec00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
20972 cCE("wsrlh",	e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20973 cCE("wsrlhg",	e600148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
20974 cCE("wsrlw",	ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20975 cCE("wsrlwg",	ea00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
20976 cCE("wsrld",	ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20977 cCE("wsrldg",	ee00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
20978 cCE("wstrb",	c000000, 2, (RIWR, ADDR),	    iwmmxt_wldstbh),
20979 cCE("wstrh",	c400000, 2, (RIWR, ADDR),	    iwmmxt_wldstbh),
20980 cCE("wstrw",	c000100, 2, (RIWR_RIWC, ADDR),	    iwmmxt_wldstw),
20981 cCE("wstrd",	c400100, 2, (RIWR, ADDR),	    iwmmxt_wldstd),
20982 cCE("wsubbss",	e3001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20983 cCE("wsubb",	e0001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20984 cCE("wsubbus",	e1001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20985 cCE("wsubhss",	e7001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20986 cCE("wsubh",	e4001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20987 cCE("wsubhus",	e5001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20988 cCE("wsubwss",	eb001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20989 cCE("wsubw",	e8001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20990 cCE("wsubwus",	e9001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20991 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR),	    rd_rn),
20992 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR),	    rd_rn),
20993 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR),	    rd_rn),
20994 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR),	    rd_rn),
20995 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR),	    rd_rn),
20996 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR),	    rd_rn),
20997 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20998 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
20999 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
21000 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR),	    rd_rn),
21001 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR),	    rd_rn),
21002 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR),	    rd_rn),
21003 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR),	    rd_rn),
21004 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR),	    rd_rn),
21005 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR),	    rd_rn),
21006 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
21007 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
21008 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
21009 cCE("wxor",	e100000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
21010 cCE("wzero",	e300000, 1, (RIWR),		    iwmmxt_wzero),
21011
21012#undef  ARM_VARIANT
21013#define ARM_VARIANT  & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2.  */
21014
21015 cCE("torvscb",   e12f190, 1, (RR),		    iwmmxt_tandorc),
21016 cCE("torvsch",   e52f190, 1, (RR),		    iwmmxt_tandorc),
21017 cCE("torvscw",   e92f190, 1, (RR),		    iwmmxt_tandorc),
21018 cCE("wabsb",     e2001c0, 2, (RIWR, RIWR),           rd_rn),
21019 cCE("wabsh",     e6001c0, 2, (RIWR, RIWR),           rd_rn),
21020 cCE("wabsw",     ea001c0, 2, (RIWR, RIWR),           rd_rn),
21021 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21022 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21023 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21024 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21025 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21026 cCE("waddhc",    e600180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21027 cCE("waddwc",    ea00180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21028 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21029 cCE("wavg4",	e400000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21030 cCE("wavg4r",    e500000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21031 cCE("wmaddsn",   ee00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21032 cCE("wmaddsx",   eb00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21033 cCE("wmaddun",   ec00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21034 cCE("wmaddux",   e900100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21035 cCE("wmerge",    e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
21036 cCE("wmiabb",    e0000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21037 cCE("wmiabt",    e1000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21038 cCE("wmiatb",    e2000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21039 cCE("wmiatt",    e3000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21040 cCE("wmiabbn",   e4000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21041 cCE("wmiabtn",   e5000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21042 cCE("wmiatbn",   e6000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21043 cCE("wmiattn",   e7000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21044 cCE("wmiawbb",   e800120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21045 cCE("wmiawbt",   e900120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21046 cCE("wmiawtb",   ea00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21047 cCE("wmiawtt",   eb00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21048 cCE("wmiawbbn",  ec00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21049 cCE("wmiawbtn",  ed00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21050 cCE("wmiawtbn",  ee00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21051 cCE("wmiawttn",  ef00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21052 cCE("wmulsmr",   ef00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21053 cCE("wmulumr",   ed00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21054 cCE("wmulwumr",  ec000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21055 cCE("wmulwsmr",  ee000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21056 cCE("wmulwum",   ed000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21057 cCE("wmulwsm",   ef000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21058 cCE("wmulwl",    eb000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21059 cCE("wqmiabb",   e8000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21060 cCE("wqmiabt",   e9000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21061 cCE("wqmiatb",   ea000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21062 cCE("wqmiatt",   eb000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21063 cCE("wqmiabbn",  ec000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21064 cCE("wqmiabtn",  ed000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21065 cCE("wqmiatbn",  ee000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21066 cCE("wqmiattn",  ef000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21067 cCE("wqmulm",    e100080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21068 cCE("wqmulmr",   e300080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21069 cCE("wqmulwm",   ec000e0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21070 cCE("wqmulwmr",  ee000e0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21071 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
21072
21073#undef  ARM_VARIANT
21074#define ARM_VARIANT  & arm_cext_maverick /* Cirrus Maverick instructions.  */
21075
21076 cCE("cfldrs",	c100400, 2, (RMF, ADDRGLDC),	      rd_cpaddr),
21077 cCE("cfldrd",	c500400, 2, (RMD, ADDRGLDC),	      rd_cpaddr),
21078 cCE("cfldr32",	c100500, 2, (RMFX, ADDRGLDC),	      rd_cpaddr),
21079 cCE("cfldr64",	c500500, 2, (RMDX, ADDRGLDC),	      rd_cpaddr),
21080 cCE("cfstrs",	c000400, 2, (RMF, ADDRGLDC),	      rd_cpaddr),
21081 cCE("cfstrd",	c400400, 2, (RMD, ADDRGLDC),	      rd_cpaddr),
21082 cCE("cfstr32",	c000500, 2, (RMFX, ADDRGLDC),	      rd_cpaddr),
21083 cCE("cfstr64",	c400500, 2, (RMDX, ADDRGLDC),	      rd_cpaddr),
21084 cCE("cfmvsr",	e000450, 2, (RMF, RR),		      rn_rd),
21085 cCE("cfmvrs",	e100450, 2, (RR, RMF),		      rd_rn),
21086 cCE("cfmvdlr",	e000410, 2, (RMD, RR),		      rn_rd),
21087 cCE("cfmvrdl",	e100410, 2, (RR, RMD),		      rd_rn),
21088 cCE("cfmvdhr",	e000430, 2, (RMD, RR),		      rn_rd),
21089 cCE("cfmvrdh",	e100430, 2, (RR, RMD),		      rd_rn),
21090 cCE("cfmv64lr",e000510, 2, (RMDX, RR),		      rn_rd),
21091 cCE("cfmvr64l",e100510, 2, (RR, RMDX),		      rd_rn),
21092 cCE("cfmv64hr",e000530, 2, (RMDX, RR),		      rn_rd),
21093 cCE("cfmvr64h",e100530, 2, (RR, RMDX),		      rd_rn),
21094 cCE("cfmval32",e200440, 2, (RMAX, RMFX),	      rd_rn),
21095 cCE("cfmv32al",e100440, 2, (RMFX, RMAX),	      rd_rn),
21096 cCE("cfmvam32",e200460, 2, (RMAX, RMFX),	      rd_rn),
21097 cCE("cfmv32am",e100460, 2, (RMFX, RMAX),	      rd_rn),
21098 cCE("cfmvah32",e200480, 2, (RMAX, RMFX),	      rd_rn),
21099 cCE("cfmv32ah",e100480, 2, (RMFX, RMAX),	      rd_rn),
21100 cCE("cfmva32",	e2004a0, 2, (RMAX, RMFX),	      rd_rn),
21101 cCE("cfmv32a",	e1004a0, 2, (RMFX, RMAX),	      rd_rn),
21102 cCE("cfmva64",	e2004c0, 2, (RMAX, RMDX),	      rd_rn),
21103 cCE("cfmv64a",	e1004c0, 2, (RMDX, RMAX),	      rd_rn),
21104 cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX),	      mav_dspsc),
21105 cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS),	      rd),
21106 cCE("cfcpys",	e000400, 2, (RMF, RMF),		      rd_rn),
21107 cCE("cfcpyd",	e000420, 2, (RMD, RMD),		      rd_rn),
21108 cCE("cfcvtsd",	e000460, 2, (RMD, RMF),		      rd_rn),
21109 cCE("cfcvtds",	e000440, 2, (RMF, RMD),		      rd_rn),
21110 cCE("cfcvt32s",e000480, 2, (RMF, RMFX),	      rd_rn),
21111 cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX),	      rd_rn),
21112 cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX),	      rd_rn),
21113 cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX),	      rd_rn),
21114 cCE("cfcvts32",e100580, 2, (RMFX, RMF),	      rd_rn),
21115 cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD),	      rd_rn),
21116 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF),	      rd_rn),
21117 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD),	      rd_rn),
21118 cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR),	      mav_triple),
21119 cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR),	      mav_triple),
21120 cCE("cfsh32",	e000500, 3, (RMFX, RMFX, I63s),	      mav_shift),
21121 cCE("cfsh64",	e200500, 3, (RMDX, RMDX, I63s),	      mav_shift),
21122 cCE("cfcmps",	e100490, 3, (RR, RMF, RMF),	      rd_rn_rm),
21123 cCE("cfcmpd",	e1004b0, 3, (RR, RMD, RMD),	      rd_rn_rm),
21124 cCE("cfcmp32",	e100590, 3, (RR, RMFX, RMFX),	      rd_rn_rm),
21125 cCE("cfcmp64",	e1005b0, 3, (RR, RMDX, RMDX),	      rd_rn_rm),
21126 cCE("cfabss",	e300400, 2, (RMF, RMF),		      rd_rn),
21127 cCE("cfabsd",	e300420, 2, (RMD, RMD),		      rd_rn),
21128 cCE("cfnegs",	e300440, 2, (RMF, RMF),		      rd_rn),
21129 cCE("cfnegd",	e300460, 2, (RMD, RMD),		      rd_rn),
21130 cCE("cfadds",	e300480, 3, (RMF, RMF, RMF),	      rd_rn_rm),
21131 cCE("cfaddd",	e3004a0, 3, (RMD, RMD, RMD),	      rd_rn_rm),
21132 cCE("cfsubs",	e3004c0, 3, (RMF, RMF, RMF),	      rd_rn_rm),
21133 cCE("cfsubd",	e3004e0, 3, (RMD, RMD, RMD),	      rd_rn_rm),
21134 cCE("cfmuls",	e100400, 3, (RMF, RMF, RMF),	      rd_rn_rm),
21135 cCE("cfmuld",	e100420, 3, (RMD, RMD, RMD),	      rd_rn_rm),
21136 cCE("cfabs32",	e300500, 2, (RMFX, RMFX),	      rd_rn),
21137 cCE("cfabs64",	e300520, 2, (RMDX, RMDX),	      rd_rn),
21138 cCE("cfneg32",	e300540, 2, (RMFX, RMFX),	      rd_rn),
21139 cCE("cfneg64",	e300560, 2, (RMDX, RMDX),	      rd_rn),
21140 cCE("cfadd32",	e300580, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
21141 cCE("cfadd64",	e3005a0, 3, (RMDX, RMDX, RMDX),	      rd_rn_rm),
21142 cCE("cfsub32",	e3005c0, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
21143 cCE("cfsub64",	e3005e0, 3, (RMDX, RMDX, RMDX),	      rd_rn_rm),
21144 cCE("cfmul32",	e100500, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
21145 cCE("cfmul64",	e100520, 3, (RMDX, RMDX, RMDX),	      rd_rn_rm),
21146 cCE("cfmac32",	e100540, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
21147 cCE("cfmsc32",	e100560, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
21148 cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
21149 cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
21150 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
21151 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
21152
21153 /* ARMv8-M instructions.  */
21154#undef  ARM_VARIANT
21155#define ARM_VARIANT NULL
21156#undef  THUMB_VARIANT
21157#define THUMB_VARIANT & arm_ext_v8m
21158 TUE("sg", 0, e97fe97f, 0, (), 0, noargs),
21159 TUE("blxns", 0, 4784, 1, (RRnpc), 0, t_blx),
21160 TUE("bxns", 0, 4704, 1, (RRnpc), 0, t_bx),
21161 TUE("tt", 0, e840f000, 2, (RRnpc, RRnpc), 0, tt),
21162 TUE("ttt", 0, e840f040, 2, (RRnpc, RRnpc), 0, tt),
21163 TUE("tta", 0, e840f080, 2, (RRnpc, RRnpc), 0, tt),
21164 TUE("ttat", 0, e840f0c0, 2, (RRnpc, RRnpc), 0, tt),
21165
21166 /* FP for ARMv8-M Mainline.  Enabled for ARMv8-M Mainline because the
21167    instructions behave as nop if no VFP is present.  */
21168#undef  THUMB_VARIANT
21169#define THUMB_VARIANT & arm_ext_v8m_main
21170 TUEc("vlldm",	0,	 ec300a00, 1, (RRnpc),	rn),
21171 TUEc("vlstm",	0,	 ec200a00, 1, (RRnpc),	rn),
21172};
21173#undef ARM_VARIANT
21174#undef THUMB_VARIANT
21175#undef TCE
21176#undef TUE
21177#undef TUF
21178#undef TCC
21179#undef cCE
21180#undef cCL
21181#undef C3E
21182#undef CE
21183#undef CM
21184#undef UE
21185#undef UF
21186#undef UT
21187#undef NUF
21188#undef nUF
21189#undef NCE
21190#undef nCE
21191#undef OPS0
21192#undef OPS1
21193#undef OPS2
21194#undef OPS3
21195#undef OPS4
21196#undef OPS5
21197#undef OPS6
21198#undef do_0
21199
21200/* MD interface: bits in the object file.  */
21201
/* Turn an integer of n bytes (in val) into a stream of bytes appropriate
   for use in the object file, and store them in the array pointed to by buf.
   This knows about the endian-ness of the target machine and does
   THE RIGHT THING, whatever it is.  Possible values for n are 1 (byte),
   2 (short) and 4 (long).  Floating-point numbers are put out as a series
   of LITTLENUMS (shorts, here at least).  */
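/* For example, with n == 4 and val == 0x12345678, a little-endian target
   gets the byte sequence 0x78 0x56 0x34 0x12 while a big-endian target
   gets 0x12 0x34 0x56 0x78.  */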
21208
21209void
21210md_number_to_chars (char * buf, valueT val, int n)
21211{
21212  if (target_big_endian)
21213    number_to_chars_bigendian (buf, val, n);
21214  else
21215    number_to_chars_littleendian (buf, val, n);
21216}
21217
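/* Read an n-byte value from buf, honouring the endian-ness of the target
   machine; the inverse of md_number_to_chars above.  */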
21218static valueT
21219md_chars_to_number (char * buf, int n)
21220{
21221  valueT result = 0;
21222  unsigned char * where = (unsigned char *) buf;
21223
21224  if (target_big_endian)
21225    {
21226      while (n--)
21227	{
21228	  result <<= 8;
21229	  result |= (*where++ & 255);
21230	}
21231    }
21232  else
21233    {
21234      while (n--)
21235	{
21236	  result <<= 8;
21237	  result |= (where[n] & 255);
21238	}
21239    }
21240
21241  return result;
21242}
21243
21244/* MD interface: Sections.  */
21245
21246/* Calculate the maximum variable size (i.e., excluding fr_fix)
21247   that an rs_machine_dependent frag may reach.  */
21248
21249unsigned int
21250arm_frag_max_var (fragS *fragp)
21251{
21252  /* We only use rs_machine_dependent for variable-size Thumb instructions,
21253     which are either THUMB_SIZE (2) or INSN_SIZE (4).
21254
21255     Note that we generate relaxable instructions even for cases that don't
21256     really need it, like an immediate that's a trivial constant.  So we're
21257     overestimating the instruction size for some of those cases.  Rather
21258     than putting more intelligence here, it would probably be better to
21259     avoid generating a relaxation frag in the first place when it can be
21260     determined up front that a short instruction will suffice.  */
21261
21262  gas_assert (fragp->fr_type == rs_machine_dependent);
21263  return INSN_SIZE;
21264}
21265
21266/* Estimate the size of a frag before relaxing.  Assume everything fits in
21267   2 bytes.  */
21268
21269int
21270md_estimate_size_before_relax (fragS * fragp,
21271			       segT    segtype ATTRIBUTE_UNUSED)
21272{
21273  fragp->fr_var = 2;
21274  return 2;
21275}
21276
21277/* Convert a machine dependent frag.  */
21278
21279void
21280md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
21281{
21282  unsigned long insn;
21283  unsigned long old_op;
21284  char *buf;
21285  expressionS exp;
21286  fixS *fixp;
21287  int reloc_type;
21288  int pc_rel;
21289  int opcode;
21290
21291  buf = fragp->fr_literal + fragp->fr_fix;
21292
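  /* old_op holds the 16-bit value that was written out when the instruction
     was first assembled; where needed, its register fields are copied into
     the equivalent 32-bit encoding below once relaxation has selected the
     wide form.  */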
21293  old_op = bfd_get_16(abfd, buf);
21294  if (fragp->fr_symbol)
21295    {
21296      exp.X_op = O_symbol;
21297      exp.X_add_symbol = fragp->fr_symbol;
21298    }
21299  else
21300    {
21301      exp.X_op = O_constant;
21302    }
21303  exp.X_add_number = fragp->fr_offset;
21304  opcode = fragp->fr_subtype;
21305  switch (opcode)
21306    {
21307    case T_MNEM_ldr_pc:
21308    case T_MNEM_ldr_pc2:
21309    case T_MNEM_ldr_sp:
21310    case T_MNEM_str_sp:
21311    case T_MNEM_ldr:
21312    case T_MNEM_ldrb:
21313    case T_MNEM_ldrh:
21314    case T_MNEM_str:
21315    case T_MNEM_strb:
21316    case T_MNEM_strh:
21317      if (fragp->fr_var == 4)
21318	{
21319	  insn = THUMB_OP32 (opcode);
21320	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
21321	    {
21322	      insn |= (old_op & 0x700) << 4;
21323	    }
21324	  else
21325	    {
21326	      insn |= (old_op & 7) << 12;
21327	      insn |= (old_op & 0x38) << 13;
21328	    }
21329	  insn |= 0x00000c00;
21330	  put_thumb32_insn (buf, insn);
21331	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
21332	}
21333      else
21334	{
21335	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
21336	}
21337      pc_rel = (opcode == T_MNEM_ldr_pc2);
21338      break;
21339    case T_MNEM_adr:
21340      if (fragp->fr_var == 4)
21341	{
21342	  insn = THUMB_OP32 (opcode);
21343	  insn |= (old_op & 0xf0) << 4;
21344	  put_thumb32_insn (buf, insn);
21345	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
21346	}
21347      else
21348	{
21349	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
21350	  exp.X_add_number -= 4;
21351	}
21352      pc_rel = 1;
21353      break;
21354    case T_MNEM_mov:
21355    case T_MNEM_movs:
21356    case T_MNEM_cmp:
21357    case T_MNEM_cmn:
21358      if (fragp->fr_var == 4)
21359	{
21360	  int r0off = (opcode == T_MNEM_mov
21361		       || opcode == T_MNEM_movs) ? 0 : 8;
21362	  insn = THUMB_OP32 (opcode);
21363	  insn = (insn & 0xe1ffffff) | 0x10000000;
21364	  insn |= (old_op & 0x700) << r0off;
21365	  put_thumb32_insn (buf, insn);
21366	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
21367	}
21368      else
21369	{
21370	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
21371	}
21372      pc_rel = 0;
21373      break;
21374    case T_MNEM_b:
21375      if (fragp->fr_var == 4)
21376	{
21377	  insn = THUMB_OP32(opcode);
21378	  put_thumb32_insn (buf, insn);
21379	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
21380	}
21381      else
21382	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
21383      pc_rel = 1;
21384      break;
21385    case T_MNEM_bcond:
21386      if (fragp->fr_var == 4)
21387	{
21388	  insn = THUMB_OP32(opcode);
21389	  insn |= (old_op & 0xf00) << 14;
21390	  put_thumb32_insn (buf, insn);
21391	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
21392	}
21393      else
21394	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
21395      pc_rel = 1;
21396      break;
21397    case T_MNEM_add_sp:
21398    case T_MNEM_add_pc:
21399    case T_MNEM_inc_sp:
21400    case T_MNEM_dec_sp:
21401      if (fragp->fr_var == 4)
21402	{
21403	  /* ??? Choose between add and addw.  */
21404	  insn = THUMB_OP32 (opcode);
21405	  insn |= (old_op & 0xf0) << 4;
21406	  put_thumb32_insn (buf, insn);
21407	  if (opcode == T_MNEM_add_pc)
21408	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
21409	  else
21410	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
21411	}
21412      else
21413	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
21414      pc_rel = 0;
21415      break;
21416
21417    case T_MNEM_addi:
21418    case T_MNEM_addis:
21419    case T_MNEM_subi:
21420    case T_MNEM_subis:
21421      if (fragp->fr_var == 4)
21422	{
21423	  insn = THUMB_OP32 (opcode);
21424	  insn |= (old_op & 0xf0) << 4;
21425	  insn |= (old_op & 0xf) << 16;
21426	  put_thumb32_insn (buf, insn);
21427	  if (insn & (1 << 20))
21428	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
21429	  else
21430	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
21431	}
21432      else
21433	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
21434      pc_rel = 0;
21435      break;
21436    default:
21437      abort ();
21438    }
21439  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
21440		      (enum bfd_reloc_code_real) reloc_type);
21441  fixp->fx_file = fragp->fr_file;
21442  fixp->fx_line = fragp->fr_line;
21443  fragp->fr_fix += fragp->fr_var;
21444
21445  /* Set whether we use thumb-2 ISA based on final relaxation results.  */
21446  if (thumb_mode && fragp->fr_var == 4 && no_cpu_selected ()
21447      && !ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2))
21448    ARM_MERGE_FEATURE_SETS (arm_arch_used, thumb_arch_used, arm_ext_v6t2);
21449}
21450
/* Return the size (2 or 4) of a relaxable immediate operand instruction.
   SIZE is the number of bits in the narrow immediate field and SHIFT is
   the scaling applied to the encoded value.  */
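/* For example, an SP-relative load or store is relaxed with SIZE == 8 and
   SHIFT == 2, so a constant offset that is a multiple of 4 in the range
   0..1020 keeps the 2-byte encoding, while anything else (including a
   symbolic offset) forces the 4-byte variant.  */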
21453static int
21454relax_immediate (fragS *fragp, int size, int shift)
21455{
21456  offsetT offset;
21457  offsetT mask;
21458  offsetT low;
21459
21460  /* ??? Should be able to do better than this.  */
21461  if (fragp->fr_symbol)
21462    return 4;
21463
21464  low = (1 << shift) - 1;
21465  mask = (1 << (shift + size)) - (1 << shift);
21466  offset = fragp->fr_offset;
21467  /* Force misaligned offsets to 32-bit variant.  */
21468  if (offset & low)
21469    return 4;
21470  if (offset & ~mask)
21471    return 4;
21472  return 2;
21473}
21474
21475/* Get the address of a symbol during relaxation.  */
21476static addressT
21477relaxed_symbol_addr (fragS *fragp, long stretch)
21478{
21479  fragS *sym_frag;
21480  addressT addr;
21481  symbolS *sym;
21482
21483  sym = fragp->fr_symbol;
21484  sym_frag = symbol_get_frag (sym);
21485  know (S_GET_SEGMENT (sym) != absolute_section
21486	|| sym_frag == &zero_address_frag);
21487  addr = S_GET_VALUE (sym) + fragp->fr_offset;
21488
  /* If the frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag in between has grown, and that will force
     another pass.  */
21493
21494  if (stretch != 0
21495      && sym_frag->relax_marker != fragp->relax_marker)
21496    {
21497      fragS *f;
21498
      /* Adjust stretch for any alignment frag.  Note that if we have
	 been expanding the earlier code, the symbol may be
	 defined in what appears to be an earlier frag.  FIXME:
	 This doesn't handle the fr_subtype field, which specifies
	 a maximum number of bytes to skip when doing an
	 alignment.  */
21505      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
21506	{
21507	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
21508	    {
21509	      if (stretch < 0)
21510		stretch = - ((- stretch)
21511			     & ~ ((1 << (int) f->fr_offset) - 1));
21512	      else
21513		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
21514	      if (stretch == 0)
21515		break;
21516	    }
21517	}
21518      if (f != NULL)
21519	addr += stretch;
21520    }
21521
21522  return addr;
21523}
21524
21525/* Return the size of a relaxable adr pseudo-instruction or PC-relative
21526   load.  */
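/* The 2-byte forms can only reach word-aligned addresses that lie from 0 to
   1020 bytes above the word-aligned PC, so any other target (unaligned,
   backwards, or out of range) keeps the 4-byte encoding.  */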
21527static int
21528relax_adr (fragS *fragp, asection *sec, long stretch)
21529{
21530  addressT addr;
21531  offsetT val;
21532
21533  /* Assume worst case for symbols not known to be in the same section.  */
21534  if (fragp->fr_symbol == NULL
21535      || !S_IS_DEFINED (fragp->fr_symbol)
21536      || sec != S_GET_SEGMENT (fragp->fr_symbol)
21537      || S_IS_WEAK (fragp->fr_symbol))
21538    return 4;
21539
21540  val = relaxed_symbol_addr (fragp, stretch);
21541  addr = fragp->fr_address + fragp->fr_fix;
21542  addr = (addr + 4) & ~3;
21543  /* Force misaligned targets to 32-bit variant.  */
21544  if (val & 3)
21545    return 4;
21546  val -= addr;
21547  if (val < 0 || val > 1020)
21548    return 4;
21549  return 2;
21550}
21551
21552/* Return the size of a relaxable add/sub immediate instruction.  */
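/* The 2-byte encoding can take an 8-bit immediate when the source and
   destination registers are the same, but only a 3-bit immediate
   otherwise.  */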
21553static int
21554relax_addsub (fragS *fragp, asection *sec)
21555{
21556  char *buf;
21557  int op;
21558
21559  buf = fragp->fr_literal + fragp->fr_fix;
21560  op = bfd_get_16(sec->owner, buf);
21561  if ((op & 0xf) == ((op >> 4) & 0xf))
21562    return relax_immediate (fragp, 8, 0);
21563  else
21564    return relax_immediate (fragp, 3, 0);
21565}
21566
21567/* Return TRUE iff the definition of symbol S could be pre-empted
21568   (overridden) at link or load time.  */
21569static bfd_boolean
21570symbol_preemptible (symbolS *s)
21571{
21572  /* Weak symbols can always be pre-empted.  */
21573  if (S_IS_WEAK (s))
21574    return TRUE;
21575
21576  /* Non-global symbols cannot be pre-empted. */
21577  if (! S_IS_EXTERNAL (s))
21578    return FALSE;
21579
21580#ifdef OBJ_ELF
  /* In ELF, a global symbol can be given non-default visibility (internal,
     hidden or protected).  In that case it can't be pre-empted (other
     definitions in the same link unit would violate the ODR).  */
21584  if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
21585    return FALSE;
21586#endif
21587
21588  /* Other global symbols might be pre-empted.  */
21589  return TRUE;
21590}
21591
21592/* Return the size of a relaxable branch instruction.  BITS is the
21593   size of the offset field in the narrow instruction.  */
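/* For example, a conditional branch is relaxed with BITS == 8, so the 2-byte
   encoding is kept only while the displacement from the instruction's PC
   (its address plus 4) stays within [-256, 255] bytes.  */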
21594
21595static int
21596relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
21597{
21598  addressT addr;
21599  offsetT val;
21600  offsetT limit;
21601
21602  /* Assume worst case for symbols not known to be in the same section.  */
21603  if (!S_IS_DEFINED (fragp->fr_symbol)
21604      || sec != S_GET_SEGMENT (fragp->fr_symbol)
21605      || S_IS_WEAK (fragp->fr_symbol))
21606    return 4;
21607
21608#ifdef OBJ_ELF
21609  /* A branch to a function in ARM state will require interworking.  */
21610  if (S_IS_DEFINED (fragp->fr_symbol)
21611      && ARM_IS_FUNC (fragp->fr_symbol))
21612      return 4;
21613#endif
21614
21615  if (symbol_preemptible (fragp->fr_symbol))
21616    return 4;
21617
21618  val = relaxed_symbol_addr (fragp, stretch);
21619  addr = fragp->fr_address + fragp->fr_fix + 4;
21620  val -= addr;
21621
  /* The offset is a signed value, scaled by 2.  */
21623  limit = 1 << bits;
21624  if (val >= limit || val < -limit)
21625    return 4;
21626  return 2;
21627}
21628
21629
21630/* Relax a machine dependent frag.  This returns the amount by which
21631   the current size of the frag should change.  */
21632
21633int
21634arm_relax_frag (asection *sec, fragS *fragp, long stretch)
21635{
21636  int oldsize;
21637  int newsize;
21638
21639  oldsize = fragp->fr_var;
21640  switch (fragp->fr_subtype)
21641    {
21642    case T_MNEM_ldr_pc2:
21643      newsize = relax_adr (fragp, sec, stretch);
21644      break;
21645    case T_MNEM_ldr_pc:
21646    case T_MNEM_ldr_sp:
21647    case T_MNEM_str_sp:
21648      newsize = relax_immediate (fragp, 8, 2);
21649      break;
21650    case T_MNEM_ldr:
21651    case T_MNEM_str:
21652      newsize = relax_immediate (fragp, 5, 2);
21653      break;
21654    case T_MNEM_ldrh:
21655    case T_MNEM_strh:
21656      newsize = relax_immediate (fragp, 5, 1);
21657      break;
21658    case T_MNEM_ldrb:
21659    case T_MNEM_strb:
21660      newsize = relax_immediate (fragp, 5, 0);
21661      break;
21662    case T_MNEM_adr:
21663      newsize = relax_adr (fragp, sec, stretch);
21664      break;
21665    case T_MNEM_mov:
21666    case T_MNEM_movs:
21667    case T_MNEM_cmp:
21668    case T_MNEM_cmn:
21669      newsize = relax_immediate (fragp, 8, 0);
21670      break;
21671    case T_MNEM_b:
21672      newsize = relax_branch (fragp, sec, 11, stretch);
21673      break;
21674    case T_MNEM_bcond:
21675      newsize = relax_branch (fragp, sec, 8, stretch);
21676      break;
21677    case T_MNEM_add_sp:
21678    case T_MNEM_add_pc:
21679      newsize = relax_immediate (fragp, 8, 2);
21680      break;
21681    case T_MNEM_inc_sp:
21682    case T_MNEM_dec_sp:
21683      newsize = relax_immediate (fragp, 7, 2);
21684      break;
21685    case T_MNEM_addi:
21686    case T_MNEM_addis:
21687    case T_MNEM_subi:
21688    case T_MNEM_subis:
21689      newsize = relax_addsub (fragp, sec);
21690      break;
21691    default:
21692      abort ();
21693    }
21694
21695  fragp->fr_var = newsize;
21696  /* Freeze wide instructions that are at or before the same location as
21697     in the previous pass.  This avoids infinite loops.
21698     Don't freeze them unconditionally because targets may be artificially
21699     misaligned by the expansion of preceding frags.  */
21700  if (stretch <= 0 && newsize > 2)
21701    {
21702      md_convert_frag (sec->owner, sec, fragp);
21703      frag_wane (fragp);
21704    }
21705
21706  return newsize - oldsize;
21707}
21708
21709/* Round up a section size to the appropriate boundary.	 */
21710
21711valueT
21712md_section_align (segT	 segment ATTRIBUTE_UNUSED,
21713		  valueT size)
21714{
21715#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
21716  if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
21717    {
21718      /* For a.out, force the section size to be aligned.  If we don't do
21719	 this, BFD will align it for us, but it will not write out the
21720	 final bytes of the section.  This may be a bug in BFD, but it is
21721	 easier to fix it here since that is how the other a.out targets
21722	 work.  */
21723      int align;
21724
21725      align = bfd_get_section_alignment (stdoutput, segment);
21726      size = ((size + (1 << align) - 1) & (-((valueT) 1 << align)));
21727    }
21728#endif
21729
21730  return size;
21731}
21732
21733/* This is called from HANDLE_ALIGN in write.c.	 Fill in the contents
21734   of an rs_align_code fragment.  */
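/* Any padding that is not a multiple of the NOP size is zero-filled first
   (and given a data mapping symbol on ELF targets); in Thumb-2 code at most
   one narrow NOP is then emitted before the remainder is filled with the
   4-byte wide NOP.  */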
21735
21736void
21737arm_handle_align (fragS * fragP)
21738{
21739  static unsigned char const arm_noop[2][2][4] =
21740    {
21741      {  /* ARMv1 */
21742	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
21743	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
21744      },
21745      {  /* ARMv6k */
21746	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
21747	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
21748      },
21749    };
21750  static unsigned char const thumb_noop[2][2][2] =
21751    {
21752      {  /* Thumb-1 */
21753	{0xc0, 0x46},  /* LE */
21754	{0x46, 0xc0},  /* BE */
21755      },
21756      {  /* Thumb-2 */
21757	{0x00, 0xbf},  /* LE */
21758	{0xbf, 0x00}   /* BE */
21759      }
21760    };
21761  static unsigned char const wide_thumb_noop[2][4] =
21762    {  /* Wide Thumb-2 */
21763      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
21764      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
21765    };
21766
21767  unsigned bytes, fix, noop_size;
21768  char * p;
21769  const unsigned char * noop;
21770  const unsigned char *narrow_noop = NULL;
21771#ifdef OBJ_ELF
21772  enum mstate state;
21773#endif
21774
21775  if (fragP->fr_type != rs_align_code)
21776    return;
21777
21778  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
21779  p = fragP->fr_literal + fragP->fr_fix;
21780  fix = 0;
21781
21782  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
21783    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
21784
21785  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);
21786
21787  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
21788    {
21789      if (ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
21790			       ? selected_cpu : arm_arch_none, arm_ext_v6t2))
21791	{
21792	  narrow_noop = thumb_noop[1][target_big_endian];
21793	  noop = wide_thumb_noop[target_big_endian];
21794	}
21795      else
21796	noop = thumb_noop[0][target_big_endian];
21797      noop_size = 2;
21798#ifdef OBJ_ELF
21799      state = MAP_THUMB;
21800#endif
21801    }
21802  else
21803    {
21804      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
21805					   ? selected_cpu : arm_arch_none,
21806					   arm_ext_v6k) != 0]
21807		     [target_big_endian];
21808      noop_size = 4;
21809#ifdef OBJ_ELF
21810      state = MAP_ARM;
21811#endif
21812    }
21813
21814  fragP->fr_var = noop_size;
21815
21816  if (bytes & (noop_size - 1))
21817    {
21818      fix = bytes & (noop_size - 1);
21819#ifdef OBJ_ELF
21820      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
21821#endif
21822      memset (p, 0, fix);
21823      p += fix;
21824      bytes -= fix;
21825    }
21826
21827  if (narrow_noop)
21828    {
21829      if (bytes & noop_size)
21830	{
21831	  /* Insert a narrow noop.  */
21832	  memcpy (p, narrow_noop, noop_size);
21833	  p += noop_size;
21834	  bytes -= noop_size;
21835	  fix += noop_size;
21836	}
21837
      /* Use wide noops for the remainder.  */
21839      noop_size = 4;
21840    }
21841
21842  while (bytes >= noop_size)
21843    {
21844      memcpy (p, noop, noop_size);
21845      p += noop_size;
21846      bytes -= noop_size;
21847      fix += noop_size;
21848    }
21849
21850  fragP->fr_fix += fix;
21851}
21852
21853/* Called from md_do_align.  Used to create an alignment
21854   frag in a code section.  */
21855
21856void
21857arm_frag_align_code (int n, int max)
21858{
21859  char * p;
21860
21861  /* We assume that there will never be a requirement
21862     to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes.  */
21863  if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
21864    {
21865      char err_msg[128];
21866
21867      sprintf (err_msg,
21868	_("alignments greater than %d bytes not supported in .text sections."),
21869	MAX_MEM_FOR_RS_ALIGN_CODE + 1);
21870      as_fatal ("%s", err_msg);
21871    }
21872
21873  p = frag_var (rs_align_code,
21874		MAX_MEM_FOR_RS_ALIGN_CODE,
21875		1,
21876		(relax_substateT) max,
21877		(symbolS *) NULL,
21878		(offsetT) n,
21879		(char *) NULL);
21880  *p = 0;
21881}
21882
21883/* Perform target specific initialisation of a frag.
21884   Note - despite the name this initialisation is not done when the frag
21885   is created, but only when its type is assigned.  A frag can be created
21886   and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first.  */
21888
21889#ifndef OBJ_ELF
21890void
21891arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
21892{
21893  /* Record whether this frag is in an ARM or a THUMB area.  */
21894  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
21895}
21896
21897#else /* OBJ_ELF is defined.  */
21898void
21899arm_init_frag (fragS * fragP, int max_chars)
21900{
21901  int frag_thumb_mode;
21902
21903  /* If the current ARM vs THUMB mode has not already
21904     been recorded into this frag then do so now.  */
21905  if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
21906    fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
21907
21908  frag_thumb_mode = fragP->tc_frag_data.thumb_mode ^ MODE_RECORDED;
21909
21910  /* Record a mapping symbol for alignment frags.  We will delete this
21911     later if the alignment ends up empty.  */
21912  switch (fragP->fr_type)
21913    {
21914    case rs_align:
21915    case rs_align_test:
21916    case rs_fill:
21917      mapping_state_2 (MAP_DATA, max_chars);
21918      break;
21919    case rs_align_code:
21920      mapping_state_2 (frag_thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
21921      break;
21922    default:
21923      break;
21924    }
21925}
21926
21927/* When we change sections we need to issue a new mapping symbol.  */
21928
21929void
21930arm_elf_change_section (void)
21931{
21932  /* Link an unlinked unwind index table section to the .text section.	*/
21933  if (elf_section_type (now_seg) == SHT_ARM_EXIDX
21934      && elf_linked_to_section (now_seg) == NULL)
21935    elf_linked_to_section (now_seg) = text_section;
21936}
21937
21938int
21939arm_elf_section_type (const char * str, size_t len)
21940{
21941  if (len == 5 && strncmp (str, "exidx", 5) == 0)
21942    return SHT_ARM_EXIDX;
21943
21944  return -1;
21945}
21946
21947/* Code to deal with unwinding tables.	*/
21948
21949static void add_unwind_adjustsp (offsetT);
21950
21951/* Generate any deferred unwind frame offset.  */
21952
21953static void
21954flush_pending_unwind (void)
21955{
21956  offsetT offset;
21957
21958  offset = unwind.pending_offset;
21959  unwind.pending_offset = 0;
21960  if (offset != 0)
21961    add_unwind_adjustsp (offset);
21962}
21963
21964/* Add an opcode to this list for this function.  Two-byte opcodes should
21965   be passed as op[0] << 8 | op[1].  The list of opcodes is built in reverse
21966   order.  */
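/* For example, a two-byte opcode 0xAABB is added low byte (0xBB) first, so
   that it comes out as 0xAA, 0xBB once the finished list is reversed.  */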
21967
21968static void
21969add_unwind_opcode (valueT op, int length)
21970{
21971  /* Add any deferred stack adjustment.	 */
21972  if (unwind.pending_offset)
21973    flush_pending_unwind ();
21974
21975  unwind.sp_restored = 0;
21976
21977  if (unwind.opcode_count + length > unwind.opcode_alloc)
21978    {
21979      unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
21980      if (unwind.opcodes)
21981	unwind.opcodes = XRESIZEVEC (unsigned char, unwind.opcodes,
21982				     unwind.opcode_alloc);
21983      else
21984	unwind.opcodes = XNEWVEC (unsigned char, unwind.opcode_alloc);
21985    }
21986  while (length > 0)
21987    {
21988      length--;
21989      unwind.opcodes[unwind.opcode_count] = op & 0xff;
21990      op >>= 8;
21991      unwind.opcode_count++;
21992    }
21993}
21994
21995/* Add unwind opcodes to adjust the stack pointer.  */
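/* The opcodes emitted here follow the ARM exception handling ABI:
   0x00-0x3f adds ((opcode & 0x3f) << 2) + 4 to vsp, 0x40-0x7f subtracts
   the same quantity, and 0xb2 followed by a uleb128 value N adds
   0x204 + (N << 2).  */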
21996
21997static void
21998add_unwind_adjustsp (offsetT offset)
21999{
22000  valueT op;
22001
22002  if (offset > 0x200)
22003    {
22004      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
22005      char bytes[5];
22006      int n;
22007      valueT o;
22008
22009      /* Long form: 0xb2, uleb128.  */
22010      /* This might not fit in a word so add the individual bytes,
22011	 remembering the list is built in reverse order.  */
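      /* For example, an offset of 0x208 gives o = 1; 0x01 is added first
	 and 0xb2 last, so the final (forward) stream reads 0xb2 0x01.  */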
22012      o = (valueT) ((offset - 0x204) >> 2);
22013      if (o == 0)
22014	add_unwind_opcode (0, 1);
22015
22016      /* Calculate the uleb128 encoding of the offset.	*/
22017      n = 0;
22018      while (o)
22019	{
22020	  bytes[n] = o & 0x7f;
22021	  o >>= 7;
22022	  if (o)
22023	    bytes[n] |= 0x80;
22024	  n++;
22025	}
22026      /* Add the insn.	*/
22027      for (; n; n--)
22028	add_unwind_opcode (bytes[n - 1], 1);
22029      add_unwind_opcode (0xb2, 1);
22030    }
22031  else if (offset > 0x100)
22032    {
22033      /* Two short opcodes.  */
22034      add_unwind_opcode (0x3f, 1);
22035      op = (offset - 0x104) >> 2;
22036      add_unwind_opcode (op, 1);
22037    }
22038  else if (offset > 0)
22039    {
22040      /* Short opcode.	*/
22041      op = (offset - 4) >> 2;
22042      add_unwind_opcode (op, 1);
22043    }
22044  else if (offset < 0)
22045    {
22046      offset = -offset;
22047      while (offset > 0x100)
22048	{
22049	  add_unwind_opcode (0x7f, 1);
22050	  offset -= 0x100;
22051	}
22052      op = ((offset - 4) >> 2) | 0x40;
22053      add_unwind_opcode (op, 1);
22054    }
22055}
22056
22057/* Finish the list of unwind opcodes for this function.	 */
22058static void
22059finish_unwind_opcodes (void)
22060{
22061  valueT op;
22062
22063  if (unwind.fp_used)
22064    {
22065      /* Adjust sp as necessary.  */
22066      unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
22067      flush_pending_unwind ();
22068
22069      /* After restoring sp from the frame pointer.  */
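      /* Opcode 0x90 | reg is the EHABI "vsp = r[reg]" operation; since the
	 opcode list is built in reverse, it precedes the adjustment above
	 in the emitted stream.  */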
22070      op = 0x90 | unwind.fp_reg;
22071      add_unwind_opcode (op, 1);
22072    }
22073  else
22074    flush_pending_unwind ();
22075}
22076
22077
22078/* Start an exception table entry.  If idx is nonzero this is an index table
22079   entry.  */
22080
22081static void
22082start_unwind_section (const segT text_seg, int idx)
22083{
22084  const char * text_name;
22085  const char * prefix;
22086  const char * prefix_once;
22087  const char * group_name;
22088  char * sec_name;
22089  int type;
22090  int flags;
22091  int linkonce;
22092
22093  if (idx)
22094    {
22095      prefix = ELF_STRING_ARM_unwind;
22096      prefix_once = ELF_STRING_ARM_unwind_once;
22097      type = SHT_ARM_EXIDX;
22098    }
22099  else
22100    {
22101      prefix = ELF_STRING_ARM_unwind_info;
22102      prefix_once = ELF_STRING_ARM_unwind_info_once;
22103      type = SHT_PROGBITS;
22104    }
22105
22106  text_name = segment_name (text_seg);
22107  if (streq (text_name, ".text"))
22108    text_name = "";
22109
22110  if (strncmp (text_name, ".gnu.linkonce.t.",
22111	       strlen (".gnu.linkonce.t.")) == 0)
22112    {
22113      prefix = prefix_once;
22114      text_name += strlen (".gnu.linkonce.t.");
22115    }
22116
22117  sec_name = concat (prefix, text_name, (char *) NULL);
22118
22119  flags = SHF_ALLOC;
22120  linkonce = 0;
22121  group_name = 0;
22122
22123  /* Handle COMDAT group.  */
22124  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
22125    {
22126      group_name = elf_group_name (text_seg);
22127      if (group_name == NULL)
22128	{
22129	  as_bad (_("Group section `%s' has no group signature"),
22130		  segment_name (text_seg));
22131	  ignore_rest_of_line ();
22132	  return;
22133	}
22134      flags |= SHF_GROUP;
22135      linkonce = 1;
22136    }
22137
22138  obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
22139
22140  /* Set the section link for index tables.  */
22141  if (idx)
22142    elf_linked_to_section (now_seg) = text_seg;
22143}
22144
22145
22146/* Start an unwind table entry.	 HAVE_DATA is nonzero if we have additional
22147   personality routine data.  Returns zero, or the index table value for
22148   an inline entry.  */
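/* An inline (compact) entry packs personality routine 0 and up to three
   unwind opcodes into the returned word: 0x80 in the top byte, the opcodes
   below it, padded with 0xb0 ("finish").  A return value of 1 is
   EXIDX_CANTUNWIND.  */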
22149
22150static valueT
22151create_unwind_entry (int have_data)
22152{
22153  int size;
22154  addressT where;
22155  char *ptr;
22156  /* The current word of data.	*/
22157  valueT data;
22158  /* The number of bytes left in this word.  */
22159  int n;
22160
22161  finish_unwind_opcodes ();
22162
22163  /* Remember the current text section.	 */
22164  unwind.saved_seg = now_seg;
22165  unwind.saved_subseg = now_subseg;
22166
22167  start_unwind_section (now_seg, 0);
22168
22169  if (unwind.personality_routine == NULL)
22170    {
22171      if (unwind.personality_index == -2)
22172	{
22173	  if (have_data)
22174	    as_bad (_("handlerdata in cantunwind frame"));
22175	  return 1; /* EXIDX_CANTUNWIND.  */
22176	}
22177
22178      /* Use a default personality routine if none is specified.  */
22179      if (unwind.personality_index == -1)
22180	{
22181	  if (unwind.opcode_count > 3)
22182	    unwind.personality_index = 1;
22183	  else
22184	    unwind.personality_index = 0;
22185	}
22186
22187      /* Space for the personality routine entry.  */
22188      if (unwind.personality_index == 0)
22189	{
22190	  if (unwind.opcode_count > 3)
22191	    as_bad (_("too many unwind opcodes for personality routine 0"));
22192
22193	  if (!have_data)
22194	    {
22195	      /* All the data is inline in the index table.  */
22196	      data = 0x80;
22197	      n = 3;
22198	      while (unwind.opcode_count > 0)
22199		{
22200		  unwind.opcode_count--;
22201		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
22202		  n--;
22203		}
22204
22205	      /* Pad with "finish" opcodes.  */
22206	      while (n--)
22207		data = (data << 8) | 0xb0;
22208
22209	      return data;
22210	    }
22211	  size = 0;
22212	}
22213      else
22214	/* We get two opcodes "free" in the first word.	 */
22215	size = unwind.opcode_count - 2;
22216    }
22217  else
22218    {
22219      /* PR 16765: Missing or misplaced unwind directives can trigger this.  */
22220      if (unwind.personality_index != -1)
22221	{
22222	  as_bad (_("attempt to recreate an unwind entry"));
22223	  return 1;
22224	}
22225
22226      /* An extra byte is required for the opcode count.	*/
22227      size = unwind.opcode_count + 1;
22228    }
22229
22230  size = (size + 3) >> 2;
22231  if (size > 0xff)
22232    as_bad (_("too many unwind opcodes"));
22233
22234  frag_align (2, 0, 0);
22235  record_alignment (now_seg, 2);
22236  unwind.table_entry = expr_build_dot ();
22237
22238  /* Allocate the table entry.	*/
22239  ptr = frag_more ((size << 2) + 4);
22240  /* PR 13449: Zero the table entries in case some of them are not used.  */
22241  memset (ptr, 0, (size << 2) + 4);
22242  where = frag_now_fix () - ((size << 2) + 4);
22243
22244  switch (unwind.personality_index)
22245    {
22246    case -1:
22247      /* ??? Should this be a PLT generating relocation?  */
22248      /* Custom personality routine.  */
22249      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
22250	       BFD_RELOC_ARM_PREL31);
22251
22252      where += 4;
22253      ptr += 4;
22254
22255      /* Set the first byte to the number of additional words.	*/
22256      data = size > 0 ? size - 1 : 0;
22257      n = 3;
22258      break;
22259
22260    /* ABI defined personality routines.  */
22261    case 0:
      /* Three opcode bytes are packed into the first word.  */
22263      data = 0x80;
22264      n = 3;
22265      break;
22266
22267    case 1:
22268    case 2:
22269      /* The size and first two opcode bytes go in the first word.  */
22270      data = ((0x80 + unwind.personality_index) << 8) | size;
22271      n = 2;
22272      break;
22273
22274    default:
22275      /* Should never happen.  */
22276      abort ();
22277    }
22278
22279  /* Pack the opcodes into words (MSB first), reversing the list at the same
22280     time.  */
22281  while (unwind.opcode_count > 0)
22282    {
22283      if (n == 0)
22284	{
22285	  md_number_to_chars (ptr, data, 4);
22286	  ptr += 4;
22287	  n = 4;
22288	  data = 0;
22289	}
22290      unwind.opcode_count--;
22291      n--;
22292      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
22293    }
22294
22295  /* Finish off the last word.	*/
22296  if (n < 4)
22297    {
22298      /* Pad with "finish" opcodes.  */
22299      while (n--)
22300	data = (data << 8) | 0xb0;
22301
22302      md_number_to_chars (ptr, data, 4);
22303    }
22304
22305  if (!have_data)
22306    {
22307      /* Add an empty descriptor if there is no user-specified data.   */
22308      ptr = frag_more (4);
22309      md_number_to_chars (ptr, 0, 4);
22310    }
22311
22312  return 0;
22313}
22314
22315
22316/* Initialize the DWARF-2 unwind information for this procedure.  */
22317
22318void
22319tc_arm_frame_initial_instructions (void)
22320{
22321  cfi_add_CFA_def_cfa (REG_SP, 0);
22322}
22323#endif /* OBJ_ELF */
22324
22325/* Convert REGNAME to a DWARF-2 register number.  */
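/* Core registers map directly to 0-15.  VFP single-precision registers use
   the (legacy) DWARF numbers 64-95 and double-precision registers 256-287,
   following the ARM DWARF register numbering.  */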
22326
22327int
22328tc_arm_regname_to_dw2regnum (char *regname)
22329{
22330  int reg = arm_reg_parse (&regname, REG_TYPE_RN);
22331  if (reg != FAIL)
22332    return reg;
22333
22334  /* PR 16694: Allow VFP registers as well.  */
22335  reg = arm_reg_parse (&regname, REG_TYPE_VFS);
22336  if (reg != FAIL)
22337    return 64 + reg;
22338
22339  reg = arm_reg_parse (&regname, REG_TYPE_VFD);
22340  if (reg != FAIL)
22341    return reg + 256;
22342
22343  return -1;
22344}
22345
22346#ifdef TE_PE
22347void
22348tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
22349{
22350  expressionS exp;
22351
22352  exp.X_op = O_secrel;
22353  exp.X_add_symbol = symbol;
22354  exp.X_add_number = 0;
22355  emit_expr (&exp, size);
22356}
22357#endif
22358
22359/* MD interface: Symbol and relocation handling.  */
22360
22361/* Return the address within the segment that a PC-relative fixup is
22362   relative to.  For ARM, PC-relative fixups applied to instructions
22363   are generally relative to the location of the fixup plus 8 bytes.
22364   Thumb branches are offset by 4, and Thumb loads relative to PC
22365   require special handling.  */
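/* The +8 and +4 biases below reflect the architectural definition of the
   PC: reading the PC in ARM state yields the instruction address plus 8,
   and in Thumb state plus 4.  */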
22366
22367long
22368md_pcrel_from_section (fixS * fixP, segT seg)
22369{
22370  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
22371
22372  /* If this is pc-relative and we are going to emit a relocation
22373     then we just want to put out any pipeline compensation that the linker
22374     will need.  Otherwise we want to use the calculated base.
22375     For WinCE we skip the bias for externals as well, since this
22376     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
22377  if (fixP->fx_pcrel
22378      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
22379	  || (arm_force_relocation (fixP)
22380#ifdef TE_WINCE
22381	      && !S_IS_EXTERNAL (fixP->fx_addsy)
22382#endif
22383	      )))
22384    base = 0;
22385
22386
22387  switch (fixP->fx_r_type)
22388    {
22389      /* PC relative addressing on the Thumb is slightly odd as the
22390	 bottom two bits of the PC are forced to zero for the
22391	 calculation.  This happens *after* application of the
22392	 pipeline offset.  However, Thumb adrl already adjusts for
22393	 this, so we need not do it again.  */
22394    case BFD_RELOC_ARM_THUMB_ADD:
22395      return base & ~3;
22396
22397    case BFD_RELOC_ARM_THUMB_OFFSET:
22398    case BFD_RELOC_ARM_T32_OFFSET_IMM:
22399    case BFD_RELOC_ARM_T32_ADD_PC12:
22400    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
22401      return (base + 4) & ~3;
22402
22403      /* Thumb branches are simply offset by +4.  */
22404    case BFD_RELOC_THUMB_PCREL_BRANCH7:
22405    case BFD_RELOC_THUMB_PCREL_BRANCH9:
22406    case BFD_RELOC_THUMB_PCREL_BRANCH12:
22407    case BFD_RELOC_THUMB_PCREL_BRANCH20:
22408    case BFD_RELOC_THUMB_PCREL_BRANCH25:
22409      return base + 4;
22410
22411    case BFD_RELOC_THUMB_PCREL_BRANCH23:
22412      if (fixP->fx_addsy
22413	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22414	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22415	  && ARM_IS_FUNC (fixP->fx_addsy)
22416	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
22417	base = fixP->fx_where + fixP->fx_frag->fr_address;
22418       return base + 4;
22419
22420      /* BLX is like branches above, but forces the low two bits of PC to
22421	 zero.  */
22422    case BFD_RELOC_THUMB_PCREL_BLX:
22423      if (fixP->fx_addsy
22424	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22425	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22426	  && THUMB_IS_FUNC (fixP->fx_addsy)
22427	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
22428	base = fixP->fx_where + fixP->fx_frag->fr_address;
22429      return (base + 4) & ~3;
22430
22431      /* ARM mode branches are offset by +8.  However, the Windows CE
22432	 loader expects the relocation not to take this into account.  */
22433    case BFD_RELOC_ARM_PCREL_BLX:
22434      if (fixP->fx_addsy
22435	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22436	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22437	  && ARM_IS_FUNC (fixP->fx_addsy)
22438	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
22439	base = fixP->fx_where + fixP->fx_frag->fr_address;
22440      return base + 8;
22441
22442    case BFD_RELOC_ARM_PCREL_CALL:
22443      if (fixP->fx_addsy
22444	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22445	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22446	  && THUMB_IS_FUNC (fixP->fx_addsy)
22447	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
22448	base = fixP->fx_where + fixP->fx_frag->fr_address;
22449      return base + 8;
22450
22451    case BFD_RELOC_ARM_PCREL_BRANCH:
22452    case BFD_RELOC_ARM_PCREL_JUMP:
22453    case BFD_RELOC_ARM_PLT32:
22454#ifdef TE_WINCE
      /* When a fixup is handled immediately (because we have already
	 discovered the value of the symbol, or the address of the frag
	 involved) we must account for the +8 offset ourselves, as the OS
	 loader will never see the reloc; see fixup_segment() in write.c.
	 The S_IS_EXTERNAL test handles global symbols, which need the
	 calculated base rather than just the pipeline compensation the
	 linker will apply.  */
22461      if (fixP->fx_pcrel
22462	  && fixP->fx_addsy != NULL
22463	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22464	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
22465	return base + 8;
22466      return base;
22467#else
22468      return base + 8;
22469#endif
22470
22471
22472      /* ARM mode loads relative to PC are also offset by +8.  Unlike
22473	 branches, the Windows CE loader *does* expect the relocation
22474	 to take this into account.  */
22475    case BFD_RELOC_ARM_OFFSET_IMM:
22476    case BFD_RELOC_ARM_OFFSET_IMM8:
22477    case BFD_RELOC_ARM_HWLITERAL:
22478    case BFD_RELOC_ARM_LITERAL:
22479    case BFD_RELOC_ARM_CP_OFF_IMM:
22480      return base + 8;
22481
22482
22483      /* Other PC-relative relocations are un-offset.  */
22484    default:
22485      return base;
22486    }
22487}
22488
22489static bfd_boolean flag_warn_syms = TRUE;
22490
22491bfd_boolean
22492arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED, char * name)
22493{
22494  /* PR 18347 - Warn if the user attempts to create a symbol with the same
22495     name as an ARM instruction.  Whilst strictly speaking it is allowed, it
22496     does mean that the resulting code might be very confusing to the reader.
22497     Also this warning can be triggered if the user omits an operand before
22498     an immediate address, eg:
22499
22500       LDR =foo
22501
22502     GAS treats this as an assignment of the value of the symbol foo to a
22503     symbol LDR, and so (without this code) it will not issue any kind of
22504     warning or error message.
22505
22506     Note - ARM instructions are case-insensitive but the strings in the hash
22507     table are all stored in lower case, so we must first ensure that name is
22508     lower case too.  */
22509  if (flag_warn_syms && arm_ops_hsh)
22510    {
22511      char * nbuf = strdup (name);
22512      char * p;
22513
22514      for (p = nbuf; *p; p++)
22515	*p = TOLOWER (*p);
22516      if (hash_find (arm_ops_hsh, nbuf) != NULL)
22517	{
22518	  static struct hash_control * already_warned = NULL;
22519
22520	  if (already_warned == NULL)
22521	    already_warned = hash_new ();
22522	  /* Only warn about the symbol once.  To keep the code
22523	     simple we let hash_insert do the lookup for us.  */
22524	  if (hash_insert (already_warned, name, NULL) == NULL)
22525	    as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name);
22526	}
22527      else
22528	free (nbuf);
22529    }
22530
22531  return FALSE;
22532}
22533
/* Under ELF we need to provide a default value for _GLOBAL_OFFSET_TABLE_.
   Otherwise we have no need to default the values of symbols.  */
22536
22537symbolS *
22538md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
22539{
22540#ifdef OBJ_ELF
22541  if (name[0] == '_' && name[1] == 'G'
22542      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
22543    {
22544      if (!GOT_symbol)
22545	{
22546	  if (symbol_find (name))
22547	    as_bad (_("GOT already in the symbol table"));
22548
22549	  GOT_symbol = symbol_new (name, undefined_section,
22550				   (valueT) 0, & zero_address_frag);
22551	}
22552
22553      return GOT_symbol;
22554    }
22555#endif
22556
22557  return NULL;
22558}
22559
22560/* Subroutine of md_apply_fix.	 Check to see if an immediate can be
22561   computed as two separate immediate values, added together.  We
22562   already know that this value cannot be computed by just one ARM
22563   instruction.	 */
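/* The return value and *HIGHPART are in ARM data-processing immediate
   format: an 8-bit constant in bits 0-7 and a rotate-right count, in units
   of two bit positions, in bits 8-11.  */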
22564
22565static unsigned int
22566validate_immediate_twopart (unsigned int   val,
22567			    unsigned int * highpart)
22568{
22569  unsigned int a;
22570  unsigned int i;
22571
22572  for (i = 0; i < 32; i += 2)
22573    if (((a = rotate_left (val, i)) & 0xff) != 0)
22574      {
22575	if (a & 0xff00)
22576	  {
22577	    if (a & ~ 0xffff)
22578	      continue;
22579	    * highpart = (a  >> 8) | ((i + 24) << 7);
22580	  }
22581	else if (a & 0xff0000)
22582	  {
22583	    if (a & 0xff000000)
22584	      continue;
22585	    * highpart = (a >> 16) | ((i + 16) << 7);
22586	  }
22587	else
22588	  {
22589	    gas_assert (a & 0xff000000);
22590	    * highpart = (a >> 24) | ((i + 8) << 7);
22591	  }
22592
22593	return (a & 0xff) | (i << 7);
22594      }
22595
22596  return FAIL;
22597}
22598
22599static int
22600validate_offset_imm (unsigned int val, int hwse)
22601{
22602  if ((hwse && val > 255) || val > 4095)
22603    return FAIL;
22604  return val;
22605}
22606
22607/* Subroutine of md_apply_fix.	 Do those data_ops which can take a
22608   negative immediate constant by altering the instruction.  A bit of
22609   a hack really.
22610	MOV <-> MVN
22611	AND <-> BIC
22612	ADC <-> SBC
22613	by inverting the second operand, and
22614	ADD <-> SUB
22615	CMP <-> CMN
22616	by negating the second operand.	 */
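/* For example, a MOV of -1 has no valid modified-immediate encoding, but
   is rewritten here as an MVN of 0.  */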
22617
22618static int
22619negate_data_op (unsigned long * instruction,
22620		unsigned long	value)
22621{
22622  int op, new_inst;
22623  unsigned long negated, inverted;
22624
22625  negated = encode_arm_immediate (-value);
22626  inverted = encode_arm_immediate (~value);
22627
22628  op = (*instruction >> DATA_OP_SHIFT) & 0xf;
22629  switch (op)
22630    {
22631      /* First negates.	 */
22632    case OPCODE_SUB:		 /* ADD <-> SUB	 */
22633      new_inst = OPCODE_ADD;
22634      value = negated;
22635      break;
22636
22637    case OPCODE_ADD:
22638      new_inst = OPCODE_SUB;
22639      value = negated;
22640      break;
22641
22642    case OPCODE_CMP:		 /* CMP <-> CMN	 */
22643      new_inst = OPCODE_CMN;
22644      value = negated;
22645      break;
22646
22647    case OPCODE_CMN:
22648      new_inst = OPCODE_CMP;
22649      value = negated;
22650      break;
22651
22652      /* Now Inverted ops.  */
22653    case OPCODE_MOV:		 /* MOV <-> MVN	 */
22654      new_inst = OPCODE_MVN;
22655      value = inverted;
22656      break;
22657
22658    case OPCODE_MVN:
22659      new_inst = OPCODE_MOV;
22660      value = inverted;
22661      break;
22662
22663    case OPCODE_AND:		 /* AND <-> BIC	 */
22664      new_inst = OPCODE_BIC;
22665      value = inverted;
22666      break;
22667
22668    case OPCODE_BIC:
22669      new_inst = OPCODE_AND;
22670      value = inverted;
22671      break;
22672
22673    case OPCODE_ADC:		  /* ADC <-> SBC  */
22674      new_inst = OPCODE_SBC;
22675      value = inverted;
22676      break;
22677
22678    case OPCODE_SBC:
22679      new_inst = OPCODE_ADC;
22680      value = inverted;
22681      break;
22682
22683      /* We cannot do anything.	 */
22684    default:
22685      return FAIL;
22686    }
22687
22688  if (value == (unsigned) FAIL)
22689    return FAIL;
22690
22691  *instruction &= OPCODE_MASK;
22692  *instruction |= new_inst << DATA_OP_SHIFT;
22693  return value;
22694}
22695
22696/* Like negate_data_op, but for Thumb-2.   */
22697
22698static unsigned int
22699thumb32_negate_data_op (offsetT *instruction, unsigned int value)
22700{
22701  int op, new_inst;
22702  int rd;
22703  unsigned int negated, inverted;
22704
22705  negated = encode_thumb32_immediate (-value);
22706  inverted = encode_thumb32_immediate (~value);
22707
22708  rd = (*instruction >> 8) & 0xf;
22709  op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
22710  switch (op)
22711    {
22712      /* ADD <-> SUB.  Includes CMP <-> CMN.  */
22713    case T2_OPCODE_SUB:
22714      new_inst = T2_OPCODE_ADD;
22715      value = negated;
22716      break;
22717
22718    case T2_OPCODE_ADD:
22719      new_inst = T2_OPCODE_SUB;
22720      value = negated;
22721      break;
22722
22723      /* ORR <-> ORN.  Includes MOV <-> MVN.  */
22724    case T2_OPCODE_ORR:
22725      new_inst = T2_OPCODE_ORN;
22726      value = inverted;
22727      break;
22728
22729    case T2_OPCODE_ORN:
22730      new_inst = T2_OPCODE_ORR;
22731      value = inverted;
22732      break;
22733
22734      /* AND <-> BIC.  TST has no inverted equivalent.  */
22735    case T2_OPCODE_AND:
22736      new_inst = T2_OPCODE_BIC;
22737      if (rd == 15)
22738	value = FAIL;
22739      else
22740	value = inverted;
22741      break;
22742
22743    case T2_OPCODE_BIC:
22744      new_inst = T2_OPCODE_AND;
22745      value = inverted;
22746      break;
22747
22748      /* ADC <-> SBC  */
22749    case T2_OPCODE_ADC:
22750      new_inst = T2_OPCODE_SBC;
22751      value = inverted;
22752      break;
22753
22754    case T2_OPCODE_SBC:
22755      new_inst = T2_OPCODE_ADC;
22756      value = inverted;
22757      break;
22758
22759      /* We cannot do anything.	 */
22760    default:
22761      return FAIL;
22762    }
22763
22764  if (value == (unsigned int)FAIL)
22765    return FAIL;
22766
22767  *instruction &= T2_OPCODE_MASK;
22768  *instruction |= new_inst << T2_DATA_OP_SHIFT;
22769  return value;
22770}
22771
22772/* Read a 32-bit thumb instruction from buf.  */
22773static unsigned long
22774get_thumb32_insn (char * buf)
22775{
22776  unsigned long insn;
22777  insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
22778  insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22779
22780  return insn;
22781}
22782
22783
22784/* We usually want to set the low bit on the address of thumb function
22785   symbols.  In particular .word foo - . should have the low bit set.
22786   Generic code tries to fold the difference of two symbols to
   a constant.  Prevent this and force a relocation when the first symbol
22788   is a thumb function.  */
22789
22790bfd_boolean
22791arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
22792{
22793  if (op == O_subtract
22794      && l->X_op == O_symbol
22795      && r->X_op == O_symbol
22796      && THUMB_IS_FUNC (l->X_add_symbol))
22797    {
22798      l->X_op = O_subtract;
22799      l->X_op_symbol = r->X_add_symbol;
22800      l->X_add_number -= r->X_add_number;
22801      return TRUE;
22802    }
22803
22804  /* Process as normal.  */
22805  return FALSE;
22806}
22807
/* Encode Thumb2 unconditional branches and calls.  The immediate-value
   encoding is identical for the two.  */
22810
22811static void
22812encode_thumb2_b_bl_offset (char * buf, offsetT value)
22813{
22814#define T2I1I2MASK  ((1 << 13) | (1 << 11))
22815  offsetT newval;
22816  offsetT newval2;
22817  addressT S, I1, I2, lo, hi;
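  /* The offset is imm32 = SignExtend (S:I1:I2:imm10:imm11:'0'); the
     instruction itself stores J1 = NOT (I1 EOR S) and J2 = NOT (I2 EOR S).  */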
22818
22819  S = (value >> 24) & 0x01;
22820  I1 = (value >> 23) & 0x01;
22821  I2 = (value >> 22) & 0x01;
22822  hi = (value >> 12) & 0x3ff;
22823  lo = (value >> 1) & 0x7ff;
22824  newval   = md_chars_to_number (buf, THUMB_SIZE);
22825  newval2  = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22826  newval  |= (S << 10) | hi;
22827  newval2 &=  ~T2I1I2MASK;
22828  newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
22829  md_number_to_chars (buf, newval, THUMB_SIZE);
22830  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
22831}
22832
22833void
22834md_apply_fix (fixS *	fixP,
22835	       valueT * valP,
22836	       segT	seg)
22837{
22838  offsetT	 value = * valP;
22839  offsetT	 newval;
22840  unsigned int	 newimm;
22841  unsigned long	 temp;
22842  int		 sign;
22843  char *	 buf = fixP->fx_where + fixP->fx_frag->fr_literal;
22844
22845  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
22846
22847  /* Note whether this will delete the relocation.  */
22848
22849  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
22850    fixP->fx_done = 1;
22851
22852  /* On a 64-bit host, silently truncate 'value' to 32 bits for
22853     consistency with the behaviour on 32-bit hosts.  Remember value
22854     for emit_reloc.  */
22855  value &= 0xffffffff;
22856  value ^= 0x80000000;
22857  value -= 0x80000000;
22858
22859  *valP = value;
22860  fixP->fx_addnumber = value;
22861
22862  /* Same treatment for fixP->fx_offset.  */
22863  fixP->fx_offset &= 0xffffffff;
22864  fixP->fx_offset ^= 0x80000000;
22865  fixP->fx_offset -= 0x80000000;
22866
22867  switch (fixP->fx_r_type)
22868    {
22869    case BFD_RELOC_NONE:
22870      /* This will need to go in the object file.  */
22871      fixP->fx_done = 0;
22872      break;
22873
22874    case BFD_RELOC_ARM_IMMEDIATE:
22875      /* We claim that this fixup has been processed here,
22876	 even if in fact we generate an error because we do
22877	 not have a reloc for it, so tc_gen_reloc will reject it.  */
22878      fixP->fx_done = 1;
22879
22880      if (fixP->fx_addsy)
22881	{
22882	  const char *msg = 0;
22883
22884	  if (! S_IS_DEFINED (fixP->fx_addsy))
22885	    msg = _("undefined symbol %s used as an immediate value");
22886	  else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
22887	    msg = _("symbol %s is in a different section");
22888	  else if (S_IS_WEAK (fixP->fx_addsy))
22889	    msg = _("symbol %s is weak and may be overridden later");
22890
22891	  if (msg)
22892	    {
22893	      as_bad_where (fixP->fx_file, fixP->fx_line,
22894			    msg, S_GET_NAME (fixP->fx_addsy));
22895	      break;
22896	    }
22897	}
22898
22899      temp = md_chars_to_number (buf, INSN_SIZE);
22900
22901      /* If the offset is negative, we should use encoding A2 for ADR.  */
22902      if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
22903	newimm = negate_data_op (&temp, value);
22904      else
22905	{
22906	  newimm = encode_arm_immediate (value);
22907
22908	  /* If the instruction will fail, see if we can fix things up by
22909	     changing the opcode.  */
22910	  if (newimm == (unsigned int) FAIL)
22911	    newimm = negate_data_op (&temp, value);
	  /* MOV accepts both an ARM modified immediate (A1 encoding) and a
	     UINT16 (A2 encoding) when possible; MOVW only accepts UINT16.
	     When disassembling, MOV is preferred when there is no encoding
	     overlap.  */
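	  /* For example, a MOV of 0x1234 has no modified-immediate
	     encoding, but fits the MOVW (A2) form produced below.  */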
22916	  if (newimm == (unsigned int) FAIL
22917	      && ((temp >> DATA_OP_SHIFT) & 0xf) == OPCODE_MOV
22918	      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
22919	      && !((temp >> SBIT_SHIFT) & 0x1)
22920	      && value >= 0 && value <= 0xffff)
22921	    {
22922	      /* Clear bits[23:20] to change encoding from A1 to A2.  */
22923	      temp &= 0xff0fffff;
	      /* Encode the high 4 bits of the immediate.  The code below
		 will encode the remaining low 12 bits.  */
22926	      temp |= (value & 0x0000f000) << 4;
22927	      newimm = value & 0x00000fff;
22928	    }
22929	}
22930
22931      if (newimm == (unsigned int) FAIL)
22932	{
22933	  as_bad_where (fixP->fx_file, fixP->fx_line,
22934			_("invalid constant (%lx) after fixup"),
22935			(unsigned long) value);
22936	  break;
22937	}
22938
22939      newimm |= (temp & 0xfffff000);
22940      md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
22941      break;
22942
22943    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
22944      {
22945	unsigned int highpart = 0;
22946	unsigned int newinsn  = 0xe1a00000; /* nop.  */
22947
22948	if (fixP->fx_addsy)
22949	  {
22950	    const char *msg = 0;
22951
22952	    if (! S_IS_DEFINED (fixP->fx_addsy))
22953	      msg = _("undefined symbol %s used as an immediate value");
22954	    else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
22955	      msg = _("symbol %s is in a different section");
22956	    else if (S_IS_WEAK (fixP->fx_addsy))
22957	      msg = _("symbol %s is weak and may be overridden later");
22958
22959	    if (msg)
22960	      {
22961		as_bad_where (fixP->fx_file, fixP->fx_line,
22962			      msg, S_GET_NAME (fixP->fx_addsy));
22963		break;
22964	      }
22965	  }
22966
22967	newimm = encode_arm_immediate (value);
22968	temp = md_chars_to_number (buf, INSN_SIZE);
22969
22970	/* If the instruction will fail, see if we can fix things up by
22971	   changing the opcode.	 */
22972	if (newimm == (unsigned int) FAIL
22973	    && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
22974	  {
	    /* No?  OK, try using two ADD instructions to generate
	       the value.  */
22977	    newimm = validate_immediate_twopart (value, & highpart);
22978
	    /* If that worked, make sure that the second instruction is
	       also an add.  */
22981	    if (newimm != (unsigned int) FAIL)
22982	      newinsn = temp;
	    /* Still no?  Try using a negated value.  */
22984	    else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
22985	      temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
22986	    /* Otherwise - give up.  */
22987	    else
22988	      {
22989		as_bad_where (fixP->fx_file, fixP->fx_line,
22990			      _("unable to compute ADRL instructions for PC offset of 0x%lx"),
22991			      (long) value);
22992		break;
22993	      }
22994
22995	    /* Replace the first operand in the 2nd instruction (which
22996	       is the PC) with the destination register.  We have
22997	       already added in the PC in the first instruction and we
22998	       do not want to do it again.  */
22999	    newinsn &= ~ 0xf0000;
23000	    newinsn |= ((newinsn & 0x0f000) << 4);
23001	  }
23002
23003	newimm |= (temp & 0xfffff000);
23004	md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
23005
23006	highpart |= (newinsn & 0xfffff000);
23007	md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
23008      }
23009      break;
23010
23011    case BFD_RELOC_ARM_OFFSET_IMM:
23012      if (!fixP->fx_done && seg->use_rela_p)
23013	value = 0;
23014      /* Fall through.  */
23015
23016    case BFD_RELOC_ARM_LITERAL:
23017      sign = value > 0;
23018
23019      if (value < 0)
23020	value = - value;
23021
23022      if (validate_offset_imm (value, 0) == FAIL)
23023	{
23024	  if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
23025	    as_bad_where (fixP->fx_file, fixP->fx_line,
23026			  _("invalid literal constant: pool needs to be closer"));
23027	  else
23028	    as_bad_where (fixP->fx_file, fixP->fx_line,
23029			  _("bad immediate value for offset (%ld)"),
23030			  (long) value);
23031	  break;
23032	}
23033
23034      newval = md_chars_to_number (buf, INSN_SIZE);
23035      if (value == 0)
23036	newval &= 0xfffff000;
23037      else
23038	{
23039	  newval &= 0xff7ff000;
23040	  newval |= value | (sign ? INDEX_UP : 0);
23041	}
23042      md_number_to_chars (buf, newval, INSN_SIZE);
23043      break;
23044
23045    case BFD_RELOC_ARM_OFFSET_IMM8:
23046    case BFD_RELOC_ARM_HWLITERAL:
23047      sign = value > 0;
23048
23049      if (value < 0)
23050	value = - value;
23051
23052      if (validate_offset_imm (value, 1) == FAIL)
23053	{
23054	  if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
23055	    as_bad_where (fixP->fx_file, fixP->fx_line,
23056			  _("invalid literal constant: pool needs to be closer"));
23057	  else
23058	    as_bad_where (fixP->fx_file, fixP->fx_line,
23059			  _("bad immediate value for 8-bit offset (%ld)"),
23060			  (long) value);
23061	  break;
23062	}
23063
23064      newval = md_chars_to_number (buf, INSN_SIZE);
23065      if (value == 0)
23066	newval &= 0xfffff0f0;
23067      else
23068	{
23069	  newval &= 0xff7ff0f0;
23070	  newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
23071	}
23072      md_number_to_chars (buf, newval, INSN_SIZE);
23073      break;
23074
23075    case BFD_RELOC_ARM_T32_OFFSET_U8:
23076      if (value < 0 || value > 1020 || value % 4 != 0)
23077	as_bad_where (fixP->fx_file, fixP->fx_line,
23078		      _("bad immediate value for offset (%ld)"), (long) value);
23079      value /= 4;
23080
23081      newval = md_chars_to_number (buf+2, THUMB_SIZE);
23082      newval |= value;
23083      md_number_to_chars (buf+2, newval, THUMB_SIZE);
23084      break;
23085
23086    case BFD_RELOC_ARM_T32_OFFSET_IMM:
23087      /* This is a complicated relocation used for all varieties of Thumb32
23088	 load/store instruction with immediate offset:
23089
23090	 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
23091						   *4, optional writeback(W)
23092						   (doubleword load/store)
23093
23094	 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
23095	 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
23096	 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
23097	 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
23098	 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
23099
23100	 Uppercase letters indicate bits that are already encoded at
23101	 this point.  Lowercase letters are our problem.  For the
23102	 second block of instructions, the secondary opcode nybble
23103	 (bits 8..11) is present, and bit 23 is zero, even if this is
23104	 a PC-relative operation.  */
23105      newval = md_chars_to_number (buf, THUMB_SIZE);
23106      newval <<= 16;
23107      newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
23108
23109      if ((newval & 0xf0000000) == 0xe0000000)
23110	{
23111	  /* Doubleword load/store: 8-bit offset, scaled by 4.  */
23112	  if (value >= 0)
23113	    newval |= (1 << 23);
23114	  else
23115	    value = -value;
23116	  if (value % 4 != 0)
23117	    {
23118	      as_bad_where (fixP->fx_file, fixP->fx_line,
23119			    _("offset not a multiple of 4"));
23120	      break;
23121	    }
23122	  value /= 4;
23123	  if (value > 0xff)
23124	    {
23125	      as_bad_where (fixP->fx_file, fixP->fx_line,
23126			    _("offset out of range"));
23127	      break;
23128	    }
23129	  newval &= ~0xff;
23130	}
23131      else if ((newval & 0x000f0000) == 0x000f0000)
23132	{
23133	  /* PC-relative, 12-bit offset.  */
23134	  if (value >= 0)
23135	    newval |= (1 << 23);
23136	  else
23137	    value = -value;
23138	  if (value > 0xfff)
23139	    {
23140	      as_bad_where (fixP->fx_file, fixP->fx_line,
23141			    _("offset out of range"));
23142	      break;
23143	    }
23144	  newval &= ~0xfff;
23145	}
23146      else if ((newval & 0x00000100) == 0x00000100)
23147	{
23148	  /* Writeback: 8-bit, +/- offset.  */
23149	  if (value >= 0)
23150	    newval |= (1 << 9);
23151	  else
23152	    value = -value;
23153	  if (value > 0xff)
23154	    {
23155	      as_bad_where (fixP->fx_file, fixP->fx_line,
23156			    _("offset out of range"));
23157	      break;
23158	    }
23159	  newval &= ~0xff;
23160	}
23161      else if ((newval & 0x00000f00) == 0x00000e00)
23162	{
23163	  /* T-instruction: positive 8-bit offset.  */
23164	  if (value < 0 || value > 0xff)
23165	    {
23166	      as_bad_where (fixP->fx_file, fixP->fx_line,
23167			    _("offset out of range"));
23168	      break;
23169	    }
23170	  newval &= ~0xff;
23171	  newval |= value;
23172	}
23173      else
23174	{
23175	  /* Positive 12-bit or negative 8-bit offset.  */
23176	  int limit;
23177	  if (value >= 0)
23178	    {
23179	      newval |= (1 << 23);
23180	      limit = 0xfff;
23181	    }
23182	  else
23183	    {
23184	      value = -value;
23185	      limit = 0xff;
23186	    }
23187	  if (value > limit)
23188	    {
23189	      as_bad_where (fixP->fx_file, fixP->fx_line,
23190			    _("offset out of range"));
23191	      break;
23192	    }
23193	  newval &= ~limit;
23194	}
23195
23196      newval |= value;
23197      md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
23198      md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
23199      break;
23200
23201    case BFD_RELOC_ARM_SHIFT_IMM:
23202      newval = md_chars_to_number (buf, INSN_SIZE);
23203      if (((unsigned long) value) > 32
23204	  || (value == 32
23205	      && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
23206	{
23207	  as_bad_where (fixP->fx_file, fixP->fx_line,
23208			_("shift expression is too large"));
23209	  break;
23210	}
23211
23212      if (value == 0)
23213	/* Shifts of zero must be done as lsl.	*/
23214	newval &= ~0x60;
23215      else if (value == 32)
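	/* LSR #32 and ASR #32 are encoded with an immediate field of 0.  */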
23216	value = 0;
23217      newval &= 0xfffff07f;
23218      newval |= (value & 0x1f) << 7;
23219      md_number_to_chars (buf, newval, INSN_SIZE);
23220      break;
23221
23222    case BFD_RELOC_ARM_T32_IMMEDIATE:
23223    case BFD_RELOC_ARM_T32_ADD_IMM:
23224    case BFD_RELOC_ARM_T32_IMM12:
23225    case BFD_RELOC_ARM_T32_ADD_PC12:
23226      /* We claim that this fixup has been processed here,
23227	 even if in fact we generate an error because we do
23228	 not have a reloc for it, so tc_gen_reloc will reject it.  */
23229      fixP->fx_done = 1;
23230
23231      if (fixP->fx_addsy
23232	  && ! S_IS_DEFINED (fixP->fx_addsy))
23233	{
23234	  as_bad_where (fixP->fx_file, fixP->fx_line,
23235			_("undefined symbol %s used as an immediate value"),
23236			S_GET_NAME (fixP->fx_addsy));
23237	  break;
23238	}
23239
23240      newval = md_chars_to_number (buf, THUMB_SIZE);
23241      newval <<= 16;
23242      newval |= md_chars_to_number (buf+2, THUMB_SIZE);
23243
23244      newimm = FAIL;
23245      if ((fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
23246	   /* ARMv8-M Baseline MOV will reach here, but it doesn't support
23247	      Thumb2 modified immediate encoding (T2).  */
23248	   && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
23249	  || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
23250	{
23251	  newimm = encode_thumb32_immediate (value);
23252	  if (newimm == (unsigned int) FAIL)
23253	    newimm = thumb32_negate_data_op (&newval, value);
23254	}
23255      if (newimm == (unsigned int) FAIL)
23256	{
23257	  if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE)
23258	    {
	      /* Turn add/sub into addw/subw.  */
23260	      if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
23261		newval = (newval & 0xfeffffff) | 0x02000000;
	      /* The flag-setting adds.w/subs.w have no 12-bit immediate
		 encoding.  */
23263	      if ((newval & 0x00100000) == 0)
23264		{
23265		  /* 12 bit immediate for addw/subw.  */
23266		  if (value < 0)
23267		    {
23268		      value = -value;
23269		      newval ^= 0x00a00000;
23270		    }
23271		  if (value > 0xfff)
23272		    newimm = (unsigned int) FAIL;
23273		  else
23274		    newimm = value;
23275		}
23276	    }
23277	  else
23278	    {
	      /* MOV accepts both a Thumb2 modified immediate (T2 encoding)
		 and a UINT16 (T3 encoding); MOVW only accepts UINT16.  When
		 disassembling, MOV is preferred when there is no encoding
		 overlap.
		 NOTE: MOV uses the ORR opcode in Thumb-2 mode.  */
23284	      if (((newval >> T2_DATA_OP_SHIFT) & 0xf) == T2_OPCODE_ORR
23285		  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m)
23286		  && !((newval >> T2_SBIT_SHIFT) & 0x1)
		  && value >= 0 && value <= 0xffff)
23288		{
23289		  /* Toggle bit[25] to change encoding from T2 to T3.  */
23290		  newval ^= 1 << 25;
23291		  /* Clear bits[19:16].  */
23292		  newval &= 0xfff0ffff;
		  /* Encode the high 4 bits of the immediate.  The code below
		     will encode the remaining low 12 bits.  */
23295		  newval |= (value & 0x0000f000) << 4;
23296		  newimm = value & 0x00000fff;
23297		}
23298	    }
23299	}
23300
23301      if (newimm == (unsigned int)FAIL)
23302	{
23303	  as_bad_where (fixP->fx_file, fixP->fx_line,
23304			_("invalid constant (%lx) after fixup"),
23305			(unsigned long) value);
23306	  break;
23307	}
23308
23309      newval |= (newimm & 0x800) << 15;
23310      newval |= (newimm & 0x700) << 4;
23311      newval |= (newimm & 0x0ff);
23312
23313      md_number_to_chars (buf,   (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
23314      md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
23315      break;
23316
23317    case BFD_RELOC_ARM_SMC:
23318      if (((unsigned long) value) > 0xffff)
23319	as_bad_where (fixP->fx_file, fixP->fx_line,
23320		      _("invalid smc expression"));
23321      newval = md_chars_to_number (buf, INSN_SIZE);
23322      newval |= (value & 0xf) | ((value & 0xfff0) << 4);
23323      md_number_to_chars (buf, newval, INSN_SIZE);
23324      break;
23325
23326    case BFD_RELOC_ARM_HVC:
23327      if (((unsigned long) value) > 0xffff)
23328	as_bad_where (fixP->fx_file, fixP->fx_line,
23329		      _("invalid hvc expression"));
23330      newval = md_chars_to_number (buf, INSN_SIZE);
23331      newval |= (value & 0xf) | ((value & 0xfff0) << 4);
23332      md_number_to_chars (buf, newval, INSN_SIZE);
23333      break;
23334
23335    case BFD_RELOC_ARM_SWI:
23336      if (fixP->tc_fix_data != 0)
23337	{
23338	  if (((unsigned long) value) > 0xff)
23339	    as_bad_where (fixP->fx_file, fixP->fx_line,
23340			  _("invalid swi expression"));
23341	  newval = md_chars_to_number (buf, THUMB_SIZE);
23342	  newval |= value;
23343	  md_number_to_chars (buf, newval, THUMB_SIZE);
23344	}
23345      else
23346	{
23347	  if (((unsigned long) value) > 0x00ffffff)
23348	    as_bad_where (fixP->fx_file, fixP->fx_line,
23349			  _("invalid swi expression"));
23350	  newval = md_chars_to_number (buf, INSN_SIZE);
23351	  newval |= value;
23352	  md_number_to_chars (buf, newval, INSN_SIZE);
23353	}
23354      break;
23355
23356    case BFD_RELOC_ARM_MULTI:
23357      if (((unsigned long) value) > 0xffff)
23358	as_bad_where (fixP->fx_file, fixP->fx_line,
23359		      _("invalid expression in load/store multiple"));
23360      newval = value | md_chars_to_number (buf, INSN_SIZE);
23361      md_number_to_chars (buf, newval, INSN_SIZE);
23362      break;
23363
23364#ifdef OBJ_ELF
23365    case BFD_RELOC_ARM_PCREL_CALL:
23366
23367      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23368	  && fixP->fx_addsy
23369	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23370	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23371	  && THUMB_IS_FUNC (fixP->fx_addsy))
	/* Flip the bl to blx.  This is a simple bit flip
	   because we only generate PCREL_CALL for
	   unconditional bls.  */
23375	{
23376	  newval = md_chars_to_number (buf, INSN_SIZE);
23377	  newval = newval | 0x10000000;
23378	  md_number_to_chars (buf, newval, INSN_SIZE);
23379	  temp = 1;
23380	  fixP->fx_done = 1;
23381	}
23382      else
23383	temp = 3;
23384      goto arm_branch_common;
23385
23386    case BFD_RELOC_ARM_PCREL_JUMP:
23387      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23388	  && fixP->fx_addsy
23389	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23390	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23391	  && THUMB_IS_FUNC (fixP->fx_addsy))
23392	{
23393	  /* This would map to a bl<cond>, b<cond>,
23394	     b<always> to a Thumb function. We
23395	     need to force a relocation for this particular
23396	     case.  */
23397	  newval = md_chars_to_number (buf, INSN_SIZE);
23398	  fixP->fx_done = 0;
23399	}
23400      /* Fall through.  */
23401
23402    case BFD_RELOC_ARM_PLT32:
23403#endif
23404    case BFD_RELOC_ARM_PCREL_BRANCH:
23405      temp = 3;
23406      goto arm_branch_common;
23407
23408    case BFD_RELOC_ARM_PCREL_BLX:
23409
23410      temp = 1;
23411      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23412	  && fixP->fx_addsy
23413	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23414	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23415	  && ARM_IS_FUNC (fixP->fx_addsy))
23416	{
23417	  /* Flip the blx to a bl and warn.  */
23418	  const char *name = S_GET_NAME (fixP->fx_addsy);
23419	  newval = 0xeb000000;
23420	  as_warn_where (fixP->fx_file, fixP->fx_line,
23421			 _("blx to '%s' an ARM ISA state function changed to bl"),
23422			  name);
23423	  md_number_to_chars (buf, newval, INSN_SIZE);
23424	  temp = 3;
23425	  fixP->fx_done = 1;
23426	}
23427
23428#ifdef OBJ_ELF
23429       if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
23430	 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
23431#endif
23432
23433    arm_branch_common:
      /* We are going to store value (shifted right by two) in the
	 instruction, in a 24-bit, signed field.  Bits 26 and above must be
	 either all clear or all set, and bit 0 must be clear.  For B/BL
	 bit 1 must also be clear.  */
23438      if (value & temp)
23439	as_bad_where (fixP->fx_file, fixP->fx_line,
23440		      _("misaligned branch destination"));
23441      if ((value & (offsetT)0xfe000000) != (offsetT)0
23442	  && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
23443	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23444
23445      if (fixP->fx_done || !seg->use_rela_p)
23446	{
23447	  newval = md_chars_to_number (buf, INSN_SIZE);
23448	  newval |= (value >> 2) & 0x00ffffff;
23449	  /* Set the H bit on BLX instructions.  */
23450	  if (temp == 1)
23451	    {
23452	      if (value & 2)
23453		newval |= 0x01000000;
23454	      else
23455		newval &= ~0x01000000;
23456	    }
23457	  md_number_to_chars (buf, newval, INSN_SIZE);
23458	}
23459      break;
23460
23461    case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
23462      /* CBZ can only branch forward.  */
23463
23464      /* Attempts to use CBZ to branch to the next instruction
23465	 (which, strictly speaking, are prohibited) will be turned into
23466	 no-ops.
23467
23468	 FIXME: It may be better to remove the instruction completely and
23469	 perform relaxation.  */
23470      if (value == -2)
23471	{
23472	  newval = md_chars_to_number (buf, THUMB_SIZE);
23473	  newval = 0xbf00; /* NOP encoding T1 */
23474	  md_number_to_chars (buf, newval, THUMB_SIZE);
23475	}
23476      else
23477	{
23478	  if (value & ~0x7e)
23479	    as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23480
23481	  if (fixP->fx_done || !seg->use_rela_p)
23482	    {
23483	      newval = md_chars_to_number (buf, THUMB_SIZE);
23484	      newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
23485	      md_number_to_chars (buf, newval, THUMB_SIZE);
23486	    }
23487	}
23488      break;
23489
23490    case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch.	*/
23491      if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
23492	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23493
23494      if (fixP->fx_done || !seg->use_rela_p)
23495	{
23496	  newval = md_chars_to_number (buf, THUMB_SIZE);
23497	  newval |= (value & 0x1ff) >> 1;
23498	  md_number_to_chars (buf, newval, THUMB_SIZE);
23499	}
23500      break;
23501
23502    case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch.  */
23503      if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
23504	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23505
23506      if (fixP->fx_done || !seg->use_rela_p)
23507	{
23508	  newval = md_chars_to_number (buf, THUMB_SIZE);
23509	  newval |= (value & 0xfff) >> 1;
23510	  md_number_to_chars (buf, newval, THUMB_SIZE);
23511	}
23512      break;
23513
23514    case BFD_RELOC_THUMB_PCREL_BRANCH20:
23515      if (fixP->fx_addsy
23516	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23517	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23518	  && ARM_IS_FUNC (fixP->fx_addsy)
23519	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
23520	{
23521	  /* Force a relocation for a branch 20 bits wide.  */
23522	  fixP->fx_done = 0;
23523	}
23524      if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
23525	as_bad_where (fixP->fx_file, fixP->fx_line,
23526		      _("conditional branch out of range"));
23527
23528      if (fixP->fx_done || !seg->use_rela_p)
23529	{
23530	  offsetT newval2;
23531	  addressT S, J1, J2, lo, hi;
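	  /* B<cond>.W (encoding T3):
	     imm32 = SignExtend (S:J2:J1:imm6:imm11:'0').  */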
23532
23533	  S  = (value & 0x00100000) >> 20;
23534	  J2 = (value & 0x00080000) >> 19;
23535	  J1 = (value & 0x00040000) >> 18;
23536	  hi = (value & 0x0003f000) >> 12;
23537	  lo = (value & 0x00000ffe) >> 1;
23538
23539	  newval   = md_chars_to_number (buf, THUMB_SIZE);
23540	  newval2  = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23541	  newval  |= (S << 10) | hi;
23542	  newval2 |= (J1 << 13) | (J2 << 11) | lo;
23543	  md_number_to_chars (buf, newval, THUMB_SIZE);
23544	  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
23545	}
23546      break;
23547
23548    case BFD_RELOC_THUMB_PCREL_BLX:
      /* If there is a blx from a Thumb state function to
	 another Thumb function, flip this to a bl and warn
	 about it.  */
23552
23553      if (fixP->fx_addsy
23554	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23555	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23556	  && THUMB_IS_FUNC (fixP->fx_addsy))
23557	{
23558	  const char *name = S_GET_NAME (fixP->fx_addsy);
23559	  as_warn_where (fixP->fx_file, fixP->fx_line,
23560			 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
23561			 name);
23562	  newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23563	  newval = newval | 0x1000;
23564	  md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
23565	  fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
23566	  fixP->fx_done = 1;
23567	}
23568
23569
23570      goto thumb_bl_common;
23571
23572    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* A bl from Thumb ISA state to an internal ARM state function
	 is converted to a blx.  */
23575      if (fixP->fx_addsy
23576	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23577	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23578	  && ARM_IS_FUNC (fixP->fx_addsy)
23579	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
23580	{
23581	  newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23582	  newval = newval & ~0x1000;
23583	  md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
23584	  fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
23585	  fixP->fx_done = 1;
23586	}
23587
23588    thumb_bl_common:
23589
23590      if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
23591	/* For a BLX instruction, make sure that the relocation is rounded up
23592	   to a word boundary.  This follows the semantics of the instruction
23593	   which specifies that bit 1 of the target address will come from bit
23594	   1 of the base address.  */
23595	value = (value + 3) & ~ 3;
23596
23597#ifdef OBJ_ELF
23598       if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
23599	   && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
23600	 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
23601#endif
23602
23603      if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
23604	{
23605	  if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)))
23606	    as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23607	  else if ((value & ~0x1ffffff)
23608		   && ((value & ~0x1ffffff) != ~0x1ffffff))
23609	    as_bad_where (fixP->fx_file, fixP->fx_line,
23610			  _("Thumb2 branch out of range"));
23611	}
23612
23613      if (fixP->fx_done || !seg->use_rela_p)
23614	encode_thumb2_b_bl_offset (buf, value);
23615
23616      break;
23617
23618    case BFD_RELOC_THUMB_PCREL_BRANCH25:
23619      if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
23620	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23621
23622      if (fixP->fx_done || !seg->use_rela_p)
23623	  encode_thumb2_b_bl_offset (buf, value);
23624
23625      break;
23626
23627    case BFD_RELOC_8:
23628      if (fixP->fx_done || !seg->use_rela_p)
23629	*buf = value;
23630      break;
23631
23632    case BFD_RELOC_16:
23633      if (fixP->fx_done || !seg->use_rela_p)
23634	md_number_to_chars (buf, value, 2);
23635      break;
23636
23637#ifdef OBJ_ELF
23638    case BFD_RELOC_ARM_TLS_CALL:
23639    case BFD_RELOC_ARM_THM_TLS_CALL:
23640    case BFD_RELOC_ARM_TLS_DESCSEQ:
23641    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
23642    case BFD_RELOC_ARM_TLS_GOTDESC:
23643    case BFD_RELOC_ARM_TLS_GD32:
23644    case BFD_RELOC_ARM_TLS_LE32:
23645    case BFD_RELOC_ARM_TLS_IE32:
23646    case BFD_RELOC_ARM_TLS_LDM32:
23647    case BFD_RELOC_ARM_TLS_LDO32:
23648      S_SET_THREAD_LOCAL (fixP->fx_addsy);
23649      break;
23650
23651    case BFD_RELOC_ARM_GOT32:
23652    case BFD_RELOC_ARM_GOTOFF:
23653      break;
23654
23655    case BFD_RELOC_ARM_GOT_PREL:
23656      if (fixP->fx_done || !seg->use_rela_p)
23657	md_number_to_chars (buf, value, 4);
23658      break;
23659
23660    case BFD_RELOC_ARM_TARGET2:
23661      /* TARGET2 is not partial-inplace, so we need to write the
23662	 addend here for REL targets, because it won't be written out
23663	 during reloc processing later.  */
23664      if (fixP->fx_done || !seg->use_rela_p)
23665	md_number_to_chars (buf, fixP->fx_offset, 4);
23666      break;
23667#endif
23668
23669    case BFD_RELOC_RVA:
23670    case BFD_RELOC_32:
23671    case BFD_RELOC_ARM_TARGET1:
23672    case BFD_RELOC_ARM_ROSEGREL32:
23673    case BFD_RELOC_ARM_SBREL32:
23674    case BFD_RELOC_32_PCREL:
23675#ifdef TE_PE
23676    case BFD_RELOC_32_SECREL:
23677#endif
23678      if (fixP->fx_done || !seg->use_rela_p)
23679#ifdef TE_WINCE
23680	/* For WinCE we only do this for pcrel fixups.  */
23681	if (fixP->fx_done || fixP->fx_pcrel)
23682#endif
23683	  md_number_to_chars (buf, value, 4);
23684      break;
23685
23686#ifdef OBJ_ELF
23687    case BFD_RELOC_ARM_PREL31:
23688      if (fixP->fx_done || !seg->use_rela_p)
23689	{
23690	  newval = md_chars_to_number (buf, 4) & 0x80000000;
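	  /* Overflow occurs iff bits 30 and 31 of the value differ, i.e. the
	     value does not fit in a signed 31-bit field.  */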
23691	  if ((value ^ (value >> 1)) & 0x40000000)
23692	    {
23693	      as_bad_where (fixP->fx_file, fixP->fx_line,
23694			    _("rel31 relocation overflow"));
23695	    }
23696	  newval |= value & 0x7fffffff;
23697	  md_number_to_chars (buf, newval, 4);
23698	}
23699      break;
23700#endif
23701
23702    case BFD_RELOC_ARM_CP_OFF_IMM:
23703    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
23704      if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM)
23705	newval = md_chars_to_number (buf, INSN_SIZE);
23706      else
23707	newval = get_thumb32_insn (buf);
23708      if ((newval & 0x0f200f00) == 0x0d000900)
23709	{
	  /* This is an fp16 vstr/vldr.  The immediate offset in the mnemonic
	     is a multiple of 2 with magnitude in the range 0 to 510; the
	     sign is encoded separately.  */
23713	  if (value < -510 || value > 510 || (value & 1))
23714	    as_bad_where (fixP->fx_file, fixP->fx_line,
23715			  _("co-processor offset out of range"));
23716	}
23717      else if (value < -1023 || value > 1023 || (value & 3))
23718	as_bad_where (fixP->fx_file, fixP->fx_line,
23719		      _("co-processor offset out of range"));
23720    cp_off_common:
23721      sign = value > 0;
23722      if (value < 0)
23723	value = -value;
23724      if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
23725	  || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
23726	newval = md_chars_to_number (buf, INSN_SIZE);
23727      else
23728	newval = get_thumb32_insn (buf);
23729      if (value == 0)
23730	newval &= 0xffffff00;
23731      else
23732	{
23733	  newval &= 0xff7fff00;
23734	  if ((newval & 0x0f200f00) == 0x0d000900)
23735	    {
	      /* This is an fp16 vstr/vldr.

		 The immediate offset field of the instruction holds the
		 offset in half-words, i.e. the byte offset divided by 2.
		 Shifting left by 1 here and right by 2 below gives that
		 result.  */
23743	      value <<= 1;
23744	    }
23745	  newval |= (value >> 2) | (sign ? INDEX_UP : 0);
23746	}
23747      if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
23748	  || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
23749	md_number_to_chars (buf, newval, INSN_SIZE);
23750      else
23751	put_thumb32_insn (buf, newval);
23752      break;
23753
23754    case BFD_RELOC_ARM_CP_OFF_IMM_S2:
23755    case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
23756      if (value < -255 || value > 255)
23757	as_bad_where (fixP->fx_file, fixP->fx_line,
23758		      _("co-processor offset out of range"));
23759      value *= 4;
23760      goto cp_off_common;
23761
23762    case BFD_RELOC_ARM_THUMB_OFFSET:
23763      newval = md_chars_to_number (buf, THUMB_SIZE);
      /* Exactly what ranges, and where the offset is inserted, depends
	 on the type of instruction; we can establish this from the
	 top 4 bits.  */
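      /* For example (an illustrative sketch): "ldr r0, [sp, #16]" has top
	 bits 9, so the offset is stored as a word count, 16 >> 2 = 4, in
	 bits [7:0] of the instruction.  */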
23767      switch (newval >> 12)
23768	{
23769	case 4: /* PC load.  */
23770	  /* Thumb PC loads are somewhat odd, bit 1 of the PC is
23771	     forced to zero for these loads; md_pcrel_from has already
23772	     compensated for this.  */
23773	  if (value & 3)
23774	    as_bad_where (fixP->fx_file, fixP->fx_line,
23775			  _("invalid offset, target not word aligned (0x%08lX)"),
23776			  (((unsigned long) fixP->fx_frag->fr_address
23777			    + (unsigned long) fixP->fx_where) & ~3)
23778			  + (unsigned long) value);
23779
23780	  if (value & ~0x3fc)
23781	    as_bad_where (fixP->fx_file, fixP->fx_line,
23782			  _("invalid offset, value too big (0x%08lX)"),
23783			  (long) value);
23784
23785	  newval |= value >> 2;
23786	  break;
23787
23788	case 9: /* SP load/store.  */
23789	  if (value & ~0x3fc)
23790	    as_bad_where (fixP->fx_file, fixP->fx_line,
23791			  _("invalid offset, value too big (0x%08lX)"),
23792			  (long) value);
23793	  newval |= value >> 2;
23794	  break;
23795
23796	case 6: /* Word load/store.  */
23797	  if (value & ~0x7c)
23798	    as_bad_where (fixP->fx_file, fixP->fx_line,
23799			  _("invalid offset, value too big (0x%08lX)"),
23800			  (long) value);
23801	  newval |= value << 4; /* 6 - 2.  */
23802	  break;
23803
23804	case 7: /* Byte load/store.  */
23805	  if (value & ~0x1f)
23806	    as_bad_where (fixP->fx_file, fixP->fx_line,
23807			  _("invalid offset, value too big (0x%08lX)"),
23808			  (long) value);
23809	  newval |= value << 6;
23810	  break;
23811
23812	case 8: /* Halfword load/store.	 */
23813	  if (value & ~0x3e)
23814	    as_bad_where (fixP->fx_file, fixP->fx_line,
23815			  _("invalid offset, value too big (0x%08lX)"),
23816			  (long) value);
23817	  newval |= value << 5; /* 6 - 1.  */
23818	  break;
23819
23820	default:
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("Unable to process relocation for thumb opcode: %lx"),
			(unsigned long) newval);
23824	  break;
23825	}
23826      md_number_to_chars (buf, newval, THUMB_SIZE);
23827      break;
23828
23829    case BFD_RELOC_ARM_THUMB_ADD:
23830      /* This is a complicated relocation, since we use it for all of
23831	 the following immediate relocations:
23832
23833	    3bit ADD/SUB
23834	    8bit ADD/SUB
23835	    9bit ADD/SUB SP word-aligned
23836	   10bit ADD PC/SP word-aligned
23837
23838	 The type of instruction being processed is encoded in the
23839	 instruction field:
23840
23841	   0x8000  SUB
23842	   0x00F0  Rd
23843	   0x000F  Rs
23844      */
23845      newval = md_chars_to_number (buf, THUMB_SIZE);
23846      {
23847	int rd = (newval >> 4) & 0xf;
23848	int rs = newval & 0xf;
23849	int subtract = !!(newval & 0x8000);
23850
	/* Check for HI regs; only very restricted cases are allowed:
	   adjusting SP, and using PC or SP to get an address.  */
23853	if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
23854	    || (rs > 7 && rs != REG_SP && rs != REG_PC))
23855	  as_bad_where (fixP->fx_file, fixP->fx_line,
23856			_("invalid Hi register with immediate"));
23857
23858	/* If value is negative, choose the opposite instruction.  */
23859	if (value < 0)
23860	  {
23861	    value = -value;
23862	    subtract = !subtract;
23863	    if (value < 0)
23864	      as_bad_where (fixP->fx_file, fixP->fx_line,
23865			    _("immediate value out of range"));
23866	  }
23867
23868	if (rd == REG_SP)
23869	  {
	    if (value & ~0x1fc)
23871	      as_bad_where (fixP->fx_file, fixP->fx_line,
23872			    _("invalid immediate for stack address calculation"));
23873	    newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
23874	    newval |= value >> 2;
23875	  }
23876	else if (rs == REG_PC || rs == REG_SP)
23877	  {
23878	    /* PR gas/18541.  If the addition is for a defined symbol
23879	       within range of an ADR instruction then accept it.  */
23880	    if (subtract
23881		&& value == 4
23882		&& fixP->fx_addsy != NULL)
23883	      {
23884		subtract = 0;
23885
23886		if (! S_IS_DEFINED (fixP->fx_addsy)
23887		    || S_GET_SEGMENT (fixP->fx_addsy) != seg
23888		    || S_IS_WEAK (fixP->fx_addsy))
23889		  {
23890		    as_bad_where (fixP->fx_file, fixP->fx_line,
23891				  _("address calculation needs a strongly defined nearby symbol"));
23892		  }
23893		else
23894		  {
23895		    offsetT v = fixP->fx_where + fixP->fx_frag->fr_address;
23896
23897		    /* Round up to the next 4-byte boundary.  */
23898		    if (v & 3)
23899		      v = (v + 3) & ~ 3;
23900		    else
23901		      v += 4;
23902		    v = S_GET_VALUE (fixP->fx_addsy) - v;
23903
23904		    if (v & ~0x3fc)
23905		      {
23906			as_bad_where (fixP->fx_file, fixP->fx_line,
23907				      _("symbol too far away"));
23908		      }
23909		    else
23910		      {
23911			fixP->fx_done = 1;
23912			value = v;
23913		      }
23914		  }
23915	      }
23916
23917	    if (subtract || value & ~0x3fc)
23918	      as_bad_where (fixP->fx_file, fixP->fx_line,
23919			    _("invalid immediate for address calculation (value = 0x%08lX)"),
23920			    (unsigned long) (subtract ? - value : value));
23921	    newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
23922	    newval |= rd << 8;
23923	    newval |= value >> 2;
23924	  }
23925	else if (rs == rd)
23926	  {
23927	    if (value & ~0xff)
23928	      as_bad_where (fixP->fx_file, fixP->fx_line,
23929			    _("immediate value out of range"));
23930	    newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
23931	    newval |= (rd << 8) | value;
23932	  }
23933	else
23934	  {
23935	    if (value & ~0x7)
23936	      as_bad_where (fixP->fx_file, fixP->fx_line,
23937			    _("immediate value out of range"));
23938	    newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
23939	    newval |= rd | (rs << 3) | (value << 6);
23940	  }
23941      }
23942      md_number_to_chars (buf, newval, THUMB_SIZE);
23943      break;
23944
23945    case BFD_RELOC_ARM_THUMB_IMM:
23946      newval = md_chars_to_number (buf, THUMB_SIZE);
23947      if (value < 0 || value > 255)
23948	as_bad_where (fixP->fx_file, fixP->fx_line,
23949		      _("invalid immediate: %ld is out of range"),
23950		      (long) value);
23951      newval |= value;
23952      md_number_to_chars (buf, newval, THUMB_SIZE);
23953      break;
23954
23955    case BFD_RELOC_ARM_THUMB_SHIFT:
23956      /* 5bit shift value (0..32).  LSL cannot take 32.	 */
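      /* Illustrative note: LSR and ASR treat a shift of 32 as an encoded
	 imm5 field of zero (e.g. "lsr r0, r1, #32"), which is what the
	 value == 32 case below produces.  */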
23957      newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
23958      temp = newval & 0xf800;
23959      if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
23960	as_bad_where (fixP->fx_file, fixP->fx_line,
23961		      _("invalid shift value: %ld"), (long) value);
23962      /* Shifts of zero must be encoded as LSL.	 */
23963      if (value == 0)
23964	newval = (newval & 0x003f) | T_OPCODE_LSL_I;
23965      /* Shifts of 32 are encoded as zero.  */
23966      else if (value == 32)
23967	value = 0;
23968      newval |= value << 6;
23969      md_number_to_chars (buf, newval, THUMB_SIZE);
23970      break;
23971
23972    case BFD_RELOC_VTABLE_INHERIT:
23973    case BFD_RELOC_VTABLE_ENTRY:
23974      fixP->fx_done = 0;
23975      return;
23976
23977    case BFD_RELOC_ARM_MOVW:
23978    case BFD_RELOC_ARM_MOVT:
23979    case BFD_RELOC_ARM_THUMB_MOVW:
23980    case BFD_RELOC_ARM_THUMB_MOVT:
23981      if (fixP->fx_done || !seg->use_rela_p)
23982	{
23983	  /* REL format relocations are limited to a 16-bit addend.  */
23984	  if (!fixP->fx_done)
23985	    {
23986	      if (value < -0x8000 || value > 0x7fff)
23987		  as_bad_where (fixP->fx_file, fixP->fx_line,
23988				_("offset out of range"));
23989	    }
23990	  else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
23991		   || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
23992	    {
23993	      value >>= 16;
23994	    }
23995
23996	  if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
23997	      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
23998	    {
23999	      newval = get_thumb32_insn (buf);
24000	      newval &= 0xfbf08f00;
24001	      newval |= (value & 0xf000) << 4;
24002	      newval |= (value & 0x0800) << 15;
24003	      newval |= (value & 0x0700) << 4;
24004	      newval |= (value & 0x00ff);
24005	      put_thumb32_insn (buf, newval);
24006	    }
24007	  else
24008	    {
24009	      newval = md_chars_to_number (buf, 4);
24010	      newval &= 0xfff0f000;
24011	      newval |= value & 0x0fff;
24012	      newval |= (value & 0xf000) << 4;
24013	      md_number_to_chars (buf, newval, 4);
24014	    }
24015	}
24016      return;
24017
24018   case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
24019   case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
24020   case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
24021   case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
24022      gas_assert (!fixP->fx_done);
24023      {
24024	bfd_vma insn;
24025	bfd_boolean is_mov;
24026	bfd_vma encoded_addend = value;
24027
24028	/* Check that addend can be encoded in instruction.  */
24029	if (!seg->use_rela_p && (value < 0 || value > 255))
24030	  as_bad_where (fixP->fx_file, fixP->fx_line,
24031			_("the offset 0x%08lX is not representable"),
24032			(unsigned long) encoded_addend);
24033
24034	/* Extract the instruction.  */
24035	insn = md_chars_to_number (buf, THUMB_SIZE);
24036	is_mov = (insn & 0xf800) == 0x2000;
24037
24038	/* Encode insn.  */
24039	if (is_mov)
24040	  {
24041	    if (!seg->use_rela_p)
24042	      insn |= encoded_addend;
24043	  }
24044	else
24045	  {
24046	    int rd, rs;
24047
	    /* The instruction encoding is:
		 0x8000  SUB
		 0x00F0  Rd
		 0x000F  Rs
	       and the following conditions must hold:
		 - ADD (not SUB)
		 - Rd == Rs
		 - Rd <= 7  */
24059	    rd = (insn >> 4) & 0xf;
24060	    rs = insn & 0xf;
24061	    if ((insn & 0x8000) || (rd != rs) || rd > 7)
24062	      as_bad_where (fixP->fx_file, fixP->fx_line,
24063			_("Unable to process relocation for thumb opcode: %lx"),
24064			(unsigned long) insn);
24065
24066	    /* Encode as ADD immediate8 thumb 1 code.  */
24067	    insn = 0x3000 | (rd << 8);
24068
24069	    /* Place the encoded addend into the first 8 bits of the
24070	       instruction.  */
24071	    if (!seg->use_rela_p)
24072	      insn |= encoded_addend;
24073	  }
24074
24075	/* Update the instruction.  */
24076	md_number_to_chars (buf, insn, THUMB_SIZE);
24077      }
24078      break;
24079
24080   case BFD_RELOC_ARM_ALU_PC_G0_NC:
24081   case BFD_RELOC_ARM_ALU_PC_G0:
24082   case BFD_RELOC_ARM_ALU_PC_G1_NC:
24083   case BFD_RELOC_ARM_ALU_PC_G1:
24084   case BFD_RELOC_ARM_ALU_PC_G2:
24085   case BFD_RELOC_ARM_ALU_SB_G0_NC:
24086   case BFD_RELOC_ARM_ALU_SB_G0:
24087   case BFD_RELOC_ARM_ALU_SB_G1_NC:
24088   case BFD_RELOC_ARM_ALU_SB_G1:
24089   case BFD_RELOC_ARM_ALU_SB_G2:
24090     gas_assert (!fixP->fx_done);
24091     if (!seg->use_rela_p)
24092       {
24093	 bfd_vma insn;
24094	 bfd_vma encoded_addend;
24095	 bfd_vma addend_abs = abs (value);
24096
24097	 /* Check that the absolute value of the addend can be
24098	    expressed as an 8-bit constant plus a rotation.  */
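	 /* For example (illustrative): 0x3f0 is representable as 0x3f
	    rotated right by 28, whereas 0x102 is not and is rejected
	    below.  */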
24099	 encoded_addend = encode_arm_immediate (addend_abs);
24100	 if (encoded_addend == (unsigned int) FAIL)
24101	   as_bad_where (fixP->fx_file, fixP->fx_line,
24102			 _("the offset 0x%08lX is not representable"),
24103			 (unsigned long) addend_abs);
24104
24105	 /* Extract the instruction.  */
24106	 insn = md_chars_to_number (buf, INSN_SIZE);
24107
24108	 /* If the addend is positive, use an ADD instruction.
24109	    Otherwise use a SUB.  Take care not to destroy the S bit.  */
24110	 insn &= 0xff1fffff;
24111	 if (value < 0)
24112	   insn |= 1 << 22;
24113	 else
24114	   insn |= 1 << 23;
24115
24116	 /* Place the encoded addend into the first 12 bits of the
24117	    instruction.  */
24118	 insn &= 0xfffff000;
24119	 insn |= encoded_addend;
24120
24121	 /* Update the instruction.  */
24122	 md_number_to_chars (buf, insn, INSN_SIZE);
24123       }
24124     break;
24125
24126    case BFD_RELOC_ARM_LDR_PC_G0:
24127    case BFD_RELOC_ARM_LDR_PC_G1:
24128    case BFD_RELOC_ARM_LDR_PC_G2:
24129    case BFD_RELOC_ARM_LDR_SB_G0:
24130    case BFD_RELOC_ARM_LDR_SB_G1:
24131    case BFD_RELOC_ARM_LDR_SB_G2:
24132      gas_assert (!fixP->fx_done);
24133      if (!seg->use_rela_p)
24134	{
24135	  bfd_vma insn;
24136	  bfd_vma addend_abs = abs (value);
24137
24138	  /* Check that the absolute value of the addend can be
24139	     encoded in 12 bits.  */
24140	  if (addend_abs >= 0x1000)
24141	    as_bad_where (fixP->fx_file, fixP->fx_line,
24142			  _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
24143			  (unsigned long) addend_abs);
24144
24145	  /* Extract the instruction.  */
24146	  insn = md_chars_to_number (buf, INSN_SIZE);
24147
24148	  /* If the addend is negative, clear bit 23 of the instruction.
24149	     Otherwise set it.  */
24150	  if (value < 0)
24151	    insn &= ~(1 << 23);
24152	  else
24153	    insn |= 1 << 23;
24154
24155	  /* Place the absolute value of the addend into the first 12 bits
24156	     of the instruction.  */
24157	  insn &= 0xfffff000;
24158	  insn |= addend_abs;
24159
24160	  /* Update the instruction.  */
24161	  md_number_to_chars (buf, insn, INSN_SIZE);
24162	}
24163      break;
24164
24165    case BFD_RELOC_ARM_LDRS_PC_G0:
24166    case BFD_RELOC_ARM_LDRS_PC_G1:
24167    case BFD_RELOC_ARM_LDRS_PC_G2:
24168    case BFD_RELOC_ARM_LDRS_SB_G0:
24169    case BFD_RELOC_ARM_LDRS_SB_G1:
24170    case BFD_RELOC_ARM_LDRS_SB_G2:
24171      gas_assert (!fixP->fx_done);
24172      if (!seg->use_rela_p)
24173	{
24174	  bfd_vma insn;
24175	  bfd_vma addend_abs = abs (value);
24176
24177	  /* Check that the absolute value of the addend can be
24178	     encoded in 8 bits.  */
24179	  if (addend_abs >= 0x100)
24180	    as_bad_where (fixP->fx_file, fixP->fx_line,
24181			  _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
24182			  (unsigned long) addend_abs);
24183
24184	  /* Extract the instruction.  */
24185	  insn = md_chars_to_number (buf, INSN_SIZE);
24186
24187	  /* If the addend is negative, clear bit 23 of the instruction.
24188	     Otherwise set it.  */
24189	  if (value < 0)
24190	    insn &= ~(1 << 23);
24191	  else
24192	    insn |= 1 << 23;
24193
24194	  /* Place the first four bits of the absolute value of the addend
24195	     into the first 4 bits of the instruction, and the remaining
24196	     four into bits 8 .. 11.  */
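	  /* E.g. an addend of 0x34 (illustrative) becomes 4 in bits [3:0]
	     and 3 in bits [11:8], the split 8-bit offset used by the
	     halfword/signed-byte addressing mode.  */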
24197	  insn &= 0xfffff0f0;
24198	  insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
24199
24200	  /* Update the instruction.  */
24201	  md_number_to_chars (buf, insn, INSN_SIZE);
24202	}
24203      break;
24204
24205    case BFD_RELOC_ARM_LDC_PC_G0:
24206    case BFD_RELOC_ARM_LDC_PC_G1:
24207    case BFD_RELOC_ARM_LDC_PC_G2:
24208    case BFD_RELOC_ARM_LDC_SB_G0:
24209    case BFD_RELOC_ARM_LDC_SB_G1:
24210    case BFD_RELOC_ARM_LDC_SB_G2:
24211      gas_assert (!fixP->fx_done);
24212      if (!seg->use_rela_p)
24213	{
24214	  bfd_vma insn;
24215	  bfd_vma addend_abs = abs (value);
24216
24217	  /* Check that the absolute value of the addend is a multiple of
24218	     four and, when divided by four, fits in 8 bits.  */
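	  /* That is, the reachable offset magnitudes are 0, 4, ... 1020
	     (0xff words).  */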
24219	  if (addend_abs & 0x3)
24220	    as_bad_where (fixP->fx_file, fixP->fx_line,
24221			  _("bad offset 0x%08lX (must be word-aligned)"),
24222			  (unsigned long) addend_abs);
24223
24224	  if ((addend_abs >> 2) > 0xff)
24225	    as_bad_where (fixP->fx_file, fixP->fx_line,
24226			  _("bad offset 0x%08lX (must be an 8-bit number of words)"),
24227			  (unsigned long) addend_abs);
24228
24229	  /* Extract the instruction.  */
24230	  insn = md_chars_to_number (buf, INSN_SIZE);
24231
24232	  /* If the addend is negative, clear bit 23 of the instruction.
24233	     Otherwise set it.  */
24234	  if (value < 0)
24235	    insn &= ~(1 << 23);
24236	  else
24237	    insn |= 1 << 23;
24238
24239	  /* Place the addend (divided by four) into the first eight
24240	     bits of the instruction.  */
24241	  insn &= 0xfffffff0;
24242	  insn |= addend_abs >> 2;
24243
24244	  /* Update the instruction.  */
24245	  md_number_to_chars (buf, insn, INSN_SIZE);
24246	}
24247      break;
24248
24249    case BFD_RELOC_ARM_V4BX:
24250      /* This will need to go in the object file.  */
24251      fixP->fx_done = 0;
24252      break;
24253
24254    case BFD_RELOC_UNUSED:
24255    default:
24256      as_bad_where (fixP->fx_file, fixP->fx_line,
24257		    _("bad relocation fixup type (%d)"), fixP->fx_r_type);
24258    }
24259}
24260
24261/* Translate internal representation of relocation info to BFD target
24262   format.  */
24263
24264arelent *
24265tc_gen_reloc (asection *section, fixS *fixp)
24266{
24267  arelent * reloc;
24268  bfd_reloc_code_real_type code;
24269
24270  reloc = XNEW (arelent);
24271
24272  reloc->sym_ptr_ptr = XNEW (asymbol *);
24273  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
24274  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
24275
24276  if (fixp->fx_pcrel)
24277    {
24278      if (section->use_rela_p)
24279	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
24280      else
24281	fixp->fx_offset = reloc->address;
24282    }
24283  reloc->addend = fixp->fx_offset;
24284
24285  switch (fixp->fx_r_type)
24286    {
24287    case BFD_RELOC_8:
24288      if (fixp->fx_pcrel)
24289	{
24290	  code = BFD_RELOC_8_PCREL;
24291	  break;
24292	}
24293      /* Fall through.  */
24294
24295    case BFD_RELOC_16:
24296      if (fixp->fx_pcrel)
24297	{
24298	  code = BFD_RELOC_16_PCREL;
24299	  break;
24300	}
24301      /* Fall through.  */
24302
24303    case BFD_RELOC_32:
24304      if (fixp->fx_pcrel)
24305	{
24306	  code = BFD_RELOC_32_PCREL;
24307	  break;
24308	}
24309      /* Fall through.  */
24310
24311    case BFD_RELOC_ARM_MOVW:
24312      if (fixp->fx_pcrel)
24313	{
24314	  code = BFD_RELOC_ARM_MOVW_PCREL;
24315	  break;
24316	}
24317      /* Fall through.  */
24318
24319    case BFD_RELOC_ARM_MOVT:
24320      if (fixp->fx_pcrel)
24321	{
24322	  code = BFD_RELOC_ARM_MOVT_PCREL;
24323	  break;
24324	}
24325      /* Fall through.  */
24326
24327    case BFD_RELOC_ARM_THUMB_MOVW:
24328      if (fixp->fx_pcrel)
24329	{
24330	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
24331	  break;
24332	}
24333      /* Fall through.  */
24334
24335    case BFD_RELOC_ARM_THUMB_MOVT:
24336      if (fixp->fx_pcrel)
24337	{
24338	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
24339	  break;
24340	}
24341      /* Fall through.  */
24342
24343    case BFD_RELOC_NONE:
24344    case BFD_RELOC_ARM_PCREL_BRANCH:
24345    case BFD_RELOC_ARM_PCREL_BLX:
24346    case BFD_RELOC_RVA:
24347    case BFD_RELOC_THUMB_PCREL_BRANCH7:
24348    case BFD_RELOC_THUMB_PCREL_BRANCH9:
24349    case BFD_RELOC_THUMB_PCREL_BRANCH12:
24350    case BFD_RELOC_THUMB_PCREL_BRANCH20:
24351    case BFD_RELOC_THUMB_PCREL_BRANCH23:
24352    case BFD_RELOC_THUMB_PCREL_BRANCH25:
24353    case BFD_RELOC_VTABLE_ENTRY:
24354    case BFD_RELOC_VTABLE_INHERIT:
24355#ifdef TE_PE
24356    case BFD_RELOC_32_SECREL:
24357#endif
24358      code = fixp->fx_r_type;
24359      break;
24360
24361    case BFD_RELOC_THUMB_PCREL_BLX:
24362#ifdef OBJ_ELF
24363      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
24364	code = BFD_RELOC_THUMB_PCREL_BRANCH23;
24365      else
24366#endif
24367	code = BFD_RELOC_THUMB_PCREL_BLX;
24368      break;
24369
24370    case BFD_RELOC_ARM_LITERAL:
24371    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then a literal has
	 been referenced across a section boundary.  */
24374      as_bad_where (fixp->fx_file, fixp->fx_line,
24375		    _("literal referenced across section boundary"));
24376      return NULL;
24377
24378#ifdef OBJ_ELF
24379    case BFD_RELOC_ARM_TLS_CALL:
24380    case BFD_RELOC_ARM_THM_TLS_CALL:
24381    case BFD_RELOC_ARM_TLS_DESCSEQ:
24382    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
24383    case BFD_RELOC_ARM_GOT32:
24384    case BFD_RELOC_ARM_GOTOFF:
24385    case BFD_RELOC_ARM_GOT_PREL:
24386    case BFD_RELOC_ARM_PLT32:
24387    case BFD_RELOC_ARM_TARGET1:
24388    case BFD_RELOC_ARM_ROSEGREL32:
24389    case BFD_RELOC_ARM_SBREL32:
24390    case BFD_RELOC_ARM_PREL31:
24391    case BFD_RELOC_ARM_TARGET2:
24392    case BFD_RELOC_ARM_TLS_LDO32:
24393    case BFD_RELOC_ARM_PCREL_CALL:
24394    case BFD_RELOC_ARM_PCREL_JUMP:
24395    case BFD_RELOC_ARM_ALU_PC_G0_NC:
24396    case BFD_RELOC_ARM_ALU_PC_G0:
24397    case BFD_RELOC_ARM_ALU_PC_G1_NC:
24398    case BFD_RELOC_ARM_ALU_PC_G1:
24399    case BFD_RELOC_ARM_ALU_PC_G2:
24400    case BFD_RELOC_ARM_LDR_PC_G0:
24401    case BFD_RELOC_ARM_LDR_PC_G1:
24402    case BFD_RELOC_ARM_LDR_PC_G2:
24403    case BFD_RELOC_ARM_LDRS_PC_G0:
24404    case BFD_RELOC_ARM_LDRS_PC_G1:
24405    case BFD_RELOC_ARM_LDRS_PC_G2:
24406    case BFD_RELOC_ARM_LDC_PC_G0:
24407    case BFD_RELOC_ARM_LDC_PC_G1:
24408    case BFD_RELOC_ARM_LDC_PC_G2:
24409    case BFD_RELOC_ARM_ALU_SB_G0_NC:
24410    case BFD_RELOC_ARM_ALU_SB_G0:
24411    case BFD_RELOC_ARM_ALU_SB_G1_NC:
24412    case BFD_RELOC_ARM_ALU_SB_G1:
24413    case BFD_RELOC_ARM_ALU_SB_G2:
24414    case BFD_RELOC_ARM_LDR_SB_G0:
24415    case BFD_RELOC_ARM_LDR_SB_G1:
24416    case BFD_RELOC_ARM_LDR_SB_G2:
24417    case BFD_RELOC_ARM_LDRS_SB_G0:
24418    case BFD_RELOC_ARM_LDRS_SB_G1:
24419    case BFD_RELOC_ARM_LDRS_SB_G2:
24420    case BFD_RELOC_ARM_LDC_SB_G0:
24421    case BFD_RELOC_ARM_LDC_SB_G1:
24422    case BFD_RELOC_ARM_LDC_SB_G2:
24423    case BFD_RELOC_ARM_V4BX:
24424    case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
24425    case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
24426    case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
24427    case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
24428      code = fixp->fx_r_type;
24429      break;
24430
24431    case BFD_RELOC_ARM_TLS_GOTDESC:
24432    case BFD_RELOC_ARM_TLS_GD32:
24433    case BFD_RELOC_ARM_TLS_LE32:
24434    case BFD_RELOC_ARM_TLS_IE32:
24435    case BFD_RELOC_ARM_TLS_LDM32:
24436      /* BFD will include the symbol's address in the addend.
24437	 But we don't want that, so subtract it out again here.  */
24438      if (!S_IS_COMMON (fixp->fx_addsy))
24439	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
24440      code = fixp->fx_r_type;
24441      break;
24442#endif
24443
24444    case BFD_RELOC_ARM_IMMEDIATE:
24445      as_bad_where (fixp->fx_file, fixp->fx_line,
24446		    _("internal relocation (type: IMMEDIATE) not fixed up"));
24447      return NULL;
24448
24449    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
24450      as_bad_where (fixp->fx_file, fixp->fx_line,
24451		    _("ADRL used for a symbol not defined in the same file"));
24452      return NULL;
24453
24454    case BFD_RELOC_ARM_OFFSET_IMM:
24455      if (section->use_rela_p)
24456	{
24457	  code = fixp->fx_r_type;
24458	  break;
24459	}
24460
24461      if (fixp->fx_addsy != NULL
24462	  && !S_IS_DEFINED (fixp->fx_addsy)
24463	  && S_IS_LOCAL (fixp->fx_addsy))
24464	{
24465	  as_bad_where (fixp->fx_file, fixp->fx_line,
24466			_("undefined local label `%s'"),
24467			S_GET_NAME (fixp->fx_addsy));
24468	  return NULL;
24469	}
24470
24471      as_bad_where (fixp->fx_file, fixp->fx_line,
24472		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
24473      return NULL;
24474
24475    default:
24476      {
24477	const char * type;
24478
24479	switch (fixp->fx_r_type)
24480	  {
24481	  case BFD_RELOC_NONE:		   type = "NONE";	  break;
24482	  case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
24483	  case BFD_RELOC_ARM_SHIFT_IMM:	   type = "SHIFT_IMM";	  break;
24484	  case BFD_RELOC_ARM_SMC:	   type = "SMC";	  break;
24485	  case BFD_RELOC_ARM_SWI:	   type = "SWI";	  break;
24486	  case BFD_RELOC_ARM_MULTI:	   type = "MULTI";	  break;
24487	  case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";	  break;
24488	  case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
24489	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
24490	  case BFD_RELOC_ARM_THUMB_ADD:	   type = "THUMB_ADD";	  break;
24491	  case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
24492	  case BFD_RELOC_ARM_THUMB_IMM:	   type = "THUMB_IMM";	  break;
24493	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
24494	  default:			   type = _("<unknown>"); break;
24495	  }
24496	as_bad_where (fixp->fx_file, fixp->fx_line,
24497		      _("cannot represent %s relocation in this object file format"),
24498		      type);
24499	return NULL;
24500      }
24501    }
24502
24503#ifdef OBJ_ELF
24504  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
24505      && GOT_symbol
24506      && fixp->fx_addsy == GOT_symbol)
24507    {
24508      code = BFD_RELOC_ARM_GOTPC;
24509      reloc->addend = fixp->fx_offset = reloc->address;
24510    }
24511#endif
24512
24513  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
24514
24515  if (reloc->howto == NULL)
24516    {
24517      as_bad_where (fixp->fx_file, fixp->fx_line,
24518		    _("cannot represent %s relocation in this object file format"),
24519		    bfd_get_reloc_code_name (code));
24520      return NULL;
24521    }
24522
24523  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
24524     vtable entry to be used in the relocation's section offset.  */
24525  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
24526    reloc->address = fixp->fx_offset;
24527
24528  return reloc;
24529}
24530
24531/* This fix_new is called by cons via TC_CONS_FIX_NEW.	*/
24532
24533void
24534cons_fix_new_arm (fragS *	frag,
24535		  int		where,
24536		  int		size,
24537		  expressionS * exp,
24538		  bfd_reloc_code_real_type reloc)
24539{
24540  int pcrel = 0;
24541
24542  /* Pick a reloc.
24543     FIXME: @@ Should look at CPU word size.  */
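  /* For example, a ".word sym" directive arrives here with size == 4 and
     selects BFD_RELOC_32; ".byte" and ".short" map to the 8- and 16-bit
     relocations below.  */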
24544  switch (size)
24545    {
24546    case 1:
24547      reloc = BFD_RELOC_8;
24548      break;
24549    case 2:
24550      reloc = BFD_RELOC_16;
24551      break;
24552    case 4:
24553    default:
24554      reloc = BFD_RELOC_32;
24555      break;
24556    case 8:
24557      reloc = BFD_RELOC_64;
24558      break;
24559    }
24560
24561#ifdef TE_PE
24562  if (exp->X_op == O_secrel)
24563  {
24564    exp->X_op = O_symbol;
24565    reloc = BFD_RELOC_32_SECREL;
24566  }
24567#endif
24568
24569  fix_new_exp (frag, where, size, exp, pcrel, reloc);
24570}
24571
24572#if defined (OBJ_COFF)
24573void
24574arm_validate_fix (fixS * fixP)
24575{
24576  /* If the destination of the branch is a defined symbol which does not have
24577     the THUMB_FUNC attribute, then we must be calling a function which has
24578     the (interfacearm) attribute.  We look for the Thumb entry point to that
24579     function and change the branch to refer to that function instead.	*/
24580  if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
24581      && fixP->fx_addsy != NULL
24582      && S_IS_DEFINED (fixP->fx_addsy)
24583      && ! THUMB_IS_FUNC (fixP->fx_addsy))
24584    {
24585      fixP->fx_addsy = find_real_start (fixP->fx_addsy);
24586    }
24587}
24588#endif
24589
24590
24591int
24592arm_force_relocation (struct fix * fixp)
24593{
24594#if defined (OBJ_COFF) && defined (TE_PE)
24595  if (fixp->fx_r_type == BFD_RELOC_RVA)
24596    return 1;
24597#endif
24598
  /* If we have a call or a branch to a function in ARM ISA mode from
     a Thumb function, or vice-versa, force the relocation.  The linker
     can discard these relocations for cores that have BLX, where a
     simple transformation is possible.  */
24603
24604#ifdef OBJ_ELF
24605  switch (fixp->fx_r_type)
24606    {
24607    case BFD_RELOC_ARM_PCREL_JUMP:
24608    case BFD_RELOC_ARM_PCREL_CALL:
24609    case BFD_RELOC_THUMB_PCREL_BLX:
24610      if (THUMB_IS_FUNC (fixp->fx_addsy))
24611	return 1;
24612      break;
24613
24614    case BFD_RELOC_ARM_PCREL_BLX:
24615    case BFD_RELOC_THUMB_PCREL_BRANCH25:
24616    case BFD_RELOC_THUMB_PCREL_BRANCH20:
24617    case BFD_RELOC_THUMB_PCREL_BRANCH23:
24618      if (ARM_IS_FUNC (fixp->fx_addsy))
24619	return 1;
24620      break;
24621
24622    default:
24623      break;
24624    }
24625#endif
24626
24627  /* Resolve these relocations even if the symbol is extern or weak.
24628     Technically this is probably wrong due to symbol preemption.
24629     In practice these relocations do not have enough range to be useful
24630     at dynamic link time, and some code (e.g. in the Linux kernel)
24631     expects these references to be resolved.  */
24632  if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
24633      || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
24634      || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
24635      || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
24636      || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
24637      || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
24638      || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
24639      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
24640      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
24641      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
24642      || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
24643      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
24644      || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
24645      || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
24646    return 0;
24647
24648  /* Always leave these relocations for the linker.  */
24649  if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
24650       && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
24651      || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
24652    return 1;
24653
24654  /* Always generate relocations against function symbols.  */
24655  if (fixp->fx_r_type == BFD_RELOC_32
24656      && fixp->fx_addsy
24657      && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
24658    return 1;
24659
24660  return generic_force_reloc (fixp);
24661}
24662
24663#if defined (OBJ_ELF) || defined (OBJ_COFF)
24664/* Relocations against function names must be left unadjusted,
24665   so that the linker can use this information to generate interworking
24666   stubs.  The MIPS version of this function
24667   also prevents relocations that are mips-16 specific, but I do not
24668   know why it does this.
24669
24670   FIXME:
24671   There is one other problem that ought to be addressed here, but
24672   which currently is not:  Taking the address of a label (rather
24673   than a function) and then later jumping to that address.  Such
24674   addresses also ought to have their bottom bit set (assuming that
24675   they reside in Thumb code), but at the moment they will not.	 */
24676
24677bfd_boolean
24678arm_fix_adjustable (fixS * fixP)
24679{
24680  if (fixP->fx_addsy == NULL)
24681    return 1;
24682
24683  /* Preserve relocations against symbols with function type.  */
24684  if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
24685    return FALSE;
24686
24687  if (THUMB_IS_FUNC (fixP->fx_addsy)
24688      && fixP->fx_subsy == NULL)
24689    return FALSE;
24690
24691  /* We need the symbol name for the VTABLE entries.  */
24692  if (	 fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
24693      || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
24694    return FALSE;
24695
24696  /* Don't allow symbols to be discarded on GOT related relocs.	 */
24697  if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
24698      || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
24699      || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
24700      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
24701      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
24702      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
24703      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
24704      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
24705      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
24706      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
24707      || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
24708      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
24709      || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
24710      || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
24711    return FALSE;
24712
24713  /* Similarly for group relocations.  */
24714  if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
24715       && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
24716      || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
24717    return FALSE;
24718
24719  /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols.  */
24720  if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
24721      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
24722      || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
24723      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
24724      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
24725      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
24726      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
24727      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
24728    return FALSE;
24729
24730  /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
24731     offsets, so keep these symbols.  */
24732  if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
24733      && fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
24734    return FALSE;
24735
24736  return TRUE;
24737}
24738#endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
24739
24740#ifdef OBJ_ELF
24741const char *
24742elf32_arm_target_format (void)
24743{
24744#ifdef TE_SYMBIAN
24745  return (target_big_endian
24746	  ? "elf32-bigarm-symbian"
24747	  : "elf32-littlearm-symbian");
24748#elif defined (TE_VXWORKS)
24749  return (target_big_endian
24750	  ? "elf32-bigarm-vxworks"
24751	  : "elf32-littlearm-vxworks");
24752#elif defined (TE_NACL)
24753  return (target_big_endian
24754	  ? "elf32-bigarm-nacl"
24755	  : "elf32-littlearm-nacl");
24756#else
24757  if (target_big_endian)
24758    return "elf32-bigarm";
24759  else
24760    return "elf32-littlearm";
24761#endif
24762}
24763
24764void
24765armelf_frob_symbol (symbolS * symp,
24766		    int *     puntp)
24767{
24768  elf_frob_symbol (symp, puntp);
24769}
24770#endif
24771
24772/* MD interface: Finalization.	*/
24773
24774void
24775arm_cleanup (void)
24776{
24777  literal_pool * pool;
24778
24779  /* Ensure that all the IT blocks are properly closed.  */
24780  check_it_blocks_finished ();
24781
24782  for (pool = list_of_pools; pool; pool = pool->next)
24783    {
24784      /* Put it at the end of the relevant section.  */
24785      subseg_set (pool->section, pool->sub_section);
24786#ifdef OBJ_ELF
24787      arm_elf_change_section ();
24788#endif
24789      s_ltorg (0);
24790    }
24791}
24792
24793#ifdef OBJ_ELF
24794/* Remove any excess mapping symbols generated for alignment frags in
24795   SEC.  We may have created a mapping symbol before a zero byte
24796   alignment; remove it if there's a mapping symbol after the
24797   alignment.  */
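/* (Mapping symbols are the ARM ELF $a, $t and $d markers; an alignment
   directive can leave one at the very end of a frag where it is immediately
   superseded by the marker that starts the next frag.)  */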
24798static void
24799check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
24800		       void *dummy ATTRIBUTE_UNUSED)
24801{
24802  segment_info_type *seginfo = seg_info (sec);
24803  fragS *fragp;
24804
24805  if (seginfo == NULL || seginfo->frchainP == NULL)
24806    return;
24807
24808  for (fragp = seginfo->frchainP->frch_root;
24809       fragp != NULL;
24810       fragp = fragp->fr_next)
24811    {
24812      symbolS *sym = fragp->tc_frag_data.last_map;
24813      fragS *next = fragp->fr_next;
24814
24815      /* Variable-sized frags have been converted to fixed size by
24816	 this point.  But if this was variable-sized to start with,
24817	 there will be a fixed-size frag after it.  So don't handle
24818	 next == NULL.  */
24819      if (sym == NULL || next == NULL)
24820	continue;
24821
24822      if (S_GET_VALUE (sym) < next->fr_address)
24823	/* Not at the end of this frag.  */
24824	continue;
24825      know (S_GET_VALUE (sym) == next->fr_address);
24826
24827      do
24828	{
24829	  if (next->tc_frag_data.first_map != NULL)
24830	    {
24831	      /* Next frag starts with a mapping symbol.  Discard this
24832		 one.  */
24833	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
24834	      break;
24835	    }
24836
24837	  if (next->fr_next == NULL)
24838	    {
24839	      /* This mapping symbol is at the end of the section.  Discard
24840		 it.  */
24841	      know (next->fr_fix == 0 && next->fr_var == 0);
24842	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
24843	      break;
24844	    }
24845
24846	  /* As long as we have empty frags without any mapping symbols,
24847	     keep looking.  */
24848	  /* If the next frag is non-empty and does not start with a
24849	     mapping symbol, then this mapping symbol is required.  */
24850	  if (next->fr_address != next->fr_next->fr_address)
24851	    break;
24852
24853	  next = next->fr_next;
24854	}
24855      while (next != NULL);
24856    }
24857}
24858#endif
24859
24860/* Adjust the symbol table.  This marks Thumb symbols as distinct from
24861   ARM ones.  */
24862
24863void
24864arm_adjust_symtab (void)
24865{
24866#ifdef OBJ_COFF
24867  symbolS * sym;
24868
24869  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
24870    {
24871      if (ARM_IS_THUMB (sym))
24872	{
24873	  if (THUMB_IS_FUNC (sym))
24874	    {
24875	      /* Mark the symbol as a Thumb function.  */
24876	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
24877		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
24878		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
24879
24880	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
24881		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
24882	      else
24883		as_bad (_("%s: unexpected function type: %d"),
24884			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
24885	    }
24886	  else switch (S_GET_STORAGE_CLASS (sym))
24887	    {
24888	    case C_EXT:
24889	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
24890	      break;
24891	    case C_STAT:
24892	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
24893	      break;
24894	    case C_LABEL:
24895	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
24896	      break;
24897	    default:
24898	      /* Do nothing.  */
24899	      break;
24900	    }
24901	}
24902
24903      if (ARM_IS_INTERWORK (sym))
24904	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
24905    }
24906#endif
24907#ifdef OBJ_ELF
24908  symbolS * sym;
24909  char	    bind;
24910
24911  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
24912    {
24913      if (ARM_IS_THUMB (sym))
24914	{
24915	  elf_symbol_type * elf_sym;
24916
24917	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
24918	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);
24919
24920	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
24921		BFD_ARM_SPECIAL_SYM_TYPE_ANY))
24922	    {
24923	      /* If it's a .thumb_func, declare it as so,
24924		 otherwise tag label as .code 16.  */
24925	      if (THUMB_IS_FUNC (sym))
24926		ARM_SET_SYM_BRANCH_TYPE (elf_sym->internal_elf_sym.st_target_internal,
24927					 ST_BRANCH_TO_THUMB);
24928	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
24929		elf_sym->internal_elf_sym.st_info =
24930		  ELF_ST_INFO (bind, STT_ARM_16BIT);
24931	    }
24932	}
24933    }
24934
24935  /* Remove any overlapping mapping symbols generated by alignment frags.  */
24936  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
24937  /* Now do generic ELF adjustments.  */
24938  elf_adjust_symtab ();
24939#endif
24940}
24941
24942/* MD interface: Initialization.  */
24943
24944static void
24945set_constant_flonums (void)
24946{
24947  int i;
24948
24949  for (i = 0; i < NUM_FLOAT_VALS; i++)
24950    if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
24951      abort ();
24952}
24953
24954/* Auto-select Thumb mode if it's the only available instruction set for the
24955   given architecture.  */
24956
24957static void
24958autoselect_thumb_from_cpu_variant (void)
24959{
24960  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
24961    opcode_select (16);
24962}
24963
24964void
24965md_begin (void)
24966{
24967  unsigned mach;
24968  unsigned int i;
24969
24970  if (	 (arm_ops_hsh = hash_new ()) == NULL
24971      || (arm_cond_hsh = hash_new ()) == NULL
24972      || (arm_shift_hsh = hash_new ()) == NULL
24973      || (arm_psr_hsh = hash_new ()) == NULL
24974      || (arm_v7m_psr_hsh = hash_new ()) == NULL
24975      || (arm_reg_hsh = hash_new ()) == NULL
24976      || (arm_reloc_hsh = hash_new ()) == NULL
24977      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
24978    as_fatal (_("virtual memory exhausted"));
24979
24980  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
24981    hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
24982  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
24983    hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
24984  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
24985    hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
24986  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
24987    hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
24988  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
24989    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
24990		 (void *) (v7m_psrs + i));
24991  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
24992    hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
24993  for (i = 0;
24994       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
24995       i++)
24996    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
24997		 (void *) (barrier_opt_names + i));
24998#ifdef OBJ_ELF
24999  for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
25000    {
25001      struct reloc_entry * entry = reloc_names + i;
25002
25003      if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
25004	/* This makes encode_branch() use the EABI versions of this relocation.  */
25005	entry->reloc = BFD_RELOC_UNUSED;
25006
25007      hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
25008    }
25009#endif
25010
25011  set_constant_flonums ();
25012
  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Mixing legacy options with the new-style options is an error.  */
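  /* For example (illustrative): "-mcpu=arm7tdmi -march=armv5t" selects the
     ARM7TDMI feature set, while combining the legacy "-m7tdmi" with either
     new-style option is diagnosed below.  */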
25017  if (legacy_cpu)
25018    {
25019      if (mcpu_cpu_opt || march_cpu_opt)
25020	as_bad (_("use of old and new-style options to set CPU type"));
25021
25022      mcpu_cpu_opt = legacy_cpu;
25023    }
25024  else if (!mcpu_cpu_opt)
25025    mcpu_cpu_opt = march_cpu_opt;
25026
25027  if (legacy_fpu)
25028    {
25029      if (mfpu_opt)
25030	as_bad (_("use of old and new-style options to set FPU type"));
25031
25032      mfpu_opt = legacy_fpu;
25033    }
25034  else if (!mfpu_opt)
25035    {
25036#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
25037	|| defined (TE_NetBSD) || defined (TE_VXWORKS))
25038      /* Some environments specify a default FPU.  If they don't, infer it
25039	 from the processor.  */
25040      if (mcpu_fpu_opt)
25041	mfpu_opt = mcpu_fpu_opt;
25042      else
25043	mfpu_opt = march_fpu_opt;
25044#else
25045      mfpu_opt = &fpu_default;
25046#endif
25047    }
25048
25049  if (!mfpu_opt)
25050    {
25051      if (mcpu_cpu_opt != NULL)
25052	mfpu_opt = &fpu_default;
25053      else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
25054	mfpu_opt = &fpu_arch_vfp_v2;
25055      else
25056	mfpu_opt = &fpu_arch_fpa;
25057    }
25058
25059#ifdef CPU_DEFAULT
25060  if (!mcpu_cpu_opt)
25061    {
25062      mcpu_cpu_opt = &cpu_default;
25063      selected_cpu = cpu_default;
25064    }
25065  else if (no_cpu_selected ())
25066    selected_cpu = cpu_default;
25067#else
25068  if (mcpu_cpu_opt)
25069    selected_cpu = *mcpu_cpu_opt;
25070  else
25071    mcpu_cpu_opt = &arm_arch_any;
25072#endif
25073
25074  ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
25075
25076  autoselect_thumb_from_cpu_variant ();
25077
25078  arm_arch_used = thumb_arch_used = arm_arch_none;
25079
25080#if defined OBJ_COFF || defined OBJ_ELF
25081  {
25082    unsigned int flags = 0;
25083
25084#if defined OBJ_ELF
25085    flags = meabi_flags;
25086
25087    switch (meabi_flags)
25088      {
25089      case EF_ARM_EABI_UNKNOWN:
25090#endif
25091	/* Set the flags in the private structure.  */
25092	if (uses_apcs_26)      flags |= F_APCS26;
25093	if (support_interwork) flags |= F_INTERWORK;
25094	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
25095	if (pic_code)	       flags |= F_PIC;
25096	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
25097	  flags |= F_SOFT_FLOAT;
25098
25099	switch (mfloat_abi_opt)
25100	  {
25101	  case ARM_FLOAT_ABI_SOFT:
25102	  case ARM_FLOAT_ABI_SOFTFP:
25103	    flags |= F_SOFT_FLOAT;
25104	    break;
25105
25106	  case ARM_FLOAT_ABI_HARD:
25107	    if (flags & F_SOFT_FLOAT)
25108	      as_bad (_("hard-float conflicts with specified fpu"));
25109	    break;
25110	  }
25111
25112	/* Using pure-endian doubles (even if soft-float).	*/
25113	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
25114	  flags |= F_VFP_FLOAT;
25115
25116#if defined OBJ_ELF
25117	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
25118	    flags |= EF_ARM_MAVERICK_FLOAT;
25119	break;
25120
25121      case EF_ARM_EABI_VER4:
25122      case EF_ARM_EABI_VER5:
25123	/* No additional flags to set.	*/
25124	break;
25125
25126      default:
25127	abort ();
25128      }
25129#endif
25130    bfd_set_private_flags (stdoutput, flags);
25131
    /* We have run out of flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.	*/
25135    if (atpcs)
25136      {
25137	asection * sec;
25138
25139	sec = bfd_make_section (stdoutput, ".arm.atpcs");
25140
25141	if (sec != NULL)
25142	  {
25143	    bfd_set_section_flags
25144	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
25145	    bfd_set_section_size (stdoutput, sec, 0);
25146	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
25147	  }
25148      }
25149  }
25150#endif
25151
25152  /* Record the CPU type as well.  */
25153  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
25154    mach = bfd_mach_arm_iWMMXt2;
25155  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
25156    mach = bfd_mach_arm_iWMMXt;
25157  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
25158    mach = bfd_mach_arm_XScale;
25159  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
25160    mach = bfd_mach_arm_ep9312;
25161  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
25162    mach = bfd_mach_arm_5TE;
25163  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
25164    {
25165      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
25166	mach = bfd_mach_arm_5T;
25167      else
25168	mach = bfd_mach_arm_5;
25169    }
25170  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
25171    {
25172      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
25173	mach = bfd_mach_arm_4T;
25174      else
25175	mach = bfd_mach_arm_4;
25176    }
25177  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
25178    mach = bfd_mach_arm_3M;
25179  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
25180    mach = bfd_mach_arm_3;
25181  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
25182    mach = bfd_mach_arm_2a;
25183  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
25184    mach = bfd_mach_arm_2;
25185  else
25186    mach = bfd_mach_arm_unknown;
25187
25188  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
25189}
25190
25191/* Command line processing.  */
25192
25193/* md_parse_option
25194      Invocation line includes a switch not recognized by the base assembler.
25195      See if it's a processor-specific option.
25196
25197      This routine is somewhat complicated by the need for backwards
25198      compatibility (since older releases of gcc can't be changed).
25199      The new options try to make the interface as compatible as
25200      possible with GCC.
25201
25202      New options (supported) are:
25203
25204	      -mcpu=<cpu name>		 Assemble for selected processor
25205	      -march=<architecture name> Assemble for selected architecture
25206	      -mfpu=<fpu architecture>	 Assemble for selected FPU.
25207	      -EB/-mbig-endian		 Big-endian
25208	      -EL/-mlittle-endian	 Little-endian
25209	      -k			 Generate PIC code
25210	      -mthumb			 Start in Thumb mode
25211	      -mthumb-interwork		 Code supports ARM/Thumb interworking
25212
25213	      -m[no-]warn-deprecated     Warn about deprecated features
25214	      -m[no-]warn-syms		 Warn when symbols match instructions
25215
25216      For now we will also provide support for:
25217
25218	      -mapcs-32			 32-bit Program counter
25219	      -mapcs-26			 26-bit Program counter
	      -mapcs-float		 Floats passed in FP registers
25221	      -mapcs-reentrant		 Reentrant code
25222	      -matpcs
      (at some point these will probably be replaced with -mapcs=<list of options>
25224      and -matpcs=<list of options>)
25225
      The remaining options are only supported for backwards compatibility.
25227      Cpu variants, the arm part is optional:
25228	      -m[arm]1		      Currently not supported.
25229	      -m[arm]2, -m[arm]250    Arm 2 and Arm 250 processor
25230	      -m[arm]3		      Arm 3 processor
25231	      -m[arm]6[xx],	      Arm 6 processors
25232	      -m[arm]7[xx][t][[d]m]   Arm 7 processors
25233	      -m[arm]8[10]	      Arm 8 processors
25234	      -m[arm]9[20][tdmi]      Arm 9 processors
25235	      -mstrongarm[110[0]]     StrongARM processors
25236	      -mxscale		      XScale processors
25237	      -m[arm]v[2345[t[e]]]    Arm architectures
25238	      -mall		      All (except the ARM1)
25239      FP variants:
25240	      -mfpa10, -mfpa11	      FPA10 and 11 co-processor instructions
25241	      -mfpe-old		      (No float load/store multiples)
25242	      -mvfpxd		      VFP Single precision
25243	      -mvfp		      All VFP
25244	      -mno-fpu		      Disable all floating point instructions
25245
25246      The following CPU names are recognized:
25247	      arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
25248	      arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
	      arm700i, arm710, arm710t, arm720, arm720t, arm740t, arm710c,
25250	      arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
25251	      arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
	      arm10t, arm10e, arm1020t, arm1020e, arm10200e,
25253	      strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
25254
25255      */
25256
25257const char * md_shortopts = "m:k";
25258
25259#ifdef ARM_BI_ENDIAN
25260#define OPTION_EB (OPTION_MD_BASE + 0)
25261#define OPTION_EL (OPTION_MD_BASE + 1)
25262#else
25263#if TARGET_BYTES_BIG_ENDIAN
25264#define OPTION_EB (OPTION_MD_BASE + 0)
25265#else
25266#define OPTION_EL (OPTION_MD_BASE + 1)
25267#endif
25268#endif
25269#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
25270
25271struct option md_longopts[] =
25272{
25273#ifdef OPTION_EB
25274  {"EB", no_argument, NULL, OPTION_EB},
25275#endif
25276#ifdef OPTION_EL
25277  {"EL", no_argument, NULL, OPTION_EL},
25278#endif
25279  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
25280  {NULL, no_argument, NULL, 0}
25281};
25282
25283
25284size_t md_longopts_size = sizeof (md_longopts);
25285
25286struct arm_option_table
25287{
25288  const char *option;		/* Option name to match.  */
25289  const char *help;		/* Help information.  */
25290  int  *var;		/* Variable to change.	*/
25291  int	value;		/* What to change it to.  */
25292  const char *deprecated;	/* If non-null, print this message.  */
25293};
25294
25295struct arm_option_table arm_opts[] =
25296{
25297  {"k",	     N_("generate PIC code"),	   &pic_code,	 1, NULL},
25298  {"mthumb", N_("assemble Thumb code"),	   &thumb_mode,	 1, NULL},
25299  {"mthumb-interwork", N_("support ARM/Thumb interworking"),
25300   &support_interwork, 1, NULL},
25301  {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
25302  {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
25303  {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
25304   1, NULL},
25305  {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
25306  {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
25307  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
25308  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
25309   NULL},
25310
  /* These are recognized by the assembler, but have no effect on code.  */
25312  {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
25313  {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
25314
25315  {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
25316  {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
25317   &warn_on_deprecated, 0, NULL},
25318  {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms), TRUE, NULL},
25319  {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms), FALSE, NULL},
25320  {NULL, NULL, NULL, 0, NULL}
25321};
25322
25323struct arm_legacy_option_table
25324{
25325  const char *option;				/* Option name to match.  */
25326  const arm_feature_set	**var;		/* Variable to change.	*/
25327  const arm_feature_set	value;		/* What to change it to.  */
25328  const char *deprecated;			/* If non-null, print this message.  */
25329};
25330
25331const struct arm_legacy_option_table arm_legacy_opts[] =
25332{
25333  /* DON'T add any new processors to this list -- we want the whole list
25334     to go away...  Add them to the processors table instead.  */
25335  {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
25336  {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
25337  {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
25338  {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
25339  {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
25340  {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
25341  {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
25342  {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
25343  {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
25344  {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
25345  {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
25346  {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
25347  {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
25348  {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
25349  {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
25350  {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
25351  {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
25352  {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
25353  {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
25354  {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
25355  {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
25356  {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
25357  {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
25358  {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
25359  {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
25360  {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
25361  {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
25362  {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
25363  {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
25364  {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
25365  {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
25366  {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
25367  {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
25368  {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
25369  {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
25370  {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
25371  {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
25372  {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
25373  {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
25374  {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
25375  {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
25376  {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
25377  {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
25378  {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
25379  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
25380  {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
25381  {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
25382  {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
25383  {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
25384  {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
25385  {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
25386  {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
25387  {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
25388  {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
25389  {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
25390  {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
25391  {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
25392  {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
25393  {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
25394  {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
25395  {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
25396  {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
25397  {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
25398  {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
25399  {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
25400  {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
25401  {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
25402  {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
25403  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
25404  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
25405   N_("use -mcpu=strongarm110")},
25406  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
25407   N_("use -mcpu=strongarm1100")},
25408  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
25409   N_("use -mcpu=strongarm1110")},
25410  {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
25411  {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
25412  {"mall",	 &legacy_cpu, ARM_ANY,	       N_("use -mcpu=all")},
25413
25414  /* Architecture variants -- don't add any more to this list either.  */
25415  {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
25416  {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
25417  {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
25418  {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
25419  {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
25420  {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
25421  {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
25422  {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
25423  {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
25424  {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
25425  {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
25426  {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
25427  {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
25428  {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
25429  {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
25430  {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
25431  {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
25432  {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
25433
25434  /* Floating point variants -- don't add any more to this list either.	 */
25435  {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
25436  {"mfpa10",   &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
25437  {"mfpa11",   &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
25438  {"mno-fpu",  &legacy_fpu, ARM_ARCH_NONE,
25439   N_("use either -mfpu=softfpa or -mfpu=softvfp")},
25440
25441  {NULL, NULL, ARM_ARCH_NONE, NULL}
25442};
25443
25444struct arm_cpu_option_table
25445{
25446  const char *name;
25447  size_t name_len;
25448  const arm_feature_set	value;
25449  /* For some CPUs we assume an FPU unless the user explicitly sets
25450     -mfpu=...	*/
25451  const arm_feature_set	default_fpu;
25452  /* The canonical name of the CPU, or NULL to use NAME converted to upper
25453     case.  */
25454  const char *canonical_name;
25455};
25456
25457/* This list should, at a minimum, contain all the cpu names
25458   recognized by GCC.  */
25459#define ARM_CPU_OPT(N, V, DF, CN) { N, sizeof (N) - 1, V, DF, CN }
25460static const struct arm_cpu_option_table arm_cpus[] =
25461{
25462  ARM_CPU_OPT ("all",		ARM_ANY,	 FPU_ARCH_FPA,    NULL),
25463  ARM_CPU_OPT ("arm1",		ARM_ARCH_V1,	 FPU_ARCH_FPA,    NULL),
25464  ARM_CPU_OPT ("arm2",		ARM_ARCH_V2,	 FPU_ARCH_FPA,    NULL),
25465  ARM_CPU_OPT ("arm250",	ARM_ARCH_V2S,	 FPU_ARCH_FPA,    NULL),
25466  ARM_CPU_OPT ("arm3",		ARM_ARCH_V2S,	 FPU_ARCH_FPA,    NULL),
25467  ARM_CPU_OPT ("arm6",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
25468  ARM_CPU_OPT ("arm60",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
25469  ARM_CPU_OPT ("arm600",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
25470  ARM_CPU_OPT ("arm610",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
25471  ARM_CPU_OPT ("arm620",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
25472  ARM_CPU_OPT ("arm7",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
25473  ARM_CPU_OPT ("arm7m",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
25474  ARM_CPU_OPT ("arm7d",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
25475  ARM_CPU_OPT ("arm7dm",	ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
25476  ARM_CPU_OPT ("arm7di",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
25477  ARM_CPU_OPT ("arm7dmi",	ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
25478  ARM_CPU_OPT ("arm70",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
25479  ARM_CPU_OPT ("arm700",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
25480  ARM_CPU_OPT ("arm700i",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
25481  ARM_CPU_OPT ("arm710",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
25482  ARM_CPU_OPT ("arm710t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
25483  ARM_CPU_OPT ("arm720",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
25484  ARM_CPU_OPT ("arm720t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
25485  ARM_CPU_OPT ("arm740t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
25486  ARM_CPU_OPT ("arm710c",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
25487  ARM_CPU_OPT ("arm7100",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
25488  ARM_CPU_OPT ("arm7500",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
25489  ARM_CPU_OPT ("arm7500fe",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
25490  ARM_CPU_OPT ("arm7t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
25491  ARM_CPU_OPT ("arm7tdmi",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
25492  ARM_CPU_OPT ("arm7tdmi-s",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
25493  ARM_CPU_OPT ("arm8",		ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
25494  ARM_CPU_OPT ("arm810",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
25495  ARM_CPU_OPT ("strongarm",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
25496  ARM_CPU_OPT ("strongarm1",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
25497  ARM_CPU_OPT ("strongarm110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
25498  ARM_CPU_OPT ("strongarm1100",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
25499  ARM_CPU_OPT ("strongarm1110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
25500  ARM_CPU_OPT ("arm9",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
25501  ARM_CPU_OPT ("arm920",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    "ARM920T"),
25502  ARM_CPU_OPT ("arm920t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
25503  ARM_CPU_OPT ("arm922t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
25504  ARM_CPU_OPT ("arm940t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
25505  ARM_CPU_OPT ("arm9tdmi",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL),
25506  ARM_CPU_OPT ("fa526",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL),
25507  ARM_CPU_OPT ("fa626",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL),
25508  /* For V5 or later processors we default to using VFP, but the user
25509     should really set the FPU type explicitly.  */
25510  ARM_CPU_OPT ("arm9e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
25511  ARM_CPU_OPT ("arm9e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
25512  ARM_CPU_OPT ("arm926ej",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"),
25513  ARM_CPU_OPT ("arm926ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"),
25514  ARM_CPU_OPT ("arm926ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL),
25515  ARM_CPU_OPT ("arm946e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
25516  ARM_CPU_OPT ("arm946e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM946E-S"),
25517  ARM_CPU_OPT ("arm946e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
25518  ARM_CPU_OPT ("arm966e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
25519  ARM_CPU_OPT ("arm966e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM966E-S"),
25520  ARM_CPU_OPT ("arm966e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
25521  ARM_CPU_OPT ("arm968e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
25522  ARM_CPU_OPT ("arm10t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
25523  ARM_CPU_OPT ("arm10tdmi",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
25524  ARM_CPU_OPT ("arm10e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
25525  ARM_CPU_OPT ("arm1020",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM1020E"),
25526  ARM_CPU_OPT ("arm1020t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
25527  ARM_CPU_OPT ("arm1020e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
25528  ARM_CPU_OPT ("arm1022e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
25529  ARM_CPU_OPT ("arm1026ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2,
25530								 "ARM1026EJ-S"),
25531  ARM_CPU_OPT ("arm1026ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL),
25532  ARM_CPU_OPT ("fa606te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
25533  ARM_CPU_OPT ("fa616te",	ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL),
25534  ARM_CPU_OPT ("fa626te",	ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL),
25535  ARM_CPU_OPT ("fmp626",	ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL),
25536  ARM_CPU_OPT ("fa726te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
25537  ARM_CPU_OPT ("arm1136js",	ARM_ARCH_V6,	 FPU_NONE,	  "ARM1136J-S"),
25538  ARM_CPU_OPT ("arm1136j-s",	ARM_ARCH_V6,	 FPU_NONE,	  NULL),
25539  ARM_CPU_OPT ("arm1136jfs",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2,
25540								 "ARM1136JF-S"),
25541  ARM_CPU_OPT ("arm1136jf-s",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2, NULL),
25542  ARM_CPU_OPT ("mpcore",	ARM_ARCH_V6K,	 FPU_ARCH_VFP_V2, "MPCore"),
25543  ARM_CPU_OPT ("mpcorenovfp",	ARM_ARCH_V6K,	 FPU_NONE,	  "MPCore"),
25544  ARM_CPU_OPT ("arm1156t2-s",	ARM_ARCH_V6T2,	 FPU_NONE,	  NULL),
25545  ARM_CPU_OPT ("arm1156t2f-s",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP_V2, NULL),
25546  ARM_CPU_OPT ("arm1176jz-s",	ARM_ARCH_V6KZ,	 FPU_NONE,	  NULL),
25547  ARM_CPU_OPT ("arm1176jzf-s",	ARM_ARCH_V6KZ,	 FPU_ARCH_VFP_V2, NULL),
25548  ARM_CPU_OPT ("cortex-a5",	ARM_ARCH_V7A_MP_SEC,
25549						 FPU_NONE,	  "Cortex-A5"),
25550  ARM_CPU_OPT ("cortex-a7",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
25551								  "Cortex-A7"),
25552  ARM_CPU_OPT ("cortex-a8",	ARM_ARCH_V7A_SEC,
25553						 ARM_FEATURE_COPROC (FPU_VFP_V3
25554							| FPU_NEON_EXT_V1),
25555								  "Cortex-A8"),
25556  ARM_CPU_OPT ("cortex-a9",	ARM_ARCH_V7A_MP_SEC,
25557						 ARM_FEATURE_COPROC (FPU_VFP_V3
25558							| FPU_NEON_EXT_V1),
25559								  "Cortex-A9"),
25560  ARM_CPU_OPT ("cortex-a12",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
25561								  "Cortex-A12"),
25562  ARM_CPU_OPT ("cortex-a15",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
25563								  "Cortex-A15"),
25564  ARM_CPU_OPT ("cortex-a17",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
25565								  "Cortex-A17"),
25566  ARM_CPU_OPT ("cortex-a32",    ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
25567								  "Cortex-A32"),
25568  ARM_CPU_OPT ("cortex-a35",    ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
25569								  "Cortex-A35"),
25570  ARM_CPU_OPT ("cortex-a53",    ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
25571								  "Cortex-A53"),
25572  ARM_CPU_OPT ("cortex-a57",    ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
25573								  "Cortex-A57"),
25574  ARM_CPU_OPT ("cortex-a72",    ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
25575								  "Cortex-A72"),
25576  ARM_CPU_OPT ("cortex-a73",    ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
25577								  "Cortex-A73"),
25578  ARM_CPU_OPT ("cortex-r4",	ARM_ARCH_V7R,	 FPU_NONE,	  "Cortex-R4"),
25579  ARM_CPU_OPT ("cortex-r4f",	ARM_ARCH_V7R,	 FPU_ARCH_VFP_V3D16,
25580								  "Cortex-R4F"),
25581  ARM_CPU_OPT ("cortex-r5",	ARM_ARCH_V7R_IDIV,
25582						 FPU_NONE,	  "Cortex-R5"),
25583  ARM_CPU_OPT ("cortex-r7",	ARM_ARCH_V7R_IDIV,
25584						 FPU_ARCH_VFP_V3D16,
25585								  "Cortex-R7"),
25586  ARM_CPU_OPT ("cortex-r8",	ARM_ARCH_V7R_IDIV,
25587						 FPU_ARCH_VFP_V3D16,
25588								  "Cortex-R8"),
25589  ARM_CPU_OPT ("cortex-m33",	ARM_ARCH_V8M_MAIN_DSP,
25590						 FPU_NONE,	  "Cortex-M33"),
25591  ARM_CPU_OPT ("cortex-m23",	ARM_ARCH_V8M_BASE,
25592						 FPU_NONE,	  "Cortex-M23"),
25593  ARM_CPU_OPT ("cortex-m7",	ARM_ARCH_V7EM,	 FPU_NONE,	  "Cortex-M7"),
25594  ARM_CPU_OPT ("cortex-m4",	ARM_ARCH_V7EM,	 FPU_NONE,	  "Cortex-M4"),
25595  ARM_CPU_OPT ("cortex-m3",	ARM_ARCH_V7M,	 FPU_NONE,	  "Cortex-M3"),
25596  ARM_CPU_OPT ("cortex-m1",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M1"),
25597  ARM_CPU_OPT ("cortex-m0",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M0"),
25598  ARM_CPU_OPT ("cortex-m0plus",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M0+"),
25599  ARM_CPU_OPT ("exynos-m1",	ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
25600								  "Samsung " \
25601								  "Exynos M1"),
25602  ARM_CPU_OPT ("falkor",	ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
25603								  "Qualcomm "
25604								  "Falkor"),
25605  ARM_CPU_OPT ("qdf24xx",	ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
25606								  "Qualcomm "
25607								  "QDF24XX"),
25608
25609  /* ??? XSCALE is really an architecture.  */
25610  ARM_CPU_OPT ("xscale",	ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
25611  /* ??? iwmmxt is not a processor.  */
25612  ARM_CPU_OPT ("iwmmxt",	ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL),
25613  ARM_CPU_OPT ("iwmmxt2",	ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL),
25614  ARM_CPU_OPT ("i80200",	ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
25615  /* Maverick */
25616  ARM_CPU_OPT ("ep9312",	ARM_FEATURE_LOW (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
25617						 FPU_ARCH_MAVERICK, "ARM920T"),
25618  /* Marvell processors.  */
25619  ARM_CPU_OPT ("marvell-pj4",   ARM_FEATURE_CORE (ARM_AEXT_V7A | ARM_EXT_MP
25620						  | ARM_EXT_SEC,
25621						  ARM_EXT2_V6T2_V8M),
25622						FPU_ARCH_VFP_V3D16, NULL),
25623  ARM_CPU_OPT ("marvell-whitney", ARM_FEATURE_CORE (ARM_AEXT_V7A | ARM_EXT_MP
25624						    | ARM_EXT_SEC,
25625						    ARM_EXT2_V6T2_V8M),
25626					       FPU_ARCH_NEON_VFP_V4, NULL),
25627  /* APM X-Gene family.  */
25628  ARM_CPU_OPT ("xgene1",        ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
25629	                                                          "APM X-Gene 1"),
25630  ARM_CPU_OPT ("xgene2",        ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
25631	                                                          "APM X-Gene 2"),
25632
25633  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
25634};
25635#undef ARM_CPU_OPT
25636
25637struct arm_arch_option_table
25638{
25639  const char *name;
25640  size_t name_len;
25641  const arm_feature_set	value;
25642  const arm_feature_set	default_fpu;
25643};
25644
25645/* This list should, at a minimum, contain all the architecture names
25646   recognized by GCC.  */
25647#define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
25648static const struct arm_arch_option_table arm_archs[] =
25649{
25650  ARM_ARCH_OPT ("all",		ARM_ANY,	 FPU_ARCH_FPA),
25651  ARM_ARCH_OPT ("armv1",	ARM_ARCH_V1,	 FPU_ARCH_FPA),
25652  ARM_ARCH_OPT ("armv2",	ARM_ARCH_V2,	 FPU_ARCH_FPA),
25653  ARM_ARCH_OPT ("armv2a",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
25654  ARM_ARCH_OPT ("armv2s",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
25655  ARM_ARCH_OPT ("armv3",	ARM_ARCH_V3,	 FPU_ARCH_FPA),
25656  ARM_ARCH_OPT ("armv3m",	ARM_ARCH_V3M,	 FPU_ARCH_FPA),
25657  ARM_ARCH_OPT ("armv4",	ARM_ARCH_V4,	 FPU_ARCH_FPA),
25658  ARM_ARCH_OPT ("armv4xm",	ARM_ARCH_V4xM,	 FPU_ARCH_FPA),
25659  ARM_ARCH_OPT ("armv4t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA),
25660  ARM_ARCH_OPT ("armv4txm",	ARM_ARCH_V4TxM,	 FPU_ARCH_FPA),
25661  ARM_ARCH_OPT ("armv5",	ARM_ARCH_V5,	 FPU_ARCH_VFP),
25662  ARM_ARCH_OPT ("armv5t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP),
25663  ARM_ARCH_OPT ("armv5txm",	ARM_ARCH_V5TxM,	 FPU_ARCH_VFP),
25664  ARM_ARCH_OPT ("armv5te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP),
25665  ARM_ARCH_OPT ("armv5texp",	ARM_ARCH_V5TExP, FPU_ARCH_VFP),
25666  ARM_ARCH_OPT ("armv5tej",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP),
25667  ARM_ARCH_OPT ("armv6",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
25668  ARM_ARCH_OPT ("armv6j",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
25669  ARM_ARCH_OPT ("armv6k",	ARM_ARCH_V6K,	 FPU_ARCH_VFP),
25670  ARM_ARCH_OPT ("armv6z",	ARM_ARCH_V6Z,	 FPU_ARCH_VFP),
25671  /* The official spelling of this variant is ARMv6KZ; the name "armv6zk" is
25672     kept to preserve existing behaviour.  */
25673  ARM_ARCH_OPT ("armv6kz",	ARM_ARCH_V6KZ,	 FPU_ARCH_VFP),
25674  ARM_ARCH_OPT ("armv6zk",	ARM_ARCH_V6KZ,	 FPU_ARCH_VFP),
25675  ARM_ARCH_OPT ("armv6t2",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP),
25676  ARM_ARCH_OPT ("armv6kt2",	ARM_ARCH_V6KT2,	 FPU_ARCH_VFP),
25677  ARM_ARCH_OPT ("armv6zt2",	ARM_ARCH_V6ZT2,	 FPU_ARCH_VFP),
25678  /* The official spelling of this variant is ARMv6KZ; the name "armv6zkt2" is
25679     kept to preserve existing behaviour.  */
25680  ARM_ARCH_OPT ("armv6kzt2",	ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
25681  ARM_ARCH_OPT ("armv6zkt2",	ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
25682  ARM_ARCH_OPT ("armv6-m",	ARM_ARCH_V6M,	 FPU_ARCH_VFP),
25683  ARM_ARCH_OPT ("armv6s-m",	ARM_ARCH_V6SM,	 FPU_ARCH_VFP),
25684  ARM_ARCH_OPT ("armv7",	ARM_ARCH_V7,	 FPU_ARCH_VFP),
25685  /* The official spelling of the ARMv7 profile variants is the dashed form.
25686     Accept the non-dashed form for compatibility with old toolchains.  */
25687  ARM_ARCH_OPT ("armv7a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
25688  ARM_ARCH_OPT ("armv7ve",	ARM_ARCH_V7VE,	 FPU_ARCH_VFP),
25689  ARM_ARCH_OPT ("armv7r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
25690  ARM_ARCH_OPT ("armv7m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
25691  ARM_ARCH_OPT ("armv7-a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
25692  ARM_ARCH_OPT ("armv7-r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
25693  ARM_ARCH_OPT ("armv7-m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
25694  ARM_ARCH_OPT ("armv7e-m",	ARM_ARCH_V7EM,	 FPU_ARCH_VFP),
25695  ARM_ARCH_OPT ("armv8-m.base",	ARM_ARCH_V8M_BASE, FPU_ARCH_VFP),
25696  ARM_ARCH_OPT ("armv8-m.main",	ARM_ARCH_V8M_MAIN, FPU_ARCH_VFP),
25697  ARM_ARCH_OPT ("armv8-a",	ARM_ARCH_V8A,	 FPU_ARCH_VFP),
25698  ARM_ARCH_OPT ("armv8.1-a",	ARM_ARCH_V8_1A,	 FPU_ARCH_VFP),
25699  ARM_ARCH_OPT ("armv8.2-a",	ARM_ARCH_V8_2A,	 FPU_ARCH_VFP),
25700  ARM_ARCH_OPT ("armv8.3-a",	ARM_ARCH_V8_3A,	 FPU_ARCH_VFP),
25701  ARM_ARCH_OPT ("xscale",	ARM_ARCH_XSCALE, FPU_ARCH_VFP),
25702  ARM_ARCH_OPT ("iwmmxt",	ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
25703  ARM_ARCH_OPT ("iwmmxt2",	ARM_ARCH_IWMMXT2,FPU_ARCH_VFP),
25704  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
25705};
25706#undef ARM_ARCH_OPT
25707
25708/* ISA extensions in the co-processor and main instruction set space.  */
25709struct arm_option_extension_value_table
25710{
25711  const char *name;
25712  size_t name_len;
25713  const arm_feature_set merge_value;
25714  const arm_feature_set clear_value;
25715  /* List of architectures for which an extension is available.  ARM_ARCH_NONE
25716     indicates that an extension is available for all architectures while
25717     ARM_ANY marks an empty entry.  */
25718  const arm_feature_set allowed_archs[2];
25719};
25720
25721/* The following table must be in alphabetical order with a NULL last
25722   entry.  */
25723#define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
25724#define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }
25725static const struct arm_option_extension_value_table arm_extensions[] =
25726{
25727  ARM_EXT_OPT ("crc",  ARCH_CRC_ARMV8, ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
25728			 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
25729  ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
25730			 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8),
25731				   ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
25732  ARM_EXT_OPT ("dsp",	ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
25733			ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
25734			ARM_FEATURE_CORE (ARM_EXT_V7M, ARM_EXT2_V8M)),
25735  ARM_EXT_OPT ("fp",     FPU_ARCH_VFP_ARMV8, ARM_FEATURE_COPROC (FPU_VFP_ARMV8),
25736				   ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
25737  ARM_EXT_OPT ("fp16",  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
25738			ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
25739			ARM_ARCH_V8_2A),
25740  ARM_EXT_OPT2 ("idiv",	ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
25741			ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
25742			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
25743			ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
25744  ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT),
25745			ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT), ARM_ARCH_NONE),
25746  ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2),
25747			ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2), ARM_ARCH_NONE),
25748  ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK),
25749			ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK), ARM_ARCH_NONE),
25750  ARM_EXT_OPT2 ("mp",	ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
25751			ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
25752			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
25753			ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
25754  ARM_EXT_OPT ("os",	ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
25755			ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
25756				   ARM_FEATURE_CORE_LOW (ARM_EXT_V6M)),
25757  ARM_EXT_OPT ("pan",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN),
25758			ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_PAN, 0),
25759			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
25760  ARM_EXT_OPT ("ras",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS),
25761			ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_RAS, 0),
25762			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
25763  ARM_EXT_OPT ("rdma",  FPU_ARCH_NEON_VFP_ARMV8_1,
25764			ARM_FEATURE_COPROC (FPU_NEON_ARMV8 | FPU_NEON_EXT_RDMA),
25765			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
25766  ARM_EXT_OPT2 ("sec",	ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
25767			ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
25768			ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
25769			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
25770  ARM_EXT_OPT ("simd",  FPU_ARCH_NEON_VFP_ARMV8,
25771			ARM_FEATURE_COPROC (FPU_NEON_ARMV8),
25772			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
25773  ARM_EXT_OPT ("virt",	ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT | ARM_EXT_ADIV
25774				     | ARM_EXT_DIV),
25775			ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT),
25776				   ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
25777  ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE),
25778			ARM_FEATURE_COPROC (ARM_CEXT_XSCALE), ARM_ARCH_NONE),
25779  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, { ARM_ARCH_NONE, ARM_ARCH_NONE } }
25780};
25781#undef ARM_EXT_OPT
25782
25783/* ISA floating-point and Advanced SIMD extensions.  */
25784struct arm_option_fpu_value_table
25785{
25786  const char *name;
25787  const arm_feature_set value;
25788};
25789
25790/* This list should, at a minimum, contain all the fpu names
25791   recognized by GCC.  */
25792static const struct arm_option_fpu_value_table arm_fpus[] =
25793{
25794  {"softfpa",		FPU_NONE},
25795  {"fpe",		FPU_ARCH_FPE},
25796  {"fpe2",		FPU_ARCH_FPE},
25797  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
25798  {"fpa",		FPU_ARCH_FPA},
25799  {"fpa10",		FPU_ARCH_FPA},
25800  {"fpa11",		FPU_ARCH_FPA},
25801  {"arm7500fe",		FPU_ARCH_FPA},
25802  {"softvfp",		FPU_ARCH_VFP},
25803  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
25804  {"vfp",		FPU_ARCH_VFP_V2},
25805  {"vfp9",		FPU_ARCH_VFP_V2},
25806  {"vfp3",              FPU_ARCH_VFP_V3}, /* For backwards compatbility.  */
25807  {"vfp10",		FPU_ARCH_VFP_V2},
25808  {"vfp10-r0",		FPU_ARCH_VFP_V1},
25809  {"vfpxd",		FPU_ARCH_VFP_V1xD},
25810  {"vfpv2",		FPU_ARCH_VFP_V2},
25811  {"vfpv3",		FPU_ARCH_VFP_V3},
25812  {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
25813  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
25814  {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
25815  {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
25816  {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
25817  {"arm1020t",		FPU_ARCH_VFP_V1},
25818  {"arm1020e",		FPU_ARCH_VFP_V2},
25819  {"arm1136jfs",	FPU_ARCH_VFP_V2},
25820  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
25821  {"maverick",		FPU_ARCH_MAVERICK},
25822  {"neon",              FPU_ARCH_VFP_V3_PLUS_NEON_V1},
25823  {"neon-fp16",		FPU_ARCH_NEON_FP16},
25824  {"vfpv4",		FPU_ARCH_VFP_V4},
25825  {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
25826  {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
25827  {"fpv5-d16",		FPU_ARCH_VFP_V5D16},
25828  {"fpv5-sp-d16",	FPU_ARCH_VFP_V5_SP_D16},
25829  {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
25830  {"fp-armv8",		FPU_ARCH_VFP_ARMV8},
25831  {"neon-fp-armv8",	FPU_ARCH_NEON_VFP_ARMV8},
25832  {"crypto-neon-fp-armv8",
25833			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
25834  {"neon-fp-armv8.1",	FPU_ARCH_NEON_VFP_ARMV8_1},
25835  {"crypto-neon-fp-armv8.1",
25836			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1},
25837  {NULL,		ARM_ARCH_NONE}
25838};
25839
25840struct arm_option_value_table
25841{
25842  const char *name;
25843  long value;
25844};
25845
25846static const struct arm_option_value_table arm_float_abis[] =
25847{
25848  {"hard",	ARM_FLOAT_ABI_HARD},
25849  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
25850  {"soft",	ARM_FLOAT_ABI_SOFT},
25851  {NULL,	0}
25852};
25853
25854#ifdef OBJ_ELF
25855/* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
25856static const struct arm_option_value_table arm_eabis[] =
25857{
25858  {"gnu",	EF_ARM_EABI_UNKNOWN},
25859  {"4",		EF_ARM_EABI_VER4},
25860  {"5",		EF_ARM_EABI_VER5},
25861  {NULL,	0}
25862};
25863#endif
25864
25865struct arm_long_option_table
25866{
25867  const char * option;		/* Substring to match.	*/
25868  const char * help;			/* Help information.  */
25869  int (* func) (const char * subopt);	/* Function to decode sub-option.  */
25870  const char * deprecated;		/* If non-null, print this message.  */
25871};
25872
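/* Parse an architectural extension suffix of the form "+ext[+ext...]",
   as appended to a -mcpu= or -march= argument, and apply it to the
   feature set pointed to by *OPT_P, which is replaced by a freshly
   allocated copy.  Extensions prefixed with "no" are removed.  Returns
   FALSE and reports an error if the string is malformed.  */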
25873static bfd_boolean
25874arm_parse_extension (const char *str, const arm_feature_set **opt_p)
25875{
25876  arm_feature_set *ext_set = XNEW (arm_feature_set);
25877
25878  /* We insist on extensions being specified in alphabetical order, and with
25879     extensions being added before being removed.  We achieve this by having
25880     the global ARM_EXTENSIONS table in alphabetical order, and using the
25881     ADDING_VALUE variable to indicate whether we are adding an extension (1)
25882     or removing it (0) and only allowing it to change in the order
25883     -1 -> 1 -> 0.  */
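  /* For example, with an ARMv8-A base, "+fp+nocrypto" is accepted
     (additions come before removals), whereas "+nocrypto+fp" is rejected,
     as is "+simd+fp", since "fp" sorts before "simd".  */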
25884  const struct arm_option_extension_value_table * opt = NULL;
25885  const arm_feature_set arm_any = ARM_ANY;
25886  int adding_value = -1;
25887
25888  /* Copy the feature set, so that we can modify it.  */
25889  *ext_set = **opt_p;
25890  *opt_p = ext_set;
25891
25892  while (str != NULL && *str != 0)
25893    {
25894      const char *ext;
25895      size_t len;
25896
25897      if (*str != '+')
25898	{
25899	  as_bad (_("invalid architectural extension"));
25900	  return FALSE;
25901	}
25902
25903      str++;
25904      ext = strchr (str, '+');
25905
25906      if (ext != NULL)
25907	len = ext - str;
25908      else
25909	len = strlen (str);
25910
25911      if (len >= 2 && strncmp (str, "no", 2) == 0)
25912	{
25913	  if (adding_value != 0)
25914	    {
25915	      adding_value = 0;
25916	      opt = arm_extensions;
25917	    }
25918
25919	  len -= 2;
25920	  str += 2;
25921	}
25922      else if (len > 0)
25923	{
25924	  if (adding_value == -1)
25925	    {
25926	      adding_value = 1;
25927	      opt = arm_extensions;
25928	    }
25929	  else if (adding_value != 1)
25930	    {
25931	      as_bad (_("must specify extensions to add before specifying "
25932			"those to remove"));
25933	      return FALSE;
25934	    }
25935	}
25936
25937      if (len == 0)
25938	{
25939	  as_bad (_("missing architectural extension"));
25940	  return FALSE;
25941	}
25942
25943      gas_assert (adding_value != -1);
25944      gas_assert (opt != NULL);
25945
25946      /* Scan over the options table trying to find an exact match.  */
25947      for (; opt->name != NULL; opt++)
25948	if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
25949	  {
25950	    int i, nb_allowed_archs =
25951	      sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
25952	    /* Check we can apply the extension to this architecture.  */
25953	    for (i = 0; i < nb_allowed_archs; i++)
25954	      {
25955		/* Empty entry.  */
25956		if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any))
25957		  continue;
25958		if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *ext_set))
25959		  break;
25960	      }
25961	    if (i == nb_allowed_archs)
25962	      {
25963		as_bad (_("extension does not apply to the base architecture"));
25964		return FALSE;
25965	      }
25966
25967	    /* Add or remove the extension.  */
25968	    if (adding_value)
25969	      ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->merge_value);
25970	    else
25971	      ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->clear_value);
25972
25973	    break;
25974	  }
25975
25976      if (opt->name == NULL)
25977	{
25978	  /* Did we fail to find an extension because it wasn't specified in
25979	     alphabetical order, or because it does not exist?  */
25980
25981	  for (opt = arm_extensions; opt->name != NULL; opt++)
25982	    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
25983	      break;
25984
25985	  if (opt->name == NULL)
25986	    as_bad (_("unknown architectural extension `%s'"), str);
25987	  else
25988	    as_bad (_("architectural extensions must be specified in "
25989		      "alphabetical order"));
25990
25991	  return FALSE;
25992	}
25993      else
25994	{
25995	  /* We should skip the extension we've just matched the next time
25996	     round.  */
25997	  opt++;
25998	}
25999
26000      str = ext;
26001    }
26002
26003  return TRUE;
26004}
26005
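/* Parse the argument to the -mcpu= command-line option, which may carry
   a "+ext" extension suffix, e.g. "cortex-a8" or "cortex-a53+nocrypto".
   Sets mcpu_cpu_opt, mcpu_fpu_opt and the reported CPU name.  */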
26006static bfd_boolean
26007arm_parse_cpu (const char *str)
26008{
26009  const struct arm_cpu_option_table *opt;
26010  const char *ext = strchr (str, '+');
26011  size_t len;
26012
26013  if (ext != NULL)
26014    len = ext - str;
26015  else
26016    len = strlen (str);
26017
26018  if (len == 0)
26019    {
26020      as_bad (_("missing cpu name `%s'"), str);
26021      return FALSE;
26022    }
26023
26024  for (opt = arm_cpus; opt->name != NULL; opt++)
26025    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
26026      {
26027	mcpu_cpu_opt = &opt->value;
26028	mcpu_fpu_opt = &opt->default_fpu;
26029	if (opt->canonical_name)
26030	  {
26031	    gas_assert (sizeof selected_cpu_name > strlen (opt->canonical_name));
26032	    strcpy (selected_cpu_name, opt->canonical_name);
26033	  }
26034	else
26035	  {
26036	    size_t i;
26037
26038	    if (len >= sizeof selected_cpu_name)
26039	      len = (sizeof selected_cpu_name) - 1;
26040
26041	    for (i = 0; i < len; i++)
26042	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
26043	    selected_cpu_name[i] = 0;
26044	  }
26045
26046	if (ext != NULL)
26047	  return arm_parse_extension (ext, &mcpu_cpu_opt);
26048
26049	return TRUE;
26050      }
26051
26052  as_bad (_("unknown cpu `%s'"), str);
26053  return FALSE;
26054}
26055
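/* Parse the argument to the -march= command-line option, which may carry
   a "+ext" extension suffix.  Sets march_cpu_opt and march_fpu_opt.  */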
26056static bfd_boolean
26057arm_parse_arch (const char *str)
26058{
26059  const struct arm_arch_option_table *opt;
26060  const char *ext = strchr (str, '+');
26061  size_t len;
26062
26063  if (ext != NULL)
26064    len = ext - str;
26065  else
26066    len = strlen (str);
26067
26068  if (len == 0)
26069    {
26070      as_bad (_("missing architecture name `%s'"), str);
26071      return FALSE;
26072    }
26073
26074  for (opt = arm_archs; opt->name != NULL; opt++)
26075    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
26076      {
26077	march_cpu_opt = &opt->value;
26078	march_fpu_opt = &opt->default_fpu;
26079	strcpy (selected_cpu_name, opt->name);
26080
26081	if (ext != NULL)
26082	  return arm_parse_extension (ext, &march_cpu_opt);
26083
26084	return TRUE;
26085      }
26086
26087  as_bad (_("unknown architecture `%s'\n"), str);
26088  return FALSE;
26089}
26090
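/* Parse the argument to the -mfpu= command-line option.  */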
26091static bfd_boolean
26092arm_parse_fpu (const char * str)
26093{
26094  const struct arm_option_fpu_value_table * opt;
26095
26096  for (opt = arm_fpus; opt->name != NULL; opt++)
26097    if (streq (opt->name, str))
26098      {
26099	mfpu_opt = &opt->value;
26100	return TRUE;
26101      }
26102
26103  as_bad (_("unknown floating point format `%s'\n"), str);
26104  return FALSE;
26105}
26106
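/* Parse the argument to the -mfloat-abi= command-line option.  */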
26107static bfd_boolean
26108arm_parse_float_abi (const char * str)
26109{
26110  const struct arm_option_value_table * opt;
26111
26112  for (opt = arm_float_abis; opt->name != NULL; opt++)
26113    if (streq (opt->name, str))
26114      {
26115	mfloat_abi_opt = opt->value;
26116	return TRUE;
26117      }
26118
26119  as_bad (_("unknown floating point abi `%s'\n"), str);
26120  return FALSE;
26121}
26122
26123#ifdef OBJ_ELF
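/* Parse the argument to the -meabi= command-line option.  */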
26124static bfd_boolean
26125arm_parse_eabi (const char * str)
26126{
26127  const struct arm_option_value_table *opt;
26128
26129  for (opt = arm_eabis; opt->name != NULL; opt++)
26130    if (streq (opt->name, str))
26131      {
26132	meabi_flags = opt->value;
26133	return TRUE;
26134      }
26135  as_bad (_("unknown EABI `%s'\n"), str);
26136  return FALSE;
26137}
26138#endif
26139
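/* Parse the argument to the -mimplicit-it= command-line option.  */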
26140static bfd_boolean
26141arm_parse_it_mode (const char * str)
26142{
26143  bfd_boolean ret = TRUE;
26144
26145  if (streq ("arm", str))
26146    implicit_it_mode = IMPLICIT_IT_MODE_ARM;
26147  else if (streq ("thumb", str))
26148    implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
26149  else if (streq ("always", str))
26150    implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
26151  else if (streq ("never", str))
26152    implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
26153  else
26154    {
26155      as_bad (_("unknown implicit IT mode `%s', should be "\
26156		"arm, thumb, always, or never."), str);
26157      ret = FALSE;
26158    }
26159
26160  return ret;
26161}
26162
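/* Enable TI CodeComposer Studio syntax compatibility mode (-mccs).  */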
26163static bfd_boolean
26164arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED)
26165{
26166  codecomposer_syntax = TRUE;
26167  arm_comment_chars[0] = ';';
26168  arm_line_separator_chars[0] = 0;
26169  return TRUE;
26170}
26171
26172struct arm_long_option_table arm_long_opts[] =
26173{
26174  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
26175   arm_parse_cpu, NULL},
26176  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
26177   arm_parse_arch, NULL},
26178  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
26179   arm_parse_fpu, NULL},
26180  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
26181   arm_parse_float_abi, NULL},
26182#ifdef OBJ_ELF
26183  {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
26184   arm_parse_eabi, NULL},
26185#endif
26186  {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
26187   arm_parse_it_mode, NULL},
26188  {"mccs", N_("\t\t\t  TI CodeComposer Studio syntax compatibility mode"),
26189   arm_ccs_mode, NULL},
26190  {NULL, NULL, 0, NULL}
26191};
26192
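/* Handle an ARM-specific command-line option.  Returns nonzero if the
   option was accepted, zero otherwise.  */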
26193int
26194md_parse_option (int c, const char * arg)
26195{
26196  struct arm_option_table *opt;
26197  const struct arm_legacy_option_table *fopt;
26198  struct arm_long_option_table *lopt;
26199
26200  switch (c)
26201    {
26202#ifdef OPTION_EB
26203    case OPTION_EB:
26204      target_big_endian = 1;
26205      break;
26206#endif
26207
26208#ifdef OPTION_EL
26209    case OPTION_EL:
26210      target_big_endian = 0;
26211      break;
26212#endif
26213
26214    case OPTION_FIX_V4BX:
26215      fix_v4bx = TRUE;
26216      break;
26217
26218    case 'a':
26219      /* Listing option.  Just ignore these, we don't support additional
26220	 ones.	*/
26221      return 0;
26222
26223    default:
26224      for (opt = arm_opts; opt->option != NULL; opt++)
26225	{
26226	  if (c == opt->option[0]
26227	      && ((arg == NULL && opt->option[1] == 0)
26228		  || streq (arg, opt->option + 1)))
26229	    {
26230	      /* If the option is deprecated, tell the user.  */
26231	      if (warn_on_deprecated && opt->deprecated != NULL)
26232		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
26233			   arg ? arg : "", _(opt->deprecated));
26234
26235	      if (opt->var != NULL)
26236		*opt->var = opt->value;
26237
26238	      return 1;
26239	    }
26240	}
26241
26242      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
26243	{
26244	  if (c == fopt->option[0]
26245	      && ((arg == NULL && fopt->option[1] == 0)
26246		  || streq (arg, fopt->option + 1)))
26247	    {
26248	      /* If the option is deprecated, tell the user.  */
26249	      if (warn_on_deprecated && fopt->deprecated != NULL)
26250		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
26251			   arg ? arg : "", _(fopt->deprecated));
26252
26253	      if (fopt->var != NULL)
26254		*fopt->var = &fopt->value;
26255
26256	      return 1;
26257	    }
26258	}
26259
26260      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
26261	{
26262	  /* These options are expected to have an argument.  */
26263	  if (c == lopt->option[0]
26264	      && arg != NULL
26265	      && strncmp (arg, lopt->option + 1,
26266			  strlen (lopt->option + 1)) == 0)
26267	    {
26268	      /* If the option is deprecated, tell the user.  */
26269	      if (warn_on_deprecated && lopt->deprecated != NULL)
26270		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
26271			   _(lopt->deprecated));
26272
26273	      /* Call the sub-option parser.  */
26274	      return lopt->func (arg + strlen (lopt->option) - 1);
26275	    }
26276	}
26277
26278      return 0;
26279    }
26280
26281  return 1;
26282}
26283
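/* Print a description of the ARM-specific command-line options to FP.  */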
26284void
26285md_show_usage (FILE * fp)
26286{
26287  struct arm_option_table *opt;
26288  struct arm_long_option_table *lopt;
26289
26290  fprintf (fp, _(" ARM-specific assembler options:\n"));
26291
26292  for (opt = arm_opts; opt->option != NULL; opt++)
26293    if (opt->help != NULL)
26294      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));
26295
26296  for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
26297    if (lopt->help != NULL)
26298      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));
26299
26300#ifdef OPTION_EB
26301  fprintf (fp, _("\
26302  -EB                     assemble code for a big-endian cpu\n"));
26303#endif
26304
26305#ifdef OPTION_EL
26306  fprintf (fp, _("\
26307  -EL                     assemble code for a little-endian cpu\n"));
26308#endif
26309
26310  fprintf (fp, _("\
26311  --fix-v4bx              Allow BX in ARMv4 code\n"));
26312}
26313
26314
26315#ifdef OBJ_ELF
26316typedef struct
26317{
26318  int val;
26319  arm_feature_set flags;
26320} cpu_arch_ver_table;
26321
26322/* Mapping from CPU features to EABI CPU arch values.  As a general rule, the
26323   table must be sorted least features first, but some reordering is needed,
26324   e.g. for Thumb-2 instructions to be detected as coming from ARMv6T2.  */
26325static const cpu_arch_ver_table cpu_arch_ver[] =
26326{
26327    {1, ARM_ARCH_V4},
26328    {2, ARM_ARCH_V4T},
26329    {3, ARM_ARCH_V5},
26330    {3, ARM_ARCH_V5T},
26331    {4, ARM_ARCH_V5TE},
26332    {5, ARM_ARCH_V5TEJ},
26333    {6, ARM_ARCH_V6},
26334    {9, ARM_ARCH_V6K},
26335    {7, ARM_ARCH_V6Z},
26336    {11, ARM_ARCH_V6M},
26337    {12, ARM_ARCH_V6SM},
26338    {8, ARM_ARCH_V6T2},
26339    {10, ARM_ARCH_V7VE},
26340    {10, ARM_ARCH_V7R},
26341    {10, ARM_ARCH_V7M},
26342    {14, ARM_ARCH_V8A},
26343    {16, ARM_ARCH_V8M_BASE},
26344    {17, ARM_ARCH_V8M_MAIN},
26345    {0, ARM_ARCH_NONE}
26346};
26347
26348/* Set an attribute if it has not already been set by the user.  */
26349static void
26350aeabi_set_attribute_int (int tag, int value)
26351{
26352  if (tag < 1
26353      || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
26354      || !attributes_set_explicitly[tag])
26355    bfd_elf_add_proc_attr_int (stdoutput, tag, value);
26356}
26357
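/* Likewise for string-valued attributes.  */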
26358static void
26359aeabi_set_attribute_string (int tag, const char *value)
26360{
26361  if (tag < 1
26362      || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
26363      || !attributes_set_explicitly[tag])
26364    bfd_elf_add_proc_attr_string (stdoutput, tag, value);
26365}
26366
26367/* Set the public EABI object attributes.  */
26368void
26369aeabi_set_public_attributes (void)
26370{
26371  int arch;
26372  char profile;
26373  int virt_sec = 0;
26374  int fp16_optional = 0;
26375  arm_feature_set arm_arch = ARM_ARCH_NONE;
26376  arm_feature_set flags;
26377  arm_feature_set tmp;
26378  arm_feature_set arm_arch_v8m_base = ARM_ARCH_V8M_BASE;
26379  const cpu_arch_ver_table *p;
26380
26381  /* Choose the architecture based on the capabilities of the requested cpu
26382     (if any) and/or the instructions actually used.  */
26383  ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
26384  ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
26385  ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
26386
26387  if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
26388    ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);
26389
26390  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
26391    ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);
26392
26393  selected_cpu = flags;
26394
26395  /* Allow the user to override the reported architecture.  */
26396  if (object_arch)
26397    {
26398      ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
26399      ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
26400    }
26401
26402  /* We need to make sure that the attributes do not identify us as v6S-M
26403     when the only v6S-M feature in use is the Operating System Extensions.  */
26404  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_os))
26405      if (!ARM_CPU_HAS_FEATURE (flags, arm_arch_v6m_only))
26406	ARM_CLEAR_FEATURE (flags, flags, arm_ext_os);
26407
26408  tmp = flags;
26409  arch = 0;
26410  for (p = cpu_arch_ver; p->val; p++)
26411    {
26412      if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
26413	{
26414	  arch = p->val;
26415	  arm_arch = p->flags;
26416	  ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
26417	}
26418    }
26419
26420  /* The table lookup above finds the last architecture to contribute
26421     a new feature.  Unfortunately, v7E-M (Tag_CPU_arch value 13) is a
26422     subset of the union of v6T2 and v7-M, so it is never seen as
26423     contributing a new feature.  We cannot search for the last entry
26424     which is entirely used, because if no CPU is specified we build up
26425     only those flags actually used.  Perhaps we should separate out the
26426     specified and implicit cases.  Avoid taking this path for -march=all
26427     by checking for contradictory v7-A / v7-M features.  */
26428  if (arch == TAG_CPU_ARCH_V7
26429      && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
26430      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m)
26431      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v6_dsp))
26432    {
26433      arch = TAG_CPU_ARCH_V7E_M;
26434      arm_arch = (arm_feature_set) ARM_ARCH_V7EM;
26435    }
26436
26437  ARM_CLEAR_FEATURE (tmp, flags, arm_arch_v8m_base);
26438  if (arch == TAG_CPU_ARCH_V8M_BASE && ARM_CPU_HAS_FEATURE (tmp, arm_arch_any))
26439    {
26440      arch = TAG_CPU_ARCH_V8M_MAIN;
26441      arm_arch = (arm_feature_set) ARM_ARCH_V8M_MAIN;
26442    }
26443
26444  /* In cpu_arch_ver ARMv8-A is before ARMv8-M for atomics to be detected as
26445     coming from ARMv8-A.  However, since ARMv8-A has more instructions than
26446     ARMv8-M, -march=all must be detected as ARMv8-A.  */
26447  if (arch == TAG_CPU_ARCH_V8M_MAIN
26448      && ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
26449    {
26450      arch = TAG_CPU_ARCH_V8;
26451      arm_arch = (arm_feature_set) ARM_ARCH_V8A;
26452    }
26453
26454  /* Tag_CPU_name.  */
26455  if (selected_cpu_name[0])
26456    {
26457      char *q;
26458
26459      q = selected_cpu_name;
26460      if (strncmp (q, "armv", 4) == 0)
26461	{
26462	  int i;
26463
26464	  q += 4;
26465	  for (i = 0; q[i]; i++)
26466	    q[i] = TOUPPER (q[i]);
26467	}
26468      aeabi_set_attribute_string (Tag_CPU_name, q);
26469    }
26470
26471  /* Tag_CPU_arch.  */
26472  aeabi_set_attribute_int (Tag_CPU_arch, arch);
26473
26474  /* Tag_CPU_arch_profile.  */
26475  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
26476      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
26477      || (ARM_CPU_HAS_FEATURE (flags, arm_ext_atomics)
26478	  && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m_m_only)))
26479    profile = 'A';
26480  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
26481    profile = 'R';
26482  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
26483    profile = 'M';
26484  else
26485    profile = '\0';
26486
26487  if (profile != '\0')
26488    aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);
26489
26490  /* Tag_DSP_extension.  */
26491  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_dsp))
26492    {
26493      arm_feature_set ext;
26494
26495      /* DSP instructions not in architecture.  */
26496      ARM_CLEAR_FEATURE (ext, flags, arm_arch);
26497      if (ARM_CPU_HAS_FEATURE (ext, arm_ext_dsp))
26498	aeabi_set_attribute_int (Tag_DSP_extension, 1);
26499    }
26500
26501  /* Tag_ARM_ISA_use.  */
26502  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
26503      || arch == 0)
26504    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);
26505
26506  /* Tag_THUMB_ISA_use.  */
26507  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
26508      || arch == 0)
26509    {
26510      int thumb_isa_use;
26511
26512      if (!ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
26513	  && ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m_m_only))
26514	thumb_isa_use = 3;
26515      else if (ARM_CPU_HAS_FEATURE (flags, arm_arch_t2))
26516	thumb_isa_use = 2;
26517      else
26518	thumb_isa_use = 1;
26519      aeabi_set_attribute_int (Tag_THUMB_ISA_use, thumb_isa_use);
26520    }
26521
26522  /* Tag_VFP_arch.  */
26523  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8xd))
26524    aeabi_set_attribute_int (Tag_VFP_arch,
26525			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
26526			     ? 7 : 8);
26527  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
26528    aeabi_set_attribute_int (Tag_VFP_arch,
26529			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
26530			     ? 5 : 6);
26531  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
26532    {
26533      fp16_optional = 1;
26534      aeabi_set_attribute_int (Tag_VFP_arch, 3);
26535    }
26536  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
26537    {
26538      aeabi_set_attribute_int (Tag_VFP_arch, 4);
26539      fp16_optional = 1;
26540    }
26541  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
26542    aeabi_set_attribute_int (Tag_VFP_arch, 2);
26543  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
26544	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
26545    aeabi_set_attribute_int (Tag_VFP_arch, 1);
26546
26547  /* Tag_ABI_HardFP_use.  */
26548  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
26549      && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
26550    aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);
26551
26552  /* Tag_WMMX_arch.  */
26553  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
26554    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
26555  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
26556    aeabi_set_attribute_int (Tag_WMMX_arch, 1);
26557
26558  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
26559  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v8_1))
26560    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 4);
26561  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
26562    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
26563  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
26564    {
26565      if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
26566	{
26567	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
26568	}
26569      else
26570	{
26571	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
26572	  fp16_optional = 1;
26573	}
26574    }
26575
26576  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
26577  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
26578    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);
26579
26580  /* Tag_DIV_use.
26581
26582     We set Tag_DIV_use to two when integer divide instructions have been used
26583     in ARM state, or when Thumb integer divide instructions have been used,
26584     but we have no architecture profile set, nor have we any ARM instructions.
26585
26586     For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
26587     by the base architecture.
26588
26589     For new architectures we will have to check these tests.  */
26590  gas_assert (arch <= TAG_CPU_ARCH_V8
26591	      || (arch >= TAG_CPU_ARCH_V8M_BASE
26592		  && arch <= TAG_CPU_ARCH_V8M_MAIN));
26593  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
26594      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m))
26595    aeabi_set_attribute_int (Tag_DIV_use, 0);
26596  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
26597	   || (profile == '\0'
26598	       && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
26599	       && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
26600    aeabi_set_attribute_int (Tag_DIV_use, 2);
26601
26602  /* Tag_MPextension_use.  */
26603  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
26604    aeabi_set_attribute_int (Tag_MPextension_use, 1);
26605
26606  /* Tag_Virtualization_use.  */
26607  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
26608    virt_sec |= 1;
26609  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
26610    virt_sec |= 2;
26611  if (virt_sec != 0)
26612    aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
26613}
26614
26615/* Add the default contents for the .ARM.attributes section.  */
26616void
26617arm_md_end (void)
26618{
26619  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
26620    return;
26621
26622  aeabi_set_public_attributes ();
26623}
26624#endif /* OBJ_ELF */
26625
26626
26627/* Parse a .cpu directive.  */
26628
26629static void
26630s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
26631{
26632  const struct arm_cpu_option_table *opt;
26633  char *name;
26634  char saved_char;
26635
26636  name = input_line_pointer;
26637  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26638    input_line_pointer++;
26639  saved_char = *input_line_pointer;
26640  *input_line_pointer = 0;
26641
26642  /* Skip the first "all" entry.  */
26643  for (opt = arm_cpus + 1; opt->name != NULL; opt++)
26644    if (streq (opt->name, name))
26645      {
26646	mcpu_cpu_opt = &opt->value;
26647	selected_cpu = opt->value;
26648	if (opt->canonical_name)
26649	  strcpy (selected_cpu_name, opt->canonical_name);
26650	else
26651	  {
26652	    int i;
26653	    for (i = 0; opt->name[i]; i++)
26654	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
26655
26656	    selected_cpu_name[i] = 0;
26657	  }
26658	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
26659	*input_line_pointer = saved_char;
26660	demand_empty_rest_of_line ();
26661	return;
26662      }
26663  as_bad (_("unknown cpu `%s'"), name);
26664  *input_line_pointer = saved_char;
26665  ignore_rest_of_line ();
26666}
26667
26668
26669/* Parse a .arch directive.  */
26670
26671static void
26672s_arm_arch (int ignored ATTRIBUTE_UNUSED)
26673{
26674  const struct arm_arch_option_table *opt;
26675  char saved_char;
26676  char *name;
26677
26678  name = input_line_pointer;
26679  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26680    input_line_pointer++;
26681  saved_char = *input_line_pointer;
26682  *input_line_pointer = 0;
26683
26684  /* Skip the first "all" entry.  */
26685  for (opt = arm_archs + 1; opt->name != NULL; opt++)
26686    if (streq (opt->name, name))
26687      {
26688	mcpu_cpu_opt = &opt->value;
26689	selected_cpu = opt->value;
26690	strcpy (selected_cpu_name, opt->name);
26691	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
26692	*input_line_pointer = saved_char;
26693	demand_empty_rest_of_line ();
26694	return;
26695      }
26696
26697  as_bad (_("unknown architecture `%s'\n"), name);
26698  *input_line_pointer = saved_char;
26699  ignore_rest_of_line ();
26700}
26701
26702
26703/* Parse a .object_arch directive.  */
26704
26705static void
26706s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
26707{
26708  const struct arm_arch_option_table *opt;
26709  char saved_char;
26710  char *name;
26711
26712  name = input_line_pointer;
26713  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26714    input_line_pointer++;
26715  saved_char = *input_line_pointer;
26716  *input_line_pointer = 0;
26717
26718  /* Skip the first "all" entry.  */
26719  for (opt = arm_archs + 1; opt->name != NULL; opt++)
26720    if (streq (opt->name, name))
26721      {
26722	object_arch = &opt->value;
26723	*input_line_pointer = saved_char;
26724	demand_empty_rest_of_line ();
26725	return;
26726      }
26727
26728  as_bad (_("unknown architecture `%s'\n"), name);
26729  *input_line_pointer = saved_char;
26730  ignore_rest_of_line ();
26731}
26732
26733/* Parse a .arch_extension directive.  */
26734
26735static void
26736s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
26737{
26738  const struct arm_option_extension_value_table *opt;
26739  const arm_feature_set arm_any = ARM_ANY;
26740  char saved_char;
26741  char *name;
26742  int adding_value = 1;
26743
26744  name = input_line_pointer;
26745  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26746    input_line_pointer++;
26747  saved_char = *input_line_pointer;
26748  *input_line_pointer = 0;
26749
26750  if (strlen (name) >= 2
26751      && strncmp (name, "no", 2) == 0)
26752    {
26753      adding_value = 0;
26754      name += 2;
26755    }
26756
26757  for (opt = arm_extensions; opt->name != NULL; opt++)
26758    if (streq (opt->name, name))
26759      {
26760	int i, nb_allowed_archs =
26761	  sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
26762	for (i = 0; i < nb_allowed_archs; i++)
26763	  {
26764	    /* Empty entry.  */
26765	    if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any))
26766	      continue;
26767	    if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *mcpu_cpu_opt))
26768	      break;
26769	  }
26770
26771	if (i == nb_allowed_archs)
26772	  {
26773	    as_bad (_("architectural extension `%s' is not allowed for the "
26774		      "current base architecture"), name);
26775	    break;
26776	  }
26777
26778	if (adding_value)
26779	  ARM_MERGE_FEATURE_SETS (selected_cpu, selected_cpu,
26780				  opt->merge_value);
26781	else
26782	  ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, opt->clear_value);
26783
26784	mcpu_cpu_opt = &selected_cpu;
26785	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
26786	*input_line_pointer = saved_char;
26787	demand_empty_rest_of_line ();
26788	return;
26789      }
26790
26791  if (opt->name == NULL)
26792    as_bad (_("unknown architecture extension `%s'\n"), name);
26793
26794  *input_line_pointer = saved_char;
26795  ignore_rest_of_line ();
26796}
26797
26798/* Parse a .fpu directive.  */
26799
26800static void
26801s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
26802{
26803  const struct arm_option_fpu_value_table *opt;
26804  char saved_char;
26805  char *name;
26806
26807  name = input_line_pointer;
26808  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26809    input_line_pointer++;
26810  saved_char = *input_line_pointer;
26811  *input_line_pointer = 0;
26812
26813  for (opt = arm_fpus; opt->name != NULL; opt++)
26814    if (streq (opt->name, name))
26815      {
26816	mfpu_opt = &opt->value;
26817	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
26818	*input_line_pointer = saved_char;
26819	demand_empty_rest_of_line ();
26820	return;
26821      }
26822
26823  as_bad (_("unknown floating point format `%s'\n"), name);
26824  *input_line_pointer = saved_char;
26825  ignore_rest_of_line ();
26826}
26827
26828/* Copy symbol information.  */
26829
26830void
26831arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
26832{
26833  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
26834}
26835
26836#ifdef OBJ_ELF
26837/* Given a symbolic attribute NAME, return the proper integer value.
26838   Returns -1 if the attribute is not known.  */
26839
26840int
26841arm_convert_symbolic_attribute (const char *name)
26842{
26843  static const struct
26844  {
26845    const char * name;
26846    const int    tag;
26847  }
26848  attribute_table[] =
26849    {
26850      /* When you modify this table you should
26851	 also modify the list in doc/c-arm.texi.  */
26852#define T(tag) {#tag, tag}
26853      T (Tag_CPU_raw_name),
26854      T (Tag_CPU_name),
26855      T (Tag_CPU_arch),
26856      T (Tag_CPU_arch_profile),
26857      T (Tag_ARM_ISA_use),
26858      T (Tag_THUMB_ISA_use),
26859      T (Tag_FP_arch),
26860      T (Tag_VFP_arch),
26861      T (Tag_WMMX_arch),
26862      T (Tag_Advanced_SIMD_arch),
26863      T (Tag_PCS_config),
26864      T (Tag_ABI_PCS_R9_use),
26865      T (Tag_ABI_PCS_RW_data),
26866      T (Tag_ABI_PCS_RO_data),
26867      T (Tag_ABI_PCS_GOT_use),
26868      T (Tag_ABI_PCS_wchar_t),
26869      T (Tag_ABI_FP_rounding),
26870      T (Tag_ABI_FP_denormal),
26871      T (Tag_ABI_FP_exceptions),
26872      T (Tag_ABI_FP_user_exceptions),
26873      T (Tag_ABI_FP_number_model),
26874      T (Tag_ABI_align_needed),
26875      T (Tag_ABI_align8_needed),
26876      T (Tag_ABI_align_preserved),
26877      T (Tag_ABI_align8_preserved),
26878      T (Tag_ABI_enum_size),
26879      T (Tag_ABI_HardFP_use),
26880      T (Tag_ABI_VFP_args),
26881      T (Tag_ABI_WMMX_args),
26882      T (Tag_ABI_optimization_goals),
26883      T (Tag_ABI_FP_optimization_goals),
26884      T (Tag_compatibility),
26885      T (Tag_CPU_unaligned_access),
26886      T (Tag_FP_HP_extension),
26887      T (Tag_VFP_HP_extension),
26888      T (Tag_ABI_FP_16bit_format),
26889      T (Tag_MPextension_use),
26890      T (Tag_DIV_use),
26891      T (Tag_nodefaults),
26892      T (Tag_also_compatible_with),
26893      T (Tag_conformance),
26894      T (Tag_T2EE_use),
26895      T (Tag_Virtualization_use),
26896      T (Tag_DSP_extension),
26897      /* We deliberately do not include Tag_MPextension_use_legacy.  */
26898#undef T
26899    };
26900  unsigned int i;
26901
26902  if (name == NULL)
26903    return -1;
26904
26905  for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
26906    if (streq (name, attribute_table[i].name))
26907      return attribute_table[i].tag;
26908
26909  return -1;
26910}
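
/* The conversion above lets .eabi_attribute accept a symbolic tag name in
   place of its numeric value, so that, for example,

	.eabi_attribute Tag_ABI_enum_size, 2

   can be written instead of spelling out the tag number.  */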
26911
26912
26913/* Apply the symbol value to a relocation only when the symbol is local,
26914   in the same segment as the fixup, and the selected architecture has
26915   the v5T BLX/interworking support needed to resolve it directly.  */
26916int
26917arm_apply_sym_value (struct fix * fixP, segT this_seg)
26918{
26919  if (fixP->fx_addsy
26920      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
26921      /* PR 17444: If the local symbol is in a different section then a reloc
26922	 will always be generated for it, so applying the symbol value now
26923	 will result in a double offset being stored in the relocation.  */
26924      && (S_GET_SEGMENT (fixP->fx_addsy) == this_seg)
26925      && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
26926    {
26927      switch (fixP->fx_r_type)
26928	{
26929	case BFD_RELOC_ARM_PCREL_BLX:
26930	case BFD_RELOC_THUMB_PCREL_BRANCH23:
26931	  if (ARM_IS_FUNC (fixP->fx_addsy))
26932	    return 1;
26933	  break;
26934
26935	case BFD_RELOC_ARM_PCREL_CALL:
26936	case BFD_RELOC_THUMB_PCREL_BLX:
26937	  if (THUMB_IS_FUNC (fixP->fx_addsy))
26938	    return 1;
26939	  break;
26940
26941	default:
26942	  break;
26943	}
26944
26945    }
26946  return 0;
26947}
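
/* Illustrative case for the check above (names assumed):

	blx	foo		@ foo: an ARM function in this section
	...
   foo:	...

   With foo local and defined in the same section, the fixup can be
   resolved at assembly time, so the symbol value is applied here.  Had
   foo been in another section, a relocation would be emitted anyway and
   applying the value as well would store a double offset (see PR 17444
   above).  */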
26948#endif /* OBJ_ELF */
26949