1/* tc-arm.c -- Assemble for the ARM
2   Copyright (C) 1994-2020 Free Software Foundation, Inc.
3   Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4	Modified by David Taylor (dtaylor@armltd.co.uk)
5	Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6	Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7	Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
8
9   This file is part of GAS, the GNU Assembler.
10
11   GAS is free software; you can redistribute it and/or modify
12   it under the terms of the GNU General Public License as published by
13   the Free Software Foundation; either version 3, or (at your option)
14   any later version.
15
16   GAS is distributed in the hope that it will be useful,
17   but WITHOUT ANY WARRANTY; without even the implied warranty of
18   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
19   GNU General Public License for more details.
20
21   You should have received a copy of the GNU General Public License
22   along with GAS; see the file COPYING.  If not, write to the Free
23   Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
24   02110-1301, USA.  */
25
26#include "as.h"
27#include <limits.h>
28#include <stdarg.h>
29#define	 NO_RELOC 0
30#include "safe-ctype.h"
31#include "subsegs.h"
32#include "obstack.h"
33#include "libiberty.h"
34#include "opcode/arm.h"
35#include "cpu-arm.h"
36
37#ifdef OBJ_ELF
38#include "elf/arm.h"
39#include "dw2gencfi.h"
40#endif
41
42#include "dwarf2dbg.h"
43
44#ifdef OBJ_ELF
45/* Must be at least the size of the largest unwind opcode (currently two).  */
46#define ARM_OPCODE_CHUNK_SIZE 8
47
48/* This structure holds the unwinding state.  */
49
static struct
{
  /* Symbol marking the start of the function being unwound; presumably
     set when a .fnstart directive is seen (cf. MISSING_FNSTART below) —
     confirm at the directive handlers.  */
  symbolS *	  proc_start;
  /* Symbol for this function's entry in the unwind table, if any —
     presumably; confirm where it is assigned.  */
  symbolS *	  table_entry;
  /* Explicit personality routine, when one was given.  */
  symbolS *	  personality_routine;
  /* Index of a predefined personality routine, or a sentinel when an
     explicit routine/none applies — confirm against the .personality
     handling.  */
  int		  personality_index;
  /* The segment containing the function.  */
  segT		  saved_seg;
  subsegT	  saved_subseg;
  /* Opcodes generated from this function.  */
  unsigned char * opcodes;
  /* Bytes of OPCODES currently in use.  */
  int		  opcode_count;
  /* Bytes allocated for OPCODES (grown in ARM_OPCODE_CHUNK_SIZE steps,
     presumably).  */
  int		  opcode_alloc;
  /* The number of bytes pushed to the stack.  */
  offsetT	  frame_size;
  /* We don't add stack adjustment opcodes immediately so that we can merge
     multiple adjustments.  We can also omit the final adjustment
     when using a frame pointer.  */
  offsetT	  pending_offset;
  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     hold the reg+offset to use when restoring sp from a frame pointer.	 */
  offsetT	  fp_offset;
  int		  fp_reg;
  /* Nonzero if an unwind_setfp directive has been seen.  */
  unsigned	  fp_used:1;
  /* Nonzero if the last opcode restores sp from fp_reg.  */
  unsigned	  sp_restored:1;
} unwind;
78
79/* Whether --fdpic was given.  */
80static int arm_fdpic;
81
82#endif /* OBJ_ELF */
83
84/* Results from operand parsing worker functions.  */
85
typedef enum
{
  PARSE_OPERAND_SUCCESS,
  PARSE_OPERAND_FAIL,
  /* Parsing failed and the failure is authoritative: callers should not
     backtrack and attempt an alternative interpretation of the operand.  */
  PARSE_OPERAND_FAIL_NO_BACKTRACK
} parse_operand_result;
92
/* Float ABI variants; presumably the values selectable via -mfloat-abi
   and stored in mfloat_abi_opt (below) — confirm at option parsing.  */
enum arm_float_abi
{
  ARM_FLOAT_ABI_HARD,
  ARM_FLOAT_ABI_SOFTFP,
  ARM_FLOAT_ABI_SOFT
};
99
100/* Types of processor to assemble for.	*/
101#ifndef CPU_DEFAULT
102/* The code that was here used to select a default CPU depending on compiler
103   pre-defines which were only present when doing native builds, thus
104   changing gas' default behaviour depending upon the build host.
105
106   If you have a target that requires a default CPU option then the you
107   should define CPU_DEFAULT here.  */
108#endif
109
110/* Perform range checks on positive and negative overflows by checking if the
111   VALUE given fits within the range of an BITS sized immediate.  */
112static bfd_boolean out_of_range_p (offsetT value, offsetT bits)
113 {
114  gas_assert (bits < (offsetT)(sizeof (value) * 8));
115  return (value & ~((1 << bits)-1))
116	  && ((value & ~((1 << bits)-1)) != ~((1 << bits)-1));
117}
118
119#ifndef FPU_DEFAULT
120# ifdef TE_LINUX
121#  define FPU_DEFAULT FPU_ARCH_FPA
122# elif defined (TE_NetBSD)
123#  ifdef OBJ_ELF
124#   define FPU_DEFAULT FPU_ARCH_VFP	/* Soft-float, but VFP order.  */
125#  else
126    /* Legacy a.out format.  */
127#   define FPU_DEFAULT FPU_ARCH_FPA	/* Soft-float, but FPA order.  */
128#  endif
129# elif defined (TE_VXWORKS)
130#  define FPU_DEFAULT FPU_ARCH_VFP	/* Soft-float, VFP order.  */
131# else
132   /* For backwards compatibility, default to FPA.  */
133#  define FPU_DEFAULT FPU_ARCH_FPA
134# endif
135#endif /* ifndef FPU_DEFAULT */
136
137#define streq(a, b)	      (strcmp (a, b) == 0)
138
139/* Current set of feature bits available (CPU+FPU).  Different from
140   selected_cpu + selected_fpu in case of autodetection since the CPU
141   feature bits are then all set.  */
142static arm_feature_set cpu_variant;
143/* Feature bits used in each execution state.  Used to set build attribute
144   (in particular Tag_*_ISA_use) in CPU autodetection mode.  */
145static arm_feature_set arm_arch_used;
146static arm_feature_set thumb_arch_used;
147
148/* Flags stored in private area of BFD structure.  */
149static int uses_apcs_26	     = FALSE;
150static int atpcs	     = FALSE;
151static int support_interwork = FALSE;
152static int uses_apcs_float   = FALSE;
153static int pic_code	     = FALSE;
154static int fix_v4bx	     = FALSE;
155/* Warn on using deprecated features.  */
156static int warn_on_deprecated = TRUE;
157static int warn_on_restrict_it = FALSE;
158
159/* Understand CodeComposer Studio assembly syntax.  */
160bfd_boolean codecomposer_syntax = FALSE;
161
162/* Variables that we set while parsing command-line options.  Once all
163   options have been read we re-process these values to set the real
164   assembly flags.  */
165
166/* CPU and FPU feature bits set for legacy CPU and FPU options (eg. -marm1
167   instead of -mcpu=arm1).  */
168static const arm_feature_set *legacy_cpu = NULL;
169static const arm_feature_set *legacy_fpu = NULL;
170
171/* CPU, extension and FPU feature bits selected by -mcpu.  */
172static const arm_feature_set *mcpu_cpu_opt = NULL;
173static arm_feature_set *mcpu_ext_opt = NULL;
174static const arm_feature_set *mcpu_fpu_opt = NULL;
175
176/* CPU, extension and FPU feature bits selected by -march.  */
177static const arm_feature_set *march_cpu_opt = NULL;
178static arm_feature_set *march_ext_opt = NULL;
179static const arm_feature_set *march_fpu_opt = NULL;
180
181/* Feature bits selected by -mfpu.  */
182static const arm_feature_set *mfpu_opt = NULL;
183
184/* Constants for known architecture features.  */
185static const arm_feature_set fpu_default = FPU_DEFAULT;
186static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V1;
187static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
188static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V3;
189static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED = FPU_ARCH_NEON_V1;
190static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
191static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
192#ifdef OBJ_ELF
193static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
194#endif
195static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
196
197#ifdef CPU_DEFAULT
198static const arm_feature_set cpu_default = CPU_DEFAULT;
199#endif
200
201static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
202static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V2);
203static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
204static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
205static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
206static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
207static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
208static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
209static const arm_feature_set arm_ext_v4t_5 =
210  ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
211static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
212static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
213static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
214static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
215static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
216static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
217static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
/* Only for compatibility of hint instructions.  */
219static const arm_feature_set arm_ext_v6k_v6t2 =
220  ARM_FEATURE_CORE_LOW (ARM_EXT_V6K | ARM_EXT_V6T2);
221static const arm_feature_set arm_ext_v6_notm =
222  ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
223static const arm_feature_set arm_ext_v6_dsp =
224  ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
225static const arm_feature_set arm_ext_barrier =
226  ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
227static const arm_feature_set arm_ext_msr =
228  ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
229static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
230static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
231static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
232static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
233#ifdef OBJ_ELF
234static const arm_feature_set ATTRIBUTE_UNUSED arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
235#endif
236static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
237static const arm_feature_set arm_ext_m =
238  ARM_FEATURE_CORE (ARM_EXT_V6M | ARM_EXT_V7M,
239		    ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
240static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
241static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
242static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
243static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
244static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
245static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN);
246static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M);
247static const arm_feature_set arm_ext_v8m_main =
248  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN);
249static const arm_feature_set arm_ext_v8_1m_main =
250ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN);
251/* Instructions in ARMv8-M only found in M profile architectures.  */
252static const arm_feature_set arm_ext_v8m_m_only =
253  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
254static const arm_feature_set arm_ext_v6t2_v8m =
255  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M);
256/* Instructions shared between ARMv8-A and ARMv8-M.  */
257static const arm_feature_set arm_ext_atomics =
258  ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS);
259#ifdef OBJ_ELF
260/* DSP instructions Tag_DSP_extension refers to.  */
261static const arm_feature_set arm_ext_dsp =
262  ARM_FEATURE_CORE_LOW (ARM_EXT_V5E | ARM_EXT_V5ExP | ARM_EXT_V6_DSP);
263#endif
264static const arm_feature_set arm_ext_ras =
265  ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS);
266/* FP16 instructions.  */
267static const arm_feature_set arm_ext_fp16 =
268  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST);
269static const arm_feature_set arm_ext_fp16_fml =
270  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_FML);
271static const arm_feature_set arm_ext_v8_2 =
272  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_2A);
273static const arm_feature_set arm_ext_v8_3 =
274  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A);
275static const arm_feature_set arm_ext_sb =
276  ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB);
277static const arm_feature_set arm_ext_predres =
278  ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES);
279static const arm_feature_set arm_ext_bf16 =
280  ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16);
281static const arm_feature_set arm_ext_i8mm =
282  ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM);
283static const arm_feature_set arm_ext_crc =
284  ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC);
285
286static const arm_feature_set arm_arch_any = ARM_ANY;
287static const arm_feature_set fpu_any = FPU_ANY;
288static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED = ARM_FEATURE (-1, -1, -1);
289static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
290static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
291
292static const arm_feature_set arm_cext_iwmmxt2 =
293  ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
294static const arm_feature_set arm_cext_iwmmxt =
295  ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
296static const arm_feature_set arm_cext_xscale =
297  ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
298static const arm_feature_set arm_cext_maverick =
299  ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);
300static const arm_feature_set fpu_fpa_ext_v1 =
301  ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
302static const arm_feature_set fpu_fpa_ext_v2 =
303  ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
304static const arm_feature_set fpu_vfp_ext_v1xd =
305  ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
306static const arm_feature_set fpu_vfp_ext_v1 =
307  ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
308static const arm_feature_set fpu_vfp_ext_v2 =
309  ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
310static const arm_feature_set fpu_vfp_ext_v3xd =
311  ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
312static const arm_feature_set fpu_vfp_ext_v3 =
313  ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
314static const arm_feature_set fpu_vfp_ext_d32 =
315  ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
316static const arm_feature_set fpu_neon_ext_v1 =
317  ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
318static const arm_feature_set fpu_vfp_v3_or_neon_ext =
319  ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
320static const arm_feature_set mve_ext =
321  ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE);
322static const arm_feature_set mve_fp_ext =
323  ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP);
324#ifdef OBJ_ELF
325static const arm_feature_set fpu_vfp_fp16 =
326  ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
327static const arm_feature_set fpu_neon_ext_fma =
328  ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
329#endif
330static const arm_feature_set fpu_vfp_ext_fma =
331  ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
332static const arm_feature_set fpu_vfp_ext_armv8 =
333  ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
334static const arm_feature_set fpu_vfp_ext_armv8xd =
335  ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
336static const arm_feature_set fpu_neon_ext_armv8 =
337  ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
338static const arm_feature_set fpu_crypto_ext_armv8 =
339  ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
340static const arm_feature_set fpu_neon_ext_v8_1 =
341  ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA);
342static const arm_feature_set fpu_neon_ext_dotprod =
343  ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD);
344
345static int mfloat_abi_opt = -1;
346/* Architecture feature bits selected by the last -mcpu/-march or .cpu/.arch
347   directive.  */
348static arm_feature_set selected_arch = ARM_ARCH_NONE;
349/* Extension feature bits selected by the last -mcpu/-march or .arch_extension
350   directive.  */
351static arm_feature_set selected_ext = ARM_ARCH_NONE;
/* Feature bits selected by the last -mcpu/-march, or by the combination of
   the last .cpu/.arch directive and any .arch_extension directives seen
   since that directive.  */
355static arm_feature_set selected_cpu = ARM_ARCH_NONE;
356/* FPU feature bits selected by the last -mfpu or .fpu directive.  */
357static arm_feature_set selected_fpu = FPU_NONE;
/* Feature bits selected by the last .object_arch directive.  */
static arm_feature_set selected_object_arch = ARM_ARCH_NONE;
/* Extension table matching the currently selected CPU, or NULL.  */
static const struct arm_ext_table * selected_ctx_ext_table = NULL;
/* Must be long enough to hold any of the names in arm_cpus.  */
static char selected_cpu_name[20];
363
364extern FLONUM_TYPE generic_floating_point_number;
365
366/* Return if no cpu was selected on command-line.  */
367static bfd_boolean
368no_cpu_selected (void)
369{
370  return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none);
371}
372
373#ifdef OBJ_ELF
374# ifdef EABI_DEFAULT
375static int meabi_flags = EABI_DEFAULT;
376# else
377static int meabi_flags = EF_ARM_EABI_UNKNOWN;
378# endif
379
380static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
381
382bfd_boolean
383arm_is_eabi (void)
384{
385  return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
386}
387#endif
388
389#ifdef OBJ_ELF
390/* Pre-defined "_GLOBAL_OFFSET_TABLE_"	*/
391symbolS * GOT_symbol;
392#endif
393
394/* 0: assemble for ARM,
395   1: assemble for Thumb,
396   2: assemble for Thumb even though target CPU does not support thumb
397      instructions.  */
398static int thumb_mode = 0;
399/* A value distinct from the possible values for thumb_mode that we
400   can use to record whether thumb_mode has been copied into the
401   tc_frag_data field of a frag.  */
402#define MODE_RECORDED (1 << 4)
403
404/* Specifies the intrinsic IT insn behavior mode.  */
405enum implicit_it_mode
406{
407  IMPLICIT_IT_MODE_NEVER  = 0x00,
408  IMPLICIT_IT_MODE_ARM    = 0x01,
409  IMPLICIT_IT_MODE_THUMB  = 0x02,
410  IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
411};
412static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
413
414/* If unified_syntax is true, we are processing the new unified
415   ARM/Thumb syntax.  Important differences from the old ARM mode:
416
417     - Immediate operands do not require a # prefix.
418     - Conditional affixes always appear at the end of the
419       instruction.  (For backward compatibility, those instructions
420       that formerly had them in the middle, continue to accept them
421       there.)
422     - The IT instruction may appear, and if it does is validated
423       against subsequent conditional affixes.  It does not generate
424       machine code.
425
426   Important differences from the old Thumb mode:
427
428     - Immediate operands do not require a # prefix.
429     - Most of the V6T2 instructions are only available in unified mode.
430     - The .N and .W suffixes are recognized and honored (it is an error
431       if they cannot be honored).
432     - All instructions set the flags if and only if they have an 's' affix.
433     - Conditional affixes may be used.  They are validated against
434       preceding IT instructions.  Unlike ARM mode, you cannot use a
435       conditional affix except in the scope of an IT instruction.  */
436
437static bfd_boolean unified_syntax = FALSE;
438
439/* An immediate operand can start with #, and ld*, st*, pld operands
440   can contain [ and ].  We need to tell APP not to elide whitespace
441   before a [, which can appear as the first operand for pld.
442   Likewise, a { can appear as the first operand for push, pop, vld*, etc.  */
443const char arm_symbol_chars[] = "#[]{}";
444
445enum neon_el_type
446{
447  NT_invtype,
448  NT_untyped,
449  NT_integer,
450  NT_float,
451  NT_poly,
452  NT_signed,
453  NT_bfloat,
454  NT_unsigned
455};
456
457struct neon_type_el
458{
459  enum neon_el_type type;
460  unsigned size;
461};
462
463#define NEON_MAX_TYPE_ELS 4
464
465struct neon_type
466{
467  struct neon_type_el el[NEON_MAX_TYPE_ELS];
468  unsigned elems;
469};
470
471enum pred_instruction_type
472{
473   OUTSIDE_PRED_INSN,
474   INSIDE_VPT_INSN,
475   INSIDE_IT_INSN,
476   INSIDE_IT_LAST_INSN,
477   IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
478			      if inside, should be the last one.  */
479   NEUTRAL_IT_INSN,        /* This could be either inside or outside,
480			      i.e. BKPT and NOP.  */
481   IT_INSN,		   /* The IT insn has been parsed.  */
482   VPT_INSN,		   /* The VPT/VPST insn has been parsed.  */
483   MVE_OUTSIDE_PRED_INSN , /* Instruction to indicate a MVE instruction without
484			      a predication code.  */
485   MVE_UNPREDICABLE_INSN   /* MVE instruction that is non-predicable.  */
486};
487
488/* The maximum number of operands we need.  */
489#define ARM_IT_MAX_OPERANDS 6
490#define ARM_IT_MAX_RELOCS 3
491
/* Working state for the instruction currently being assembled: the
   parsed operands, pending relocations and encoding so far.  A single
   global instance (INST, below) is reused for each instruction.  */
struct arm_it
{
  /* Diagnostic to report if assembly of this instruction failed
     (typically one of the BAD_* strings below), or NULL.  */
  const char *	error;
  /* The binary encoding being built up.  */
  unsigned long instruction;
  /* Instruction size in bytes.  */
  int		size;
  /* Size explicitly requested by the syntax, if any — presumably from
     .n/.w suffixes; confirm at the parsing sites.  */
  int		size_req;
  /* Value for the condition field.  */
  int		cond;
  /* "uncond_value" is set to the value in place of the conditional field in
     unconditional versions of the instruction, or -1 if nothing is
     appropriate.  */
  int		uncond_value;
  struct neon_type vectype;
  /* This does not indicate an actual NEON instruction, only that
     the mnemonic accepts neon-style type suffixes.  */
  int		is_neon;
  /* Set to the opcode if the instruction needs relaxation.
     Zero if the instruction is not relaxed.  */
  unsigned long	relax;
  /* Up to ARM_IT_MAX_RELOCS relocations attached to this instruction.  */
  struct
  {
    bfd_reloc_code_real_type type;
    expressionS		     exp;
    int			     pc_rel;
  } relocs[ARM_IT_MAX_RELOCS];

  /* Relation of this instruction to any enclosing IT/VPT block.  */
  enum pred_instruction_type pred_insn_type;

  struct
  {
    unsigned reg;
    signed int imm;
    struct neon_type_el vectype;
    unsigned present	: 1;  /* Operand present.  */
    unsigned isreg	: 1;  /* Operand was a register.  */
    unsigned immisreg	: 2;  /* .imm field is a second register.
				 0: imm, 1: gpr, 2: MVE Q-register.  */
    unsigned isscalar   : 2;  /* Operand is a (SIMD) scalar:
				 0) not scalar,
				 1) Neon scalar,
				 2) MVE scalar.  */
    unsigned immisalign : 1;  /* Immediate is an alignment specifier.  */
    unsigned immisfloat : 1;  /* Immediate was parsed as a float.  */
    /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
       instructions. This allows us to disambiguate ARM <-> vector insns.  */
    unsigned regisimm   : 1;  /* 64-bit immediate, reg forms high 32 bits.  */
    unsigned isvec      : 1;  /* Is a single, double or quad VFP/Neon reg.  */
    unsigned isquad     : 1;  /* Operand is SIMD quad register.  */
    unsigned issingle   : 1;  /* Operand is VFP single-precision register.  */
    unsigned iszr	: 1;  /* Operand is ZR register.  */
    unsigned hasreloc	: 1;  /* Operand has relocation suffix.  */
    unsigned writeback	: 1;  /* Operand has trailing !  */
    unsigned preind	: 1;  /* Preindexed address.  */
    unsigned postind	: 1;  /* Postindexed address.  */
    unsigned negative	: 1;  /* Index register was negated.  */
    unsigned shifted	: 1;  /* Shift applied to operation.  */
    unsigned shift_kind : 3;  /* Shift operation (enum shift_kind).  */
  } operands[ARM_IT_MAX_OPERANDS];
};
550
551static struct arm_it inst;
552
#define NUM_FLOAT_VALS 8

/* Textual forms of the NUM_FLOAT_VALS floating-point constants accepted
   as immediates; the trailing 0 terminates iteration over the table.  */
const char * fp_const[] =
{
  "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
};

/* Littlenum expansions of the fp_const entries, filled in elsewhere —
   presumably during assembler start-up; confirm at the init code.  */
LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
561
562#define FAIL	(-1)
563#define SUCCESS (0)
564
565#define SUFF_S 1
566#define SUFF_D 2
567#define SUFF_E 3
568#define SUFF_P 4
569
570#define CP_T_X	 0x00008000
571#define CP_T_Y	 0x00400000
572
573#define CONDS_BIT	 0x00100000
574#define LOAD_BIT	 0x00100000
575
576#define DOUBLE_LOAD_FLAG 0x00000001
577
578struct asm_cond
579{
580  const char *	 template_name;
581  unsigned long  value;
582};
583
584#define COND_ALWAYS 0xE
585
586struct asm_psr
587{
588  const char *   template_name;
589  unsigned long  field;
590};
591
592struct asm_barrier_opt
593{
594  const char *    template_name;
595  unsigned long   value;
596  const arm_feature_set arch;
597};
598
599/* The bit that distinguishes CPSR and SPSR.  */
600#define SPSR_BIT   (1 << 22)
601
602/* The individual PSR flag bits.  */
603#define PSR_c	(1 << 16)
604#define PSR_x	(1 << 17)
605#define PSR_s	(1 << 18)
606#define PSR_f	(1 << 19)
607
608struct reloc_entry
609{
610  const char *              name;
611  bfd_reloc_code_real_type  reloc;
612};
613
614enum vfp_reg_pos
615{
616  VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
617  VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
618};
619
620enum vfp_ldstm_type
621{
622  VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
623};
624
/* Bits for DEFINED field in neon_typed_alias.  */
#define NTA_HASTYPE  1
#define NTA_HASINDEX 2

/* Extra information attached to a register alias created with .dn/.qn:
   an optional element type and/or scalar index.  */
struct neon_typed_alias
{
  /* Mask of NTA_HASTYPE / NTA_HASINDEX saying which fields are valid.  */
  unsigned char        defined;
  /* Scalar index; valid only when NTA_HASINDEX is set in DEFINED.  */
  unsigned char        index;
  /* Element type; valid only when NTA_HASTYPE is set in DEFINED.  */
  struct neon_type_el  eltype;
};
635
636/* ARM register categories.  This includes coprocessor numbers and various
637   architecture extensions' registers.  Each entry should have an error message
638   in reg_expected_msgs below.  */
639enum arm_reg_type
640{
641  REG_TYPE_RN,
642  REG_TYPE_CP,
643  REG_TYPE_CN,
644  REG_TYPE_FN,
645  REG_TYPE_VFS,
646  REG_TYPE_VFD,
647  REG_TYPE_NQ,
648  REG_TYPE_VFSD,
649  REG_TYPE_NDQ,
650  REG_TYPE_NSD,
651  REG_TYPE_NSDQ,
652  REG_TYPE_VFC,
653  REG_TYPE_MVF,
654  REG_TYPE_MVD,
655  REG_TYPE_MVFX,
656  REG_TYPE_MVDX,
657  REG_TYPE_MVAX,
658  REG_TYPE_MQ,
659  REG_TYPE_DSPSC,
660  REG_TYPE_MMXWR,
661  REG_TYPE_MMXWC,
662  REG_TYPE_MMXWCG,
663  REG_TYPE_XSCALE,
664  REG_TYPE_RNB,
665  REG_TYPE_ZR
666};
667
668/* Structure for a hash table entry for a register.
669   If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
670   information which states whether a vector type or index is specified (for a
671   register alias created with .dn or .qn). Otherwise NEON should be NULL.  */
672struct reg_entry
673{
674  const char *               name;
675  unsigned int               number;
676  unsigned char              type;
677  unsigned char              builtin;
678  struct neon_typed_alias *  neon;
679};
680
681/* Diagnostics used when we don't get a register of the expected type.	*/
682const char * const reg_expected_msgs[] =
683{
684  [REG_TYPE_RN]	    = N_("ARM register expected"),
685  [REG_TYPE_CP]	    = N_("bad or missing co-processor number"),
686  [REG_TYPE_CN]	    = N_("co-processor register expected"),
687  [REG_TYPE_FN]	    = N_("FPA register expected"),
688  [REG_TYPE_VFS]    = N_("VFP single precision register expected"),
689  [REG_TYPE_VFD]    = N_("VFP/Neon double precision register expected"),
690  [REG_TYPE_NQ]	    = N_("Neon quad precision register expected"),
691  [REG_TYPE_VFSD]   = N_("VFP single or double precision register expected"),
692  [REG_TYPE_NDQ]    = N_("Neon double or quad precision register expected"),
693  [REG_TYPE_NSD]    = N_("Neon single or double precision register expected"),
694  [REG_TYPE_NSDQ]   = N_("VFP single, double or Neon quad precision register"
695			 " expected"),
696  [REG_TYPE_VFC]    = N_("VFP system register expected"),
697  [REG_TYPE_MVF]    = N_("Maverick MVF register expected"),
698  [REG_TYPE_MVD]    = N_("Maverick MVD register expected"),
699  [REG_TYPE_MVFX]   = N_("Maverick MVFX register expected"),
700  [REG_TYPE_MVDX]   = N_("Maverick MVDX register expected"),
701  [REG_TYPE_MVAX]   = N_("Maverick MVAX register expected"),
702  [REG_TYPE_DSPSC]  = N_("Maverick DSPSC register expected"),
703  [REG_TYPE_MMXWR]  = N_("iWMMXt data register expected"),
704  [REG_TYPE_MMXWC]  = N_("iWMMXt control register expected"),
705  [REG_TYPE_MMXWCG] = N_("iWMMXt scalar register expected"),
706  [REG_TYPE_XSCALE] = N_("XScale accumulator register expected"),
707  [REG_TYPE_MQ]	    = N_("MVE vector register expected"),
708  [REG_TYPE_RNB]    = N_("")
709};
710
711/* Some well known registers that we refer to directly elsewhere.  */
712#define REG_R12	12
713#define REG_SP	13
714#define REG_LR	14
715#define REG_PC	15
716
717/* ARM instructions take 4bytes in the object file, Thumb instructions
718   take 2:  */
719#define INSN_SIZE	4
720
/* One entry in the assembler's opcode table: a mnemonic template
   together with its ARM and Thumb encodings and encoder callbacks.  */
struct asm_opcode
{
  /* Basic string to match.  */
  const char * template_name;

  /* Parameters to instruction.	 */
  unsigned int operands[8];

  /* Conditional tag - see opcode_lookup.  */
  unsigned int tag : 4;

  /* Basic instruction code.  */
  unsigned int avalue;

  /* Thumb-format instruction code.  */
  unsigned int tvalue;

  /* Which architecture variant provides this instruction.  */
  const arm_feature_set * avariant;
  const arm_feature_set * tvariant;

  /* Function to call to encode instruction in ARM format.  */
  void (* aencode) (void);

  /* Function to call to encode instruction in Thumb format.  */
  void (* tencode) (void);

  /* Indicates whether this instruction may be vector predicated.  */
  unsigned int mayBeVecPred : 1;
};
751
752/* Defines for various bits that we will want to toggle.  */
753#define INST_IMMEDIATE	0x02000000
754#define OFFSET_REG	0x02000000
755#define HWOFFSET_IMM	0x00400000
756#define SHIFT_BY_REG	0x00000010
757#define PRE_INDEX	0x01000000
758#define INDEX_UP	0x00800000
759#define WRITE_BACK	0x00200000
760#define LDM_TYPE_2_OR_3	0x00400000
761#define CPSI_MMOD	0x00020000
762
763#define LITERAL_MASK	0xf000f000
764#define OPCODE_MASK	0xfe1fffff
765#define V4_STR_BIT	0x00000020
766#define VLDR_VMOV_SAME	0x0040f000
767
768#define T2_SUBS_PC_LR	0xf3de8f00
769
770#define DATA_OP_SHIFT	21
771#define SBIT_SHIFT	20
772
773#define T2_OPCODE_MASK	0xfe1fffff
774#define T2_DATA_OP_SHIFT 21
775#define T2_SBIT_SHIFT	 20
776
777#define A_COND_MASK         0xf0000000
778#define A_PUSH_POP_OP_MASK  0x0fff0000
779
/* Opcodes for pushing/popping registers to/from the stack.  */
781#define A1_OPCODE_PUSH    0x092d0000
782#define A2_OPCODE_PUSH    0x052d0004
783#define A2_OPCODE_POP     0x049d0004
784
785/* Codes to distinguish the arithmetic instructions.  */
786#define OPCODE_AND	0
787#define OPCODE_EOR	1
788#define OPCODE_SUB	2
789#define OPCODE_RSB	3
790#define OPCODE_ADD	4
791#define OPCODE_ADC	5
792#define OPCODE_SBC	6
793#define OPCODE_RSC	7
794#define OPCODE_TST	8
795#define OPCODE_TEQ	9
796#define OPCODE_CMP	10
797#define OPCODE_CMN	11
798#define OPCODE_ORR	12
799#define OPCODE_MOV	13
800#define OPCODE_BIC	14
801#define OPCODE_MVN	15
802
803#define T2_OPCODE_AND	0
804#define T2_OPCODE_BIC	1
805#define T2_OPCODE_ORR	2
806#define T2_OPCODE_ORN	3
807#define T2_OPCODE_EOR	4
808#define T2_OPCODE_ADD	8
809#define T2_OPCODE_ADC	10
810#define T2_OPCODE_SBC	11
811#define T2_OPCODE_SUB	13
812#define T2_OPCODE_RSB	14
813
814#define T_OPCODE_MUL 0x4340
815#define T_OPCODE_TST 0x4200
816#define T_OPCODE_CMN 0x42c0
817#define T_OPCODE_NEG 0x4240
818#define T_OPCODE_MVN 0x43c0
819
820#define T_OPCODE_ADD_R3	0x1800
821#define T_OPCODE_SUB_R3 0x1a00
822#define T_OPCODE_ADD_HI 0x4400
823#define T_OPCODE_ADD_ST 0xb000
824#define T_OPCODE_SUB_ST 0xb080
825#define T_OPCODE_ADD_SP 0xa800
826#define T_OPCODE_ADD_PC 0xa000
827#define T_OPCODE_ADD_I8 0x3000
828#define T_OPCODE_SUB_I8 0x3800
829#define T_OPCODE_ADD_I3 0x1c00
830#define T_OPCODE_SUB_I3 0x1e00
831
832#define T_OPCODE_ASR_R	0x4100
833#define T_OPCODE_LSL_R	0x4080
834#define T_OPCODE_LSR_R	0x40c0
835#define T_OPCODE_ROR_R	0x41c0
836#define T_OPCODE_ASR_I	0x1000
837#define T_OPCODE_LSL_I	0x0000
838#define T_OPCODE_LSR_I	0x0800
839
840#define T_OPCODE_MOV_I8	0x2000
841#define T_OPCODE_CMP_I8 0x2800
842#define T_OPCODE_CMP_LR 0x4280
843#define T_OPCODE_MOV_HR 0x4600
844#define T_OPCODE_CMP_HR 0x4500
845
846#define T_OPCODE_LDR_PC 0x4800
847#define T_OPCODE_LDR_SP 0x9800
848#define T_OPCODE_STR_SP 0x9000
849#define T_OPCODE_LDR_IW 0x6800
850#define T_OPCODE_STR_IW 0x6000
851#define T_OPCODE_LDR_IH 0x8800
852#define T_OPCODE_STR_IH 0x8000
853#define T_OPCODE_LDR_IB 0x7800
854#define T_OPCODE_STR_IB 0x7000
855#define T_OPCODE_LDR_RW 0x5800
856#define T_OPCODE_STR_RW 0x5000
857#define T_OPCODE_LDR_RH 0x5a00
858#define T_OPCODE_STR_RH 0x5200
859#define T_OPCODE_LDR_RB 0x5c00
860#define T_OPCODE_STR_RB 0x5400
861
862#define T_OPCODE_PUSH	0xb400
863#define T_OPCODE_POP	0xbc00
864
865#define T_OPCODE_BRANCH 0xe000
866
867#define THUMB_SIZE	2	/* Size of thumb instruction.  */
868#define THUMB_PP_PC_LR 0x0100
869#define THUMB_LOAD_BIT 0x0800
870#define THUMB2_LOAD_BIT 0x00100000
871
872#define BAD_SYNTAX	_("syntax error")
873#define BAD_ARGS	_("bad arguments to instruction")
874#define BAD_SP          _("r13 not allowed here")
875#define BAD_PC		_("r15 not allowed here")
876#define BAD_ODD		_("Odd register not allowed here")
877#define BAD_EVEN	_("Even register not allowed here")
878#define BAD_COND	_("instruction cannot be conditional")
879#define BAD_OVERLAP	_("registers may not be the same")
880#define BAD_HIREG	_("lo register required")
881#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
882#define BAD_ADDR_MODE   _("instruction does not accept this addressing mode")
883#define BAD_BRANCH	_("branch must be last instruction in IT block")
884#define BAD_BRANCH_OFF	_("branch out of range or not a multiple of 2")
885#define BAD_NOT_IT	_("instruction not allowed in IT block")
886#define BAD_NOT_VPT	_("instruction missing MVE vector predication code")
887#define BAD_FPU		_("selected FPU does not support instruction")
888#define BAD_OUT_IT 	_("thumb conditional instruction should be in IT block")
889#define BAD_OUT_VPT	\
890	_("vector predicated instruction should be in VPT/VPST block")
891#define BAD_IT_COND	_("incorrect condition in IT block")
892#define BAD_VPT_COND	_("incorrect condition in VPT/VPST block")
893#define BAD_IT_IT 	_("IT falling in the range of a previous IT block")
894#define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
895#define BAD_PC_ADDRESSING \
896	_("cannot use register index with PC-relative addressing")
897#define BAD_PC_WRITEBACK \
898	_("cannot use writeback with PC-relative addressing")
899#define BAD_RANGE	_("branch out of range")
900#define BAD_FP16	_("selected processor does not support fp16 instruction")
901#define BAD_BF16	_("selected processor does not support bf16 instruction")
902#define UNPRED_REG(R)	_("using " R " results in unpredictable behaviour")
903#define THUMB1_RELOC_ONLY  _("relocation valid in thumb1 code only")
904#define MVE_NOT_IT	_("Warning: instruction is UNPREDICTABLE in an IT " \
905			  "block")
906#define MVE_NOT_VPT	_("Warning: instruction is UNPREDICTABLE in a VPT " \
907			  "block")
908#define MVE_BAD_PC	_("Warning: instruction is UNPREDICTABLE with PC" \
909			  " operand")
910#define MVE_BAD_SP	_("Warning: instruction is UNPREDICTABLE with SP" \
911			  " operand")
912#define BAD_SIMD_TYPE	_("bad type in SIMD instruction")
913#define BAD_MVE_AUTO	\
914  _("GAS auto-detection mode and -march=all is deprecated for MVE, please" \
915    " use a valid -march or -mcpu option.")
916#define BAD_MVE_SRCDEST	_("Warning: 32-bit element size and same destination "\
917			  "and source operands makes instruction UNPREDICTABLE")
918#define BAD_EL_TYPE	_("bad element type for instruction")
919#define MVE_BAD_QREG	_("MVE vector register Q[0..7] expected")
920
921static struct hash_control * arm_ops_hsh;
922static struct hash_control * arm_cond_hsh;
923static struct hash_control * arm_vcond_hsh;
924static struct hash_control * arm_shift_hsh;
925static struct hash_control * arm_psr_hsh;
926static struct hash_control * arm_v7m_psr_hsh;
927static struct hash_control * arm_reg_hsh;
928static struct hash_control * arm_reloc_hsh;
929static struct hash_control * arm_barrier_opt_hsh;
930
931/* Stuff needed to resolve the label ambiguity
932   As:
933     ...
934     label:   <insn>
935   may differ from:
936     ...
937     label:
938	      <insn>  */
939
940symbolS *  last_label_seen;
941static int label_is_thumb_function_name = FALSE;
942
943/* Literal pool structure.  Held on a per-section
944   and per-sub-section basis.  */
945
#define MAX_LITERAL_POOL_SIZE 1024
typedef struct literal_pool
{
  /* The constants collected for this pool, in order of entry.  */
  expressionS	         literals [MAX_LITERAL_POOL_SIZE];
  /* Index of the first unused slot in LITERALS.  */
  unsigned int	         next_free_entry;
  /* Unique id, used to generate the pool's label name.  */
  unsigned int	         id;
  /* Label symbol at which the pool will be emitted.  */
  symbolS *	         symbol;
  segT		         section;
  subsegT	         sub_section;
#ifdef OBJ_ELF
  /* Source location of each literal, for debug line info.  */
  struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
#endif
  /* Next pool in the per-section/sub-section linked list.  */
  struct literal_pool *  next;
  /* Alignment (log2) required when the pool is emitted.  */
  unsigned int		 alignment;
} literal_pool;
961
962/* Pointer to a linked list of literal pools.  */
literal_pool * list_of_pools = NULL;

/* State machine for the .asmfunc/.endasmfunc directives.  */
typedef enum asmfunc_states
{
  OUTSIDE_ASMFUNC,	/* Not within an .asmfunc region.  */
  WAITING_ASMFUNC_NAME,	/* Saw .asmfunc, expecting the function name.  */
  WAITING_ENDASMFUNC	/* Inside the body, expecting .endasmfunc.  */
} asmfunc_states;

static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;
973
974#ifdef OBJ_ELF
975#  define now_pred seg_info (now_seg)->tc_segment_info_data.current_pred
976#else
977static struct current_pred now_pred;
978#endif
979
980static inline int
981now_pred_compatible (int cond)
982{
983  return (cond & ~1) == (now_pred.cc & ~1);
984}
985
986static inline int
987conditional_insn (void)
988{
989  return inst.cond != COND_ALWAYS;
990}
991
992static int in_pred_block (void);
993
994static int handle_pred_state (void);
995
996static void force_automatic_it_block_close (void);
997
998static void it_fsm_post_encode (void);
999
1000#define set_pred_insn_type(type)			\
1001  do						\
1002    {						\
1003      inst.pred_insn_type = type;			\
1004      if (handle_pred_state () == FAIL)		\
1005	return;					\
1006    }						\
1007  while (0)
1008
1009#define set_pred_insn_type_nonvoid(type, failret) \
1010  do						\
1011    {                                           \
1012      inst.pred_insn_type = type;			\
1013      if (handle_pred_state () == FAIL)		\
1014	return failret;				\
1015    }						\
1016  while(0)
1017
1018#define set_pred_insn_type_last()				\
1019  do							\
1020    {							\
1021      if (inst.cond == COND_ALWAYS)			\
1022	set_pred_insn_type (IF_INSIDE_IT_LAST_INSN);	\
1023      else						\
1024	set_pred_insn_type (INSIDE_IT_LAST_INSN);		\
1025    }							\
1026  while (0)
1027
/* Toggle value[pos].  Both arguments are parenthesized so the macro
   expands safely inside larger expressions: an unparenthesized VALUE
   such as "x | y" would otherwise bind to the higher-precedence '^'
   first and yield the wrong result.  */
#define TOGGLE_BIT(value, pos) ((value) ^ (1 << (pos)))
1030
1031/* Pure syntax.	 */
1032
1033/* This array holds the chars that always start a comment.  If the
1034   pre-processor is disabled, these aren't very useful.	 */
1035char arm_comment_chars[] = "@";
1036
1037/* This array holds the chars that only start a comment at the beginning of
1038   a line.  If the line seems to have the form '# 123 filename'
1039   .line and .file directives will appear in the pre-processed output.	*/
1040/* Note that input_file.c hand checks for '#' at the beginning of the
1041   first line of the input file.  This is because the compiler outputs
1042   #NO_APP at the beginning of its output.  */
1043/* Also note that comments like this one will always work.  */
1044const char line_comment_chars[] = "#";
1045
1046char arm_line_separator_chars[] = ";";
1047
1048/* Chars that can be used to separate mant
1049   from exp in floating point numbers.	*/
1050const char EXP_CHARS[] = "eE";
1051
1052/* Chars that mean this number is a floating point constant.  */
1053/* As in 0f12.456  */
1054/* or	 0d1.2345e12  */
1055
1056const char FLT_CHARS[] = "rRsSfFdDxXeEpPHh";
1057
1058/* Prefix characters that indicate the start of an immediate
1059   value.  */
1060#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
1061
1062/* Separator character handling.  */
1063
1064#define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)
1065
/* Which half-precision (16-bit) floating-point encoding is in use.
   Treated as a bitmask: DEFAULT allows either until a directive or
   command-line option pins one down.  */
enum fp_16bit_format
{
  ARM_FP16_FORMAT_IEEE		= 0x1,	/* IEEE 754-2008 binary16.  */
  ARM_FP16_FORMAT_ALTERNATIVE	= 0x2,	/* ARM alternative format (no Inf/NaN).  */
  ARM_FP16_FORMAT_DEFAULT	= 0x3	/* Not yet selected.  */
};

static enum fp_16bit_format fp16_format = ARM_FP16_FORMAT_DEFAULT;
1074
1075
1076static inline int
1077skip_past_char (char ** str, char c)
1078{
1079  /* PR gas/14987: Allow for whitespace before the expected character.  */
1080  skip_whitespace (*str);
1081
1082  if (**str == c)
1083    {
1084      (*str)++;
1085      return SUCCESS;
1086    }
1087  else
1088    return FAIL;
1089}
1090
1091#define skip_past_comma(str) skip_past_char (str, ',')
1092
1093/* Arithmetic expressions (possibly involving symbols).	 */
1094
1095/* Return TRUE if anything in the expression is a bignum.  */
1096
1097static bfd_boolean
1098walk_no_bignums (symbolS * sp)
1099{
1100  if (symbol_get_value_expression (sp)->X_op == O_big)
1101    return TRUE;
1102
1103  if (symbol_get_value_expression (sp)->X_add_symbol)
1104    {
1105      return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
1106	      || (symbol_get_value_expression (sp)->X_op_symbol
1107		  && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
1108    }
1109
1110  return FALSE;
1111}
1112
1113static bfd_boolean in_my_get_expression = FALSE;
1114
1115/* Third argument to my_get_expression.	 */
1116#define GE_NO_PREFIX 0
1117#define GE_IMM_PREFIX 1
1118#define GE_OPT_PREFIX 2
1119/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
1120   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
1121#define GE_OPT_PREFIX_BIG 3
1122
1123static int
1124my_get_expression (expressionS * ep, char ** str, int prefix_mode)
1125{
1126  char * save_in;
1127
1128  /* In unified syntax, all prefixes are optional.  */
1129  if (unified_syntax)
1130    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
1131		  : GE_OPT_PREFIX;
1132
1133  switch (prefix_mode)
1134    {
1135    case GE_NO_PREFIX: break;
1136    case GE_IMM_PREFIX:
1137      if (!is_immediate_prefix (**str))
1138	{
1139	  inst.error = _("immediate expression requires a # prefix");
1140	  return FAIL;
1141	}
1142      (*str)++;
1143      break;
1144    case GE_OPT_PREFIX:
1145    case GE_OPT_PREFIX_BIG:
1146      if (is_immediate_prefix (**str))
1147	(*str)++;
1148      break;
1149    default:
1150      abort ();
1151    }
1152
1153  memset (ep, 0, sizeof (expressionS));
1154
1155  save_in = input_line_pointer;
1156  input_line_pointer = *str;
1157  in_my_get_expression = TRUE;
1158  expression (ep);
1159  in_my_get_expression = FALSE;
1160
1161  if (ep->X_op == O_illegal || ep->X_op == O_absent)
1162    {
1163      /* We found a bad or missing expression in md_operand().  */
1164      *str = input_line_pointer;
1165      input_line_pointer = save_in;
1166      if (inst.error == NULL)
1167	inst.error = (ep->X_op == O_absent
1168		      ? _("missing expression") :_("bad expression"));
1169      return 1;
1170    }
1171
1172  /* Get rid of any bignums now, so that we don't generate an error for which
1173     we can't establish a line number later on.	 Big numbers are never valid
1174     in instructions, which is where this routine is always called.  */
1175  if (prefix_mode != GE_OPT_PREFIX_BIG
1176      && (ep->X_op == O_big
1177	  || (ep->X_add_symbol
1178	      && (walk_no_bignums (ep->X_add_symbol)
1179		  || (ep->X_op_symbol
1180		      && walk_no_bignums (ep->X_op_symbol))))))
1181    {
1182      inst.error = _("invalid constant");
1183      *str = input_line_pointer;
1184      input_line_pointer = save_in;
1185      return 1;
1186    }
1187
1188  *str = input_line_pointer;
1189  input_line_pointer = save_in;
1190  return SUCCESS;
1191}
1192
1193/* Turn a string in input_line_pointer into a floating point constant
1194   of type TYPE, and store the appropriate bytes in *LITP.  The number
1195   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
1196   returned, or NULL on OK.
1197
1198   Note that fp constants aren't represent in the normal way on the ARM.
1199   In big endian mode, things are as expected.	However, in little endian
1200   mode fp constants are big-endian word-wise, and little-endian byte-wise
1201   within the words.  For example, (double) 1.1 in big endian mode is
1202   the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1203   the byte sequence 99 99 f1 3f 9a 99 99 99.
1204
1205   ??? The format of 12 byte floats is uncertain according to gcc's arm.h.  */
1206
const char *
md_atof (int type, char * litP, int * sizeP)
{
  int prec;			/* Result size in LITTLENUMs.  */
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *t;
  int i;

  switch (type)
    {
    /* Half precision: a single littlenum.  */
    case 'H':
    case 'h':
      prec = 1;
      break;

    /* If this is a bfloat16, then parse it slightly differently, as it
       does not follow the IEEE specification for floating point numbers
       exactly.  */
    case 'b':
      {
	FLONUM_TYPE generic_float;

	t = atof_ieee_detail (input_line_pointer, 1, 8, words, &generic_float);

	if (t)
	  input_line_pointer = t;
	else
	  return _("invalid floating point number");

	/* Patch up the special values that bfloat16 encodes differently
	   from what the generic IEEE conversion produced.  */
	switch (generic_float.sign)
	  {
	  /* Is +Inf.  */
	  case 'P':
	    words[0] = 0x7f80;
	    break;

	  /* Is -Inf.  */
	  case 'N':
	    words[0] = 0xff80;
	    break;

	  /* Is NaN.  */
	  /* bfloat16 has two types of NaN - quiet and signalling.
	     Quiet NaN has bit[6] == 1 && faction != 0, whereas
	     signalling NaN's have bit[0] == 0 && fraction != 0.
	     Chosen this specific encoding as it is the same form
	     as used by other IEEE 754 encodings in GAS.  */
	  case 0:
	    words[0] = 0x7fff;
	    break;

	  default:
	    break;
	  }

	*sizeP = 2;

	md_number_to_chars (litP, (valueT) words[0], sizeof (LITTLENUM_TYPE));

	return NULL;
      }
    /* Single precision: two littlenums.  */
    case 'f':
    case 'F':
    case 's':
    case 'S':
      prec = 2;
      break;

    /* Double precision: four littlenums.  */
    case 'd':
    case 'D':
    case 'r':
    case 'R':
      prec = 4;
      break;

    case 'x':
    case 'X':
      prec = 5;
      break;

    /* NOTE(review): 'p'/'P' get the same 5-littlenum size as 'x'/'X'
       here; confirm they are meant to share the extended encoding.  */
    case 'p':
    case 'P':
      prec = 5;
      break;

    default:
      *sizeP = 0;
      return _("Unrecognized or unsupported floating point constant");
    }

  /* NOTE(review): a NULL return from atof_ieee (parse failure) is not
     reported here, and words[] may then be unset -- verify this matches
     the intended (long-standing) behaviour.  */
  t = atof_ieee (input_line_pointer, type, words);
  if (t)
    input_line_pointer = t;
  *sizeP = prec * sizeof (LITTLENUM_TYPE);

  /* Emit the littlenums in the order described in the comment above
     this function: straight through for big-endian (or a single
     littlenum), fully reversed for pure-endian FPUs, and word-swapped
     pairs otherwise.  */
  if (target_big_endian || prec == 1)
    for (i = 0; i < prec; i++)
      {
	md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	litP += sizeof (LITTLENUM_TYPE);
      }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
    for (i = prec - 1; i >= 0; i--)
      {
	md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	litP += sizeof (LITTLENUM_TYPE);
      }
  else
    /* For a 4 byte float the order of elements in `words' is 1 0.
       For an 8 byte float the order is 1 0 3 2.  */
    for (i = 0; i < prec; i += 2)
      {
	md_number_to_chars (litP, (valueT) words[i + 1],
			    sizeof (LITTLENUM_TYPE));
	md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
			    (valueT) words[i], sizeof (LITTLENUM_TYPE));
	litP += 2 * sizeof (LITTLENUM_TYPE);
      }

  return NULL;
}
1328
1329/* We handle all bad expressions here, so that we can report the faulty
1330   instruction in the error message.  */
1331
1332void
1333md_operand (expressionS * exp)
1334{
1335  if (in_my_get_expression)
1336    exp->X_op = O_illegal;
1337}
1338
1339/* Immediate values.  */
1340
1341#ifdef OBJ_ELF
1342/* Generic immediate-value read function for use in directives.
1343   Accepts anything that 'expression' can fold to a constant.
1344   *val receives the number.  */
1345
1346static int
1347immediate_for_directive (int *val)
1348{
1349  expressionS exp;
1350  exp.X_op = O_illegal;
1351
1352  if (is_immediate_prefix (*input_line_pointer))
1353    {
1354      input_line_pointer++;
1355      expression (&exp);
1356    }
1357
1358  if (exp.X_op != O_constant)
1359    {
1360      as_bad (_("expected #constant"));
1361      ignore_rest_of_line ();
1362      return FAIL;
1363    }
1364  *val = exp.X_add_number;
1365  return SUCCESS;
1366}
1367#endif
1368
1369/* Register parsing.  */
1370
1371/* Generic register parser.  CCP points to what should be the
1372   beginning of a register name.  If it is indeed a valid register
1373   name, advance CCP over it and return the reg_entry structure;
1374   otherwise return NULL.  Does not issue diagnostics.	*/
1375
1376static struct reg_entry *
1377arm_reg_parse_multi (char **ccp)
1378{
1379  char *start = *ccp;
1380  char *p;
1381  struct reg_entry *reg;
1382
1383  skip_whitespace (start);
1384
1385#ifdef REGISTER_PREFIX
1386  if (*start != REGISTER_PREFIX)
1387    return NULL;
1388  start++;
1389#endif
1390#ifdef OPTIONAL_REGISTER_PREFIX
1391  if (*start == OPTIONAL_REGISTER_PREFIX)
1392    start++;
1393#endif
1394
1395  p = start;
1396  if (!ISALPHA (*p) || !is_name_beginner (*p))
1397    return NULL;
1398
1399  do
1400    p++;
1401  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1402
1403  reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1404
1405  if (!reg)
1406    return NULL;
1407
1408  *ccp = p;
1409  return reg;
1410}
1411
1412static int
1413arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1414		    enum arm_reg_type type)
1415{
1416  /* Alternative syntaxes are accepted for a few register classes.  */
1417  switch (type)
1418    {
1419    case REG_TYPE_MVF:
1420    case REG_TYPE_MVD:
1421    case REG_TYPE_MVFX:
1422    case REG_TYPE_MVDX:
1423      /* Generic coprocessor register names are allowed for these.  */
1424      if (reg && reg->type == REG_TYPE_CN)
1425	return reg->number;
1426      break;
1427
1428    case REG_TYPE_CP:
1429      /* For backward compatibility, a bare number is valid here.  */
1430      {
1431	unsigned long processor = strtoul (start, ccp, 10);
1432	if (*ccp != start && processor <= 15)
1433	  return processor;
1434      }
1435      /* Fall through.  */
1436
1437    case REG_TYPE_MMXWC:
1438      /* WC includes WCG.  ??? I'm not sure this is true for all
1439	 instructions that take WC registers.  */
1440      if (reg && reg->type == REG_TYPE_MMXWCG)
1441	return reg->number;
1442      break;
1443
1444    default:
1445      break;
1446    }
1447
1448  return FAIL;
1449}
1450
1451/* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1452   return value is the register number or FAIL.  */
1453
1454static int
1455arm_reg_parse (char **ccp, enum arm_reg_type type)
1456{
1457  char *start = *ccp;
1458  struct reg_entry *reg = arm_reg_parse_multi (ccp);
1459  int ret;
1460
1461  /* Do not allow a scalar (reg+index) to parse as a register.  */
1462  if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1463    return FAIL;
1464
1465  if (reg && reg->type == type)
1466    return reg->number;
1467
1468  if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1469    return ret;
1470
1471  *ccp = start;
1472  return FAIL;
1473}
1474
1475/* Parse a Neon type specifier. *STR should point at the leading '.'
1476   character. Does no verification at this stage that the type fits the opcode
1477   properly. E.g.,
1478
1479     .i32.i32.s16
1480     .s32.f32
1481     .u16
1482
1483   Can all be legally parsed by this function.
1484
1485   Fills in neon_type struct pointer with parsed information, and updates STR
1486   to point after the parsed type specifier. Returns SUCCESS if this was a legal
1487   type, FAIL if not.  */
1488
1489static int
1490parse_neon_type (struct neon_type *type, char **str)
1491{
1492  char *ptr = *str;
1493
1494  if (type)
1495    type->elems = 0;
1496
1497  while (type->elems < NEON_MAX_TYPE_ELS)
1498    {
1499      enum neon_el_type thistype = NT_untyped;
1500      unsigned thissize = -1u;
1501
1502      if (*ptr != '.')
1503	break;
1504
1505      ptr++;
1506
1507      /* Just a size without an explicit type.  */
1508      if (ISDIGIT (*ptr))
1509	goto parsesize;
1510
1511      switch (TOLOWER (*ptr))
1512	{
1513	case 'i': thistype = NT_integer; break;
1514	case 'f': thistype = NT_float; break;
1515	case 'p': thistype = NT_poly; break;
1516	case 's': thistype = NT_signed; break;
1517	case 'u': thistype = NT_unsigned; break;
1518	case 'd':
1519	  thistype = NT_float;
1520	  thissize = 64;
1521	  ptr++;
1522	  goto done;
1523	case 'b':
1524	  thistype = NT_bfloat;
1525	  switch (TOLOWER (*(++ptr)))
1526	    {
1527	    case 'f':
1528	      ptr += 1;
1529	      thissize = strtoul (ptr, &ptr, 10);
1530	      if (thissize != 16)
1531		{
1532		  as_bad (_("bad size %d in type specifier"), thissize);
1533		  return FAIL;
1534		}
1535	      goto done;
1536	    case '0': case '1': case '2': case '3': case '4':
1537	    case '5': case '6': case '7': case '8': case '9':
1538	    case ' ': case '.':
1539	      as_bad (_("unexpected type character `b' -- did you mean `bf'?"));
1540	      return FAIL;
1541	    default:
1542	      break;
1543	    }
1544	  break;
1545	default:
1546	  as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1547	  return FAIL;
1548	}
1549
1550      ptr++;
1551
1552      /* .f is an abbreviation for .f32.  */
1553      if (thistype == NT_float && !ISDIGIT (*ptr))
1554	thissize = 32;
1555      else
1556	{
1557	parsesize:
1558	  thissize = strtoul (ptr, &ptr, 10);
1559
1560	  if (thissize != 8 && thissize != 16 && thissize != 32
1561	      && thissize != 64)
1562	    {
1563	      as_bad (_("bad size %d in type specifier"), thissize);
1564	      return FAIL;
1565	    }
1566	}
1567
1568      done:
1569      if (type)
1570	{
1571	  type->el[type->elems].type = thistype;
1572	  type->el[type->elems].size = thissize;
1573	  type->elems++;
1574	}
1575    }
1576
1577  /* Empty/missing type is not a successful parse.  */
1578  if (type->elems == 0)
1579    return FAIL;
1580
1581  *str = ptr;
1582
1583  return SUCCESS;
1584}
1585
1586/* Errors may be set multiple times during parsing or bit encoding
1587   (particularly in the Neon bits), but usually the earliest error which is set
1588   will be the most meaningful. Avoid overwriting it with later (cascading)
1589   errors by calling this function.  */
1590
1591static void
1592first_error (const char *err)
1593{
1594  if (!inst.error)
1595    inst.error = err;
1596}
1597
1598/* Parse a single type, e.g. ".s32", leading period included.  */
1599static int
1600parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1601{
1602  char *str = *ccp;
1603  struct neon_type optype;
1604
1605  if (*str == '.')
1606    {
1607      if (parse_neon_type (&optype, &str) == SUCCESS)
1608	{
1609	  if (optype.elems == 1)
1610	    *vectype = optype.el[0];
1611	  else
1612	    {
1613	      first_error (_("only one type should be specified for operand"));
1614	      return FAIL;
1615	    }
1616	}
1617      else
1618	{
1619	  first_error (_("vector type expected"));
1620	  return FAIL;
1621	}
1622    }
1623  else
1624    return FAIL;
1625
1626  *ccp = str;
1627
1628  return SUCCESS;
1629}
1630
1631/* Special meanings for indices (which have a range of 0-7), which will fit into
1632   a 4-bit integer.  */
1633
1634#define NEON_ALL_LANES		15
1635#define NEON_INTERLEAVE_LANES	14
1636
1637/* Record a use of the given feature.  */
1638static void
1639record_feature_use (const arm_feature_set *feature)
1640{
1641  if (thumb_mode)
1642    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
1643  else
1644    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
1645}
1646
1647/* If the given feature available in the selected CPU, mark it as used.
1648   Returns TRUE iff feature is available.  */
1649static bfd_boolean
1650mark_feature_used (const arm_feature_set *feature)
1651{
1652
1653  /* Do not support the use of MVE only instructions when in auto-detection or
1654     -march=all.  */
1655  if (((feature == &mve_ext) || (feature == &mve_fp_ext))
1656      && ARM_CPU_IS_ANY (cpu_variant))
1657    {
1658      first_error (BAD_MVE_AUTO);
1659      return FALSE;
1660    }
1661  /* Ensure the option is valid on the current architecture.  */
1662  if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
1663    return FALSE;
1664
1665  /* Add the appropriate architecture feature for the barrier option used.
1666     */
1667  record_feature_use (feature);
1668
1669  return TRUE;
1670}
1671
1672/* Parse either a register or a scalar, with an optional type. Return the
1673   register number, and optionally fill in the actual type of the register
1674   when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1675   type/index information in *TYPEINFO.  */
1676
static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start with no type/index information.  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions.  */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_NSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  /* MVE "MQ" registers are the NQ registers, restricted to q0..q7
     unless the d32 extension is available.  */
  if (type == REG_TYPE_MQ)
    {
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	return FAIL;

      /* NOTE(review): REG is known non-NULL here (checked above), so
	 the !reg test is redundant but harmless.  */
      if (!reg || reg->type != REG_TYPE_NQ)
	return FAIL;

      if (reg->number > 14 && !mark_feature_used (&fpu_vfp_ext_d32))
	{
	  first_error (_("expected MVE register [q0..q7]"));
	  return FAIL;
	}
      type = REG_TYPE_NQ;
    }
  /* When MVE is available, plain NQ requests must use the MQ path
     above instead.  */
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
	   && (type == REG_TYPE_NQ))
    return FAIL;


  if (type != reg->type)
    return FAIL;

  /* Inherit any type/index attached to the register by .dn/.qn
     aliases.  */
  if (reg->neon)
    atype = *reg->neon;

  /* An explicit ".type" suffix overrides nothing: redefinition is an
     error.  */
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* Optional "[index]" or "[]" (all lanes) scalar suffix.  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD
	  && !(type == REG_TYPE_VFS
	       && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_2))
	  && !(type == REG_TYPE_NQ
	       && ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)))
	{
	  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	    first_error (_("only D and Q registers may be indexed"));
	  else
	    first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      /* "[]" selects all lanes; otherwise a constant index is
	 required.  */
      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1809
1810/* Like arm_reg_parse, but also allow the following extra features:
1811    - If RTYPE is non-zero, return the (possibly restricted) type of the
1812      register (e.g. Neon double or quad reg when either has been requested).
1813    - If this is a Neon vector type with additional type information, fill
1814      in the struct pointed to by VECTYPE (if non-NULL).
1815   This function will fault on encountering a scalar.  */
1816
1817static int
1818arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1819		     enum arm_reg_type *rtype, struct neon_type_el *vectype)
1820{
1821  struct neon_typed_alias atype;
1822  char *str = *ccp;
1823  int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1824
1825  if (reg == FAIL)
1826    return FAIL;
1827
1828  /* Do not allow regname(... to parse as a register.  */
1829  if (*str == '(')
1830    return FAIL;
1831
1832  /* Do not allow a scalar (reg+index) to parse as a register.  */
1833  if ((atype.defined & NTA_HASINDEX) != 0)
1834    {
1835      first_error (_("register operand expected, but got scalar"));
1836      return FAIL;
1837    }
1838
1839  if (vectype)
1840    *vectype = atype.eltype;
1841
1842  *ccp = str;
1843
1844  return reg;
1845}
1846
1847#define NEON_SCALAR_REG(X)	((X) >> 4)
1848#define NEON_SCALAR_INDEX(X)	((X) & 15)
1849
1850/* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1851   have enough information to be able to do a good job bounds-checking. So, we
1852   just do easy checks here, and do further checks later.  */
1853
1854static int
1855parse_scalar (char **ccp, int elsize, struct neon_type_el *type, enum
1856	      arm_reg_type reg_type)
1857{
1858  int reg;
1859  char *str = *ccp;
1860  struct neon_typed_alias atype;
1861  unsigned reg_size;
1862
1863  reg = parse_typed_reg_or_scalar (&str, reg_type, NULL, &atype);
1864
1865  switch (reg_type)
1866    {
1867    case REG_TYPE_VFS:
1868      reg_size = 32;
1869      break;
1870    case REG_TYPE_VFD:
1871      reg_size = 64;
1872      break;
1873    case REG_TYPE_MQ:
1874      reg_size = 128;
1875      break;
1876    default:
1877      gas_assert (0);
1878      return FAIL;
1879    }
1880
1881  if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1882    return FAIL;
1883
1884  if (reg_type != REG_TYPE_MQ && atype.index == NEON_ALL_LANES)
1885    {
1886      first_error (_("scalar must have an index"));
1887      return FAIL;
1888    }
1889  else if (atype.index >= reg_size / elsize)
1890    {
1891      first_error (_("scalar index out of range"));
1892      return FAIL;
1893    }
1894
1895  if (type)
1896    *type = atype.eltype;
1897
1898  *ccp = str;
1899
1900  return reg * 16 + atype.index;
1901}
1902
/* Types of registers in a list.  */

enum reg_list_els
{
  REGLIST_RN,		/* Core registers, e.g. LDM/STM.  */
  REGLIST_CLRM,		/* Core registers + APSR, for CLRM.  */
  REGLIST_VFP_S,	/* Single-precision VFP registers.  */
  REGLIST_VFP_S_VPR,	/* Single-precision VFP registers + VPR.  */
  REGLIST_VFP_D,	/* Double-precision VFP registers.  */
  REGLIST_VFP_D_VPR,	/* Double-precision VFP registers + VPR.  */
  REGLIST_NEON_D	/* Neon D registers.  */
};
1915
1916/* Parse an ARM register list.  Returns the bitmask, or FAIL.  */
1917
1918static long
1919parse_reg_list (char ** strp, enum reg_list_els etype)
1920{
1921  char *str = *strp;
1922  long range = 0;
1923  int another_range;
1924
1925  gas_assert (etype == REGLIST_RN || etype == REGLIST_CLRM);
1926
1927  /* We come back here if we get ranges concatenated by '+' or '|'.  */
1928  do
1929    {
1930      skip_whitespace (str);
1931
1932      another_range = 0;
1933
1934      if (*str == '{')
1935	{
1936	  int in_range = 0;
1937	  int cur_reg = -1;
1938
1939	  str++;
1940	  do
1941	    {
1942	      int reg;
1943	      const char apsr_str[] = "apsr";
1944	      int apsr_str_len = strlen (apsr_str);
1945
1946	      reg = arm_reg_parse (&str, REG_TYPE_RN);
1947	      if (etype == REGLIST_CLRM)
1948		{
1949		  if (reg == REG_SP || reg == REG_PC)
1950		    reg = FAIL;
1951		  else if (reg == FAIL
1952			   && !strncasecmp (str, apsr_str, apsr_str_len)
1953			   && !ISALPHA (*(str + apsr_str_len)))
1954		    {
1955		      reg = 15;
1956		      str += apsr_str_len;
1957		    }
1958
1959		  if (reg == FAIL)
1960		    {
1961		      first_error (_("r0-r12, lr or APSR expected"));
1962		      return FAIL;
1963		    }
1964		}
1965	      else /* etype == REGLIST_RN.  */
1966		{
1967		  if (reg == FAIL)
1968		    {
1969		      first_error (_(reg_expected_msgs[REGLIST_RN]));
1970		      return FAIL;
1971		    }
1972		}
1973
1974	      if (in_range)
1975		{
1976		  int i;
1977
1978		  if (reg <= cur_reg)
1979		    {
1980		      first_error (_("bad range in register list"));
1981		      return FAIL;
1982		    }
1983
1984		  for (i = cur_reg + 1; i < reg; i++)
1985		    {
1986		      if (range & (1 << i))
1987			as_tsktsk
1988			  (_("Warning: duplicated register (r%d) in register list"),
1989			   i);
1990		      else
1991			range |= 1 << i;
1992		    }
1993		  in_range = 0;
1994		}
1995
1996	      if (range & (1 << reg))
1997		as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1998			   reg);
1999	      else if (reg <= cur_reg)
2000		as_tsktsk (_("Warning: register range not in ascending order"));
2001
2002	      range |= 1 << reg;
2003	      cur_reg = reg;
2004	    }
2005	  while (skip_past_comma (&str) != FAIL
2006		 || (in_range = 1, *str++ == '-'));
2007	  str--;
2008
2009	  if (skip_past_char (&str, '}') == FAIL)
2010	    {
2011	      first_error (_("missing `}'"));
2012	      return FAIL;
2013	    }
2014	}
2015      else if (etype == REGLIST_RN)
2016	{
2017	  expressionS exp;
2018
2019	  if (my_get_expression (&exp, &str, GE_NO_PREFIX))
2020	    return FAIL;
2021
2022	  if (exp.X_op == O_constant)
2023	    {
2024	      if (exp.X_add_number
2025		  != (exp.X_add_number & 0x0000ffff))
2026		{
2027		  inst.error = _("invalid register mask");
2028		  return FAIL;
2029		}
2030
2031	      if ((range & exp.X_add_number) != 0)
2032		{
2033		  int regno = range & exp.X_add_number;
2034
2035		  regno &= -regno;
2036		  regno = (1 << regno) - 1;
2037		  as_tsktsk
2038		    (_("Warning: duplicated register (r%d) in register list"),
2039		     regno);
2040		}
2041
2042	      range |= exp.X_add_number;
2043	    }
2044	  else
2045	    {
2046	      if (inst.relocs[0].type != 0)
2047		{
2048		  inst.error = _("expression too complex");
2049		  return FAIL;
2050		}
2051
2052	      memcpy (&inst.relocs[0].exp, &exp, sizeof (expressionS));
2053	      inst.relocs[0].type = BFD_RELOC_ARM_MULTI;
2054	      inst.relocs[0].pc_rel = 0;
2055	    }
2056	}
2057
2058      if (*str == '|' || *str == '+')
2059	{
2060	  str++;
2061	  another_range = 1;
2062	}
2063    }
2064  while (another_range);
2065
2066  *strp = str;
2067  return range;
2068}
2069
2070/* Parse a VFP register list.  If the string is invalid return FAIL.
2071   Otherwise return the number of registers, and set PBASE to the first
2072   register.  Parses registers of type ETYPE.
2073   If REGLIST_NEON_D is used, several syntax enhancements are enabled:
2074     - Q registers can be used to specify pairs of D registers
2075     - { } can be omitted from around a singleton register list
2076	 FIXME: This is not implemented, as it would require backtracking in
2077	 some cases, e.g.:
2078	   vtbl.8 d3,d4,d5
2079	 This could be done (the meaning isn't really ambiguous), but doesn't
2080	 fit in well with the current parsing framework.
2081     - 32 D registers may be used (also true for VFPv3).
2082   FIXME: Types are ignored in these register lists, which is probably a
2083   bug.  */
2084
static int
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype,
		    bfd_boolean *partial_match)
{
  char *str = *ccp;
  int base_reg;
  int new_base;
  enum arm_reg_type regtype = (enum arm_reg_type) 0;
  int max_regs = 0;
  int count = 0;
  int warned = 0;
  unsigned long mask = 0;
  int i;
  /* TRUE once "vpr" has been parsed; for the _VPR list kinds it must be
     the last element of the list.  */
  bfd_boolean vpr_seen = FALSE;
  bfd_boolean expect_vpr =
    (etype == REGLIST_VFP_S_VPR) || (etype == REGLIST_VFP_D_VPR);

  if (skip_past_char (&str, '{') == FAIL)
    {
      inst.error = _("expecting {");
      return FAIL;
    }

  /* Select the register type to parse and, where it is fixed, the
     number of registers available.  */
  switch (etype)
    {
    case REGLIST_VFP_S:
    case REGLIST_VFP_S_VPR:
      regtype = REG_TYPE_VFS;
      max_regs = 32;
      break;

    case REGLIST_VFP_D:
    case REGLIST_VFP_D_VPR:
      regtype = REG_TYPE_VFD;
      break;

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;
      break;

    default:
      gas_assert (0);
    }

  if (etype != REGLIST_VFP_S && etype != REGLIST_VFP_S_VPR)
    {
      /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  max_regs = 32;
	  /* Record that the D32 extension was actually used.  */
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	max_regs = 16;
    }

  /* BASE_REG tracks the lowest register seen; start it past the end.  */
  base_reg = max_regs;
  *partial_match = FALSE;

  do
    {
      /* SETMASK/ADDREGS are 1 for S/D registers, widened to cover two
	 D registers when a Q register is given.  */
      int setmask = 1, addregs = 1;
      const char vpr_str[] = "vpr";
      int vpr_str_len = strlen (vpr_str);

      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);

      if (expect_vpr)
	{
	  /* Accept a trailing "vpr" (once), otherwise diagnose.  */
	  if (new_base == FAIL
	      && !strncasecmp (str, vpr_str, vpr_str_len)
	      && !ISALPHA (*(str + vpr_str_len))
	      && !vpr_seen)
	    {
	      vpr_seen = TRUE;
	      str += vpr_str_len;
	      if (count == 0)
		base_reg = 0; /* Canonicalize VPR only on d0 with 0 regs.  */
	    }
	  else if (vpr_seen)
	    {
	      first_error (_("VPR expected last"));
	      return FAIL;
	    }
	  else if (new_base == FAIL)
	    {
	      if (regtype == REG_TYPE_VFS)
		first_error (_("VFP single precision register or VPR "
			       "expected"));
	      else /* regtype == REG_TYPE_VFD.  */
		first_error (_("VFP/Neon double precision register or VPR "
			       "expected"));
	      return FAIL;
	    }
	}
      else if (new_base == FAIL)
	{
	  first_error (_(reg_expected_msgs[regtype]));
	  return FAIL;
	}

      /* At least one list element parsed successfully.  */
      *partial_match = TRUE;
      if (vpr_seen)
	continue;

      if (new_base >= max_regs)
	{
	  first_error (_("register out of range in list"));
	  return FAIL;
	}

      /* Note: a value of 2 * n is returned for the register Q<n>.  */
      if (regtype == REG_TYPE_NQ)
	{
	  setmask = 3;
	  addregs = 2;
	}

      if (new_base < base_reg)
	base_reg = new_base;

      if (mask & (setmask << new_base))
	{
	  first_error (_("invalid register list"));
	  return FAIL;
	}

      if ((mask >> new_base) != 0 && ! warned && !vpr_seen)
	{
	  as_tsktsk (_("register list not in ascending order"));
	  warned = 1;
	}

      mask |= setmask << new_base;
      count += addregs;

      if (*str == '-') /* We have the start of a range expression */
	{
	  int high_range;

	  str++;

	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
	      == FAIL)
	    {
	      inst.error = gettext (reg_expected_msgs[regtype]);
	      return FAIL;
	    }

	  if (high_range >= max_regs)
	    {
	      first_error (_("register out of range in list"));
	      return FAIL;
	    }

	  /* For Q registers the upper bound covers both D halves.  */
	  if (regtype == REG_TYPE_NQ)
	    high_range = high_range + 1;

	  if (high_range <= new_base)
	    {
	      inst.error = _("register range not in ascending order");
	      return FAIL;
	    }

	  /* Add each register in the range to the mask.  */
	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
	    {
	      if (mask & (setmask << new_base))
		{
		  inst.error = _("invalid register list");
		  return FAIL;
		}

	      mask |= setmask << new_base;
	      count += addregs;
	    }
	}
    }
  while (skip_past_comma (&str) != FAIL);

  str++;

  /* Sanity check -- should have raised a parse error above.  */
  if ((!vpr_seen && count == 0) || count > max_regs)
    abort ();

  *pbase = base_reg;

  if (expect_vpr && !vpr_seen)
    {
      first_error (_("VPR expected last"));
      return FAIL;
    }

  /* Final test -- the registers must be consecutive.  */
  mask >>= base_reg;
  for (i = 0; i < count; i++)
    {
      if ((mask & (1u << i)) == 0)
	{
	  inst.error = _("non-contiguous register range");
	  return FAIL;
	}
    }

  *ccp = str;

  return count;
}
2298
2299/* True if two alias types are the same.  */
2300
2301static bfd_boolean
2302neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
2303{
2304  if (!a && !b)
2305    return TRUE;
2306
2307  if (!a || !b)
2308    return FALSE;
2309
2310  if (a->defined != b->defined)
2311    return FALSE;
2312
2313  if ((a->defined & NTA_HASTYPE) != 0
2314      && (a->eltype.type != b->eltype.type
2315	  || a->eltype.size != b->eltype.size))
2316    return FALSE;
2317
2318  if ((a->defined & NTA_HASINDEX) != 0
2319      && (a->index != b->index))
2320    return FALSE;
2321
2322  return TRUE;
2323}
2324
2325/* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
2326   The base register is put in *PBASE.
2327   The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
2328   the return value.
2329   The register stride (minus one) is put in bit 4 of the return value.
2330   Bits [6:5] encode the list length (minus one).
2331   The type of the list elements is put in *ELTYPE, if non-NULL.  */
2332
#define NEON_LANE(X)		((X) & 0xf)		/* Lane, or NEON_*_LANES.  */
#define NEON_REG_STRIDE(X)	((((X) >> 4) & 1) + 1)	/* Register stride: 1 or 2.  */
#define NEON_REGLIST_LENGTH(X)	((((X) >> 5) & 3) + 1)	/* List length: 1 to 4.  */
2336
static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   int mve,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;
  int reg_incr = -1;
  int count = 0;
  /* -1 until fixed by the first element: either a lane number from
     [n], NEON_ALL_LANES from [], or NEON_INTERLEAVE_LANES.  */
  int lane = -1;
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  const char *const incr_error = mve ? _("register stride must be 1") :
    _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  /* Type/index of the first element; all others must match it.  */
  struct neon_typed_alias firsttype;
  firsttype.defined = 0;
  firsttype.eltype.type = NT_invtype;
  firsttype.eltype.size = -1;
  firsttype.index = -1;

  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      /* MVE only has Q registers.  */
      if (mve)
	rtype = REG_TYPE_MQ;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First element: record the base register and type.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* Second element fixes the stride.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  /* Subsequent elements must continue the arithmetic sequence.  */
	  first_error (_(incr_error));
	  return FAIL;
	}

      if (! neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
	 modes.  */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (! neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  /* Count all D registers covered by the range (Q counts as 2).  */
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  /* All indexed elements must use the same lane.  */
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures.  */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check.  */
  if (lane == -1 || base_reg == -1 || count < 1 || (!mve && count > 4)
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  /* Pack lane, stride and length as described in the header comment.  */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
2497
2498/* Parse an explicit relocation suffix on an expression.  This is
2499   either nothing, or a word in parentheses.  Note that if !OBJ_ELF,
2500   arm_reloc_hsh contains no entries, so this function can only
2501   succeed if there is no () after the word.  Returns -1 on error,
2502   BFD_RELOC_UNUSED if there wasn't any suffix.	 */
2503
2504static int
2505parse_reloc (char **str)
2506{
2507  struct reloc_entry *r;
2508  char *p, *q;
2509
2510  if (**str != '(')
2511    return BFD_RELOC_UNUSED;
2512
2513  p = *str + 1;
2514  q = p;
2515
2516  while (*q && *q != ')' && *q != ',')
2517    q++;
2518  if (*q != ')')
2519    return -1;
2520
2521  if ((r = (struct reloc_entry *)
2522       hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2523    return -1;
2524
2525  *str = q + 1;
2526  return r->reloc;
2527}
2528
2529/* Directives: register aliases.  */
2530
2531static struct reg_entry *
2532insert_reg_alias (char *str, unsigned number, int type)
2533{
2534  struct reg_entry *new_reg;
2535  const char *name;
2536
2537  if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2538    {
2539      if (new_reg->builtin)
2540	as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2541
2542      /* Only warn about a redefinition if it's not defined as the
2543	 same register.	 */
2544      else if (new_reg->number != number || new_reg->type != type)
2545	as_warn (_("ignoring redefinition of register alias '%s'"), str);
2546
2547      return NULL;
2548    }
2549
2550  name = xstrdup (str);
2551  new_reg = XNEW (struct reg_entry);
2552
2553  new_reg->name = name;
2554  new_reg->number = number;
2555  new_reg->type = type;
2556  new_reg->builtin = FALSE;
2557  new_reg->neon = NULL;
2558
2559  if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2560    abort ();
2561
2562  return new_reg;
2563}
2564
2565static void
2566insert_neon_reg_alias (char *str, int number, int type,
2567		       struct neon_typed_alias *atype)
2568{
2569  struct reg_entry *reg = insert_reg_alias (str, number, type);
2570
2571  if (!reg)
2572    {
2573      first_error (_("attempt to redefine typed alias"));
2574      return;
2575    }
2576
2577  if (atype)
2578    {
2579      reg->neon = XNEW (struct neon_typed_alias);
2580      *reg->neon = *atype;
2581    }
2582}
2583
2584/* Look for the .req directive.	 This is of the form:
2585
2586	new_register_name .req existing_register_name
2587
2588   If we find one, or if it looks sufficiently like one that we want to
2589   handle any error here, return TRUE.  Otherwise return FALSE.  */
2590
static bfd_boolean
create_register_alias (char * newname, char *p)
{
  struct reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  /* The right-hand side must be an already-known register or alias.  */
  old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      /* Convert NBUF in place to upper case for the second alias.  */
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only insert the case variant if it differs from the original.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      /* And likewise the all-lower case variant.  */
      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return TRUE;
}
2663
2664/* Create a Neon typed/indexed register alias using directives, e.g.:
2665     X .dn d5.s32[1]
2666     Y .qn 6.s16
2667     Z .dn d7
2668     T .dn Z[0]
2669   These typed registers can be used instead of the types specified after the
2670   Neon mnemonic, so long as all operands given have types. Types can also be
2671   specified directly, e.g.:
2672     vadd d0.s32, d1.s32, d2.s32  */
2673
static bfd_boolean
create_neon_reg_alias (char *newname, char *p)
{
  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend ATTRIBUTE_UNUSED;
  int namelen;

  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  nameend = p;

  /* Dispatch on the directive: .dn defines a D alias, .qn a Q alias.  */
  if (strncmp (p, " .dn ", 5) == 0)
    basetype = REG_TYPE_VFD;
  else if (strncmp (p, " .qn ", 5) == 0)
    basetype = REG_TYPE_NQ;
  else
    return FALSE;

  p += 5;

  if (*p == '\0')
    return FALSE;

  basereg = arm_reg_parse_multi (&p);

  if (basereg && basereg->type != basetype)
    {
      as_bad (_("bad type for register"));
      return FALSE;
    }

  if (basereg == NULL)
    {
      expressionS exp;
      /* Try parsing as an integer.  */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("expression must be constant"));
	  return FALSE;
	}
      basereg = &mybasereg;
      /* Q register numbers are stored doubled (in D-register units).  */
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
						  : exp.X_add_number;
      basereg->neon = 0;
    }

  /* Aliasing an already-typed alias inherits its type/index.  */
  if (basereg->neon)
    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
    {
      /* We got a type.  */
      if (typeinfo.defined & NTA_HASTYPE)
	{
	  as_bad (_("can't redefine the type of a register alias"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
	{
	  as_bad (_("you must specify a single type only"));
	  return FALSE;
	}
      typeinfo.eltype = ntype.el[0];
    }

  if (skip_past_char (&p, '[') == SUCCESS)
    {
      expressionS exp;
      /* We got a scalar index.  */

      if (typeinfo.defined & NTA_HASINDEX)
	{
	  as_bad (_("can't redefine the index of a scalar alias"));
	  return FALSE;
	}

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
	{
	  as_bad (_("scalar index must be constant"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
	{
	  as_bad (_("expecting ]"));
	  return FALSE;
	}
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  namelen = nameend - newname;
#else
  newname = original_case_string;
  namelen = strlen (newname);
#endif

  namebuf = xmemdup0 (newname, namelen);

  /* As with .req, insert the alias under the given name and under
     the all-uppercase and all-lowercase variants.  */
  insert_neon_reg_alias (namebuf, basereg->number, basetype,
			 typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase.  */
  for (p = namebuf; *p; p++)
    *p = TOUPPER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase.  */
  for (p = namebuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  free (namebuf);
  return TRUE;
}
2812
2813/* Should never be called, as .req goes between the alias and the
2814   register name, not at the beginning of the line.  */
2815
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* Reaching here means .req appeared at the start of a line.  */
  as_bad (_("invalid syntax for .req directive"));
}
2821
/* Like .req, .dn belongs after the alias name, never line-initial.  */

static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .dn directive"));
}
2827
/* Like .req, .qn belongs after the alias name, never line-initial.  */

static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .qn directive"));
}
2833
2834/* The .unreq directive deletes an alias which was previously defined
2835   by .req.  For example:
2836
2837       my_alias .req r11
2838       .unreq my_alias	  */
2839
2840static void
2841s_unreq (int a ATTRIBUTE_UNUSED)
2842{
2843  char * name;
2844  char saved_char;
2845
2846  name = input_line_pointer;
2847
2848  while (*input_line_pointer != 0
2849	 && *input_line_pointer != ' '
2850	 && *input_line_pointer != '\n')
2851    ++input_line_pointer;
2852
2853  saved_char = *input_line_pointer;
2854  *input_line_pointer = 0;
2855
2856  if (!*name)
2857    as_bad (_("invalid syntax for .unreq directive"));
2858  else
2859    {
2860      struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2861							      name);
2862
2863      if (!reg)
2864	as_bad (_("unknown register alias '%s'"), name);
2865      else if (reg->builtin)
2866	as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2867		 name);
2868      else
2869	{
2870	  char * p;
2871	  char * nbuf;
2872
2873	  hash_delete (arm_reg_hsh, name, FALSE);
2874	  free ((char *) reg->name);
2875	  if (reg->neon)
2876	    free (reg->neon);
2877	  free (reg);
2878
2879	  /* Also locate the all upper case and all lower case versions.
2880	     Do not complain if we cannot find one or the other as it
2881	     was probably deleted above.  */
2882
2883	  nbuf = strdup (name);
2884	  for (p = nbuf; *p; p++)
2885	    *p = TOUPPER (*p);
2886	  reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2887	  if (reg)
2888	    {
2889	      hash_delete (arm_reg_hsh, nbuf, FALSE);
2890	      free ((char *) reg->name);
2891	      if (reg->neon)
2892		free (reg->neon);
2893	      free (reg);
2894	    }
2895
2896	  for (p = nbuf; *p; p++)
2897	    *p = TOLOWER (*p);
2898	  reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2899	  if (reg)
2900	    {
2901	      hash_delete (arm_reg_hsh, nbuf, FALSE);
2902	      free ((char *) reg->name);
2903	      if (reg->neon)
2904		free (reg->neon);
2905	      free (reg);
2906	    }
2907
2908	  free (nbuf);
2909	}
2910    }
2911
2912  *input_line_pointer = saved_char;
2913  demand_empty_rest_of_line ();
2914}
2915
2916/* Directives: Instruction set selection.  */
2917
2918#ifdef OBJ_ELF
2919/* This code is to handle mapping symbols as defined in the ARM ELF spec.
2920   (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2921   Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2922   and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped.  */
2923
2924/* Create a new mapping symbol for the transition to STATE.  */
2925
static void
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  /* Choose the AAELF mapping symbol name for the new state.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Mark $a/$t symbols with the ARM/Thumb attributes used elsewhere.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      break;
    }

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
      /* Same offset as the previous mapping symbol: the new one wins.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
2999
3000/* We must sometimes convert a region marked as code to data during
3001   code alignment, if an odd number of bytes have to be padded.  The
3002   code mapping symbol is pushed to an aligned address.  */
3003
static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS *frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      /* If it was also the first mapping symbol of the frag, clear
	 that slot too.  */
      if (value == 0)
	{
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* Mark the padding as data, then restore STATE after BYTES bytes.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
3026
3027static void mapping_state_2 (enum mstate state, int max_chars);
3028
3029/* Set the mapping state to STATE.  Only call this when about to
3030   emit some STATE bytes to the file.  */
3031
3032#define TRANSITION(from, to) (mapstate == (from) && state == (to))
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (state == MAP_ARM || state == MAP_THUMB)
    /*  PR gas/12931
	All ARM instructions require 4-byte alignment.
	(Almost) all Thumb instructions require 2-byte alignment.

	When emitting instructions into any section, mark the section
	appropriately.

	Some Thumb instructions are alignment-sensitive modulo 4 bytes,
	but themselves require 2-byte alignment; this applies to some
	PC- relative forms.  However, these cases will involve implicit
	literal pool generation or an explicit .align >=2, both of
	which will cause the section to me marked with sufficient
	alignment.  Thus, we don't handle those cases here.  */
    record_alignment (now_seg, state == MAP_ARM ? 2 : 1);

  if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
    /* This case will be evaluated later.  */
    return;

  /* No bytes reserved yet, so the symbol goes at the current point.  */
  mapping_state_2 (state, 0);
}
3065
3066/* Same as mapping_state, but MAX_CHARS bytes have already been
3067   allocated.  Put the mapping symbol that far back.  */
3068
static void
mapping_state_2 (enum mstate state, int max_chars)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  /* Only emit mapping symbols into normal (loadable) sections.  */
  if (!SEG_NORMAL (now_seg))
    return;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  /* On the first transition to code, any content already emitted in
     this section must retroactively be marked as data.  */
  if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
	  || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
    {
      struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }

  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  /* Back up over the MAX_CHARS bytes the caller already reserved.  */
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
}
3095#undef TRANSITION
3096#else
3097#define mapping_state(x) ((void)0)
3098#define mapping_state_2(x, y) ((void)0)
3099#endif
3100
3101/* Find the real, Thumb encoded start of a Thumb function.  */
3102
3103#ifdef OBJ_COFF
3104static symbolS *
3105find_real_start (symbolS * symbolP)
3106{
3107  char *       real_start;
3108  const char * name = S_GET_NAME (symbolP);
3109  symbolS *    new_target;
3110
3111  /* This definition must agree with the one in gcc/config/arm/thumb.c.	 */
3112#define STUB_NAME ".real_start_of"
3113
3114  if (name == NULL)
3115    abort ();
3116
3117  /* The compiler may generate BL instructions to local labels because
3118     it needs to perform a branch to a far away location. These labels
3119     do not have a corresponding ".real_start_of" label.  We check
3120     both for S_IS_LOCAL and for a leading dot, to give a way to bypass
3121     the ".real_start_of" convention for nonlocal branches.  */
3122  if (S_IS_LOCAL (symbolP) || name[0] == '.')
3123    return symbolP;
3124
3125  real_start = concat (STUB_NAME, name, NULL);
3126  new_target = symbol_find (real_start);
3127  free (real_start);
3128
3129  if (new_target == NULL)
3130    {
3131      as_warn (_("Failed to find real start of function: %s\n"), name);
3132      new_target = symbolP;
3133    }
3134
3135  return new_target;
3136}
3137#endif
3138
3139static void
3140opcode_select (int width)
3141{
3142  switch (width)
3143    {
3144    case 16:
3145      if (! thumb_mode)
3146	{
3147	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
3148	    as_bad (_("selected processor does not support THUMB opcodes"));
3149
3150	  thumb_mode = 1;
3151	  /* No need to force the alignment, since we will have been
3152	     coming from ARM mode, which is word-aligned.  */
3153	  record_alignment (now_seg, 1);
3154	}
3155      break;
3156
3157    case 32:
3158      if (thumb_mode)
3159	{
3160	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
3161	    as_bad (_("selected processor does not support ARM opcodes"));
3162
3163	  thumb_mode = 0;
3164
3165	  if (!need_pass_2)
3166	    frag_align (2, 0, 0);
3167
3168	  record_alignment (now_seg, 1);
3169	}
3170      break;
3171
3172    default:
3173      as_bad (_("invalid instruction size selected (%d)"), width);
3174    }
3175}
3176
/* Handle the .arm directive: switch to ARM (32-bit) instruction mode.  */

static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (32);
  demand_empty_rest_of_line ();
}
3183
/* Handle the .thumb directive: switch to Thumb (16-bit) instruction mode.  */

static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (16);
  demand_empty_rest_of_line ();
}
3190
3191static void
3192s_code (int unused ATTRIBUTE_UNUSED)
3193{
3194  int temp;
3195
3196  temp = get_absolute_expression ();
3197  switch (temp)
3198    {
3199    case 16:
3200    case 32:
3201      opcode_select (temp);
3202      break;
3203
3204    default:
3205      as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
3206    }
3207}
3208
static void
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* If we are not already in thumb mode go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm for example
     to compile interworking support functions even if the
     target processor should not support interworking.	*/
  if (! thumb_mode)
    {
      /* NOTE(review): the value 2 (rather than 1, as set by
	 opcode_select) presumably marks "forced" Thumb mode for
	 checks elsewhere — confirm against other thumb_mode users.  */
      thumb_mode = 2;
      record_alignment (now_seg, 1);
    }

  demand_empty_rest_of_line ();
}
3225
/* Handle the .thumb_func directive: switch to Thumb mode and flag the
   next label as naming a Thumb function.  */

static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.	 */
  label_is_thumb_function_name = TRUE;
}
3235
3236/* Perform a .set directive, but also mark the alias as
3237   being a thumb function.  */
3238
static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char *    name;
  char	    delim;
  char *    end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  delim	    = get_symbol_name (& name);
  end_name  = input_line_pointer;
  (void) restore_line_pointer (delim);

  if (*input_line_pointer != ',')
    {
      /* Temporarily NUL-terminate NAME so it prints cleanly, then put
	 the delimiter back before discarding the statement.  */
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  /* Find the symbol being aliased, or create it if it does not exist.  */
  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF  */
    }				/* Make a new symbol.  */

  symbol_table_insert (symbolP);

  /* Restore the delimiter that terminated the symbol name.  */
  * end_name = delim;

  /* For .thumb_equiv (EQUIV non-zero), redefining an already-defined
     symbol is an error, just as for .eqv.  */
  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  /* Parse and assign the value, exactly as .set would.  */
  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.	 */

  /* Mark the alias as a Thumb function so interworking treats calls
     through it correctly.  */
  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
3324
3325/* Directives: Mode selection.  */
3326
3327/* .syntax [unified|divided] - choose the new unified syntax
3328   (same for Arm and Thumb encoding, modulo slight differences in what
3329   can be represented) or the old divergent syntax for each mode.  */
3330static void
3331s_syntax (int unused ATTRIBUTE_UNUSED)
3332{
3333  char *name, delim;
3334
3335  delim = get_symbol_name (& name);
3336
3337  if (!strcasecmp (name, "unified"))
3338    unified_syntax = TRUE;
3339  else if (!strcasecmp (name, "divided"))
3340    unified_syntax = FALSE;
3341  else
3342    {
3343      as_bad (_("unrecognized syntax mode \"%s\""), name);
3344      return;
3345    }
3346  (void) restore_line_pointer (delim);
3347  demand_empty_rest_of_line ();
3348}
3349
3350/* Directives: sectioning and alignment.  */
3351
/* Handle the .bss directive: switch output to the BSS section.  */

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.	*/
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();

#ifdef md_elf_section_change_hook
  md_elf_section_change_hook ();
#endif
}
3364
/* Handle the .even directive: align the output to a 2-byte boundary.  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  /* Record the alignment even if no frag was emitted.  */
  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
3376
3377/* Directives: CodeComposer Studio.  */
3378
3379/*  .ref  (for CodeComposer Studio syntax only).  */
3380static void
3381s_ccs_ref (int unused ATTRIBUTE_UNUSED)
3382{
3383  if (codecomposer_syntax)
3384    ignore_rest_of_line ();
3385  else
3386    as_bad (_(".ref pseudo-op only available with -mccs flag."));
3387}
3388
3389/*  If name is not NULL, then it is used for marking the beginning of a
3390    function, whereas if it is NULL then it means the function end.  */
3391static void
3392asmfunc_debug (const char * name)
3393{
3394  static const char * last_name = NULL;
3395
3396  if (name != NULL)
3397    {
3398      gas_assert (last_name == NULL);
3399      last_name = name;
3400
3401      if (debug_type == DEBUG_STABS)
3402         stabs_generate_asm_func (name, name);
3403    }
3404  else
3405    {
3406      gas_assert (last_name != NULL);
3407
3408      if (debug_type == DEBUG_STABS)
3409        stabs_generate_asm_endfunc (last_name, last_name);
3410
3411      last_name = NULL;
3412    }
3413}
3414
3415static void
3416s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
3417{
3418  if (codecomposer_syntax)
3419    {
3420      switch (asmfunc_state)
3421	{
3422	case OUTSIDE_ASMFUNC:
3423	  asmfunc_state = WAITING_ASMFUNC_NAME;
3424	  break;
3425
3426	case WAITING_ASMFUNC_NAME:
3427	  as_bad (_(".asmfunc repeated."));
3428	  break;
3429
3430	case WAITING_ENDASMFUNC:
3431	  as_bad (_(".asmfunc without function."));
3432	  break;
3433	}
3434      demand_empty_rest_of_line ();
3435    }
3436  else
3437    as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3438}
3439
3440static void
3441s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
3442{
3443  if (codecomposer_syntax)
3444    {
3445      switch (asmfunc_state)
3446	{
3447	case OUTSIDE_ASMFUNC:
3448	  as_bad (_(".endasmfunc without a .asmfunc."));
3449	  break;
3450
3451	case WAITING_ASMFUNC_NAME:
3452	  as_bad (_(".endasmfunc without function."));
3453	  break;
3454
3455	case WAITING_ENDASMFUNC:
3456	  asmfunc_state = OUTSIDE_ASMFUNC;
3457	  asmfunc_debug (NULL);
3458	  break;
3459	}
3460      demand_empty_rest_of_line ();
3461    }
3462  else
3463    as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3464}
3465
/* Handle the .def directive (CodeComposer Studio syntax): treated as a
   synonym for .global.  NAME is the pseudo-op table argument forwarded
   to s_globl.  */

static void
s_ccs_def (int name)
{
  if (codecomposer_syntax)
    s_globl (name);
  else
    as_bad (_(".def pseudo-op only available with -mccs flag."));
}
3474
3475/* Directives: Literal pools.  */
3476
3477static literal_pool *
3478find_literal_pool (void)
3479{
3480  literal_pool * pool;
3481
3482  for (pool = list_of_pools; pool != NULL; pool = pool->next)
3483    {
3484      if (pool->section == now_seg
3485	  && pool->sub_section == now_subseg)
3486	break;
3487    }
3488
3489  return pool;
3490}
3491
3492static literal_pool *
3493find_or_make_literal_pool (void)
3494{
3495  /* Next literal pool ID number.  */
3496  static unsigned int latest_pool_num = 1;
3497  literal_pool *      pool;
3498
3499  pool = find_literal_pool ();
3500
3501  if (pool == NULL)
3502    {
3503      /* Create a new pool.  */
3504      pool = XNEW (literal_pool);
3505      if (! pool)
3506	return NULL;
3507
3508      pool->next_free_entry = 0;
3509      pool->section	    = now_seg;
3510      pool->sub_section	    = now_subseg;
3511      pool->next	    = list_of_pools;
3512      pool->symbol	    = NULL;
3513      pool->alignment	    = 2;
3514
3515      /* Add it to the list.  */
3516      list_of_pools = pool;
3517    }
3518
3519  /* New pools, and emptied pools, will have a NULL symbol.  */
3520  if (pool->symbol == NULL)
3521    {
3522      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3523				    (valueT) 0, &zero_address_frag);
3524      pool->id = latest_pool_num ++;
3525    }
3526
3527  /* Done.  */
3528  return pool;
3529}
3530
3531/* Add the literal in the global 'inst'
3532   structure to the relevant literal pool.  */
3533
static int
add_to_lit_pool (unsigned int nbytes)
{
/* X_md field of a pool entry: low byte is the entry size in bytes, the
   next byte flags a 4-byte padding slot inserted for 8-byte alignment.  */
#define PADDING_SLOT 0x1
#define LIT_ENTRY_SIZE_MASK 0xFF
  literal_pool * pool;
  unsigned int entry, pool_size = 0;
  bfd_boolean padding_slot_p = FALSE;
  unsigned imm1 = 0;
  unsigned imm2 = 0;

  /* An 8-byte literal is stored as two 4-byte halves: IMM1 first in the
     pool, IMM2 second; the order is swapped for big-endian targets.  */
  if (nbytes == 8)
    {
      imm1 = inst.operands[1].imm;
      imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
	       : inst.relocs[0].exp.X_unsigned ? 0
	       : ((bfd_int64_t) inst.operands[1].imm) >> 32);
      if (target_big_endian)
	{
	  imm1 = imm2;
	  imm2 = inst.operands[1].imm;
	}
    }

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  POOL_SIZE
     accumulates the byte offset of ENTRY within the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      if (nbytes == 4)
	{
	  /* Existing constant entry with the same value and size?  */
	  if ((pool->literals[entry].X_op == inst.relocs[0].exp.X_op)
	      && (inst.relocs[0].exp.X_op == O_constant)
	      && (pool->literals[entry].X_add_number
		  == inst.relocs[0].exp.X_add_number)
	      && (pool->literals[entry].X_md == nbytes)
	      && (pool->literals[entry].X_unsigned
		  == inst.relocs[0].exp.X_unsigned))
	    break;

	  /* Existing symbolic entry referring to the same expression?  */
	  if ((pool->literals[entry].X_op == inst.relocs[0].exp.X_op)
	      && (inst.relocs[0].exp.X_op == O_symbol)
	      && (pool->literals[entry].X_add_number
		  == inst.relocs[0].exp.X_add_number)
	      && (pool->literals[entry].X_add_symbol
		  == inst.relocs[0].exp.X_add_symbol)
	      && (pool->literals[entry].X_op_symbol
		  == inst.relocs[0].exp.X_op_symbol)
	      && (pool->literals[entry].X_md == nbytes))
	    break;
	}
      /* An 8-byte literal matches a pair of consecutive 4-byte entries
	 at an 8-byte-aligned offset holding its two halves.  */
      else if ((nbytes == 8)
	       && !(pool_size & 0x7)
	       && ((entry + 1) != pool->next_free_entry)
	       && (pool->literals[entry].X_op == O_constant)
	       && (pool->literals[entry].X_add_number == (offsetT) imm1)
	       && (pool->literals[entry].X_unsigned
		   == inst.relocs[0].exp.X_unsigned)
	       && (pool->literals[entry + 1].X_op == O_constant)
	       && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
	       && (pool->literals[entry + 1].X_unsigned
		   == inst.relocs[0].exp.X_unsigned))
	break;

      /* A padding slot left by a previous 8-byte literal can be reused
	 for a 4-byte literal.  */
      padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
      if (padding_slot_p && (nbytes == 4))
	break;

      pool_size += 4;
    }

  /* Do we need to create a new entry?	*/
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      if (nbytes == 8)
	{
	  /* For 8-byte entries, we align to an 8-byte boundary,
	     and split it into two 4-byte entries, because on 32-bit
	     host, 8-byte constants are treated as big num, thus
	     saved in "generic_bignum" which will be overwritten
	     by later assignments.

	     We also need to make sure there is enough space for
	     the split.

	     We also check to make sure the literal operand is a
	     constant number.  */
	  if (!(inst.relocs[0].exp.X_op == O_constant
		|| inst.relocs[0].exp.X_op == O_big))
	    {
	      inst.error = _("invalid type for literal pool");
	      return FAIL;
	    }
	  else if (pool_size & 0x7)
	    {
	      if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
		{
		  inst.error = _("literal pool overflow");
		  return FAIL;
		}

	      /* Insert a 4-byte padding slot to reach 8-byte alignment;
		 it may later be reused for a 4-byte literal.  */
	      pool->literals[entry] = inst.relocs[0].exp;
	      pool->literals[entry].X_op = O_constant;
	      pool->literals[entry].X_add_number = 0;
	      pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
	      pool->next_free_entry += 1;
	      pool_size += 4;
	    }
	  else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
	    {
	      inst.error = _("literal pool overflow");
	      return FAIL;
	    }

	  /* Store the two 4-byte halves as consecutive entries.  */
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm1;
	  pool->literals[entry].X_unsigned = inst.relocs[0].exp.X_unsigned;
	  pool->literals[entry++].X_md = 4;
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm2;
	  pool->literals[entry].X_unsigned = inst.relocs[0].exp.X_unsigned;
	  pool->literals[entry].X_md = 4;
	  pool->alignment = 3;
	  pool->next_free_entry += 1;
	}
      else
	{
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_md = 4;
	}

#ifdef OBJ_ELF
      /* PR ld/12974: Record the location of the first source line to reference
	 this entry in the literal pool.  If it turns out during linking that the
	 symbol does not exist we will be able to give an accurate line number for
	 the (first use of the) missing reference.  */
      if (debug_type == DEBUG_DWARF2)
	dwarf2_where (pool->locs + entry);
#endif
      pool->next_free_entry += 1;
    }
  else if (padding_slot_p)
    {
      /* Reuse the padding slot for this 4-byte literal.  */
      pool->literals[entry] = inst.relocs[0].exp;
      pool->literals[entry].X_md = nbytes;
    }

  /* Rewrite the instruction's operand as a reference to the pool
     anchor symbol plus the entry's byte offset.  */
  inst.relocs[0].exp.X_op	      = O_symbol;
  inst.relocs[0].exp.X_add_number = pool_size;
  inst.relocs[0].exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
3695
3696bfd_boolean
3697tc_start_label_without_colon (void)
3698{
3699  bfd_boolean ret = TRUE;
3700
3701  if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
3702    {
3703      const char *label = input_line_pointer;
3704
3705      while (!is_end_of_line[(int) label[-1]])
3706	--label;
3707
3708      if (*label == '.')
3709	{
3710	  as_bad (_("Invalid label '%s'"), label);
3711	  ret = FALSE;
3712	}
3713
3714      asmfunc_debug (label);
3715
3716      asmfunc_state = WAITING_ENDASMFUNC;
3717    }
3718
3719  return ret;
3720}
3721
3722/* Can't use symbol_new here, so have to create a symbol and then at
3723   a later date assign it a value. That's what these functions do.  */
3724
static void
symbol_locate (symbolS *    symbolP,
	       const char * name,	/* It is copied, the caller can modify.	 */
	       segT	    segment,	/* Segment identifier (SEG_<something>).  */
	       valueT	    valu,	/* Symbol value.  */
	       fragS *	    frag)	/* Associated fragment.	 */
{
  size_t name_length;
  char * preserved_copy_of_name;

  /* Copy NAME into the notes obstack so the symbol owns stable
     storage regardless of what the caller does with NAME.  */
  name_length = strlen (name) + 1;   /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = (char *) obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* Appending after the table is frozen would corrupt it.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS  */
}
3772
/* Handle the .ltorg directive: dump the current (sub)section's literal
   pool at the present location and mark the pool empty.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (pool->alignment, 0, 0);

  record_alignment (now_seg, 2);

#ifdef OBJ_ELF
  /* The pool contents are data; emit a $d mapping symbol here.  */
  seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
  make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
#endif
  /* NOTE(review): the embedded \002 byte presumably makes the pool
     label impossible to clash with any user-written symbol name —
     confirm against how these labels are consumed downstream.  */
  sprintf (sym_name, "$$lit_\002%x", pool->id);

  /* Give the pool's anchor symbol its real name, section and value,
     now that the pool's final location is known.  */
  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
#ifdef OBJ_ELF
      if (debug_type == DEBUG_DWARF2)
	dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
#endif
      /* First output the expression in the instruction to the pool.  */
      emit_expr (&(pool->literals[entry]),
		 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
    }

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
3824
3825#ifdef OBJ_ELF
3826/* Forward declarations for functions below, in the MD interface
3827   section.  */
3828static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3829static valueT create_unwind_entry (int);
3830static void start_unwind_section (const segT, int);
3831static void add_unwind_opcode (valueT, int);
3832static void flush_pending_unwind (void);
3833
3834/* Directives: Data.  */
3835
/* Handle data directives (.word etc.) that may carry a relocation
   suffix such as expr(reloc-name).  NBYTES is the size of each emitted
   value.  */

static void
s_arm_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  /* What follows is data: emit a $d mapping symbol if needed.  */
  mapping_state (MAP_DATA);
  do
    {
      int reloc;
      char *base = input_line_pointer;

      expression (& exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* A symbolic expression may be followed by a relocation
	     suffix; try to parse one.  */
	  char *before_reloc = input_line_pointer;
	  reloc = parse_reloc (&input_line_pointer);
	  if (reloc == -1)
	    {
	      as_bad (_("unrecognized relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else if (reloc == BFD_RELOC_UNUSED)
	    /* No suffix: emit the expression normally.  */
	    emit_expr (&exp, (unsigned int) nbytes);
	  else
	    {
	      reloc_howto_type *howto = (reloc_howto_type *)
		  bfd_reloc_type_lookup (stdoutput,
					 (bfd_reloc_code_real_type) reloc);
	      int size = bfd_get_reloc_size (howto);

	      if (reloc == BFD_RELOC_ARM_PLT32)
		{
		  as_bad (_("(plt) is only valid on branch targets"));
		  reloc = BFD_RELOC_UNUSED;
		  size = 0;
		}

	      if (size > nbytes)
		as_bad (ngettext ("%s relocations do not fit in %d byte",
				  "%s relocations do not fit in %d bytes",
				  nbytes),
			howto->name, nbytes);
	      else
		{
		  /* We've parsed an expression stopping at O_symbol.
		     But there may be more expression left now that we
		     have parsed the relocation marker.  Parse it again.
		     XXX Surely there is a cleaner way to do this.  */
		  char *p = input_line_pointer;
		  int offset;
		  char *save_buf = XNEWVEC (char, input_line_pointer - base);

		  /* Splice the relocation suffix out of the input
		     buffer, re-parse the full expression, then restore
		     the buffer contents.  */
		  memcpy (save_buf, base, input_line_pointer - base);
		  memmove (base + (input_line_pointer - before_reloc),
			   base, before_reloc - base);

		  input_line_pointer = base + (input_line_pointer-before_reloc);
		  expression (&exp);
		  memcpy (base, save_buf, p - base);

		  /* Place the relocation at the least significant end
		     of the NBYTES field.  */
		  offset = nbytes - size;
		  p = frag_more (nbytes);
		  memset (p, 0, nbytes);
		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
			       size, &exp, 0, (enum bfd_reloc_code_real) reloc);
		  free (save_buf);
		}
	    }
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3930
3931/* Emit an expression containing a 32-bit thumb instruction.
3932   Implementation based on put_thumb32_insn.  */
3933
3934static void
3935emit_thumb32_expr (expressionS * exp)
3936{
3937  expressionS exp_high = *exp;
3938
3939  exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3940  emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3941  exp->X_add_number &= 0xffff;
3942  emit_expr (exp, (unsigned int) THUMB_SIZE);
3943}
3944
3945/*  Guess the instruction size based on the opcode.  */
3946
/*  Guess the size in bytes of a Thumb instruction from its opcode:
    2 for a 16-bit encoding, 4 for a 32-bit encoding, 0 if the value
    falls in neither range and the size cannot be determined.  */

static int
thumb_insn_size (int opcode)
{
  unsigned int u = (unsigned int) opcode;

  if (u < 0xe800u)
    return 2;

  return u >= 0xe8000000u ? 4 : 0;
}
3957
/* Emit one .inst operand EXP as an instruction of NBYTES bytes (0 means
   guess the size from the opcode in Thumb mode).  Returns TRUE if
   something was emitted, FALSE on error.  */

static bfd_boolean
emit_insn (expressionS *exp, int nbytes)
{
  int size = 0;

  if (exp->X_op == O_constant)
    {
      size = nbytes;

      /* No explicit width suffix: deduce the size from the opcode.  */
      if (size == 0)
	size = thumb_insn_size (exp->X_add_number);

      if (size != 0)
	{
	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
	    {
	      as_bad (_(".inst.n operand too big. "\
			"Use .inst.w instead"));
	      size = 0;
	    }
	  else
	    {
	      /* Keep the IT/predication state machine consistent, as a
		 real instruction would.  */
	      if (now_pred.state == AUTOMATIC_PRED_BLOCK)
		set_pred_insn_type_nonvoid (OUTSIDE_PRED_INSN, 0);
	      else
		set_pred_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);

	      /* A 32-bit Thumb instruction is emitted as two halfwords
		 on little-endian targets.  */
	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
		emit_thumb32_expr (exp);
	      else
		emit_expr (exp, (unsigned int) size);

	      it_fsm_post_encode ();
	    }
	}
      else
	as_bad (_("cannot determine Thumb instruction size. "	\
		  "Use .inst.n/.inst.w instead"));
    }
  else
    as_bad (_("constant expression required"));

  return (size != 0);
}
4002
4003/* Like s_arm_elf_cons but do not use md_cons_align and
4004   set the mapping state to MAP_ARM/MAP_THUMB.  */
4005
4006static void
4007s_arm_elf_inst (int nbytes)
4008{
4009  if (is_it_end_of_statement ())
4010    {
4011      demand_empty_rest_of_line ();
4012      return;
4013    }
4014
4015  /* Calling mapping_state () here will not change ARM/THUMB,
4016     but will ensure not to be in DATA state.  */
4017
4018  if (thumb_mode)
4019    mapping_state (MAP_THUMB);
4020  else
4021    {
4022      if (nbytes != 0)
4023	{
4024	  as_bad (_("width suffixes are invalid in ARM mode"));
4025	  ignore_rest_of_line ();
4026	  return;
4027	}
4028
4029      nbytes = 4;
4030
4031      mapping_state (MAP_ARM);
4032    }
4033
4034  do
4035    {
4036      expressionS exp;
4037
4038      expression (& exp);
4039
4040      if (! emit_insn (& exp, nbytes))
4041	{
4042	  ignore_rest_of_line ();
4043	  return;
4044	}
4045    }
4046  while (*input_line_pointer++ == ',');
4047
4048  /* Put terminator back into stream.  */
4049  input_line_pointer --;
4050  demand_empty_rest_of_line ();
4051}
4052
4053/* Parse a .rel31 directive.  */
4054
4055static void
4056s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
4057{
4058  expressionS exp;
4059  char *p;
4060  valueT highbit;
4061
4062  highbit = 0;
4063  if (*input_line_pointer == '1')
4064    highbit = 0x80000000;
4065  else if (*input_line_pointer != '0')
4066    as_bad (_("expected 0 or 1"));
4067
4068  input_line_pointer++;
4069  if (*input_line_pointer != ',')
4070    as_bad (_("missing comma"));
4071  input_line_pointer++;
4072
4073#ifdef md_flush_pending_output
4074  md_flush_pending_output ();
4075#endif
4076
4077#ifdef md_cons_align
4078  md_cons_align (4);
4079#endif
4080
4081  mapping_state (MAP_DATA);
4082
4083  expression (&exp);
4084
4085  p = frag_more (4);
4086  md_number_to_chars (p, highbit, 4);
4087  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
4088	       BFD_RELOC_ARM_PREL31);
4089
4090  demand_empty_rest_of_line ();
4091}
4092
4093/* Directives: AEABI stack-unwind tables.  */
4094
4095/* Parse an unwind_fnstart directive.  Simply records the current location.  */
4096
4097static void
4098s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
4099{
4100  demand_empty_rest_of_line ();
4101  if (unwind.proc_start)
4102    {
4103      as_bad (_("duplicate .fnstart directive"));
4104      return;
4105    }
4106
4107  /* Mark the start of the function.  */
4108  unwind.proc_start = expr_build_dot ();
4109
4110  /* Reset the rest of the unwind info.	 */
4111  unwind.opcode_count = 0;
4112  unwind.table_entry = NULL;
4113  unwind.personality_routine = NULL;
4114  unwind.personality_index = -1;
4115  unwind.frame_size = 0;
4116  unwind.fp_offset = 0;
4117  unwind.fp_reg = REG_SP;
4118  unwind.fp_used = 0;
4119  unwind.sp_restored = 0;
4120}
4121
4122
4123/* Parse a handlerdata directive.  Creates the exception handling table entry
4124   for the function.  */
4125
static void
s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  /* Only one table entry per function is allowed.  */
  if (unwind.table_entry)
    as_bad (_("duplicate .handlerdata directive"));

  /* NOTE(review): the non-zero argument presumably selects the
     "have handler data" form — confirm against create_unwind_entry.  */
  create_unwind_entry (1);
}
4138
4139/* Parse an unwind_fnend directive.  Generates the index table entry.  */
4140
/* Parse an unwind_fnend directive.  Generates the index table entry.  */

static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry.  If .handlerdata already created one, VAL
     stays 0 and the index entry will point at it instead.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.	 */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  memset (ptr, 0, 8);
  /* WHERE is the offset of the two-word entry within the section.  */
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      /* A zero-size BFD_RELOC_NONE fixup records the reference without
	 modifying the output.  */
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.	 */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  /* Close the frame: allow a new .fnstart.  */
  unwind.proc_start = NULL;
}
4208
4209
4210/* Parse an unwind_cantunwind directive.  */
4211
4212static void
4213s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
4214{
4215  demand_empty_rest_of_line ();
4216  if (!unwind.proc_start)
4217    as_bad (MISSING_FNSTART);
4218
4219  if (unwind.personality_routine || unwind.personality_index != -1)
4220    as_bad (_("personality routine specified for cantunwind frame"));
4221
4222  unwind.personality_index = -2;
4223}
4224
4225
4226/* Parse a personalityindex directive.	*/
4227
4228static void
4229s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
4230{
4231  expressionS exp;
4232
4233  if (!unwind.proc_start)
4234    as_bad (MISSING_FNSTART);
4235
4236  if (unwind.personality_routine || unwind.personality_index != -1)
4237    as_bad (_("duplicate .personalityindex directive"));
4238
4239  expression (&exp);
4240
4241  if (exp.X_op != O_constant
4242      || exp.X_add_number < 0 || exp.X_add_number > 15)
4243    {
4244      as_bad (_("bad personality routine number"));
4245      ignore_rest_of_line ();
4246      return;
4247    }
4248
4249  unwind.personality_index = exp.X_add_number;
4250
4251  demand_empty_rest_of_line ();
4252}
4253
4254
/* Parse a personality directive.  Records the named symbol as the
   personality routine for the current function.  */

static void
s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
{
  char *name, *p, c;

  /* Only valid inside a .fnstart/.fnend pair.  */
  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  /* At most one personality routine or index per function.  */
  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personality directive"));

  /* Read the symbol name.  C is the delimiter that terminated it, which
     get_symbol_name replaced with a NUL and which is restored below.  */
  c = get_symbol_name (& name);
  p = input_line_pointer;
  if (c == '"')
    ++ input_line_pointer;  /* Step past the closing quote.  */
  unwind.personality_routine = symbol_find_or_make (name);
  *p = c;  /* Undo the temporary NUL termination.  */
  demand_empty_rest_of_line ();
}
4276
4277
/* Parse a directive saving core registers.  */

static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;
  int n;

  /* RANGE is a bit mask of the listed registers: bit N set => rN.  */
  range = parse_reg_list (&input_line_pointer, REGLIST_RN);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      /* Drop the movsp opcode and replace ip (bit 12) with sp (bit 13)
	 in the saved-register mask.  */
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.	 */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  0xbff0 masks out r14, which
	 the short form can encode separately.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.	*/
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.	 */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed: four per saved register.  */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
4353
4354
4355/* Parse a directive saving FPA registers.  */
4356
4357static void
4358s_arm_unwind_save_fpa (int reg)
4359{
4360  expressionS exp;
4361  int num_regs;
4362  valueT op;
4363
4364  /* Get Number of registers to transfer.  */
4365  if (skip_past_comma (&input_line_pointer) != FAIL)
4366    expression (&exp);
4367  else
4368    exp.X_op = O_illegal;
4369
4370  if (exp.X_op != O_constant)
4371    {
4372      as_bad (_("expected , <constant>"));
4373      ignore_rest_of_line ();
4374      return;
4375    }
4376
4377  num_regs = exp.X_add_number;
4378
4379  if (num_regs < 1 || num_regs > 4)
4380    {
4381      as_bad (_("number of registers must be in the range [1:4]"));
4382      ignore_rest_of_line ();
4383      return;
4384    }
4385
4386  demand_empty_rest_of_line ();
4387
4388  if (reg == 4)
4389    {
4390      /* Short form.  */
4391      op = 0xb4 | (num_regs - 1);
4392      add_unwind_opcode (op, 1);
4393    }
4394  else
4395    {
4396      /* Long form.  */
4397      op = 0xc800 | (reg << 4) | (num_regs - 1);
4398      add_unwind_opcode (op, 2);
4399    }
4400  unwind.frame_size += num_regs * 12;
4401}
4402
4403
/* Parse a directive saving VFP registers for ARMv6 and above.  */

static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;
  bfd_boolean partial_match;

  /* COUNT consecutive double-precision registers starting at START.  */
  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D,
			      &partial_match);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      /* The opcode encodes the first saved register relative to d16.  */
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  /* Each D register occupies eight bytes on the stack.  */
  unwind.frame_size += count * 8;
}
4454
4455
4456/* Parse a directive saving VFP registers for pre-ARMv6.  */
4457
4458static void
4459s_arm_unwind_save_vfp (void)
4460{
4461  int count;
4462  unsigned int reg;
4463  valueT op;
4464  bfd_boolean partial_match;
4465
4466  count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D,
4467			      &partial_match);
4468  if (count == FAIL)
4469    {
4470      as_bad (_("expected register list"));
4471      ignore_rest_of_line ();
4472      return;
4473    }
4474
4475  demand_empty_rest_of_line ();
4476
4477  if (reg == 8)
4478    {
4479      /* Short form.  */
4480      op = 0xb8 | (count - 1);
4481      add_unwind_opcode (op, 1);
4482    }
4483  else
4484    {
4485      /* Long form.  */
4486      op = 0xb300 | (reg << 4) | (count - 1);
4487      add_unwind_opcode (op, 2);
4488    }
4489  unwind.frame_size += count * 8 + 4;
4490}
4491
4492
/* Parse a directive saving iWMMXt data registers.  */

static void
s_arm_unwind_save_mmxwr (void)
{
  int reg;
  int hi_reg;
  int i;
  unsigned mask = 0;
  valueT op;

  if (*input_line_pointer == '{')
    input_line_pointer++;

  /* Parse a list of wrN registers and wrM-wrN ranges, building MASK
     (bit N set => wrN saved).  */
  do
    {
      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);

      if (reg == FAIL)
	{
	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
	  goto error;
	}

      /* Any mask bit at or above REG already set means the list is not
	 strictly ascending.  */
      if (mask >> reg)
	as_tsktsk (_("register list not in ascending order"));
      mask |= 1 << reg;

      if (*input_line_pointer == '-')
	{
	  /* A range wrM-wrN: set all bits M..N.  */
	  input_line_pointer++;
	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
	  if (hi_reg == FAIL)
	    {
	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
	      goto error;
	    }
	  else if (reg >= hi_reg)
	    {
	      as_bad (_("bad register range"));
	      goto error;
	    }
	  for (; reg < hi_reg; reg++)
	    mask |= 1 << reg;
	}
    }
  while (skip_past_comma (&input_line_pointer) != FAIL);

  skip_past_char (&input_line_pointer, '}');

  demand_empty_rest_of_line ();

  /* Generate any deferred opcodes because we're going to be looking at
     the list.	*/
  flush_pending_unwind ();

  /* Each saved iWMMXt data register occupies eight bytes.  */
  for (i = 0; i < 16; i++)
    {
      if (mask & (1 << i))
	unwind.frame_size += 8;
    }

  /* Attempt to combine with a previous opcode.	 We do this because gcc
     likes to output separate unwind directives for a single block of
     registers.	 */
  if (unwind.opcode_count > 0)
    {
      i = unwind.opcodes[unwind.opcode_count - 1];
      if ((i & 0xf8) == 0xc0)
	{
	  i &= 7;
	  /* Only merge if the blocks are contiguous.  */
	  if (i < 6)
	    {
	      /* Previous opcode was a short-form save of wr10..wr(10+i);
		 merge when the new list's registers above wr8 are exactly
		 wr9, i.e. they extend that block downwards.  */
	      if ((mask & 0xfe00) == (1 << 9))
		{
		  mask |= ((1 << (i + 11)) - 1) & 0xfc00;
		  unwind.opcode_count--;
		}
	    }
	  else if (i == 6 && unwind.opcode_count >= 2)
	    {
	      /* Previous was a long-form opcode; its low byte holds the
		 start register (high nibble) and count-1 (low nibble).  */
	      i = unwind.opcodes[unwind.opcode_count - 2];
	      reg = i >> 4;
	      i &= 0xf;

	      /* Merge only when the new list's top register is exactly one
		 below the previous block's start register.  */
	      op = 0xffff << (reg - 1);
	      if (reg > 0
		  && ((mask & op) == (1u << (reg - 1))))
		{
		  op = (1 << (reg + i + 1)) - 1;
		  op &= ~((1 << reg) - 1);
		  mask |= op;
		  unwind.opcode_count -= 2;
		}
	    }
	}
    }

  hi_reg = 15;
  /* We want to generate opcodes in the order the registers have been
     saved, ie. descending order.  */
  for (reg = 15; reg >= -1; reg--)
    {
      /* Save registers in blocks.  */
      if (reg < 0
	  || !(mask & (1 << reg)))
	{
	  /* We found an unsaved reg.  Generate opcodes to save the
	     preceding block.	*/
	  if (reg != hi_reg)
	    {
	      if (reg == 9)
		{
		  /* Short form.  */
		  op = 0xc0 | (hi_reg - 10);
		  add_unwind_opcode (op, 1);
		}
	      else
		{
		  /* Long form.	 */
		  op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
		  add_unwind_opcode (op, 2);
		}
	    }
	  hi_reg = reg - 1;
	}
    }

  return;
error:
  ignore_rest_of_line ();
}
4626
4627static void
4628s_arm_unwind_save_mmxwcg (void)
4629{
4630  int reg;
4631  int hi_reg;
4632  unsigned mask = 0;
4633  valueT op;
4634
4635  if (*input_line_pointer == '{')
4636    input_line_pointer++;
4637
4638  skip_whitespace (input_line_pointer);
4639
4640  do
4641    {
4642      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4643
4644      if (reg == FAIL)
4645	{
4646	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4647	  goto error;
4648	}
4649
4650      reg -= 8;
4651      if (mask >> reg)
4652	as_tsktsk (_("register list not in ascending order"));
4653      mask |= 1 << reg;
4654
4655      if (*input_line_pointer == '-')
4656	{
4657	  input_line_pointer++;
4658	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4659	  if (hi_reg == FAIL)
4660	    {
4661	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4662	      goto error;
4663	    }
4664	  else if (reg >= hi_reg)
4665	    {
4666	      as_bad (_("bad register range"));
4667	      goto error;
4668	    }
4669	  for (; reg < hi_reg; reg++)
4670	    mask |= 1 << reg;
4671	}
4672    }
4673  while (skip_past_comma (&input_line_pointer) != FAIL);
4674
4675  skip_past_char (&input_line_pointer, '}');
4676
4677  demand_empty_rest_of_line ();
4678
4679  /* Generate any deferred opcodes because we're going to be looking at
4680     the list.	*/
4681  flush_pending_unwind ();
4682
4683  for (reg = 0; reg < 16; reg++)
4684    {
4685      if (mask & (1 << reg))
4686	unwind.frame_size += 4;
4687    }
4688  op = 0xc700 | mask;
4689  add_unwind_opcode (op, 2);
4690  return;
4691error:
4692  ignore_rest_of_line ();
4693}
4694
4695
4696/* Parse an unwind_save directive.
4697   If the argument is non-zero, this is a .vsave directive.  */
4698
4699static void
4700s_arm_unwind_save (int arch_v6)
4701{
4702  char *peek;
4703  struct reg_entry *reg;
4704  bfd_boolean had_brace = FALSE;
4705
4706  if (!unwind.proc_start)
4707    as_bad (MISSING_FNSTART);
4708
4709  /* Figure out what sort of save we have.  */
4710  peek = input_line_pointer;
4711
4712  if (*peek == '{')
4713    {
4714      had_brace = TRUE;
4715      peek++;
4716    }
4717
4718  reg = arm_reg_parse_multi (&peek);
4719
4720  if (!reg)
4721    {
4722      as_bad (_("register expected"));
4723      ignore_rest_of_line ();
4724      return;
4725    }
4726
4727  switch (reg->type)
4728    {
4729    case REG_TYPE_FN:
4730      if (had_brace)
4731	{
4732	  as_bad (_("FPA .unwind_save does not take a register list"));
4733	  ignore_rest_of_line ();
4734	  return;
4735	}
4736      input_line_pointer = peek;
4737      s_arm_unwind_save_fpa (reg->number);
4738      return;
4739
4740    case REG_TYPE_RN:
4741      s_arm_unwind_save_core ();
4742      return;
4743
4744    case REG_TYPE_VFD:
4745      if (arch_v6)
4746	s_arm_unwind_save_vfp_armv6 ();
4747      else
4748	s_arm_unwind_save_vfp ();
4749      return;
4750
4751    case REG_TYPE_MMXWR:
4752      s_arm_unwind_save_mmxwr ();
4753      return;
4754
4755    case REG_TYPE_MMXWCG:
4756      s_arm_unwind_save_mmxwcg ();
4757      return;
4758
4759    default:
4760      as_bad (_(".unwind_save does not support this kind of register"));
4761      ignore_rest_of_line ();
4762    }
4763}
4764
4765
/* Parse an unwind_movsp directive.  */

static void
s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
{
  int reg;
  valueT op;
  int offset;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
  if (reg == FAIL)
    {
      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
      ignore_rest_of_line ();
      return;
    }

  /* Optional constant.	 */
  if (skip_past_comma (&input_line_pointer) != FAIL)
    {
      if (immediate_for_directive (&offset) == FAIL)
	return;
    }
  else
    offset = 0;

  demand_empty_rest_of_line ();

  if (reg == REG_SP || reg == REG_PC)
    {
      as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
      return;
    }

  /* .movsp is only valid while the frame is still addressed via sp
     (i.e. no frame pointer has been established yet).  */
  if (unwind.fp_reg != REG_SP)
    as_bad (_("unexpected .unwind_movsp directive"));

  /* Generate opcode to restore the value: 0x9N = "set sp from rN".  */
  op = 0x90 | reg;
  add_unwind_opcode (op, 1);

  /* Record the information for later.	*/
  unwind.fp_reg = reg;
  unwind.fp_offset = unwind.frame_size - offset;
  unwind.sp_restored = 1;
}
4815
4816/* Parse an unwind_pad directive.  */
4817
4818static void
4819s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4820{
4821  int offset;
4822
4823  if (!unwind.proc_start)
4824    as_bad (MISSING_FNSTART);
4825
4826  if (immediate_for_directive (&offset) == FAIL)
4827    return;
4828
4829  if (offset & 3)
4830    {
4831      as_bad (_("stack increment must be multiple of 4"));
4832      ignore_rest_of_line ();
4833      return;
4834    }
4835
4836  /* Don't generate any opcodes, just record the details for later.  */
4837  unwind.frame_size += offset;
4838  unwind.pending_offset += offset;
4839
4840  demand_empty_rest_of_line ();
4841}
4842
4843/* Parse an unwind_setfp directive.  */
4844
4845static void
4846s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4847{
4848  int sp_reg;
4849  int fp_reg;
4850  int offset;
4851
4852  if (!unwind.proc_start)
4853    as_bad (MISSING_FNSTART);
4854
4855  fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4856  if (skip_past_comma (&input_line_pointer) == FAIL)
4857    sp_reg = FAIL;
4858  else
4859    sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4860
4861  if (fp_reg == FAIL || sp_reg == FAIL)
4862    {
4863      as_bad (_("expected <reg>, <reg>"));
4864      ignore_rest_of_line ();
4865      return;
4866    }
4867
4868  /* Optional constant.	 */
4869  if (skip_past_comma (&input_line_pointer) != FAIL)
4870    {
4871      if (immediate_for_directive (&offset) == FAIL)
4872	return;
4873    }
4874  else
4875    offset = 0;
4876
4877  demand_empty_rest_of_line ();
4878
4879  if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4880    {
4881      as_bad (_("register must be either sp or set by a previous"
4882		"unwind_movsp directive"));
4883      return;
4884    }
4885
4886  /* Don't generate any opcodes, just record the information for later.	 */
4887  unwind.fp_reg = fp_reg;
4888  unwind.fp_used = 1;
4889  if (sp_reg == REG_SP)
4890    unwind.fp_offset = unwind.frame_size - offset;
4891  else
4892    unwind.fp_offset -= offset;
4893}
4894
4895/* Parse an unwind_raw directive.  */
4896
4897static void
4898s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4899{
4900  expressionS exp;
4901  /* This is an arbitrary limit.	 */
4902  unsigned char op[16];
4903  int count;
4904
4905  if (!unwind.proc_start)
4906    as_bad (MISSING_FNSTART);
4907
4908  expression (&exp);
4909  if (exp.X_op == O_constant
4910      && skip_past_comma (&input_line_pointer) != FAIL)
4911    {
4912      unwind.frame_size += exp.X_add_number;
4913      expression (&exp);
4914    }
4915  else
4916    exp.X_op = O_illegal;
4917
4918  if (exp.X_op != O_constant)
4919    {
4920      as_bad (_("expected <offset>, <opcode>"));
4921      ignore_rest_of_line ();
4922      return;
4923    }
4924
4925  count = 0;
4926
4927  /* Parse the opcode.	*/
4928  for (;;)
4929    {
4930      if (count >= 16)
4931	{
4932	  as_bad (_("unwind opcode too long"));
4933	  ignore_rest_of_line ();
4934	}
4935      if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4936	{
4937	  as_bad (_("invalid unwind opcode"));
4938	  ignore_rest_of_line ();
4939	  return;
4940	}
4941      op[count++] = exp.X_add_number;
4942
4943      /* Parse the next byte.  */
4944      if (skip_past_comma (&input_line_pointer) == FAIL)
4945	break;
4946
4947      expression (&exp);
4948    }
4949
4950  /* Add the opcode bytes in reverse order.  */
4951  while (count--)
4952    add_unwind_opcode (op[count], 1);
4953
4954  demand_empty_rest_of_line ();
4955}
4956
4957
4958/* Parse a .eabi_attribute directive.  */
4959
4960static void
4961s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4962{
4963  int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
4964
4965  if (tag >= 0 && tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4966    attributes_set_explicitly[tag] = 1;
4967}
4968
/* Emit a tls fix for the symbol.  */

static void
s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
{
  char *p;
  expressionS exp;
#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* P is the current end of the frag's data; the fix is attached at that
     offset without emitting any bytes here.  */
  p = obstack_next_free (&frchain_now->frch_obstack);
  /* Pick the Thumb or ARM flavour of the TLS descriptor-sequence reloc
     according to the current instruction set mode.  */
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
	       thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
	       : BFD_RELOC_ARM_TLS_DESCSEQ);
}
4992#endif /* OBJ_ELF */
4993
4994static void s_arm_arch (int);
4995static void s_arm_object_arch (int);
4996static void s_arm_cpu (int);
4997static void s_arm_fpu (int);
4998static void s_arm_arch_extension (int);
4999
5000#ifdef TE_PE
5001
5002static void
5003pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
5004{
5005  expressionS exp;
5006
5007  do
5008    {
5009      expression (&exp);
5010      if (exp.X_op == O_symbol)
5011	exp.X_op = O_secrel;
5012
5013      emit_expr (&exp, 4);
5014    }
5015  while (*input_line_pointer++ == ',');
5016
5017  input_line_pointer--;
5018  demand_empty_rest_of_line ();
5019}
5020#endif /* TE_PE */
5021
5022int
5023arm_is_largest_exponent_ok (int precision)
5024{
5025  /* precision == 1 ensures that this will only return
5026     true for 16 bit floats.  */
5027  return (precision == 1) && (fp16_format == ARM_FP16_FORMAT_ALTERNATIVE);
5028}
5029
/* Handle the .float16_format directive: select the IEEE or alternative
   encoding for 16-bit floats.  The format may only be chosen once.  */

static void
set_fp16_format (int dummy ATTRIBUTE_UNUSED)
{
  char saved_char;
  char* name;
  enum fp_16bit_format new_format;

  new_format = ARM_FP16_FORMAT_DEFAULT;

  /* Isolate the argument word by temporarily NUL-terminating it in the
     input buffer; the overwritten character is restored at cleanup.  */
  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;

  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* Accepted arguments are "ieee" and "alternative" (case-insensitive).  */
  if (strcasecmp (name, "ieee") == 0)
    new_format = ARM_FP16_FORMAT_IEEE;
  else if (strcasecmp (name, "alternative") == 0)
    new_format = ARM_FP16_FORMAT_ALTERNATIVE;
  else
    {
      as_bad (_("unrecognised float16 format \"%s\""), name);
      goto cleanup;
    }

  /* Only set fp16_format if it is still the default (aka not already
     been set yet).  */
  if (fp16_format == ARM_FP16_FORMAT_DEFAULT)
    fp16_format = new_format;
  else
    {
      /* Re-stating the same format is harmless; only conflicts warn.  */
      if (new_format != fp16_format)
	as_warn (_("float16 format cannot be set more than once, ignoring."));
    }

cleanup:
  /* Restore the patched character and skip the rest of the line.  */
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
5070
5071/* This table describes all the machine specific pseudo-ops the assembler
5072   has to support.  The fields are:
5073     pseudo-op name without dot
5074     function to call to execute this pseudo-op
5075     Integer arg to pass to the function.  */
5076
const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.	 */
  { "req",	   s_req,	  0 },
  /* Following two are likewise never called.  */
  { "dn",	   s_dn,          0 },
  { "qn",          s_qn,          0 },
  { "unreq",	   s_unreq,	  0 },
  { "bss",	   s_bss,	  0 },
  { "align",	   s_align_ptwo,  2 },
  { "arm",	   s_arm,	  0 },
  { "thumb",	   s_thumb,	  0 },
  { "code",	   s_code,	  0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func",  s_thumb_func,  0 },
  { "thumb_set",   s_thumb_set,	  0 },
  { "even",	   s_even,	  0 },
  { "ltorg",	   s_ltorg,	  0 },
  { "pool",	   s_ltorg,	  0 },
  { "syntax",	   s_syntax,	  0 },
  /* Architecture/CPU selection directives.  */
  { "cpu",	   s_arm_cpu,	  0 },
  { "arch",	   s_arm_arch,	  0 },
  { "object_arch", s_arm_object_arch,	0 },
  { "fpu",	   s_arm_fpu,	  0 },
  { "arch_extension", s_arm_arch_extension, 0 },
#ifdef OBJ_ELF
  { "word",	        s_arm_elf_cons, 4 },
  { "long",	        s_arm_elf_cons, 4 },
  { "inst.n",           s_arm_elf_inst, 2 },
  { "inst.w",           s_arm_elf_inst, 4 },
  { "inst",             s_arm_elf_inst, 0 },
  { "rel31",	        s_arm_rel31,	  0 },
  /* ARM EHABI unwind-table directives.  */
  { "fnstart",		s_arm_unwind_fnstart,	0 },
  { "fnend",		s_arm_unwind_fnend,	0 },
  { "cantunwind",	s_arm_unwind_cantunwind, 0 },
  { "personality",	s_arm_unwind_personality, 0 },
  { "personalityindex",	s_arm_unwind_personalityindex, 0 },
  { "handlerdata",	s_arm_unwind_handlerdata, 0 },
  { "save",		s_arm_unwind_save,	0 },
  { "vsave",		s_arm_unwind_save,	1 },
  { "movsp",		s_arm_unwind_movsp,	0 },
  { "pad",		s_arm_unwind_pad,	0 },
  { "setfp",		s_arm_unwind_setfp,	0 },
  { "unwind_raw",	s_arm_unwind_raw,	0 },
  { "eabi_attribute",	s_arm_eabi_attribute,	0 },
  { "tlsdescseq",	s_arm_tls_descseq,      0 },
#else
  { "word",	   cons, 4},

  /* These are used for dwarf.  */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2.  */
  { "file", dwarf2_directive_file, 0 },
  { "loc",  dwarf2_directive_loc,  0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend",	   float_cons, 'x' },
  { "ldouble",	   float_cons, 'x' },
  { "packed",	   float_cons, 'p' },
  { "bfloat16",	   float_cons, 'b' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif

  /* These are for compatibility with CodeComposer Studio.  */
  {"ref",          s_ccs_ref,        0},
  {"def",          s_ccs_def,        0},
  {"asmfunc",      s_ccs_asmfunc,    0},
  {"endasmfunc",   s_ccs_endasmfunc, 0},

  /* Half-precision float support.  */
  {"float16", float_cons, 'h' },
  {"float16_format", set_fp16_format, 0 },

  { 0, 0, 0 }
};
5154
5155/* Parser functions used exclusively in instruction operands.  */
5156
5157/* Generic immediate-value read function for use in insn parsing.
5158   STR points to the beginning of the immediate (the leading #);
5159   VAL receives the value; if the value is outside [MIN, MAX]
5160   issue an error.  PREFIX_OPT is true if the immediate prefix is
5161   optional.  */
5162
5163static int
5164parse_immediate (char **str, int *val, int min, int max,
5165		 bfd_boolean prefix_opt)
5166{
5167  expressionS exp;
5168
5169  my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
5170  if (exp.X_op != O_constant)
5171    {
5172      inst.error = _("constant expression required");
5173      return FAIL;
5174    }
5175
5176  if (exp.X_add_number < min || exp.X_add_number > max)
5177    {
5178      inst.error = _("immediate value out of range");
5179      return FAIL;
5180    }
5181
5182  *val = exp.X_add_number;
5183  return SUCCESS;
5184}
5185
/* Less-generic immediate-value read function with the possibility of loading a
   big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
   instructions. Puts the result directly in inst.operands[i]: the low 32 bits
   in .imm, the high 32 bits (if any) in .reg with .regisimm set.  If IN_EXP is
   non-NULL the parsed expression is stored there too; ALLOW_SYMBOL_P permits a
   bare symbol reference.  Returns SUCCESS or FAIL.  */

static int
parse_big_immediate (char **str, int i, expressionS *in_exp,
		     bfd_boolean allow_symbol_p)
{
  expressionS exp;
  expressionS *exp_p = in_exp ? in_exp : &exp;
  char *ptr = *str;

  my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);

  if (exp_p->X_op == O_constant)
    {
      inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.  */
	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
				  & 0xffffffff);
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp_p->X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
    {
      /* For O_big, X_add_number is the littlenum count, not a value.  */
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;

      /* Bignums have their least significant bits in
	 generic_bignum[0]. Make sure we put 32 bits in imm and
	 32 bits in reg,  in a (hopefully) portable way.  */
      gas_assert (parts != 0);

      /* Make sure that the number is not too big.
	 PR 11972: Bignums can now be sign-extended to the
	 size of a .octa so check that the out of range bits
	 are all zero or all one.  */
      if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
	{
	  LITTLENUM_TYPE m = -1;

	  if (generic_bignum[parts * 2] != 0
	      && generic_bignum[parts * 2] != m)
	    return FAIL;

	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
	    if (generic_bignum[j] != generic_bignum[j-1])
	      return FAIL;
	}

      /* Assemble the low 32 bits into .imm ...  */
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      /* ... and the next 32 bits into .reg.  */
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
5258
/* Returns the pseudo-register number of an FPA immediate constant
   (8 + the index into fp_values/fp_const of the matching value),
   or FAIL if there isn't a valid constant here.  */

static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *	 save_in;
  expressionS	 exp;
  int		 i;
  int		 j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])
	    return i + 8;
	  /* Not a whole-operand match; back up and keep looking.  */
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      /* Compare the parsed littlenums against each known FPA value.  */
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* A negative X_add_number on O_big denotes a floating-point
	 bignum held in generic_floating_point_number.  */
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
#define X_PRECISION 5
#define E_PRECISION 15L
      if (gen_to_words (words, X_PRECISION, E_PRECISION) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  /* No match: restore input_line_pointer and report failure.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}
5351
/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000.  */

static int
is_quarter_float (unsigned imm)
{
  /* Bits 30..25 must be the complement-extension of bit 29: either
     0111110 or 1000000 patterns, selected by bit 29.  */
  unsigned expected = (imm & 0x20000000) != 0 ? 0x3e000000 : 0x40000000;

  /* The low 19 bits must be zero.  */
  if ((imm & 0x7ffff) != 0)
    return 0;

  return (imm & 0x7e000000) == expected;
}
5361
5362
/* Detect the presence of a floating point or integer zero constant,
   i.e. #0.0 or #0.  Advances *IN past the constant on success.  */

static bfd_boolean
parse_ifimm_zero (char **in)
{
  int error_code;

  if (!is_immediate_prefix (**in))
    {
      /* In unified syntax, all prefixes are optional.  */
      if (!unified_syntax)
	return FALSE;
    }
  else
    ++*in;

  /* Accept #0x0 as a synonym for #0.  */
  if (strncmp (*in, "0x", 2) == 0)
    {
      int val;
      /* A [0, 0] range means only the value zero is accepted.  */
      if (parse_immediate (in, &val, 0, 0, TRUE) == FAIL)
        return FALSE;
      return TRUE;
    }

  error_code = atof_generic (in, ".", EXP_CHARS,
                             &generic_floating_point_number);

  /* NOTE(review): low > leader is taken to mean an empty significand,
     i.e. the parsed value is +0.0 — relies on atof_generic's internal
     representation; confirm against the libiberty/GAS atof docs before
     changing.  */
  if (!error_code
      && generic_floating_point_number.sign == '+'
      && (generic_floating_point_number.low
          > generic_floating_point_number.leader))
    return TRUE;

  return FALSE;
}
5400
5401/* Parse an 8-bit "quarter-precision" floating point number of the form:
5402   0baBbbbbbc defgh000 00000000 00000000.
5403   The zero and minus-zero cases need special handling, since they can't be
5404   encoded in the "quarter-precision" float format, but can nonetheless be
5405   loaded as integer constants.  */
5406
5407static unsigned
5408parse_qfloat_immediate (char **ccp, int *immed)
5409{
5410  char *str = *ccp;
5411  char *fpnum;
5412  LITTLENUM_TYPE words[MAX_LITTLENUMS];
5413  int found_fpchar = 0;
5414
5415  skip_past_char (&str, '#');
5416
5417  /* We must not accidentally parse an integer as a floating-point number. Make
5418     sure that the value we parse is not an integer by checking for special
5419     characters '.' or 'e'.
5420     FIXME: This is a horrible hack, but doing better is tricky because type
5421     information isn't in a very usable state at parse time.  */
5422  fpnum = str;
5423  skip_whitespace (fpnum);
5424
5425  if (strncmp (fpnum, "0x", 2) == 0)
5426    return FAIL;
5427  else
5428    {
5429      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
5430	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
5431	  {
5432	    found_fpchar = 1;
5433	    break;
5434	  }
5435
5436      if (!found_fpchar)
5437	return FAIL;
5438    }
5439
5440  if ((str = atof_ieee (str, 's', words)) != NULL)
5441    {
5442      unsigned fpword = 0;
5443      int i;
5444
5445      /* Our FP word must be 32 bits (single-precision FP).  */
5446      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
5447	{
5448	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
5449	  fpword |= words[i];
5450	}
5451
5452      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
5453	*immed = fpword;
5454      else
5455	return FAIL;
5456
5457      *ccp = str;
5458
5459      return SUCCESS;
5460    }
5461
5462  return FAIL;
5463}
5464
/* Shift operands.  SHIFT_RRX encodes as ROR #0 (see parse_shift below);
   SHIFT_UXTW only appears in the MVE addressing-mode paths.  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX, SHIFT_UXTW
};

/* Maps a textual shift name onto its shift_kind; entries are looked up
   through the arm_shift_hsh hash table in parse_shift.  */
struct asm_shift_name
{
  const char	  *name;
  enum shift_kind  kind;
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.	*/
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.	*/
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.	 */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.	 */
  SHIFT_UXTW_IMMEDIATE		/* Shift must be UXTW immediate.  */
};
5487
5488/* Parse a <shift> specifier on an ARM data processing instruction.
5489   This has three forms:
5490
5491     (LSL|LSR|ASL|ASR|ROR) Rs
5492     (LSL|LSR|ASL|ASR|ROR) #imm
5493     RRX
5494
5495   Note that ASL is assimilated to LSL in the instruction encoding, and
5496   RRX to ROR #0 (which cannot be written as such).  */
5497
5498static int
5499parse_shift (char **str, int i, enum parse_shift_mode mode)
5500{
5501  const struct asm_shift_name *shift_name;
5502  enum shift_kind shift;
5503  char *s = *str;
5504  char *p = s;
5505  int reg;
5506
5507  for (p = *str; ISALPHA (*p); p++)
5508    ;
5509
5510  if (p == *str)
5511    {
5512      inst.error = _("shift expression expected");
5513      return FAIL;
5514    }
5515
5516  shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
5517							    p - *str);
5518
5519  if (shift_name == NULL)
5520    {
5521      inst.error = _("shift expression expected");
5522      return FAIL;
5523    }
5524
5525  shift = shift_name->kind;
5526
5527  switch (mode)
5528    {
5529    case NO_SHIFT_RESTRICT:
5530    case SHIFT_IMMEDIATE:
5531      if (shift == SHIFT_UXTW)
5532	{
5533	  inst.error = _("'UXTW' not allowed here");
5534	  return FAIL;
5535	}
5536      break;
5537
5538    case SHIFT_LSL_OR_ASR_IMMEDIATE:
5539      if (shift != SHIFT_LSL && shift != SHIFT_ASR)
5540	{
5541	  inst.error = _("'LSL' or 'ASR' required");
5542	  return FAIL;
5543	}
5544      break;
5545
5546    case SHIFT_LSL_IMMEDIATE:
5547      if (shift != SHIFT_LSL)
5548	{
5549	  inst.error = _("'LSL' required");
5550	  return FAIL;
5551	}
5552      break;
5553
5554    case SHIFT_ASR_IMMEDIATE:
5555      if (shift != SHIFT_ASR)
5556	{
5557	  inst.error = _("'ASR' required");
5558	  return FAIL;
5559	}
5560      break;
5561    case SHIFT_UXTW_IMMEDIATE:
5562      if (shift != SHIFT_UXTW)
5563	{
5564	  inst.error = _("'UXTW' required");
5565	  return FAIL;
5566	}
5567      break;
5568
5569    default: abort ();
5570    }
5571
5572  if (shift != SHIFT_RRX)
5573    {
5574      /* Whitespace can appear here if the next thing is a bare digit.	*/
5575      skip_whitespace (p);
5576
5577      if (mode == NO_SHIFT_RESTRICT
5578	  && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5579	{
5580	  inst.operands[i].imm = reg;
5581	  inst.operands[i].immisreg = 1;
5582	}
5583      else if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
5584	return FAIL;
5585    }
5586  inst.operands[i].shift_kind = shift;
5587  inst.operands[i].shifted = 1;
5588  *str = p;
5589  return SUCCESS;
5590}
5591
5592/* Parse a <shifter_operand> for an ARM data processing instruction:
5593
5594      #<immediate>
5595      #<immediate>, <rotate>
5596      <Rm>
5597      <Rm>, <shift>
5598
5599   where <shift> is defined by parse_shift above, and <rotate> is a
5600   multiple of 2 between 0 and 30.  Validation of immediate operands
5601   is deferred to md_apply_fix.  */
5602
5603static int
5604parse_shifter_operand (char **str, int i)
5605{
5606  int value;
5607  expressionS exp;
5608
5609  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
5610    {
5611      inst.operands[i].reg = value;
5612      inst.operands[i].isreg = 1;
5613
5614      /* parse_shift will override this if appropriate */
5615      inst.relocs[0].exp.X_op = O_constant;
5616      inst.relocs[0].exp.X_add_number = 0;
5617
5618      if (skip_past_comma (str) == FAIL)
5619	return SUCCESS;
5620
5621      /* Shift operation on register.  */
5622      return parse_shift (str, i, NO_SHIFT_RESTRICT);
5623    }
5624
5625  if (my_get_expression (&inst.relocs[0].exp, str, GE_IMM_PREFIX))
5626    return FAIL;
5627
5628  if (skip_past_comma (str) == SUCCESS)
5629    {
5630      /* #x, y -- ie explicit rotation by Y.  */
5631      if (my_get_expression (&exp, str, GE_NO_PREFIX))
5632	return FAIL;
5633
5634      if (exp.X_op != O_constant || inst.relocs[0].exp.X_op != O_constant)
5635	{
5636	  inst.error = _("constant expression expected");
5637	  return FAIL;
5638	}
5639
5640      value = exp.X_add_number;
5641      if (value < 0 || value > 30 || value % 2 != 0)
5642	{
5643	  inst.error = _("invalid rotation");
5644	  return FAIL;
5645	}
5646      if (inst.relocs[0].exp.X_add_number < 0
5647	  || inst.relocs[0].exp.X_add_number > 255)
5648	{
5649	  inst.error = _("invalid constant");
5650	  return FAIL;
5651	}
5652
5653      /* Encode as specified.  */
5654      inst.operands[i].imm = inst.relocs[0].exp.X_add_number | value << 7;
5655      return SUCCESS;
5656    }
5657
5658  inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
5659  inst.relocs[0].pc_rel = 0;
5660  return SUCCESS;
5661}
5662
/* Group relocation information.  Each entry in the table contains the
   textual name of the relocation as may appear in assembler source
   and must end with a colon.
   Along with this textual name are the relocation codes to be used if
   the corresponding instruction is an ALU instruction (ADD or SUB only),
   an LDR, an LDRS, or an LDC.  */

struct group_reloc_table_entry
{
  const char *name;	/* Relocation name without the trailing colon.  */
  int alu_code;		/* BFD_RELOC_* for ADD/SUB, or 0 if not usable.  */
  int ldr_code;		/* BFD_RELOC_* for LDR, or 0 if not usable.  */
  int ldrs_code;	/* BFD_RELOC_* for LDRS, or 0 if not usable.  */
  int ldc_code;		/* BFD_RELOC_* for LDC, or 0 if not usable.  */
};
5678
typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,	/* Select the ldr_code table entry.  */
  GROUP_LDRS,	/* Select the ldrs_code table entry.  */
  GROUP_LDC,	/* Select the ldc_code table entry.  */
  GROUP_MVE	/* MVE addressing mode: enables Q-register base/offset forms
		   in parse_address_main; group relocations not supported.  */
} group_reloc_type;
5688
/* Mapping from group-relocation names to the per-instruction-class BFD
   relocation codes.  A zero code means that name is not permitted for
   that instruction class (diagnosed in parse_address_main).  */
static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1, 		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1, 		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 },	/* LDC */
    /* Absolute thumb alu relocations.  */
    { "lower0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "lower8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 } };				/* LDC.  */
5763
5764/* Given the address of a pointer pointing to the textual name of a group
5765   relocation as may appear in assembler source, attempt to find its details
5766   in group_reloc_table.  The pointer will be updated to the character after
5767   the trailing colon.  On failure, FAIL will be returned; SUCCESS
5768   otherwise.  On success, *entry will be updated to point at the relevant
5769   group_reloc_table entry. */
5770
5771static int
5772find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5773{
5774  unsigned int i;
5775  for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5776    {
5777      int length = strlen (group_reloc_table[i].name);
5778
5779      if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5780	  && (*str)[length] == ':')
5781	{
5782	  *out = &group_reloc_table[i];
5783	  *str += (length + 1);
5784	  return SUCCESS;
5785	}
5786    }
5787
5788  return FAIL;
5789}
5790
5791/* Parse a <shifter_operand> for an ARM data processing instruction
5792   (as for parse_shifter_operand) where group relocations are allowed:
5793
5794      #<immediate>
5795      #<immediate>, <rotate>
5796      #:<group_reloc>:<expression>
5797      <Rm>
5798      <Rm>, <shift>
5799
5800   where <group_reloc> is one of the strings defined in group_reloc_table.
5801   The hashes are optional.
5802
5803   Everything else is as for parse_shifter_operand.  */
5804
5805static parse_operand_result
5806parse_shifter_operand_group_reloc (char **str, int i)
5807{
5808  /* Determine if we have the sequence of characters #: or just :
5809     coming next.  If we do, then we check for a group relocation.
5810     If we don't, punt the whole lot to parse_shifter_operand.  */
5811
5812  if (((*str)[0] == '#' && (*str)[1] == ':')
5813      || (*str)[0] == ':')
5814    {
5815      struct group_reloc_table_entry *entry;
5816
5817      if ((*str)[0] == '#')
5818	(*str) += 2;
5819      else
5820	(*str)++;
5821
5822      /* Try to parse a group relocation.  Anything else is an error.  */
5823      if (find_group_reloc_table_entry (str, &entry) == FAIL)
5824	{
5825	  inst.error = _("unknown group relocation");
5826	  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5827	}
5828
5829      /* We now have the group relocation table entry corresponding to
5830	 the name in the assembler source.  Next, we parse the expression.  */
5831      if (my_get_expression (&inst.relocs[0].exp, str, GE_NO_PREFIX))
5832	return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5833
5834      /* Record the relocation type (always the ALU variant here).  */
5835      inst.relocs[0].type = (bfd_reloc_code_real_type) entry->alu_code;
5836      gas_assert (inst.relocs[0].type != 0);
5837
5838      return PARSE_OPERAND_SUCCESS;
5839    }
5840  else
5841    return parse_shifter_operand (str, i) == SUCCESS
5842	   ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
5843
5844  /* Never reached.  */
5845}
5846
5847/* Parse a Neon alignment expression.  Information is written to
5848   inst.operands[i].  We assume the initial ':' has been skipped.
5849
5850   align	.imm = align << 8, .immisalign=1, .preind=0  */
5851static parse_operand_result
5852parse_neon_alignment (char **str, int i)
5853{
5854  char *p = *str;
5855  expressionS exp;
5856
5857  my_get_expression (&exp, &p, GE_NO_PREFIX);
5858
5859  if (exp.X_op != O_constant)
5860    {
5861      inst.error = _("alignment must be constant");
5862      return PARSE_OPERAND_FAIL;
5863    }
5864
5865  inst.operands[i].imm = exp.X_add_number << 8;
5866  inst.operands[i].immisalign = 1;
5867  /* Alignments are not pre-indexes.  */
5868  inst.operands[i].preind = 0;
5869
5870  *str = p;
5871  return PARSE_OPERAND_SUCCESS;
5872}
5873
5874/* Parse all forms of an ARM address expression.  Information is written
5875   to inst.operands[i] and/or inst.relocs[0].
5876
5877   Preindexed addressing (.preind=1):
5878
5879   [Rn, #offset]       .reg=Rn .relocs[0].exp=offset
5880   [Rn, +/-Rm]	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5881   [Rn, +/-Rm, shift]  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5882		       .shift_kind=shift .relocs[0].exp=shift_imm
5883
5884   These three may have a trailing ! which causes .writeback to be set also.
5885
5886   Postindexed addressing (.postind=1, .writeback=1):
5887
5888   [Rn], #offset       .reg=Rn .relocs[0].exp=offset
5889   [Rn], +/-Rm	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5890   [Rn], +/-Rm, shift  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5891		       .shift_kind=shift .relocs[0].exp=shift_imm
5892
5893   Unindexed addressing (.preind=0, .postind=0):
5894
5895   [Rn], {option}      .reg=Rn .imm=option .immisreg=0
5896
5897   Other:
5898
5899   [Rn]{!}	       shorthand for [Rn,#0]{!}
5900   =immediate	       .isreg=0 .relocs[0].exp=immediate
5901   label	       .reg=PC .relocs[0].pc_rel=1 .relocs[0].exp=label
5902
5903  It is the caller's responsibility to check for addressing modes not
5904  supported by the instruction, and to set inst.relocs[0].type.  */
5905
static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
		    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  /* Not a bracketed address: either "=immediate" or a bare label.  */
  if (skip_past_char (&p, '[') == FAIL)
    {
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* Bare address - translate to PC-relative offset.  */
	  inst.relocs[0].pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;

	  if (my_get_expression (&inst.relocs[0].exp, &p, GE_OPT_PREFIX_BIG))
	    return PARSE_OPERAND_FAIL;
	}
      else if (parse_big_immediate (&p, i, &inst.relocs[0].exp,
				    /*allow_symbol_p=*/TRUE))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* PR gas/14887: Allow for whitespace after the opening bracket.  */
  skip_whitespace (p);

  /* Parse the base register.  For MVE a Q register may appear here.  */
  if (group_type == GROUP_MVE)
    {
      enum arm_reg_type rtype = REG_TYPE_MQ;
      struct neon_type_el et;
      if ((reg = arm_typed_reg_parse (&p, rtype, &rtype, &et)) != FAIL)
	{
	  inst.operands[i].isquad = 1;
	}
      else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
	{
	  inst.error = BAD_ADDR_MODE;
	  return PARSE_OPERAND_FAIL;
	}
    }
  else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      /* NOTE(review): group_type cannot be GROUP_MVE on this path (the
	 branch above already handled that case), so the first arm below
	 looks unreachable — confirm before relying on BAD_ADDR_MODE
	 here.  */
      if (group_type == GROUP_MVE)
	inst.error = BAD_ADDR_MODE;
      else
	inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  /* Pre-indexed forms: [Rn, ...].  */
  if (skip_past_comma (&p) == SUCCESS)
    {
      inst.operands[i].preind = 1;

      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      enum arm_reg_type rtype = REG_TYPE_MQ;
      struct neon_type_el et;
      /* [Rn, Qm{, UXTW #imm}] -- MVE vector offset form.  */
      if (group_type == GROUP_MVE
	  && (reg = arm_typed_reg_parse (&p, rtype, &rtype, &et)) != FAIL)
	{
	  inst.operands[i].immisreg = 2;
	  inst.operands[i].imm = reg;

	  if (skip_past_comma (&p) == SUCCESS)
	    {
	      if (parse_shift (&p, i, SHIFT_UXTW_IMMEDIATE) == SUCCESS)
		{
		  /* Fold the UXTW amount into .imm and clear the reloc
		     expression parse_shift left behind.  */
		  inst.operands[i].imm |= inst.relocs[0].exp.X_add_number << 5;
		  inst.relocs[0].exp.X_add_number = 0;
		}
	      else
		return PARSE_OPERAND_FAIL;
	    }
	}
      /* [Rn, Rm{, shift}] -- register offset form.  */
      else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      /* [Rn :align] -- Neon alignment specifier.  */
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here. This may be subject to
	     change.  */
	  parse_operand_result result = parse_neon_alignment (&p, i);

	  if (result != PARSE_OPERAND_SUCCESS)
	    return result;
	}
      /* [Rn, #offset] or [Rn, #:group_reloc:expr].  */
      else
	{
	  if (inst.operands[i].negative)
	    {
	      /* Back up so my_get_expression sees the sign again.  */
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations
	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.relocs[0].exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type.  */
	      switch (group_type)
		{
		  case GROUP_LDR:
		    inst.relocs[0].type
			= (bfd_reloc_code_real_type) entry->ldr_code;
		    break;

		  case GROUP_LDRS:
		    inst.relocs[0].type
			= (bfd_reloc_code_real_type) entry->ldrs_code;
		    break;

		  case GROUP_LDC:
		    inst.relocs[0].type
			= (bfd_reloc_code_real_type) entry->ldc_code;
		    break;

		  default:
		    gas_assert (0);
		}

	      /* A zero table entry means this group relocation name has no
		 variant for this instruction class.  */
	      if (inst.relocs[0].type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    {
	      char *q = p;

	      if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.relocs[0].exp.X_op == O_constant
		  && inst.relocs[0].exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }
  else if (skip_past_char (&p, ':') == SUCCESS)
    {
      /* FIXME: '@' should be used here, but it's filtered out by generic code
	 before we get to see it here. This may be subject to change.  */
      parse_operand_result result = parse_neon_alignment (&p, i);

      if (result != PARSE_OPERAND_SUCCESS)
	return result;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  /* Writeback, unindexed and post-indexed forms after the ']'.  */
  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  enum arm_reg_type rtype = REG_TYPE_MQ;
	  struct neon_type_el et;
	  /* [Rn], Qm -- MVE post-indexed vector offset.  */
	  if (group_type == GROUP_MVE
	      && (reg = arm_typed_reg_parse (&p, rtype, &rtype, &et)) != FAIL)
	    {
	      inst.operands[i].immisreg = 2;
	      inst.operands[i].imm = reg;
	    }
	  else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already. If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      char *q = p;

	      if (inst.operands[i].negative)
		{
		  /* Back up so my_get_expression sees the sign again.  */
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.relocs[0].exp.X_op == O_constant
		  && inst.relocs[0].exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.relocs[0].exp.X_op = O_constant;
      inst.relocs[0].exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
6206
6207static int
6208parse_address (char **str, int i)
6209{
6210  return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
6211	 ? SUCCESS : FAIL;
6212}
6213
6214static parse_operand_result
6215parse_address_group_reloc (char **str, int i, group_reloc_type type)
6216{
6217  return parse_address_main (str, i, 1, type);
6218}
6219
6220/* Parse an operand for a MOVW or MOVT instruction.  */
6221static int
6222parse_half (char **str)
6223{
6224  char * p;
6225
6226  p = *str;
6227  skip_past_char (&p, '#');
6228  if (strncasecmp (p, ":lower16:", 9) == 0)
6229    inst.relocs[0].type = BFD_RELOC_ARM_MOVW;
6230  else if (strncasecmp (p, ":upper16:", 9) == 0)
6231    inst.relocs[0].type = BFD_RELOC_ARM_MOVT;
6232
6233  if (inst.relocs[0].type != BFD_RELOC_UNUSED)
6234    {
6235      p += 9;
6236      skip_whitespace (p);
6237    }
6238
6239  if (my_get_expression (&inst.relocs[0].exp, &p, GE_NO_PREFIX))
6240    return FAIL;
6241
6242  if (inst.relocs[0].type == BFD_RELOC_UNUSED)
6243    {
6244      if (inst.relocs[0].exp.X_op != O_constant)
6245	{
6246	  inst.error = _("constant expression expected");
6247	  return FAIL;
6248	}
6249      if (inst.relocs[0].exp.X_add_number < 0
6250	  || inst.relocs[0].exp.X_add_number > 0xffff)
6251	{
6252	  inst.error = _("immediate value out of range");
6253	  return FAIL;
6254	}
6255    }
6256  *str = p;
6257  return SUCCESS;
6258}
6259
6260/* Miscellaneous. */
6261
6262/* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
6263   or a bitmask suitable to be or-ed into the ARM msr instruction.  */
static int
parse_psr (char **str, bfd_boolean lhs)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;
  bfd_boolean is_apsr = FALSE;
  /* M-profile cores have their own special-register namespace.  */
  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);

  /* PR gas/12698:  If the user has specified -march=all then m_profile will
     be TRUE, but we want to ignore it in this case as we are building for any
     CPU type, including non-m variants.  */
  if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    m_profile = FALSE;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = SPSR_BIT;
    }
  else if (strncasecmp (p, "CPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = 0;
    }
  else if (strncasecmp (p, "APSR", 4) == 0)
    {
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
	 and ARMv7-R architecture CPUs.  */
      is_apsr = TRUE;
      psr_field = 0;
    }
  else if (m_profile)
    {
      /* Scan the whole register name (alphanumerics and underscores).  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      /* For the *apsr/*psr family, cut the name just after the 'r'/'R'
	 so any bitfield suffix is handled by check_suffix below.  */
      if (strncasecmp (start, "iapsr", 5) == 0
	  || strncasecmp (start, "eapsr", 5) == 0
	  || strncasecmp (start, "xpsr", 4) == 0
	  || strncasecmp (start, "psr", 3) == 0)
	p = start + strcspn (start, "rR") + 1;

      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
						  p - start);

      if (!psr)
	return FAIL;

      /* If APSR is being written, a bitfield may be specified.  Note that
	 APSR itself is handled above.  */
      if (psr->field <= 3)
	{
	  psr_field = psr->field;
	  is_apsr = TRUE;
	  goto check_suffix;
	}

      *str = p;
      /* M-profile MSR instructions have the mask field set to "10", except
	 *PSR variants which modify APSR, which may use a different mask (and
	 have been handled already).  Do that by setting the PSR_f field
	 here.  */
      return psr->field | (lhs ? PSR_f : 0);
    }
  else
    goto unsupported_psr;

  /* Step past the four-character [CSA]PSR name.  */
  p += 4;
check_suffix:
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      if (is_apsr)
	{
	  /* APSR uses a notation for bits, rather than fields.  */
	  unsigned int nzcvq_bits = 0;
	  unsigned int g_bit = 0;
	  char *bit;

	  /* Accumulate each named bit; 0x20 (or 0x2 for 'g') records a
	     duplicate so it can be rejected below.  */
	  for (bit = start; bit != p; bit++)
	    {
	      switch (TOLOWER (*bit))
		{
		case 'n':
		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
		  break;

		case 'z':
		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
		  break;

		case 'c':
		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
		  break;

		case 'v':
		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
		  break;

		case 'q':
		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
		  break;

		case 'g':
		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
		  break;

		default:
		  inst.error = _("unexpected bit specified after APSR");
		  return FAIL;
		}
	    }

	  /* All five of n, z, c, v and q present -> the flags field.  */
	  if (nzcvq_bits == 0x1f)
	    psr_field |= PSR_f;

	  if (g_bit == 0x1)
	    {
	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
		{
		  inst.error = _("selected processor does not "
				 "support DSP extension");
		  return FAIL;
		}

	      psr_field |= PSR_s;
	    }

	  /* Reject duplicated bits, incomplete nzcvq subsets and a
	     duplicated 'g'.  */
	  if ((nzcvq_bits & 0x20) != 0
	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
	      || (g_bit & 0x2) != 0)
	    {
	      inst.error = _("bad bitmask specified after APSR");
	      return FAIL;
	    }
	}
      else
	{
	  /* [CS]PSR field suffix (e.g. _cxsf), looked up as a whole.  */
	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
						      p - start);
	  if (!psr)
	    goto error;

	  psr_field |= psr->field;
	}
    }
  else
    {
      if (ISALNUM (*p))
	goto error;    /* Garbage after "[CS]PSR".  */

      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
	 is deprecated, but allow it anyway.  */
      if (is_apsr && lhs)
	{
	  psr_field |= PSR_f;
	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
		       "deprecated"));
	}
      else if (!m_profile)
	/* These bits are never right for M-profile devices: don't set them
	   (only code paths which read/write APSR reach here).  */
	psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 unsupported_psr:
  inst.error = _("selected processor does not support requested special "
		 "purpose register");
  return FAIL;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
6458
6459static int
6460parse_sys_vldr_vstr (char **str)
6461{
6462  unsigned i;
6463  int val = FAIL;
6464  struct {
6465    const char *name;
6466    int regl;
6467    int regh;
6468  } sysregs[] = {
6469    {"FPSCR",		0x1, 0x0},
6470    {"FPSCR_nzcvqc",	0x2, 0x0},
6471    {"VPR",		0x4, 0x1},
6472    {"P0",		0x5, 0x1},
6473    {"FPCXTNS",		0x6, 0x1},
6474    {"FPCXTS",		0x7, 0x1}
6475  };
6476  char *op_end = strchr (*str, ',');
6477  size_t op_strlen = op_end - *str;
6478
6479  for (i = 0; i < sizeof (sysregs) / sizeof (sysregs[0]); i++)
6480    {
6481      if (!strncmp (*str, sysregs[i].name, op_strlen))
6482	{
6483	  val = sysregs[i].regl | (sysregs[i].regh << 3);
6484	  *str = op_end;
6485	  break;
6486	}
6487    }
6488
6489  return val;
6490}
6491
6492/* Parse the flags argument to CPSI[ED].  Returns FAIL on error, or a
6493   value suitable for splatting into the AIF field of the instruction.	*/
6494
6495static int
6496parse_cps_flags (char **str)
6497{
6498  int val = 0;
6499  int saw_a_flag = 0;
6500  char *s = *str;
6501
6502  for (;;)
6503    switch (*s++)
6504      {
6505      case '\0': case ',':
6506	goto done;
6507
6508      case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
6509      case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
6510      case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
6511
6512      default:
6513	inst.error = _("unrecognized CPS flag");
6514	return FAIL;
6515      }
6516
6517 done:
6518  if (saw_a_flag == 0)
6519    {
6520      inst.error = _("missing CPS flags");
6521      return FAIL;
6522    }
6523
6524  *str = s - 1;
6525  return val;
6526}
6527
6528/* Parse an endian specifier ("BE" or "LE", case insensitive);
6529   returns 0 for big-endian, 1 for little-endian, FAIL for an error.  */
6530
6531static int
6532parse_endian_specifier (char **str)
6533{
6534  int little_endian;
6535  char *s = *str;
6536
6537  if (strncasecmp (s, "BE", 2))
6538    little_endian = 0;
6539  else if (strncasecmp (s, "LE", 2))
6540    little_endian = 1;
6541  else
6542    {
6543      inst.error = _("valid endian specifiers are be or le");
6544      return FAIL;
6545    }
6546
6547  if (ISALNUM (s[2]) || s[2] == '_')
6548    {
6549      inst.error = _("valid endian specifiers are be or le");
6550      return FAIL;
6551    }
6552
6553  *str = s + 2;
6554  return little_endian;
6555}
6556
6557/* Parse a rotation specifier: ROR #0, #8, #16, #24.  *val receives a
6558   value suitable for poking into the rotate field of an sxt or sxta
6559   instruction, or FAIL on error.  */
6560
6561static int
6562parse_ror (char **str)
6563{
6564  int rot;
6565  char *s = *str;
6566
6567  if (strncasecmp (s, "ROR", 3) == 0)
6568    s += 3;
6569  else
6570    {
6571      inst.error = _("missing rotation field after comma");
6572      return FAIL;
6573    }
6574
6575  if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
6576    return FAIL;
6577
6578  switch (rot)
6579    {
6580    case  0: *str = s; return 0x0;
6581    case  8: *str = s; return 0x1;
6582    case 16: *str = s; return 0x2;
6583    case 24: *str = s; return 0x3;
6584
6585    default:
6586      inst.error = _("rotation can only be 0, 8, 16, or 24");
6587      return FAIL;
6588    }
6589}
6590
6591/* Parse a conditional code (from conds[] below).  The value returned is in the
6592   range 0 .. 14, or FAIL.  */
6593static int
6594parse_cond (char **str)
6595{
6596  char *q;
6597  const struct asm_cond *c;
6598  int n;
6599  /* Condition codes are always 2 characters, so matching up to
6600     3 characters is sufficient.  */
6601  char cond[3];
6602
6603  q = *str;
6604  n = 0;
6605  while (ISALPHA (*q) && n < 3)
6606    {
6607      cond[n] = TOLOWER (*q);
6608      q++;
6609      n++;
6610    }
6611
6612  c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
6613  if (!c)
6614    {
6615      inst.error = _("condition required");
6616      return FAIL;
6617    }
6618
6619  *str = q;
6620  return c->value;
6621}
6622
6623/* Parse an option for a barrier instruction.  Returns the encoding for the
6624   option, or FAIL.  */
6625static int
6626parse_barrier (char **str)
6627{
6628  char *p, *q;
6629  const struct asm_barrier_opt *o;
6630
6631  p = q = *str;
6632  while (ISALPHA (*q))
6633    q++;
6634
6635  o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
6636						    q - p);
6637  if (!o)
6638    return FAIL;
6639
6640  if (!mark_feature_used (&o->arch))
6641    return FAIL;
6642
6643  *str = q;
6644  return o->value;
6645}
6646
6647/* Parse the operands of a table branch instruction.  Similar to a memory
6648   operand.  */
6649static int
6650parse_tb (char **str)
6651{
6652  char * p = *str;
6653  int reg;
6654
6655  if (skip_past_char (&p, '[') == FAIL)
6656    {
6657      inst.error = _("'[' expected");
6658      return FAIL;
6659    }
6660
6661  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6662    {
6663      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6664      return FAIL;
6665    }
6666  inst.operands[0].reg = reg;
6667
6668  if (skip_past_comma (&p) == FAIL)
6669    {
6670      inst.error = _("',' expected");
6671      return FAIL;
6672    }
6673
6674  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6675    {
6676      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6677      return FAIL;
6678    }
6679  inst.operands[0].imm = reg;
6680
6681  if (skip_past_comma (&p) == SUCCESS)
6682    {
6683      if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
6684	return FAIL;
6685      if (inst.relocs[0].exp.X_add_number != 1)
6686	{
6687	  inst.error = _("invalid shift");
6688	  return FAIL;
6689	}
6690      inst.operands[0].shifted = 1;
6691    }
6692
6693  if (skip_past_char (&p, ']') == FAIL)
6694    {
6695      inst.error = _("']' expected");
6696      return FAIL;
6697    }
6698  *str = p;
6699  return SUCCESS;
6700}
6701
/* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
   information on the types the operands can take and how they are encoded.
   Up to four operands may be read; this function handles setting the
   ".present" field for each read operand itself.
   Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
   else returns FAIL.
   The "Case N" comments below refer to the operand-combination cases
   enumerated in do_neon_mov; the branches are ordered so that the most
   specific register class is attempted first, and the order is
   load-bearing (e.g. an MVE scalar must be tried before a plain ARM
   register).  */

static int
parse_neon_mov (char **str, int *which_operand)
{
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  /* First operand is an MVE vector scalar (Qd[idx]).  */
   if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ)) != FAIL)
    {
      /* Cases 17 or 19.  */
      inst.operands[i].reg = val;
      inst.operands[i].isvec = 1;
      inst.operands[i].isscalar = 2;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 17: VMOV<c>.<dt> <Qd[idx]>, <Rt>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;
	}
      else if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ)) != FAIL)
	{
	  /* Case 19: VMOV<c> <Qd[idx]>, <Qd[idx2]>, <Rt>, <Rt2>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].isscalar = 2;
	  inst.operands[i].vectype = optype;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	    goto wanted_arm;

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	    goto wanted_arm;

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;
	}
      else
	{
	  first_error (_("expected ARM or MVE vector register"));
	  return FAIL;
	}
    }
   /* First operand is a Neon D-register scalar (Dn[x]).  */
   else if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_VFD)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  /* First operand is a vector register (S, D or Q, or an MVE Q-reg).  */
  else if (((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	    != FAIL)
	   || ((val = arm_typed_reg_parse (&ptr, REG_TYPE_MQ, &rtype, &optype))
	       != FAIL))
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* A D-reg destination needs a second ARM source register.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
		&optype)) != FAIL)
	       || ((val = arm_typed_reg_parse (&ptr, REG_TYPE_MQ, &rtype,
		   &optype)) != FAIL))
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	     Case 10: VMOV.F32 <Sd>, #<imm>
	     Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
	       == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  /* First operand is a plain ARM core register.  */
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7, 16, 18.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ)) != FAIL)
	{
	  /* Case 18: VMOV<c>.<dt> <Rt>, <Qn[idx]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 2;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_VFD)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Two ARM registers so far: a vector (or scalar pair) must
	     follow.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      != FAIL)
	    {
	      /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;

	      if (rtype == REG_TYPE_VFS)
		{
		  /* Case 14.  */
		  i++;
		  if (skip_past_comma (&ptr) == FAIL)
		    goto wanted_comma;
		  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
						  &optype)) == FAIL)
		    {
		      first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		      return FAIL;
		    }
		  inst.operands[i].reg = val;
		  inst.operands[i].isreg = 1;
		  inst.operands[i].isvec = 1;
		  inst.operands[i].issingle = 1;
		  inst.operands[i].vectype = optype;
		  inst.operands[i].present = 1;
		}
	    }
	  else
	    {
	      if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ))
		       != FAIL)
		{
		  /* Case 16: VMOV<c> <Rt>, <Rt2>, <Qd[idx]>, <Qd[idx2]>  */
		  inst.operands[i].reg = val;
		  inst.operands[i].isvec = 1;
		  inst.operands[i].isscalar = 2;
		  inst.operands[i].vectype = optype;
		  inst.operands[i++].present = 1;

		  if (skip_past_comma (&ptr) == FAIL)
		    goto wanted_comma;

		  if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ))
		      == FAIL)
		    {
		      first_error (_(reg_expected_msgs[REG_TYPE_MQ]));
		      return FAIL;
		    }
		  inst.operands[i].reg = val;
		  inst.operands[i].isvec = 1;
		  inst.operands[i].isscalar = 2;
		  inst.operands[i].vectype = optype;
		  inst.operands[i].present = 1;
		}
	      else
		{
		  first_error (_("VFP single, double or MVE vector register"
			       " expected"));
		  return FAIL;
		}
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands. Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
7022
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  The ARM operand code occupies the low
   16 bits and the Thumb code the high 16 bits; parse_operands picks the
   appropriate half at run time based on the assembly state.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))
7027
/* Matcher codes for parse_operands.
   NOTE: the ordering of this enum is significant in two ways.  Every
   value must fit in 16 bits so that MIX_ARM_THUMB_OPERANDS can pack an
   ARM/Thumb pair into one word, and all optional operand codes must
   appear at or after OP_oI7b, since parse_operands treats any code
   >= OP_FIRST_OPTIONAL as backtrackable.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,       /* Neon double precision register (0..31) */
  OP_RNDMQ,     /* Neon double precision (0..31) or MVE vector register.  */
  OP_RNDMQR,    /* Neon double precision (0..31), MVE vector or ARM register.
		 */
  OP_RNQ,	/* Neon quad precision register */
  OP_RNQMQ,	/* Neon quad or MVE vector register.  */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RVSD_COND,	/* VFP single, double precision register or condition code.  */
  OP_RVSDMQ,	/* VFP single, double precision or MVE vector register.  */
  OP_RNSD,      /* Neon single or double precision register */
  OP_RNDQ,      /* Neon double or quad precision register */
  OP_RNDQMQ,     /* Neon double, quad or MVE vector register.  */
  OP_RNDQMQR,   /* Neon double, quad, MVE vector or ARM register.  */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,      /* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_RNSDQMQ,	/* Neon single, double or quad register or MVE vector register
		 */
  OP_RNSDQMQR,	/* Neon single, double or quad register, MVE vector register or
		   GPR (no SP/SP)  */
  OP_RMQ,	/* MVE vector register.  */
  OP_RMQRZ,	/* MVE vector or ARM register including ZR.  */
  OP_RMQRR,     /* MVE vector or ARM register.  */

  /* New operands for Armv8.1-M Mainline.  */
  OP_LR,	/* ARM LR register */
  OP_RRe,	/* ARM register, only even numbered.  */
  OP_RRo,	/* ARM register, only odd numbered, not r13 or r15.  */
  OP_RRnpcsp_I32, /* ARM register (no BadReg) or literal 1 .. 32 */
  OP_RR_ZR,	/* ARM register or ZR but no PC */

  OP_REGLST,	/* ARM register list */
  OP_CLRMLST,	/* CLRM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,   /* VFP single or double-precision register list (& quad) */
  OP_NRDLST,    /* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,   /* Neon element/structure list */
  OP_VRSDVLST,  /* VFP single or double-precision register list and VPR */
  OP_MSTRLST2,	/* MVE vector list with two elements.  */
  OP_MSTRLST4,	/* MVE vector list with four elements.  */

  OP_RNDQ_I0,   /* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RSVD_FI0, /* VFP S or D reg, or floating point immediate zero.  */
  OP_RSVDMQ_FI0, /* VFP S, D, MVE vector register or floating point immediate
		    zero.  */
  OP_RR_RNSC,   /* ARM reg or Neon scalar.  */
  OP_RNSD_RNSC, /* Neon S or D reg, or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNSDQ_RNSC_MQ, /* Vector S, D or Q reg, Neon scalar or MVE vector register.
		     */
  OP_RNSDQ_RNSC_MQ_RR, /* Vector S, D or Q reg, or MVE vector reg , or Neon
			  scalar, or ARM register.  */
  OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC_RR, /* Neon D or Q reg, Neon scalar, or ARM register.  */
  OP_RNDQMQ_RNSC_RR, /* Neon D or Q reg, Neon scalar, MVE vector or ARM
			register.  */
  OP_RNDQMQ_RNSC, /* Neon D, Q or MVE vector reg, or Neon scalar.  */
  OP_RND_RNSC,  /* Neon D reg, or Neon scalar.  */
  OP_VMOV,      /* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  /* Neon D, Q or MVE vector register, or big immediate for logic and VMVN.  */
  OP_RNDQMQ_Ibig,
  OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift.  */
  OP_RNDQMQ_I63b_RR, /* Neon D or Q reg, immediate for shift, MVE vector or
			ARM register.  */
  OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */
  OP_VLDR,	/* VLDR operand.  */

  OP_I0,        /* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,      /*                 0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I48_I64,	/*		   48 or 64 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRMVE,	/* Memory address expression for MVE's VSTR/VLDR.  */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,  /* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_EXPs,	/* same, with optional non-first operand relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */
  OP_IROT1,	/* VCADD rotate immediate: 90, 270.  */
  OP_IROT2,	/* VCMLA rotate immediate: 0, 90, 180, 270.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_APSR_RR,   /* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc stuff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.	 Everything from here to OP_FIRST_OPTIONAL may be
     omitted in the source; do not add required operands below this
     point.  */
  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	 /*				0 .. 31 */
  OP_oI32b,      /*                             1 .. 32 */
  OP_oI32z,      /*                             0 .. 32 */
  OP_oIffffb,	 /*				0 .. 65535 */
  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */

  OP_oRR,	 /* ARM register */
  OP_oLR,	 /* ARM LR register */
  OP_oRRnpc,	 /* ARM register, not the PC */
  OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
  OP_oRND,       /* Optional Neon double precision register */
  OP_oRNQ,       /* Optional Neon quad precision register */
  OP_oRNDQMQ,     /* Optional Neon double, quad or MVE vector register.  */
  OP_oRNDQ,      /* Optional Neon double or quad precision register */
  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
  OP_oRNSDQMQ,	 /* Optional single, double or quad register or MVE vector
		    register.  */
  OP_oSHll,	 /* LSL immediate */
  OP_oSHar,	 /* ASR immediate */
  OP_oSHllar,	 /* LSL or ASR immediate */
  OP_oROR,	 /* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  OP_oRMQRZ,	/* optional MVE vector or ARM register including ZR.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  /* parse_operands backtracks over any operand code at or above this
     marker; OP_oI7b must therefore remain the first optional code.  */
  OP_FIRST_OPTIONAL = OP_oI7b
};
7213
7214/* Generic instruction operand parser.	This does no encoding and no
7215   semantic validation; it merely squirrels values away in the inst
7216   structure.  Returns SUCCESS or FAIL depending on whether the
7217   specified grammar matched.  */
7218static int
7219parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
7220{
7221  unsigned const int *upat = pattern;
7222  char *backtrack_pos = 0;
7223  const char *backtrack_error = 0;
7224  int i, val = 0, backtrack_index = 0;
7225  enum arm_reg_type rtype;
7226  parse_operand_result result;
7227  unsigned int op_parse_code;
7228  bfd_boolean partial_match;
7229
7230#define po_char_or_fail(chr)			\
7231  do						\
7232    {						\
7233      if (skip_past_char (&str, chr) == FAIL)	\
7234	goto bad_args;				\
7235    }						\
7236  while (0)
7237
7238#define po_reg_or_fail(regtype)					\
7239  do								\
7240    {								\
7241      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
7242				 & inst.operands[i].vectype);	\
7243      if (val == FAIL)						\
7244	{							\
7245	  first_error (_(reg_expected_msgs[regtype]));		\
7246	  goto failure;						\
7247	}							\
7248      inst.operands[i].reg = val;				\
7249      inst.operands[i].isreg = 1;				\
7250      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
7251      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
7252      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
7253			     || rtype == REG_TYPE_VFD		\
7254			     || rtype == REG_TYPE_NQ);		\
7255      inst.operands[i].iszr = (rtype == REG_TYPE_ZR);		\
7256    }								\
7257  while (0)
7258
7259#define po_reg_or_goto(regtype, label)				\
7260  do								\
7261    {								\
7262      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
7263				 & inst.operands[i].vectype);	\
7264      if (val == FAIL)						\
7265	goto label;						\
7266								\
7267      inst.operands[i].reg = val;				\
7268      inst.operands[i].isreg = 1;				\
7269      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
7270      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
7271      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
7272			     || rtype == REG_TYPE_VFD		\
7273			     || rtype == REG_TYPE_NQ);		\
7274      inst.operands[i].iszr = (rtype == REG_TYPE_ZR);		\
7275    }								\
7276  while (0)
7277
7278#define po_imm_or_fail(min, max, popt)				\
7279  do								\
7280    {								\
7281      if (parse_immediate (&str, &val, min, max, popt) == FAIL)	\
7282	goto failure;						\
7283      inst.operands[i].imm = val;				\
7284    }								\
7285  while (0)
7286
7287#define po_imm1_or_imm2_or_fail(imm1, imm2, popt)		\
7288  do								\
7289    {								\
7290      expressionS exp;						\
7291      my_get_expression (&exp, &str, popt);			\
7292      if (exp.X_op != O_constant)				\
7293	{							\
7294	  inst.error = _("constant expression required");	\
7295	  goto failure;						\
7296	}							\
7297      if (exp.X_add_number != imm1 && exp.X_add_number != imm2) \
7298	{							\
7299	  inst.error = _("immediate value 48 or 64 expected");	\
7300	  goto failure;						\
7301	}							\
7302      inst.operands[i].imm = exp.X_add_number;			\
7303    }								\
7304  while (0)
7305
7306#define po_scalar_or_goto(elsz, label, reg_type)			\
7307  do									\
7308    {									\
7309      val = parse_scalar (& str, elsz, & inst.operands[i].vectype,	\
7310			  reg_type);					\
7311      if (val == FAIL)							\
7312	goto label;							\
7313      inst.operands[i].reg = val;					\
7314      inst.operands[i].isscalar = 1;					\
7315    }									\
7316  while (0)
7317
7318#define po_misc_or_fail(expr)			\
7319  do						\
7320    {						\
7321      if (expr)					\
7322	goto failure;				\
7323    }						\
7324  while (0)
7325
7326#define po_misc_or_fail_no_backtrack(expr)		\
7327  do							\
7328    {							\
7329      result = expr;					\
7330      if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)	\
7331	backtrack_pos = 0;				\
7332      if (result != PARSE_OPERAND_SUCCESS)		\
7333	goto failure;					\
7334    }							\
7335  while (0)
7336
7337#define po_barrier_or_imm(str)				   \
7338  do							   \
7339    {						 	   \
7340      val = parse_barrier (&str);			   \
7341      if (val == FAIL && ! ISALPHA (*str))		   \
7342	goto immediate;					   \
7343      if (val == FAIL					   \
7344	  /* ISB can only take SY as an option.  */	   \
7345	  || ((inst.instruction & 0xf0) == 0x60		   \
7346	       && val != 0xf))				   \
7347	{						   \
7348	   inst.error = _("invalid barrier type");	   \
7349	   backtrack_pos = 0;				   \
7350	   goto failure;				   \
7351	}						   \
7352    }							   \
7353  while (0)
7354
7355  skip_whitespace (str);
7356
7357  for (i = 0; upat[i] != OP_stop; i++)
7358    {
7359      op_parse_code = upat[i];
7360      if (op_parse_code >= 1<<16)
7361	op_parse_code = thumb ? (op_parse_code >> 16)
7362				: (op_parse_code & ((1<<16)-1));
7363
7364      if (op_parse_code >= OP_FIRST_OPTIONAL)
7365	{
7366	  /* Remember where we are in case we need to backtrack.  */
7367	  backtrack_pos = str;
7368	  backtrack_error = inst.error;
7369	  backtrack_index = i;
7370	}
7371
7372      if (i > 0 && (i > 1 || inst.operands[0].present))
7373	po_char_or_fail (',');
7374
7375      switch (op_parse_code)
7376	{
7377	  /* Registers */
7378	case OP_oRRnpc:
7379	case OP_oRRnpcsp:
7380	case OP_RRnpc:
7381	case OP_RRnpcsp:
7382	case OP_oRR:
7383	case OP_RRe:
7384	case OP_RRo:
7385	case OP_LR:
7386	case OP_oLR:
7387	case OP_RR:    po_reg_or_fail (REG_TYPE_RN);	  break;
7388	case OP_RCP:   po_reg_or_fail (REG_TYPE_CP);	  break;
7389	case OP_RCN:   po_reg_or_fail (REG_TYPE_CN);	  break;
7390	case OP_RF:    po_reg_or_fail (REG_TYPE_FN);	  break;
7391	case OP_RVS:   po_reg_or_fail (REG_TYPE_VFS);	  break;
7392	case OP_RVD:   po_reg_or_fail (REG_TYPE_VFD);	  break;
7393	case OP_oRND:
7394	case OP_RNDMQR:
7395	  po_reg_or_goto (REG_TYPE_RN, try_rndmq);
7396	  break;
7397	try_rndmq:
7398	case OP_RNDMQ:
7399	  po_reg_or_goto (REG_TYPE_MQ, try_rnd);
7400	  break;
7401	try_rnd:
7402	case OP_RND:   po_reg_or_fail (REG_TYPE_VFD);	  break;
7403	case OP_RVC:
7404	  po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
7405	  break;
7406	  /* Also accept generic coprocessor regs for unknown registers.  */
7407	  coproc_reg:
7408	  po_reg_or_goto (REG_TYPE_CN, vpr_po);
7409	  break;
7410	  /* Also accept P0 or p0 for VPR.P0.  Since P0 is already an
7411	     existing register with a value of 0, this seems like the
7412	     best way to parse P0.  */
7413	  vpr_po:
7414	  if (strncasecmp (str, "P0", 2) == 0)
7415	    {
7416	      str += 2;
7417	      inst.operands[i].isreg = 1;
7418	      inst.operands[i].reg = 13;
7419	    }
7420	  else
7421	    goto failure;
7422	  break;
7423	case OP_RMF:   po_reg_or_fail (REG_TYPE_MVF);	  break;
7424	case OP_RMD:   po_reg_or_fail (REG_TYPE_MVD);	  break;
7425	case OP_RMFX:  po_reg_or_fail (REG_TYPE_MVFX);	  break;
7426	case OP_RMDX:  po_reg_or_fail (REG_TYPE_MVDX);	  break;
7427	case OP_RMAX:  po_reg_or_fail (REG_TYPE_MVAX);	  break;
7428	case OP_RMDS:  po_reg_or_fail (REG_TYPE_DSPSC);	  break;
7429	case OP_RIWR:  po_reg_or_fail (REG_TYPE_MMXWR);	  break;
7430	case OP_RIWC:  po_reg_or_fail (REG_TYPE_MMXWC);	  break;
7431	case OP_RIWG:  po_reg_or_fail (REG_TYPE_MMXWCG);  break;
7432	case OP_RXA:   po_reg_or_fail (REG_TYPE_XSCALE);  break;
7433	case OP_oRNQ:
7434	case OP_RNQMQ:
7435	  po_reg_or_goto (REG_TYPE_MQ, try_nq);
7436	  break;
7437	try_nq:
7438	case OP_RNQ:   po_reg_or_fail (REG_TYPE_NQ);      break;
7439	case OP_RNSD:  po_reg_or_fail (REG_TYPE_NSD);     break;
7440	case OP_RNDQMQR:
7441	  po_reg_or_goto (REG_TYPE_RN, try_rndqmq);
7442	  break;
7443	try_rndqmq:
7444	case OP_oRNDQMQ:
7445	case OP_RNDQMQ:
7446	  po_reg_or_goto (REG_TYPE_MQ, try_rndq);
7447	  break;
7448	try_rndq:
7449	case OP_oRNDQ:
7450	case OP_RNDQ:  po_reg_or_fail (REG_TYPE_NDQ);     break;
7451	case OP_RVSDMQ:
7452	  po_reg_or_goto (REG_TYPE_MQ, try_rvsd);
7453	  break;
7454	try_rvsd:
7455	case OP_RVSD:  po_reg_or_fail (REG_TYPE_VFSD);    break;
7456	case OP_RVSD_COND:
7457	  po_reg_or_goto (REG_TYPE_VFSD, try_cond);
7458	  break;
7459	case OP_oRNSDQ:
7460	case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ);    break;
7461	case OP_RNSDQMQR:
7462	  po_reg_or_goto (REG_TYPE_RN, try_mq);
7463	  break;
7464	  try_mq:
7465	case OP_oRNSDQMQ:
7466	case OP_RNSDQMQ:
7467	  po_reg_or_goto (REG_TYPE_MQ, try_nsdq2);
7468	  break;
7469	  try_nsdq2:
7470	  po_reg_or_fail (REG_TYPE_NSDQ);
7471	  inst.error = 0;
7472	  break;
7473	case OP_RMQRR:
7474	  po_reg_or_goto (REG_TYPE_RN, try_rmq);
7475	  break;
7476	try_rmq:
7477	case OP_RMQ:
7478	  po_reg_or_fail (REG_TYPE_MQ);
7479	  break;
7480	/* Neon scalar. Using an element size of 8 means that some invalid
7481	   scalars are accepted here, so deal with those in later code.  */
7482	case OP_RNSC:  po_scalar_or_goto (8, failure, REG_TYPE_VFD);    break;
7483
7484	case OP_RNDQ_I0:
7485	  {
7486	    po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
7487	    break;
7488	    try_imm0:
7489	    po_imm_or_fail (0, 0, TRUE);
7490	  }
7491	  break;
7492
7493	case OP_RVSD_I0:
7494	  po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
7495	  break;
7496
7497	case OP_RSVDMQ_FI0:
7498	  po_reg_or_goto (REG_TYPE_MQ, try_rsvd_fi0);
7499	  break;
7500	try_rsvd_fi0:
7501	case OP_RSVD_FI0:
7502	  {
7503	    po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
7504	    break;
7505	    try_ifimm0:
7506	    if (parse_ifimm_zero (&str))
7507	      inst.operands[i].imm = 0;
7508	    else
7509	    {
7510	      inst.error
7511	        = _("only floating point zero is allowed as immediate value");
7512	      goto failure;
7513	    }
7514	  }
7515	  break;
7516
7517	case OP_RR_RNSC:
7518	  {
7519	    po_scalar_or_goto (8, try_rr, REG_TYPE_VFD);
7520	    break;
7521	    try_rr:
7522	    po_reg_or_fail (REG_TYPE_RN);
7523	  }
7524	  break;
7525
7526	case OP_RNSDQ_RNSC_MQ_RR:
7527	  po_reg_or_goto (REG_TYPE_RN, try_rnsdq_rnsc_mq);
7528	  break;
7529	try_rnsdq_rnsc_mq:
7530	case OP_RNSDQ_RNSC_MQ:
7531	  po_reg_or_goto (REG_TYPE_MQ, try_rnsdq_rnsc);
7532	  break;
7533	try_rnsdq_rnsc:
7534	case OP_RNSDQ_RNSC:
7535	  {
7536	    po_scalar_or_goto (8, try_nsdq, REG_TYPE_VFD);
7537	    inst.error = 0;
7538	    break;
7539	    try_nsdq:
7540	    po_reg_or_fail (REG_TYPE_NSDQ);
7541	    inst.error = 0;
7542	  }
7543	  break;
7544
7545	case OP_RNSD_RNSC:
7546	  {
7547	    po_scalar_or_goto (8, try_s_scalar, REG_TYPE_VFD);
7548	    break;
7549	    try_s_scalar:
7550	    po_scalar_or_goto (4, try_nsd, REG_TYPE_VFS);
7551	    break;
7552	    try_nsd:
7553	    po_reg_or_fail (REG_TYPE_NSD);
7554	  }
7555	  break;
7556
7557	case OP_RNDQMQ_RNSC_RR:
7558	  po_reg_or_goto (REG_TYPE_MQ, try_rndq_rnsc_rr);
7559	  break;
7560	try_rndq_rnsc_rr:
7561	case OP_RNDQ_RNSC_RR:
7562	  po_reg_or_goto (REG_TYPE_RN, try_rndq_rnsc);
7563	  break;
7564	case OP_RNDQMQ_RNSC:
7565	  po_reg_or_goto (REG_TYPE_MQ, try_rndq_rnsc);
7566	  break;
7567	try_rndq_rnsc:
7568	case OP_RNDQ_RNSC:
7569	  {
7570	    po_scalar_or_goto (8, try_ndq, REG_TYPE_VFD);
7571	    break;
7572	    try_ndq:
7573	    po_reg_or_fail (REG_TYPE_NDQ);
7574	  }
7575	  break;
7576
7577	case OP_RND_RNSC:
7578	  {
7579	    po_scalar_or_goto (8, try_vfd, REG_TYPE_VFD);
7580	    break;
7581	    try_vfd:
7582	    po_reg_or_fail (REG_TYPE_VFD);
7583	  }
7584	  break;
7585
7586	case OP_VMOV:
7587	  /* WARNING: parse_neon_mov can move the operand counter, i. If we're
7588	     not careful then bad things might happen.  */
7589	  po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
7590	  break;
7591
7592	case OP_RNDQMQ_Ibig:
7593	  po_reg_or_goto (REG_TYPE_MQ, try_rndq_ibig);
7594	  break;
7595	try_rndq_ibig:
7596	case OP_RNDQ_Ibig:
7597	  {
7598	    po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
7599	    break;
7600	    try_immbig:
7601	    /* There's a possibility of getting a 64-bit immediate here, so
7602	       we need special handling.  */
7603	    if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
7604		== FAIL)
7605	      {
7606		inst.error = _("immediate value is out of range");
7607		goto failure;
7608	      }
7609	  }
7610	  break;
7611
7612	case OP_RNDQMQ_I63b_RR:
7613	  po_reg_or_goto (REG_TYPE_MQ, try_rndq_i63b_rr);
7614	  break;
7615	try_rndq_i63b_rr:
7616	  po_reg_or_goto (REG_TYPE_RN, try_rndq_i63b);
7617	  break;
7618	try_rndq_i63b:
7619	case OP_RNDQ_I63b:
7620	  {
7621	    po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
7622	    break;
7623	    try_shimm:
7624	    po_imm_or_fail (0, 63, TRUE);
7625	  }
7626	  break;
7627
7628	case OP_RRnpcb:
7629	  po_char_or_fail ('[');
7630	  po_reg_or_fail  (REG_TYPE_RN);
7631	  po_char_or_fail (']');
7632	  break;
7633
7634	case OP_RRnpctw:
7635	case OP_RRw:
7636	case OP_oRRw:
7637	  po_reg_or_fail (REG_TYPE_RN);
7638	  if (skip_past_char (&str, '!') == SUCCESS)
7639	    inst.operands[i].writeback = 1;
7640	  break;
7641
7642	  /* Immediates */
7643	case OP_I7:	 po_imm_or_fail (  0,	   7, FALSE);	break;
7644	case OP_I15:	 po_imm_or_fail (  0,	  15, FALSE);	break;
7645	case OP_I16:	 po_imm_or_fail (  1,	  16, FALSE);	break;
7646	case OP_I16z:	 po_imm_or_fail (  0,     16, FALSE);   break;
7647	case OP_I31:	 po_imm_or_fail (  0,	  31, FALSE);	break;
7648	case OP_I32:	 po_imm_or_fail (  1,	  32, FALSE);	break;
7649	case OP_I32z:	 po_imm_or_fail (  0,     32, FALSE);   break;
7650	case OP_I48_I64: po_imm1_or_imm2_or_fail (48, 64, FALSE); break;
7651	case OP_I63s:	 po_imm_or_fail (-64,	  63, FALSE);	break;
7652	case OP_I63:	 po_imm_or_fail (  0,     63, FALSE);   break;
7653	case OP_I64:	 po_imm_or_fail (  1,     64, FALSE);   break;
7654	case OP_I64z:	 po_imm_or_fail (  0,     64, FALSE);   break;
7655	case OP_I255:	 po_imm_or_fail (  0,	 255, FALSE);	break;
7656
7657	case OP_I4b:	 po_imm_or_fail (  1,	   4, TRUE);	break;
7658	case OP_oI7b:
7659	case OP_I7b:	 po_imm_or_fail (  0,	   7, TRUE);	break;
7660	case OP_I15b:	 po_imm_or_fail (  0,	  15, TRUE);	break;
7661	case OP_oI31b:
7662	case OP_I31b:	 po_imm_or_fail (  0,	  31, TRUE);	break;
7663	case OP_oI32b:   po_imm_or_fail (  1,     32, TRUE);    break;
7664	case OP_oI32z:   po_imm_or_fail (  0,     32, TRUE);    break;
7665	case OP_oIffffb: po_imm_or_fail (  0, 0xffff, TRUE);	break;
7666
7667	  /* Immediate variants */
7668	case OP_oI255c:
7669	  po_char_or_fail ('{');
7670	  po_imm_or_fail (0, 255, TRUE);
7671	  po_char_or_fail ('}');
7672	  break;
7673
7674	case OP_I31w:
7675	  /* The expression parser chokes on a trailing !, so we have
7676	     to find it first and zap it.  */
7677	  {
7678	    char *s = str;
7679	    while (*s && *s != ',')
7680	      s++;
7681	    if (s[-1] == '!')
7682	      {
7683		s[-1] = '\0';
7684		inst.operands[i].writeback = 1;
7685	      }
7686	    po_imm_or_fail (0, 31, TRUE);
7687	    if (str == s - 1)
7688	      str = s;
7689	  }
7690	  break;
7691
7692	  /* Expressions */
7693	case OP_EXPi:	EXPi:
7694	  po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
7695					      GE_OPT_PREFIX));
7696	  break;
7697
7698	case OP_EXP:
7699	  po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
7700					      GE_NO_PREFIX));
7701	  break;
7702
7703	case OP_EXPr:	EXPr:
7704	  po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
7705					      GE_NO_PREFIX));
7706	  if (inst.relocs[0].exp.X_op == O_symbol)
7707	    {
7708	      val = parse_reloc (&str);
7709	      if (val == -1)
7710		{
7711		  inst.error = _("unrecognized relocation suffix");
7712		  goto failure;
7713		}
7714	      else if (val != BFD_RELOC_UNUSED)
7715		{
7716		  inst.operands[i].imm = val;
7717		  inst.operands[i].hasreloc = 1;
7718		}
7719	    }
7720	  break;
7721
7722	case OP_EXPs:
7723	  po_misc_or_fail (my_get_expression (&inst.relocs[i].exp, &str,
7724					      GE_NO_PREFIX));
7725	  if (inst.relocs[i].exp.X_op == O_symbol)
7726	    {
7727	      inst.operands[i].hasreloc = 1;
7728	    }
7729	  else if (inst.relocs[i].exp.X_op == O_constant)
7730	    {
7731	      inst.operands[i].imm = inst.relocs[i].exp.X_add_number;
7732	      inst.operands[i].hasreloc = 0;
7733	    }
7734	  break;
7735
7736	  /* Operand for MOVW or MOVT.  */
7737	case OP_HALF:
7738	  po_misc_or_fail (parse_half (&str));
7739	  break;
7740
7741	  /* Register or expression.  */
7742	case OP_RR_EXr:	  po_reg_or_goto (REG_TYPE_RN, EXPr); break;
7743	case OP_RR_EXi:	  po_reg_or_goto (REG_TYPE_RN, EXPi); break;
7744
7745	  /* Register or immediate.  */
7746	case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0);   break;
7747	I0:		  po_imm_or_fail (0, 0, FALSE);	      break;
7748
7749	case OP_RRnpcsp_I32: po_reg_or_goto (REG_TYPE_RN, I32);	break;
7750	I32:		     po_imm_or_fail (1, 32, FALSE);	break;
7751
7752	case OP_RF_IF:    po_reg_or_goto (REG_TYPE_FN, IF);   break;
7753	IF:
7754	  if (!is_immediate_prefix (*str))
7755	    goto bad_args;
7756	  str++;
7757	  val = parse_fpa_immediate (&str);
7758	  if (val == FAIL)
7759	    goto failure;
7760	  /* FPA immediates are encoded as registers 8-15.
7761	     parse_fpa_immediate has already applied the offset.  */
7762	  inst.operands[i].reg = val;
7763	  inst.operands[i].isreg = 1;
7764	  break;
7765
7766	case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
7767	I32z:		  po_imm_or_fail (0, 32, FALSE);	  break;
7768
7769	  /* Two kinds of register.  */
7770	case OP_RIWR_RIWC:
7771	  {
7772	    struct reg_entry *rege = arm_reg_parse_multi (&str);
7773	    if (!rege
7774		|| (rege->type != REG_TYPE_MMXWR
7775		    && rege->type != REG_TYPE_MMXWC
7776		    && rege->type != REG_TYPE_MMXWCG))
7777	      {
7778		inst.error = _("iWMMXt data or control register expected");
7779		goto failure;
7780	      }
7781	    inst.operands[i].reg = rege->number;
7782	    inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
7783	  }
7784	  break;
7785
7786	case OP_RIWC_RIWG:
7787	  {
7788	    struct reg_entry *rege = arm_reg_parse_multi (&str);
7789	    if (!rege
7790		|| (rege->type != REG_TYPE_MMXWC
7791		    && rege->type != REG_TYPE_MMXWCG))
7792	      {
7793		inst.error = _("iWMMXt control register expected");
7794		goto failure;
7795	      }
7796	    inst.operands[i].reg = rege->number;
7797	    inst.operands[i].isreg = 1;
7798	  }
7799	  break;
7800
7801	  /* Misc */
7802	case OP_CPSF:	 val = parse_cps_flags (&str);		break;
7803	case OP_ENDI:	 val = parse_endian_specifier (&str);	break;
7804	case OP_oROR:	 val = parse_ror (&str);		break;
7805	try_cond:
7806	case OP_COND:	 val = parse_cond (&str);		break;
7807	case OP_oBARRIER_I15:
7808	  po_barrier_or_imm (str); break;
7809	  immediate:
7810	  if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
7811	    goto failure;
7812	  break;
7813
7814	case OP_wPSR:
7815	case OP_rPSR:
7816	  po_reg_or_goto (REG_TYPE_RNB, try_psr);
7817	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
7818	    {
7819	      inst.error = _("Banked registers are not available with this "
7820			     "architecture.");
7821	      goto failure;
7822	    }
7823	  break;
7824	  try_psr:
7825	  val = parse_psr (&str, op_parse_code == OP_wPSR);
7826	  break;
7827
7828	case OP_VLDR:
7829	  po_reg_or_goto (REG_TYPE_VFSD, try_sysreg);
7830	  break;
7831	try_sysreg:
7832	  val = parse_sys_vldr_vstr (&str);
7833	  break;
7834
7835	case OP_APSR_RR:
7836	  po_reg_or_goto (REG_TYPE_RN, try_apsr);
7837	  break;
7838	  try_apsr:
7839	  /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7840	     instruction).  */
7841	  if (strncasecmp (str, "APSR_", 5) == 0)
7842	    {
7843	      unsigned found = 0;
7844	      str += 5;
7845	      while (found < 15)
7846		switch (*str++)
7847		  {
7848		  case 'c': found = (found & 1) ? 16 : found | 1; break;
7849		  case 'n': found = (found & 2) ? 16 : found | 2; break;
7850		  case 'z': found = (found & 4) ? 16 : found | 4; break;
7851		  case 'v': found = (found & 8) ? 16 : found | 8; break;
7852		  default: found = 16;
7853		  }
7854	      if (found != 15)
7855		goto failure;
7856	      inst.operands[i].isvec = 1;
7857	      /* APSR_nzcv is encoded in instructions as if it were the REG_PC.  */
7858	      inst.operands[i].reg = REG_PC;
7859	    }
7860	  else
7861	    goto failure;
7862	  break;
7863
7864	case OP_TB:
7865	  po_misc_or_fail (parse_tb (&str));
7866	  break;
7867
7868	  /* Register lists.  */
7869	case OP_REGLST:
7870	  val = parse_reg_list (&str, REGLIST_RN);
7871	  if (*str == '^')
7872	    {
7873	      inst.operands[i].writeback = 1;
7874	      str++;
7875	    }
7876	  break;
7877
7878	case OP_CLRMLST:
7879	  val = parse_reg_list (&str, REGLIST_CLRM);
7880	  break;
7881
7882	case OP_VRSLST:
7883	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S,
7884				    &partial_match);
7885	  break;
7886
7887	case OP_VRDLST:
7888	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D,
7889				    &partial_match);
7890	  break;
7891
7892	case OP_VRSDLST:
7893	  /* Allow Q registers too.  */
7894	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7895				    REGLIST_NEON_D, &partial_match);
7896	  if (val == FAIL)
7897	    {
7898	      inst.error = NULL;
7899	      val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7900					REGLIST_VFP_S, &partial_match);
7901	      inst.operands[i].issingle = 1;
7902	    }
7903	  break;
7904
7905	case OP_VRSDVLST:
7906	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7907				    REGLIST_VFP_D_VPR, &partial_match);
7908	  if (val == FAIL && !partial_match)
7909	    {
7910	      inst.error = NULL;
7911	      val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7912					REGLIST_VFP_S_VPR, &partial_match);
7913	      inst.operands[i].issingle = 1;
7914	    }
7915	  break;
7916
7917	case OP_NRDLST:
7918	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7919				    REGLIST_NEON_D, &partial_match);
7920	  break;
7921
7922	case OP_MSTRLST4:
7923	case OP_MSTRLST2:
7924	  val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7925					   1, &inst.operands[i].vectype);
7926	  if (val != (((op_parse_code == OP_MSTRLST2) ? 3 : 7) << 5 | 0xe))
7927	    goto failure;
7928	  break;
7929	case OP_NSTRLST:
7930	  val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7931					   0, &inst.operands[i].vectype);
7932	  break;
7933
7934	  /* Addressing modes */
7935	case OP_ADDRMVE:
7936	  po_misc_or_fail (parse_address_group_reloc (&str, i, GROUP_MVE));
7937	  break;
7938
7939	case OP_ADDR:
7940	  po_misc_or_fail (parse_address (&str, i));
7941	  break;
7942
7943	case OP_ADDRGLDR:
7944	  po_misc_or_fail_no_backtrack (
7945	    parse_address_group_reloc (&str, i, GROUP_LDR));
7946	  break;
7947
7948	case OP_ADDRGLDRS:
7949	  po_misc_or_fail_no_backtrack (
7950	    parse_address_group_reloc (&str, i, GROUP_LDRS));
7951	  break;
7952
7953	case OP_ADDRGLDC:
7954	  po_misc_or_fail_no_backtrack (
7955	    parse_address_group_reloc (&str, i, GROUP_LDC));
7956	  break;
7957
7958	case OP_SH:
7959	  po_misc_or_fail (parse_shifter_operand (&str, i));
7960	  break;
7961
7962	case OP_SHG:
7963	  po_misc_or_fail_no_backtrack (
7964	    parse_shifter_operand_group_reloc (&str, i));
7965	  break;
7966
7967	case OP_oSHll:
7968	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
7969	  break;
7970
7971	case OP_oSHar:
7972	  po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
7973	  break;
7974
7975	case OP_oSHllar:
7976	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
7977	  break;
7978
7979	case OP_RMQRZ:
7980	case OP_oRMQRZ:
7981	  po_reg_or_goto (REG_TYPE_MQ, try_rr_zr);
7982	  break;
7983
7984	case OP_RR_ZR:
7985	try_rr_zr:
7986	  po_reg_or_goto (REG_TYPE_RN, ZR);
7987	  break;
7988	ZR:
7989	  po_reg_or_fail (REG_TYPE_ZR);
7990	  break;
7991
7992	default:
7993	  as_fatal (_("unhandled operand code %d"), op_parse_code);
7994	}
7995
7996      /* Various value-based sanity checks and shared operations.  We
7997	 do not signal immediate failures for the register constraints;
7998	 this allows a syntax error to take precedence.	 */
7999      switch (op_parse_code)
8000	{
8001	case OP_oRRnpc:
8002	case OP_RRnpc:
8003	case OP_RRnpcb:
8004	case OP_RRw:
8005	case OP_oRRw:
8006	case OP_RRnpc_I0:
8007	  if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
8008	    inst.error = BAD_PC;
8009	  break;
8010
8011	case OP_oRRnpcsp:
8012	case OP_RRnpcsp:
8013	case OP_RRnpcsp_I32:
8014	  if (inst.operands[i].isreg)
8015	    {
8016	      if (inst.operands[i].reg == REG_PC)
8017		inst.error = BAD_PC;
8018	      else if (inst.operands[i].reg == REG_SP
8019		       /* The restriction on Rd/Rt/Rt2 on Thumb mode has been
8020			  relaxed since ARMv8-A.  */
8021		       && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
8022		{
8023		  gas_assert (thumb);
8024		  inst.error = BAD_SP;
8025		}
8026	    }
8027	  break;
8028
8029	case OP_RRnpctw:
8030	  if (inst.operands[i].isreg
8031	      && inst.operands[i].reg == REG_PC
8032	      && (inst.operands[i].writeback || thumb))
8033	    inst.error = BAD_PC;
8034	  break;
8035
8036	case OP_RVSD_COND:
8037	case OP_VLDR:
8038	  if (inst.operands[i].isreg)
8039	    break;
8040	/* fall through.  */
8041
8042	case OP_CPSF:
8043	case OP_ENDI:
8044	case OP_oROR:
8045	case OP_wPSR:
8046	case OP_rPSR:
8047	case OP_COND:
8048	case OP_oBARRIER_I15:
8049	case OP_REGLST:
8050	case OP_CLRMLST:
8051	case OP_VRSLST:
8052	case OP_VRDLST:
8053	case OP_VRSDLST:
8054	case OP_VRSDVLST:
8055	case OP_NRDLST:
8056	case OP_NSTRLST:
8057	case OP_MSTRLST2:
8058	case OP_MSTRLST4:
8059	  if (val == FAIL)
8060	    goto failure;
8061	  inst.operands[i].imm = val;
8062	  break;
8063
8064	case OP_LR:
8065	case OP_oLR:
8066	  if (inst.operands[i].reg != REG_LR)
8067	    inst.error = _("operand must be LR register");
8068	  break;
8069
8070	case OP_RMQRZ:
8071	case OP_oRMQRZ:
8072	case OP_RR_ZR:
8073	  if (!inst.operands[i].iszr && inst.operands[i].reg == REG_PC)
8074	    inst.error = BAD_PC;
8075	  break;
8076
8077	case OP_RRe:
8078	  if (inst.operands[i].isreg
8079	      && (inst.operands[i].reg & 0x00000001) != 0)
8080	    inst.error = BAD_ODD;
8081	  break;
8082
8083	case OP_RRo:
8084	  if (inst.operands[i].isreg)
8085	    {
8086	      if ((inst.operands[i].reg & 0x00000001) != 1)
8087		inst.error = BAD_EVEN;
8088	      else if (inst.operands[i].reg == REG_SP)
8089		as_tsktsk (MVE_BAD_SP);
8090	      else if (inst.operands[i].reg == REG_PC)
8091		inst.error = BAD_PC;
8092	    }
8093	  break;
8094
8095	default:
8096	  break;
8097	}
8098
8099      /* If we get here, this operand was successfully parsed.	*/
8100      inst.operands[i].present = 1;
8101      continue;
8102
8103    bad_args:
8104      inst.error = BAD_ARGS;
8105
8106    failure:
8107      if (!backtrack_pos)
8108	{
8109	  /* The parse routine should already have set inst.error, but set a
8110	     default here just in case.  */
8111	  if (!inst.error)
8112	    inst.error = BAD_SYNTAX;
8113	  return FAIL;
8114	}
8115
8116      /* Do not backtrack over a trailing optional argument that
8117	 absorbed some text.  We will only fail again, with the
8118	 'garbage following instruction' error message, which is
8119	 probably less helpful than the current one.  */
8120      if (backtrack_index == i && backtrack_pos != str
8121	  && upat[i+1] == OP_stop)
8122	{
8123	  if (!inst.error)
8124	    inst.error = BAD_SYNTAX;
8125	  return FAIL;
8126	}
8127
8128      /* Try again, skipping the optional argument at backtrack_pos.  */
8129      str = backtrack_pos;
8130      inst.error = backtrack_error;
8131      inst.operands[backtrack_index].present = 0;
8132      i = backtrack_index;
8133      backtrack_pos = 0;
8134    }
8135
8136  /* Check that we have parsed all the arguments.  */
8137  if (*str != '\0' && !inst.error)
8138    inst.error = _("garbage following instruction");
8139
8140  return inst.error ? FAIL : SUCCESS;
8141}
8142
8143#undef po_char_or_fail
8144#undef po_reg_or_fail
8145#undef po_reg_or_goto
8146#undef po_imm_or_fail
8147#undef po_scalar_or_fail
8148#undef po_barrier_or_imm
8149
/* Shorthand macro for instruction encoding functions issuing errors:
   if EXPR is true, record ERR in inst.error and return from the
   enclosing (void) function immediately.  EXPR is evaluated once.  */
#define constraint(expr, err)			\
  do						\
    {						\
      if (expr)					\
	{					\
	  inst.error = err;			\
	  return;				\
	}					\
    }						\
  while (0)
8161
/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.

   Before ARMv8-A, REG_PC and REG_SP were not allowed in quite a few
   places, while the restriction on REG_SP was relaxed since ARMv8-A.

   On rejection this sets inst.error and returns from the enclosing
   (void) function.  REG is evaluated more than once.  */
#define reject_bad_reg(reg)					\
  do								\
   if (reg == REG_PC)						\
     {								\
       inst.error = BAD_PC;					\
       return;							\
     }								\
   else if (reg == REG_SP					\
	    && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))	\
     {								\
       inst.error = BAD_SP;					\
       return;							\
     }								\
  while (0)
8182
/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  Only emits the diagnostic when deprecation warnings
   are enabled (warn_on_deprecated).  */
#define warn_deprecated_sp(reg)			\
  do						\
    if (warn_on_deprecated && reg == REG_SP)	\
       as_tsktsk (_("use of r13 is deprecated"));	\
  while (0)
8190
8191/* Functions for operand encoding.  ARM, then Thumb.  */
8192
/* Rotate the 32-bit value V left by N bits (N is taken modulo 32).
   Arguments are fully parenthesized so that the macro is safe to use
   with compound expressions such as rotate_left (x, a + b); note that
   V and N are each evaluated more than once, so avoid arguments with
   side effects.  */
#define rotate_left(v, n) ((v) << ((n) & 31) | (v) >> ((32 - (n)) & 31))
8194
8195/* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.
8196
8197   The only binary encoding difference is the Coprocessor number.  Coprocessor
8198   9 is used for half-precision calculations or conversions.  The format of the
8199   instruction is the same as the equivalent Coprocessor 10 instruction that
8200   exists for Single-Precision operation.  */
8201
8202static void
8203do_scalar_fp16_v82_encode (void)
8204{
8205  if (inst.cond < COND_ALWAYS)
8206    as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
8207	       " the behaviour is UNPREDICTABLE"));
8208  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
8209	      _(BAD_FP16));
8210
8211  inst.instruction = (inst.instruction & 0xfffff0ff) | 0x900;
8212  mark_feature_used (&arm_ext_fp16);
8213}
8214
8215/* If VAL can be encoded in the immediate field of an ARM instruction,
8216   return the encoded form.  Otherwise, return FAIL.  */
8217
8218static unsigned int
8219encode_arm_immediate (unsigned int val)
8220{
8221  unsigned int a, i;
8222
8223  if (val <= 0xff)
8224    return val;
8225
8226  for (i = 2; i < 32; i += 2)
8227    if ((a = rotate_left (val, i)) <= 0xff)
8228      return a | (i << 7); /* 12-bit pack: [shift-cnt,const].  */
8229
8230  return FAIL;
8231}
8232
8233/* If VAL can be encoded in the immediate field of a Thumb32 instruction,
8234   return the encoded form.  Otherwise, return FAIL.  */
8235static unsigned int
8236encode_thumb32_immediate (unsigned int val)
8237{
8238  unsigned int a, i;
8239
8240  if (val <= 0xff)
8241    return val;
8242
8243  for (i = 1; i <= 24; i++)
8244    {
8245      a = val >> i;
8246      if ((val & ~(0xff << i)) == 0)
8247	return ((val >> i) & 0x7f) | ((32 - i) << 7);
8248    }
8249
8250  a = val & 0xff;
8251  if (val == ((a << 16) | a))
8252    return 0x100 | a;
8253  if (val == ((a << 24) | (a << 16) | (a << 8) | a))
8254    return 0x300 | a;
8255
8256  a = val & 0xff00;
8257  if (val == ((a << 16) | a))
8258    return 0x200 | (a >> 8);
8259
8260  return FAIL;
8261}
/* Encode a VFP SP or DP register number REG into inst.instruction at
   the position selected by POS.  */

static void
encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
{
  /* D16-D31 only exist with the D32 extension; record its use, or
     report an error if the selected FPU does not provide it.  */
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
      && reg > 15)
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	{
	  first_error (_("D register out of range for selected VFP version"));
	  return;
	}
    }

  /* Single-precision registers put the upper bits of the number in
     the main field and the low bit in a separate position; doubles do
     the reverse (low four bits in the field, top bit split out).  */
  switch (pos)
    {
    case VFP_REG_Sd:
      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
      break;

    case VFP_REG_Sn:
      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
      break;

    case VFP_REG_Sm:
      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
      break;

    case VFP_REG_Dd:
      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
      break;

    case VFP_REG_Dn:
      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
      break;

    case VFP_REG_Dm:
      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
      break;

    default:
      abort ();
    }
}
8316
8317/* Encode a <shift> in an ARM-format instruction.  The immediate,
8318   if any, is handled by md_apply_fix.	 */
8319static void
8320encode_arm_shift (int i)
8321{
8322  /* register-shifted register.  */
8323  if (inst.operands[i].immisreg)
8324    {
8325      int op_index;
8326      for (op_index = 0; op_index <= i; ++op_index)
8327	{
8328	  /* Check the operand only when it's presented.  In pre-UAL syntax,
8329	     if the destination register is the same as the first operand, two
8330	     register form of the instruction can be used.  */
8331	  if (inst.operands[op_index].present && inst.operands[op_index].isreg
8332	      && inst.operands[op_index].reg == REG_PC)
8333	    as_warn (UNPRED_REG ("r15"));
8334	}
8335
8336      if (inst.operands[i].imm == REG_PC)
8337	as_warn (UNPRED_REG ("r15"));
8338    }
8339
8340  if (inst.operands[i].shift_kind == SHIFT_RRX)
8341    inst.instruction |= SHIFT_ROR << 5;
8342  else
8343    {
8344      inst.instruction |= inst.operands[i].shift_kind << 5;
8345      if (inst.operands[i].immisreg)
8346	{
8347	  inst.instruction |= SHIFT_BY_REG;
8348	  inst.instruction |= inst.operands[i].imm << 8;
8349	}
8350      else
8351	inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
8352    }
8353}
8354
8355static void
8356encode_arm_shifter_operand (int i)
8357{
8358  if (inst.operands[i].isreg)
8359    {
8360      inst.instruction |= inst.operands[i].reg;
8361      encode_arm_shift (i);
8362    }
8363  else
8364    {
8365      inst.instruction |= INST_IMMEDIATE;
8366      if (inst.relocs[0].type != BFD_RELOC_ARM_IMMEDIATE)
8367	inst.instruction |= inst.operands[i].imm;
8368    }
8369}
8370
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.
   Encode the base register and the P (pre-index) / W (write-back)
   bits of addressing-mode operand I.  IS_T selects the T-suffixed
   (user-mode) load/store forms, which only accept post-indexed
   addressing.  */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  /* PR 14260:
     Generate an error if the operand is not a register.  */
  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      gas_assert (inst.operands[i].writeback);
      /* For the T forms post-indexing is implied; the W bit instead
	 selects the user-mode access.  */
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* Warn when the base register (bits 16-19) will be updated
     (write-back set, or post-indexed) and is the same as the
     transfer register (bits 12-15).  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
8413
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.	 If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  /* Base register and P/W bits.  */
  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset, optionally with an immediate shift.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_pc && inst.operands[i].writeback)),
		  BAD_PC_ADDRESSING);
      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  /* RRX is encoded as ROR with a zero shift amount.  */
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.relocs[0] */
    {
      if (is_pc && !inst.relocs[0].pc_rel)
	{
	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);

	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
	     cannot use PC in addressing.
	     PC cannot be used in writeback addressing, either.  */
	  constraint ((is_t || inst.operands[i].writeback),
		      BAD_PC_ADDRESSING);

	  /* Use of PC in str is deprecated for ARMv7.  */
	  if (warn_on_deprecated
	      && !is_load
	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
	    as_tsktsk (_("use of PC in this instruction is deprecated"));
	}

      /* Only set up the offset reloc if no earlier parsing step
	 already chose one.  */
      if (inst.relocs[0].type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;
	  inst.relocs[0].type = BFD_RELOC_ARM_OFFSET_IMM;
	}
    }
}
8473
8474/* inst.operands[i] was set up by parse_address.  Encode it into an
8475   ARM-format mode 3 load or store instruction.	 Reject forms that
8476   cannot be used with such instructions.  If is_t is true, reject
8477   forms that cannot be used with a T instruction (i.e. not
8478   post-indexed).  */
static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  /* Mode 3 has no scaled (shifted) register offset form.  */
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  /* Encode the base register and indexing/writeback bits shared with
     addressing mode 2.  */
  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset form: [Rn, +/-Rm].  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_t && inst.operands[i].reg == REG_PC)),
		  BAD_PC_ADDRESSING);
      constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
		  BAD_PC_WRITEBACK);
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.relocs[0] */
    {
      constraint ((inst.operands[i].reg == REG_PC && !inst.relocs[0].pc_rel
		   && inst.operands[i].writeback),
		  BAD_PC_WRITEBACK);
      /* Select the immediate (rather than register) offset form.  */
      inst.instruction |= HWOFFSET_IMM;
      if (inst.relocs[0].type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;

	  inst.relocs[0].type = BFD_RELOC_ARM_OFFSET_IMM8;
	}
    }
}
8517
8518/* Write immediate bits [7:0] to the following locations:
8519
8520  |28/24|23     19|18 16|15                    4|3     0|
8521  |  a  |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
8522
8523  This function is used by VMOV/VMVN/VORR/VBIC.  */
8524
8525static void
8526neon_write_immbits (unsigned immbits)
8527{
8528  inst.instruction |= immbits & 0xf;
8529  inst.instruction |= ((immbits >> 4) & 0x7) << 16;
8530  inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
8531}
8532
8533/* Invert low-order SIZE bits of XHI:XLO.  */
8534
static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  /* Invert the low-order SIZE bits of the XHI:XLO pair.  Either pointer
     may be NULL, in which case that half is treated as zero and not
     written back.  */
  unsigned lo = xlo ? *xlo : 0;
  unsigned hi = xhi ? *xhi : 0;

  if (size == 8)
    lo = ~lo & 0xff;
  else if (size == 16)
    lo = ~lo & 0xffff;
  else if (size == 32)
    lo = ~lo & 0xffffffff;
  else if (size == 64)
    {
      lo = ~lo & 0xffffffff;
      hi = ~hi & 0xffffffff;
    }
  else
    abort ();

  if (xlo)
    *xlo = lo;

  if (xhi)
    *xhi = hi;
}
8569
8570/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
8571   A, B, C, D.  */
8572
static int
neon_bits_same_in_bytes (unsigned imm)
{
  /* Return 1 if every byte of IMM is either 0x00 or 0xff, i.e. IMM has
     the form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD.  */
  int idx;

  for (idx = 0; idx < 4; idx++)
    {
      unsigned byte = (imm >> (idx * 8)) & 0xff;

      if (byte != 0 && byte != 0xff)
	return 0;
    }

  return 1;
}
8581
8582/* For immediate of above form, return 0bABCD.  */
8583
static unsigned
neon_squash_bits (unsigned imm)
{
  /* Collapse an immediate of the form checked by neon_bits_same_in_bytes
     into 0bABCD, taking bit 0 of each byte.  */
  unsigned result = 0;
  int byte;

  for (byte = 0; byte < 4; byte++)
    result |= ((imm >> (byte * 8)) & 1u) << byte;

  return result;
}
8590
8591/* Compress quarter-float representation to 0b...000 abcdefgh.  */
8592
static unsigned
neon_qfloat_bits (unsigned imm)
{
  /* Compress a single-precision bit pattern to the 8-bit abcdefgh
     quarter-float immediate: sign to bit 7, the next 7 significant
     exponent/mantissa bits to bits 6:0.  */
  unsigned low7 = (imm >> 19) & 0x7f;
  unsigned sign = (imm >> 24) & 0x80;

  return sign | low7;
}
8598
8599/* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
8600   the instruction. *OP is passed as the initial value of the op field, and
8601   may be set to a different value depending on the constant (i.e.
8602   "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
8603   MVN).  If the immediate looks like a repeated pattern then also
8604   try smaller element sizes.  */
8605
static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
			 unsigned *immbits, int *op, int size,
			 enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  /* Quarter-float immediates use cmode 0xf; this form only exists for
     VMOV, so reject it if *op (MVN) is already set.  */
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
	return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      /* A 64-bit value whose every byte is 0x00 or 0xff can be encoded
	 with one immediate bit per byte (cmode 0xe with op set).  */
      if (neon_bits_same_in_bytes (immhi)
	  && neon_bits_same_in_bytes (immlo))
	{
	  if (*op == 1)
	    return FAIL;
	  *immbits = (neon_squash_bits (immhi) << 4)
		     | neon_squash_bits (immlo);
	  *op = 1;
	  return 0xe;
	}

      /* Otherwise retry as a repeated 32-bit pattern; both halves must
	 then be identical.  */
      if (immhi != immlo)
	return FAIL;
    }

  if (size >= 32)
    {
      /* A single non-zero byte, in any of the four byte positions.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x0;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0x2;
	}
      else if (immlo == (immlo & 0x00ff0000))
	{
	  *immbits = immlo >> 16;
	  return 0x4;
	}
      else if (immlo == (immlo & 0xff000000))
	{
	  *immbits = immlo >> 24;
	  return 0x6;
	}
      /* A byte followed by 0xff or 0xffff ("ones" variants).  */
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
	{
	  *immbits = (immlo >> 8) & 0xff;
	  return 0xc;
	}
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
	{
	  *immbits = (immlo >> 16) & 0xff;
	  return 0xd;
	}

      /* Retry as a repeated 16-bit pattern; the two halfwords must
	 match.  */
      if ((immlo & 0xffff) != (immlo >> 16))
	return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      /* A single non-zero byte in either halfword position.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x8;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0xa;
	}

      /* Retry as a repeated 8-bit pattern; the two bytes must match.  */
      if ((immlo & 0xff) != (immlo >> 8))
	return FAIL;
      immlo &= 0xff;
    }

  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
	return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
8708
8709#if defined BFD_HOST_64_BIT
8710/* Returns TRUE if double precision value V may be cast
8711   to single precision without loss of accuracy.  */
8712
8713static bfd_boolean
8714is_double_a_single (bfd_int64_t v)
8715{
8716  int exp = (int)((v >> 52) & 0x7FF);
8717  bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
8718
8719  return (exp == 0 || exp == 0x7FF
8720	  || (exp >= 1023 - 126 && exp <= 1023 + 127))
8721    && (mantissa & 0x1FFFFFFFl) == 0;
8722}
8723
8724/* Returns a double precision value casted to single precision
8725   (ignoring the least significant bits in exponent and mantissa).  */
8726
static int
double_to_single (bfd_int64_t v)
{
  /* IEEE double fields: sign bit, 11-bit exponent, 52-bit mantissa.  */
  int sign = (int) ((v >> 63) & 1l);
  int exp = (int) ((v >> 52) & 0x7FF);
  bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);

  if (exp == 0x7FF)
    /* Infinity/NaN: the single-precision exponent is also all ones.  */
    exp = 0xFF;
  else
    {
      /* Rebias the exponent from double (1023) to single (127).  */
      exp = exp - 1023 + 127;
      if (exp >= 0xFF)
	{
	  /* Infinity.  */
	  /* NOTE(review): 0x7F looks suspect here -- single-precision
	     infinity has exponent 0xFF.  This branch appears unreachable
	     when the caller first checks is_double_a_single, which bounds
	     the exponent below this case; confirm before relying on it.  */
	  exp = 0x7F;
	  mantissa = 0;
	}
      else if (exp < 0)
	{
	  /* No denormalized numbers.  */
	  exp = 0;
	  mantissa = 0;
	}
    }
  /* Keep the top 23 mantissa bits and pack sign/exponent/mantissa.  */
  mantissa >>= 29;
  return (sign << 31) | (exp << 23) | mantissa;
}
8755#endif /* BFD_HOST_64_BIT */
8756
/* Kind of "ldr rd, =expr" style pseudo being processed; used by
   move_or_literal_pool to choose the replacement instruction.  */
enum lit_type
{
  CONST_THUMB,	/* Thumb load pseudo.  */
  CONST_ARM,	/* ARM load pseudo.  */
  CONST_VEC	/* Vector (VFP/Neon) load pseudo.  */
};
8763
/* Defined later; move_or_literal_pool uses it to emit fconsts/fconstd.  */
static void do_vfp_nsyn_opcode (const char *);
8765
8766/* inst.relocs[0].exp describes an "=expr" load pseudo-operation.
8767   Determine whether it can be performed with a move instruction; if
8768   it can, convert inst.instruction to that move instruction and
8769   return TRUE; if it can't, convert inst.instruction to a literal-pool
8770   load and return FALSE.  If this is not a valid thing to do in the
8771   current context, set inst.error and return TRUE.
8772
8773   inst.operands[i] describes the destination register.	 */
8774
static bfd_boolean
move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
{
  unsigned long tbit;
  bfd_boolean thumb_p = (t == CONST_THUMB);
  bfd_boolean arm_p   = (t == CONST_ARM);

  /* Select the load bit appropriate to the instruction set in use.  */
  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  /* The "=expr" pseudo-operand is only valid on load instructions.  */
  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return TRUE;
    }

  if (inst.relocs[0].exp.X_op != O_constant
      && inst.relocs[0].exp.X_op != O_symbol
      && inst.relocs[0].exp.X_op != O_big)
    {
      inst.error = _("constant expression expected");
      return TRUE;
    }

  if (inst.relocs[0].exp.X_op == O_constant
      || inst.relocs[0].exp.X_op == O_big)
    {
#if defined BFD_HOST_64_BIT
      bfd_int64_t v;
#else
      offsetT v;
#endif
      if (inst.relocs[0].exp.X_op == O_big)
	{
	  LITTLENUM_TYPE w[X_PRECISION];
	  LITTLENUM_TYPE * l;

	  if (inst.relocs[0].exp.X_add_number == -1)
	    {
	      /* An X_add_number of -1 marks a big number that came from a
		 flonum; convert it to littlenums first.  */
	      gen_to_words (w, X_PRECISION, E_PRECISION);
	      l = w;
	      /* FIXME: Should we check words w[2..5] ?  */
	    }
	  else
	    l = generic_bignum;

	  /* Reassemble the low 64 (or 32) bits of the value from the
	     littlenum array.  */
#if defined BFD_HOST_64_BIT
	  v =
	    ((((((((bfd_int64_t) l[3] & LITTLENUM_MASK)
		  << LITTLENUM_NUMBER_OF_BITS)
		 | ((bfd_int64_t) l[2] & LITTLENUM_MASK))
		<< LITTLENUM_NUMBER_OF_BITS)
	       | ((bfd_int64_t) l[1] & LITTLENUM_MASK))
	      << LITTLENUM_NUMBER_OF_BITS)
	     | ((bfd_int64_t) l[0] & LITTLENUM_MASK));
#else
	  v = ((l[1] & LITTLENUM_MASK) << LITTLENUM_NUMBER_OF_BITS)
	    |  (l[0] & LITTLENUM_MASK);
#endif
	}
      else
	v = inst.relocs[0].exp.X_add_number;

      if (!inst.operands[i].issingle)
	{
	  if (thumb_p)
	    {
	      /* The LDR pseudo should not lead to a flag-setting (MOVS)
		 instruction being chosen, so we do not check whether movs
		 can be used.  */

	      if ((ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
		  || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		  && inst.operands[i].reg != 13
		  && inst.operands[i].reg != 15)
		{
		  /* Check if on thumb2 it can be done with a mov.w, mvn or
		     movw instruction.  */
		  unsigned int newimm;
		  bfd_boolean isNegated;

		  newimm = encode_thumb32_immediate (v);
		  if (newimm != (unsigned int) FAIL)
		    isNegated = FALSE;
		  else
		    {
		      /* Try the bitwise inverse, to be emitted as MVN.  */
		      newimm = encode_thumb32_immediate (~v);
		      if (newimm != (unsigned int) FAIL)
			isNegated = TRUE;
		    }

		  /* The number can be loaded with a mov.w or mvn
		     instruction.  */
		  if (newimm != (unsigned int) FAIL
		      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		    {
		      inst.instruction = (0xf04f0000  /*  MOV.W.  */
					  | (inst.operands[i].reg << 8));
		      /* Change to MOVN.  */
		      inst.instruction |= (isNegated ? 0x200000 : 0);
		      inst.instruction |= (newimm & 0x800) << 15;
		      inst.instruction |= (newimm & 0x700) << 4;
		      inst.instruction |= (newimm & 0x0ff);
		      return TRUE;
		    }
		  /* The number can be loaded with a movw instruction.  */
		  else if ((v & ~0xFFFF) == 0
			   && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		    {
		      int imm = v & 0xFFFF;

		      inst.instruction = 0xf2400000;  /* MOVW.  */
		      inst.instruction |= (inst.operands[i].reg << 8);
		      inst.instruction |= (imm & 0xf000) << 4;
		      inst.instruction |= (imm & 0x0800) << 15;
		      inst.instruction |= (imm & 0x0700) << 4;
		      inst.instruction |= (imm & 0x00ff);
		      /*  In case this replacement is being done on Armv8-M
			  Baseline we need to make sure to disable the
			  instruction size check, as otherwise GAS will reject
			  the use of this T32 instruction.  */
		      inst.size_req = 0;
		      return TRUE;
		    }
		}
	    }
	  else if (arm_p)
	    {
	      int value = encode_arm_immediate (v);

	      if (value != FAIL)
		{
		  /* This can be done with a mov instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}

	      /* Otherwise try the bitwise inverse as an MVN.  */
	      value = encode_arm_immediate (~ v);
	      if (value != FAIL)
		{
		  /* This can be done with a mvn instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}
	    }
	  else if (t == CONST_VEC && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
	    {
	      /* Try to synthesise the 64-bit constant with a Neon VMOV/VMVN
		 modified immediate instead of a literal load.  */
	      int op = 0;
	      unsigned immbits = 0;
	      unsigned immlo = inst.operands[1].imm;
	      unsigned immhi = inst.operands[1].regisimm
		? inst.operands[1].reg
		: inst.relocs[0].exp.X_unsigned
		? 0
		: ((bfd_int64_t)((int) immlo)) >> 32;
	      int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);

	      if (cmode == FAIL)
		{
		  /* Retry with the inverted value (VMOV <-> VMVN).  */
		  neon_invert_size (&immlo, &immhi, 64);
		  op = !op;
		  cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);
		}

	      if (cmode != FAIL)
		{
		  inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
		    | (1 << 23)
		    | (cmode << 8)
		    | (op << 5)
		    | (1 << 4);

		  /* Fill other bits in vmov encoding for both thumb and arm.  */
		  if (thumb_mode)
		    inst.instruction |= (0x7U << 29) | (0xF << 24);
		  else
		    inst.instruction |= (0xFU << 28) | (0x1 << 25);
		  neon_write_immbits (immbits);
		  return TRUE;
		}
	    }
	}

      if (t == CONST_VEC)
	{
	  /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant.  */
	  if (inst.operands[i].issingle
	      && is_quarter_float (inst.operands[1].imm)
	      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3xd))
	    {
	      inst.operands[1].imm =
		neon_qfloat_bits (v);
	      do_vfp_nsyn_opcode ("fconsts");
	      return TRUE;
	    }

	  /* If our host does not support a 64-bit type then we cannot perform
	     the following optimization.  This means that there will be a
	     discrepancy between the output produced by an assembler built for
	     a 32-bit-only host and the output produced from a 64-bit host, but
	     this cannot be helped.  */
#if defined BFD_HOST_64_BIT
	  else if (!inst.operands[1].issingle
		   && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	    {
	      /* A double constant that survives the round trip through
		 single precision can be emitted as fconstd.  */
	      if (is_double_a_single (v)
		  && is_quarter_float (double_to_single (v)))
		{
		  inst.operands[1].imm =
		    neon_qfloat_bits (double_to_single (v));
		  do_vfp_nsyn_opcode ("fconstd");
		  return TRUE;
		}
	    }
#endif
	}
    }

  /* No move was possible: reserve space in the literal pool and rewrite
     the operand as a PC-relative load from the pool.  */
  if (add_to_lit_pool ((!inst.operands[i].isvec
			|| inst.operands[i].issingle) ? 4 : 8) == FAIL)
    return TRUE;

  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.relocs[0].pc_rel = 1;
  inst.relocs[0].type = (thumb_p
		     ? BFD_RELOC_ARM_THUMB_OFFSET
		     : (mode_3
			? BFD_RELOC_ARM_HWLITERAL
			: BFD_RELOC_ARM_LITERAL));
  return FALSE;
}
9015
9016/* inst.operands[i] was set up by parse_address.  Encode it into an
9017   ARM-format instruction.  Reject all forms which cannot be encoded
9018   into a coprocessor load/store instruction.  If wb_ok is false,
9019   reject use of writeback; if unind_ok is false, reject use of
9020   unindexed addressing.  If reloc_override is not 0, use it instead
9021   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
9022   (in which case it is preserved).  */
9023
static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  if (!inst.operands[i].isreg)
    {
      /* PR 18256 */
      if (! inst.operands[0].isvec)
	{
	  inst.error = _("invalid co-processor operand");
	  return FAIL;
	}
      /* "vldr rd, =const": try to turn it into a vmov; failing that, the
	 constant goes in the literal pool.  */
      if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
	return SUCCESS;
    }

  /* Base register goes in bits 19:16.  */
  inst.instruction |= inst.operands[i].reg << 16;

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* The 8-bit option value occupies the offset field.  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  if (reloc_override)
    inst.relocs[0].type = (bfd_reloc_code_real_type) reloc_override;
  else if ((inst.relocs[0].type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.relocs[0].type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.relocs[0].type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      /* A group relocation already chosen by the parser is preserved;
	 anything else becomes the default coprocessor offset reloc.  */
      if (thumb_mode)
	inst.relocs[0].type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.relocs[0].type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  /* Prefer + for zero encoded value.  */
  if (!inst.operands[i].negative)
    inst.instruction |= INDEX_UP;

  return SUCCESS;
}
9092
9093/* Functions for instruction encoding, sorted by sub-architecture.
9094   First some generics; their names are taken from the conventional
9095   bit positions for register arguments in ARM format instructions.  */
9096
/* Encoder for instructions that need no operand fields filled in: the
   base opcode from the table is already complete.  */
static void
do_noargs (void)
{
}
9101
/* Encode Rd (operand 0) at bit 12.  */
static void
do_rd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
}
9107
/* Encode Rn (operand 0) at bit 16.  */
static void
do_rn (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
}
9113
9114static void
9115do_rd_rm (void)
9116{
9117  inst.instruction |= inst.operands[0].reg << 12;
9118  inst.instruction |= inst.operands[1].reg;
9119}
9120
9121static void
9122do_rm_rn (void)
9123{
9124  inst.instruction |= inst.operands[0].reg;
9125  inst.instruction |= inst.operands[1].reg << 16;
9126}
9127
9128static void
9129do_rd_rn (void)
9130{
9131  inst.instruction |= inst.operands[0].reg << 12;
9132  inst.instruction |= inst.operands[1].reg << 16;
9133}
9134
9135static void
9136do_rn_rd (void)
9137{
9138  inst.instruction |= inst.operands[0].reg << 16;
9139  inst.instruction |= inst.operands[1].reg << 12;
9140}
9141
9142static void
9143do_tt (void)
9144{
9145  inst.instruction |= inst.operands[0].reg << 8;
9146  inst.instruction |= inst.operands[1].reg << 16;
9147}
9148
9149static bfd_boolean
9150check_obsolete (const arm_feature_set *feature, const char *msg)
9151{
9152  if (ARM_CPU_IS_ANY (cpu_variant))
9153    {
9154      as_tsktsk ("%s", msg);
9155      return TRUE;
9156    }
9157  else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
9158    {
9159      as_bad ("%s", msg);
9160      return TRUE;
9161    }
9162
9163  return FALSE;
9164}
9165
static void
do_rd_rm_rn (void)
{
  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.  */
  /* The mask clears the condition field and bit 22, so it matches both
     SWP (0x01000090) and SWPB (0x01400090).  */
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
    {
      constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
		  _("Rn must not overlap other operands"));

      /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
       */
      if (!check_obsolete (&arm_ext_v8,
			   _("swp{b} use is obsoleted for ARMv8 and later"))
	  && warn_on_deprecated
	  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
	as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
    }

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= Rn << 16;
}
9189
9190static void
9191do_rd_rn_rm (void)
9192{
9193  inst.instruction |= inst.operands[0].reg << 12;
9194  inst.instruction |= inst.operands[1].reg << 16;
9195  inst.instruction |= inst.operands[2].reg;
9196}
9197
static void
do_rm_rd_rn (void)
{
  constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
  /* The address operand must be a bare [Rn]: any explicit offset has to
     be the constant zero.  */
  constraint (((inst.relocs[0].exp.X_op != O_constant
		&& inst.relocs[0].exp.X_op != O_illegal)
	       || inst.relocs[0].exp.X_add_number != 0),
	      BAD_ADDR_MODE);
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
9210
/* Encode a bare immediate (operand 0) into the low bits of the opcode.  */
static void
do_imm0 (void)
{
  inst.instruction |= inst.operands[0].imm;
}
9216
/* Encode Rd at bit 12 plus a coprocessor address (operand 1); both
   writeback and unindexed forms are permitted.  */
static void
do_rd_cpaddr (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
9223
9224/* ARM instructions, in alphabetical order by function name (except
9225   that wrapper functions appear immediately after the function they
9226   wrap).  */
9227
9228/* This is a pseudo-op of the form "adr rd, label" to be converted
9229   into a relative address of the form "add rd, pc, #label-.-8".  */
9230
static void
do_adr (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
  inst.relocs[0].pc_rel = 1;
  /* Compensate for the PC offset in ARM state (".-8" in the expansion).  */
  inst.relocs[0].exp.X_add_number -= 8;

  /* If the target is a defined Thumb function, set bit 0 of the address
     so that interworking transfers end up in Thumb state.  */
  if (support_interwork
      && inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number |= 1;
}
9249
9250/* This is a pseudo-op of the form "adrl rd, label" to be converted
9251   into a relative address of the form:
9252   add rd, pc, #low(label-.-8)"
9253   add rd, rd, #high(label-.-8)"  */
9254
static void
do_adrl (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.relocs[0].type	       = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.relocs[0].pc_rel	       = 1;
  /* adrl expands to two instructions.  */
  inst.size		       = INSN_SIZE * 2;
  /* Compensate for the PC offset in ARM state (".-8" in the expansion).  */
  inst.relocs[0].exp.X_add_number -= 8;

  /* If the target is a defined Thumb function, set bit 0 of the address
     so that interworking transfers end up in Thumb state.  */
  if (support_interwork
      && inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number |= 1;
}
9274
/* Encode a data-processing (arithmetic) instruction: Rd, Rn and a
   shifter operand.  */
static void
do_arit (void)
{
  /* Thumb-only ALU relocations must not appear on an ARM instruction.  */
  constraint (inst.relocs[0].type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.relocs[0].type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  /* Two-operand form: the destination doubles as the first source.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_shifter_operand (2);
}
9287
9288static void
9289do_barrier (void)
9290{
9291  if (inst.operands[0].present)
9292    inst.instruction |= inst.operands[0].imm;
9293  else
9294    inst.instruction |= 0xf;
9295}
9296
9297static void
9298do_bfc (void)
9299{
9300  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
9301  constraint (msb > 32, _("bit-field extends past end of register"));
9302  /* The instruction encoding stores the LSB and MSB,
9303     not the LSB and width.  */
9304  inst.instruction |= inst.operands[0].reg << 12;
9305  inst.instruction |= inst.operands[1].imm << 7;
9306  inst.instruction |= (msb - 1) << 16;
9307}
9308
static void
do_bfi (void)
{
  unsigned int msb;

  /* #0 in second position is alternative syntax for bfc, which is
     the same instruction but with REG_PC in the Rm field.  */
  if (!inst.operands[1].isreg)
    inst.operands[1].reg = REG_PC;

  msb = inst.operands[2].imm + inst.operands[3].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (msb - 1) << 16;
}
9328
9329static void
9330do_bfx (void)
9331{
9332  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
9333	      _("bit-field extends past end of register"));
9334  inst.instruction |= inst.operands[0].reg << 12;
9335  inst.instruction |= inst.operands[1].reg;
9336  inst.instruction |= inst.operands[2].imm << 7;
9337  inst.instruction |= (inst.operands[3].imm - 1) << 16;
9338}
9339
9340/* ARM V5 breakpoint instruction (argument parse)
9341     BKPT <16 bit unsigned immediate>
9342     Instruction is not conditional.
9343	The bit pattern given in insns[] has the COND_ALWAYS condition,
9344	and it is an error if the caller tried to override that.  */
9345
9346static void
9347do_bkpt (void)
9348{
9349  /* Top 12 of 16 bits to bits 19:8.  */
9350  inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
9351
9352  /* Bottom 4 of 16 bits to bits 3:0.  */
9353  inst.instruction |= inst.operands[0].imm & 0xf;
9354}
9355
/* Set up the relocation for a branch: honour an explicit (plt) or
   (tlscall) suffix on the operand, otherwise use DEFAULT_RELOC.  Branch
   relocations are always pc-relative.  */
static void
encode_branch (int default_reloc)
{
  if (inst.operands[0].hasreloc)
    {
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
      /* (tlscall) picks the Thumb or ARM variant of the TLS reloc.  */
      inst.relocs[0].type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
	? BFD_RELOC_ARM_PLT32
	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
    }
  else
    inst.relocs[0].type = (bfd_reloc_code_real_type) default_reloc;
  inst.relocs[0].pc_rel = 1;
}
9372
static void
do_branch (void)
{
#ifdef OBJ_ELF
  /* EABI v4 or later objects use the JUMP reloc for plain branches.  */
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
9383
static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      /* EABI v4 or later: unconditional calls get the CALL reloc,
	 conditional ones the JUMP reloc.  */
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
9399
9400/* ARM V5 branch-link-exchange instruction (argument parse)
9401     BLX <target_addr>		ie BLX(1)
9402     BLX{<condition>} <Rm>	ie BLX(2)
9403   Unfortunately, there are two different opcodes for this mnemonic.
9404   So, the insns[].value is not used, and the code here zaps values
9405	into inst.instruction.
9406   Also, the <target_addr> can be 25 bits, hence has its own reloc.  */
9407
static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.
	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
	 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction = 0xfa000000;  /* BLX(1) base opcode.  */
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}
9431
static void
do_bx (void)
{
  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output an R_ARM_V4BX relocation if this is an EABI object that looks
     like it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  /* The object architecture, when explicitly recorded, overrides the
     selected CPU.  */
  if (!ARM_FEATURE_ZERO (selected_object_arch)
      && !ARM_CPU_HAS_FEATURE (selected_object_arch, arm_ext_v5))
      want_reloc = TRUE;

#ifdef OBJ_ELF
  /* Pre-v4 EABI objects (and all non-ELF objects) never get the reloc.  */
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    want_reloc = FALSE;

  if (want_reloc)
    inst.relocs[0].type = BFD_RELOC_ARM_V4BX;
}
9456
9457
9458/* ARM v5TEJ.  Jump to Jazelle code.  */
9459
static void
do_bxj (void)
{
  /* Using PC as the target register is legal but not useful; warn.  */
  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bxj is not really useful"));

  /* Rm goes in the low bits of the opcode.  */
  inst.instruction |= inst.operands[0].reg;
}
9468
9469/* Co-processor data operation:
9470      CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
9471      CDP2	<coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}	 */
9472static void
9473do_cdp (void)
9474{
9475  inst.instruction |= inst.operands[0].reg << 8;
9476  inst.instruction |= inst.operands[1].imm << 20;
9477  inst.instruction |= inst.operands[2].reg << 12;
9478  inst.instruction |= inst.operands[3].reg << 16;
9479  inst.instruction |= inst.operands[4].reg;
9480  inst.instruction |= inst.operands[5].imm << 5;
9481}
9482
/* Encode a comparison: Rn at bit 16 plus a shifter operand; there is
   no destination register.  */
static void
do_cmp (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_shifter_operand (1);
}
9489
9490/* Transfer between coprocessor and ARM registers.
9491   MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
9492   MRC2
9493   MCR{cond}
9494   MCR2
9495
9496   No special properties.  */
9497
/* Description of one coprocessor register whose MRC/MCR access is
   deprecated or obsoleted from some architecture version onwards.  */
struct deprecated_coproc_regs_s
{
  unsigned cp;			/* Coprocessor number.  */
  int opc1;			/* opcode_1 field.  */
  unsigned crn;			/* CRn field.  */
  unsigned crm;			/* CRm field.  */
  int opc2;			/* opcode_2 field.  */
  arm_feature_set deprecated;	/* Features for which access is deprecated.  */
  arm_feature_set obsoleted;	/* Features for which access is obsoleted.  */
  const char *dep_msg;		/* Diagnostic for deprecated access.  */
  const char *obs_msg;		/* Diagnostic for obsoleted access.  */
};
9510
#define DEPR_ACCESS_V8 \
  N_("This coprocessor register access is deprecated in ARMv8")

/* Table of all deprecated coprocessor registers.  */
static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
{
    /* cp  opc1  crn  crm  opc2.  */
    {15, 0, 7, 10, 5,					/* CP15DMB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7, 10, 4,					/* CP15DSB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7,  5, 4,					/* CP15ISB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 1,  0, 0,					/* TEEHBR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 0,  0, 0,					/* TEECR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
};

#undef DEPR_ACCESS_V8

/* Number of entries in the table above.  */
static const size_t deprecated_coproc_reg_count =
  sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
9538
/* Encode an MRC/MCR (or MRC2/MCR2) core-register/coprocessor transfer,
   diagnosing bad uses of SP/PC and warning about deprecated
   coprocessor register accesses.  */
static void
do_co_reg (void)
{
  unsigned Rd;
  size_t i;

  Rd = inst.operands[2].reg;
  if (thumb_mode)
    {
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2  */
	reject_bad_reg (Rd);
      else if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	/* MRC, MRC2  */
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }

    /* Warn if the operands name one of the known deprecated
       coprocessor registers, unless assembling for "any" CPU.  */
    for (i = 0; i < deprecated_coproc_reg_count; ++i)
      {
	const struct deprecated_coproc_regs_s *r =
	  deprecated_coproc_regs + i;

	if (inst.operands[0].reg == r->cp
	    && inst.operands[1].imm == r->opc1
	    && inst.operands[3].reg == r->crn
	    && inst.operands[4].reg == r->crm
	    && inst.operands[5].imm == r->opc2)
	  {
	    if (! ARM_CPU_IS_ANY (cpu_variant)
		&& warn_on_deprecated
		&& ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
	      as_tsktsk ("%s", r->dep_msg);
	  }
      }

  /* coproc at bit 8, opc1 at 21, Rd at 12, CRn at 16, CRm at 0, opc2 at 5.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 21;
  inst.instruction |= Rd << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}
9588
9589/* Transfer between coprocessor register and pair of ARM registers.
9590   MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
9591   MCRR2
9592   MRRC{cond}
9593   MRRC2
9594
9595   Two XScale instructions are special cases of these:
9596
9597     MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
9598     MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
9599
9600   Result unpredictable if Rd or Rn is R15.  */
9601
/* Encode an MCRR/MRRC (or MCRR2/MRRC2) two-core-register/coprocessor
   transfer.  Rejects SP/PC as appropriate for the instruction set and
   diagnoses the UNPREDICTABLE Rd == Rn case for MRRC.  */
static void
do_co_reg2c (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[2].reg;
  Rn = inst.operands[3].reg;

  if (thumb_mode)
    {
      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
    }
  else
    {
      constraint (Rd == REG_PC, BAD_PC);
      constraint (Rn == REG_PC, BAD_PC);
    }

  /* Only check the MRRC{2} variants.  */
  if ((inst.instruction & 0x0FF00000) == 0x0C500000)
    {
       /* If Rd == Rn, error that the operation is
	  unpredictable (example MRRC p3,#1,r1,r1,c4).  */
       constraint (Rd == Rn, BAD_OVERLAP);
    }

  /* coproc at bit 8, opcode at 4, Rd at 12, Rn at 16, CRm at 0.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 4;
  inst.instruction |= Rd << 12;
  inst.instruction |= Rn << 16;
  inst.instruction |= inst.operands[4].reg;
}
9635
9636static void
9637do_cpsi (void)
9638{
9639  inst.instruction |= inst.operands[0].imm << 6;
9640  if (inst.operands[1].present)
9641    {
9642      inst.instruction |= CPSI_MMOD;
9643      inst.instruction |= inst.operands[1].imm;
9644    }
9645}
9646
9647static void
9648do_dbg (void)
9649{
9650  inst.instruction |= inst.operands[0].imm;
9651}
9652
9653static void
9654do_div (void)
9655{
9656  unsigned Rd, Rn, Rm;
9657
9658  Rd = inst.operands[0].reg;
9659  Rn = (inst.operands[1].present
9660	? inst.operands[1].reg : Rd);
9661  Rm = inst.operands[2].reg;
9662
9663  constraint ((Rd == REG_PC), BAD_PC);
9664  constraint ((Rn == REG_PC), BAD_PC);
9665  constraint ((Rm == REG_PC), BAD_PC);
9666
9667  inst.instruction |= Rd << 16;
9668  inst.instruction |= Rn << 0;
9669  inst.instruction |= Rm << 8;
9670}
9671
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  /* Emit no bytes; only record predication state for later checks.  */
  inst.size = 0;
  if (unified_syntax)
    {
      set_pred_insn_type (IT_INSN);
      /* The low nibble of the opcode holds the then/else mask; OR in
	 0x10 as the mask's terminating bit.  */
      now_pred.mask = (inst.instruction & 0xf) | 0x10;
      now_pred.cc = inst.operands[0].imm;
    }
}
9688
9689/* If there is only one register in the register list,
9690   then return its register number.  Otherwise return -1.  */
static int
only_one_reg_in_list (int range)
{
  int i;

  /* Guard the empty list explicitly: ffs (0) returns 0, which would
     make i == -1 and evaluate 1 << -1 below — undefined behavior.  */
  if (range == 0)
    return -1;

  i = ffs (range) - 1;		/* Index of the lowest set bit.  */
  /* A single-register list has exactly that one bit set, and only
     r0-r15 are valid register numbers.  */
  return (i > 15 || range != (1 << i)) ? -1 : i;
}
9697
/* Encode an LDM/STM-style instruction: base register, register list,
   user/alternate-bank bit and writeback, warning about UNPREDICTABLE
   writeback combinations.  When FROM_PUSH_POP_MNEM is set and the list
   contains a single register, switch to the single-register (A2)
   PUSH/POP encoding.  */
static void
encode_ldmstm(int from_push_pop_mnem)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;	/* Register list bit mask.  */
  int one_reg;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.	 */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }

  /* If PUSH/POP has only one register, then use the A2 encoding.  */
  one_reg = only_one_reg_in_list (range);
  if (from_push_pop_mnem && one_reg >= 0)
    {
      int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;

      if (is_push && one_reg == 13 /* SP */)
	/* PR 22483: The A2 encoding cannot be used when
	   pushing the stack pointer as this is UNPREDICTABLE.  */
	return;

      /* Keep only the condition field and rebuild as the A2 form.  */
      inst.instruction &= A_COND_MASK;
      inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
      inst.instruction |= one_reg << 12;
    }
}
9753
/* Plain LDM/STM mnemonics: never eligible for the single-register
   PUSH/POP (A2) encoding.  */
static void
do_ldmstm (void)
{
  encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
}
9759
9760/* ARMv5TE load-consecutive (argument parse)
9761   Mode is like LDRH.
9762
9763     LDRccD R, mode
9764     STRccD R, mode.  */
9765
static void
do_ldrd (void)
{
  /* Operand 0 is the first (even) transfer register; operand 1, if
     present, must be the odd register immediately following it;
     operand 2 is the address.  */
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first transfer register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only transfer two consecutive registers"));
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  /* encode_arm_addr_mode_3 will diagnose overlap between the base
     register and the first register written; we have to diagnose
     overlap between the base and the second register written here.  */

  if (inst.operands[2].reg == inst.operands[1].reg
      && (inst.operands[2].writeback || inst.operands[2].postind))
    as_warn (_("base register written back, and overlaps "
	       "second transfer register"));

  /* V4_STR_BIT clear means this is the load (LDRD) form.  */
  if (!(inst.instruction & V4_STR_BIT))
    {
      /* For an index-register load, the index register must not overlap the
	destination (even if not write-back).  */
      if (inst.operands[2].immisreg
	      && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps transfer register"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
9801
/* ARM LDREX: only the plain [Rn] addressing form (zero offset, no
   index, no writeback) is permitted.  */
static void
do_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.relocs[0].exp.X_op != O_constant
	      || inst.relocs[0].exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  /* NOTE(review): PC-as-base is already rejected by the constraint
     above, so this check appears redundant; kept for its distinct
     diagnostic.  */
  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.relocs[0].type = BFD_RELOC_UNUSED;
}
9833
/* ARM LDREXD: loads an even/odd register pair; operand 2 is the base
   register.  */
static void
do_ldrexd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  /* First destination at bit 12, base register at bit 16.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
9849
9850/* In both ARM and thumb state 'ldr pc, #imm'  with an immediate
9851   which is not a multiple of four is UNPREDICTABLE.  */
static void
check_ldr_r15_aligned (void)
{
  /* Only immediate-offset PC-relative loads to the PC are checked;
     register-offset forms cannot be verified at assembly time.  */
  constraint (!(inst.operands[1].immisreg)
	      && (inst.operands[0].reg == REG_PC
	      && inst.operands[1].reg == REG_PC
	      && (inst.relocs[0].exp.X_add_number & 0x3)),
	      _("ldr to register 15 must be 4-byte aligned"));
}
9861
/* LDR/STR word/byte: Rd at bit 12; a non-register second operand is a
   literal, which may be turned into a MOV or a literal-pool load.  */
static void
do_ldst (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
      return;
  encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
  check_ldr_r15_aligned ();
}
9872
static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      /* Rewrite [Rn] as the equivalent post-indexed [Rn], #0 with
	 writeback before encoding.  */
      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
}
9891
9892/* Halfword and signed-byte load/store operations.  */
9893
/* Halfword/signed-byte LDR/STR (mode 3): PC is not a valid transfer
   register; literal operands may become a MOV or literal-pool load.  */
static void
do_ldstv4 (void)
{
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
      return;
  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
}
9904
static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      /* Rewrite [Rn] as the equivalent post-indexed [Rn], #0 with
	 writeback before encoding.  */
      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
}
9923
9924/* Co-processor register load/store.
9925   Format: <LDC|STC>{cond}[L] CP#,CRd,<address>	 */
9926static void
9927do_lstc (void)
9928{
9929  inst.instruction |= inst.operands[0].reg << 8;
9930  inst.instruction |= inst.operands[1].reg << 12;
9931  encode_arm_cp_address (2, TRUE, TRUE, 0);
9932}
9933
/* MLA/MLS Rd, Rm, Rs, Rn: Rd at bit 16, Rm at 0, Rs at 8, Rn at 12.  */
static void
do_mlas (void)
{
  /* This restriction does not apply to mls (nor to mla in v6 or later).  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
      && !(inst.instruction & 0x00400000))
    as_tsktsk (_("Rd and Rm should be different in mla"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;
}
9948
/* MOV (and friends): Rd at bit 12, shifter operand from operand 1.
   The Thumb-1 :lower0_7:-style relocation group is not valid here.  */
static void
do_mov (void)
{
  constraint (inst.relocs[0].type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.relocs[0].type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_shifter_operand (1);
}
9958
9959/* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>.	 */
static void
do_mov16 (void)
{
  bfd_vma imm;
  bfd_boolean top;

  /* Bit 22 distinguishes MOVT (top) from MOVW.  A :lower16:/:upper16:
     prefix must match the instruction chosen.  */
  top = (inst.instruction & 0x00400000) != 0;
  constraint (top && inst.relocs[0].type == BFD_RELOC_ARM_MOVW,
	      _(":lower16: not allowed in this instruction"));
  constraint (!top && inst.relocs[0].type == BFD_RELOC_ARM_MOVT,
	      _(":upper16: not allowed in this instruction"));
  inst.instruction |= inst.operands[0].reg << 12;
  /* When there is no relocation, encode the immediate directly;
     otherwise the fixup machinery fills it in later.  */
  if (inst.relocs[0].type == BFD_RELOC_UNUSED)
    {
      imm = inst.relocs[0].exp.X_add_number;
      /* The value is in two pieces: 0:11, 16:19.  */
      inst.instruction |= (imm & 0x00000fff);
      inst.instruction |= (imm & 0x0000f000) << 4;
    }
}
9980
/* Handle the VFP form of MRS.  APSR_nzcv <- FPSCR becomes FMSTAT; a
   VFP system register source becomes FMRX.  Returns SUCCESS if the
   instruction was handled here, FAIL to let the caller encode the
   core MRS instead.  */
static int
do_vfp_nsyn_mrs (void)
{
  if (inst.operands[0].isvec)
    {
      if (inst.operands[1].reg != 1)
	first_error (_("operand 1 must be FPSCR"));
      /* FMSTAT takes no operands; clear them before re-encoding.  */
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
      do_vfp_nsyn_opcode ("fmstat");
    }
  else if (inst.operands[1].isvec)
    do_vfp_nsyn_opcode ("fmrx");
  else
    return FAIL;

  return SUCCESS;
}
9999
10000static int
10001do_vfp_nsyn_msr (void)
10002{
10003  if (inst.operands[0].isvec)
10004    do_vfp_nsyn_opcode ("fmxr");
10005  else
10006    return FAIL;
10007
10008  return SUCCESS;
10009}
10010
10011static void
10012do_vmrs (void)
10013{
10014  unsigned Rt = inst.operands[0].reg;
10015
10016  if (thumb_mode && Rt == REG_SP)
10017    {
10018      inst.error = BAD_SP;
10019      return;
10020    }
10021
10022  switch (inst.operands[1].reg)
10023    {
10024    /* MVFR2 is only valid for Armv8-A.  */
10025    case 5:
10026      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
10027		  _(BAD_FPU));
10028      break;
10029
10030    /* Check for new Armv8.1-M Mainline changes to <spec_reg>.  */
10031    case 1: /* fpscr.  */
10032      constraint (!(ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
10033		    || ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)),
10034		  _(BAD_FPU));
10035      break;
10036
10037    case 14: /* fpcxt_ns.  */
10038    case 15: /* fpcxt_s.  */
10039      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_1m_main),
10040		  _("selected processor does not support instruction"));
10041      break;
10042
10043    case  2: /* fpscr_nzcvqc.  */
10044    case 12: /* vpr.  */
10045    case 13: /* p0.  */
10046      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_1m_main)
10047		  || (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
10048		      && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)),
10049		  _("selected processor does not support instruction"));
10050      if (inst.operands[0].reg != 2
10051	  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
10052	as_warn (_("accessing MVE system register without MVE is UNPREDICTABLE"));
10053      break;
10054
10055    default:
10056      break;
10057    }
10058
10059  /* APSR_ sets isvec. All other refs to PC are illegal.  */
10060  if (!inst.operands[0].isvec && Rt == REG_PC)
10061    {
10062      inst.error = BAD_PC;
10063      return;
10064    }
10065
10066  /* If we get through parsing the register name, we just insert the number
10067     generated into the instruction without further validation.  */
10068  inst.instruction |= (inst.operands[1].reg << 16);
10069  inst.instruction |= (Rt << 12);
10070}
10071
/* Encode VMSR <spec_reg>, <Rt>, checking that the selected processor
   supports the named VFP/MVE system register.  Operand 0 is the
   spec_reg number; operand 1 is the core register Rt.  */
static void
do_vmsr (void)
{
  unsigned Rt = inst.operands[1].reg;

  if (thumb_mode)
    reject_bad_reg (Rt);
  else if (Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  switch (inst.operands[0].reg)
    {
    /* MVFR2 is only valid for Armv8-A.  */
    case 5:
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));
      break;

    /* Check for new Armv8.1-M Mainline changes to <spec_reg>.  */
    case  1: /* fpcr.  */
      constraint (!(ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
		    || ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)),
		  _(BAD_FPU));
      break;

    case 14: /* fpcxt_ns.  */
    case 15: /* fpcxt_s.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_1m_main),
		  _("selected processor does not support instruction"));
      break;

    case  2: /* fpscr_nzcvqc.  */
    case 12: /* vpr.  */
    case 13: /* p0.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_1m_main)
		  || (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
		      && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)),
		  _("selected processor does not support instruction"));
      /* Operand 0 is the spec_reg here, so this correctly warns only
	 for the MVE-only registers vpr (12) and p0 (13).  */
      if (inst.operands[0].reg != 2
	  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	as_warn (_("accessing MVE system register without MVE is UNPREDICTABLE"));
      break;

    default:
      break;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[0].reg << 16);
  inst.instruction |= (Rt << 12);
}
10127
/* MRS Rd, <psr or banked reg>: try the VFP form first, then encode the
   core instruction.  */
static void
do_mrs (void)
{
  unsigned br;

  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;

  if (inst.operands[1].isreg)
    {
      /* Banked-register form: the parser encodes the banked register
	 fields directly in 'reg'.  NOTE(review): the 0x200/0xf0000 bit
	 tests presumably validate the banked-register encoding — verify
	 against the parser that produces these values.  */
      br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf0000))
	as_bad (_("bad register for mrs"));
    }
  else
    {
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
		  != (PSR_c|PSR_f),
		  _("'APSR', 'CPSR' or 'SPSR' expected"));
      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
    }

  inst.instruction |= br;
}
10156
10157/* Two possible forms:
10158      "{C|S}PSR_<field>, Rm",
10159      "{C|S}PSR_f, #expression".  */
10160
static void
do_msr (void)
{
  /* Try the VFP (FMXR) form first.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  /* Operand 0's imm holds the PSR field mask bits.  */
  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
  else
    {
      /* Immediate form: leave the value to the fixup machinery.  */
      inst.instruction |= INST_IMMEDIATE;
      inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
      inst.relocs[0].pc_rel = 0;
    }
}
10177
/* MUL Rd, Rm {, Rs}: Rd at bit 16, Rm at 0, Rs at 8.  When Rs is
   omitted it defaults to Rd.  */
static void
do_mul (void)
{
  /* NOTE(review): operand 2's reg is checked before the 'not present'
     default is applied below — this presumably relies on absent
     operands parsing with reg == 0; verify.  */
  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;

  /* Rd == Rm was UNPREDICTABLE before ARMv6.  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("Rd and Rm should be different in mul"));
}
10193
10194/* Long Multiply Parser
10195   UMULL RdLo, RdHi, Rm, Rs
10196   SMULL RdLo, RdHi, Rm, Rs
10197   UMLAL RdLo, RdHi, Rm, Rs
10198   SMLAL RdLo, RdHi, Rm, Rs.  */
10199
static void
do_mull (void)
{
  /* RdLo at bit 12, RdHi at 16, Rm at 0, Rs at 8.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  /* rdhi and rdlo must be different.  */
  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));

  /* rdhi, rdlo and rm must all be different before armv6.  */
  if ((inst.operands[0].reg == inst.operands[2].reg
      || inst.operands[1].reg == inst.operands[2].reg)
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
}
10218
/* NOP {#hint}: on v6K and later (or whenever a hint operand is given)
   encode as an architectural hint; otherwise leave the traditional
   "mov r0, r0" style opcode untouched.  */
static void
do_nop (void)
{
  if (inst.operands[0].present
      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
    {
      /* Architectural NOP hints are CPSR sets with no bits selected.  */
      inst.instruction &= 0xf0000000;	/* Keep only the condition.  */
      inst.instruction |= 0x0320f000;
      if (inst.operands[0].present)
	inst.instruction |= inst.operands[0].imm;
    }
}
10232
10233/* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
10234   PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
10235   Condition defaults to COND_ALWAYS.
10236   Error if Rd, Rn or Rm are R15.  */
10237
10238static void
10239do_pkhbt (void)
10240{
10241  inst.instruction |= inst.operands[0].reg << 12;
10242  inst.instruction |= inst.operands[1].reg << 16;
10243  inst.instruction |= inst.operands[2].reg;
10244  if (inst.operands[3].present)
10245    encode_arm_shift (3);
10246}
10247
10248/* ARM V6 PKHTB (Argument Parse).  */
10249
static void
do_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      /* If the shift specifier is omitted, turn the instruction
	 into pkhbt rd, rm, rn. */
      inst.instruction &= 0xfff00010;	/* Drop the shift/operand bits.  */
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].reg << 16;
    }
  else
    {
      /* Rd at bit 12, Rn at 16, Rm at 0, shift from operand 3.  */
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      encode_arm_shift (3);
    }
}
10270
10271/* ARMv5TE: Preload-Cache
10272   MP Extensions: Preload for write
10273
10274    PLD(W) <addr_mode>
10275
10276  Syntactically, like LDR with B=1, W=0, L=1.  */
10277
static void
do_pld (void)
{
  /* Only plain pre-indexed addressing without writeback is valid.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}
10291
10292/* ARMv7: PLI <addr_mode>  */
static void
do_pli (void)
{
  /* Same addressing restrictions as PLD.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  /* PLI's encoding differs from PLD in having the P bit clear.  */
  inst.instruction &= ~PRE_INDEX;
}
10307
/* PUSH/POP {reglist}: rewrite as LDM/STM with SP! as the base, then
   encode, allowing the single-register (A2) short form.  */
static void
do_push_pop (void)
{
  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  /* Shift the register list to operand 1 and synthesize SP! as
     operand 0.  */
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
}
10320
10321/* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
10322   word at the specified address and the following word
10323   respectively.
10324   Unconditionally executed.
10325   Error if Rn is R15.	*/
10326
10327static void
10328do_rfe (void)
10329{
10330  inst.instruction |= inst.operands[0].reg << 16;
10331  if (inst.operands[0].writeback)
10332    inst.instruction |= WRITE_BACK;
10333}
10334
10335/* ARM V6 ssat (argument parse).  */
10336
10337static void
10338do_ssat (void)
10339{
10340  inst.instruction |= inst.operands[0].reg << 12;
10341  inst.instruction |= (inst.operands[1].imm - 1) << 16;
10342  inst.instruction |= inst.operands[2].reg;
10343
10344  if (inst.operands[3].present)
10345    encode_arm_shift (3);
10346}
10347
10348/* ARM V6 usat (argument parse).  */
10349
10350static void
10351do_usat (void)
10352{
10353  inst.instruction |= inst.operands[0].reg << 12;
10354  inst.instruction |= inst.operands[1].imm << 16;
10355  inst.instruction |= inst.operands[2].reg;
10356
10357  if (inst.operands[3].present)
10358    encode_arm_shift (3);
10359}
10360
10361/* ARM V6 ssat16 (argument parse).  */
10362
10363static void
10364do_ssat16 (void)
10365{
10366  inst.instruction |= inst.operands[0].reg << 12;
10367  inst.instruction |= ((inst.operands[1].imm - 1) << 16);
10368  inst.instruction |= inst.operands[2].reg;
10369}
10370
10371static void
10372do_usat16 (void)
10373{
10374  inst.instruction |= inst.operands[0].reg << 12;
10375  inst.instruction |= inst.operands[1].imm << 16;
10376  inst.instruction |= inst.operands[2].reg;
10377}
10378
10379/* ARM V6 SETEND (argument parse).  Sets the E bit in the CPSR while
10380   preserving the other bits.
10381
10382   setend <endian_specifier>, where <endian_specifier> is either
10383   BE or LE.  */
10384
static void
do_setend (void)
{
  /* SETEND is deprecated from ARMv8 onwards.  */
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
      as_tsktsk (_("setend use is deprecated for ARMv8"));

  /* Operand 0's imm is non-zero for BE; bit 9 is the E bit.  */
  if (inst.operands[0].imm)
    inst.instruction |= 0x200;
}
10395
/* Shift mnemonics (LSL/LSR/ASR/ROR...): Rd {, Rm}, <Rs|#imm>.  When Rm
   is omitted it defaults to Rd.  */
static void
do_shift (void)
{
  unsigned int Rm = (inst.operands[1].present
		     ? inst.operands[1].reg
		     : inst.operands[0].reg);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= Rm;
  if (inst.operands[2].isreg)  /* Rd, {Rm,} Rs */
    {
      inst.instruction |= inst.operands[2].reg << 8;
      inst.instruction |= SHIFT_BY_REG;
      /* PR 12854: Error on extraneous shifts.  */
      constraint (inst.operands[2].shifted,
		  _("extraneous shift as part of operand to shift insn"));
    }
  else
    /* Immediate shift amount: resolved by the fixup machinery.  */
    inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
}
10416
/* SMC #imm4: the immediate is emitted via a dedicated relocation.  */
static void
do_smc (void)
{
  /* NOTE(review): X_add_number is wider than unsigned int; values whose
     low 32 bits are small would pass this check after truncation —
     verify whether the parser already bounds the expression.  */
  unsigned int value = inst.relocs[0].exp.X_add_number;
  constraint (value > 0xf, _("immediate too large (bigger than 0xF)"));

  inst.relocs[0].type = BFD_RELOC_ARM_SMC;
  inst.relocs[0].pc_rel = 0;
}
10426
/* HVC #imm: the immediate is encoded via a dedicated relocation rather
   than directly here.  */
static void
do_hvc (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_HVC;
  inst.relocs[0].pc_rel = 0;
}
10433
/* SWI/SVC #imm: the immediate is encoded via a dedicated relocation
   rather than directly here.  */
static void
do_swi (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_SWI;
  inst.relocs[0].pc_rel = 0;
}
10440
/* ARM SETPAN #imm1: the PAN bit value goes in bit 9.  */
static void
do_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= ((inst.operands[0].imm & 1) << 9);
}
10449
/* Thumb SETPAN #imm1: the PAN bit value goes in bit 3.  */
static void
do_t_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= (inst.operands[0].imm << 3);
}
10458
10459/* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
10460   SMLAxy{cond} Rd,Rm,Rs,Rn
10461   SMLAWy{cond} Rd,Rm,Rs,Rn
10462   Error if any register is R15.  */
10463
10464static void
10465do_smla (void)
10466{
10467  inst.instruction |= inst.operands[0].reg << 16;
10468  inst.instruction |= inst.operands[1].reg;
10469  inst.instruction |= inst.operands[2].reg << 8;
10470  inst.instruction |= inst.operands[3].reg << 12;
10471}
10472
10473/* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
10474   SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
10475   Error if any register is R15.
10476   Warning if Rdlo == Rdhi.  */
10477
static void
do_smlal (void)
{
  /* RdLo at bit 12, RdHi at 16, Rm at 0, Rs at 8.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  /* RdLo == RdHi is UNPREDICTABLE.  */
  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));
}
10489
10490/* ARM V5E (El Segundo) signed-multiply (argument parse)
10491   SMULxy{cond} Rd,Rm,Rs
10492   Error if any register is R15.  */
10493
10494static void
10495do_smul (void)
10496{
10497  inst.instruction |= inst.operands[0].reg << 16;
10498  inst.instruction |= inst.operands[1].reg;
10499  inst.instruction |= inst.operands[2].reg << 8;
10500}
10501
10502/* ARM V6 srs (argument parse).  The variable fields in the encoding are
10503   the same for both ARM and Thumb-2.  */
10504
static void
do_srs (void)
{
  int reg;

  /* The base register operand is optional and, when given, must be
     SP (r13).  */
  if (inst.operands[0].present)
    {
      reg = inst.operands[0].reg;
      constraint (reg != REG_SP, _("SRS base register must be r13"));
    }
  else
    reg = REG_SP;

  /* Base at bit 16, target mode number in the low bits, W bit for
     writeback on either operand.  */
  inst.instruction |= reg << 16;
  inst.instruction |= inst.operands[1].imm;
  if (inst.operands[0].writeback || inst.operands[1].writeback)
    inst.instruction |= WRITE_BACK;
}
10523
10524/* ARM V6 strex (argument parse).  */
10525
10526static void
10527do_strex (void)
10528{
10529  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
10530	      || inst.operands[2].postind || inst.operands[2].writeback
10531	      || inst.operands[2].immisreg || inst.operands[2].shifted
10532	      || inst.operands[2].negative
10533	      /* See comment in do_ldrex().  */
10534	      || (inst.operands[2].reg == REG_PC),
10535	      BAD_ADDR_MODE);
10536
10537  constraint (inst.operands[0].reg == inst.operands[1].reg
10538	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
10539
10540  constraint (inst.relocs[0].exp.X_op != O_constant
10541	      || inst.relocs[0].exp.X_add_number != 0,
10542	      _("offset must be zero in ARM encoding"));
10543
10544  inst.instruction |= inst.operands[0].reg << 12;
10545  inst.instruction |= inst.operands[1].reg;
10546  inst.instruction |= inst.operands[2].reg << 16;
10547  inst.relocs[0].type = BFD_RELOC_UNUSED;
10548}
10549
10550static void
10551do_t_strexbh (void)
10552{
10553  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
10554	      || inst.operands[2].postind || inst.operands[2].writeback
10555	      || inst.operands[2].immisreg || inst.operands[2].shifted
10556	      || inst.operands[2].negative,
10557	      BAD_ADDR_MODE);
10558
10559  constraint (inst.operands[0].reg == inst.operands[1].reg
10560	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
10561
10562  do_rm_rd_rn ();
10563}
10564
static void
do_strexd (void)
{
  /* STREXD: operand 0 is the status register, operands 1/2 the even/odd
     source pair (operand 2 optional), operand 3 the base register.  */
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  /* The status register must not overlap either half of the source
     pair nor the base register.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[3].reg << 16;
}
10586
10587/* ARM V8 STRL.  */
10588static void
10589do_stlex (void)
10590{
10591  constraint (inst.operands[0].reg == inst.operands[1].reg
10592	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
10593
10594  do_rd_rm_rn ();
10595}
10596
10597static void
10598do_t_stlex (void)
10599{
10600  constraint (inst.operands[0].reg == inst.operands[1].reg
10601	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
10602
10603  do_rm_rd_rn ();
10604}
10605
10606/* ARM V6 SXTAH extracts a 16-bit value from a register, sign
10607   extends it to 32-bits, and adds the result to a value in another
10608   register.  You can specify a rotation by 0, 8, 16, or 24 bits
10609   before extracting the 16-bit value.
10610   SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
10611   Condition defaults to COND_ALWAYS.
10612   Error if any register uses R15.  */
10613
10614static void
10615do_sxtah (void)
10616{
10617  inst.instruction |= inst.operands[0].reg << 12;
10618  inst.instruction |= inst.operands[1].reg << 16;
10619  inst.instruction |= inst.operands[2].reg;
10620  inst.instruction |= inst.operands[3].imm << 10;
10621}
10622
10623/* ARM V6 SXTH.
10624
10625   SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
10626   Condition defaults to COND_ALWAYS.
10627   Error if any register uses R15.  */
10628
10629static void
10630do_sxth (void)
10631{
10632  inst.instruction |= inst.operands[0].reg << 12;
10633  inst.instruction |= inst.operands[1].reg;
10634  inst.instruction |= inst.operands[2].imm << 10;
10635}
10636
10637/* VFP instructions.  In a logical order: SP variant first, monad
10638   before dyad, arithmetic then move then load/store.  */
10639
10640static void
10641do_vfp_sp_monadic (void)
10642{
10643  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)
10644	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
10645	      _(BAD_FPU));
10646
10647  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
10648  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
10649}
10650
10651static void
10652do_vfp_sp_dyadic (void)
10653{
10654  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
10655  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
10656  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
10657}
10658
static void
do_vfp_sp_compare_z (void)
{
  /* Compare single-precision register against zero: only Sd is encoded.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}
10664
10665static void
10666do_vfp_dp_sp_cvt (void)
10667{
10668  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
10669  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
10670}
10671
10672static void
10673do_vfp_sp_dp_cvt (void)
10674{
10675  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
10676  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
10677}
10678
10679static void
10680do_vfp_reg_from_sp (void)
10681{
10682  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)
10683	     && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
10684	     _(BAD_FPU));
10685
10686  inst.instruction |= inst.operands[0].reg << 12;
10687  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
10688}
10689
10690static void
10691do_vfp_reg2_from_sp2 (void)
10692{
10693  constraint (inst.operands[2].imm != 2,
10694	      _("only two consecutive VFP SP registers allowed here"));
10695  inst.instruction |= inst.operands[0].reg << 12;
10696  inst.instruction |= inst.operands[1].reg << 16;
10697  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
10698}
10699
10700static void
10701do_vfp_sp_from_reg (void)
10702{
10703  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)
10704	     && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
10705	     _(BAD_FPU));
10706
10707  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
10708  inst.instruction |= inst.operands[1].reg << 12;
10709}
10710
10711static void
10712do_vfp_sp2_from_reg2 (void)
10713{
10714  constraint (inst.operands[0].imm != 2,
10715	      _("only two consecutive VFP SP registers allowed here"));
10716  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
10717  inst.instruction |= inst.operands[1].reg << 12;
10718  inst.instruction |= inst.operands[2].reg << 16;
10719}
10720
static void
do_vfp_sp_ldst (void)
{
  /* Single-precision load/store: Sd plus a coprocessor-style address
     (operand 1; writeback not permitted, unindexed not permitted).  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
10727
static void
do_vfp_dp_ldst (void)
{
  /* Double-precision load/store: Dd plus a coprocessor-style address
     (operand 1; writeback not permitted, unindexed not permitted).  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
10734
10735
static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  /* Common encoder for single-precision load/store multiple.
     Operand 0 is the base register (with optional writeback), operand 1
     the register list (first register in Sd, count in the low bits).  */
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    /* Only the increment-after form may omit writeback.  */
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  inst.instruction |= inst.operands[1].imm;
}
10748
static void
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  /* Common encoder for double-precision load/store multiple (also used
     for the FLDMX/FSTMX "extra word" variants).  */
  int count;

  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    /* Only the increment-after forms may omit writeback.  */
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
		_("this addressing mode requires base-register writeback"));

  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

  /* Each D register transfers two words; the X variants transfer one
     extra word, flagged by an odd count.  */
  count = inst.operands[1].imm << 1;
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
    count += 1;

  inst.instruction |= count;
}
10769
static void
do_vfp_sp_ldstmia (void)
{
  /* Single-precision multiple transfer, increment-after addressing.  */
  vfp_sp_ldstm (VFP_LDSTMIA);
}
10775
static void
do_vfp_sp_ldstmdb (void)
{
  /* Single-precision multiple transfer, decrement-before addressing.  */
  vfp_sp_ldstm (VFP_LDSTMDB);
}
10781
static void
do_vfp_dp_ldstmia (void)
{
  /* Double-precision multiple transfer, increment-after addressing.  */
  vfp_dp_ldstm (VFP_LDSTMIA);
}
10787
static void
do_vfp_dp_ldstmdb (void)
{
  /* Double-precision multiple transfer, decrement-before addressing.  */
  vfp_dp_ldstm (VFP_LDSTMDB);
}
10793
static void
do_vfp_xp_ldstmia (void)
{
  /* Extra-word (X) variant, increment-after; uses the DP encoder which
     adds one to the word count for X types.  */
  vfp_dp_ldstm (VFP_LDSTMIAX);
}
10799
static void
do_vfp_xp_ldstmdb (void)
{
  /* Extra-word (X) variant, decrement-before; uses the DP encoder which
     adds one to the word count for X types.  */
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
10805
10806static void
10807do_vfp_dp_rd_rm (void)
10808{
10809  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1)
10810	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
10811	      _(BAD_FPU));
10812
10813  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
10814  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
10815}
10816
10817static void
10818do_vfp_dp_rn_rd (void)
10819{
10820  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
10821  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
10822}
10823
10824static void
10825do_vfp_dp_rd_rn (void)
10826{
10827  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
10828  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
10829}
10830
10831static void
10832do_vfp_dp_rd_rn_rm (void)
10833{
10834  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
10835	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
10836	      _(BAD_FPU));
10837
10838  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
10839  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
10840  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
10841}
10842
static void
do_vfp_dp_rd (void)
{
  /* Single double-precision register operand in the Dd field.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}
10848
10849static void
10850do_vfp_dp_rm_rd_rn (void)
10851{
10852  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
10853	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
10854	      _(BAD_FPU));
10855
10856  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
10857  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
10858  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
10859}
10860
10861/* VFPv3 instructions.  */
10862static void
10863do_vfp_sp_const (void)
10864{
10865  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
10866  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
10867  inst.instruction |= (inst.operands[1].imm & 0x0f);
10868}
10869
10870static void
10871do_vfp_dp_const (void)
10872{
10873  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
10874  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
10875  inst.instruction |= (inst.operands[1].imm & 0x0f);
10876}
10877
static void
vfp_conv (int srcsize)
{
  /* Encode the fraction-bits immediate for fixed-point VCVT.  The
     operand gives the number of fraction bits; the encoded value is
     SRCSIZE minus that, stored with its low bit in bit 5 and the
     remaining bits in bits 0-3.  */
  int immbits = srcsize - inst.operands[1].imm;

  if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
    {
      /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
	 i.e. immbits must be in range 0 - 16.  */
      inst.error = _("immediate value out of range, expected range [0, 16]");
      return;
    }
  else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
    {
      /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
	 i.e. immbits must be in range 0 - 31.  */
      inst.error = _("immediate value out of range, expected range [1, 32]");
      return;
    }

  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
}
10901
static void
do_vfp_sp_conv_16 (void)
{
  /* Single-precision fixed-point conversion, 16-bit source size.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}
10908
static void
do_vfp_dp_conv_16 (void)
{
  /* Double-precision fixed-point conversion, 16-bit source size.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}
10915
static void
do_vfp_sp_conv_32 (void)
{
  /* Single-precision fixed-point conversion, 32-bit source size.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}
10922
static void
do_vfp_dp_conv_32 (void)
{
  /* Double-precision fixed-point conversion, 32-bit source size.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
10929
10930/* FPA instructions.  Also in a logical order.	*/
10931
10932static void
10933do_fpa_cmp (void)
10934{
10935  inst.instruction |= inst.operands[0].reg << 16;
10936  inst.instruction |= inst.operands[1].reg;
10937}
10938
static void
do_fpa_ldmstm (void)
{
  /* FPA LFM/SFM: operand 0 is the first FP register, operand 1 the
     register count (1-4), operand 2 the address.  The count is encoded
     in the CP_T_X/CP_T_Y bits, with 4 represented as 0.  */
  inst.instruction |= inst.operands[0].reg << 12;
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X;		 break;
    case 2: inst.instruction |= CP_T_Y;		 break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4:					 break;
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* Each register occupies 12 bytes; synthesize the stack offset.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.relocs[0].exp.X_add_number = 12 * inst.operands[1].imm;

      /* Descending stacks use a negative offset.  */
      if (!(inst.instruction & INDEX_UP))
	inst.relocs[0].exp.X_add_number = -inst.relocs[0].exp.X_add_number;

      /* Post-increment with writeback is emulated via post-indexing.  */
      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
10977
10978/* iWMMXt instructions: strictly in alphabetical order.	 */
10979
static void
do_iwmmxt_tandorc (void)
{
  /* TANDC/TORC/TEXTRC write the condition flags, so the only legal
     destination is r15.  The rest of the encoding is fixed.  */
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}
10985
10986static void
10987do_iwmmxt_textrc (void)
10988{
10989  inst.instruction |= inst.operands[0].reg << 12;
10990  inst.instruction |= inst.operands[1].imm;
10991}
10992
10993static void
10994do_iwmmxt_textrm (void)
10995{
10996  inst.instruction |= inst.operands[0].reg << 12;
10997  inst.instruction |= inst.operands[1].reg << 16;
10998  inst.instruction |= inst.operands[2].imm;
10999}
11000
11001static void
11002do_iwmmxt_tinsr (void)
11003{
11004  inst.instruction |= inst.operands[0].reg << 16;
11005  inst.instruction |= inst.operands[1].reg << 12;
11006  inst.instruction |= inst.operands[2].imm;
11007}
11008
11009static void
11010do_iwmmxt_tmia (void)
11011{
11012  inst.instruction |= inst.operands[0].reg << 5;
11013  inst.instruction |= inst.operands[1].reg;
11014  inst.instruction |= inst.operands[2].reg << 12;
11015}
11016
11017static void
11018do_iwmmxt_waligni (void)
11019{
11020  inst.instruction |= inst.operands[0].reg << 12;
11021  inst.instruction |= inst.operands[1].reg << 16;
11022  inst.instruction |= inst.operands[2].reg;
11023  inst.instruction |= inst.operands[3].imm << 20;
11024}
11025
11026static void
11027do_iwmmxt_wmerge (void)
11028{
11029  inst.instruction |= inst.operands[0].reg << 12;
11030  inst.instruction |= inst.operands[1].reg << 16;
11031  inst.instruction |= inst.operands[2].reg;
11032  inst.instruction |= inst.operands[3].imm << 21;
11033}
11034
11035static void
11036do_iwmmxt_wmov (void)
11037{
11038  /* WMOV rD, rN is an alias for WOR rD, rN, rN.  */
11039  inst.instruction |= inst.operands[0].reg << 12;
11040  inst.instruction |= inst.operands[1].reg << 16;
11041  inst.instruction |= inst.operands[1].reg;
11042}
11043
11044static void
11045do_iwmmxt_wldstbh (void)
11046{
11047  int reloc;
11048  inst.instruction |= inst.operands[0].reg << 12;
11049  if (thumb_mode)
11050    reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
11051  else
11052    reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
11053  encode_arm_cp_address (1, TRUE, FALSE, reloc);
11054}
11055
static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      /* Control-register transfers are unconditional (0xf condition).  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
11069
static void
do_iwmmxt_wldstd (void)
{
  /* Doubleword load/store.  iWMMXt2 adds a register-offset form which
     uses a different (unconditional) encoding, built by hand below;
     otherwise fall back to the standard coprocessor address.  */
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      /* Clear the condition and offset fields, then rebuild with the
	 0xf (unconditional) condition.  */
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xfU << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.relocs[0].exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
11092
11093static void
11094do_iwmmxt_wshufh (void)
11095{
11096  inst.instruction |= inst.operands[0].reg << 12;
11097  inst.instruction |= inst.operands[1].reg << 16;
11098  inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
11099  inst.instruction |= (inst.operands[2].imm & 0x0f);
11100}
11101
11102static void
11103do_iwmmxt_wzero (void)
11104{
11105  /* WZERO reg is an alias for WANDN reg, reg, reg.  */
11106  inst.instruction |= inst.operands[0].reg;
11107  inst.instruction |= inst.operands[0].reg << 12;
11108  inst.instruction |= inst.operands[0].reg << 16;
11109}
11110
static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  /* iWMMXt shift: either a three-register form, or (iWMMXt2 only) an
     immediate form.  A zero immediate is rewritten to an equivalent
     non-zero encoding, since zero cannot be encoded directly.  */
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else {
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		_("immediate operand requires iWMMXt2"));
    do_rd_rn ();
    if (inst.operands[2].imm == 0)
      {
	/* Dispatch on the size/operation field in bits 20-23.  */
	switch ((inst.instruction >> 20) & 0xf)
	  {
	  case 4:
	  case 5:
	  case 6:
	  case 7:
	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	    inst.operands[2].imm = 16;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	    break;
	  case 8:
	  case 9:
	  case 10:
	  case 11:
	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	    inst.operands[2].imm = 32;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	    break;
	  case 12:
	  case 13:
	  case 14:
	  case 15:
	    {
	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
	      unsigned long wrn;
	      wrn = (inst.instruction >> 16) & 0xf;
	      inst.instruction &= 0xff0fff0f;
	      inst.instruction |= wrn;
	      /* Bail out here; the instruction is now assembled.  */
	      return;
	    }
	  }
      }
    /* Map 32 -> 0, etc.  */
    inst.operands[2].imm &= 0x1f;
    /* Unconditional encoding; immediate split with bit 4 of the value
       in bit 8 of the instruction.  */
    inst.instruction |= (0xfU << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
  }
}
11160
11161/* Cirrus Maverick instructions.  Simple 2-, 3-, and 4-register
11162   operations first, then control, shift, and load/store.  */
11163
11164/* Insns like "foo X,Y,Z".  */
11165
11166static void
11167do_mav_triple (void)
11168{
11169  inst.instruction |= inst.operands[0].reg << 16;
11170  inst.instruction |= inst.operands[1].reg;
11171  inst.instruction |= inst.operands[2].reg << 12;
11172}
11173
11174/* Insns like "foo W,X,Y,Z".
11175    where W=MVAX[0:3] and X,Y,Z=MVFX[0:15].  */
11176
11177static void
11178do_mav_quad (void)
11179{
11180  inst.instruction |= inst.operands[0].reg << 5;
11181  inst.instruction |= inst.operands[1].reg << 12;
11182  inst.instruction |= inst.operands[2].reg << 16;
11183  inst.instruction |= inst.operands[3].reg;
11184}
11185
11186/* cfmvsc32<cond> DSPSC,MVDX[15:0].  */
static void
do_mav_dspsc (void)
{
  /* cfmvsc32: only the source register (operand 1) is encoded; the
     DSPSC destination is implicit in the opcode.  */
  inst.instruction |= inst.operands[1].reg << 12;
}
11192
11193/* Maverick shift immediate instructions.
11194   cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
11195   cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0].  */
11196
11197static void
11198do_mav_shift (void)
11199{
11200  int imm = inst.operands[2].imm;
11201
11202  inst.instruction |= inst.operands[0].reg << 12;
11203  inst.instruction |= inst.operands[1].reg << 16;
11204
11205  /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
11206     Bits 5-7 of the insn should have bits 4-6 of the immediate.
11207     Bit 4 should be 0.	 */
11208  imm = (imm & 0xf) | ((imm & 0x70) << 1);
11209
11210  inst.instruction |= imm;
11211}
11212
11213/* XScale instructions.	 Also sorted arithmetic before move.  */
11214
11215/* Xscale multiply-accumulate (argument parse)
11216     MIAcc   acc0,Rm,Rs
11217     MIAPHcc acc0,Rm,Rs
11218     MIAxycc acc0,Rm,Rs.  */
11219
11220static void
11221do_xsc_mia (void)
11222{
11223  inst.instruction |= inst.operands[1].reg;
11224  inst.instruction |= inst.operands[2].reg << 12;
11225}
11226
11227/* Xscale move-accumulator-register (argument parse)
11228
11229     MARcc   acc0,RdLo,RdHi.  */
11230
11231static void
11232do_xsc_mar (void)
11233{
11234  inst.instruction |= inst.operands[1].reg << 12;
11235  inst.instruction |= inst.operands[2].reg << 16;
11236}
11237
11238/* Xscale move-register-accumulator (argument parse)
11239
11240     MRAcc   RdLo,RdHi,acc0.  */
11241
11242static void
11243do_xsc_mra (void)
11244{
11245  constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
11246  inst.instruction |= inst.operands[0].reg << 12;
11247  inst.instruction |= inst.operands[1].reg << 16;
11248}
11249
11250/* Encoding functions relevant only to Thumb.  */
11251
11252/* inst.operands[i] is a shifted-register operand; encode
11253   it into inst.instruction in the format used by Thumb32.  */
11254
static void
encode_thumb32_shifted_operand (int i)
{
  /* Encode operand I (a shifted register) into the Thumb32 shifted
     register fields: Rm in bits 0-3, shift type in bits 4-5, shift
     amount split between bits 6-7 and 12-14.  */
  unsigned int value = inst.relocs[0].exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  /* RRX is encoded as ROR with a zero amount.  */
  if (shift == SHIFT_RRX)
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.relocs[0].exp.X_op != O_constant,
		  _("expression too complex"));

      /* LSL and ROR allow 0-31; ASR and LSR additionally allow 32.  */
      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      /* A zero amount is canonicalized to LSL #0; an amount of 32 is
	 encoded as 0 (for ASR/LSR).  */
      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      inst.instruction |= shift << 4;
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
11286
11287
11288/* inst.operands[i] was set up by parse_address.  Encode it into a
11289   Thumb32 format load or store instruction.  Reject forms that cannot
11290   be used with such instructions.  If is_t is true, reject forms that
11291   cannot be used with a T instruction; if is_d is true, reject forms
11292   that cannot be used with a D instruction.  If it is a store insn,
11293   reject PC in Rn.  */
11294
static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  /* Encode operand I (set up by parse_address) as a Thumb32 load/store
     address.  IS_T rejects forms invalid for T (unprivileged) variants,
     IS_D selects the doubleword encoding.  Stores may not use PC as Rn.  */
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* [Rn, Rm {, LSL #n}] register-offset form.  */
      constraint (is_pc, BAD_PC_ADDRESSING);
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  /* The LSL amount (0-3) goes in bits 4-5.  */
	  constraint (inst.relocs[0].exp.X_op != O_constant,
		      _("expression too complex"));
	  constraint (inst.relocs[0].exp.X_add_number < 0
		      || inst.relocs[0].exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.relocs[0].exp.X_add_number << 4;
	}
      inst.relocs[0].type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* [Rn, #imm]{!} pre-indexed (or plain offset) form.  */
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));
      /* Stores (THUMB2_LOAD_BIT clear) may not be PC-relative.  */
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
		  BAD_PC_ADDRESSING);

      if (is_d)
	{
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* [Rn], #imm post-indexed form (always writes back).  */
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
11366
11367/* Table of Thumb instructions which exist in 16- and/or 32-bit
11368   encodings (the latter only in post-V6T2 cores).  The index is the
11369   value used in the insns table below.  When there is more than one
11370   possible 16-bit encoding for the instruction, this table always
11371   holds variant (1).
11372   Also contains several pseudo-instructions used during relaxation.  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bf,    0000, f040e001),			\
  X(_bfcsel,0000, f000e001),			\
  X(_bfx,   0000, f060e001),			\
  X(_bfl,   0000, f000c001),			\
  X(_bflx,  0000, f070e001),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cinc,  0000, ea509000),			\
  X(_cinv,  0000, ea50a000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cneg,  0000, ea50b000),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_csel,  0000, ea508000),			\
  X(_cset,  0000, ea5f900f),			\
  X(_csetm, 0000, ea5fa00f),			\
  X(_csinc, 0000, ea509000),			\
  X(_csinv, 0000, ea50a000),			\
  X(_csneg, 0000, ea50b000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_dls,   0000, f040e001),			\
  X(_dlstp, 0000, f000e001),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_lctp,  0000, f00fe001),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_le,    0000, f00fc001),			\
  X(_letp,  0000, f01fc001),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),                     \
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(_push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_wls,   0000, f040c001),			\
  X(_wlstp, 0000, f000c001),			\
  X(_sev,   bf40, f3af8004),                    \
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
/* First expansion: generate the T_MNEM_xxx enumerators.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* Second expansion: the 16-bit opcode for each mnemonic, indexed by
   T_MNEM_xxx - (T16_32_OFFSET + 1).  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* Third expansion: the corresponding 32-bit opcode.  Bit 20 of the
   32-bit form is the S (flag-setting) bit.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n)        (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n)  (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
11493
11494/* Thumb instruction encoders, in alphabetical order.  */
11495
11496/* ADDW or SUBW.  */
11497
11498static void
11499do_t_add_sub_w (void)
11500{
11501  int Rd, Rn;
11502
11503  Rd = inst.operands[0].reg;
11504  Rn = inst.operands[1].reg;
11505
11506  /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
11507     is the SP-{plus,minus}-immediate form of the instruction.  */
11508  if (Rn == REG_SP)
11509    constraint (Rd == REG_PC, BAD_PC);
11510  else
11511    reject_bad_reg (Rd);
11512
11513  inst.instruction |= (Rn << 16) | (Rd << 8);
11514  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMM12;
11515}
11516
11517/* Parse an add or subtract instruction.  We get here with inst.instruction
11518   equaling any of THUMB_OPCODE_add, adds, sub, or subs.  */
11519
static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  /* Writing the PC must be the last instruction in an IT block.  */
  if (Rd == REG_PC)
    set_pred_insn_type_last ();

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      /* The 16-bit forms set flags outside an IT block and do not set
	 them inside one, so a narrow encoding is only usable when that
	 matches the requested flag behaviour.  */
      if (flags)
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();
      if (!inst.operands[2].isreg)
	{
	  /* Immediate third operand.  */
	  int add;

	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  if (inst.relocs[0].type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		      || (inst.relocs[0].type
			  > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC))
		  {
		    if (inst.size_req == 2)
		      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
		    else
		      /* No explicit size requested; let relaxation
			 widen the instruction later if needed.  */
		      inst.relax = opcode;
		  }
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      /* A 32-bit encoding is required (or was requested).  */
	      constraint ((inst.relocs[0].type
			   >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC)
			  && (inst.relocs[0].type
			      <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC) ,
			  THUMB1_RELOC_ONLY);
	      if (Rd == REG_PC)
		{
		  /* Only SUBS PC, LR, #const is legal here; the
		     immediate must already be a known constant.  */
		  constraint (add, BAD_PC);
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			     _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.relocs[0].exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.relocs[0].exp.X_add_number < 0
			      || inst.relocs[0].exp.X_add_number > 0xff,
			     _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.relocs[0].exp.X_add_number;
		  inst.relocs[0].type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.relocs[0].type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  /* Register third operand, possibly shifted.  */
	  unsigned int value = inst.relocs[0].exp.X_add_number;
	  unsigned int shift = inst.operands[2].shift_kind;

	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
		{
		  /* Thumb-1 cores (except v6-M) require at least one high
		     register in a narrow non flag setting add.  */
		  if (Rd > 7 || Rn > 7
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
		    {
		      /* The high-register ADD encodes Rd and one source;
			 make Rn the operand that differs from Rd.  */
		      if (Rd == Rn)
			{
			  Rn = Rs;
			  Rs = Rd;
			}
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		}
	    }

	  constraint (Rd == REG_PC, BAD_PC);
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
	  constraint (Rs == REG_PC, BAD_PC);
	  reject_bad_reg (Rn);

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
		      _("shift value over 3 not allowed in thumb mode"));
	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
		      _("only LSL shift allowed in thumb mode"));
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Non-unified (pre-UAL) syntax: only the 16-bit non-flag-setting
	 mnemonics are accepted.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.	 */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
11740
/* ADR: load a PC-relative address into Rd, choosing between narrow,
   wide, and relaxable encodings.  */
static void
do_t_adr (void)
{
  unsigned Rd;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  if (unified_syntax && inst.size_req == 0 && Rd <= 7)
    {
      /* Defer to section relaxation.  */
      inst.relax = inst.instruction;
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd << 4;
    }
  else if (unified_syntax && inst.size_req != 2)
    {
      /* Generate a 32-bit opcode.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.relocs[0].type = BFD_RELOC_ARM_T32_ADD_PC12;
      inst.relocs[0].pc_rel = 1;
    }
  else
    {
      /* Generate a 16-bit opcode.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
      inst.relocs[0].exp.X_add_number -= 4; /* PC relative adjust.  */
      inst.relocs[0].pc_rel = 1;
      inst.instruction |= Rd << 4;
    }

  /* When taking the address of a defined Thumb function, set the low
     (Thumb/interworking) bit of the resulting address.  */
  if (inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number += 1;
}
11780
11781/* Arithmetic instructions for which there is just one 16-bit
11782   instruction encoding, and it allows only two low registers.
11783   For maximal compatibility with ARM syntax, we allow three register
11784   operands even when Thumb-32 instructions are not available, as long
11785   as the first two are identical.  For instance, both "sbc r0,r1" and
11786   "sbc r0,r0,r1" are allowed.  */
static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.
	     The narrow form sets flags outside an IT block and does
	     not inside one, so it must match the requested variant.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_pred_block ();
	  else
	    narrow = in_pred_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  /* Non-commutative: the narrow two-register form requires
	     the destination to equal the first source.  */
	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}
11869
11870/* Similarly, but for instructions where the arithmetic operation is
11871   commutative, so we can allow either of them to be different from
11872   the destination operand in a 16-bit instruction.  For instance, all
11873   three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
11874   accepted.  */
static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.
	     As in do_t_arit3, flag behaviour of the narrow form
	     must match the IT-block context.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_pred_block ();
	  else
	    narrow = in_pred_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow)
	    {
	      /* Commutative: either source may coincide with Rd.  */
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
	inst.instruction |= Rn << 3;
      else if (Rd == Rn)
	inst.instruction |= Rs << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
11970
11971static void
11972do_t_bfc (void)
11973{
11974  unsigned Rd;
11975  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
11976  constraint (msb > 32, _("bit-field extends past end of register"));
11977  /* The instruction encoding stores the LSB and MSB,
11978     not the LSB and width.  */
11979  Rd = inst.operands[0].reg;
11980  reject_bad_reg (Rd);
11981  inst.instruction |= Rd << 8;
11982  inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
11983  inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
11984  inst.instruction |= msb - 1;
11985}
11986
11987static void
11988do_t_bfi (void)
11989{
11990  int Rd, Rn;
11991  unsigned int msb;
11992
11993  Rd = inst.operands[0].reg;
11994  reject_bad_reg (Rd);
11995
11996  /* #0 in second position is alternative syntax for bfc, which is
11997     the same instruction but with REG_PC in the Rm field.  */
11998  if (!inst.operands[1].isreg)
11999    Rn = REG_PC;
12000  else
12001    {
12002      Rn = inst.operands[1].reg;
12003      reject_bad_reg (Rn);
12004    }
12005
12006  msb = inst.operands[2].imm + inst.operands[3].imm;
12007  constraint (msb > 32, _("bit-field extends past end of register"));
12008  /* The instruction encoding stores the LSB and MSB,
12009     not the LSB and width.  */
12010  inst.instruction |= Rd << 8;
12011  inst.instruction |= Rn << 16;
12012  inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
12013  inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
12014  inst.instruction |= msb - 1;
12015}
12016
12017static void
12018do_t_bfx (void)
12019{
12020  unsigned Rd, Rn;
12021
12022  Rd = inst.operands[0].reg;
12023  Rn = inst.operands[1].reg;
12024
12025  reject_bad_reg (Rd);
12026  reject_bad_reg (Rn);
12027
12028  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
12029	      _("bit-field extends past end of register"));
12030  inst.instruction |= Rd << 8;
12031  inst.instruction |= Rn << 16;
12032  inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
12033  inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
12034  inst.instruction |= inst.operands[3].imm - 1;
12035}
12036
12037/* ARM V5 Thumb BLX (argument parse)
12038	BLX <target_addr>	which is BLX(1)
12039	BLX <Rm>		which is BLX(2)
12040   Unfortunately, there are two different opcodes for this mnemonic.
12041   So, the insns[].value is not used, and the code here zaps values
12042	into inst.instruction.
12043
12044   ??? How to take advantage of the additional two bits of displacement
12045   available in Thumb32 mode?  Need new relocation?  */
12046
12047static void
12048do_t_blx (void)
12049{
12050  set_pred_insn_type_last ();
12051
12052  if (inst.operands[0].isreg)
12053    {
12054      constraint (inst.operands[0].reg == REG_PC, BAD_PC);
12055      /* We have a register, so this is BLX(2).  */
12056      inst.instruction |= inst.operands[0].reg << 3;
12057    }
12058  else
12059    {
12060      /* No register.  This must be BLX(1).  */
12061      inst.instruction = 0xf000e800;
12062      encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
12063    }
12064}
12065
12066static void
12067do_t_branch (void)
12068{
12069  int opcode;
12070  int cond;
12071  bfd_reloc_code_real_type reloc;
12072
12073  cond = inst.cond;
12074  set_pred_insn_type (IF_INSIDE_IT_LAST_INSN);
12075
12076  if (in_pred_block ())
12077    {
12078      /* Conditional branches inside IT blocks are encoded as unconditional
12079	 branches.  */
12080      cond = COND_ALWAYS;
12081    }
12082  else
12083    cond = inst.cond;
12084
12085  if (cond != COND_ALWAYS)
12086    opcode = T_MNEM_bcond;
12087  else
12088    opcode = inst.instruction;
12089
12090  if (unified_syntax
12091      && (inst.size_req == 4
12092	  || (inst.size_req != 2
12093	      && (inst.operands[0].hasreloc
12094		  || inst.relocs[0].exp.X_op == O_constant))))
12095    {
12096      inst.instruction = THUMB_OP32(opcode);
12097      if (cond == COND_ALWAYS)
12098	reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
12099      else
12100	{
12101	  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2),
12102		      _("selected architecture does not support "
12103			"wide conditional branch instruction"));
12104
12105	  gas_assert (cond != 0xF);
12106	  inst.instruction |= cond << 22;
12107	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
12108	}
12109    }
12110  else
12111    {
12112      inst.instruction = THUMB_OP16(opcode);
12113      if (cond == COND_ALWAYS)
12114	reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
12115      else
12116	{
12117	  inst.instruction |= cond << 8;
12118	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
12119	}
12120      /* Allow section relaxation.  */
12121      if (unified_syntax && inst.size_req != 2)
12122	inst.relax = opcode;
12123    }
12124  inst.relocs[0].type = reloc;
12125  inst.relocs[0].pc_rel = 1;
12126}
12127
12128/* Actually do the work for Thumb state bkpt and hlt.  The only difference
12129   between the two is the maximum immediate allowed - which is passed in
12130   RANGE.  */
12131static void
12132do_t_bkpt_hlt1 (int range)
12133{
12134  constraint (inst.cond != COND_ALWAYS,
12135	      _("instruction is always unconditional"));
12136  if (inst.operands[0].present)
12137    {
12138      constraint (inst.operands[0].imm > range,
12139		  _("immediate value out of range"));
12140      inst.instruction |= inst.operands[0].imm;
12141    }
12142
12143  set_pred_insn_type (NEUTRAL_IT_INSN);
12144}
12145
/* HLT: immediate limited to 6 bits (0-63).  */
static void
do_t_hlt (void)
{
  do_t_bkpt_hlt1 (63);
}
12151
/* BKPT: immediate limited to 8 bits (0-255).  */
static void
do_t_bkpt (void)
{
  do_t_bkpt_hlt1 (255);
}
12157
/* BL/BLX with a 23-bit branch offset.  */
static void
do_t_branch23 (void)
{
  set_pred_insn_type_last ();
  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);

  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
     this file.  We used to simply ignore the PLT reloc type here --
     the branch encoding is now needed to deal with TLSCALL relocs.
     So if we see a PLT reloc now, put it back to how it used to be to
     keep the preexisting behaviour.  */
  if (inst.relocs[0].type == BFD_RELOC_ARM_PLT32)
    inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH23;

#if defined(OBJ_COFF)
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.	*/
  if (	 inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_symbol
      = find_real_start (inst.relocs[0].exp.X_add_symbol);
#endif
}
12185
/* BX: branch and exchange instruction set.  Rm goes in bits 6-3.  */
static void
do_t_bx (void)
{
  set_pred_insn_type_last ();
  inst.instruction |= inst.operands[0].reg << 3;
  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.	 The reloc
     should cause the alignment to be checked once it is known.	 This is
     because BX PC only works if the instruction is word aligned.  */
}
12195
12196static void
12197do_t_bxj (void)
12198{
12199  int Rm;
12200
12201  set_pred_insn_type_last ();
12202  Rm = inst.operands[0].reg;
12203  reject_bad_reg (Rm);
12204  inst.instruction |= Rm << 16;
12205}
12206
12207static void
12208do_t_clz (void)
12209{
12210  unsigned Rd;
12211  unsigned Rm;
12212
12213  Rd = inst.operands[0].reg;
12214  Rm = inst.operands[1].reg;
12215
12216  reject_bad_reg (Rd);
12217  reject_bad_reg (Rm);
12218
12219  inst.instruction |= Rd << 8;
12220  inst.instruction |= Rm << 16;
12221  inst.instruction |= Rm;
12222}
12223
/* For the Armv8.1-M conditional instructions.  The alias mnemonics
   (cinc/cinv/cneg/cset/csetm) are folded onto the four base
   instructions by fixing up the source registers and condition.  */
static void
do_t_cond (void)
{
  unsigned Rd, Rn, Rm;
  signed int cond;

  constraint (inst.cond != COND_ALWAYS, BAD_COND);

  Rd = inst.operands[0].reg;
  switch (inst.instruction)
    {
      /* Full three-register forms: Rd, Rn, Rm, cond.  */
      case T_MNEM_csinc:
      case T_MNEM_csinv:
      case T_MNEM_csneg:
      case T_MNEM_csel:
	Rn = inst.operands[1].reg;
	Rm = inst.operands[2].reg;
	cond = inst.operands[3].imm;
	constraint (Rn == REG_SP, BAD_SP);
	constraint (Rm == REG_SP, BAD_SP);
	break;

      /* Two-register aliases: both sources are Rn, condition inverted.  */
      case T_MNEM_cinc:
      case T_MNEM_cinv:
      case T_MNEM_cneg:
	Rn = inst.operands[1].reg;
	cond = inst.operands[2].imm;
	/* Invert the last bit to invert the cond.  */
	cond = TOGGLE_BIT (cond, 0);
	constraint (Rn == REG_SP, BAD_SP);
	Rm = Rn;
	break;

      /* Zero-source aliases: both sources are PC (reads as zero here),
	 condition inverted.  */
      case T_MNEM_csetm:
      case T_MNEM_cset:
	cond = inst.operands[1].imm;
	/* Invert the last bit to invert the cond.  */
	cond = TOGGLE_BIT (cond, 0);
	Rn = REG_PC;
	Rm = REG_PC;
	break;

      default: abort ();
    }

  set_pred_insn_type (OUTSIDE_PRED_INSN);
  inst.instruction = THUMB_OP32 (inst.instruction);
  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
  inst.instruction |= cond << 4;
}
12277
/* CSDB: no operand fields to encode; just record the predication type.  */
static void
do_t_csdb (void)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);
}
12283
/* CPS: encode the mode immediate directly.  */
static void
do_t_cps (void)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  inst.instruction |= inst.operands[0].imm;
}
12290
/* CPSIE/CPSID, optionally with a mode operand.  The two-argument form
   needs the 32-bit encoding, which requires v6 (non-M).  */
static void
do_t_cpsi (void)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* Carry the IE/ID disable bits over from the 16-bit opcode.  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}
12318
12319/* THUMB CPY instruction (argument parse).  */
12320
12321static void
12322do_t_cpy (void)
12323{
12324  if (inst.size_req == 4)
12325    {
12326      inst.instruction = THUMB_OP32 (T_MNEM_mov);
12327      inst.instruction |= inst.operands[0].reg << 8;
12328      inst.instruction |= inst.operands[1].reg;
12329    }
12330  else
12331    {
12332      inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
12333      inst.instruction |= (inst.operands[0].reg & 0x7);
12334      inst.instruction |= inst.operands[1].reg << 3;
12335    }
12336}
12337
12338static void
12339do_t_cbz (void)
12340{
12341  set_pred_insn_type (OUTSIDE_PRED_INSN);
12342  constraint (inst.operands[0].reg > 7, BAD_HIREG);
12343  inst.instruction |= inst.operands[0].reg;
12344  inst.relocs[0].pc_rel = 1;
12345  inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH7;
12346}
12347
/* DBG: encode the hint option immediate.  */
static void
do_t_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}
12353
12354static void
12355do_t_div (void)
12356{
12357  unsigned Rd, Rn, Rm;
12358
12359  Rd = inst.operands[0].reg;
12360  Rn = (inst.operands[1].present
12361	? inst.operands[1].reg : Rd);
12362  Rm = inst.operands[2].reg;
12363
12364  reject_bad_reg (Rd);
12365  reject_bad_reg (Rn);
12366  reject_bad_reg (Rm);
12367
12368  inst.instruction |= Rd << 8;
12369  inst.instruction |= Rn << 16;
12370  inst.instruction |= Rm;
12371}
12372
12373static void
12374do_t_hint (void)
12375{
12376  if (unified_syntax && inst.size_req == 4)
12377    inst.instruction = THUMB_OP32 (inst.instruction);
12378  else
12379    inst.instruction = THUMB_OP16 (inst.instruction);
12380}
12381
/* IT: open a new predication (IT) block.  The mask in the low four
   bits of the opcode was built from the x/y/z suffixes relative to the
   first condition; for an even (negated) condition the hardware expects
   the then/else bits inverted, so rewrite the mask accordingly.  */
static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  set_pred_insn_type (IT_INSN);
  now_pred.mask = (inst.instruction & 0xf) | 0x10;
  now_pred.cc = cond;
  now_pred.warn_deprecated = FALSE;
  now_pred.type = SCALAR_PRED;

  /* If the condition is a negative condition, invert the mask.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      /* The position of the lowest set bit gives the block length;
	 flip the bits above it.  */
      if ((mask & 0x7) == 0)
	{
	  /* No conversion needed.  */
	  now_pred.block_length = 1;
	}
      else if ((mask & 0x3) == 0)
	{
	  mask ^= 0x8;
	  now_pred.block_length = 2;
	}
      else if ((mask & 0x1) == 0)
	{
	  mask ^= 0xC;
	  now_pred.block_length = 3;
	}
      else
	{
	  mask ^= 0xE;
	  now_pred.block_length = 4;
	}

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  inst.instruction |= cond << 4;
}
12425
12426/* Helper function used for both push/pop and ldm/stm.  */
/* Helper for push/pop and ldm/stm.  DO_IO is false for the VSCCLRM-style
   callers that pass no base register; BASE is the base register (or -1),
   MASK the register list bitmask, and WRITEBACK whether base writeback
   was requested.  Single-register lists are rewritten as LDR/STR.  */
static void
encode_thumb2_multi (bfd_boolean do_io, int base, unsigned mask,
		     bfd_boolean writeback)
{
  bfd_boolean load, store;

  gas_assert (base != -1 || !do_io);
  /* Bit 20 of the 32-bit encoding distinguishes load from store.  */
  load = do_io && ((inst.instruction & (1 << 20)) != 0);
  store = do_io && !load;

  if (mask & (1 << 13))
    inst.error =  _("SP not allowed in register list");

  if (do_io && (mask & (1 << base)) != 0
      && writeback)
    inst.error = _("having the base register in the register list when "
		   "using write back is UNPREDICTABLE");

  if (load)
    {
      if (mask & (1 << 15))
	{
	  if (mask & (1 << 14))
	    inst.error = _("LR and PC should not both be in register list");
	  else
	    /* Loading the PC is a branch: must end any IT block.  */
	    set_pred_insn_type_last ();
	}
    }
  else if (store)
    {
      if (mask & (1 << 15))
	inst.error = _("PC not allowed in register list");
    }

  /* (mask & (mask - 1)) == 0 means at most one register is listed.  */
  if (do_io && ((mask & (mask - 1)) == 0))
    {
      /* Single register transfers implemented as str/ldr.  */
      if (writeback)
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
	  else
	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
	}
      else
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00800000; /* ia -> [base] */
	  else
	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
	}

      inst.instruction |= 0xf8400000;
      if (load)
	inst.instruction |= 0x00100000;

      /* Re-purpose MASK as the Rt field: the index of the single
	 listed register, placed in bits 15-12.  */
      mask = ffs (mask) - 1;
      mask <<= 12;
    }
  else if (writeback)
    inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  if (do_io)
    inst.instruction |= base << 16;
}
12493
12494static void
12495do_t_ldmstm (void)
12496{
12497  /* This really doesn't seem worth it.  */
12498  constraint (inst.relocs[0].type != BFD_RELOC_UNUSED,
12499	      _("expression too complex"));
12500  constraint (inst.operands[1].writeback,
12501	      _("Thumb load/store multiple does not support {reglist}^"));
12502
12503  if (unified_syntax)
12504    {
12505      bfd_boolean narrow;
12506      unsigned mask;
12507
12508      narrow = FALSE;
12509      /* See if we can use a 16-bit instruction.  */
12510      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
12511	  && inst.size_req != 4
12512	  && !(inst.operands[1].imm & ~0xff))
12513	{
12514	  mask = 1 << inst.operands[0].reg;
12515
12516	  if (inst.operands[0].reg <= 7)
12517	    {
12518	      if (inst.instruction == T_MNEM_stmia
12519		  ? inst.operands[0].writeback
12520		  : (inst.operands[0].writeback
12521		     == !(inst.operands[1].imm & mask)))
12522		{
12523		  if (inst.instruction == T_MNEM_stmia
12524		      && (inst.operands[1].imm & mask)
12525		      && (inst.operands[1].imm & (mask - 1)))
12526		    as_warn (_("value stored for r%d is UNKNOWN"),
12527			     inst.operands[0].reg);
12528
12529		  inst.instruction = THUMB_OP16 (inst.instruction);
12530		  inst.instruction |= inst.operands[0].reg << 8;
12531		  inst.instruction |= inst.operands[1].imm;
12532		  narrow = TRUE;
12533		}
12534	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
12535		{
12536		  /* This means 1 register in reg list one of 3 situations:
12537		     1. Instruction is stmia, but without writeback.
12538		     2. lmdia without writeback, but with Rn not in
12539			reglist.
12540		     3. ldmia with writeback, but with Rn in reglist.
12541		     Case 3 is UNPREDICTABLE behaviour, so we handle
12542		     case 1 and 2 which can be converted into a 16-bit
12543		     str or ldr. The SP cases are handled below.  */
12544		  unsigned long opcode;
12545		  /* First, record an error for Case 3.  */
12546		  if (inst.operands[1].imm & mask
12547		      && inst.operands[0].writeback)
12548		    inst.error =
12549			_("having the base register in the register list when "
12550			  "using write back is UNPREDICTABLE");
12551
12552		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
12553							     : T_MNEM_ldr);
12554		  inst.instruction = THUMB_OP16 (opcode);
12555		  inst.instruction |= inst.operands[0].reg << 3;
12556		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
12557		  narrow = TRUE;
12558		}
12559	    }
12560	  else if (inst.operands[0] .reg == REG_SP)
12561	    {
12562	      if (inst.operands[0].writeback)
12563		{
12564		  inst.instruction =
12565			THUMB_OP16 (inst.instruction == T_MNEM_stmia
12566				    ? T_MNEM_push : T_MNEM_pop);
12567		  inst.instruction |= inst.operands[1].imm;
12568		  narrow = TRUE;
12569		}
12570	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
12571		{
12572		  inst.instruction =
12573			THUMB_OP16 (inst.instruction == T_MNEM_stmia
12574				    ? T_MNEM_str_sp : T_MNEM_ldr_sp);
12575		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
12576		  narrow = TRUE;
12577		}
12578	    }
12579	}
12580
12581      if (!narrow)
12582	{
12583	  if (inst.instruction < 0xffff)
12584	    inst.instruction = THUMB_OP32 (inst.instruction);
12585
12586	  encode_thumb2_multi (TRUE /* do_io */, inst.operands[0].reg,
12587			       inst.operands[1].imm,
12588			       inst.operands[0].writeback);
12589	}
12590    }
12591  else
12592    {
12593      constraint (inst.operands[0].reg > 7
12594		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
12595      constraint (inst.instruction != T_MNEM_ldmia
12596		  && inst.instruction != T_MNEM_stmia,
12597		  _("Thumb-2 instruction only valid in unified syntax"));
12598      if (inst.instruction == T_MNEM_stmia)
12599	{
12600	  if (!inst.operands[0].writeback)
12601	    as_warn (_("this instruction will write back the base register"));
12602	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
12603	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
12604	    as_warn (_("value stored for r%d is UNKNOWN"),
12605		     inst.operands[0].reg);
12606	}
12607      else
12608	{
12609	  if (!inst.operands[0].writeback
12610	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
12611	    as_warn (_("this instruction will write back the base register"));
12612	  else if (inst.operands[0].writeback
12613		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
12614	    as_warn (_("this instruction will not write back the base register"));
12615	}
12616
12617      inst.instruction = THUMB_OP16 (inst.instruction);
12618      inst.instruction |= inst.operands[0].reg << 8;
12619      inst.instruction |= inst.operands[1].imm;
12620    }
12621}
12622
12623static void
12624do_t_ldrex (void)
12625{
12626  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
12627	      || inst.operands[1].postind || inst.operands[1].writeback
12628	      || inst.operands[1].immisreg || inst.operands[1].shifted
12629	      || inst.operands[1].negative,
12630	      BAD_ADDR_MODE);
12631
12632  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
12633
12634  inst.instruction |= inst.operands[0].reg << 12;
12635  inst.instruction |= inst.operands[1].reg << 16;
12636  inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_U8;
12637}
12638
12639static void
12640do_t_ldrexd (void)
12641{
12642  if (!inst.operands[1].present)
12643    {
12644      constraint (inst.operands[0].reg == REG_LR,
12645		  _("r14 not allowed as first register "
12646		    "when second register is omitted"));
12647      inst.operands[1].reg = inst.operands[0].reg + 1;
12648    }
12649  constraint (inst.operands[0].reg == inst.operands[1].reg,
12650	      BAD_OVERLAP);
12651
12652  inst.instruction |= inst.operands[0].reg << 12;
12653  inst.instruction |= inst.operands[1].reg << 8;
12654  inst.instruction |= inst.operands[2].reg << 16;
12655}
12656
/* Encode a Thumb single-register load/store (LDR/STR and the byte,
   halfword and signed-extend variants).  Unified syntax tries the
   16-bit encodings first -- including the PC-relative and SP-relative
   special forms -- and records a relaxable opcode when no size was
   forced; everything else gets the 32-bit Thumb-2 encoding.  Divided
   syntax accepts only the classic 16-bit forms.  */
static void
do_t_ldst (void)
{
  unsigned long opcode;
  int Rn;

  /* A load that writes the PC acts as a branch, so it must be the last
     instruction in an IT block.  */
  if (inst.operands[0].isreg
      && !inst.operands[0].preind
      && inst.operands[0].reg == REG_PC)
    set_pred_insn_type_last ();

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
	{
	  /* Literal/immediate form: move_or_literal_pool may turn this
	     into a literal-pool load or an equivalent move; if it does,
	     we are done.  */
	  if (opcode <= 0xffff)
	    inst.instruction = THUMB_OP32 (opcode);
	  if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
	    return;
	}
      if (inst.operands[1].isreg
	  && !inst.operands[1].writeback
	  && !inst.operands[1].shifted && !inst.operands[1].postind
	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
	  && opcode <= 0xffff
	  && inst.size_req != 4)
	{
	  /* Insn may have a 16-bit form.  */
	  Rn = inst.operands[1].reg;
	  if (inst.operands[1].immisreg)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      /* [Rn, Rik] */
	      if (Rn <= 7 && inst.operands[1].imm <= 7)
		goto op16;
	      else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
		reject_bad_reg (inst.operands[1].imm);
	    }
	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
		    && opcode != T_MNEM_ldrsb)
		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
		   || (Rn == REG_SP && opcode == T_MNEM_str))
	    {
	      /* [Rn, #const] */
	      if (Rn > 7)
		{
		  /* Base is PC or SP: switch to the dedicated
		     PC/SP-relative 16-bit opcodes.  */
		  if (Rn == REG_PC)
		    {
		      if (inst.relocs[0].pc_rel)
			opcode = T_MNEM_ldr_pc2;
		      else
			opcode = T_MNEM_ldr_pc;
		    }
		  else
		    {
		      if (opcode == T_MNEM_ldr)
			opcode = T_MNEM_ldr_sp;
		      else
			opcode = T_MNEM_str_sp;
		    }
		  inst.instruction = inst.operands[0].reg << 8;
		}
	      else
		{
		  inst.instruction = inst.operands[0].reg;
		  inst.instruction |= inst.operands[1].reg << 3;
		}
	      inst.instruction |= THUMB_OP16 (opcode);
	      if (inst.size_req == 2)
		inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
	      else
		/* No explicit size: let relaxation widen to 32 bits
		   later if the offset turns out not to fit.  */
		inst.relax = opcode;
	      return;
	    }
	}
      /* Definitely a 32-bit variant.  */

      /* Warning for Erratum 752419.  */
      if (opcode == T_MNEM_ldr
	  && inst.operands[0].reg == REG_SP
	  && inst.operands[1].writeback == 1
	  && !inst.operands[1].immisreg)
	{
	  if (no_cpu_selected ()
	      || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
	    as_warn (_("This instruction may be unpredictable "
		       "if executed on M-profile cores "
		       "with interrupts enabled."));
	}

      /* Do some validations regarding addressing modes.  */
      if (inst.operands[1].immisreg)
	reject_bad_reg (inst.operands[1].imm);

      constraint (inst.operands[1].writeback == 1
		  && inst.operands[0].reg == inst.operands[1].reg,
		  BAD_OVERLAP);

      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
      check_ldr_r15_aligned ();
      return;
    }

  /* Divided syntax from here on: only low destination registers.  */
  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
		  || inst.operands[1].postind || inst.operands[1].shifted
		  || inst.operands[1].negative,
		  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
      return;

  constraint (!inst.operands[1].preind
	      || inst.operands[1].shifted
	      || inst.operands[1].writeback,
	      _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      /* PC- and SP-relative forms: word-size only, register offsets
	 not available, and stores relative to PC are invalid.  */
      constraint (inst.instruction & 0x0600,
		  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
		  && !(inst.instruction & THUMB_LOAD_BIT),
		  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
		  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
	inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
	inst.instruction = T_OPCODE_LDR_SP;
      else
	inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
	      _("Thumb does not support this addressing mode"));

 op16:
  /* Convert the immediate-offset opcode to its register-offset
     counterpart and assemble Rt, Rn and Rm.  */
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */:
    case 0x5e00 /* ldrsh */: break;
    default: abort ();
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
}
12843
12844static void
12845do_t_ldstd (void)
12846{
12847  if (!inst.operands[1].present)
12848    {
12849      inst.operands[1].reg = inst.operands[0].reg + 1;
12850      constraint (inst.operands[0].reg == REG_LR,
12851		  _("r14 not allowed here"));
12852      constraint (inst.operands[0].reg == REG_R12,
12853		  _("r12 not allowed here"));
12854    }
12855
12856  if (inst.operands[2].writeback
12857      && (inst.operands[0].reg == inst.operands[2].reg
12858      || inst.operands[1].reg == inst.operands[2].reg))
12859    as_warn (_("base register written back, and overlaps "
12860	       "one of transfer registers"));
12861
12862  inst.instruction |= inst.operands[0].reg << 12;
12863  inst.instruction |= inst.operands[1].reg << 8;
12864  encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
12865}
12866
12867static void
12868do_t_ldstt (void)
12869{
12870  inst.instruction |= inst.operands[0].reg << 12;
12871  encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
12872}
12873
12874static void
12875do_t_mla (void)
12876{
12877  unsigned Rd, Rn, Rm, Ra;
12878
12879  Rd = inst.operands[0].reg;
12880  Rn = inst.operands[1].reg;
12881  Rm = inst.operands[2].reg;
12882  Ra = inst.operands[3].reg;
12883
12884  reject_bad_reg (Rd);
12885  reject_bad_reg (Rn);
12886  reject_bad_reg (Rm);
12887  reject_bad_reg (Ra);
12888
12889  inst.instruction |= Rd << 8;
12890  inst.instruction |= Rn << 16;
12891  inst.instruction |= Rm;
12892  inst.instruction |= Ra << 12;
12893}
12894
12895static void
12896do_t_mlal (void)
12897{
12898  unsigned RdLo, RdHi, Rn, Rm;
12899
12900  RdLo = inst.operands[0].reg;
12901  RdHi = inst.operands[1].reg;
12902  Rn = inst.operands[2].reg;
12903  Rm = inst.operands[3].reg;
12904
12905  reject_bad_reg (RdLo);
12906  reject_bad_reg (RdHi);
12907  reject_bad_reg (Rn);
12908  reject_bad_reg (Rm);
12909
12910  inst.instruction |= RdLo << 12;
12911  inst.instruction |= RdHi << 8;
12912  inst.instruction |= Rn << 16;
12913  inst.instruction |= Rm;
12914}
12915
12916static void
12917do_t_mov_cmp (void)
12918{
12919  unsigned Rn, Rm;
12920
12921  Rn = inst.operands[0].reg;
12922  Rm = inst.operands[1].reg;
12923
12924  if (Rn == REG_PC)
12925    set_pred_insn_type_last ();
12926
12927  if (unified_syntax)
12928    {
12929      int r0off = (inst.instruction == T_MNEM_mov
12930		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
12931      unsigned long opcode;
12932      bfd_boolean narrow;
12933      bfd_boolean low_regs;
12934
12935      low_regs = (Rn <= 7 && Rm <= 7);
12936      opcode = inst.instruction;
12937      if (in_pred_block ())
12938	narrow = opcode != T_MNEM_movs;
12939      else
12940	narrow = opcode != T_MNEM_movs || low_regs;
12941      if (inst.size_req == 4
12942	  || inst.operands[1].shifted)
12943	narrow = FALSE;
12944
12945      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
12946      if (opcode == T_MNEM_movs && inst.operands[1].isreg
12947	  && !inst.operands[1].shifted
12948	  && Rn == REG_PC
12949	  && Rm == REG_LR)
12950	{
12951	  inst.instruction = T2_SUBS_PC_LR;
12952	  return;
12953	}
12954
12955      if (opcode == T_MNEM_cmp)
12956	{
12957	  constraint (Rn == REG_PC, BAD_PC);
12958	  if (narrow)
12959	    {
12960	      /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
12961		 but valid.  */
12962	      warn_deprecated_sp (Rm);
12963	      /* R15 was documented as a valid choice for Rm in ARMv6,
12964		 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
12965		 tools reject R15, so we do too.  */
12966	      constraint (Rm == REG_PC, BAD_PC);
12967	    }
12968	  else
12969	    reject_bad_reg (Rm);
12970	}
12971      else if (opcode == T_MNEM_mov
12972	       || opcode == T_MNEM_movs)
12973	{
12974	  if (inst.operands[1].isreg)
12975	    {
12976	      if (opcode == T_MNEM_movs)
12977		{
12978		  reject_bad_reg (Rn);
12979		  reject_bad_reg (Rm);
12980		}
12981	      else if (narrow)
12982		{
12983		  /* This is mov.n.  */
12984		  if ((Rn == REG_SP || Rn == REG_PC)
12985		      && (Rm == REG_SP || Rm == REG_PC))
12986		    {
12987		      as_tsktsk (_("Use of r%u as a source register is "
12988				 "deprecated when r%u is the destination "
12989				 "register."), Rm, Rn);
12990		    }
12991		}
12992	      else
12993		{
12994		  /* This is mov.w.  */
12995		  constraint (Rn == REG_PC, BAD_PC);
12996		  constraint (Rm == REG_PC, BAD_PC);
12997		  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
12998		    constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
12999		}
13000	    }
13001	  else
13002	    reject_bad_reg (Rn);
13003	}
13004
13005      if (!inst.operands[1].isreg)
13006	{
13007	  /* Immediate operand.  */
13008	  if (!in_pred_block () && opcode == T_MNEM_mov)
13009	    narrow = 0;
13010	  if (low_regs && narrow)
13011	    {
13012	      inst.instruction = THUMB_OP16 (opcode);
13013	      inst.instruction |= Rn << 8;
13014	      if (inst.relocs[0].type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
13015		  || inst.relocs[0].type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
13016		{
13017		  if (inst.size_req == 2)
13018		    inst.relocs[0].type = BFD_RELOC_ARM_THUMB_IMM;
13019		  else
13020		    inst.relax = opcode;
13021		}
13022	    }
13023	  else
13024	    {
13025	      constraint ((inst.relocs[0].type
13026			   >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC)
13027			  && (inst.relocs[0].type
13028			      <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC) ,
13029			  THUMB1_RELOC_ONLY);
13030
13031	      inst.instruction = THUMB_OP32 (inst.instruction);
13032	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
13033	      inst.instruction |= Rn << r0off;
13034	      inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
13035	    }
13036	}
13037      else if (inst.operands[1].shifted && inst.operands[1].immisreg
13038	       && (inst.instruction == T_MNEM_mov
13039		   || inst.instruction == T_MNEM_movs))
13040	{
13041	  /* Register shifts are encoded as separate shift instructions.  */
13042	  bfd_boolean flags = (inst.instruction == T_MNEM_movs);
13043
13044	  if (in_pred_block ())
13045	    narrow = !flags;
13046	  else
13047	    narrow = flags;
13048
13049	  if (inst.size_req == 4)
13050	    narrow = FALSE;
13051
13052	  if (!low_regs || inst.operands[1].imm > 7)
13053	    narrow = FALSE;
13054
13055	  if (Rn != Rm)
13056	    narrow = FALSE;
13057
13058	  switch (inst.operands[1].shift_kind)
13059	    {
13060	    case SHIFT_LSL:
13061	      opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
13062	      break;
13063	    case SHIFT_ASR:
13064	      opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
13065	      break;
13066	    case SHIFT_LSR:
13067	      opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
13068	      break;
13069	    case SHIFT_ROR:
13070	      opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
13071	      break;
13072	    default:
13073	      abort ();
13074	    }
13075
13076	  inst.instruction = opcode;
13077	  if (narrow)
13078	    {
13079	      inst.instruction |= Rn;
13080	      inst.instruction |= inst.operands[1].imm << 3;
13081	    }
13082	  else
13083	    {
13084	      if (flags)
13085		inst.instruction |= CONDS_BIT;
13086
13087	      inst.instruction |= Rn << 8;
13088	      inst.instruction |= Rm << 16;
13089	      inst.instruction |= inst.operands[1].imm;
13090	    }
13091	}
13092      else if (!narrow)
13093	{
13094	  /* Some mov with immediate shift have narrow variants.
13095	     Register shifts are handled above.  */
13096	  if (low_regs && inst.operands[1].shifted
13097	      && (inst.instruction == T_MNEM_mov
13098		  || inst.instruction == T_MNEM_movs))
13099	    {
13100	      if (in_pred_block ())
13101		narrow = (inst.instruction == T_MNEM_mov);
13102	      else
13103		narrow = (inst.instruction == T_MNEM_movs);
13104	    }
13105
13106	  if (narrow)
13107	    {
13108	      switch (inst.operands[1].shift_kind)
13109		{
13110		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
13111		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
13112		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
13113		default: narrow = FALSE; break;
13114		}
13115	    }
13116
13117	  if (narrow)
13118	    {
13119	      inst.instruction |= Rn;
13120	      inst.instruction |= Rm << 3;
13121	      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
13122	    }
13123	  else
13124	    {
13125	      inst.instruction = THUMB_OP32 (inst.instruction);
13126	      inst.instruction |= Rn << r0off;
13127	      encode_thumb32_shifted_operand (1);
13128	    }
13129	}
13130      else
13131	switch (inst.instruction)
13132	  {
13133	  case T_MNEM_mov:
13134	    /* In v4t or v5t a move of two lowregs produces unpredictable
13135	       results. Don't allow this.  */
13136	    if (low_regs)
13137	      {
13138		constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
13139			    "MOV Rd, Rs with two low registers is not "
13140			    "permitted on this architecture");
13141		ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
13142					arm_ext_v6);
13143	      }
13144
13145	    inst.instruction = T_OPCODE_MOV_HR;
13146	    inst.instruction |= (Rn & 0x8) << 4;
13147	    inst.instruction |= (Rn & 0x7);
13148	    inst.instruction |= Rm << 3;
13149	    break;
13150
13151	  case T_MNEM_movs:
13152	    /* We know we have low registers at this point.
13153	       Generate LSLS Rd, Rs, #0.  */
13154	    inst.instruction = T_OPCODE_LSL_I;
13155	    inst.instruction |= Rn;
13156	    inst.instruction |= Rm << 3;
13157	    break;
13158
13159	  case T_MNEM_cmp:
13160	    if (low_regs)
13161	      {
13162		inst.instruction = T_OPCODE_CMP_LR;
13163		inst.instruction |= Rn;
13164		inst.instruction |= Rm << 3;
13165	      }
13166	    else
13167	      {
13168		inst.instruction = T_OPCODE_CMP_HR;
13169		inst.instruction |= (Rn & 0x8) << 4;
13170		inst.instruction |= (Rn & 0x7);
13171		inst.instruction |= Rm << 3;
13172	      }
13173	    break;
13174	  }
13175      return;
13176    }
13177
13178  inst.instruction = THUMB_OP16 (inst.instruction);
13179
13180  /* PR 10443: Do not silently ignore shifted operands.  */
13181  constraint (inst.operands[1].shifted,
13182	      _("shifts in CMP/MOV instructions are only supported in unified syntax"));
13183
13184  if (inst.operands[1].isreg)
13185    {
13186      if (Rn < 8 && Rm < 8)
13187	{
13188	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
13189	     since a MOV instruction produces unpredictable results.  */
13190	  if (inst.instruction == T_OPCODE_MOV_I8)
13191	    inst.instruction = T_OPCODE_ADD_I3;
13192	  else
13193	    inst.instruction = T_OPCODE_CMP_LR;
13194
13195	  inst.instruction |= Rn;
13196	  inst.instruction |= Rm << 3;
13197	}
13198      else
13199	{
13200	  if (inst.instruction == T_OPCODE_MOV_I8)
13201	    inst.instruction = T_OPCODE_MOV_HR;
13202	  else
13203	    inst.instruction = T_OPCODE_CMP_HR;
13204	  do_t_cpy ();
13205	}
13206    }
13207  else
13208    {
13209      constraint (Rn > 7,
13210		  _("only lo regs allowed with immediate"));
13211      inst.instruction |= Rn << 8;
13212      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_IMM;
13213    }
13214}
13215
13216static void
13217do_t_mov16 (void)
13218{
13219  unsigned Rd;
13220  bfd_vma imm;
13221  bfd_boolean top;
13222
13223  top = (inst.instruction & 0x00800000) != 0;
13224  if (inst.relocs[0].type == BFD_RELOC_ARM_MOVW)
13225    {
13226      constraint (top, _(":lower16: not allowed in this instruction"));
13227      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_MOVW;
13228    }
13229  else if (inst.relocs[0].type == BFD_RELOC_ARM_MOVT)
13230    {
13231      constraint (!top, _(":upper16: not allowed in this instruction"));
13232      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_MOVT;
13233    }
13234
13235  Rd = inst.operands[0].reg;
13236  reject_bad_reg (Rd);
13237
13238  inst.instruction |= Rd << 8;
13239  if (inst.relocs[0].type == BFD_RELOC_UNUSED)
13240    {
13241      imm = inst.relocs[0].exp.X_add_number;
13242      inst.instruction |= (imm & 0xf000) << 4;
13243      inst.instruction |= (imm & 0x0800) << 15;
13244      inst.instruction |= (imm & 0x0700) << 4;
13245      inst.instruction |= (imm & 0x00ff);
13246    }
13247}
13248
/* Encode Thumb MVN/MVNS and TST/CMN-style instructions: a destination
   (or first compare) register plus a register, shifted register, or
   immediate operand.  Unified syntax picks a 16-bit encoding where the
   rules allow; divided syntax accepts only unshifted low registers.  */
static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* CMP/CMN only forbid PC as the first operand; the other mnemonics
     reject SP and PC outright.  */
  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      /* MVN/MVNS place their register at bit 8; the compare-style
	 mnemonics place it at bit 16.  */
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      /* Decide whether the 16-bit encoding is usable: it needs low
	 unshifted registers, no forced 4-byte size, and (for the
	 flag-setting forms) agreement with the IT-block state.  */
      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn
	       || inst.instruction == T_MNEM_tst)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      /* Divided syntax: 16-bit form only, with unshifted low regs.  */
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
13328
/* Encode a Thumb-2 MRS instruction: read a banked register (register
   operand) or a special/PSR register (immediate flags) into Rd.  */
static void
do_t_mrs (void)
{
  unsigned Rd;

  /* Hand off VFP/Neon "mrs"-style aliases first.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);
  inst.instruction |= Rd << 8;

  if (inst.operands[1].isreg)
    {
      /* Banked-register form: the operand value already carries
	 encoding bits (NOTE(review): presumably set by the operand
	 parser -- the checks below reject values without the expected
	 marker bits; confirm against the banked-register parsing).  */
      unsigned br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
	as_bad (_("bad register for mrs"));

      inst.instruction |= br & (0xf << 16);
      inst.instruction |= (br & 0x300) >> 4;
      inst.instruction |= (br & SPSR_BIT) >> 2;
    }
  else
    {
      /* PSR form: mask the operand down to the recognised flag bits.  */
      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
	{
	  /* PR gas/12698:  The constraint is only applied for m_profile.
	     If the user has specified -march=all, we want to ignore it as
	     we are building for any CPU type, including non-m variants.  */
	  bfd_boolean m_profile =
	    !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
	  constraint ((flags != 0) && m_profile, _("selected processor does "
						   "not support requested special purpose register"));
	}
      else
	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
	   devices).  */
	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		    _("'APSR', 'CPSR' or 'SPSR' expected"));

      inst.instruction |= (flags & SPSR_BIT) >> 2;
      inst.instruction |= inst.operands[1].imm & 0xff;
      inst.instruction |= 0xf0000;
    }
}
13376
/* Encode a Thumb-2 MSR instruction: write a special/PSR or banked
   register from a general register.  The Thumb encoding has no
   immediate-source form.  */
static void
do_t_msr (void)
{
  int flags;
  unsigned Rn;

  /* Hand off VFP/Neon "msr"-style aliases first.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));

  /* The destination operand is either a banked register (register
     operand) or a PSR-plus-flags immediate; in both cases the value
     holds the encoding bits scattered below.  */
  if (inst.operands[0].isreg)
    flags = (int)(inst.operands[0].reg);
  else
    flags = inst.operands[0].imm;

  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
    {
      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      /* PR gas/12698:  The constraint is only applied for m_profile.
	 If the user has specified -march=all, we want to ignore it as
	 we are building for any CPU type, including non-m variants.  */
      bfd_boolean m_profile =
	!ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
      constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
	   && (bits & ~(PSR_s | PSR_f)) != 0)
	  || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
	      && bits != PSR_f)) && m_profile,
	  _("selected processor does not support requested special "
	    "purpose register"));
    }
  else
     constraint ((flags & 0xff) != 0, _("selected processor does not support "
		 "requested special purpose register"));

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  /* Scatter the flag/mask bits into the instruction fields and place
     the source register at bit 16.  */
  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & 0xf0000) >> 8;
  inst.instruction |= (flags & 0x300) >> 4;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
13423
13424static void
13425do_t_mul (void)
13426{
13427  bfd_boolean narrow;
13428  unsigned Rd, Rn, Rm;
13429
13430  if (!inst.operands[2].present)
13431    inst.operands[2].reg = inst.operands[0].reg;
13432
13433  Rd = inst.operands[0].reg;
13434  Rn = inst.operands[1].reg;
13435  Rm = inst.operands[2].reg;
13436
13437  if (unified_syntax)
13438    {
13439      if (inst.size_req == 4
13440	  || (Rd != Rn
13441	      && Rd != Rm)
13442	  || Rn > 7
13443	  || Rm > 7)
13444	narrow = FALSE;
13445      else if (inst.instruction == T_MNEM_muls)
13446	narrow = !in_pred_block ();
13447      else
13448	narrow = in_pred_block ();
13449    }
13450  else
13451    {
13452      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
13453      constraint (Rn > 7 || Rm > 7,
13454		  BAD_HIREG);
13455      narrow = TRUE;
13456    }
13457
13458  if (narrow)
13459    {
13460      /* 16-bit MULS/Conditional MUL.  */
13461      inst.instruction = THUMB_OP16 (inst.instruction);
13462      inst.instruction |= Rd;
13463
13464      if (Rd == Rn)
13465	inst.instruction |= Rm << 3;
13466      else if (Rd == Rm)
13467	inst.instruction |= Rn << 3;
13468      else
13469	constraint (1, _("dest must overlap one source register"));
13470    }
13471  else
13472    {
13473      constraint (inst.instruction != T_MNEM_mul,
13474		  _("Thumb-2 MUL must not set flags"));
13475      /* 32-bit MUL.  */
13476      inst.instruction = THUMB_OP32 (inst.instruction);
13477      inst.instruction |= Rd << 8;
13478      inst.instruction |= Rn << 16;
13479      inst.instruction |= Rm << 0;
13480
13481      reject_bad_reg (Rd);
13482      reject_bad_reg (Rn);
13483      reject_bad_reg (Rm);
13484    }
13485}
13486
13487static void
13488do_t_mull (void)
13489{
13490  unsigned RdLo, RdHi, Rn, Rm;
13491
13492  RdLo = inst.operands[0].reg;
13493  RdHi = inst.operands[1].reg;
13494  Rn = inst.operands[2].reg;
13495  Rm = inst.operands[3].reg;
13496
13497  reject_bad_reg (RdLo);
13498  reject_bad_reg (RdHi);
13499  reject_bad_reg (Rn);
13500  reject_bad_reg (Rm);
13501
13502  inst.instruction |= RdLo << 12;
13503  inst.instruction |= RdHi << 8;
13504  inst.instruction |= Rn << 16;
13505  inst.instruction |= Rm;
13506
13507 if (RdLo == RdHi)
13508    as_tsktsk (_("rdhi and rdlo must be different"));
13509}
13510
13511static void
13512do_t_nop (void)
13513{
13514  set_pred_insn_type (NEUTRAL_IT_INSN);
13515
13516  if (unified_syntax)
13517    {
13518      if (inst.size_req == 4 || inst.operands[0].imm > 15)
13519	{
13520	  inst.instruction = THUMB_OP32 (inst.instruction);
13521	  inst.instruction |= inst.operands[0].imm;
13522	}
13523      else
13524	{
13525	  /* PR9722: Check for Thumb2 availability before
13526	     generating a thumb2 nop instruction.  */
13527	  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
13528	    {
13529	      inst.instruction = THUMB_OP16 (inst.instruction);
13530	      inst.instruction |= inst.operands[0].imm << 4;
13531	    }
13532	  else
13533	    inst.instruction = 0x46c0;
13534	}
13535    }
13536  else
13537    {
13538      constraint (inst.operands[0].present,
13539		  _("Thumb does not support NOP with hints"));
13540      inst.instruction = 0x46c0;
13541    }
13542}
13543
13544static void
13545do_t_neg (void)
13546{
13547  if (unified_syntax)
13548    {
13549      bfd_boolean narrow;
13550
13551      if (THUMB_SETS_FLAGS (inst.instruction))
13552	narrow = !in_pred_block ();
13553      else
13554	narrow = in_pred_block ();
13555      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
13556	narrow = FALSE;
13557      if (inst.size_req == 4)
13558	narrow = FALSE;
13559
13560      if (!narrow)
13561	{
13562	  inst.instruction = THUMB_OP32 (inst.instruction);
13563	  inst.instruction |= inst.operands[0].reg << 8;
13564	  inst.instruction |= inst.operands[1].reg << 16;
13565	}
13566      else
13567	{
13568	  inst.instruction = THUMB_OP16 (inst.instruction);
13569	  inst.instruction |= inst.operands[0].reg;
13570	  inst.instruction |= inst.operands[1].reg << 3;
13571	}
13572    }
13573  else
13574    {
13575      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
13576		  BAD_HIREG);
13577      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
13578
13579      inst.instruction = THUMB_OP16 (inst.instruction);
13580      inst.instruction |= inst.operands[0].reg;
13581      inst.instruction |= inst.operands[1].reg << 3;
13582    }
13583}
13584
13585static void
13586do_t_orn (void)
13587{
13588  unsigned Rd, Rn;
13589
13590  Rd = inst.operands[0].reg;
13591  Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
13592
13593  reject_bad_reg (Rd);
13594  /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN.  */
13595  reject_bad_reg (Rn);
13596
13597  inst.instruction |= Rd << 8;
13598  inst.instruction |= Rn << 16;
13599
13600  if (!inst.operands[2].isreg)
13601    {
13602      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
13603      inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
13604    }
13605  else
13606    {
13607      unsigned Rm;
13608
13609      Rm = inst.operands[2].reg;
13610      reject_bad_reg (Rm);
13611
13612      constraint (inst.operands[2].shifted
13613		  && inst.operands[2].immisreg,
13614		  _("shift must be constant"));
13615      encode_thumb32_shifted_operand (2);
13616    }
13617}
13618
13619static void
13620do_t_pkhbt (void)
13621{
13622  unsigned Rd, Rn, Rm;
13623
13624  Rd = inst.operands[0].reg;
13625  Rn = inst.operands[1].reg;
13626  Rm = inst.operands[2].reg;
13627
13628  reject_bad_reg (Rd);
13629  reject_bad_reg (Rn);
13630  reject_bad_reg (Rm);
13631
13632  inst.instruction |= Rd << 8;
13633  inst.instruction |= Rn << 16;
13634  inst.instruction |= Rm;
13635  if (inst.operands[3].present)
13636    {
13637      unsigned int val = inst.relocs[0].exp.X_add_number;
13638      constraint (inst.relocs[0].exp.X_op != O_constant,
13639		  _("expression too complex"));
13640      inst.instruction |= (val & 0x1c) << 10;
13641      inst.instruction |= (val & 0x03) << 6;
13642    }
13643}
13644
13645static void
13646do_t_pkhtb (void)
13647{
13648  if (!inst.operands[3].present)
13649    {
13650      unsigned Rtmp;
13651
13652      inst.instruction &= ~0x00000020;
13653
13654      /* PR 10168.  Swap the Rm and Rn registers.  */
13655      Rtmp = inst.operands[1].reg;
13656      inst.operands[1].reg = inst.operands[2].reg;
13657      inst.operands[2].reg = Rtmp;
13658    }
13659  do_t_pkhbt ();
13660}
13661
/* Encode a Thumb-2 preload-style operand (single address operand).  */
static void
do_t_pld (void)
{
  /* When the offset is a register, operands[0].imm holds its number;
     SP/PC are rejected there.  */
  if (inst.operands[0].immisreg)
    reject_bad_reg (inst.operands[0].imm);

  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
}
13670
/* Encode PUSH/POP, choosing among the 16-bit forms (plain register
   list, or list plus LR/PC) and the 32-bit multi-register form.  */
static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.relocs[0].type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  /* MASK is a bitmap of the registers in the list.  */
  mask = inst.operands[0].imm;
  if (inst.size_req != 4 && (mask & ~0xff) == 0)
    /* Only low registers: plain 16-bit encoding.  */
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  else if (inst.size_req != 4
	   && (mask & ~0xff) == (1U << (inst.instruction == T_MNEM_push
				       ? REG_LR : REG_PC)))
    {
      /* Low registers plus exactly LR (for push) or PC (for pop):
	 16-bit encoding with the extra-register bit set.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      /* Anything else needs the 32-bit LDM/STM-style encoding.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      encode_thumb2_multi (TRUE /* do_io */, 13, mask, TRUE);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
13703
13704static void
13705do_t_clrm (void)
13706{
13707  if (unified_syntax)
13708    encode_thumb2_multi (FALSE /* do_io */, -1, inst.operands[0].imm, FALSE);
13709  else
13710    {
13711      inst.error = _("invalid register list to push/pop instruction");
13712      return;
13713    }
13714}
13715
/* Encode VSCCLRM.  The register-list base and length are packed
   differently for single- and double-precision lists.  */
static void
do_t_vscclrm (void)
{
  if (inst.operands[0].issingle)
    {
      /* S-register list: low bit of the base register goes in bit 22,
	 remaining bits in 15:12; imm is the list length.  */
      inst.instruction |= (inst.operands[0].reg & 0x1) << 22;
      inst.instruction |= (inst.operands[0].reg & 0x1e) << 11;
      inst.instruction |= inst.operands[0].imm;
    }
  else
    {
      /* D-register list: high bit in bit 22, low bits in 15:12; the
	 length is doubled (bit 8 marks the double-precision form).  */
      inst.instruction |= (inst.operands[0].reg & 0x10) << 18;
      inst.instruction |= (inst.operands[0].reg & 0xf) << 12;
      inst.instruction |= 1 << 8;
      inst.instruction |= inst.operands[0].imm << 1;
    }
}
13733
/* Encode RBIT.  */
static void
do_t_rbit (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  /* The source register is deliberately encoded twice (in both the
     19:16 and 3:0 positions), as the 32-bit encoding requires.  */
  inst.instruction |= Rm << 16;
  inst.instruction |= Rm;
}
13749
13750static void
13751do_t_rev (void)
13752{
13753  unsigned Rd, Rm;
13754
13755  Rd = inst.operands[0].reg;
13756  Rm = inst.operands[1].reg;
13757
13758  reject_bad_reg (Rd);
13759  reject_bad_reg (Rm);
13760
13761  if (Rd <= 7 && Rm <= 7
13762      && inst.size_req != 4)
13763    {
13764      inst.instruction = THUMB_OP16 (inst.instruction);
13765      inst.instruction |= Rd;
13766      inst.instruction |= Rm << 3;
13767    }
13768  else if (unified_syntax)
13769    {
13770      inst.instruction = THUMB_OP32 (inst.instruction);
13771      inst.instruction |= Rd << 8;
13772      inst.instruction |= Rm << 16;
13773      inst.instruction |= Rm;
13774    }
13775  else
13776    inst.error = BAD_HIREG;
13777}
13778
13779static void
13780do_t_rrx (void)
13781{
13782  unsigned Rd, Rm;
13783
13784  Rd = inst.operands[0].reg;
13785  Rm = inst.operands[1].reg;
13786
13787  reject_bad_reg (Rd);
13788  reject_bad_reg (Rm);
13789
13790  inst.instruction |= Rd << 8;
13791  inst.instruction |= Rm;
13792}
13793
/* Encode RSB.  "rsb Rd, Rs, #0" may narrow to a 16-bit NEG.  */
static void
do_t_rsb (void)
{
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bfd_boolean narrow;

      /* Bit 20 is the S (flag-setting) bit; flag-setting narrows
	 outside a predicated block, non-flag-setting inside one.  */
      if ((inst.instruction & 0x00100000) != 0)
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();

      if (Rd > 7 || Rs > 7)
	narrow = FALSE;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = FALSE;

      /* The 16-bit form only exists for an immediate of zero.  */
      if (inst.relocs[0].exp.X_op != O_constant
	  || inst.relocs[0].exp.X_add_number != 0)
	narrow = FALSE;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.relocs[0].type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  /* Wide form: use the modified-immediate encoding.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
13848
13849static void
13850do_t_setend (void)
13851{
13852  if (warn_on_deprecated
13853      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
13854      as_tsktsk (_("setend use is deprecated for ARMv8"));
13855
13856  set_pred_insn_type (OUTSIDE_PRED_INSN);
13857  if (inst.operands[0].imm)
13858    inst.instruction |= 0x8;
13859}
13860
/* Encode the shift instructions (ASR, LSL, LSR, ROR), in immediate or
   register form, choosing between 16-bit and 32-bit encodings.  */
static void
do_t_shift (void)
{
  /* Two-operand form: the destination doubles as the first source.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      /* Decide whether the 16-bit encoding is usable: flag-setting
	 narrows outside predication, non-flag-setting inside; high
	 registers, ROR-by-immediate, register shifts not matching the
	 16-bit operand pattern, and an explicit .w all force wide.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      reject_bad_reg (inst.operands[0].reg);
      reject_bad_reg (inst.operands[1].reg);

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      reject_bad_reg (inst.operands[2].reg);
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* Shift by immediate: emitted as a MOV with a shifted
		 operand.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.relocs[0].type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Pre-UAL syntax: 16-bit encodings only.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;

	  /* PR 12854: Error on extraneous shifts.  */
	  constraint (inst.operands[2].shifted,
		      _("extraneous shift as part of operand to shift insn"));
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
14008
14009static void
14010do_t_simd (void)
14011{
14012  unsigned Rd, Rn, Rm;
14013
14014  Rd = inst.operands[0].reg;
14015  Rn = inst.operands[1].reg;
14016  Rm = inst.operands[2].reg;
14017
14018  reject_bad_reg (Rd);
14019  reject_bad_reg (Rn);
14020  reject_bad_reg (Rm);
14021
14022  inst.instruction |= Rd << 8;
14023  inst.instruction |= Rn << 16;
14024  inst.instruction |= Rm;
14025}
14026
/* Three-register SIMD encoder with the source operands reversed:
   operand 1 is placed in the Rm field and operand 2 in the Rn field.
   The swap relative to do_t_simd is intentional.  */
static void
do_t_simd2 (void)
{
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;	/* NB: operand 1 -> Rm.  */
  Rn = inst.operands[2].reg;	/* NB: operand 2 -> Rn.  */

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
}
14044
14045static void
14046do_t_smc (void)
14047{
14048  unsigned int value = inst.relocs[0].exp.X_add_number;
14049  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
14050	      _("SMC is not permitted on this architecture"));
14051  constraint (inst.relocs[0].exp.X_op != O_constant,
14052	      _("expression too complex"));
14053  constraint (value > 0xf, _("immediate too large (bigger than 0xF)"));
14054
14055  inst.relocs[0].type = BFD_RELOC_UNUSED;
14056  inst.instruction |= (value & 0x000f) << 16;
14057
14058  /* PR gas/15623: SMC instructions must be last in an IT block.  */
14059  set_pred_insn_type_last ();
14060}
14061
/* Encode HVC (hypervisor call) with a 16-bit immediate split across
   two fields.  NOTE(review): unlike do_t_smc, this does not check that
   the expression is O_constant or in range — verify that operand
   parsing guarantees it upstream.  */
static void
do_t_hvc (void)
{
  unsigned int value = inst.relocs[0].exp.X_add_number;

  inst.relocs[0].type = BFD_RELOC_UNUSED;
  inst.instruction |= (value & 0x0fff);
  inst.instruction |= (value & 0xf000) << 4;
}
14071
14072static void
14073do_t_ssat_usat (int bias)
14074{
14075  unsigned Rd, Rn;
14076
14077  Rd = inst.operands[0].reg;
14078  Rn = inst.operands[2].reg;
14079
14080  reject_bad_reg (Rd);
14081  reject_bad_reg (Rn);
14082
14083  inst.instruction |= Rd << 8;
14084  inst.instruction |= inst.operands[1].imm - bias;
14085  inst.instruction |= Rn << 16;
14086
14087  if (inst.operands[3].present)
14088    {
14089      offsetT shift_amount = inst.relocs[0].exp.X_add_number;
14090
14091      inst.relocs[0].type = BFD_RELOC_UNUSED;
14092
14093      constraint (inst.relocs[0].exp.X_op != O_constant,
14094		  _("expression too complex"));
14095
14096      if (shift_amount != 0)
14097	{
14098	  constraint (shift_amount > 31,
14099		      _("shift expression is too large"));
14100
14101	  if (inst.operands[3].shift_kind == SHIFT_ASR)
14102	    inst.instruction |= 0x00200000;  /* sh bit.  */
14103
14104	  inst.instruction |= (shift_amount & 0x1c) << 10;
14105	  inst.instruction |= (shift_amount & 0x03) << 6;
14106	}
14107    }
14108}
14109
/* Encode SSAT: saturation position is biased by one.  */
static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
14115
14116static void
14117do_t_ssat16 (void)
14118{
14119  unsigned Rd, Rn;
14120
14121  Rd = inst.operands[0].reg;
14122  Rn = inst.operands[2].reg;
14123
14124  reject_bad_reg (Rd);
14125  reject_bad_reg (Rn);
14126
14127  inst.instruction |= Rd << 8;
14128  inst.instruction |= inst.operands[1].imm - 1;
14129  inst.instruction |= Rn << 16;
14130}
14131
/* Encode STREX: result register, value register, and a [Rn, #imm]
   address.  */
static void
do_t_strex (void)
{
  /* Only an immediate-offset, pre-indexed address without writeback,
     register offset, shift or negation is accepted.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  /* The offset is resolved later by the fixup machinery.  */
  inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
14148
/* Encode STREXD.  */
static void
do_t_strexd (void)
{
  /* The second value register defaults to the successor of the first.  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[1].reg + 1;

  /* Operand 0 must not overlap any of the other registers.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 16;
}
14165
14166static void
14167do_t_sxtah (void)
14168{
14169  unsigned Rd, Rn, Rm;
14170
14171  Rd = inst.operands[0].reg;
14172  Rn = inst.operands[1].reg;
14173  Rm = inst.operands[2].reg;
14174
14175  reject_bad_reg (Rd);
14176  reject_bad_reg (Rn);
14177  reject_bad_reg (Rm);
14178
14179  inst.instruction |= Rd << 8;
14180  inst.instruction |= Rn << 16;
14181  inst.instruction |= Rm;
14182  inst.instruction |= inst.operands[3].imm << 4;
14183}
14184
14185static void
14186do_t_sxth (void)
14187{
14188  unsigned Rd, Rm;
14189
14190  Rd = inst.operands[0].reg;
14191  Rm = inst.operands[1].reg;
14192
14193  reject_bad_reg (Rd);
14194  reject_bad_reg (Rm);
14195
14196  if (inst.instruction <= 0xffff
14197      && inst.size_req != 4
14198      && Rd <= 7 && Rm <= 7
14199      && (!inst.operands[2].present || inst.operands[2].imm == 0))
14200    {
14201      inst.instruction = THUMB_OP16 (inst.instruction);
14202      inst.instruction |= Rd;
14203      inst.instruction |= Rm << 3;
14204    }
14205  else if (unified_syntax)
14206    {
14207      if (inst.instruction <= 0xffff)
14208	inst.instruction = THUMB_OP32 (inst.instruction);
14209      inst.instruction |= Rd << 8;
14210      inst.instruction |= Rm;
14211      inst.instruction |= inst.operands[2].imm << 4;
14212    }
14213  else
14214    {
14215      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
14216		  _("Thumb encoding does not support rotation"));
14217      constraint (1, BAD_HIREG);
14218    }
14219}
14220
/* Encode SWI/SVC: the immediate is left entirely to the fixup.  */
static void
do_t_swi (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_SWI;
}
14226
14227static void
14228do_t_tb (void)
14229{
14230  unsigned Rn, Rm;
14231  int half;
14232
14233  half = (inst.instruction & 0x10) != 0;
14234  set_pred_insn_type_last ();
14235  constraint (inst.operands[0].immisreg,
14236	      _("instruction requires register index"));
14237
14238  Rn = inst.operands[0].reg;
14239  Rm = inst.operands[0].imm;
14240
14241  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
14242    constraint (Rn == REG_SP, BAD_SP);
14243  reject_bad_reg (Rm);
14244
14245  constraint (!half && inst.operands[0].shifted,
14246	      _("instruction does not allow shifted index"));
14247  inst.instruction |= (Rn << 16) | Rm;
14248}
14249
14250static void
14251do_t_udf (void)
14252{
14253  if (!inst.operands[0].present)
14254    inst.operands[0].imm = 0;
14255
14256  if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
14257    {
14258      constraint (inst.size_req == 2,
14259                  _("immediate value out of range"));
14260      inst.instruction = THUMB_OP32 (inst.instruction);
14261      inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
14262      inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
14263    }
14264  else
14265    {
14266      inst.instruction = THUMB_OP16 (inst.instruction);
14267      inst.instruction |= inst.operands[0].imm;
14268    }
14269
14270  set_pred_insn_type (NEUTRAL_IT_INSN);
14271}
14272
14273
/* Encode USAT: saturation position is used unbiased.  */
static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
14279
14280static void
14281do_t_usat16 (void)
14282{
14283  unsigned Rd, Rn;
14284
14285  Rd = inst.operands[0].reg;
14286  Rn = inst.operands[2].reg;
14287
14288  reject_bad_reg (Rd);
14289  reject_bad_reg (Rn);
14290
14291  inst.instruction |= Rd << 8;
14292  inst.instruction |= inst.operands[1].imm;
14293  inst.instruction |= Rn << 16;
14294}
14295
14296/* Checking the range of the branch offset (VAL) with NBITS bits
14297   and IS_SIGNED signedness.  Also checks the LSB to be 0.  */
14298static int
14299v8_1_branch_value_check (int val, int nbits, int is_signed)
14300{
14301  gas_assert (nbits > 0 && nbits <= 32);
14302  if (is_signed)
14303    {
14304      int cmp = (1 << (nbits - 1));
14305      if ((val < -cmp) || (val >= cmp) || (val & 0x01))
14306	return FAIL;
14307    }
14308  else
14309    {
14310      if ((val <= 0) || (val >= (1 << nbits)) || (val & 0x1))
14311	return FAIL;
14312    }
14313    return SUCCESS;
14314}
14315
/* For branches in Armv8.1-M Mainline.  Encodes BF, BFL, BFCSEL, BFX
   and BFLX; offsets known at assembly time are encoded directly,
   otherwise relocations are emitted for the fixup machinery.  */
static void
do_t_branch_future (void)
{
  /* Remember the mnemonic before the opcode is widened.  */
  unsigned long insn = inst.instruction;

  inst.instruction = THUMB_OP32 (inst.instruction);
  /* Operand 0: the branch-point label, common to all forms.  */
  if (inst.operands[0].hasreloc == 0)
    {
      if (v8_1_branch_value_check (inst.operands[0].imm, 5, FALSE) == FAIL)
	as_bad (BAD_BRANCH_OFF);

      inst.instruction |= ((inst.operands[0].imm & 0x1f) >> 1) << 23;
    }
  else
    {
      inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH5;
      inst.relocs[0].pc_rel = 1;
    }

  switch (insn)
    {
      case T_MNEM_bf:
	if (inst.operands[1].hasreloc == 0)
	  {
	    int val = inst.operands[1].imm;
	    if (v8_1_branch_value_check (inst.operands[1].imm, 17, TRUE) == FAIL)
	      as_bad (BAD_BRANCH_OFF);

	    /* Split the 17-bit offset into the immA:immB:immC fields.  */
	    int immA = (val & 0x0001f000) >> 12;
	    int immB = (val & 0x00000ffc) >> 2;
	    int immC = (val & 0x00000002) >> 1;
	    inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	  }
	else
	  {
	    inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF17;
	    inst.relocs[1].pc_rel = 1;
	  }
	break;

      case T_MNEM_bfl:
	if (inst.operands[1].hasreloc == 0)
	  {
	    int val = inst.operands[1].imm;
	    if (v8_1_branch_value_check (inst.operands[1].imm, 19, TRUE) == FAIL)
	      as_bad (BAD_BRANCH_OFF);

	    /* As for BF, but with a wider (19-bit) offset.  */
	    int immA = (val & 0x0007f000) >> 12;
	    int immB = (val & 0x00000ffc) >> 2;
	    int immC = (val & 0x00000002) >> 1;
	    inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	  }
	  else
	  {
	    inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF19;
	    inst.relocs[1].pc_rel = 1;
	  }
	break;

      case T_MNEM_bfcsel:
	/* Operand 1.  */
	if (inst.operands[1].hasreloc == 0)
	  {
	    int val = inst.operands[1].imm;
	    int immA = (val & 0x00001000) >> 12;
	    int immB = (val & 0x00000ffc) >> 2;
	    int immC = (val & 0x00000002) >> 1;
	    inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	  }
	  else
	  {
	    inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF13;
	    inst.relocs[1].pc_rel = 1;
	  }

	/* Operand 2.  Operands 0 and 2 must agree on whether they are
	   resolved: the T bit is derived from their difference.  */
	if (inst.operands[2].hasreloc == 0)
	  {
	      constraint ((inst.operands[0].hasreloc != 0), BAD_ARGS);
	      int val2 = inst.operands[2].imm;
	      int val0 = inst.operands[0].imm & 0x1f;
	      int diff = val2 - val0;
	      if (diff == 4)
		inst.instruction |= 1 << 17; /* T bit.  */
	      else if (diff != 2)
		as_bad (_("out of range label-relative fixup value"));
	  }
	else
	  {
	      constraint ((inst.operands[0].hasreloc == 0), BAD_ARGS);
	      inst.relocs[2].type = BFD_RELOC_THUMB_PCREL_BFCSEL;
	      inst.relocs[2].pc_rel = 1;
	  }

	/* Operand 3.  */
	constraint (inst.cond != COND_ALWAYS, BAD_COND);
	inst.instruction |= (inst.operands[3].imm & 0xf) << 18;
	break;

      case T_MNEM_bfx:
      case T_MNEM_bflx:
	/* Register forms: only the branch-target register to encode.  */
	inst.instruction |= inst.operands[1].reg << 16;
	break;

      default: abort ();
    }
}
14424
14425/* Helper function for do_t_loloop to handle relocations.  */
14426static void
14427v8_1_loop_reloc (int is_le)
14428{
14429  if (inst.relocs[0].exp.X_op == O_constant)
14430    {
14431      int value = inst.relocs[0].exp.X_add_number;
14432      value = (is_le) ? -value : value;
14433
14434      if (v8_1_branch_value_check (value, 12, FALSE) == FAIL)
14435	as_bad (BAD_BRANCH_OFF);
14436
14437      int imml, immh;
14438
14439      immh = (value & 0x00000ffc) >> 2;
14440      imml = (value & 0x00000002) >> 1;
14441
14442      inst.instruction |= (imml << 11) | (immh << 1);
14443    }
14444  else
14445    {
14446      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_LOOP12;
14447      inst.relocs[0].pc_rel = 1;
14448    }
14449}
14450
14451/* For shifts with four operands in MVE.  */
14452static void
14453do_mve_scalar_shift1 (void)
14454{
14455  unsigned int value = inst.operands[2].imm;
14456
14457  inst.instruction |= inst.operands[0].reg << 16;
14458  inst.instruction |= inst.operands[1].reg << 8;
14459
14460  /* Setting the bit for saturation.  */
14461  inst.instruction |= ((value == 64) ? 0: 1) << 7;
14462
14463  /* Assuming Rm is already checked not to be 11x1.  */
14464  constraint (inst.operands[3].reg == inst.operands[0].reg, BAD_OVERLAP);
14465  constraint (inst.operands[3].reg == inst.operands[1].reg, BAD_OVERLAP);
14466  inst.instruction |= inst.operands[3].reg << 12;
14467}
14468
/* For shifts in MVE.  */
static void
do_mve_scalar_shift (void)
{
  /* Two-operand form: shift in place; the omitted middle operand is
     encoded as register 0xf.  */
  if (!inst.operands[2].present)
    {
      inst.operands[2] = inst.operands[1];
      inst.operands[1].reg = 0xf;
    }

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 8;

  if (inst.operands[2].isreg)
    {
      /* Assuming Rm is already checked not to be 11x1.  */
      constraint (inst.operands[2].reg == inst.operands[0].reg, BAD_OVERLAP);
      constraint (inst.operands[2].reg == inst.operands[1].reg, BAD_OVERLAP);
      inst.instruction |= inst.operands[2].reg << 12;
    }
  else
    {
      /* Assuming imm is already checked as [1,32].  */
      unsigned int value = inst.operands[2].imm;
      /* The immediate is split into two fields.  */
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
      /* Change last 4 bits from 0xd to 0xf.  */
      inst.instruction |= 0x2;
    }
}
14499
14500/* MVE instruction encoder helpers.  */
14501#define M_MNEM_vabav	0xee800f01
14502#define M_MNEM_vmladav	  0xeef00e00
14503#define M_MNEM_vmladava	  0xeef00e20
14504#define M_MNEM_vmladavx	  0xeef01e00
14505#define M_MNEM_vmladavax  0xeef01e20
14506#define M_MNEM_vmlsdav	  0xeef00e01
14507#define M_MNEM_vmlsdava	  0xeef00e21
14508#define M_MNEM_vmlsdavx	  0xeef01e01
14509#define M_MNEM_vmlsdavax  0xeef01e21
14510#define M_MNEM_vmullt	0xee011e00
14511#define M_MNEM_vmullb	0xee010e00
14512#define M_MNEM_vctp	0xf000e801
14513#define M_MNEM_vst20	0xfc801e00
14514#define M_MNEM_vst21	0xfc801e20
14515#define M_MNEM_vst40	0xfc801e01
14516#define M_MNEM_vst41	0xfc801e21
14517#define M_MNEM_vst42	0xfc801e41
14518#define M_MNEM_vst43	0xfc801e61
14519#define M_MNEM_vld20	0xfc901e00
14520#define M_MNEM_vld21	0xfc901e20
14521#define M_MNEM_vld40	0xfc901e01
14522#define M_MNEM_vld41	0xfc901e21
14523#define M_MNEM_vld42	0xfc901e41
14524#define M_MNEM_vld43	0xfc901e61
14525#define M_MNEM_vstrb	0xec000e00
14526#define M_MNEM_vstrh	0xec000e10
14527#define M_MNEM_vstrw	0xec000e40
14528#define M_MNEM_vstrd	0xec000e50
14529#define M_MNEM_vldrb	0xec100e00
14530#define M_MNEM_vldrh	0xec100e10
14531#define M_MNEM_vldrw	0xec100e40
14532#define M_MNEM_vldrd	0xec100e50
14533#define M_MNEM_vmovlt	0xeea01f40
14534#define M_MNEM_vmovlb	0xeea00f40
14535#define M_MNEM_vmovnt	0xfe311e81
14536#define M_MNEM_vmovnb	0xfe310e81
14537#define M_MNEM_vadc	0xee300f00
14538#define M_MNEM_vadci	0xee301f00
14539#define M_MNEM_vbrsr	0xfe011e60
14540#define M_MNEM_vaddlv	0xee890f00
14541#define M_MNEM_vaddlva	0xee890f20
14542#define M_MNEM_vaddv	0xeef10f00
14543#define M_MNEM_vaddva	0xeef10f20
14544#define M_MNEM_vddup	0xee011f6e
14545#define M_MNEM_vdwdup	0xee011f60
14546#define M_MNEM_vidup	0xee010f6e
14547#define M_MNEM_viwdup	0xee010f60
14548#define M_MNEM_vmaxv	0xeee20f00
14549#define M_MNEM_vmaxav	0xeee00f00
14550#define M_MNEM_vminv	0xeee20f80
14551#define M_MNEM_vminav	0xeee00f80
14552#define M_MNEM_vmlaldav	  0xee800e00
14553#define M_MNEM_vmlaldava  0xee800e20
14554#define M_MNEM_vmlaldavx  0xee801e00
14555#define M_MNEM_vmlaldavax 0xee801e20
14556#define M_MNEM_vmlsldav	  0xee800e01
14557#define M_MNEM_vmlsldava  0xee800e21
14558#define M_MNEM_vmlsldavx  0xee801e01
14559#define M_MNEM_vmlsldavax 0xee801e21
14560#define M_MNEM_vrmlaldavhx  0xee801f00
14561#define M_MNEM_vrmlaldavhax 0xee801f20
14562#define M_MNEM_vrmlsldavh   0xfe800e01
14563#define M_MNEM_vrmlsldavha  0xfe800e21
14564#define M_MNEM_vrmlsldavhx  0xfe801e01
14565#define M_MNEM_vrmlsldavhax 0xfe801e21
14566#define M_MNEM_vqmovnt	  0xee331e01
14567#define M_MNEM_vqmovnb	  0xee330e01
14568#define M_MNEM_vqmovunt	  0xee311e81
14569#define M_MNEM_vqmovunb	  0xee310e81
14570#define M_MNEM_vshrnt	    0xee801fc1
14571#define M_MNEM_vshrnb	    0xee800fc1
14572#define M_MNEM_vrshrnt	    0xfe801fc1
14573#define M_MNEM_vqshrnt	    0xee801f40
14574#define M_MNEM_vqshrnb	    0xee800f40
14575#define M_MNEM_vqshrunt	    0xee801fc0
14576#define M_MNEM_vqshrunb	    0xee800fc0
14577#define M_MNEM_vrshrnb	    0xfe800fc1
14578#define M_MNEM_vqrshrnt	    0xee801f41
14579#define M_MNEM_vqrshrnb	    0xee800f41
14580#define M_MNEM_vqrshrunt    0xfe801fc0
14581#define M_MNEM_vqrshrunb    0xfe800fc0
14582
/* Bfloat16 instruction encoder helpers: base encodings for the BF16
   VFMAT/VFMAB mnemonics.  */
#define B_MNEM_vfmat 0xfc300850
#define B_MNEM_vfmab 0xfc300810
14586
/* Neon instruction encoder helpers.  */

/* Encodings for the different types for various Neon opcodes.  */

/* An "invalid" code for the following tables: marks a variant slot that
   has no encoding for the given mnemonic.  */
#define N_INV -1u
14593
/* One row of NEON_ENC_TAB: the alternative base encodings an overloaded
   mnemonic can assume.  Any field may be N_INV if that form does not
   exist for the mnemonic.  */
struct neon_tab_entry
{
  unsigned integer;		/* Integer (or first-variant) encoding.  */
  unsigned float_or_poly;	/* Float or polynomial encoding.  */
  unsigned scalar_or_imm;	/* Scalar or immediate encoding.  */
};
14600
/* Map overloaded Neon opcodes to their respective encodings.  Each X()
   entry gives the integer, float/poly and scalar/immediate base opcode
   values for one mnemonic (N_INV where a variant does not exist); the
   columns correspond to the fields of struct neon_tab_entry above.  */
#define NEON_ENC_TAB					\
  X(vabd,	0x0000700, 0x1200d00, N_INV),		\
  X(vabdl,	0x0800700, N_INV,     N_INV),		\
  X(vmax,	0x0000600, 0x0000f00, N_INV),		\
  X(vmin,	0x0000610, 0x0200f00, N_INV),		\
  X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
  X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
  X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
  X(vadd,	0x0000800, 0x0000d00, N_INV),		\
  X(vaddl,	0x0800000, N_INV,     N_INV),		\
  X(vsub,	0x1000800, 0x0200d00, N_INV),		\
  X(vsubl,	0x0800200, N_INV,     N_INV),		\
  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed.  */  	\
  X(vclt,	0x0000300, 0x1200e00, 0x1b10200),	\
  X(vcle,	0x0000310, 0x1000e00, 0x1b10180),	\
  X(vfma,	N_INV, 0x0000c10, N_INV),		\
  X(vfms,	N_INV, 0x0200c10, N_INV),		\
  X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
  X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
  X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal,	0x0800800, N_INV,     0x0800240),	\
  X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
  X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
  X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),	\
  X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),	\
  X(vqrdmlah,   0x3000b10, N_INV,     0x0800e40),	\
  X(vqrdmlsh,   0x3000c10, N_INV,     0x0800f40),	\
  X(vshl,	0x0000400, N_INV,     0x0800510),	\
  X(vqshl,	0x0000410, N_INV,     0x0800710),	\
  X(vand,	0x0000110, N_INV,     0x0800030),	\
  X(vbic,	0x0100110, N_INV,     0x0800030),	\
  X(veor,	0x1000110, N_INV,     N_INV),		\
  X(vorn,	0x0300110, N_INV,     0x0800010),	\
  X(vorr,	0x0200110, N_INV,     0x0800010),	\
  X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
  X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
  X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
  X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1,	0x0000000, 0x0800000, N_INV),		\
  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
  X(vst2,	0x0000100, 0x0800100, N_INV),		\
  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
  X(vst3,	0x0000200, 0x0800200, N_INV),		\
  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
  X(vst4,	0x0000300, 0x0800300, N_INV),		\
  X(vmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vtrn,	0x1b20080, N_INV,     N_INV),		\
  X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vqmovun,	0x1b20240, N_INV,     N_INV),		\
  X(vnmul,      0xe200a40, 0xe200b40, N_INV),		\
  X(vnmla,      0xe100a40, 0xe100b40, N_INV),		\
  X(vnmls,      0xe100a00, 0xe100b00, N_INV),		\
  X(vfnma,      0xe900a40, 0xe900b40, N_INV),		\
  X(vfnms,      0xe900a00, 0xe900b00, N_INV),		\
  X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),		\
  X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),		\
  X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),		\
  X(vcmpez,     0xeb50ac0, 0xeb50bc0, N_INV),		\
  X(vseleq,	0xe000a00, N_INV,     N_INV),		\
  X(vselvs,	0xe100a00, N_INV,     N_INV),		\
  X(vselge,	0xe200a00, N_INV,     N_INV),		\
  X(vselgt,	0xe300a00, N_INV,     N_INV),		\
  X(vmaxnm,	0xe800a00, 0x3000f10, N_INV),		\
  X(vminnm,	0xe800a40, 0x3200f10, N_INV),		\
  X(vcvta,	0xebc0a40, 0x3bb0000, N_INV),		\
  X(vrintr,	0xeb60a40, 0x3ba0400, N_INV),		\
  X(vrinta,	0xeb80a40, 0x3ba0400, N_INV),		\
  X(aes,	0x3b00300, N_INV,     N_INV),		\
  X(sha3op,	0x2000c00, N_INV,     N_INV),		\
  X(sha1h,	0x3b902c0, N_INV,     N_INV),           \
  X(sha2op,     0x3ba0380, N_INV,     N_INV)
14681
/* N_MNEM_<opc> enumerators, one per row of NEON_ENC_TAB, used to index
   neon_enc_tab below.  */
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};
14688
/* The encodings from NEON_ENC_TAB, indexed by enum neon_opc.  */
static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};
14695
/* Do not use these macros; instead, use NEON_ENCODE defined below.  */
/* Each accessor takes an N_MNEM_* value in the low 28 bits of X and
   returns the corresponding base encoding from neon_enc_tab.  */
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
/* The SINGLE/DOUBLE/FPV8 variants additionally preserve the top bits of
   X (e.g. the condition field) in the result.  */
#define NEON_ENC_SINGLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
#define NEON_ENC_FPV8_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))

/* Replace inst.instruction (holding an N_MNEM_* value) with the encoding
   for the given variant, and record that the insn is Neon-style so type
   suffixes are accepted (see check_neon_suffixes).  */
#define NEON_ENCODE(type, inst)					\
  do								\
    {								\
      inst.instruction = NEON_ENC_##type##_ (inst.instruction);	\
      inst.is_neon = 1;						\
    }								\
  while (0)
14720
/* Diagnose a Neon type suffix (e.g. ".s32") applied to an instruction whose
   encoder never consumed it (inst.is_neon was not set).  Expands in the body
   of an encoding function and may `return'.  */
#define check_neon_suffixes						\
  do									\
    {									\
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon)	\
	{								\
	  as_bad (_("invalid neon suffix for non neon instruction"));	\
	  return;							\
	}								\
    }									\
  while (0)
14731
/* Define shapes for instruction operands. The following mnemonic characters
   are used in this table:

     F - VFP S<n> register
     H - VFP S<n> register holding a half-precision (16-bit) value
     D - Neon D<n> register
     Q - Neon Q<n> register
     I - Immediate
     S - Scalar
     R - ARM register
     L - D<n> register list

   This table is used to generate various data:
     - enumerations of the form NS_DDR to be used as arguments to
       neon_select_shape.
     - a table classifying shapes into single, double, quad, mixed.
     - a table used to drive neon_select_shape.  */

#define NEON_SHAPE_DEF			\
  X(4, (R, R, Q, Q), QUAD),		\
  X(4, (Q, R, R, I), QUAD),		\
  X(4, (R, R, S, S), QUAD),		\
  X(4, (S, S, R, R), QUAD),		\
  X(3, (Q, R, I), QUAD),		\
  X(3, (I, Q, Q), QUAD),		\
  X(3, (I, Q, R), QUAD),		\
  X(3, (R, Q, Q), QUAD),		\
  X(3, (D, D, D), DOUBLE),		\
  X(3, (Q, Q, Q), QUAD),		\
  X(3, (D, D, I), DOUBLE),		\
  X(3, (Q, Q, I), QUAD),		\
  X(3, (D, D, S), DOUBLE),		\
  X(3, (Q, Q, S), QUAD),		\
  X(3, (Q, Q, R), QUAD),		\
  X(3, (R, R, Q), QUAD),		\
  X(2, (R, Q),	  QUAD),		\
  X(2, (D, D), DOUBLE),			\
  X(2, (Q, Q), QUAD),			\
  X(2, (D, S), DOUBLE),			\
  X(2, (Q, S), QUAD),			\
  X(2, (D, R), DOUBLE),			\
  X(2, (Q, R), QUAD),			\
  X(2, (D, I), DOUBLE),			\
  X(2, (Q, I), QUAD),			\
  X(3, (D, L, D), DOUBLE),		\
  X(2, (D, Q), MIXED),			\
  X(2, (Q, D), MIXED),			\
  X(3, (D, Q, I), MIXED),		\
  X(3, (Q, D, I), MIXED),		\
  X(3, (Q, D, D), MIXED),		\
  X(3, (D, Q, Q), MIXED),		\
  X(3, (Q, Q, D), MIXED),		\
  X(3, (Q, D, S), MIXED),		\
  X(3, (D, Q, S), MIXED),		\
  X(4, (D, D, D, I), DOUBLE),		\
  X(4, (Q, Q, Q, I), QUAD),		\
  X(4, (D, D, S, I), DOUBLE),		\
  X(4, (Q, Q, S, I), QUAD),		\
  X(2, (F, F), SINGLE),			\
  X(3, (F, F, F), SINGLE),		\
  X(2, (F, I), SINGLE),			\
  X(2, (F, D), MIXED),			\
  X(2, (D, F), MIXED),			\
  X(3, (F, F, I), MIXED),		\
  X(4, (R, R, F, F), SINGLE),		\
  X(4, (F, F, R, R), SINGLE),		\
  X(3, (D, R, R), DOUBLE),		\
  X(3, (R, R, D), DOUBLE),		\
  X(2, (S, R), SINGLE),			\
  X(2, (R, S), SINGLE),			\
  X(2, (F, R), SINGLE),			\
  X(2, (R, F), SINGLE),			\
/* Used for MVE tail predicated loop instructions.  */\
  X(2, (R, R), QUAD),			\
/* Half float shape supported so far.  */\
  X (2, (H, D), MIXED),			\
  X (2, (D, H), MIXED),			\
  X (2, (H, F), MIXED),			\
  X (2, (F, H), MIXED),			\
  X (2, (H, H), HALF),			\
  X (2, (H, R), HALF),			\
  X (2, (R, H), HALF),			\
  X (2, (H, I), HALF),			\
  X (3, (H, H, H), HALF),		\
  X (3, (H, F, I), MIXED),		\
  X (3, (F, H, I), MIXED),		\
  X (3, (D, H, H), MIXED),		\
  X (3, (D, H, S), MIXED)
14819
/* Expanders that turn a NEON_SHAPE_DEF operand list into one enumerator
   of the form NS_DDR (one letter per operand).  */
#define S2(A,B)		NS_##A##B
#define S3(A,B,C)	NS_##A##B##C
#define S4(A,B,C,D)	NS_##A##B##C##D

#define X(N, L, C) S##N L

/* All operand shapes.  NS_NULL terminates neon_select_shape's argument
   list and doubles as its "no shape matched" return value.  */
enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL
};

#undef X
#undef S2
#undef S3
#undef S4
14836
/* Classification of each shape by the predominant register file it uses;
   consulted by neon_quad to decide whether the Q bit should be set.  */
enum neon_shape_class
{
  SC_HALF,
  SC_SINGLE,
  SC_DOUBLE,
  SC_QUAD,
  SC_MIXED
};

#define X(N, L, C) SC_##C

/* Per-shape classification, indexed by enum neon_shape.  */
static enum neon_shape_class neon_shape_class[] =
{
  NEON_SHAPE_DEF
};

#undef X
14854
/* One element kind per operand-shape letter used in NEON_SHAPE_DEF.  */
enum neon_shape_el
{
  SE_H,
  SE_F,
  SE_D,
  SE_Q,
  SE_I,
  SE_S,
  SE_R,
  SE_L
};

/* Register widths of above.  */
static unsigned neon_shape_el_size[] =
{
  16,	/* SE_H.  */
  32,	/* SE_F.  */
  64,	/* SE_D.  */
  128,	/* SE_Q.  */
  0,	/* SE_I: immediates have no register width.  */
  32,	/* SE_S.  */
  32,	/* SE_R.  */
  0	/* SE_L: register lists have no single width.  */
};

/* Decomposed form of one shape: operand count plus the element kind of
   each operand.  */
struct neon_shape_info
{
  unsigned els;
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];
};
14885
/* Expanders that turn a NEON_SHAPE_DEF operand list into an array of
   SE_* element kinds.  */
#define S2(A,B)		{ SE_##A, SE_##B }
#define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }

#define X(N, L, C) { N, S##N L }

/* Per-shape operand descriptions, indexed by enum neon_shape; this is
   the table that drives neon_select_shape.  */
static struct neon_shape_info neon_shape_tab[] =
{
  NEON_SHAPE_DEF
};

#undef X
#undef S2
#undef S3
#undef S4
14901
/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.  */

enum neon_type_mask
{
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_P64	 = 0x0200000,
  N_BF16 = 0x0400000,
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
  /* The modifier bits below deliberately reuse the element-mask encodings;
     they are only meaningful in combination with N_EQK.  */
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_P64
};

/* All of the N_EQK modifier bits.  */
#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

/* Common combinations of the element masks above.  */
#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_S_32     (N_S8 | N_S16 | N_S32)
#define N_F_16_32  (N_F16 | N_F32)
#define N_SUF_32   (N_SU_32 | N_F_16_32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
#define N_F_ALL    (N_F16 | N_F32 | N_F64)
#define N_I_MVE	   (N_I8 | N_I16 | N_I32)
#define N_F_MVE	   (N_F16 | N_F32)
#define N_SU_MVE   (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
14966
14967/* Select a "shape" for the current instruction (describing register types or
14968   sizes) from a list of alternatives. Return NS_NULL if the current instruction
14969   doesn't fit. For non-polymorphic shapes, checking is usually done as a
14970   function of operand parsing, so this function doesn't need to be called.
14971   Shapes should be listed in order of decreasing length.  */
14972
static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  /* Try each candidate shape in turn; the argument list is terminated by
     NS_NULL.  */
  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  /* Fewer operands were parsed than this shape requires.  */
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  switch (neon_shape_tab[shape].el[j])
	    {
	      /* If a  .f16,  .16,  .u16,  .s16 type specifier is given over
		 a VFP single precision register operand, it's essentially
		 means only half of the register is used.

		 If the type specifier is given after the mnemonics, the
		 information is stored in inst.vectype.  If the type specifier
		 is given after register operand, the information is stored
		 in inst.operands[].vectype.

		 When there is only one type specifier, and all the register
		 operands are the same type of hardware register, the type
		 specifier applies to all register operands.

		 If no type specifier is given, the shape is inferred from
		 operand information.

		 for example:
		 vadd.f16 s0, s1, s2:		NS_HHH
		 vabs.f16 s0, s1:		NS_HH
		 vmov.f16 s0, r1:		NS_HR
		 vmov.f16 r0, s1:		NS_RH
		 vcvt.f16 r0, s1:		NS_RH
		 vcvt.f16.s32	s2, s2, #29:	NS_HFI
		 vcvt.f16.s32	s2, s2:		NS_HF
	      */
	    case SE_H:
	      /* Single-precision VFP register used as a 16-bit value: the
		 size may come from the mnemonic suffix or the operand.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1
			 && inst.vectype.el[0].size == 16)
			|| (inst.vectype.elems > 1
			    && inst.vectype.el[j].size == 16)
			|| (inst.vectype.elems == 0
			    && inst.operands[j].vectype.type != NT_invtype
			    && inst.operands[j].vectype.size == 16))))
		matches = 0;
	      break;

	    case SE_F:
	      /* Single-precision VFP register (32-bit, or no type given).  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1 && inst.vectype.el[0].size == 32)
			|| (inst.vectype.elems > 1 && inst.vectype.el[j].size == 32)
			|| (inst.vectype.elems == 0
			    && (inst.operands[j].vectype.size == 32
				|| inst.operands[j].vectype.type == NT_invtype)))))
		matches = 0;
	      break;

	    case SE_D:
	      /* Neon D register: vector, neither quad nor single.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_R:
	      /* Plain ARM core register.  */
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    case SE_Q:
	      /* Neon Q register.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_I:
	      /* Immediate: neither a register nor a scalar.  */
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_S:
	      /* Scalar (element of a vector register).  */
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_L:
	      /* Register lists are not checked here.  */
	      break;
	    }
	  if (!matches)
	    break;
	}
      if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
	/* We've matched all the entries in the shape table, and we don't
	   have any left over operands which have not been matched.  */
	break;
    }

  va_end (ap);

  /* Only report an error if the caller supplied at least one shape.  */
  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
15109
15110/* True if SHAPE is predominantly a quadword operation (most of the time, this
15111   means the Q bit should be set).  */
15112
15113static int
15114neon_quad (enum neon_shape shape)
15115{
15116  return neon_shape_class[shape] == SC_QUAD;
15117}
15118
15119static void
15120neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
15121		       unsigned *g_size)
15122{
15123  /* Allow modification to be made to types which are constrained to be
15124     based on the key element, based on bits set alongside N_EQK.  */
15125  if ((typebits & N_EQK) != 0)
15126    {
15127      if ((typebits & N_HLF) != 0)
15128	*g_size /= 2;
15129      else if ((typebits & N_DBL) != 0)
15130	*g_size *= 2;
15131      if ((typebits & N_SGN) != 0)
15132	*g_type = NT_signed;
15133      else if ((typebits & N_UNS) != 0)
15134	*g_type = NT_unsigned;
15135      else if ((typebits & N_INT) != 0)
15136	*g_type = NT_integer;
15137      else if ((typebits & N_FLT) != 0)
15138	*g_type = NT_float;
15139      else if ((typebits & N_SIZ) != 0)
15140	*g_type = NT_untyped;
15141    }
15142}
15143
/* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
   operand type, i.e. the single type specified in a Neon instruction when it
   is the only one given.  */

static struct neon_type_el
neon_type_promote (struct neon_type_el *key, unsigned thisarg)
{
  struct neon_type_el dest = *key;

  /* Promotion only makes sense for operands tied to the key type.  */
  gas_assert ((thisarg & N_EQK) != 0);

  /* Apply the N_DBL/N_HLF/N_SGN/... modifiers to a copy of the key.  */
  neon_modify_type_size (thisarg, &dest.type, &dest.size);

  return dest;
}
15159
15160/* Convert Neon type and size into compact bitmask representation.  */
15161
15162static enum neon_type_mask
15163type_chk_of_el_type (enum neon_el_type type, unsigned size)
15164{
15165  switch (type)
15166    {
15167    case NT_untyped:
15168      switch (size)
15169	{
15170	case 8:  return N_8;
15171	case 16: return N_16;
15172	case 32: return N_32;
15173	case 64: return N_64;
15174	default: ;
15175	}
15176      break;
15177
15178    case NT_integer:
15179      switch (size)
15180	{
15181	case 8:  return N_I8;
15182	case 16: return N_I16;
15183	case 32: return N_I32;
15184	case 64: return N_I64;
15185	default: ;
15186	}
15187      break;
15188
15189    case NT_float:
15190      switch (size)
15191	{
15192	case 16: return N_F16;
15193	case 32: return N_F32;
15194	case 64: return N_F64;
15195	default: ;
15196	}
15197      break;
15198
15199    case NT_poly:
15200      switch (size)
15201	{
15202	case 8:  return N_P8;
15203	case 16: return N_P16;
15204	case 64: return N_P64;
15205	default: ;
15206	}
15207      break;
15208
15209    case NT_signed:
15210      switch (size)
15211	{
15212	case 8:  return N_S8;
15213	case 16: return N_S16;
15214	case 32: return N_S32;
15215	case 64: return N_S64;
15216	default: ;
15217	}
15218      break;
15219
15220    case NT_unsigned:
15221      switch (size)
15222	{
15223	case 8:  return N_U8;
15224	case 16: return N_U16;
15225	case 32: return N_U32;
15226	case 64: return N_U64;
15227	default: ;
15228	}
15229      break;
15230
15231    case NT_bfloat:
15232      if (size == 16) return N_BF16;
15233      break;
15234
15235    default: ;
15236    }
15237
15238  return N_UTYP;
15239}
15240
15241/* Convert compact Neon bitmask type representation to a type and size. Only
15242   handles the case where a single bit is set in the mask.  */
15243
15244static int
15245el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
15246		     enum neon_type_mask mask)
15247{
15248  if ((mask & N_EQK) != 0)
15249    return FAIL;
15250
15251  if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
15252    *size = 8;
15253  else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16 | N_BF16))
15254	   != 0)
15255    *size = 16;
15256  else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
15257    *size = 32;
15258  else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
15259    *size = 64;
15260  else
15261    return FAIL;
15262
15263  if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
15264    *type = NT_signed;
15265  else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
15266    *type = NT_unsigned;
15267  else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
15268    *type = NT_integer;
15269  else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
15270    *type = NT_untyped;
15271  else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
15272    *type = NT_poly;
15273  else if ((mask & (N_F_ALL)) != 0)
15274    *type = NT_float;
15275  else if ((mask & (N_BF16)) != 0)
15276    *type = NT_bfloat;
15277  else
15278    return FAIL;
15279
15280  return SUCCESS;
15281}
15282
15283/* Modify a bitmask of allowed types. This is only needed for type
15284   relaxation.  */
15285
15286static unsigned
15287modify_types_allowed (unsigned allowed, unsigned mods)
15288{
15289  unsigned size;
15290  enum neon_el_type type;
15291  unsigned destmask;
15292  int i;
15293
15294  destmask = 0;
15295
15296  for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
15297    {
15298      if (el_type_of_type_chk (&type, &size,
15299			       (enum neon_type_mask) (allowed & i)) == SUCCESS)
15300	{
15301	  neon_modify_type_size (mods, &type, &size);
15302	  destmask |= type_chk_of_el_type (type, size);
15303	}
15304    }
15305
15306  return destmask;
15307}
15308
15309/* Check type and return type classification.
15310   The manual states (paraphrase): If one datatype is given, it indicates the
15311   type given in:
15312    - the second operand, if there is one
15313    - the operand, if there is no second operand
15314    - the result, if there are no operands.
15315   This isn't quite good enough though, so we use a concept of a "key" datatype
15316   which is set on a per-instruction basis, which is the one which matters when
15317   only one data type is written.
15318   Note: this function has side-effects (e.g. filling in missing operands). All
15319   Neon instructions should call it before performing bit encoding.  */
15320
static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  struct neon_type_el badtype = {NT_invtype, -1};	/* Error return value.  */
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      if (thisarg == N_IGNORE_TYPE)
	{
	  va_end (ap);
	  return badtype;
	}
      types[i] = thisarg;
      /* Remember which operand carries the key (main) type.  */
      if ((thisarg & N_KEY) != 0)
	key_el = i;
    }
  va_end (ap);

  /* Reject a type given both after the mnemonic and on the operands.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
	{
	  first_error (_("types specified in both the mnemonic and operands"));
	  return badtype;
	}

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
	if (j != key_el)
	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
	 after each operand. We allow some flexibility here; as long as the
	 "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
	if (inst.operands[j].vectype.type != NT_invtype)
	  inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
	{
	  for (j = 0; j < els; j++)
	    if (inst.operands[j].vectype.type == NT_invtype)
	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						      types[j]);
	}
      else
	{
	  first_error (_("operand types can't be inferred"));
	  return badtype;
	}
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Two passes over the operands: pass 0 records the key operand's type,
     size and allowed-type mask; pass 1 validates every operand against the
     key (including VFP register-width checks).  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
	{
	  unsigned thisarg = types[i];
	  /* For key-relative operands on pass 1, derive the allowed mask
	     from the key's mask plus this operand's modifier bits.  */
	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
	  enum neon_el_type g_type = inst.vectype.el[i].type;
	  unsigned g_size = inst.vectype.el[i].size;

	  /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable.  */
	  if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

	  /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly.  */
	  if (((types_allowed & N_UNT) == 0)
	      && ((g_size == 8 && (types_allowed & N_8) != 0)
		  || (g_size == 16 && (types_allowed & N_16) != 0)
		  || (g_size == 32 && (types_allowed & N_32) != 0)
		  || (g_size == 64 && (types_allowed & N_64) != 0)))
	    g_type = NT_untyped;

	  if (pass == 0)
	    {
	      if ((thisarg & N_KEY) != 0)
		{
		  k_type = g_type;
		  k_size = g_size;
		  key_allowed = thisarg & ~N_KEY;

		  /* Check architecture constraint on FP16 extension.  */
		  if (k_size == 16
		      && k_type == NT_float
		      && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
		    {
		      inst.error = _(BAD_FP16);
		      return badtype;
		    }
		}
	    }
	  else
	    {
	      if ((thisarg & N_VFP) != 0)
		{
		  enum neon_shape_el regshape;
		  unsigned regwidth, match;

		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
		  if (ns == NS_NULL)
		    {
		      first_error (_("invalid instruction shape"));
		      return badtype;
		    }
		  regshape = neon_shape_tab[ns].el[i];
		  regwidth = neon_shape_el_size[regshape];

		  /* In VFP mode, operands must match register widths. If we
		     have a key operand, use its width, else use the width of
		     the current operand.  */
		  if (k_size != -1u)
		    match = k_size;
		  else
		    match = g_size;

		  /* FP16 will use a single precision register.  */
		  if (regwidth == 32 && match == 16)
		    {
		      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
			match = regwidth;
		      else
			{
			  inst.error = _(BAD_FP16);
			  return badtype;
			}
		    }

		  if (regwidth != match)
		    {
		      first_error (_("operand size must match register width"));
		      return badtype;
		    }
		}

	      /* Non-key-relative operand: its type need only be allowed.  */
	      if ((thisarg & N_EQK) == 0)
		{
		  unsigned given_type = type_chk_of_el_type (g_type, g_size);

		  if ((given_type & types_allowed) == 0)
		    {
		      first_error (BAD_SIMD_TYPE);
		      return badtype;
		    }
		}
	      else
		{
		  /* Key-relative operand: must equal the key type after
		     applying this operand's modifier bits.  */
		  enum neon_el_type mod_k_type = k_type;
		  unsigned mod_k_size = k_size;
		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
		  if (g_type != mod_k_type || g_size != mod_k_size)
		    {
		      first_error (_("inconsistent types in Neon instruction"));
		      return badtype;
		    }
		}
	    }
	}
    }

  /* Report the key operand's resolved type to the caller.  */
  return inst.vectype.el[key_el];
}
15518
15519/* Neon-style VFP instruction forwarding.  */
15520
15521/* Thumb VFP instructions have 0xE in the condition field.  */
15522
15523static void
15524do_vfp_cond_or_thumb (void)
15525{
15526  inst.is_neon = 1;
15527
15528  if (thumb_mode)
15529    inst.instruction |= 0xe0000000;
15530  else
15531    inst.instruction |= inst.cond << 28;
15532}
15533
/* Look up and encode a simple mnemonic, for use as a helper function for the
   Neon-style VFP syntax.  This avoids duplication of bits of the insns table,
   etc.  It is assumed that operand parsing has already been done, and that the
   operands are in the form expected by the given opcode (this isn't necessarily
   the same as the form in which they were parsed, hence some massaging must
   take place before this function is called).
   Checks current arch version against that in the looked-up opcode.  */

static void
do_vfp_nsyn_opcode (const char *opname)
{
  const struct asm_opcode *opcode;

  /* OPNAME must name an entry in the main opcode hash table; a miss here
     is an assembler-internal error, not a user error.  */
  opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);

  if (!opcode)
    abort ();

  /* Reject the instruction if the selected CPU lacks the feature bits
     required by the Thumb or ARM variant of the looked-up opcode.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
		thumb_mode ? *opcode->tvariant : *opcode->avariant),
	      _(BAD_FPU));

  inst.is_neon = 1;

  if (thumb_mode)
    {
      inst.instruction = opcode->tvalue;
      opcode->tencode ();
    }
  else
    {
      /* ARM encodings take the current condition code in bits [31:28].  */
      inst.instruction = (inst.cond << 28) | opcode->avalue;
      opcode->aencode ();
    }
}
15569
15570static void
15571do_vfp_nsyn_add_sub (enum neon_shape rs)
15572{
15573  int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
15574
15575  if (rs == NS_FFF || rs == NS_HHH)
15576    {
15577      if (is_add)
15578	do_vfp_nsyn_opcode ("fadds");
15579      else
15580	do_vfp_nsyn_opcode ("fsubs");
15581
15582      /* ARMv8.2 fp16 instruction.  */
15583      if (rs == NS_HHH)
15584	do_scalar_fp16_v82_encode ();
15585    }
15586  else
15587    {
15588      if (is_add)
15589	do_vfp_nsyn_opcode ("faddd");
15590      else
15591	do_vfp_nsyn_opcode ("fsubd");
15592    }
15593}
15594
15595/* Check operand types to see if this is a VFP instruction, and if so call
15596   PFN ().  */
15597
15598static int
15599try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
15600{
15601  enum neon_shape rs;
15602  struct neon_type_el et;
15603
15604  switch (args)
15605    {
15606    case 2:
15607      rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
15608      et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
15609      break;
15610
15611    case 3:
15612      rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
15613      et = neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
15614			    N_F_ALL | N_KEY | N_VFP);
15615      break;
15616
15617    default:
15618      abort ();
15619    }
15620
15621  if (et.type != NT_invtype)
15622    {
15623      pfn (rs);
15624      return SUCCESS;
15625    }
15626
15627  inst.error = NULL;
15628  return FAIL;
15629}
15630
15631static void
15632do_vfp_nsyn_mla_mls (enum neon_shape rs)
15633{
15634  int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
15635
15636  if (rs == NS_FFF || rs == NS_HHH)
15637    {
15638      if (is_mla)
15639	do_vfp_nsyn_opcode ("fmacs");
15640      else
15641	do_vfp_nsyn_opcode ("fnmacs");
15642
15643      /* ARMv8.2 fp16 instruction.  */
15644      if (rs == NS_HHH)
15645	do_scalar_fp16_v82_encode ();
15646    }
15647  else
15648    {
15649      if (is_mla)
15650	do_vfp_nsyn_opcode ("fmacd");
15651      else
15652	do_vfp_nsyn_opcode ("fnmacd");
15653    }
15654}
15655
15656static void
15657do_vfp_nsyn_fma_fms (enum neon_shape rs)
15658{
15659  int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
15660
15661  if (rs == NS_FFF || rs == NS_HHH)
15662    {
15663      if (is_fma)
15664	do_vfp_nsyn_opcode ("ffmas");
15665      else
15666	do_vfp_nsyn_opcode ("ffnmas");
15667
15668      /* ARMv8.2 fp16 instruction.  */
15669      if (rs == NS_HHH)
15670	do_scalar_fp16_v82_encode ();
15671    }
15672  else
15673    {
15674      if (is_fma)
15675	do_vfp_nsyn_opcode ("ffmad");
15676      else
15677	do_vfp_nsyn_opcode ("ffnmad");
15678    }
15679}
15680
15681static void
15682do_vfp_nsyn_mul (enum neon_shape rs)
15683{
15684  if (rs == NS_FFF || rs == NS_HHH)
15685    {
15686      do_vfp_nsyn_opcode ("fmuls");
15687
15688      /* ARMv8.2 fp16 instruction.  */
15689      if (rs == NS_HHH)
15690	do_scalar_fp16_v82_encode ();
15691    }
15692  else
15693    do_vfp_nsyn_opcode ("fmuld");
15694}
15695
15696static void
15697do_vfp_nsyn_abs_neg (enum neon_shape rs)
15698{
15699  int is_neg = (inst.instruction & 0x80) != 0;
15700  neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_VFP | N_KEY);
15701
15702  if (rs == NS_FF || rs == NS_HH)
15703    {
15704      if (is_neg)
15705	do_vfp_nsyn_opcode ("fnegs");
15706      else
15707	do_vfp_nsyn_opcode ("fabss");
15708
15709      /* ARMv8.2 fp16 instruction.  */
15710      if (rs == NS_HH)
15711	do_scalar_fp16_v82_encode ();
15712    }
15713  else
15714    {
15715      if (is_neg)
15716	do_vfp_nsyn_opcode ("fnegd");
15717      else
15718	do_vfp_nsyn_opcode ("fabsd");
15719    }
15720}
15721
15722/* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
15723   insns belong to Neon, and are handled elsewhere.  */
15724
15725static void
15726do_vfp_nsyn_ldm_stm (int is_dbmode)
15727{
15728  int is_ldm = (inst.instruction & (1 << 20)) != 0;
15729  if (is_ldm)
15730    {
15731      if (is_dbmode)
15732	do_vfp_nsyn_opcode ("fldmdbs");
15733      else
15734	do_vfp_nsyn_opcode ("fldmias");
15735    }
15736  else
15737    {
15738      if (is_dbmode)
15739	do_vfp_nsyn_opcode ("fstmdbs");
15740      else
15741	do_vfp_nsyn_opcode ("fstmias");
15742    }
15743}
15744
15745static void
15746do_vfp_nsyn_sqrt (void)
15747{
15748  enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
15749  neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
15750
15751  if (rs == NS_FF || rs == NS_HH)
15752    {
15753      do_vfp_nsyn_opcode ("fsqrts");
15754
15755      /* ARMv8.2 fp16 instruction.  */
15756      if (rs == NS_HH)
15757	do_scalar_fp16_v82_encode ();
15758    }
15759  else
15760    do_vfp_nsyn_opcode ("fsqrtd");
15761}
15762
15763static void
15764do_vfp_nsyn_div (void)
15765{
15766  enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
15767  neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
15768		   N_F_ALL | N_KEY | N_VFP);
15769
15770  if (rs == NS_FFF || rs == NS_HHH)
15771    {
15772      do_vfp_nsyn_opcode ("fdivs");
15773
15774      /* ARMv8.2 fp16 instruction.  */
15775      if (rs == NS_HHH)
15776	do_scalar_fp16_v82_encode ();
15777    }
15778  else
15779    do_vfp_nsyn_opcode ("fdivd");
15780}
15781
15782static void
15783do_vfp_nsyn_nmul (void)
15784{
15785  enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
15786  neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
15787		   N_F_ALL | N_KEY | N_VFP);
15788
15789  if (rs == NS_FFF || rs == NS_HHH)
15790    {
15791      NEON_ENCODE (SINGLE, inst);
15792      do_vfp_sp_dyadic ();
15793
15794      /* ARMv8.2 fp16 instruction.  */
15795      if (rs == NS_HHH)
15796	do_scalar_fp16_v82_encode ();
15797    }
15798  else
15799    {
15800      NEON_ENCODE (DOUBLE, inst);
15801      do_vfp_dp_rd_rn_rm ();
15802    }
15803  do_vfp_cond_or_thumb ();
15804
15805}
15806
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3).  */

static unsigned
neon_logbits (unsigned size)
{
  /* ffs() is one-based, so 8/16/32/64 map to 4/5/6/7; subtract 4.  */
  return ffs (size) - 4;
}
15815
/* Split a 5-bit vector register number into the low four bits and the
   high (fifth) bit, as the Neon/MVE encodings place them in separate
   instruction fields.  */
#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)
15818
/* Map the ARM condition number parsed into inst.operands[0].imm onto the
   3-bit "fcond" value used by MVE VCMP/VPT, validating that the condition
   is usable with element type ET.  On an invalid combination, reports an
   error via first_error and returns 0.  */
static unsigned
mve_get_vcmp_vpt_cond (struct neon_type_el et)
{
  switch (et.type)
    {
    default:
      first_error (BAD_EL_TYPE);
      return 0;
    case NT_float:
      /* Floating point accepts the full eq/ne/ge/lt/gt/le set.  */
      switch (inst.operands[0].imm)
	{
	default:
	  first_error (_("invalid condition"));
	  return 0;
	case 0x0:
	  /* eq.  */
	  return 0;
	case 0x1:
	  /* ne.  */
	  return 1;
	case 0xa:
	  /* ge.  */
	  return 4;
	case 0xb:
	  /* lt.  */
	  return 5;
	case 0xc:
	  /* gt.  */
	  return 6;
	case 0xd:
	  /* le.  */
	  return 7;
	}
    case NT_integer:
      /* only accept eq and ne.  */
      if (inst.operands[0].imm > 1)
	{
	  first_error (_("invalid condition"));
	  return 0;
	}
      return inst.operands[0].imm;
    case NT_unsigned:
      /* 0x2 is cs/hs (unsigned >=), 0x8 is hi (unsigned >).  */
      if (inst.operands[0].imm == 0x2)
	return 2;
      else if (inst.operands[0].imm == 0x8)
	return 3;
      else
	{
	  first_error (_("invalid condition"));
	  return 0;
	}
    case NT_signed:
      /* Signed element types accept ge/lt/gt/le.  */
      switch (inst.operands[0].imm)
	{
	  default:
	    first_error (_("invalid condition"));
	    return 0;
	  case 0xa:
	    /* ge.  */
	    return 4;
	  case 0xb:
	    /* lt.  */
	    return 5;
	  case 0xc:
	    /* gt.  */
	    return 6;
	  case 0xd:
	    /* le.  */
	    return 7;
	}
    }
  /* Should be unreachable.  */
  abort ();
}
15893
/* For VCTP (create vector tail predicate) in MVE.  */
static void
do_mve_vctp (void)
{
  int dt = 0;
  unsigned size = 0x0;

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* This is a typical MVE instruction which has no type but have size 8, 16,
     32 and 64.  For instructions with no type, inst.vectype.el[j].type is set
     to NT_untyped and size is updated in inst.vectype.el[j].size.  */
  if ((inst.operands[0].present) && (inst.vectype.el[0].type == NT_untyped))
    dt = inst.vectype.el[0].size;

  /* Setting this does not indicate an actual NEON instruction, but only
     indicates that the mnemonic accepts neon-style type suffixes.  */
  inst.is_neon = 1;

  /* Map the element size onto the 2-bit size field: 8/16/32/64 -> 0..3.
     Any other value (including a missing size suffix) is rejected, though
     encoding still proceeds with size 0 after the error is recorded.  */
  switch (dt)
    {
      case 8:
	break;
      case 16:
	size = 0x1; break;
      case 32:
	size = 0x2; break;
      case 64:
	size = 0x3; break;
      default:
	first_error (_("Type is not allowed for this instruction"));
    }
  inst.instruction |= size << 20;
  inst.instruction |= inst.operands[0].reg << 16;
}
15932
static void
do_mve_vpt (void)
{
  /* We are dealing with a vector predicated block.  */
  if (inst.operands[0].present)
    {
      /* VPT with a comparison: operands are <fcond>, Qn, and Qm or Rm.  */
      enum neon_shape rs = neon_select_shape (NS_IQQ, NS_IQR, NS_NULL);
      struct neon_type_el et
	= neon_check_type (3, rs, N_EQK, N_KEY | N_F_MVE | N_I_MVE | N_SU_32,
			   N_EQK);

      unsigned fcond = mve_get_vcmp_vpt_cond (et);

      constraint (inst.operands[1].reg > 14, MVE_BAD_QREG);

      if (et.type == NT_invtype)
	return;

      if (et.type == NT_float)
	{
	  /* Floating-point VPT needs the MVE FP extension and exists only
	     for f16/f32; bit 28 selects the f16 variant.  */
	  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext),
		      BAD_FPU);
	  constraint (et.size != 16 && et.size != 32, BAD_EL_TYPE);
	  inst.instruction |= (et.size == 16) << 28;
	  inst.instruction |= 0x3 << 20;
	}
      else
	{
	  constraint (et.size != 8 && et.size != 16 && et.size != 32,
		      BAD_EL_TYPE);
	  inst.instruction |= 1 << 28;
	  inst.instruction |= neon_logbits (et.size) << 20;
	}

      if (inst.operands[2].isquad)
	{
	  /* Vector form: Qm goes into the split HI1/LOW4 register fields.  */
	  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
	  inst.instruction |= LOW4 (inst.operands[2].reg);
	  inst.instruction |= (fcond & 0x2) >> 1;
	}
      else
	{
	  /* Scalar form: a GPR in the low four bits.  SP only draws a
	     warning, not an error.  */
	  if (inst.operands[2].reg == REG_SP)
	    as_tsktsk (MVE_BAD_SP);
	  inst.instruction |= 1 << 6;
	  inst.instruction |= (fcond & 0x2) << 4;
	  inst.instruction |= inst.operands[2].reg;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
      inst.instruction |= (fcond & 0x4) << 10;
      inst.instruction |= (fcond & 0x1) << 7;

    }
    set_pred_insn_type (VPT_INSN);
    now_pred.cc = 0;
    /* Seed the predication mask from the mask bits just encoded.  */
    now_pred.mask = ((inst.instruction & 0x00400000) >> 19)
		    | ((inst.instruction & 0xe000) >> 13);
    now_pred.warn_deprecated = FALSE;
    now_pred.type = VECTOR_PRED;
    inst.is_neon = 1;
}
15994
static void
do_mve_vcmp (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
  if (!inst.operands[1].isreg || !inst.operands[1].isquad)
    first_error (_(reg_expected_msgs[REG_TYPE_MQ]));
  if (!inst.operands[2].present)
    first_error (_("MVE vector or ARM register expected"));
  constraint (inst.operands[1].reg > 14, MVE_BAD_QREG);

  /* Deal with 'else' conditional MVE's vcmp, it will be parsed as vcmpe.  */
  if ((inst.instruction & 0xffffffff) == N_MNEM_vcmpe
      && inst.operands[1].isquad)
    {
      inst.instruction = N_MNEM_vcmp;
      inst.cond = 0x10;
    }

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  enum neon_shape rs = neon_select_shape (NS_IQQ, NS_IQR, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_KEY | N_F_MVE | N_I_MVE | N_SU_32,
		       N_EQK);

  /* In the vector-scalar form PC is only permitted as ZR.  */
  constraint (rs == NS_IQR && inst.operands[2].reg == REG_PC
	      && !inst.operands[2].iszr, BAD_PC);

  unsigned fcond = mve_get_vcmp_vpt_cond (et);

  /* Base opcode for MVE VCMP; condition, size and register fields are
     ORed in below.  */
  inst.instruction = 0xee010f00;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= (fcond & 0x4) << 10;
  inst.instruction |= (fcond & 0x1) << 7;
  if (et.type == NT_float)
    {
      /* f16/f32 comparisons need the MVE FP extension; bit 28 selects the
	 f16 variant.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext),
		  BAD_FPU);
      inst.instruction |= (et.size == 16) << 28;
      inst.instruction |= 0x3 << 20;
    }
  else
    {
      inst.instruction |= 1 << 28;
      inst.instruction |= neon_logbits (et.size) << 20;
    }
  if (inst.operands[2].isquad)
    {
      /* Vector form: Qm in the split HI1/LOW4 register fields.  */
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      inst.instruction |= (fcond & 0x2) >> 1;
      inst.instruction |= LOW4 (inst.operands[2].reg);
    }
  else
    {
      /* Scalar form: SP only draws a warning, not an error.  */
      if (inst.operands[2].reg == REG_SP)
	as_tsktsk (MVE_BAD_SP);
      inst.instruction |= 1 << 6;
      inst.instruction |= (fcond & 0x2) << 4;
      inst.instruction |= inst.operands[2].reg;
    }

  inst.is_neon = 1;
  return;
}
16062
16063static void
16064do_mve_vmaxa_vmina (void)
16065{
16066  if (inst.cond > COND_ALWAYS)
16067    inst.pred_insn_type = INSIDE_VPT_INSN;
16068  else
16069    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16070
16071  enum neon_shape rs = neon_select_shape (NS_QQ, NS_NULL);
16072  struct neon_type_el et
16073    = neon_check_type (2, rs, N_EQK, N_KEY | N_S8 | N_S16 | N_S32);
16074
16075  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16076  inst.instruction |= neon_logbits (et.size) << 18;
16077  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16078  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16079  inst.instruction |= LOW4 (inst.operands[1].reg);
16080  inst.is_neon = 1;
16081}
16082
16083static void
16084do_mve_vfmas (void)
16085{
16086  enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
16087  struct neon_type_el et
16088    = neon_check_type (3, rs, N_F_MVE | N_KEY, N_EQK, N_EQK);
16089
16090  if (inst.cond > COND_ALWAYS)
16091    inst.pred_insn_type = INSIDE_VPT_INSN;
16092  else
16093    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16094
16095  if (inst.operands[2].reg == REG_SP)
16096    as_tsktsk (MVE_BAD_SP);
16097  else if (inst.operands[2].reg == REG_PC)
16098    as_tsktsk (MVE_BAD_PC);
16099
16100  inst.instruction |= (et.size == 16) << 28;
16101  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16102  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16103  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16104  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16105  inst.instruction |= inst.operands[2].reg;
16106  inst.is_neon = 1;
16107}
16108
static void
do_mve_viddup (void)
{
  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* The increment must be 1, 2, 4 or 8; it is encoded in two bits below
     (bit 7 for imm > 2, bit 0 for imm == 2 or 8).  */
  unsigned imm = inst.relocs[0].exp.X_add_number;
  constraint (imm != 1 && imm != 2 && imm != 4 && imm != 8,
	      _("immediate must be either 1, 2, 4 or 8"));

  enum neon_shape rs;
  struct neon_type_el et;
  unsigned Rm;
  if (inst.instruction == M_MNEM_vddup || inst.instruction == M_MNEM_vidup)
    {
      /* Non-wrapping VIDUP/VDDUP: Qd, Rn, #imm.  The Rm field is
	 hard-wired to 7 for this form.  */
      rs = neon_select_shape (NS_QRI, NS_NULL);
      et = neon_check_type (2, rs, N_KEY | N_U8 | N_U16 | N_U32, N_EQK);
      Rm = 7;
    }
  else
    {
      /* Wrapping variants: Qd, Rn, Rm, #imm.  Rm must be odd-numbered and
	 is encoded halved; PC is an error, SP only a warning.  */
      constraint ((inst.operands[2].reg % 2) != 1, BAD_EVEN);
      if (inst.operands[2].reg == REG_SP)
	as_tsktsk (MVE_BAD_SP);
      else if (inst.operands[2].reg == REG_PC)
	first_error (BAD_PC);

      rs = neon_select_shape (NS_QRRI, NS_NULL);
      et = neon_check_type (3, rs, N_KEY | N_U8 | N_U16 | N_U32, N_EQK, N_EQK);
      Rm = inst.operands[2].reg >> 1;
    }
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= (imm > 2) << 7;
  inst.instruction |= Rm << 1;
  inst.instruction |= (imm == 2 || imm == 8);
  inst.is_neon = 1;
}
16151
16152static void
16153do_mve_vmlas (void)
16154{
16155  enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
16156  struct neon_type_el et
16157    = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_MVE | N_KEY);
16158
16159  if (inst.operands[2].reg == REG_PC)
16160    as_tsktsk (MVE_BAD_PC);
16161  else if (inst.operands[2].reg == REG_SP)
16162    as_tsktsk (MVE_BAD_SP);
16163
16164  if (inst.cond > COND_ALWAYS)
16165    inst.pred_insn_type = INSIDE_VPT_INSN;
16166  else
16167    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16168
16169  inst.instruction |= (et.type == NT_unsigned) << 28;
16170  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16171  inst.instruction |= neon_logbits (et.size) << 20;
16172  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16173  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16174  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16175  inst.instruction |= inst.operands[2].reg;
16176  inst.is_neon = 1;
16177}
16178
static void
do_mve_vshll (void)
{
  struct neon_type_el et
    = neon_check_type (2, NS_QQI, N_EQK, N_S8 | N_U8 | N_S16 | N_U16 | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* The shift must be in [1, element size].  */
  int imm = inst.operands[2].imm;
  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate value out of range"));

  if ((unsigned)imm == et.size)
    {
      /* A shift equal to the element width uses a distinct encoding with
	 only the size, in bits [19:18].  */
      inst.instruction |= neon_logbits (et.size) << 18;
      inst.instruction |= 0x110001;
    }
  else
    {
      /* Otherwise the shift is encoded as size + imm in bits [21:16].  */
      inst.instruction |= (et.size + imm) << 16;
      inst.instruction |= 0x800140;
    }

  inst.instruction |= (et.type == NT_unsigned) << 28;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.is_neon = 1;
}
16212
16213static void
16214do_mve_vshlc (void)
16215{
16216  if (inst.cond > COND_ALWAYS)
16217    inst.pred_insn_type = INSIDE_VPT_INSN;
16218  else
16219    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16220
16221  if (inst.operands[1].reg == REG_PC)
16222    as_tsktsk (MVE_BAD_PC);
16223  else if (inst.operands[1].reg == REG_SP)
16224    as_tsktsk (MVE_BAD_SP);
16225
16226  int imm = inst.operands[2].imm;
16227  constraint (imm < 1 || imm > 32, _("immediate value out of range"));
16228
16229  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16230  inst.instruction |= (imm & 0x1f) << 16;
16231  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16232  inst.instruction |= inst.operands[1].reg;
16233  inst.is_neon = 1;
16234}
16235
static void
do_mve_vshrn (void)
{
  unsigned types;
  /* Each narrowing-shift family accepts a different set of element types.  */
  switch (inst.instruction)
    {
    case M_MNEM_vshrnt:
    case M_MNEM_vshrnb:
    case M_MNEM_vrshrnt:
    case M_MNEM_vrshrnb:
      types = N_I16 | N_I32;
      break;
    case M_MNEM_vqshrnt:
    case M_MNEM_vqshrnb:
    case M_MNEM_vqrshrnt:
    case M_MNEM_vqrshrnb:
      types = N_U16 | N_U32 | N_S16 | N_S32;
      break;
    case M_MNEM_vqshrunt:
    case M_MNEM_vqshrunb:
    case M_MNEM_vqrshrunt:
    case M_MNEM_vqrshrunb:
      types = N_S16 | N_S32;
      break;
    default:
      abort ();
    }

  struct neon_type_el et = neon_check_type (2, NS_QQI, N_EQK, types | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  unsigned Qd = inst.operands[0].reg;
  unsigned Qm = inst.operands[1].reg;
  unsigned imm = inst.operands[2].imm;
  /* The shift count must be in [1, element size / 2].  */
  constraint (imm < 1 || ((unsigned) imm) > (et.size / 2),
	      et.size == 16
	      ? _("immediate operand expected in the range [1,8]")
	      : _("immediate operand expected in the range [1,16]"));

  inst.instruction |= (et.type == NT_unsigned) << 28;
  inst.instruction |= HI1 (Qd) << 22;
  /* The shift amount is encoded as (size - imm) in bits [21:16].  */
  inst.instruction |= (et.size - imm) << 16;
  inst.instruction |= LOW4 (Qd) << 12;
  inst.instruction |= HI1 (Qm) << 5;
  inst.instruction |= LOW4 (Qm);
  inst.is_neon = 1;
}
16287
16288static void
16289do_mve_vqmovn (void)
16290{
16291  struct neon_type_el et;
16292  if (inst.instruction == M_MNEM_vqmovnt
16293     || inst.instruction == M_MNEM_vqmovnb)
16294    et = neon_check_type (2, NS_QQ, N_EQK,
16295			  N_U16 | N_U32 | N_S16 | N_S32 | N_KEY);
16296  else
16297    et = neon_check_type (2, NS_QQ, N_EQK, N_S16 | N_S32 | N_KEY);
16298
16299  if (inst.cond > COND_ALWAYS)
16300    inst.pred_insn_type = INSIDE_VPT_INSN;
16301  else
16302    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16303
16304  inst.instruction |= (et.type == NT_unsigned) << 28;
16305  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16306  inst.instruction |= (et.size == 32) << 18;
16307  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16308  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16309  inst.instruction |= LOW4 (inst.operands[1].reg);
16310  inst.is_neon = 1;
16311}
16312
16313static void
16314do_mve_vpsel (void)
16315{
16316  neon_select_shape (NS_QQQ, NS_NULL);
16317
16318  if (inst.cond > COND_ALWAYS)
16319    inst.pred_insn_type = INSIDE_VPT_INSN;
16320  else
16321    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16322
16323  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16324  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16325  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16326  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16327  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16328  inst.instruction |= LOW4 (inst.operands[2].reg);
16329  inst.is_neon = 1;
16330}
16331
16332static void
16333do_mve_vpnot (void)
16334{
16335  if (inst.cond > COND_ALWAYS)
16336    inst.pred_insn_type = INSIDE_VPT_INSN;
16337  else
16338    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16339}
16340
16341static void
16342do_mve_vmaxnma_vminnma (void)
16343{
16344  enum neon_shape rs = neon_select_shape (NS_QQ, NS_NULL);
16345  struct neon_type_el et
16346    = neon_check_type (2, rs, N_EQK, N_F_MVE | N_KEY);
16347
16348  if (inst.cond > COND_ALWAYS)
16349    inst.pred_insn_type = INSIDE_VPT_INSN;
16350  else
16351    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16352
16353  inst.instruction |= (et.size == 16) << 28;
16354  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16355  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16356  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16357  inst.instruction |= LOW4 (inst.operands[1].reg);
16358  inst.is_neon = 1;
16359}
16360
static void
do_mve_vcmul (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQI, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_F_MVE | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* The rotation is given in degrees and must be 0, 90, 180 or 270.  */
  unsigned rot = inst.relocs[0].exp.X_add_number;
  constraint (rot != 0 && rot != 90 && rot != 180 && rot != 270,
	      _("immediate out of range"));

  /* For the f32 variant, a destination overlapping a source register is
     only warned about, not rejected.  */
  if (et.size == 32 && (inst.operands[0].reg == inst.operands[1].reg
			|| inst.operands[0].reg == inst.operands[2].reg))
    as_tsktsk (BAD_MVE_SRCDEST);

  inst.instruction |= (et.size == 32) << 28;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  /* Rotation is encoded in two bits: bit 12 set for 180/270, bit 0 set
     for 90/270.  */
  inst.instruction |= (rot > 90) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= (rot == 90 || rot == 270);
  inst.is_neon = 1;
}
16392
/* To handle the Low Overhead Loop instructions
   in Armv8.1-M Mainline and MVE.  */
static void
do_t_loloop (void)
{
  unsigned long insn = inst.instruction;

  inst.instruction = THUMB_OP32 (inst.instruction);

  /* LCTP needs no operands or relocations.  */
  if (insn == T_MNEM_lctp)
    return;

  set_pred_insn_type (MVE_OUTSIDE_PRED_INSN);

  /* The tail-predicated forms take an element size suffix that is encoded
     in bits [21:20].  */
  if (insn == T_MNEM_wlstp || insn == T_MNEM_dlstp)
    {
      struct neon_type_el et
       = neon_check_type (2, NS_RR, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
      inst.instruction |= neon_logbits (et.size) << 20;
      inst.is_neon = 1;
    }

  switch (insn)
    {
    case T_MNEM_letp:
      constraint (!inst.operands[0].present,
		  _("expected LR"));
      /* fall through.  */
    case T_MNEM_le:
      /* le <label>.  */
      if (!inst.operands[0].present)
       inst.instruction |= 1 << 21;

      v8_1_loop_reloc (TRUE);
      break;

    case T_MNEM_wls:
    case T_MNEM_wlstp:
      v8_1_loop_reloc (FALSE);
      /* fall through.  */
    case T_MNEM_dlstp:
    case T_MNEM_dls:
      constraint (inst.operands[1].isreg != 1, BAD_ARGS);

      /* PC is an error for the tail-predicated forms and only a warning
	 otherwise; SP always draws a warning.  */
      if (insn == T_MNEM_wlstp || insn == T_MNEM_dlstp)
       constraint (inst.operands[1].reg == REG_PC, BAD_PC);
      else if (inst.operands[1].reg == REG_PC)
       as_tsktsk (MVE_BAD_PC);
      if (inst.operands[1].reg == REG_SP)
       as_tsktsk (MVE_BAD_SP);

      inst.instruction |= (inst.operands[1].reg << 16);
      break;

    default:
      abort ();
    }
}
16451
16452
static void
do_vfp_nsyn_cmp (void)
{
  enum neon_shape rs;
  /* A non-register first operand means this is really an MVE VCMP.  */
  if (!inst.operands[0].isreg)
    {
      do_mve_vcmp ();
      return;
    }
  else
    {
      constraint (inst.operands[2].present, BAD_SYNTAX);
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd),
		  BAD_FPU);
    }

  if (inst.operands[1].isreg)
    {
      /* vcmp/vcmpe between two registers.  */
      rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);

      if (rs == NS_FF || rs == NS_HH)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_monadic ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd_rm ();
	}
    }
  else
    {
      /* vcmp/vcmpe against an immediate: rewrite the pseudo-opcode to the
	 compare-with-zero form.  */
      rs = neon_select_shape (NS_HI, NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F_ALL | N_KEY | N_VFP, N_EQK);

      switch (inst.instruction & 0x0fffffff)
	{
	case N_MNEM_vcmp:
	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
	  break;
	case N_MNEM_vcmpe:
	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
	  break;
	default:
	  abort ();
	}

      if (rs == NS_FI || rs == NS_HI)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_compare_z ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd ();
	}
    }
  do_vfp_cond_or_thumb ();

  /* ARMv8.2 fp16 instruction.  */
  if (rs == NS_HI || rs == NS_HH)
    do_scalar_fp16_v82_encode ();
}
16519
16520static void
16521nsyn_insert_sp (void)
16522{
16523  inst.operands[1] = inst.operands[0];
16524  memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
16525  inst.operands[0].reg = REG_SP;
16526  inst.operands[0].isreg = 1;
16527  inst.operands[0].writeback = 1;
16528  inst.operands[0].present = 1;
16529}
16530
16531/* Fix up Neon data-processing instructions, ORing in the correct bits for
16532   ARM mode or Thumb mode and moving the encoded bit 24 to bit 28.  */
16533
16534static void
16535neon_dp_fixup (struct arm_it* insn)
16536{
16537  unsigned int i = insn->instruction;
16538  insn->is_neon = 1;
16539
16540  if (thumb_mode)
16541    {
16542      /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode.  */
16543      if (i & (1 << 24))
16544	i |= 1 << 28;
16545
16546      i &= ~(1 << 24);
16547
16548      i |= 0xef000000;
16549    }
16550  else
16551    i |= 0xf2000000;
16552
16553  insn->instruction = i;
16554}
16555
/* Encode an MVE instruction of the Qd, Qn, Rm (vector, vector, scalar)
   form.  The pseudo-opcode left in inst.instruction by the parser selects
   the mnemonic; it is replaced wholesale with the real vector-scalar
   encoding.  SIZE is the element size, U the unsigned bit, FP non-zero for
   the floating-point variants.  */
static void
mve_encode_qqr (int size, int U, int fp)
{
  /* SP and PC as the scalar operand only draw warnings.  */
  if (inst.operands[2].reg == REG_SP)
    as_tsktsk (MVE_BAD_SP);
  else if (inst.operands[2].reg == REG_PC)
    as_tsktsk (MVE_BAD_PC);

  if (fp)
    {
      /* vadd.  */
      if (((unsigned)inst.instruction) == 0xd00)
	inst.instruction = 0xee300f40;
      /* vsub.  */
      else if (((unsigned)inst.instruction) == 0x200d00)
	inst.instruction = 0xee301f40;
      /* vmul.  */
      else if (((unsigned)inst.instruction) == 0x1000d10)
	inst.instruction = 0xee310e60;

      /* Setting size which is 1 for F16 and 0 for F32.  */
      inst.instruction |= (size == 16) << 28;
    }
  else
    {
      /* vadd.  */
      if (((unsigned)inst.instruction) == 0x800)
	inst.instruction = 0xee010f40;
      /* vsub.  */
      else if (((unsigned)inst.instruction) == 0x1000800)
	inst.instruction = 0xee011f40;
      /* vhadd.  */
      else if (((unsigned)inst.instruction) == 0)
	inst.instruction = 0xee000f40;
      /* vhsub.  */
      else if (((unsigned)inst.instruction) == 0x200)
	inst.instruction = 0xee001f40;
      /* vmla.  */
      else if (((unsigned)inst.instruction) == 0x900)
	inst.instruction = 0xee010e40;
      /* vmul.  */
      else if (((unsigned)inst.instruction) == 0x910)
	inst.instruction = 0xee011e60;
      /* vqadd.  */
      else if (((unsigned)inst.instruction) == 0x10)
	inst.instruction = 0xee000f60;
      /* vqsub.  */
      else if (((unsigned)inst.instruction) == 0x210)
	inst.instruction = 0xee001f60;
      /* vqrdmlah.  */
      else if (((unsigned)inst.instruction) == 0x3000b10)
	inst.instruction = 0xee000e40;
      /* vqdmulh.  */
      else if (((unsigned)inst.instruction) == 0x0000b00)
	inst.instruction = 0xee010e60;
      /* vqrdmulh.  */
      else if (((unsigned)inst.instruction) == 0x1000b00)
	inst.instruction = 0xfe010e60;

      /* Set U-bit.  */
      inst.instruction |= U << 28;

      /* Setting bits for size.  */
      inst.instruction |= neon_logbits (size) << 20;
    }
  /* Qd, Qn and Rm register fields, common to all variants.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= inst.operands[2].reg;
  inst.is_neon = 1;
}
16628
16629static void
16630mve_encode_rqq (unsigned bit28, unsigned size)
16631{
16632  inst.instruction |= bit28 << 28;
16633  inst.instruction |= neon_logbits (size) << 20;
16634  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16635  inst.instruction |= inst.operands[0].reg << 12;
16636  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16637  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16638  inst.instruction |= LOW4 (inst.operands[2].reg);
16639  inst.is_neon = 1;
16640}
16641
16642static void
16643mve_encode_qqq (int ubit, int size)
16644{
16645
16646  inst.instruction |= (ubit != 0) << 28;
16647  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16648  inst.instruction |= neon_logbits (size) << 20;
16649  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16650  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16651  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16652  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16653  inst.instruction |= LOW4 (inst.operands[2].reg);
16654
16655  inst.is_neon = 1;
16656}
16657
16658static void
16659mve_encode_rq (unsigned bit28, unsigned size)
16660{
16661  inst.instruction |= bit28 << 28;
16662  inst.instruction |= neon_logbits (size) << 18;
16663  inst.instruction |= inst.operands[0].reg << 12;
16664  inst.instruction |= LOW4 (inst.operands[1].reg);
16665  inst.is_neon = 1;
16666}
16667
16668static void
16669mve_encode_rrqq (unsigned U, unsigned size)
16670{
16671  constraint (inst.operands[3].reg > 14, MVE_BAD_QREG);
16672
16673  inst.instruction |= U << 28;
16674  inst.instruction |= (inst.operands[1].reg >> 1) << 20;
16675  inst.instruction |= LOW4 (inst.operands[2].reg) << 16;
16676  inst.instruction |= (size == 32) << 16;
16677  inst.instruction |= inst.operands[0].reg << 12;
16678  inst.instruction |= HI1 (inst.operands[2].reg) << 7;
16679  inst.instruction |= inst.operands[3].reg;
16680  inst.is_neon = 1;
16681}
16682
16683/* Helper function for neon_three_same handling the operands.  */
16684static void
16685neon_three_args (int isquad)
16686{
16687  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16688  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16689  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16690  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16691  inst.instruction |= LOW4 (inst.operands[2].reg);
16692  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16693  inst.instruction |= (isquad != 0) << 6;
16694  inst.is_neon = 1;
16695}
16696
16697/* Encode insns with bit pattern:
16698
16699  |28/24|23|22 |21 20|19 16|15 12|11    8|7|6|5|4|3  0|
16700  |  U  |x |D  |size | Rn  | Rd  |x x x x|N|Q|M|x| Rm |
16701
16702  SIZE is passed in bits. -1 means size field isn't changed, in case it has a
16703  different meaning for some instruction.  */
16704
16705static void
16706neon_three_same (int isquad, int ubit, int size)
16707{
16708  neon_three_args (isquad);
16709  inst.instruction |= (ubit != 0) << 24;
16710  if (size != -1)
16711    inst.instruction |= neon_logbits (size) << 20;
16712
16713  neon_dp_fixup (&inst);
16714}
16715
16716/* Encode instructions of the form:
16717
16718  |28/24|23|22|21 20|19 18|17 16|15 12|11      7|6|5|4|3  0|
16719  |  U  |x |D |x  x |size |x  x | Rd  |x x x x x|Q|M|x| Rm |
16720
16721  Don't write size if SIZE == -1.  */
16722
16723static void
16724neon_two_same (int qbit, int ubit, int size)
16725{
16726  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16727  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16728  inst.instruction |= LOW4 (inst.operands[1].reg);
16729  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16730  inst.instruction |= (qbit != 0) << 6;
16731  inst.instruction |= (ubit != 0) << 24;
16732
16733  if (size != -1)
16734    inst.instruction |= neon_logbits (size) << 18;
16735
16736  neon_dp_fixup (&inst);
16737}
16738
/* Bitmask of checks for vfp_or_neon_is_neon to perform; see the comment
   above that function for the details of each.  */
enum vfp_or_neon_is_neon_bits
{
NEON_CHECK_CC = 1,	/* Reject/adjust the condition code field.  */
NEON_CHECK_ARCH = 2,	/* Require the Neon v1 feature.  */
NEON_CHECK_ARCH8 = 4	/* Require the ARMv8 Neon feature.  */
};
16745
16746/* Call this function if an instruction which may have belonged to the VFP or
16747 Neon instruction sets, but turned out to be a Neon instruction (due to the
16748 operand types involved, etc.). We have to check and/or fix-up a couple of
16749 things:
16750
16751   - Make sure the user hasn't attempted to make a Neon instruction
16752     conditional.
16753   - Alter the value in the condition code field if necessary.
16754   - Make sure that the arch supports Neon instructions.
16755
16756 Which of these operations take place depends on bits from enum
16757 vfp_or_neon_is_neon_bits.
16758
16759 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
16760 current instruction's condition is COND_ALWAYS, the condition field is
16761 changed to inst.uncond_value.  This is necessary because instructions shared
16762 between VFP and Neon may be conditional for the VFP variants only, and the
16763 unconditional Neon version must have, e.g., 0xF in the condition field.  */
16764
16765static int
16766vfp_or_neon_is_neon (unsigned check)
16767{
16768/* Conditions are always legal in Thumb mode (IT blocks).  */
16769if (!thumb_mode && (check & NEON_CHECK_CC))
16770  {
16771    if (inst.cond != COND_ALWAYS)
16772      {
16773	first_error (_(BAD_COND));
16774	return FAIL;
16775      }
16776    if (inst.uncond_value != -1)
16777      inst.instruction |= inst.uncond_value << 28;
16778  }
16779
16780
16781  if (((check & NEON_CHECK_ARCH) && !mark_feature_used (&fpu_neon_ext_v1))
16782      || ((check & NEON_CHECK_ARCH8)
16783	  && !mark_feature_used (&fpu_neon_ext_armv8)))
16784    {
16785      first_error (_(BAD_FPU));
16786      return FAIL;
16787    }
16788
16789return SUCCESS;
16790}
16791
16792
/* Return TRUE if the SIMD instruction is available for the current
   cpu_variant.  FP is set to TRUE if this is a SIMD floating-point
   instruction.  CHECK contains the set of bits to pass to
   vfp_or_neon_is_neon for the NEON specific checks.  */
16797
16798static bfd_boolean
16799check_simd_pred_availability (int fp, unsigned check)
16800{
16801if (inst.cond > COND_ALWAYS)
16802  {
16803    if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
16804      {
16805	inst.error = BAD_FPU;
16806	return FALSE;
16807      }
16808    inst.pred_insn_type = INSIDE_VPT_INSN;
16809  }
16810else if (inst.cond < COND_ALWAYS)
16811  {
16812    if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
16813      inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16814    else if (vfp_or_neon_is_neon (check) == FAIL)
16815      return FALSE;
16816  }
16817else
16818  {
16819    if (!ARM_CPU_HAS_FEATURE (cpu_variant, fp ? mve_fp_ext : mve_ext)
16820	&& vfp_or_neon_is_neon (check) == FAIL)
16821      return FALSE;
16822
16823    if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
16824      inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16825  }
16826return TRUE;
16827}
16828
16829/* Neon instruction encoders, in approximate order of appearance.  */
16830
16831static void
16832do_neon_dyadic_i_su (void)
16833{
16834  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
16835   return;
16836
16837  enum neon_shape rs;
16838  struct neon_type_el et;
16839  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
16840    rs = neon_select_shape (NS_QQQ, NS_QQR, NS_NULL);
16841  else
16842    rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
16843
16844  et = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_32 | N_KEY);
16845
16846
16847  if (rs != NS_QQR)
16848    neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
16849  else
16850    mve_encode_qqr (et.size, et.type == NT_unsigned, 0);
16851}
16852
16853static void
16854do_neon_dyadic_i64_su (void)
16855{
16856  if (!check_simd_pred_availability (FALSE, NEON_CHECK_CC | NEON_CHECK_ARCH))
16857    return;
16858  enum neon_shape rs;
16859  struct neon_type_el et;
16860  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
16861    {
16862      rs = neon_select_shape (NS_QQR, NS_QQQ, NS_NULL);
16863      et = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_MVE | N_KEY);
16864    }
16865  else
16866    {
16867      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
16868      et = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_ALL | N_KEY);
16869    }
16870  if (rs == NS_QQR)
16871    mve_encode_qqr (et.size, et.type == NT_unsigned, 0);
16872  else
16873    neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
16874}
16875
/* Encode a Neon shift-by-immediate instruction.  IMMBITS is the already
   biased immediate for bits [21:16].  The element size, converted to
   bytes, supplies bit 7 (set only for 64-bit elements) and bits [21:19]
   (marking where the per-size immediate field begins).  When WRITE_UBIT
   is set, UVAL selects the U bit (bit 24).  */

static void
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
		unsigned immbits)
{
  /* Element size in bytes: 1, 2, 4 or 8.  */
  unsigned size = et.size >> 3;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= immbits << 16;
  /* Nonzero only for 8-byte (64-bit) elements.  */
  inst.instruction |= (size >> 3) << 7;
  inst.instruction |= (size & 0x7) << 19;
  if (write_ubit)
    inst.instruction |= (uval != 0) << 24;

  neon_dp_fixup (&inst);
}
16894
/* Encode VSHL: immediate form, Neon/MVE three-register form, or the MVE
   vector-by-scalar form.  */

static void
do_neon_shl (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
   return;

  if (!inst.operands[2].isreg)
    {
      /* Shift by immediate.  */
      enum neon_shape rs;
      struct neon_type_el et;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  rs = neon_select_shape (NS_QQI, NS_NULL);
	  et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_MVE);
	}
      else
	{
	  rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
	  et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
	}
      int imm = inst.operands[2].imm;

      /* A left shift must be strictly less than the element width.  */
      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
    }
  else
    {
      /* Shift by register.  */
      enum neon_shape rs;
      struct neon_type_el et;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  rs = neon_select_shape (NS_QQQ, NS_QQR, NS_NULL);
	  et = neon_check_type (3, rs, N_EQK, N_SU_MVE | N_KEY, N_EQK | N_EQK);
	}
      else
	{
	  rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
	  et = neon_check_type (3, rs, N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
	}


      if (rs == NS_QQR)
	{
	  /* MVE vector-by-scalar VSHL: destination must equal the first
	     source, and SP/PC scalars are merely deprecated.  */
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		       _("invalid instruction shape"));
	  if (inst.operands[2].reg == REG_SP)
	    as_tsktsk (MVE_BAD_SP);
	  else if (inst.operands[2].reg == REG_PC)
	    as_tsktsk (MVE_BAD_PC);

	  inst.instruction = 0xee311e60;
	  inst.instruction |= (et.type == NT_unsigned) << 28;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= neon_logbits (et.size) << 18;
	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= inst.operands[2].reg;
	  inst.is_neon = 1;
	}
      else
	{
	  unsigned int tmp;

	  /* VSHL/VQSHL 3-register variants have syntax such as:
	       vshl.xx Dd, Dm, Dn
	     whereas other 3-register operations encoded by neon_three_same have
	     syntax like:
	       vadd.xx Dd, Dn, Dm
	     (i.e. with Dn & Dm reversed). Swap operands[1].reg and
	     operands[2].reg here.  */
	  tmp = inst.operands[2].reg;
	  inst.operands[2].reg = inst.operands[1].reg;
	  inst.operands[1].reg = tmp;
	  NEON_ENCODE (INTEGER, inst);
	  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
	}
    }
}
16974
/* Encode VQSHL: immediate form, Neon/MVE three-register form, or the
   MVE vector-by-scalar form.  Mirrors do_neon_shl but writes the U bit
   for the immediate form and uses the saturating scalar opcode.  */

static void
do_neon_qshl (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
   return;

  if (!inst.operands[2].isreg)
    {
      /* Shift by immediate.  */
      enum neon_shape rs;
      struct neon_type_el et;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  rs = neon_select_shape (NS_QQI, NS_NULL);
	  et = neon_check_type (2, rs, N_EQK, N_KEY | N_SU_MVE);
	}
      else
	{
	  rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
	  et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
	}
      int imm = inst.operands[2].imm;

      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, imm);
    }
  else
    {
      /* Shift by register.  */
      enum neon_shape rs;
      struct neon_type_el et;

      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  rs = neon_select_shape (NS_QQQ, NS_QQR, NS_NULL);
	  et = neon_check_type (3, rs, N_EQK, N_SU_MVE | N_KEY, N_EQK | N_EQK);
	}
      else
	{
	  rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
	  et = neon_check_type (3, rs, N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
	}

      if (rs == NS_QQR)
	{
	  /* MVE vector-by-scalar VQSHL: destination must equal the first
	     source, and SP/PC scalars are merely deprecated.  */
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		       _("invalid instruction shape"));
	  if (inst.operands[2].reg == REG_SP)
	    as_tsktsk (MVE_BAD_SP);
	  else if (inst.operands[2].reg == REG_PC)
	    as_tsktsk (MVE_BAD_PC);

	  inst.instruction = 0xee311ee0;
	  inst.instruction |= (et.type == NT_unsigned) << 28;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= neon_logbits (et.size) << 18;
	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= inst.operands[2].reg;
	  inst.is_neon = 1;
	}
      else
	{
	  unsigned int tmp;

	  /* See note in do_neon_shl.  */
	  tmp = inst.operands[2].reg;
	  inst.operands[2].reg = inst.operands[1].reg;
	  inst.operands[1].reg = tmp;
	  NEON_ENCODE (INTEGER, inst);
	  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
	}
    }
}
17048
/* Encode VRSHL and VQRSHL (distinguished by the opcode template),
   handling both the MVE vector-by-scalar form and the Neon/MVE
   three-register form.  */

static void
do_neon_rshl (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
   return;

  enum neon_shape rs;
  struct neon_type_el et;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      rs = neon_select_shape (NS_QQR, NS_QQQ, NS_NULL);
      et = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_MVE | N_KEY);
    }
  else
    {
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      et = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_ALL | N_KEY);
    }

  unsigned int tmp;

  if (rs == NS_QQR)
    {
      /* SP/PC scalar operands are deprecated, not invalid.  */
      if (inst.operands[2].reg == REG_PC)
	as_tsktsk (MVE_BAD_PC);
      else if (inst.operands[2].reg == REG_SP)
	as_tsktsk (MVE_BAD_SP);

      constraint (inst.operands[0].reg != inst.operands[1].reg,
		  _("invalid instruction shape"));

      if (inst.instruction == 0x0000510)
	/* We are dealing with vqrshl.  */
	inst.instruction = 0xee331ee0;
      else
	/* We are dealing with vrshl.  */
	inst.instruction = 0xee331e60;

      inst.instruction |= (et.type == NT_unsigned) << 28;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= neon_logbits (et.size) << 18;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= inst.operands[2].reg;
      inst.is_neon = 1;
    }
  else
    {
      /* Shift instructions take their operands in the reverse order to
	 other three-same instructions; see note in do_neon_shl.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
17102
17103static int
17104neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
17105{
17106  /* Handle .I8 pseudo-instructions.  */
17107  if (size == 8)
17108    {
17109      /* Unfortunately, this will make everything apart from zero out-of-range.
17110	 FIXME is this the intended semantics? There doesn't seem much point in
17111	 accepting .I8 if so.  */
17112      immediate |= immediate << 8;
17113      size = 16;
17114    }
17115
17116  if (size >= 32)
17117    {
17118      if (immediate == (immediate & 0x000000ff))
17119	{
17120	  *immbits = immediate;
17121	  return 0x1;
17122	}
17123      else if (immediate == (immediate & 0x0000ff00))
17124	{
17125	  *immbits = immediate >> 8;
17126	  return 0x3;
17127	}
17128      else if (immediate == (immediate & 0x00ff0000))
17129	{
17130	  *immbits = immediate >> 16;
17131	  return 0x5;
17132	}
17133      else if (immediate == (immediate & 0xff000000))
17134	{
17135	  *immbits = immediate >> 24;
17136	  return 0x7;
17137	}
17138      if ((immediate & 0xffff) != (immediate >> 16))
17139	goto bad_immediate;
17140      immediate &= 0xffff;
17141    }
17142
17143  if (immediate == (immediate & 0x000000ff))
17144    {
17145      *immbits = immediate;
17146      return 0x9;
17147    }
17148  else if (immediate == (immediate & 0x0000ff00))
17149    {
17150      *immbits = immediate >> 8;
17151      return 0xb;
17152    }
17153
17154  bad_immediate:
17155  first_error (_("immediate value out of range"));
17156  return FAIL;
17157}
17158
/* Encode VAND, VBIC, VORR, VORN, VEOR and friends.  With a register
   third operand these are bitwise three-same operations; with an
   immediate, VBIC/VORR are encoded directly while VAND/VORN become
   VBIC/VORR of the inverted immediate.  */

static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      /* Three-register form.  */
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      if (rs == NS_QQQ
	  && !check_simd_pred_availability (FALSE,
					    NEON_CHECK_ARCH | NEON_CHECK_CC))
	return;
      else if (rs != NS_QQQ
	       && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
	first_error (BAD_FPU);

      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      /* Immediate form, either two or three operands.  */
      const int three_ops_form = (inst.operands[2].present
				  && !inst.operands[2].isreg);
      const int immoperand = (three_ops_form ? 2 : 1);
      enum neon_shape rs = (three_ops_form
			    ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
			    : neon_select_shape (NS_DI, NS_QI, NS_NULL));
      /* Because neon_select_shape makes the second operand a copy of the first
	 if the second operand is not present.  */
      if (rs == NS_QQI
	  && !check_simd_pred_availability (FALSE,
					    NEON_CHECK_ARCH | NEON_CHECK_CC))
	return;
      else if (rs != NS_QQI
	       && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
	first_error (BAD_FPU);

      struct neon_type_el et;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	et = neon_check_type (2, rs, N_I32 | N_I16 | N_KEY, N_EQK);
      else
	et = neon_check_type (2, rs, N_I8 | N_I16 | N_I32 | N_I64 | N_F32
			      | N_KEY, N_EQK);

      if (et.type == NT_invtype)
	return;
      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;


      if (three_ops_form)
	constraint (inst.operands[0].reg != inst.operands[1].reg,
		    _("first and second operands shall be the same register"));

      NEON_ENCODE (IMMED, inst);

      immbits = inst.operands[immoperand].imm;
      if (et.size == 64)
	{
	  /* .i64 is a pseudo-op, so the immediate must be a repeating
	     pattern.  */
	  if (immbits != (inst.operands[immoperand].regisimm ?
			  inst.operands[immoperand].reg : 0))
	    {
	      /* Set immbits to an invalid constant.  */
	      immbits = 0xdeadbeef;
	    }
	}

      switch (opcode)
	{
	case N_MNEM_vbic:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorr:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vand:
	  /* Pseudo-instruction for VBIC.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorn:
	  /* Pseudo-instruction for VORR.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	default:
	  abort ();
	}

      if (cmode == FAIL)
	return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      neon_dp_fixup (&inst);
    }
}
17267
17268static void
17269do_neon_bitfield (void)
17270{
17271  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
17272  neon_check_type (3, rs, N_IGNORE_TYPE);
17273  neon_three_same (neon_quad (rs), 0, -1);
17274}
17275
17276static void
17277neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
17278		  unsigned destbits)
17279{
17280  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_QQR, NS_NULL);
17281  struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
17282					    types | N_KEY);
17283  if (et.type == NT_float)
17284    {
17285      NEON_ENCODE (FLOAT, inst);
17286      if (rs == NS_QQR)
17287	mve_encode_qqr (et.size, 0, 1);
17288      else
17289	neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
17290    }
17291  else
17292    {
17293      NEON_ENCODE (INTEGER, inst);
17294      if (rs == NS_QQR)
17295	mve_encode_qqr (et.size, et.type == ubit_meaning, 0);
17296      else
17297	neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
17298    }
17299}
17300
17301
/* Encode dyadic operations taking signed, unsigned or F32 32-bit-max
   elements; the U bit reflects an unsigned element type.  */
static void
do_neon_dyadic_if_su_d (void)
{
  /* This version only allow D registers, but that constraint is enforced during
     operand parsing so we don't need to do anything extra here.  */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
17309
/* Encode dyadic operations taking integer or F32 32-bit-max elements,
   with the U bit never set for integer types.  */
static void
do_neon_dyadic_if_i_d (void)
{
  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
17317
/* Encode the MVE VLDR/VSTR [Qn, #imm]{!} addressing form.  SIZE is the
   access size in bits, ELSIZE the element size from the type suffix,
   LOAD nonzero for VLDR.  Only 32- and 64-bit accesses with matching
   element size are valid; the immediate must be a suitably aligned
   7-bit scaled offset.  */
static void
do_mve_vstr_vldr_QI (int size, int elsize, int load)
{
  constraint (size < 32, BAD_ADDR_MODE);
  constraint (size != elsize, BAD_EL_TYPE);
  constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
  constraint (!inst.operands[1].preind, BAD_ADDR_MODE);
  constraint (load && inst.operands[0].reg == inst.operands[1].reg,
	      _("destination register and offset register may not be the"
		" same"));

  /* Split the signed offset into an add/subtract flag and a magnitude.  */
  int imm = inst.relocs[0].exp.X_add_number;
  int add = 1;
  if (imm < 0)
    {
      add = 0;
      imm = -imm;
    }
  constraint ((imm % (size / 8) != 0)
	      || imm > (0x7f << neon_logbits (size)),
	      (size == 32) ? _("immediate must be a multiple of 4 in the"
			       " range of +/-[0,508]")
			   : _("immediate must be a multiple of 8 in the"
			       " range of +/-[0,1016]"));
  inst.instruction |= 0x11 << 24;
  inst.instruction |= add << 23;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= 1 << 12;
  inst.instruction |= (size == 64) << 8;
  /* Clear the template's low byte before inserting the scaled
     immediate.  */
  inst.instruction &= 0xffffff00;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= imm >> neon_logbits (size);
}
17354
/* Encode the MVE VLDR/VSTR [Rn, Qm{, UXTW #os}] scatter/gather
   addressing form.  SIZE is the access size in bits, ELSIZE the element
   size from the type suffix, LOAD nonzero for VLDR.  */
static void
do_mve_vstr_vldr_RQ (int size, int elsize, int load)
{
    /* The offset shift, encoded in bit 5 of operands[1].imm.  */
    unsigned os = inst.operands[1].imm >> 5;
    unsigned type = inst.vectype.el[0].type;
    constraint (os != 0 && size == 8,
		_("can not shift offsets when accessing less than half-word"));
    constraint (os && os != neon_logbits (size),
		_("shift immediate must be 1, 2 or 3 for half-word, word"
		  " or double-word accesses respectively"));
    if (inst.operands[1].reg == REG_PC)
      as_tsktsk (MVE_BAD_PC);

    /* The element size must be wide enough for the access size, and
       only 8/16-bit accesses may use a narrower element.  */
    switch (size)
      {
      case 8:
	constraint (elsize >= 64, BAD_EL_TYPE);
	break;
      case 16:
	constraint (elsize < 16 || elsize >= 64, BAD_EL_TYPE);
	break;
      case 32:
      case 64:
	constraint (elsize != size, BAD_EL_TYPE);
	break;
      default:
	break;
      }
    constraint (inst.operands[1].writeback || !inst.operands[1].preind,
		BAD_ADDR_MODE);
    if (load)
      {
	constraint (inst.operands[0].reg == (inst.operands[1].imm & 0x1f),
		    _("destination register and offset register may not be"
		    " the same"));
	constraint (size == elsize && type == NT_signed, BAD_EL_TYPE);
	constraint (size != elsize && type != NT_unsigned && type != NT_signed,
		    BAD_EL_TYPE);
	/* Bit 28 distinguishes zero- from sign-extending loads.  */
	inst.instruction |= ((size == elsize) || (type == NT_unsigned)) << 28;
      }
    else
      {
	constraint (type != NT_untyped, BAD_EL_TYPE);
      }

    inst.instruction |= 1 << 23;
    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
    inst.instruction |= inst.operands[1].reg << 16;
    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
    inst.instruction |= neon_logbits (elsize) << 7;
    inst.instruction |= HI1 (inst.operands[1].imm) << 5;
    inst.instruction |= LOW4 (inst.operands[1].imm);
    inst.instruction |= !!os;
}
17409
/* Encode the MVE VLDR/VSTR [Rn, #imm]{!} / [Rn], #imm addressing form.
   SIZE is the access size in bits, ELSIZE the element size from the
   type suffix, LOAD nonzero for VLDR.  When SIZE != ELSIZE this is a
   widening load / narrowing store, which has tighter register
   constraints.  */
static void
do_mve_vstr_vldr_RI (int size, int elsize, int load)
{
  enum neon_el_type type = inst.vectype.el[0].type;

  constraint (size >= 64, BAD_ADDR_MODE);
  switch (size)
    {
    case 16:
      constraint (elsize < 16 || elsize >= 64, BAD_EL_TYPE);
      break;
    case 32:
      constraint (elsize != size, BAD_EL_TYPE);
      break;
    default:
      break;
    }
  if (load)
    {
      constraint (elsize != size && type != NT_unsigned
		  && type != NT_signed, BAD_EL_TYPE);
    }
  else
    {
      constraint (elsize != size && type != NT_untyped, BAD_EL_TYPE);
    }

  /* Split the signed offset into an add/subtract flag and a magnitude.  */
  int imm = inst.relocs[0].exp.X_add_number;
  int add = 1;
  if (imm < 0)
    {
      add = 0;
      imm = -imm;
    }

  if ((imm % (size / 8) != 0) || imm > (0x7f << neon_logbits (size)))
    {
      switch (size)
	{
	case 8:
	  constraint (1, _("immediate must be in the range of +/-[0,127]"));
	  break;
	case 16:
	  constraint (1, _("immediate must be a multiple of 2 in the"
			   " range of +/-[0,254]"));
	  break;
	case 32:
	  constraint (1, _("immediate must be a multiple of 4 in the"
			   " range of +/-[0,508]"));
	  break;
	}
    }

  if (size != elsize)
    {
      /* Widening/narrowing form: restricted to low base registers and
	 the first eight Q registers.  */
      constraint (inst.operands[1].reg > 7, BAD_HIREG);
      constraint (inst.operands[0].reg > 14,
		  _("MVE vector register in the range [Q0..Q7] expected"));
      inst.instruction |= (load && type == NT_unsigned) << 28;
      inst.instruction |= (size == 16) << 19;
      inst.instruction |= neon_logbits (elsize) << 7;
    }
  else
    {
      if (inst.operands[1].reg == REG_PC)
	as_tsktsk (MVE_BAD_PC);
      else if (inst.operands[1].reg == REG_SP && inst.operands[1].writeback)
	as_tsktsk (MVE_BAD_SP);
      inst.instruction |= 1 << 12;
      inst.instruction |= neon_logbits (size) << 7;
    }
  inst.instruction |= inst.operands[1].preind << 24;
  inst.instruction |= add << 23;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].writeback << 21;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  /* Clear the template's low bits before inserting the scaled
     immediate.  */
  inst.instruction &= 0xffffff80;
  inst.instruction |= imm >> neon_logbits (size);

}
17491
/* Top-level encoder for the MVE VLDR[BHWD]/VSTR[BHWD] family: derive
   the access size and load/store direction from the mnemonic, then
   dispatch on the parsed addressing mode.  */
static void
do_mve_vstr_vldr (void)
{
  unsigned size;
  int load = 0;

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  switch (inst.instruction)
    {
    default:
      gas_assert (0);
      break;
    case M_MNEM_vldrb:
      load = 1;
      /* fall through.  */
    case M_MNEM_vstrb:
      size = 8;
      break;
    case M_MNEM_vldrh:
      load = 1;
      /* fall through.  */
    case M_MNEM_vstrh:
      size = 16;
      break;
    case M_MNEM_vldrw:
      load = 1;
      /* fall through.  */
    case M_MNEM_vstrw:
      size = 32;
      break;
    case M_MNEM_vldrd:
      load = 1;
      /* fall through.  */
    case M_MNEM_vstrd:
      size = 64;
      break;
    }
  unsigned elsize = inst.vectype.el[0].size;

  if (inst.operands[1].isquad)
    {
      /* We are dealing with [Q, imm]{!} cases.  */
      do_mve_vstr_vldr_QI (size, elsize, load);
    }
  else
    {
      if (inst.operands[1].immisreg == 2)
	{
	  /* We are dealing with [R, Q, {UXTW #os}] cases.  */
	  do_mve_vstr_vldr_RQ (size, elsize, load);
	}
      else if (!inst.operands[1].immisreg)
	{
	  /* We are dealing with [R, Imm]{!}/[R], Imm cases.  */
	  do_mve_vstr_vldr_RI (size, elsize, load);
	}
      else
	constraint (1, BAD_ADDR_MODE);
    }

  inst.is_neon = 1;
}
17558
/* Encode an MVE VST/VLD instruction.  Only a plain pre-indexed [Rn]
   address with no immediate, no symbol and no register offset is
   accepted, and element sizes above 32 bits are rejected.  */
static void
do_mve_vst_vld (void)
{
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    return;

  constraint (!inst.operands[1].preind || inst.relocs[0].exp.X_add_symbol != 0
	      || inst.relocs[0].exp.X_add_number != 0
	      || inst.operands[1].immisreg != 0,
	      BAD_ADDR_MODE);
  constraint (inst.vectype.el[0].size > 32, BAD_EL_TYPE);
  /* PC, and SP with writeback, are discouraged but not errors.  */
  if (inst.operands[1].reg == REG_PC)
    as_tsktsk (MVE_BAD_PC);
  else if (inst.operands[1].reg == REG_SP && inst.operands[1].writeback)
    as_tsktsk (MVE_BAD_SP);


  /* These instructions are one of the "exceptions" mentioned in
     handle_pred_state.  They are MVE instructions that are not VPT compatible
     and do not accept a VPT code, thus appending such a code is a syntax
     error.  */
  if (inst.cond > COND_ALWAYS)
    first_error (BAD_SYNTAX);
  /* If we append a scalar condition code we can set this to
     MVE_OUTSIDE_PRED_INSN as it will also lead to a syntax error.  */
  else if (inst.cond < COND_ALWAYS)
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
  else
    inst.pred_insn_type = MVE_UNPREDICABLE_INSN;

  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].writeback << 21;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= neon_logbits (inst.vectype.el[0].size) << 7;
  inst.is_neon = 1;
}
17596
/* Encode MVE VADDLV (shape NS_RRQ: two GPR results plus one Q-register
   source; element type S32 or U32).  */
static void
do_mve_vaddlv (void)
{
  enum neon_shape rs = neon_select_shape (NS_RRQ, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_S32 | N_U32 | N_KEY);

  if (et.type == NT_invtype)
    first_error (BAD_EL_TYPE);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* Operand registers above 14 (i.e. Q-registers above Q7) cannot be
     encoded here.  */
  constraint (inst.operands[1].reg > 14, MVE_BAD_QREG);

  inst.instruction |= (et.type == NT_unsigned) << 28;
  inst.instruction |= inst.operands[1].reg << 19;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg;
  inst.is_neon = 1;
}
17620
/* Encode three-operand Neon/MVE instructions whose element type may be
   signed, unsigned or float, up to 32 bits (N_SUF_32).  */
static void
do_neon_dyadic_if_su (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_QQR, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK , N_EQK,
					    N_SUF_32 | N_KEY);

  /* The float forms of vmax/vmin require Neon v1.  */
  constraint ((inst.instruction == ((unsigned) N_MNEM_vmax)
	       || inst.instruction == ((unsigned) N_MNEM_vmin))
	      && et.type == NT_float
	      && !ARM_CPU_HAS_FEATURE (cpu_variant,fpu_neon_ext_v1), BAD_FPU);

  if (!check_simd_pred_availability (et.type == NT_float,
				     NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
17639
/* Encode VADD/VSUB with integer (up to 64-bit) or float element types,
   trying the VFP scalar syntax first.  */
static void
do_neon_addsub_if_i (void)
{
  if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)
      && try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
    return;

  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_QQR, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK,
					    N_EQK, N_IF_32 | N_I64 | N_KEY);

  /* The Q-register-plus-GPR shape has no 64-bit element form.  */
  constraint (rs == NS_QQR && et.size == 64, BAD_FPU);
  /* If we are parsing Q registers and the element types match MVE, which NEON
     also supports, then we must check whether this is an instruction that can
     be used by both MVE/NEON.  This distinction can be made based on whether
     they are predicated or not.  */
  if ((rs == NS_QQQ || rs == NS_QQR) && et.size != 64)
    {
      if (!check_simd_pred_availability (et.type == NT_float,
					 NEON_CHECK_ARCH | NEON_CHECK_CC))
	return;
    }
  else
    {
      /* If they are either in a D register or are using an unsupported
	 type, this must be plain Neon (not NS_QQR), so check that.  */
      if (rs != NS_QQR
	  && vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;
    }

  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
}
17674
17675/* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
17676   result to be:
17677     V<op> A,B     (A is operand 0, B is operand 2)
17678   to mean:
17679     V<op> A,B,A
17680   not:
17681     V<op> A,B,B
17682   so handle that case specially.  */
17683
17684static void
17685neon_exchange_operands (void)
17686{
17687  if (inst.operands[1].present)
17688    {
17689      void *scratch = xmalloc (sizeof (inst.operands[0]));
17690
17691      /* Swap operands[1] and operands[2].  */
17692      memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
17693      inst.operands[1] = inst.operands[2];
17694      memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
17695      free (scratch);
17696    }
17697  else
17698    {
17699      inst.operands[1] = inst.operands[2];
17700      inst.operands[2] = inst.operands[0];
17701    }
17702}
17703
/* Encode a Neon compare.  REGTYPES lists the element types permitted for
   the three-register form, IMMTYPES those for the immediate form.  When
   INVERT is nonzero, operands 1 and 2 are exchanged first so the inverted
   condition can reuse the same underlying encoding.  */
static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  if (inst.operands[2].isreg)
    {
      if (invert)
	neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      /* Immediate form: encode destination, source and type fields
	 directly.  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK | N_SIZ, immtypes | N_KEY);

      NEON_ENCODE (IMMED, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= (et.type == NT_float) << 10;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
}
17731
17732static void
17733do_neon_cmp (void)
17734{
17735  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, FALSE);
17736}
17737
17738static void
17739do_neon_cmp_inv (void)
17740{
17741  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, TRUE);
17742}
17743
/* Equality compare: both the register and immediate forms accept the
   same integer/float element types.  */
static void
do_neon_ceq (void)
{
  neon_compare (N_IF_32, N_IF_32, FALSE);
}
17749
17750/* For multiply instructions, we have the possibility of 16-bit or 32-bit
17751   scalars, which are encoded in 5 bits, M : Rm.
17752   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
17753   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
17754   index in M.
17755
17756   Dot Product instructions are similar to multiply instructions except elsize
17757   should always be 32.
17758
17759   This function translates SCALAR, which is GAS's internal encoding of indexed
17760   scalar register, to raw encoding.  There is also register and index range
17761   check based on ELSIZE.  */
17762
static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned reg = NEON_SCALAR_REG (scalar);
  unsigned idx = NEON_SCALAR_INDEX (scalar);

  /* 16-bit scalars: register in Rm[2:0], index in M:Rm[3].  */
  if (elsize == 16 && reg <= 7 && idx <= 3)
    return reg | (idx << 3);

  /* 32-bit scalars: register in Rm[3:0], index in M.  */
  if (elsize == 32 && reg <= 15 && idx <= 1)
    return reg | (idx << 4);

  /* Any other size, or an out-of-range register/index, is an error.  */
  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
17788
17789/* Encode multiply / multiply-accumulate scalar instructions.  */
17790
static void
neon_mul_mac (struct neon_type_el et, int ubit)
{
  unsigned scalar;

  /* Give a more helpful error message if we have an invalid type.  */
  if (et.type == NT_invtype)
    return;

  /* Translate GAS's internal scalar notation into the raw M:Rm encoding,
     range-checked against the element size (see neon_scalar_for_mul).  */
  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (scalar);
  inst.instruction |= HI1 (scalar) << 5;
  inst.instruction |= (et.type == NT_float) << 8;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= (ubit != 0) << 24;

  neon_dp_fixup (&inst);
}
17813
/* Encode multiply-accumulate style instructions: VFP scalar syntax first,
   then the Neon indexed-scalar form, the MVE vector-times-GPR form, or
   the plain three-register vector form.  */
static void
do_neon_mac_maybe_scalar (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
    return;

  if (!check_simd_pred_availability (FALSE, NEON_CHECK_CC | NEON_CHECK_ARCH))
    return;

  if (inst.operands[2].isscalar)
    {
      /* Indexed-scalar form is Neon-only, not MVE.  */
      constraint (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_I16 | N_I32 | N_F_16_32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else if (!inst.operands[2].isvec)
    {
      /* Q, Q, GPR form is MVE-only.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);

      enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_SU_MVE | N_KEY);

      neon_dyadic_misc (NT_unsigned, N_SU_MVE, 0);
    }
  else
    {
      constraint (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
      /* The "untyped" case can't happen.  Do this to stop the "U" bit being
	 affected if we specify unsigned args.  */
      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
    }
}
17849
17850static void
17851do_bfloat_vfma (void)
17852{
17853  constraint (!mark_feature_used (&fpu_neon_ext_armv8), _(BAD_FPU));
17854  constraint (!mark_feature_used (&arm_ext_bf16), _(BAD_BF16));
17855  enum neon_shape rs;
17856  int t_bit = 0;
17857
17858  if (inst.instruction != B_MNEM_vfmab)
17859  {
17860      t_bit = 1;
17861      inst.instruction = B_MNEM_vfmat;
17862  }
17863
17864  if (inst.operands[2].isscalar)
17865    {
17866      rs = neon_select_shape (NS_QQS, NS_NULL);
17867      neon_check_type (3, rs, N_EQK, N_EQK, N_BF16 | N_KEY);
17868
17869      inst.instruction |= (1 << 25);
17870      int index = inst.operands[2].reg & 0xf;
17871      constraint (!(index < 4), _("index must be in the range 0 to 3"));
17872      inst.operands[2].reg >>= 4;
17873      constraint (!(inst.operands[2].reg < 8),
17874		  _("indexed register must be less than 8"));
17875      neon_three_args (t_bit);
17876      inst.instruction |= ((index & 1) << 3);
17877      inst.instruction |= ((index & 2) << 4);
17878    }
17879  else
17880    {
17881      rs = neon_select_shape (NS_QQQ, NS_NULL);
17882      neon_check_type (3, rs, N_EQK, N_EQK, N_BF16 | N_KEY);
17883      neon_three_args (t_bit);
17884    }
17885
17886}
17887
/* Encode fused multiply-accumulate (VFMA/VFMS): VFP scalar syntax first,
   then the MVE forms (including the Q, Q, GPR variant with its own fixed
   base opcode), then plain Neon.  */
static void
do_neon_fmac (void)
{
  if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_fma)
      && try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
    return;

  if (!check_simd_pred_availability (TRUE, NEON_CHECK_CC | NEON_CHECK_ARCH))
    return;

  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
    {
      enum neon_shape rs = neon_select_shape (NS_QQQ, NS_QQR, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs, N_F_MVE | N_KEY, N_EQK,
						N_EQK);

      if (rs == NS_QQR)
	{
	  /* GPR operand: SP and PC are discouraged but not errors.  */
	  if (inst.operands[2].reg == REG_SP)
	    as_tsktsk (MVE_BAD_SP);
	  else if (inst.operands[2].reg == REG_PC)
	    as_tsktsk (MVE_BAD_PC);

	  /* Fixed base opcode for the vector-times-GPR form; bit 28
	     selects the 16-bit element size.  */
	  inst.instruction = 0xee310e40;
	  inst.instruction |= (et.size == 16) << 28;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= HI1 (inst.operands[1].reg) << 6;
	  inst.instruction |= inst.operands[2].reg;
	  inst.is_neon = 1;
	  return;
	}
    }
  else
    {
      constraint (!inst.operands[2].isvec, BAD_FPU);
    }

  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
17930
17931static void
17932do_mve_vfma (void)
17933{
17934  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_bf16) &&
17935      inst.cond == COND_ALWAYS)
17936    {
17937      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
17938      inst.instruction = N_MNEM_vfma;
17939      inst.pred_insn_type = INSIDE_VPT_INSN;
17940      inst.cond = 0xf;
17941      return do_neon_fmac();
17942    }
17943  else
17944    {
17945      do_bfloat_vfma();
17946    }
17947}
17948
17949static void
17950do_neon_tst (void)
17951{
17952  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
17953  struct neon_type_el et = neon_check_type (3, rs,
17954    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
17955  neon_three_same (neon_quad (rs), 0, et.size);
17956}
17957
17958/* VMUL with 3 registers allows the P8 type. The scalar version supports the
17959   same types as the MAC equivalents. The polynomial type for this instruction
17960   is encoded the same as the integer type.  */
17961
static void
do_neon_mul (void)
{
  /* Prefer the VFP scalar syntax when it matches.  */
  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
    return;

  if (!check_simd_pred_availability (FALSE, NEON_CHECK_CC | NEON_CHECK_ARCH))
    return;

  if (inst.operands[2].isscalar)
    {
      /* Indexed-scalar form is Neon-only; shares the MAC scalar types.  */
      constraint (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
      do_neon_mac_maybe_scalar ();
    }
  else
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  enum neon_shape rs = neon_select_shape (NS_QQR, NS_QQQ, NS_NULL);
	  struct neon_type_el et
	    = neon_check_type (3, rs, N_EQK, N_EQK, N_I_MVE | N_F_MVE | N_KEY);
	  /* Float elements additionally require the MVE FP extension.  */
	  if (et.type == NT_float)
	    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext),
			BAD_FPU);

	  neon_dyadic_misc (NT_float, N_I_MVE | N_F_MVE, 0);
	}
      else
	{
	  /* Plain Neon: P8 is allowed here, encoded like the integer
	     types (see the comment above this function).  */
	  constraint (!inst.operands[2].isvec, BAD_FPU);
	  neon_dyadic_misc (NT_poly,
			    N_I8 | N_I16 | N_I32 | N_F16 | N_F32 | N_P8, 0);
	}
    }
}
17997
/* Encode VQDMULH/VQRDMULH: Neon indexed-scalar form, or the vector forms
   for Neon (S16/S32) and MVE (S8/S16/S32, plus the Q, Q, GPR shape).  */
static void
do_neon_qdmulh (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
   return;

  if (inst.operands[2].isscalar)
    {
      /* Indexed-scalar form is Neon-only, not MVE.  */
      constraint (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs;
      struct neon_type_el et;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  rs = neon_select_shape (NS_QQR, NS_QQQ, NS_NULL);
	  et = neon_check_type (3, rs,
	    N_EQK, N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
	}
      else
	{
	  rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
	  et = neon_check_type (3, rs,
	    N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
	}

      NEON_ENCODE (INTEGER, inst);
      if (rs == NS_QQR)
	mve_encode_qqr (et.size, 0, 0);
      else
	/* The U bit (rounding) comes from bit mask.  */
	neon_three_same (neon_quad (rs), 0, et.size);
    }
}
18038
/* Encode MVE VADDV (GPR result, Q-register source; signed or unsigned
   elements up to 32 bits).  */
static void
do_mve_vaddv (void)
{
  enum neon_shape rs = neon_select_shape (NS_RQ, NS_NULL);
  struct neon_type_el et
    = neon_check_type (2, rs, N_EQK,  N_SU_32 | N_KEY);

  if (et.type == NT_invtype)
    first_error (BAD_EL_TYPE);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* Operand registers above 14 (Q-registers above Q7) cannot be
     encoded.  */
  constraint (inst.operands[1].reg > 14, MVE_BAD_QREG);

  mve_encode_rq (et.type == NT_unsigned, et.size);
}
18058
/* Encode MVE VHCADD (three Q registers plus a rotation immediate, which
   must be 90 or 270).  */
static void
do_mve_vhcadd (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQI, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  unsigned rot = inst.relocs[0].exp.X_add_number;
  constraint (rot != 90 && rot != 270, _("immediate out of range"));

  if (et.size == 32 && inst.operands[0].reg == inst.operands[2].reg)
    as_tsktsk (_("Warning: 32-bit element size and same first and third "
		 "operand makes instruction UNPREDICTABLE"));

  mve_encode_qqq (0, et.size);
  /* Bit 12 distinguishes the 270-degree rotation from 90 degrees.  */
  inst.instruction |= (rot == 270) << 12;
  inst.is_neon = 1;
}
18082
/* Encode MVE VQDMULL (Q, Q, Q or Q, Q, GPR; signed 16/32-bit elements).
   With 32-bit elements a destination overlapping a source is
   unpredictable, so warn.  */
static void
do_mve_vqdmull (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQ, NS_QQR, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);

  if (et.size == 32
      && (inst.operands[0].reg == inst.operands[1].reg
	  || (rs == NS_QQQ && inst.operands[0].reg == inst.operands[2].reg)))
    as_tsktsk (BAD_MVE_SRCDEST);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  if (rs == NS_QQQ)
    {
      mve_encode_qqq (et.size == 32, 64);
      inst.instruction |= 1;
    }
  else
    {
      mve_encode_qqr (64, et.size == 32, 0);
      inst.instruction |= 0x3 << 5;
    }
}
18111
/* Encode MVE VADC/VADCI (three Q registers, I32 elements only).  */
static void
do_mve_vadc (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_KEY | N_I32, N_EQK, N_EQK);

  if (et.type == NT_invtype)
    first_error (BAD_EL_TYPE);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_qqq (0, 64);
}
18129
/* Encode MVE VBRSR (Q, Q, GPR; any 8/16/32-bit element type).  */
static void
do_mve_vbrsr (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_qqr (et.size, 0, 0);
}
18144
18145static void
18146do_mve_vsbc (void)
18147{
18148  neon_check_type (3, NS_QQQ, N_EQK, N_EQK, N_I32 | N_KEY);
18149
18150  if (inst.cond > COND_ALWAYS)
18151    inst.pred_insn_type = INSIDE_VPT_INSN;
18152  else
18153    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
18154
18155  mve_encode_qqq (1, 64);
18156}
18157
/* Encode MVE VMULH/VRMULH (three Q registers; signed or unsigned MVE
   element types).  */
static void
do_mve_vmulh (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_MVE | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_qqq (et.type == NT_unsigned, et.size);
}
18172
/* Encode MVE VQDMLAH-family (Q, Q, GPR).  Note the accepted element
   types here are signed only (N_S_32), so the unsigned flag passed to
   mve_encode_qqr is always 0.  */
static void
do_mve_vqdmlah (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_S_32 | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_qqr (et.size, et.type == NT_unsigned, 0);
}
18187
/* Encode MVE VQDMLADH-family (three Q registers; signed 8/16/32-bit
   elements).  */
static void
do_mve_vqdmladh (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_qqq (0, et.size);
}
18202
18203
/* Encode VMULL.  The mnemonic is shared between Neon VMUL aliases and
   MVE's vmullb/vmullt, so on non-MVE targets an unconditional vmullt
   with a Neon-compatible shape/type falls back to the Neon VMUL
   encoder.  */
static void
do_mve_vmull (void)
{

  enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_DDS,
					  NS_QQS, NS_QQQ, NS_QQR, NS_NULL);
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
      && inst.cond == COND_ALWAYS
      && ((unsigned)inst.instruction) == M_MNEM_vmullt)
    {
      if (rs == NS_QQQ)
	{
	  /* Q-register shapes are ambiguous with MVE; only divert to
	     Neon for types Neon VMUL can actually take.  */
	  struct neon_type_el et = neon_check_type (3, rs, N_EQK , N_EQK,
						    N_SUF_32 | N_F64 | N_P8
						    | N_P16 | N_I_MVE | N_KEY);
	  if (((et.type == NT_poly) && et.size == 8
	       && ARM_CPU_IS_ANY (cpu_variant))
	      || (et.type == NT_integer) || (et.type == NT_float))
	    goto neon_vmul;
	}
      else
	goto neon_vmul;
    }

  constraint (rs != NS_QQQ, BAD_FPU);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK , N_EQK,
					    N_SU_32 | N_P8 | N_P16 | N_KEY);

  /* We are dealing with MVE's vmullt.  */
  if (et.size == 32
      && (inst.operands[0].reg == inst.operands[1].reg
	  || inst.operands[0].reg == inst.operands[2].reg))
    as_tsktsk (BAD_MVE_SRCDEST);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  if (et.type == NT_poly)
    mve_encode_qqq (neon_logbits (et.size), 64);
  else
    mve_encode_qqq (et.type == NT_unsigned, et.size);

  return;

neon_vmul:
  /* Re-dispatch as Neon VMUL with an always condition code.  */
  inst.instruction = N_MNEM_vmul;
  inst.cond = 0xb;
  if (thumb_mode)
    inst.pred_insn_type = INSIDE_IT_INSN;
  do_neon_mul ();
}
18258
/* Encode MVE VABAV (GPR accumulator plus two Q registers; signed or
   unsigned 8/16/32-bit elements).  */
static void
do_mve_vabav (void)
{
  enum neon_shape rs = neon_select_shape (NS_RQQ, NS_NULL);

  if (rs == NS_NULL)
    return;

  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    return;

  struct neon_type_el et = neon_check_type (2, NS_NULL, N_EQK, N_KEY | N_S8
					    | N_S16 | N_S32 | N_U8 | N_U16
					    | N_U32);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_rqq (et.type == NT_unsigned, et.size);
}
18281
/* Encode the MVE VMLADAV/VMLSDAV family (GPR accumulator plus two Q
   registers).  The exchanging (x) and subtracting (vmlsdav*) variants
   only accept signed element types.  */
static void
do_mve_vmladav (void)
{
  enum neon_shape rs = neon_select_shape (NS_RQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
					    N_EQK, N_EQK, N_SU_MVE | N_KEY);

  if (et.type == NT_unsigned
      && (inst.instruction == M_MNEM_vmladavx
	  || inst.instruction == M_MNEM_vmladavax
	  || inst.instruction == M_MNEM_vmlsdav
	  || inst.instruction == M_MNEM_vmlsdava
	  || inst.instruction == M_MNEM_vmlsdavx
	  || inst.instruction == M_MNEM_vmlsdavax))
    first_error (BAD_SIMD_TYPE);

  constraint (inst.operands[2].reg > 14,
	      _("MVE vector register in the range [Q0..Q7] expected"));

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* The 8-bit element size is flagged in a different bit position for
     the subtracting variants than for the adding ones.  */
  if (inst.instruction == M_MNEM_vmlsdav
      || inst.instruction == M_MNEM_vmlsdava
      || inst.instruction == M_MNEM_vmlsdavx
      || inst.instruction == M_MNEM_vmlsdavax)
    inst.instruction |= (et.size == 8) << 28;
  else
    inst.instruction |= (et.size == 8) << 8;

  mve_encode_rqq (et.type == NT_unsigned, 64);
  inst.instruction |= (et.size == 32) << 16;
}
18317
/* Encode the MVE VMLALDAV/VMLSLDAV family (GPR-pair accumulator plus two
   Q registers; 16/32-bit elements).  The subtracting variants only accept
   signed element types.  */
static void
do_mve_vmlaldav (void)
{
  enum neon_shape rs = neon_select_shape (NS_RRQQ, NS_NULL);
  struct neon_type_el et
    = neon_check_type (4, rs, N_EQK, N_EQK, N_EQK,
		       N_S16 | N_S32 | N_U16 | N_U32 | N_KEY);

  if (et.type == NT_unsigned
      && (inst.instruction == M_MNEM_vmlsldav
	  || inst.instruction == M_MNEM_vmlsldava
	  || inst.instruction == M_MNEM_vmlsldavx
	  || inst.instruction == M_MNEM_vmlsldavax))
    first_error (BAD_SIMD_TYPE);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_rrqq (et.type == NT_unsigned, et.size);
}
18340
/* Encode the MVE VRMLALDAVH/VRMLSLDAVH family (GPR-pair accumulator plus
   two Q registers, S32-only except the plain vrmlaldavh which also takes
   U32).  SP handling differs between the two sub-families, see below.  */
static void
do_mve_vrmlaldavh (void)
{
  struct neon_type_el et;
  if (inst.instruction == M_MNEM_vrmlsldavh
     || inst.instruction == M_MNEM_vrmlsldavha
     || inst.instruction == M_MNEM_vrmlsldavhx
     || inst.instruction == M_MNEM_vrmlsldavhax)
    {
      et = neon_check_type (4, NS_RRQQ, N_EQK, N_EQK, N_EQK, N_S32 | N_KEY);
      if (inst.operands[1].reg == REG_SP)
	as_tsktsk (MVE_BAD_SP);
    }
  else
    {
      if (inst.instruction == M_MNEM_vrmlaldavhx
	  || inst.instruction == M_MNEM_vrmlaldavhax)
	et = neon_check_type (4, NS_RRQQ, N_EQK, N_EQK, N_EQK, N_S32 | N_KEY);
      else
	et = neon_check_type (4, NS_RRQQ, N_EQK, N_EQK, N_EQK,
			      N_U32 | N_S32 | N_KEY);
      /* vrmlaldavh's encoding with SP as the second, odd, GPR operand may alias
	 with vmax/min instructions, making the use of SP in assembly really
	 nonsensical, so instead of issuing a warning like we do for other uses
	 of SP for the odd register operand we error out.  */
      constraint (inst.operands[1].reg == REG_SP, BAD_SP);
    }

  /* Make sure we still check the second operand is an odd one and that PC is
     disallowed.  This because we are parsing for any GPR operand, to be able
     to distinguish between giving a warning or an error for SP as described
     above.  */
  constraint ((inst.operands[1].reg % 2) != 1, BAD_EVEN);
  constraint (inst.operands[1].reg == REG_PC, BAD_PC);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_rrqq (et.type == NT_unsigned, 0);
}
18383
18384
/* Encode MVE VMAXNMV/VMINNMV-style instructions (GPR accumulator plus a
   Q register; float MVE element types).  */
static void
do_mve_vmaxnmv (void)
{
  enum neon_shape rs = neon_select_shape (NS_RQ, NS_NULL);
  struct neon_type_el et
    = neon_check_type (2, rs, N_EQK, N_F_MVE | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* SP and PC as the scalar operand are discouraged but not errors.  */
  if (inst.operands[0].reg == REG_SP)
    as_tsktsk (MVE_BAD_SP);
  else if (inst.operands[0].reg == REG_PC)
    as_tsktsk (MVE_BAD_PC);

  mve_encode_rq (et.size == 16, 64);
}
18404
/* Encode MVE VMAXV/VMINV and the absolute variants (GPR accumulator plus
   a Q register).  The absolute variants only accept signed types.  */
static void
do_mve_vmaxv (void)
{
  enum neon_shape rs = neon_select_shape (NS_RQ, NS_NULL);
  struct neon_type_el et;

  if (inst.instruction == M_MNEM_vmaxv || inst.instruction == M_MNEM_vminv)
    et = neon_check_type (2, rs, N_EQK, N_SU_MVE | N_KEY);
  else
    et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* SP and PC as the scalar operand are discouraged but not errors.  */
  if (inst.operands[0].reg == REG_SP)
    as_tsktsk (MVE_BAD_SP);
  else if (inst.operands[0].reg == REG_PC)
    as_tsktsk (MVE_BAD_PC);

  mve_encode_rq (et.type == NT_unsigned, et.size);
}
18428
18429
/* Encode VQRDMLAH/VQRDMLSH: the Armv8.1 AdvSIMD forms (indexed-scalar or
   three-register) on Neon targets, or the Q, Q, GPR form on MVE.  */
static void
do_neon_qrdmlah (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
   return;
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      /* Check we're on the correct architecture.  */
      if (!mark_feature_used (&fpu_neon_ext_armv8))
	inst.error
	  = _("instruction form not available on this architecture.");
      else if (!mark_feature_used (&fpu_neon_ext_v8_1))
	{
	  as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
	  record_feature_use (&fpu_neon_ext_v8_1);
	}
	if (inst.operands[2].isscalar)
	  {
	    enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
	    struct neon_type_el et = neon_check_type (3, rs,
	      N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
	    NEON_ENCODE (SCALAR, inst);
	    neon_mul_mac (et, neon_quad (rs));
	  }
	else
	  {
	    enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
	    struct neon_type_el et = neon_check_type (3, rs,
	      N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
	    NEON_ENCODE (INTEGER, inst);
	    /* The U bit (rounding) comes from bit mask.  */
	    neon_three_same (neon_quad (rs), 0, et.size);
	  }
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
      struct neon_type_el et
	= neon_check_type (3, rs, N_EQK, N_EQK, N_S_32 | N_KEY);

      NEON_ENCODE (INTEGER, inst);
      mve_encode_qqr (et.size, et.type == NT_unsigned, 0);
    }
}
18474
18475static void
18476do_neon_fcmp_absolute (void)
18477{
18478  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
18479  struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
18480					    N_F_16_32 | N_KEY);
18481  /* Size field comes from bit mask.  */
18482  neon_three_same (neon_quad (rs), 1, et.size == 16 ? (int) et.size : -1);
18483}
18484
/* Encode the inverted-condition absolute float compare by swapping the
   source operands and reusing the direct encoding.  */
static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
18491
18492static void
18493do_neon_step (void)
18494{
18495  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
18496  struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
18497					    N_F_16_32 | N_KEY);
18498  neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
18499}
18500
/* Encode VABS/VNEG: VFP scalar syntax first, then the two-register
   vector form with signed-32 or float element types.  */
static void
do_neon_abs_neg (void)
{
  enum neon_shape rs;
  struct neon_type_el et;

  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
    return;

  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  et = neon_check_type (2, rs, N_EQK, N_S_32 | N_F_16_32 | N_KEY);

  if (!check_simd_pred_availability (et.type == NT_float,
				     NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= (et.type == NT_float) << 10;
  inst.instruction |= neon_logbits (et.size) << 18;

  neon_dp_fixup (&inst);
}
18527
18528static void
18529do_neon_sli (void)
18530{
18531  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
18532    return;
18533
18534  enum neon_shape rs;
18535  struct neon_type_el et;
18536  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
18537    {
18538      rs = neon_select_shape (NS_QQI, NS_NULL);
18539      et = neon_check_type (2, rs, N_EQK, N_8 | N_16 | N_32 | N_KEY);
18540    }
18541  else
18542    {
18543      rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
18544      et = neon_check_type (2, rs, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
18545    }
18546
18547
18548  int imm = inst.operands[2].imm;
18549  constraint (imm < 0 || (unsigned)imm >= et.size,
18550	      _("immediate out of range for insert"));
18551  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
18552}
18553
18554static void
18555do_neon_sri (void)
18556{
18557  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
18558    return;
18559
18560  enum neon_shape rs;
18561  struct neon_type_el et;
18562  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
18563    {
18564      rs = neon_select_shape (NS_QQI, NS_NULL);
18565      et = neon_check_type (2, rs, N_EQK, N_8 | N_16 | N_32 | N_KEY);
18566    }
18567  else
18568    {
18569      rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
18570      et = neon_check_type (2, rs, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
18571    }
18572
18573  int imm = inst.operands[2].imm;
18574  constraint (imm < 1 || (unsigned)imm > et.size,
18575	      _("immediate out of range for insert"));
18576  neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
18577}
18578
18579static void
18580do_neon_qshlu_imm (void)
18581{
18582  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
18583    return;
18584
18585  enum neon_shape rs;
18586  struct neon_type_el et;
18587  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
18588    {
18589      rs = neon_select_shape (NS_QQI, NS_NULL);
18590      et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
18591    }
18592  else
18593    {
18594      rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
18595      et = neon_check_type (2, rs, N_EQK | N_UNS,
18596			    N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
18597    }
18598
18599  int imm = inst.operands[2].imm;
18600  constraint (imm < 0 || (unsigned)imm >= et.size,
18601	      _("immediate out of range for shift"));
18602  /* Only encodes the 'U present' variant of the instruction.
18603     In this case, signed types have OP (bit 8) set to 0.
18604     Unsigned types have OP set to 1.  */
18605  inst.instruction |= (et.type == NT_unsigned) << 8;
18606  /* The rest of the bits are the same as other immediate shifts.  */
18607  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
18608}
18609
18610static void
18611do_neon_qmovn (void)
18612{
18613  struct neon_type_el et = neon_check_type (2, NS_DQ,
18614    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
18615  /* Saturating move where operands can be signed or unsigned, and the
18616     destination has the same signedness.  */
18617  NEON_ENCODE (INTEGER, inst);
18618  if (et.type == NT_unsigned)
18619    inst.instruction |= 0xc0;
18620  else
18621    inst.instruction |= 0x80;
18622  neon_two_same (0, 1, et.size / 2);
18623}
18624
/* Encode VQMOVUN: saturating narrowing move with an unsigned result
   from a signed source.  */
static void
do_neon_qmovun (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  /* Saturating move with unsigned results. Operands must be signed.  */
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (0, 1, et.size / 2);
}
18634
/* Encode VQ{R}SHRN: saturating narrowing shift right by immediate.  A
   zero shift count is rewritten as the equivalent VQMOVN.  */
static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* The encoded shift field holds (halved) SIZE - IMM.  */
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}
18661
/* Encode VQ{R}SHRUN: saturating narrowing shift right by immediate with
   an unsigned result from a signed source.  A zero shift count is
   rewritten as the equivalent VQMOVUN.  */
static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1.  */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}
18691
/* Encode VMOVN: narrowing move (no saturation), integer elements.  */
static void
do_neon_movn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (0, 1, et.size / 2);
}
18700
18701static void
18702do_neon_rshift_narrow (void)
18703{
18704  struct neon_type_el et = neon_check_type (2, NS_DQI,
18705    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
18706  int imm = inst.operands[2].imm;
18707  /* This gets the bounds check, size encoding and immediate bits calculation
18708     right.  */
18709  et.size /= 2;
18710
18711  /* If immediate is zero then we are a pseudo-instruction for
18712     VMOVN.I<size> <Dd>, <Qm>  */
18713  if (imm == 0)
18714    {
18715      inst.operands[2].present = 0;
18716      inst.instruction = N_MNEM_vmovn;
18717      do_neon_movn ();
18718      return;
18719    }
18720
18721  constraint (imm < 1 || (unsigned)imm > et.size,
18722	      _("immediate out of range for narrowing operation"));
18723  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
18724}
18725
/* Encode VSHLL: widening shift left by immediate.  A shift count equal
   to the element size selects the "maximum shift" encoding, which
   accepts any I8/I16/I32 type; smaller counts use the immediate-shift
   encoding with S/U 8/16/32 types.  */
static void
do_neon_shll (void)
{
  /* FIXME: Type checking when lengthening.  */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant.  */
      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* A more-specific type check for non-max versions.  */
      et = neon_check_type (2, NS_QDI,
	N_EQK | N_DBL, N_SU_32 | N_KEY);
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}
18755
/* Check the various types for the VCVT instruction, and return which version
   the current instruction is.  */

/* X-macro table of all conversion flavours.  Each CVT_VAR row carries:
   C   - name suffix used to build the neon_cvt_flavour_* enumerators;
   X,Y - type masks for the destination and source operands;
   R   - extra mask ORed into both operand checks (may reference the
	 `whole_reg' / `key' locals of get_neon_cvt_flavour, so rows are
	 only type-checked inside that function);
   BSN, CN, ZN - legacy VFP mnemonics for the bitshift, plain and
	 round-to-zero forms respectively, or NULL where no such form
	 exists.  */
#define CVT_FLAVOUR_VAR							      \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg,   "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg,   "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg,   "fsltos", "fsitos", NULL)      \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg,   "fultos", "fuitos", NULL)      \
  /* Half-precision conversions.  */					      \
  CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg,   NULL,     NULL,     NULL)      \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg,   NULL,     NULL,     NULL)      \
  /* New VCVT instructions introduced by ARMv8.2 fp16 extension.	      \
     Compared with single/double precision variants, only the co-processor    \
     field is different, so the encoding flow is reused here.  */	      \
  CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL)    \
  CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL)    \
  CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
  CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
  CVT_VAR (bf16_f32, N_BF16, N_F32, whole_reg,   NULL, NULL, NULL)	      \
  /* VFP instructions.  */						      \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP,       NULL,     "fcvtsd", NULL)      \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP,       NULL,     "fcvtds", NULL)      \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL)      \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL)      \
  /* VFP instructions with bitshift.  */				      \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL,     NULL)      \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL,     NULL)      \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL,     NULL)      \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL,     NULL)      \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL,     NULL)      \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL,     NULL)      \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL,     NULL)      \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL,     NULL)

/* Expand each table row into an enumerator name.  */
#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  /* Flavours from f32_f64 on are handled by the VFP encoders rather
     than the Neon ones (see do_neon_cvt_1).  */
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
18808
/* Run each conversion flavour's type check in turn against shape RS and
   return the first flavour whose operand types match the current
   instruction, or neon_cvt_flavour_invalid if none does.  CVT_VAR is
   redefined so that expanding CVT_FLAVOUR_VAR produces one check per
   table row.  */
static enum neon_cvt_flavour
get_neon_cvt_flavour (enum neon_shape rs)
{
#define CVT_VAR(C,X,Y,R,BSN,CN,ZN)			\
  et = neon_check_type (2, rs, (R) | (X), (R) | (Y));	\
  if (et.type != NT_invtype)				\
    {							\
      inst.error = NULL;				\
      return (neon_cvt_flavour_##C);			\
    }

  struct neon_type_el et;
  /* Plain VFP single-register shapes get N_VFP ORed into both operand
     checks.  */
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
			|| rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register. Thus the
     "source" and "destination" registers must have the same width.  Hack that
     here by making the size equal to the key (wider, in this case) operand.  */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  CVT_FLAVOUR_VAR;

  return neon_cvt_flavour_invalid;
#undef CVT_VAR
}
18834
/* Conversion behaviour selector passed to do_neon_cvt_1 and the FP v8
   encoder: a/n/p/m select the explicit rounding-mode variants (see
   do_neon_cvta etc. below), z selects round-toward-zero (do_neon_cvt)
   and x is used by do_neon_cvtr.  */
enum neon_cvt_mode
{
  neon_cvt_mode_a,
  neon_cvt_mode_n,
  neon_cvt_mode_p,
  neon_cvt_mode_m,
  neon_cvt_mode_z,
  neon_cvt_mode_x,
  neon_cvt_mode_r
};
18845
18846/* Neon-syntax VFP conversions.  */
18847
/* Emit a Neon-syntax VCVT as its legacy VFP mnemonic, looked up from
   the CVT_FLAVOUR_VAR table: the BSN column for shapes with an
   immediate bitshift, the CN column otherwise.  A NULL table entry
   means no VFP form exists and nothing is emitted.  */
static void
do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
{
  const char *opname = 0;

  if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI
      || rs == NS_FHI || rs == NS_HFI)
    {
      /* Conversions with immediate bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	{
	  opname = enc[flavour];
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("operands 0 and 1 must be the same register"));
	  /* Drop the duplicated register operand and move the bitshift
	     immediate into its slot for the legacy encoder.  */
	  inst.operands[1] = inst.operands[2];
	  memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
	}
    }
  else
    {
      /* Conversions without bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	opname = enc[flavour];
    }

  if (opname)
    do_vfp_nsyn_opcode (opname);

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16
      || flavour == neon_cvt_flavour_f16_u32
      || flavour == neon_cvt_flavour_f16_s32)
    do_scalar_fp16_v82_encode ();
}
18899
/* Emit a round-toward-zero VCVT as its legacy VFP mnemonic, looked up
   from the ZN column of the CVT_FLAVOUR_VAR table.  Nothing is emitted
   when the flavour has no round-to-zero VFP form.  */
static void
do_vfp_nsyn_cvtz (void)
{
  enum neon_shape rs = neon_select_shape (NS_FH, NS_FF, NS_FD, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
  const char *enc[] =
    {
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
      CVT_FLAVOUR_VAR
      NULL
#undef CVT_VAR
    };

  if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
    do_vfp_nsyn_opcode (enc[flavour]);
}
18916
/* Encode an FP v8 VCVT{A,N,P,M} scalar conversion for FLAVOUR in
   rounding mode MODE.  SZ selects a double-precision source, OP a
   signed result, RM the rounding-mode field.  Only float-to-integer
   flavours are valid here.  */
static void
do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
		      enum neon_cvt_mode mode)
{
  int sz, op;
  int rm;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (flavour == neon_cvt_flavour_s32_f64
      || flavour == neon_cvt_flavour_u32_f64)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
		_(BAD_FP16));

  set_pred_insn_type (OUTSIDE_PRED_INSN);

  /* sz = 1 for a double-precision source, op = 1 for a signed result.  */
  switch (flavour)
    {
    case neon_cvt_flavour_s32_f64:
      sz = 1;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f32:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f16:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_u32_f64:
      sz = 1;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f32:
      sz = 0;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f16:
      sz = 0;
      op = 0;
      break;
    default:
      first_error (_("invalid instruction shape"));
      return;
    }

  switch (mode)
    {
    case neon_cvt_mode_a: rm = 0; break;
    case neon_cvt_mode_n: rm = 1; break;
    case neon_cvt_mode_p: rm = 2; break;
    case neon_cvt_mode_m: rm = 3; break;
    default: first_error (_("invalid rounding mode")); return;
    }

  NEON_ENCODE (FPV8, inst);
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= sz << 8;

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      ||flavour == neon_cvt_flavour_u32_f16)
    do_scalar_fp16_v82_encode ();
  inst.instruction |= op << 7;
  inst.instruction |= rm << 16;
  inst.instruction |= 0xf0000000;
  inst.is_neon = TRUE;
}
18992
/* Common worker for the VCVT family.  MODE selects the conversion
   behaviour (explicit rounding modes a/n/p/m, round-toward-zero z, or
   the x mode used by do_neon_cvtr).  Depending on the operand shape and
   the flavour returned by get_neon_cvt_flavour, this dispatches to the
   legacy VFP encoders, the FP v8 rounding-mode encoder, or encodes the
   Neon/MVE vector and half-precision Advanced SIMD forms inline.  */
static void
do_neon_cvt_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
					  NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ,
					  NS_FH, NS_HF, NS_FHI, NS_HFI,
					  NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);

  if (flavour == neon_cvt_flavour_invalid)
    return;

  /* PR11109: Handle round-to-zero for VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
      && (flavour == neon_cvt_flavour_s16_f16
	  || flavour == neon_cvt_flavour_u16_f16
	  || flavour == neon_cvt_flavour_s32_f32
	  || flavour == neon_cvt_flavour_u32_f32
	  || flavour == neon_cvt_flavour_s32_f64
	  || flavour == neon_cvt_flavour_u32_f64)
      && (rs == NS_FD || rs == NS_FF))
    {
      do_vfp_nsyn_cvtz ();
      return;
    }

  /* ARMv8.2 fp16 VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16)
      && (flavour == neon_cvt_flavour_s32_f16
	  || flavour == neon_cvt_flavour_u32_f16)
      && (rs == NS_FH))
    {
      do_vfp_nsyn_cvtz ();
      do_scalar_fp16_v82_encode ();
      return;
    }

  /* VFP rather than Neon conversions.  */
  if (flavour >= neon_cvt_flavour_first_fp)
    {
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);

      return;
    }

  switch (rs)
    {
    case NS_QQI:
      if (mode == neon_cvt_mode_z
	  && (flavour == neon_cvt_flavour_f16_s16
	      || flavour == neon_cvt_flavour_f16_u16
	      || flavour == neon_cvt_flavour_s16_f16
	      || flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_f32_u32
	      || flavour == neon_cvt_flavour_f32_s32
	      || flavour == neon_cvt_flavour_s32_f32
	      || flavour == neon_cvt_flavour_u32_f32))
	{
	  if (!check_simd_pred_availability (TRUE,
					     NEON_CHECK_CC | NEON_CHECK_ARCH))
	    return;
	}
      else if (mode == neon_cvt_mode_n)
	{
	  /* We are dealing with vcvt with the 'ne' condition.  */
	  inst.cond = 0x1;
	  inst.instruction = N_MNEM_vcvt;
	  do_neon_cvt_1 (neon_cvt_mode_z);
	  return;
	}
      /* fall through.  */
    case NS_DDI:
      {
	unsigned immbits;
	/* Per-flavour opcode bits for the fixed-point (immediate) forms;
	   indexed by neon_cvt_flavour.  */
	unsigned enctab[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
			     0x0000100, 0x1000100, 0x0, 0x1000000};

	if ((rs != NS_QQI || !ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
	    && vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	    return;

	if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
	  {
	    constraint (inst.operands[2].present && inst.operands[2].imm == 0,
			_("immediate value out of range"));
	    switch (flavour)
	      {
		case neon_cvt_flavour_f16_s16:
		case neon_cvt_flavour_f16_u16:
		case neon_cvt_flavour_s16_f16:
		case neon_cvt_flavour_u16_f16:
		  constraint (inst.operands[2].imm > 16,
			      _("immediate value out of range"));
		  break;
		case neon_cvt_flavour_f32_u32:
		case neon_cvt_flavour_f32_s32:
		case neon_cvt_flavour_s32_f32:
		case neon_cvt_flavour_u32_f32:
		  constraint (inst.operands[2].imm > 32,
			      _("immediate value out of range"));
		  break;
		default:
		  inst.error = BAD_FPU;
		  return;
	      }
	  }

	/* Fixed-point conversion with #0 immediate is encoded as an
	   integer conversion.  */
	if (inst.operands[2].present && inst.operands[2].imm == 0)
	  goto int_encode;
	NEON_ENCODE (IMMED, inst);
	if (flavour != neon_cvt_flavour_invalid)
	  inst.instruction |= enctab[flavour];
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	inst.instruction |= 1 << 21;
	if (flavour < neon_cvt_flavour_s16_f16)
	  {
	    /* NOTE(review): bit 21 was already set just above, so this
	       second OR is redundant (though harmless) — confirm against
	       the intended encoding.  */
	    inst.instruction |= 1 << 21;
	    immbits = 32 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	  }
	else
	  {
	    /* Half-precision fixed-point forms use a different size
	       field and a 16-based immediate encoding.  */
	    inst.instruction |= 3 << 20;
	    immbits = 16 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	    inst.instruction &= ~(1 << 9);
	  }

	neon_dp_fixup (&inst);
      }
      break;

    case NS_QQ:
      if ((mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
	   || mode == neon_cvt_mode_m || mode == neon_cvt_mode_p)
	  && (flavour == neon_cvt_flavour_s16_f16
	      || flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_s32_f32
	      || flavour == neon_cvt_flavour_u32_f32))
	{
	  if (!check_simd_pred_availability (TRUE,
					     NEON_CHECK_CC | NEON_CHECK_ARCH8))
	    return;
	}
      else if (mode == neon_cvt_mode_z
	       && (flavour == neon_cvt_flavour_f16_s16
		   || flavour == neon_cvt_flavour_f16_u16
		   || flavour == neon_cvt_flavour_s16_f16
		   || flavour == neon_cvt_flavour_u16_f16
		   || flavour == neon_cvt_flavour_f32_u32
		   || flavour == neon_cvt_flavour_f32_s32
		   || flavour == neon_cvt_flavour_s32_f32
		   || flavour == neon_cvt_flavour_u32_f32))
	{
	  if (!check_simd_pred_availability (TRUE,
					     NEON_CHECK_CC | NEON_CHECK_ARCH))
	    return;
	}
      /* fall through.  */
    case NS_DD:
      if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
	{
	  /* Vector rounding-mode conversion (VCVT{A,N,P,M}).  */
	  NEON_ENCODE (FLOAT, inst);
	  if (!check_simd_pred_availability (TRUE,
					     NEON_CHECK_CC | NEON_CHECK_ARCH8))
	    return;

	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= LOW4 (inst.operands[1].reg);
	  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	  inst.instruction |= neon_quad (rs) << 6;
	  inst.instruction |= (flavour == neon_cvt_flavour_u16_f16
			       || flavour == neon_cvt_flavour_u32_f32) << 7;
	  inst.instruction |= mode << 8;
	  if (flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_s16_f16)
	    /* Mask off the original size bits and reencode them.  */
	    inst.instruction = ((inst.instruction & 0xfff3ffff) | (1 << 18));

	  if (thumb_mode)
	    inst.instruction |= 0xfc000000;
	  else
	    inst.instruction |= 0xf0000000;
	}
      else
	{
    int_encode:
	  {
	    /* Per-flavour opcode bits for the integer forms; indexed by
	       neon_cvt_flavour.  */
	    unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080,
				  0x100, 0x180, 0x0, 0x080};

	    NEON_ENCODE (INTEGER, inst);

	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
	    {
	      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
		return;
	    }

	    if (flavour != neon_cvt_flavour_invalid)
	      inst.instruction |= enctab[flavour];

	    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	    inst.instruction |= LOW4 (inst.operands[1].reg);
	    inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	    inst.instruction |= neon_quad (rs) << 6;
	    if (flavour >= neon_cvt_flavour_s16_f16
		&& flavour <= neon_cvt_flavour_f16_u16)
	      /* Half precision.  */
	      inst.instruction |= 1 << 18;
	    else
	      inst.instruction |= 2 << 18;

	    neon_dp_fixup (&inst);
	  }
	}
      break;

    /* Half-precision conversions for Advanced SIMD -- neon.  */
    case NS_QD:
    case NS_DQ:
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;

      if ((rs == NS_DQ)
	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if ((rs == NS_QD)
	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if (rs == NS_DQ)
	{
	  if (flavour == neon_cvt_flavour_bf16_f32)
	    {
	      if (vfp_or_neon_is_neon (NEON_CHECK_ARCH8) == FAIL)
		return;
	      constraint (!mark_feature_used (&arm_ext_bf16), _(BAD_BF16));
	      /* VCVT.bf16.f32.  */
	      inst.instruction = 0x11b60640;
	    }
	  else
	    /* VCVT.f16.f32.  */
	    inst.instruction = 0x3b60600;
	}
      else
	/* VCVT.f32.f16.  */
	inst.instruction = 0x3b60700;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      neon_dp_fixup (&inst);
      break;

    default:
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
    }
}
19278
/* VCVTR: convert using mode x.  */
static void
do_neon_cvtr (void)
{
  do_neon_cvt_1 (neon_cvt_mode_x);
}
19284
/* VCVT: convert with round-toward-zero (mode z).  */
static void
do_neon_cvt (void)
{
  do_neon_cvt_1 (neon_cvt_mode_z);
}
19290
/* VCVTA: convert using rounding mode a.  */
static void
do_neon_cvta (void)
{
  do_neon_cvt_1 (neon_cvt_mode_a);
}
19296
/* VCVTN: convert using rounding mode n.  */
static void
do_neon_cvtn (void)
{
  do_neon_cvt_1 (neon_cvt_mode_n);
}
19302
/* VCVTP: convert using rounding mode p.  */
static void
do_neon_cvtp (void)
{
  do_neon_cvt_1 (neon_cvt_mode_p);
}
19308
/* VCVTM: convert using rounding mode m.  */
static void
do_neon_cvtm (void)
{
  do_neon_cvt_1 (neon_cvt_mode_m);
}
19314
19315static void
19316do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
19317{
19318  if (is_double)
19319    mark_feature_used (&fpu_vfp_ext_armv8);
19320
19321  encode_arm_vfp_reg (inst.operands[0].reg,
19322		      (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
19323  encode_arm_vfp_reg (inst.operands[1].reg,
19324		      (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
19325  inst.instruction |= to ? 0x10000 : 0;
19326  inst.instruction |= t ? 0x80 : 0;
19327  inst.instruction |= is_double ? 0x100 : 0;
19328  do_vfp_cond_or_thumb ();
19329}
19330
/* Common worker for VCVTB (T = FALSE) and VCVTT (T = TRUE).  Handles
   the MVE vector forms, then tries each scalar operand-type combination
   (f16<->f32, f16<->f64, bf16<-f32) in turn via neon_check_type,
   clearing inst.error before encoding the first match.  */
static void
do_neon_cvttb_1 (bfd_boolean t)
{
  enum neon_shape rs = neon_select_shape (NS_HF, NS_HD, NS_FH, NS_FF, NS_FD,
					  NS_DF, NS_DH, NS_QQ, NS_QQI, NS_NULL);

  if (rs == NS_NULL)
    return;
  else if (rs == NS_QQ || rs == NS_QQI)
    {
      int single_to_half = 0;
      if (!check_simd_pred_availability (TRUE, NEON_CHECK_ARCH))
	return;

      enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);

      /* On MVE, the float<->integer flavours are re-dispatched as a
	 round-toward-zero VCVT inside a VPT block.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
	  && (flavour ==  neon_cvt_flavour_u16_f16
	      || flavour ==  neon_cvt_flavour_s16_f16
	      || flavour ==  neon_cvt_flavour_f16_s16
	      || flavour ==  neon_cvt_flavour_f16_u16
	      || flavour ==  neon_cvt_flavour_u32_f32
	      || flavour ==  neon_cvt_flavour_s32_f32
	      || flavour ==  neon_cvt_flavour_f32_s32
	      || flavour ==  neon_cvt_flavour_f32_u32))
	{
	  inst.cond = 0xf;
	  inst.instruction = N_MNEM_vcvt;
	  set_pred_insn_type (INSIDE_VPT_INSN);
	  do_neon_cvt_1 (neon_cvt_mode_z);
	  return;
	}
      else if (rs == NS_QQ && flavour == neon_cvt_flavour_f32_f16)
	single_to_half = 1;
      else if (rs == NS_QQ && flavour != neon_cvt_flavour_f16_f32)
	{
	  first_error (BAD_FPU);
	  return;
	}

      /* Vector f16<->f32 form.  */
      inst.instruction = 0xee3f0e01;
      inst.instruction |= single_to_half << 28;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 13;
      inst.instruction |= t << 12;
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= LOW4 (inst.operands[1].reg) << 1;
      inst.is_neon = 1;
    }
  else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
    {
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
    {
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
         don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
    }
  else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
         don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
    }
  else if (neon_check_type (2, rs, N_BF16 | N_VFP, N_F32).type != NT_invtype)
    {
      constraint (!mark_feature_used (&arm_ext_bf16), _(BAD_BF16));
      inst.error = NULL;
      inst.instruction |= (1 << 8);
      inst.instruction &= ~(1 << 9);
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
    }
  else
    return;
}
19421
/* Encode VCVTB: FALSE selects the bottom-half ("B") variant of the shared
   VCVTB/VCVTT encoder do_neon_cvttb_1.  */
static void
do_neon_cvtb (void)
{
  do_neon_cvttb_1 (FALSE);
}
19427
19428
/* Encode VCVTT: TRUE selects the top-half ("T") variant of the shared
   VCVTB/VCVTT encoder do_neon_cvttb_1.  */
static void
do_neon_cvtt (void)
{
  do_neon_cvttb_1 (TRUE);
}
19434
/* Encode the immediate forms of VMOV/VMVN (Neon "one register and a
   modified immediate").  A cmode encoding is searched for the immediate
   as written; if none exists, the immediate is bitwise-inverted and the
   opcode flipped (VMOV<->VMVN) for a second attempt.  */
static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
	      _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
  op = (inst.instruction & (1 << 5)) != 0;

  immlo = inst.operands[1].imm;
  /* A 64-bit immediate carries its high word in operands[1].reg.  */
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
	      _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
					et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only.  */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
	 with one or the other; those cases are caught by
	 neon_cmode_for_move_imm.  */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
					    &op, et.size, et.type)) == FAIL)
	{
	  first_error (_("immediate out of range"));
	  return;
	}
    }

  /* Clear and rewrite the op bit: the two-pass search may have flipped it.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
19486
/* Encode VMVN.  The register form is a two-register bitwise NOT; the
   immediate form is delegated to neon_move_immediate.  */
static void
do_neon_mvn (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_CC | NEON_CHECK_ARCH))
    return;

  if (inst.operands[1].isreg)
    {
      enum neon_shape rs;
      /* MVE only has the Q-register form.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	rs = neon_select_shape (NS_QQ, NS_NULL);
      else
	rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);

      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
    }
  else
    {
      NEON_ENCODE (IMMED, inst);
      neon_move_immediate ();
    }

  neon_dp_fixup (&inst);

  /* NOTE(review): this MVE check runs after encoding; constraint() reports
     via the error machinery, so a late diagnosis is still effective, but the
     placement looks accidental — confirm against upstream history.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      constraint (!inst.operands[1].isreg && !inst.operands[0].isquad, BAD_FPU);
    }
}
19521
19522/* Encode instructions of form:
19523
19524  |28/24|23|22|21 20|19 16|15 12|11    8|7|6|5|4|3  0|
19525  |  U  |x |D |size | Rn  | Rd  |x x x x|N|x|M|x| Rm |  */
19526
19527static void
19528neon_mixed_length (struct neon_type_el et, unsigned size)
19529{
19530  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
19531  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
19532  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
19533  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
19534  inst.instruction |= LOW4 (inst.operands[2].reg);
19535  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
19536  inst.instruction |= (et.type == NT_unsigned) << 24;
19537  inst.instruction |= neon_logbits (size) << 20;
19538
19539  neon_dp_fixup (&inst);
19540}
19541
/* Encode VADDL/VSUBL/VABDL.  The Neon form (Q = D op D) is a straight
   mixed-length encoding; for MVE the mnemonics only exist as aliases of
   VADD/VSUB/VABD with an LE/LT condition suffix inside a VPT/IT block.  */
static void
do_neon_dyadic_long (void)
{
  enum neon_shape rs = neon_select_shape (NS_QDD, NS_QQQ, NS_QQR, NS_NULL);
  if (rs == NS_QDD)
    {
      if (vfp_or_neon_is_neon (NEON_CHECK_ARCH | NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (INTEGER, inst);
      /* FIXME: Type checking for lengthening op.  */
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
      neon_mixed_length (et, et.size);
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
	   && (inst.cond == 0xf || inst.cond == 0x10))
    {
      /* If parsing for MVE, vaddl/vsubl/vabdl{e,t} can only be vadd/vsub/vabd
	 in an IT block with le/lt conditions.  */

      /* Rewrite the parsed pseudo-condition into the real LE/LT condition
	 code before re-dispatching to the base mnemonic's encoder.  */
      if (inst.cond == 0xf)
	inst.cond = 0xb;
      else if (inst.cond == 0x10)
	inst.cond = 0xd;

      inst.pred_insn_type = INSIDE_IT_INSN;

      if (inst.instruction == N_MNEM_vaddl)
	{
	  inst.instruction = N_MNEM_vadd;
	  do_neon_addsub_if_i ();
	}
      else if (inst.instruction == N_MNEM_vsubl)
	{
	  inst.instruction = N_MNEM_vsub;
	  do_neon_addsub_if_i ();
	}
      else if (inst.instruction == N_MNEM_vabdl)
	{
	  inst.instruction = N_MNEM_vabd;
	  do_neon_dyadic_if_su ();
	}
    }
  else
    first_error (BAD_FPU);
}
19589
/* Encode VABAL (absolute difference and accumulate, long):
   Qd = Qd + |Dn - Dm| with doubled element width in the destination.  */
static void
do_neon_abal (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
19597
/* Shared encoder for long multiply(-accumulate) instructions that accept
   either a scalar (Dm[x]) or a plain register as the final operand.
   NOTE(review): the scalar branch consumes REGTYPES and the register
   branch consumes SCALARTYPES — the parameter names appear swapped
   relative to their use; callers pass positionally, so behaviour is
   consistent, but confirm the intended naming against upstream.  */
static void
neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
{
  if (inst.operands[2].isscalar)
    {
      struct neon_type_el et = neon_check_type (3, NS_QDS,
	N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, et.type == NT_unsigned);
    }
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      neon_mixed_length (et, et.size);
    }
}
19616
/* Encode VMLAL/VMLSL/VMULL-style long MAC; 16/32-bit types for the scalar
   form, any of N_SU_32 for the register form.  */
static void
do_neon_mac_maybe_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}
19622
19623/* Like neon_scalar_for_mul, this function generate Rm encoding from GAS's
19624   internal SCALAR.  QUAD_P is 1 if it's for Q format, otherwise it's 0.  */
19625
/* Like neon_scalar_for_mul, this function generates the Rm encoding from
   GAS's internal SCALAR.  QUAD_P is 1 for the Q format, 0 for the D
   format.  On an out-of-range scalar an error is reported and 0 is
   returned.  */

static unsigned
neon_scalar_for_fmac_fp16_long (unsigned scalar, unsigned quad_p)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  if (quad_p)
    {
      /* Q form: register in bits [2:0], element index split across
	 bit 3 (low) and bit 5 (high).  */
      if (regno <= 7 && elno <= 3)
	return ((regno & 0x7)
		| ((elno & 0x1) << 3)
		| (((elno >> 1) & 0x1) << 5));
    }
  else if (regno <= 15 && elno <= 1)
    /* D form: register split across bit 5 (low bit) and bits [2:0]
       (high bits), element index in bit 3.  */
    return (((regno & 0x1) << 5)
	    | ((regno >> 1) & 0x7)
	    | ((elno & 0x1) << 3));

  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
19655
/* Encode VFMAL/VFMSL (FP16 fused multiply-accumulate long), in either the
   three-same register form or the scalar-indexed form.  SUBTYPE is 0 for
   VFMAL and 1 for VFMSL.  The generic neon_three_same encoding is produced
   first and then patched, since these instructions deviate from the usual
   NEON three-same layout.  */
static void
do_neon_fmac_maybe_scalar_long (int subtype)
{
  enum neon_shape rs;
  int high8;
  /* NOTE: vfmal/vfmsl use slightly different NEON three-same encoding.  'size'
     field (bits[21:20]) has different meaning.  For scalar index variant, it's
     used to differentiate add and subtract, otherwise it's with fixed value
     0x2.  */
  int size = -1;

  /* vfmal/vfmsl are in three-same D/Q register format or the third operand can
     be a scalar index register.  */
  if (inst.operands[2].isscalar)
    {
      high8 = 0xfe000000;
      if (subtype)
	size = 16;
      rs = neon_select_shape (NS_DHS, NS_QDS, NS_NULL);
    }
  else
    {
      high8 = 0xfc000000;
      size = 32;
      if (subtype)
	inst.instruction |= (0x1 << 23);
      rs = neon_select_shape (NS_DHH, NS_QDD, NS_NULL);
    }


  if (inst.cond != COND_ALWAYS)
    as_warn (_("vfmal/vfmsl with FP16 type cannot be conditional, the "
	       "behaviour is UNPREDICTABLE"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16_fml),
	      _(BAD_FP16));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));

  /* "opcode" from template has included "ubit", so simply pass 0 here.  Also,
     the "S" bit in size field has been reused to differentiate vfmal and vfmsl,
     so we simply pass -1 as size.  */
  unsigned quad_p = (rs == NS_QDD || rs == NS_QDS);
  neon_three_same (quad_p, 0, size);

  /* Undo neon_dp_fixup.  Redo the high eight bits.  */
  inst.instruction &= 0x00ffffff;
  inst.instruction |= high8;

/* Helper field extractors for the split register encodings below.
   NOTE(review): defined mid-function with no #undef, so they leak into the
   rest of the translation unit — confirm no later clash.  */
#define LOW1(R) ((R) & 0x1)
#define HI4(R) (((R) >> 1) & 0xf)
  /* Unlike usually NEON three-same, encoding for Vn and Vm will depend on
     whether the instruction is in Q form and whether Vm is a scalar indexed
     operand.  */
  if (inst.operands[2].isscalar)
    {
      unsigned rm
	= neon_scalar_for_fmac_fp16_long (inst.operands[2].reg, quad_p);
      inst.instruction &= 0xffffffd0;
      inst.instruction |= rm;

      if (!quad_p)
	{
	  /* Redo Rn as well.  */
	  inst.instruction &= 0xfff0ff7f;
	  inst.instruction |= HI4 (inst.operands[1].reg) << 16;
	  inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
	}
    }
  else if (!quad_p)
    {
      /* Redo Rn and Rm.  */
      inst.instruction &= 0xfff0ff50;
      inst.instruction |= HI4 (inst.operands[1].reg) << 16;
      inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
      inst.instruction |= HI4 (inst.operands[2].reg);
      inst.instruction |= LOW1 (inst.operands[2].reg) << 5;
    }
}
19736
/* Encode VFMAL; subtype 0 selects the accumulate (non-subtract) form of
   the shared VFMAL/VFMSL encoder.  */
static void
do_neon_vfmal (void)
{
  /* A `return' with a (void) expression is a constraint violation in a
     void function (C11 6.8.6.4p1); call and return separately.  */
  do_neon_fmac_maybe_scalar_long (0);
}
19742
/* Encode VFMSL; subtype 1 selects the subtract form of the shared
   VFMAL/VFMSL encoder.  */
static void
do_neon_vfmsl (void)
{
  /* A `return' with a (void) expression is a constraint violation in a
     void function (C11 6.8.6.4p1); call and return separately.  */
  do_neon_fmac_maybe_scalar_long (1);
}
19748
/* Encode wide dyadic operations (VADDW/VSUBW): Qd = Qn op widened Dm.  */
static void
do_neon_dyadic_wide (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QQD,
    N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
19756
/* Encode narrowing dyadic operations (e.g. VADDHN/VSUBHN):
   Dd = narrowed (Qn op Qm).  */
static void
do_neon_dyadic_narrow (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
  /* Operand sign is unimportant, and the U bit is part of the opcode,
     so force the operand type to integer.  */
  et.type = NT_integer;
  /* The size field encodes the (halved) destination element width.  */
  neon_mixed_length (et, et.size / 2);
}
19767
/* Encode VQDMULL-style saturating long multiplies; signed 16/32-bit types
   only, for both the scalar and the register form.  */
static void
do_neon_mul_sat_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}
19773
/* Encode VMULL.  The scalar form shares the generic long-MAC encoder;
   the register form additionally supports polynomial types (P8, and P64
   on ARMv8 crypto targets).  */
static void
do_neon_vmull (void)
{
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar_long ();
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);

      if (et.type == NT_poly)
	NEON_ENCODE (POLY, inst);
      else
	NEON_ENCODE (INTEGER, inst);

      /* For polynomial encoding the U bit must be zero, and the size must
	 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
	 obviously, as 0b10).  */
      if (et.size == 64)
	{
	  /* Check we're on the correct architecture.  */
	  if (!mark_feature_used (&fpu_crypto_ext_armv8))
	    inst.error =
	      _("Instruction form not available on this architecture.");

	  /* Force the size field to 0b10 (logbits (32)) for P64.  */
	  et.size = 32;
	}

      neon_mixed_length (et, et.size);
    }
}
19805
/* Encode VEXT (vector extract).  The assembly-level immediate counts
   elements; the instruction encodes a byte offset, so it is scaled by the
   element size first.  */
static void
do_neon_ext (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  /* Convert the element index into a byte offset.  */
  unsigned imm = (inst.operands[3].imm * et.size) / 8;

  /* The byte offset must lie within the source register(s): 8 bytes for a
     D register, 16 for a Q register.  */
  constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
	      _("shift out of range"));
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= imm << 8;

  neon_dp_fixup (&inst);
}
19827
/* Encode VREV16/VREV32/VREV64 (element reversal within regions).  */
static void
do_neon_rev (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
   return;

  enum neon_shape rs;
  /* MVE only has the Q-register form.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    rs = neon_select_shape (NS_QQ, NS_NULL);
  else
    rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);

  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);

  unsigned op = (inst.instruction >> 7) & 3;
  /* N (width of reversed regions) is encoded as part of the bitmask. We
     extract it here to check the elements to be reversed are smaller.
     Otherwise we'd get a reserved instruction.  */
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;

  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext) && elsize == 64
      && inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("Warning: 64-bit element size and same destination and source"
		 " operands makes instruction UNPREDICTABLE"));

  /* op == 3 (elsize 0) should be impossible for any opcode routed here.  */
  gas_assert (elsize != 0);
  constraint (et.size >= elsize,
	      _("elements must be smaller than reversal region"));
  neon_two_same (neon_quad (rs), 1, et.size);
}
19859
/* Encode VDUP: either duplicate a vector scalar (Dm[x]) across a vector,
   or duplicate an ARM core register across a vector.  */
static void
do_neon_dup (void)
{
  if (inst.operands[1].isscalar)
    {
      /* Scalar source form (Neon only).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1),
		  BAD_FPU);
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      /* The scalar index is shifted into the imm4 field above the size
	 marker bit.  */
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (SCALAR, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* ARM core register source form (Neon or MVE).  */
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_8 | N_16 | N_32 | N_KEY, N_EQK);
      if (rs == NS_QR)
	{
	  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH))
	    return;
	}
      else
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1),
		    BAD_FPU);

      /* SP/PC sources are discouraged (warn, don't reject) under MVE.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  if (inst.operands[1].reg == REG_SP)
	    as_tsktsk (MVE_BAD_SP);
	  else if (inst.operands[1].reg == REG_PC)
	    as_tsktsk (MVE_BAD_PC);
	}

      /* Duplicate ARM register to lanes of vector.  */
      NEON_ENCODE (ARMREG, inst);
      switch (et.size)
	{
	case 8:  inst.instruction |= 0x400000; break;
	case 16: inst.instruction |= 0x000020; break;
	case 32: inst.instruction |= 0x000000; break;
	default: break;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
	 variants, except for the condition field.  */
      do_vfp_cond_or_thumb ();
    }
}
19929
/* Encode the MVE forms of VMOV that transfer two GPRs to/from two lanes of
   a Q register (cases 16/17 of the big VMOV comment below).  TOQ is nonzero
   when the transfer is GPR -> Q register; it also determines which parsed
   operand slots hold the Q-lane operands vs. the GPRs.  */
static void
do_mve_mov (int toQ)
{
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    return;
  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = MVE_UNPREDICABLE_INSN;

  /* Operand slot indices: for the from-Q direction the GPRs come first.  */
  unsigned Rt = 0, Rt2 = 1, Q0 = 2, Q1 = 3;
  if (toQ)
    {
      Q0 = 0;
      Q1 = 1;
      Rt = 2;
      Rt2 = 3;
    }

  constraint (inst.operands[Q0].reg != inst.operands[Q1].reg + 2,
	      _("Index one must be [2,3] and index two must be two less than"
		" index one."))mistake_marker;
  constraint (inst.operands[Rt].reg == inst.operands[Rt2].reg,
	      _("General purpose registers may not be the same"));
  constraint (inst.operands[Rt].reg == REG_SP
	      || inst.operands[Rt2].reg == REG_SP,
	      BAD_SP);
  constraint (inst.operands[Rt].reg == REG_PC
	      || inst.operands[Rt2].reg == REG_PC,
	      BAD_PC);

  inst.instruction = 0xec000f00;
  /* NOTE(review): the Q-lane operand's reg field appears to pack both the
     register number and the lane index (hence the /32 and %4 extractions
     below) — confirm against the operand parser.  */
  inst.instruction |= HI1 (inst.operands[Q1].reg / 32) << 23;
  inst.instruction |= !!toQ << 20;
  inst.instruction |= inst.operands[Rt2].reg << 16;
  inst.instruction |= LOW4 (inst.operands[Q1].reg / 32) << 13;
  inst.instruction |= (inst.operands[Q1].reg % 4) << 4;
  inst.instruction |= inst.operands[Rt].reg;
}
19967
/* Encode MVE VMOVNB/VMOVNT (vector move and narrow, bottom/top).  */
static void
do_mve_movn (void)
{
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    return;

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  struct neon_type_el et = neon_check_type (2, NS_QQ, N_EQK, N_I16 | N_I32
					    | N_KEY);

  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  /* Size field encodes the narrowed (halved) element width.  */
  inst.instruction |= (neon_logbits (et.size) - 1) << 18;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.is_neon = 1;

}
19990
19991/* VMOV has particularly many variations. It can be one of:
19992     0. VMOV<c><q> <Qd>, <Qm>
19993     1. VMOV<c><q> <Dd>, <Dm>
19994   (Register operations, which are VORR with Rm = Rn.)
19995     2. VMOV<c><q>.<dt> <Qd>, #<imm>
19996     3. VMOV<c><q>.<dt> <Dd>, #<imm>
19997   (Immediate loads.)
19998     4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
19999   (ARM register to scalar.)
20000     5. VMOV<c><q> <Dm>, <Rd>, <Rn>
20001   (Two ARM registers to vector.)
20002     6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
20003   (Scalar to ARM register.)
20004     7. VMOV<c><q> <Rd>, <Rn>, <Dm>
20005   (Vector to two ARM registers.)
20006     8. VMOV.F32 <Sd>, <Sm>
20007     9. VMOV.F64 <Dd>, <Dm>
20008   (VFP register moves.)
20009    10. VMOV.F32 <Sd>, #imm
20010    11. VMOV.F64 <Dd>, #imm
20011   (VFP float immediate load.)
20012    12. VMOV <Rd>, <Sm>
20013   (VFP single to ARM reg.)
20014    13. VMOV <Sd>, <Rm>
20015   (ARM reg to VFP single.)
20016    14. VMOV <Rd>, <Re>, <Sn>, <Sm>
20017   (Two ARM regs to two VFP singles.)
20018    15. VMOV <Sd>, <Se>, <Rn>, <Rm>
20019   (Two VFP singles to two ARM regs.)
20020   16. VMOV<c> <Rt>, <Rt2>, <Qd[idx]>, <Qd[idx2]>
20021   17. VMOV<c> <Qd[idx]>, <Qd[idx2]>, <Rt>, <Rt2>
20022   18. VMOV<c>.<dt> <Rt>, <Qn[idx]>
20023   19. VMOV<c>.<dt> <Qd[idx]>, <Rt>
20024
20025   These cases can be disambiguated using neon_select_shape, except cases 1/9
20026   and 3/11 which depend on the operand type too.
20027
20028   All the encoded bits are hardcoded by this function.
20029
20030   Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
20031   Cases 5, 7 may be used with VFPv2 and above.
20032
20033   FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
20034   can specify a type where it doesn't make sense to, and is ignored).  */
20035
/* Encode all forms of VMOV; see the case-by-case comment above for the
   numbering used in the case labels below.  */
static void
do_neon_mov (void)
{
  enum neon_shape rs = neon_select_shape (NS_RRSS, NS_SSRR, NS_RRFF, NS_FFRR,
					  NS_DRR, NS_RRD, NS_QQ, NS_DD, NS_QI,
					  NS_DI, NS_SR, NS_RS, NS_FF, NS_FI,
					  NS_RF, NS_FR, NS_HR, NS_RH, NS_HI,
					  NS_NULL);
  struct neon_type_el et;
  const char *ldconst = 0;

  switch (rs)
    {
    case NS_DD:  /* case 1/9.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* It is not an error here if no type is given.  */
      inst.error = NULL;

      /* In MVE we interpret the following instructions as same, so ignoring
	 the following type (float) and size (64) checks.
	 a: VMOV<c><q> <Dd>, <Dm>
	 b: VMOV<c><q>.F64 <Dd>, <Dm>.  */
      if ((et.type == NT_float && et.size == 64)
	  || (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)))
	{
	  do_vfp_nsyn_opcode ("fcpyd");
	  break;
	}
      /* fall through.  */

    case NS_QQ:  /* case 0/1.  */
      {
	if (!check_simd_pred_availability (FALSE,
					   NEON_CHECK_CC | NEON_CHECK_ARCH))
	  return;
	/* The architecture manual I have doesn't explicitly state which
	   value the U bit should have for register->register moves, but
	   the equivalent VORR instruction has U = 0, so do that.  */
	inst.instruction = 0x0200110;
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	/* VORR with Rm == Rn: the source register is encoded twice.  */
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
	inst.instruction |= neon_quad (rs) << 6;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DI:  /* case 3/11.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* case 11 (fconstd).  */
	  ldconst = "fconstd";
	  goto encode_fconstd;
	}
      /* fall through.  */

    case NS_QI:  /* case 2/3.  */
      if (!check_simd_pred_availability (FALSE,
					 NEON_CHECK_CC | NEON_CHECK_ARCH))
	return;
      inst.instruction = 0x0800010;
      neon_move_immediate ();
      neon_dp_fixup (&inst);
      break;

    case NS_SR:  /* case 4.  */
      {
	unsigned bcdebits = 0;
	int logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

	/* .<size> is optional here, defaulting to .32. */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
	logsize = neon_logbits (et.size);

	if (et.size != 32)
	  {
	    if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
		&& vfp_or_neon_is_neon (NEON_CHECK_ARCH) == FAIL)
	      return;
	  }
	else
	  {
	    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1)
			&& !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
			_(BAD_FPU));
	  }

	if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	  {
	    if (inst.operands[1].reg == REG_SP)
	      as_tsktsk (MVE_BAD_SP);
	    else if (inst.operands[1].reg == REG_PC)
	      as_tsktsk (MVE_BAD_PC);
	  }
	/* isscalar == 1 is a D-register scalar (64-bit container);
	   otherwise an MVE Q-register scalar (128-bit) — TODO confirm
	   against the operand parser.  */
	unsigned size = inst.operands[0].isscalar == 1 ? 64 : 128;

	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= size / et.size, _("scalar index out of range"));


	/* bcdebits packs the opc1/opc2 lane-select fields.  */
	switch (et.size)
	  {
	  case 8:  bcdebits = 0x8; break;
	  case 16: bcdebits = 0x1; break;
	  case 32: bcdebits = 0x0; break;
	  default: ;
	  }

	bcdebits |= (x & ((1 << (3-logsize)) - 1)) << logsize;

	inst.instruction = 0xe000b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[1].reg << 12;
	inst.instruction |= (bcdebits & 3) << 5;
	inst.instruction |= ((bcdebits >> 2) & 3) << 21;
	inst.instruction |= (x >> (3-logsize)) << 16;
      }
      break;

    case NS_DRR:  /* case 5 (fmdrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
		  _(BAD_FPU));

      inst.instruction = 0xc400b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= LOW4 (inst.operands[0].reg);
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
      inst.instruction |= inst.operands[1].reg << 12;
      inst.instruction |= inst.operands[2].reg << 16;
      break;

    case NS_RS:  /* case 6.  */
      {
	unsigned logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
	unsigned abcdebits = 0;

	/* .<dt> is optional here, defaulting to .32. */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL,
			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
	logsize = neon_logbits (et.size);

	if (et.size != 32)
	  {
	    if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
		&& vfp_or_neon_is_neon (NEON_CHECK_CC
					| NEON_CHECK_ARCH) == FAIL)
	      return;
	  }
	else
	  {
	    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1)
			&& !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
			_(BAD_FPU));
	  }

	if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	  {
	    if (inst.operands[0].reg == REG_SP)
	      as_tsktsk (MVE_BAD_SP);
	    else if (inst.operands[0].reg == REG_PC)
	      as_tsktsk (MVE_BAD_PC);
	  }

	/* See the matching note in case NS_SR above.  */
	unsigned size = inst.operands[1].isscalar == 1 ? 64 : 128;

	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= size / et.size, _("scalar index out of range"));

	/* abcdebits packs U:opc1:opc2; the top bit distinguishes
	   signed/unsigned extraction for the sub-word sizes.  */
	switch (et.size)
	  {
	  case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
	  case 32: abcdebits = 0x00; break;
	  default: ;
	  }

	abcdebits |= (x & ((1 << (3-logsize)) - 1)) << logsize;
	inst.instruction = 0xe100b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[0].reg << 12;
	inst.instruction |= (abcdebits & 3) << 5;
	inst.instruction |= (abcdebits >> 2) << 21;
	inst.instruction |= (x >> (3-logsize)) << 16;
      }
      break;

    case NS_RRD:  /* case 7 (fmrrd).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
		  _(BAD_FPU));

      inst.instruction = 0xc500b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      break;

    case NS_FF:  /* case 8 (fcpys).  */
      do_vfp_nsyn_opcode ("fcpys");
      break;

    case NS_HI:
    case NS_FI:  /* case 10 (fconsts).  */
      ldconst = "fconsts";
    encode_fconstd:
      if (!inst.operands[1].immisfloat)
	{
	  unsigned new_imm;
	  /* Immediate has to fit in 8 bits so float is enough.  */
	  float imm = (float) inst.operands[1].imm;
	  memcpy (&new_imm, &imm, sizeof (float));
	  /* But the assembly may have been written to provide an integer
	     bit pattern that equates to a float, so check that the
	     conversion has worked.  */
	  if (is_quarter_float (new_imm))
	    {
	      if (is_quarter_float (inst.operands[1].imm))
		as_warn (_("immediate constant is valid both as a bit-pattern and a floating point value (using the fp value)"));

	      inst.operands[1].imm = new_imm;
	      inst.operands[1].immisfloat = 1;
	    }
	}

      if (is_quarter_float (inst.operands[1].imm))
	{
	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
	  do_vfp_nsyn_opcode (ldconst);

	  /* ARMv8.2 fp16 vmov.f16 instruction.  */
	  if (rs == NS_HI)
	    do_scalar_fp16_v82_encode ();
	}
      else
	first_error (_("immediate out of range"));
      break;

    case NS_RH:
    case NS_RF:  /* case 12 (fmrs).  */
      do_vfp_nsyn_opcode ("fmrs");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_RH)
	do_scalar_fp16_v82_encode ();
      break;

    case NS_HR:
    case NS_FR:  /* case 13 (fmsr).  */
      do_vfp_nsyn_opcode ("fmsr");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_HR)
	do_scalar_fp16_v82_encode ();
      break;

    case NS_RRSS:
      do_mve_mov (0);
      break;
    case NS_SSRR:
      do_mve_mov (1);
      break;

    /* The encoders for the fmrrs and fmsrr instructions expect three operands
       (one of which is a list), but we have parsed four.  Do some fiddling to
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
       expect.  */
    case NS_RRFF:  /* case 14 (fmrrs).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
		  _(BAD_FPU));
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[2].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmrrs");
      break;

    case NS_FFRR:  /* case 15 (fmsrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
		  _(BAD_FPU));
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    case NS_NULL:
      /* neon_select_shape has determined that the instruction
	 shape is wrong and has already set the error message.  */
      break;

    default:
      abort ();
    }
}
20368
static void
do_mve_movl (void)
{
  /* VMOVL.  If the operands are not the pure MVE form (two quad
     registers and no third operand), fall back to the generic Neon
     mov encoder with the unconditional encoding forced.  */
  if (!(inst.operands[0].present && inst.operands[0].isquad
      && inst.operands[1].present && inst.operands[1].isquad
      && !inst.operands[2].present))
    {
      inst.instruction = 0;
      inst.cond = 0xb;
      if (thumb_mode)
	set_pred_insn_type (INSIDE_IT_INSN);
      do_neon_mov ();
      return;
    }

  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    return;

  if (inst.cond != COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;

  /* MVE VMOVL widens 8-bit or 16-bit elements, signed or unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_QQ, N_EQK, N_S8 | N_U8
					    | N_S16 | N_U16 | N_KEY);

  /* Bit 28 is set for the unsigned variant.  */
  inst.instruction |= (et.type == NT_unsigned) << 28;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  /* Size field holds log2 (element size) + 1.  */
  inst.instruction |= (neon_logbits (et.size) + 1) << 19;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.is_neon = 1;
}
20401
static void
do_neon_rshift_round_imm (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
   return;

  enum neon_shape rs;
  struct neon_type_el et;

  /* MVE only provides the quad-register form; plain Neon also allows
     double registers.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      rs = neon_select_shape (NS_QQI, NS_NULL);
      et = neon_check_type (2, rs, N_EQK, N_SU_MVE | N_KEY);
    }
  else
    {
      rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
    }
  int imm = inst.operands[2].imm;

  /* imm == 0 case is encoded as VMOV for V{R}SHR.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      do_neon_mov ();
      return;
    }

  /* A right shift by the full element width is representable, hence
     the inclusive upper bound.  */
  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for shift"));
  /* The instruction encodes et.size - imm in the shift-amount field.  */
  neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
		  et.size - imm);
}
20436
20437static void
20438do_neon_movhf (void)
20439{
20440  enum neon_shape rs = neon_select_shape (NS_HH, NS_NULL);
20441  constraint (rs != NS_HH, _("invalid suffix"));
20442
20443  if (inst.cond != COND_ALWAYS)
20444    {
20445      if (thumb_mode)
20446	{
20447	  as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
20448		     " the behaviour is UNPREDICTABLE"));
20449	}
20450      else
20451	{
20452	  inst.error = BAD_COND;
20453	  return;
20454	}
20455    }
20456
20457  do_vfp_sp_monadic ();
20458
20459  inst.is_neon = 1;
20460  inst.instruction |= 0xf0000000;
20461}
20462
20463static void
20464do_neon_movl (void)
20465{
20466  struct neon_type_el et = neon_check_type (2, NS_QD,
20467    N_EQK | N_DBL, N_SU_32 | N_KEY);
20468  unsigned sizebits = et.size >> 3;
20469  inst.instruction |= sizebits << 19;
20470  neon_two_same (0, et.type == NT_unsigned, -1);
20471}
20472
20473static void
20474do_neon_trn (void)
20475{
20476  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20477  struct neon_type_el et = neon_check_type (2, rs,
20478    N_EQK, N_8 | N_16 | N_32 | N_KEY);
20479  NEON_ENCODE (INTEGER, inst);
20480  neon_two_same (neon_quad (rs), 1, et.size);
20481}
20482
20483static void
20484do_neon_zip_uzp (void)
20485{
20486  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20487  struct neon_type_el et = neon_check_type (2, rs,
20488    N_EQK, N_8 | N_16 | N_32 | N_KEY);
20489  if (rs == NS_DD && et.size == 32)
20490    {
20491      /* Special case: encode as VTRN.32 <Dd>, <Dm>.  */
20492      inst.instruction = N_MNEM_vtrn;
20493      do_neon_trn ();
20494      return;
20495    }
20496  neon_two_same (neon_quad (rs), 1, et.size);
20497}
20498
20499static void
20500do_neon_sat_abs_neg (void)
20501{
20502  if (!check_simd_pred_availability (FALSE, NEON_CHECK_CC | NEON_CHECK_ARCH))
20503    return;
20504
20505  enum neon_shape rs;
20506  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
20507    rs = neon_select_shape (NS_QQ, NS_NULL);
20508  else
20509    rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20510  struct neon_type_el et = neon_check_type (2, rs,
20511    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
20512  neon_two_same (neon_quad (rs), 1, et.size);
20513}
20514
20515static void
20516do_neon_pair_long (void)
20517{
20518  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20519  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
20520  /* Unsigned is encoded in OP field (bit 7) for these instruction.  */
20521  inst.instruction |= (et.type == NT_unsigned) << 7;
20522  neon_two_same (neon_quad (rs), 1, et.size);
20523}
20524
20525static void
20526do_neon_recip_est (void)
20527{
20528  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20529  struct neon_type_el et = neon_check_type (2, rs,
20530    N_EQK | N_FLT, N_F_16_32 | N_U32 | N_KEY);
20531  inst.instruction |= (et.type == NT_float) << 8;
20532  neon_two_same (neon_quad (rs), 1, et.size);
20533}
20534
20535static void
20536do_neon_cls (void)
20537{
20538  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
20539    return;
20540
20541  enum neon_shape rs;
20542  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
20543   rs = neon_select_shape (NS_QQ, NS_NULL);
20544  else
20545   rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20546
20547  struct neon_type_el et = neon_check_type (2, rs,
20548    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
20549  neon_two_same (neon_quad (rs), 1, et.size);
20550}
20551
20552static void
20553do_neon_clz (void)
20554{
20555  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
20556    return;
20557
20558  enum neon_shape rs;
20559  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
20560   rs = neon_select_shape (NS_QQ, NS_NULL);
20561  else
20562   rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20563
20564  struct neon_type_el et = neon_check_type (2, rs,
20565    N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
20566  neon_two_same (neon_quad (rs), 1, et.size);
20567}
20568
20569static void
20570do_neon_cnt (void)
20571{
20572  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20573  struct neon_type_el et = neon_check_type (2, rs,
20574    N_EQK | N_INT, N_8 | N_KEY);
20575  neon_two_same (neon_quad (rs), 1, et.size);
20576}
20577
20578static void
20579do_neon_swp (void)
20580{
20581  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20582  neon_two_same (neon_quad (rs), 1, -1);
20583}
20584
20585static void
20586do_neon_tbl_tbx (void)
20587{
20588  unsigned listlenbits;
20589  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
20590
20591  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
20592    {
20593      first_error (_("bad list length for table lookup"));
20594      return;
20595    }
20596
20597  listlenbits = inst.operands[1].imm - 1;
20598  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
20599  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
20600  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
20601  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
20602  inst.instruction |= LOW4 (inst.operands[2].reg);
20603  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
20604  inst.instruction |= listlenbits << 8;
20605
20606  neon_dp_fixup (&inst);
20607}
20608
/* Encode VLDM/VSTM (and the decrement-before forms VLDMDB/VSTMDB) for
   D-register lists; single-precision lists are handed off to
   do_vfp_nsyn_ldm_stm.  */
static void
do_neon_ldm_stm (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)
	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	      _(BAD_FPU));
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  /* The offset field counts words: two per D register.  */
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
	      _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
	      _("register list must contain at least 1 and at most 16 "
		"registers"));

  /* Rn (base register).  */
  inst.instruction |= inst.operands[0].reg << 16;
  /* W (writeback) bit.  */
  inst.instruction |= inst.operands[0].writeback << 21;
  /* Vd, split across bits [15:12] and bit 22.  */
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
20641
20642static void
20643do_vfp_nsyn_pop (void)
20644{
20645  nsyn_insert_sp ();
20646  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)) {
20647    return do_vfp_nsyn_opcode ("vldm");
20648  }
20649
20650  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd),
20651	      _(BAD_FPU));
20652
20653  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
20654	      _("register list must contain at least 1 and at most 16 "
20655		"registers"));
20656
20657  if (inst.operands[1].issingle)
20658    do_vfp_nsyn_opcode ("fldmias");
20659  else
20660    do_vfp_nsyn_opcode ("fldmiad");
20661}
20662
20663static void
20664do_vfp_nsyn_push (void)
20665{
20666  nsyn_insert_sp ();
20667  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)) {
20668    return do_vfp_nsyn_opcode ("vstmdb");
20669  }
20670
20671  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd),
20672	      _(BAD_FPU));
20673
20674  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
20675	      _("register list must contain at least 1 and at most 16 "
20676		"registers"));
20677
20678  if (inst.operands[1].issingle)
20679    do_vfp_nsyn_opcode ("fstmdbs");
20680  else
20681    do_vfp_nsyn_opcode ("fstmdbd");
20682}
20683
20684
20685static void
20686do_neon_ldr_str (void)
20687{
20688  int is_ldr = (inst.instruction & (1 << 20)) != 0;
20689
20690  /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
20691     And is UNPREDICTABLE in thumb mode.  */
20692  if (!is_ldr
20693      && inst.operands[1].reg == REG_PC
20694      && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
20695    {
20696      if (thumb_mode)
20697	inst.error = _("Use of PC here is UNPREDICTABLE");
20698      else if (warn_on_deprecated)
20699	as_tsktsk (_("Use of PC here is deprecated"));
20700    }
20701
20702  if (inst.operands[0].issingle)
20703    {
20704      if (is_ldr)
20705	do_vfp_nsyn_opcode ("flds");
20706      else
20707	do_vfp_nsyn_opcode ("fsts");
20708
20709      /* ARMv8.2 vldr.16/vstr.16 instruction.  */
20710      if (inst.vectype.el[0].size == 16)
20711	do_scalar_fp16_v82_encode ();
20712    }
20713  else
20714    {
20715      if (is_ldr)
20716	do_vfp_nsyn_opcode ("fldd");
20717      else
20718	do_vfp_nsyn_opcode ("fstd");
20719    }
20720}
20721
/* Encode the Thumb system-register forms of VLDR/VSTR.  */
static void
do_t_vldr_vstr_sysreg (void)
{
  int fp_vldr_bitno = 20, sysreg_vldr_bitno = 20;
  /* Bit 20 distinguishes load (VLDR) from store (VSTR).  */
  bfd_boolean is_vldr = ((inst.instruction & (1 << fp_vldr_bitno)) != 0);

  /* Use of PC is UNPREDICTABLE.  */
  if (inst.operands[1].reg == REG_PC)
    inst.error = _("Use of PC here is UNPREDICTABLE");

  /* Only immediate-offset addressing is accepted.  */
  if (inst.operands[1].immisreg)
    inst.error = _("instruction does not accept register index");

  if (!inst.operands[1].isreg)
    inst.error = _("instruction does not accept PC-relative addressing");

  /* The offset must fit in 7 bits.  */
  if (abs (inst.operands[1].imm) >= (1 << 7))
    inst.error = _("immediate value out of range");

  /* Base opcode for the system-register form.  */
  inst.instruction = 0xec000f80;
  if (is_vldr)
    inst.instruction |= 1 << sysreg_vldr_bitno;
  encode_arm_cp_address (1, TRUE, FALSE, BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM);
  /* System register number: low three bits in [15:13], bit 3 in bit 22.  */
  inst.instruction |= (inst.operands[0].imm & 0x7) << 13;
  inst.instruction |= (inst.operands[0].imm & 0x8) << 19;
}
20748
20749static void
20750do_vldr_vstr (void)
20751{
20752  bfd_boolean sysreg_op = !inst.operands[0].isreg;
20753
20754  /* VLDR/VSTR (System Register).  */
20755  if (sysreg_op)
20756    {
20757      if (!mark_feature_used (&arm_ext_v8_1m_main))
20758	as_bad (_("Instruction not permitted on this architecture"));
20759
20760      do_t_vldr_vstr_sysreg ();
20761    }
20762  /* VLDR/VSTR.  */
20763  else
20764    {
20765      if (!mark_feature_used (&fpu_vfp_ext_v1xd)
20766	  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
20767	as_bad (_("Instruction not permitted on this architecture"));
20768      do_neon_ldr_str ();
20769    }
20770}
20771
20772/* "interleave" version also handles non-interleaving register VLD1/VST1
20773   instructions.  */
20774
static void
do_neon_ld_st_interleave (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL,
					    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7,  -1, 0xa,  -1, 0x6,  -1, 0x2,  -1, /* VLD1 / VST1.  */
       -1,  -1, 0x8, 0x9,  -1,  -1, 0x3,  -1, /* VLD2 / VST2.  */
       -1,  -1,  -1,  -1, 0x4, 0x5,  -1,  -1, /* VLD3 / VST3.  */
       -1,  -1,  -1,  -1,  -1,  -1, 0x0, 0x1  /* VLD4 / VST4.  */
    };
  int typebits;

  if (et.type == NT_invtype)
    return;

  /* Map the :64/:128/:256 alignment specifier onto bits [5:4],
     validating it against the register-list length.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
	    && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 2;
	break;
      case 256:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 3;
	break;
      default:
      bad_alignment:
	first_error (_("bad alignment"));
	return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
	| (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));
  /* VLD2/3/4 (i.e. <n> != 1) do not support 64-bit elements.  */
  constraint (((inst.instruction >> 8) & 3) && et.size == 64,
	      BAD_EL_TYPE);

  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
20840
20841/* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
20842   *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
20843   otherwise. The variable arguments are a list of pairs of legal (size, align)
20844   values, terminated with -1.  */
20845
20846static int
20847neon_alignment_bit (int size, int align, int *do_alignment, ...)
20848{
20849  va_list ap;
20850  int result = FAIL, thissize, thisalign;
20851
20852  if (!inst.operands[1].immisalign)
20853    {
20854      *do_alignment = 0;
20855      return SUCCESS;
20856    }
20857
20858  va_start (ap, do_alignment);
20859
20860  do
20861    {
20862      thissize = va_arg (ap, int);
20863      if (thissize == -1)
20864	break;
20865      thisalign = va_arg (ap, int);
20866
20867      if (size == thissize && align == thisalign)
20868	result = SUCCESS;
20869    }
20870  while (result != SUCCESS);
20871
20872  va_end (ap);
20873
20874  if (result == SUCCESS)
20875    *do_alignment = 1;
20876  else
20877    first_error (_("unsupported alignment for instruction"));
20878
20879  return result;
20880}
20881
/* Encode the single-lane forms of VLD<n>/VST<n>.  */
static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;
  int logsize = neon_logbits (et.size);
  int align = inst.operands[1].imm >> 8;
  /* <n> minus one, taken from bits [9:8] of the initial bitmask.  */
  int n = (inst.instruction >> 8) & 3;
  /* Number of lanes in a 64-bit register for this element size.  */
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  /* Legal (size, align) pairs and their encodings differ per <n>.  */
  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 16,
		      16, 32, 32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
20966
20967/* Encode single n-element structure to all lanes VLD<n> instructions.  */
20968
static void
do_neon_ld_dup (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;

  if (et.type == NT_invtype)
    return;

  /* Bits [9:8] of the initial bitmask hold <n> minus one.  */
  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
	return;
      /* One or two registers may be listed; bit 5 distinguishes.  */
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
	{
	case 1: break;
	case 2: inst.instruction |= 1 << 5; break;
	default: first_error (_("bad list length")); return;
	}
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 8, 16, 16, 32, 32, 64,
				       -1);
      if (align_good == FAIL)
	return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
		  _("bad list length"));
      /* Bit 5 encodes a register stride of two.  */
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
	int align = inst.operands[1].imm >> 8;
	align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
					 16, 64, 32, 64, 32, 128, -1);
	if (align_good == FAIL)
	  return;
	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
		    _("bad list length"));
	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	  inst.instruction |= 1 << 5;
	/* 32-bit elements with 128-bit alignment get the special size
	   field value 0x3.  */
	if (et.size == 32 && align == 128)
	  inst.instruction |= 0x3 << 6;
	else
	  inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  /* Alignment flag (bit 4).  */
  inst.instruction |= do_alignment << 4;
}
21041
/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
   apart from bits [11:4]).  */
21044
21045static void
21046do_neon_ldx_stx (void)
21047{
21048  if (inst.operands[1].isreg)
21049    constraint (inst.operands[1].reg == REG_PC, BAD_PC);
21050
21051  switch (NEON_LANE (inst.operands[0].imm))
21052    {
21053    case NEON_INTERLEAVE_LANES:
21054      NEON_ENCODE (INTERLV, inst);
21055      do_neon_ld_st_interleave ();
21056      break;
21057
21058    case NEON_ALL_LANES:
21059      NEON_ENCODE (DUP, inst);
21060      if (inst.instruction == N_INV)
21061	{
21062	  first_error ("only loads support such operands");
21063	  break;
21064	}
21065      do_neon_ld_dup ();
21066      break;
21067
21068    default:
21069      NEON_ENCODE (LANE, inst);
21070      do_neon_ld_st_lane ();
21071    }
21072
21073  /* L bit comes from bit mask.  */
21074  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
21075  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
21076  inst.instruction |= inst.operands[1].reg << 16;
21077
21078  if (inst.operands[1].postind)
21079    {
21080      int postreg = inst.operands[1].imm & 0xf;
21081      constraint (!inst.operands[1].immisreg,
21082		  _("post-index must be a register"));
21083      constraint (postreg == 0xd || postreg == 0xf,
21084		  _("bad register for post-index"));
21085      inst.instruction |= postreg;
21086    }
21087  else
21088    {
21089      constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
21090      constraint (inst.relocs[0].exp.X_op != O_constant
21091		  || inst.relocs[0].exp.X_add_number != 0,
21092		  BAD_ADDR_MODE);
21093
21094      if (inst.operands[1].writeback)
21095	{
21096	  inst.instruction |= 0xd;
21097	}
21098      else
21099	inst.instruction |= 0xf;
21100    }
21101
21102  if (thumb_mode)
21103    inst.instruction |= 0xf9000000;
21104  else
21105    inst.instruction |= 0xf4000000;
21106}
21107
21108/* FP v8.  */
/* Common encoder for three-operand FP v8 VFP instructions.  RS is the
   shape chosen by the caller (via try_vfp_nsyn).  */
static void
do_vfp_nsyn_fpv8 (enum neon_shape rs)
{
  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  NEON_ENCODE (FPV8, inst);

  if (rs == NS_FFF || rs == NS_HHH)
    {
      do_vfp_sp_dyadic ();

      /* ARMv8.2 fp16 instruction.  */
      if (rs == NS_HHH)
	do_scalar_fp16_v82_encode ();
    }
  else
    do_vfp_dp_rd_rn_rm ();

  /* Bit 8 marks the double-precision variant.  */
  if (rs == NS_DDD)
    inst.instruction |= 0x100;

  /* FP v8 instructions are always unconditional (0xf condition field).  */
  inst.instruction |= 0xf0000000;
}
21136
21137static void
21138do_vsel (void)
21139{
21140  set_pred_insn_type (OUTSIDE_PRED_INSN);
21141
21142  if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
21143    first_error (_("invalid instruction shape"));
21144}
21145
static void
do_vmaxnm (void)
{
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    set_pred_insn_type (OUTSIDE_PRED_INSN);

  /* Try the VFP scalar encoding first.  */
  if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
    return;

  if (!check_simd_pred_availability (TRUE, NEON_CHECK_CC | NEON_CHECK_ARCH8))
    return;

  /* Otherwise use the vector encoding.  */
  neon_dyadic_misc (NT_untyped, N_F_16_32, 0);
}
21160
/* Common worker for the VRINT* family; MODE selects the rounding mode.
   Prefers the VFP scalar encoding when the operand types allow it,
   otherwise falls back to the Neon vector encoding.  */
static void
do_vrint_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et;

  if (rs == NS_NULL)
    return;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  /* First try the VFP interpretation of the operand types.  */
  et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY
			| N_VFP);
  if (et.type != NT_invtype)
    {
      /* VFP encodings.  */
      if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
	set_pred_insn_type (OUTSIDE_PRED_INSN);

      NEON_ENCODE (FPV8, inst);
      if (rs == NS_FF || rs == NS_HH)
	do_vfp_sp_monadic ();
      else
	do_vfp_dp_rd_rm ();

      /* Per-mode opcode bits; the a/n/p/m modes use the unconditional
	 (0xf) encoding space.  */
      switch (mode)
	{
	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
	default: abort ();
	}

      /* Bit 8 marks the double-precision variant.  */
      inst.instruction |= (rs == NS_DD) << 8;
      do_vfp_cond_or_thumb ();

      /* ARMv8.2 fp16 vrint instruction.  */
      if (rs == NS_HH)
      do_scalar_fp16_v82_encode ();
    }
  else
    {
      /* Neon encodings (or something broken...).  */
      inst.error = NULL;
      et = neon_check_type (2, rs, N_EQK, N_F_16_32 | N_KEY);

      if (et.type == NT_invtype)
	return;

      if (!check_simd_pred_availability (TRUE,
					 NEON_CHECK_CC | NEON_CHECK_ARCH8))
	return;

      NEON_ENCODE (FLOAT, inst);

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Mask off the original size bits and reencode them.  */
      inst.instruction = ((inst.instruction & 0xfff3ffff)
			  | neon_logbits (et.size) << 18);

      /* Rounding mode goes in bits [9:7] of the vector encoding; the
	 'r' mode has no vector form.  */
      switch (mode)
	{
	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
	default: abort ();
	}

      if (thumb_mode)
	inst.instruction |= 0xfc000000;
      else
	inst.instruction |= 0xf0000000;
    }
}
21252
/* VRINTX: delegate to do_vrint_1 with rounding mode 'x'.  */
static void
do_vrintx (void)
{
  do_vrint_1 (neon_cvt_mode_x);
}
21258
/* VRINTZ: delegate to do_vrint_1 with rounding mode 'z'.  */
static void
do_vrintz (void)
{
  do_vrint_1 (neon_cvt_mode_z);
}
21264
/* VRINTR: delegate to do_vrint_1 with rounding mode 'r'.  */
static void
do_vrintr (void)
{
  do_vrint_1 (neon_cvt_mode_r);
}
21270
/* VRINTA: delegate to do_vrint_1 with rounding mode 'a'.  */
static void
do_vrinta (void)
{
  do_vrint_1 (neon_cvt_mode_a);
}
21276
/* VRINTN: delegate to do_vrint_1 with rounding mode 'n'.  */
static void
do_vrintn (void)
{
  do_vrint_1 (neon_cvt_mode_n);
}
21282
/* VRINTP: delegate to do_vrint_1 with rounding mode 'p'.  */
static void
do_vrintp (void)
{
  do_vrint_1 (neon_cvt_mode_p);
}
21288
/* VRINTM: delegate to do_vrint_1 with rounding mode 'm'.  */
static void
do_vrintm (void)
{
  do_vrint_1 (neon_cvt_mode_m);
}
21294
/* Validate and encode the scalar operand OPND for VCMLA.  For 16-bit
   elements the index (restricted to 0 or 1) is folded into bit 4; for
   32-bit elements only index 0 is legal.  */
static unsigned
neon_scalar_for_vcmla (unsigned opnd, unsigned elsize)
{
  unsigned reg = NEON_SCALAR_REG (opnd);
  unsigned idx = NEON_SCALAR_INDEX (opnd);

  if (elsize == 32)
    {
      if (idx == 0)
	return reg;
    }
  else if (elsize == 16)
    {
      if (idx < 2 && reg < 16)
	return reg | (idx << 4);
    }

  first_error (_("scalar out of range"));
  return 0;
}
21309
/* Encode VCMLA.  The rotation (0/90/180/270) arrives as a constant
   expression in inst.relocs[0].  */
static void
do_vcmla (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext)
	      && (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8)
		  || !mark_feature_used (&arm_ext_v8_3)), (BAD_FPU));
  constraint (inst.relocs[0].exp.X_op != O_constant,
	      _("expression too complex"));
  unsigned rot = inst.relocs[0].exp.X_add_number;
  constraint (rot != 0 && rot != 90 && rot != 180 && rot != 270,
	      _("immediate out of range"));
  /* Encode the rotation as 0..3.  */
  rot /= 90;

  if (!check_simd_pred_availability (TRUE,
				     NEON_CHECK_ARCH8 | NEON_CHECK_CC))
    return;

  if (inst.operands[2].isscalar)
    {
      /* Indexed (by-scalar) form; not available with MVE.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
	first_error (_("invalid instruction shape"));
      enum neon_shape rs = neon_select_shape (NS_DDSI, NS_QQSI, NS_NULL);
      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				       N_KEY | N_F16 | N_F32).size;
      unsigned m = neon_scalar_for_vcmla (inst.operands[2].reg, size);
      inst.is_neon = 1;
      inst.instruction = 0xfe000800;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
      inst.instruction |= HI1 (inst.operands[1].reg) << 7;
      inst.instruction |= LOW4 (m);
      inst.instruction |= HI1 (m) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= rot << 20;
      inst.instruction |= (size == 32) << 23;
    }
  else
    {
      /* Vector form; MVE only allows quad registers.  */
      enum neon_shape rs;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
	rs = neon_select_shape (NS_QQQI, NS_NULL);
      else
	rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);

      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				       N_KEY | N_F16 | N_F32).size;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext) && size == 32
	  && (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg))
	as_tsktsk (BAD_MVE_SRCDEST);

      neon_three_same (neon_quad (rs), 0, -1);
      inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
      inst.instruction |= 0xfc200800;
      inst.instruction |= rot << 23;
      inst.instruction |= (size == 32) << 20;
    }
}
21369
static void
do_vcadd (void)
{
  /* VCADD -- vector complex add with rotation.  Present both in the
     Armv8.3-A Advanced SIMD extension (floating point only) and in MVE
     (floating point and integer element types).  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
	      && (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8)
		  || !mark_feature_used (&arm_ext_v8_3)), (BAD_FPU));
  constraint (inst.relocs[0].exp.X_op != O_constant,
	      _("expression too complex"));

  /* Only rotations of 90 and 270 degrees are valid for VCADD.  */
  unsigned rot = inst.relocs[0].exp.X_add_number;
  constraint (rot != 90 && rot != 270, _("immediate out of range"));
  enum neon_shape rs;
  struct neon_type_el et;
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      /* Plain Neon form: D or Q registers, half/single precision FP.  */
      rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
      et = neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_F16 | N_F32);
    }
  else
    {
      /* MVE form: Q registers only; integer element sizes also allowed.  */
      rs = neon_select_shape (NS_QQQI, NS_NULL);
      et = neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_F16 | N_F32 | N_I8
			    | N_I16 | N_I32);
      if (et.size == 32 && inst.operands[0].reg == inst.operands[2].reg)
	as_tsktsk (_("Warning: 32-bit element size and same first and third "
		     "operand makes instruction UNPREDICTABLE"));
    }

  if (et.type == NT_invtype)
    return;

  if (!check_simd_pred_availability (et.type == NT_float,
				     NEON_CHECK_ARCH8 | NEON_CHECK_CC))
    return;

  if (et.type == NT_float)
    {
      /* FP form: start from the Neon three-same encoding, then rewrite
	 the top byte for the complex-add opcode space.  */
      neon_three_same (neon_quad (rs), 0, -1);
      inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
      inst.instruction |= 0xfc800800;
      inst.instruction |= (rot == 270) << 24;
      inst.instruction |= (et.size == 32) << 20;
    }
  else
    {
      /* Integer form only exists in MVE; encode all fields by hand.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
      inst.instruction = 0xfe000f00;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= neon_logbits (et.size) << 20;
      inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= (rot == 270) << 12;
      inst.instruction |= HI1 (inst.operands[1].reg) << 7;
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.is_neon = 1;
    }
}
21428
/* Dot Product instructions encoding support.  */

/* Encode a Neon dot product instruction (VSDOT/VUDOT).  UNSIGNED_P
   selects the unsigned (non-zero) or signed (zero) variant; it is
   encoded in the "U" bit and selects the U8/S8 element type check.  */

static void
do_neon_dotproduct (int unsigned_p)
{
  enum neon_shape rs;
  unsigned scalar_oprd2 = 0;
  int high8;

  if (inst.cond != COND_ALWAYS)
    as_warn (_("Dot Product instructions cannot be conditional,  the behaviour "
	       "is UNPREDICTABLE"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));

  /* Dot Product instructions are in three-same D/Q register format or the third
     operand can be a scalar index register.  */
  if (inst.operands[2].isscalar)
    {
      scalar_oprd2 = neon_scalar_for_mul (inst.operands[2].reg, 32);
      high8 = 0xfe000000;
      rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
    }
  else
    {
      high8 = 0xfc000000;
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
    }

  if (unsigned_p)
    neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_U8);
  else
    neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_S8);

  /* The "U" bit in traditional Three Same encoding is fixed to 0 for Dot
     Product instruction, so we pass 0 as the "ubit" parameter.  And the
     "Size" field are fixed to 0x2, so we pass 32 as the "size" parameter.  */
  neon_three_same (neon_quad (rs), 0, 32);

  /* Undo neon_dp_fixup.  Dot Product instructions are using a slightly
     different NEON three-same encoding.  */
  inst.instruction &= 0x00ffffff;
  inst.instruction |= high8;
  /* Encode 'U' bit which indicates signedness.  */
  inst.instruction |= (unsigned_p ? 1 : 0) << 4;
  /* Re-encode operand2 if it's indexed scalar operand.  What has been encoded
     from inst.operand[2].reg in neon_three_same is GAS's internal encoding, not
     the instruction encoding.  */
  if (inst.operands[2].isscalar)
    {
      inst.instruction &= 0xffffffd0;
      inst.instruction |= LOW4 (scalar_oprd2);
      inst.instruction |= HI1 (scalar_oprd2) << 5;
    }
}
21485
/* Dot Product instructions for signed integer.  */

static void
do_neon_dotproduct_s (void)
{
  /* VSDOT: signed variant, "U" bit clear.  */
  return do_neon_dotproduct (0);
}
21493
/* Dot Product instructions for unsigned integer.  */

static void
do_neon_dotproduct_u (void)
{
  /* VUDOT: unsigned variant, "U" bit set.  */
  return do_neon_dotproduct (1);
}
21501
/* Encode VUSDOT (mixed-sign dot product), in both the indexed-scalar and
   plain vector register forms.  */

static void
do_vusdot (void)
{
  enum neon_shape rs;
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  if (inst.operands[2].isscalar)
    {
      rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_S8 | N_KEY);

      inst.instruction |= (1 << 25);
      /* GAS packs an indexed scalar as (register << 4) | index; split the
	 two halves back out.  */
      int index = inst.operands[2].reg & 0xf;
      constraint ((index != 1 && index != 0), _("index must be 0 or 1"));
      inst.operands[2].reg >>= 4;
      constraint (!(inst.operands[2].reg < 16),
		  _("indexed register must be less than 16"));
      neon_three_args (rs == NS_QQS);
      inst.instruction |= (index << 5);
    }
  else
    {
      inst.instruction |= (1 << 21);
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_S8 | N_KEY);
      neon_three_args (rs == NS_QQQ);
    }
}
21529
/* Encode VSUDOT.  Unlike VUSDOT, only the indexed-scalar form exists, so
   there is no vector-register branch here.  */

static void
do_vsudot (void)
{
  enum neon_shape rs;
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  if (inst.operands[2].isscalar)
    {
      rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_U8 | N_KEY);

      inst.instruction |= (1 << 25);
      /* GAS packs an indexed scalar as (register << 4) | index; split the
	 two halves back out.  */
      int index = inst.operands[2].reg & 0xf;
      constraint ((index != 1 && index != 0), _("index must be 0 or 1"));
      inst.operands[2].reg >>= 4;
      constraint (!(inst.operands[2].reg < 16),
		  _("indexed register must be less than 16"));
      neon_three_args (rs == NS_QQS);
      inst.instruction |= (index << 5);
    }
}
21550
/* Encode VSMMLA (signed 8-bit integer matrix multiply-accumulate);
   Q registers only.  */

static void
do_vsmmla (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_EQK, N_EQK, N_S8 | N_KEY);

  set_pred_insn_type (OUTSIDE_PRED_INSN);

  neon_three_args (1);

}
21562
/* Encode VUMMLA (unsigned 8-bit integer matrix multiply-accumulate);
   Q registers only.  */

static void
do_vummla (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_EQK, N_EQK, N_U8 | N_KEY);

  set_pred_insn_type (OUTSIDE_PRED_INSN);

  neon_three_args (1);

}
21574
/* Crypto v1 instructions.  */

/* Common encoder for the two-register crypto instructions (AES*, some
   SHA1/SHA256 forms).  ELTTYPE is the required element type of the
   second operand; OP selects the instruction within the group and is
   placed in bits [7:6], or omitted entirely when OP is -1.  */
static void
do_crypto_2op_1 (unsigned elttype, int op)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);

  if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
      == NT_invtype)
    return;

  /* Type check passed; clear any error a failed earlier parse left.  */
  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  if (op != -1)
    inst.instruction |= op << 6;

  /* The top byte differs between the Thumb and ARM encodings.  */
  if (thumb_mode)
    inst.instruction |= 0xfc000000;
  else
    inst.instruction |= 0xf0000000;
}
21600
/* Common encoder for the three-register crypto instructions (SHA1/SHA256
   three-operand forms).  U goes into the "U" bit of the three-same
   encoding and OP selects the size field (8 << OP).  */
static void
do_crypto_3op_1 (int u, int op)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);

  if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
		       N_32 | N_UNT | N_KEY).type == NT_invtype)
    return;

  /* Type check passed; clear any error a failed earlier parse left.  */
  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  neon_three_same (1, u, 8 << op);
}
21615
static void
do_aese (void)
{
  /* AESE -- AES single-round encryption (crypto 2-op, op 0).  */
  do_crypto_2op_1 (N_8, 0);
}
21621
static void
do_aesd (void)
{
  /* AESD -- AES single-round decryption (crypto 2-op, op 1).  */
  do_crypto_2op_1 (N_8, 1);
}
21627
static void
do_aesmc (void)
{
  /* AESMC -- AES mix columns (crypto 2-op, op 2).  */
  do_crypto_2op_1 (N_8, 2);
}
21633
static void
do_aesimc (void)
{
  /* AESIMC -- AES inverse mix columns (crypto 2-op, op 3).  */
  do_crypto_2op_1 (N_8, 3);
}
21639
static void
do_sha1c (void)
{
  /* SHA1C -- SHA1 hash update, choose (crypto 3-op, u=0 op=0).  */
  do_crypto_3op_1 (0, 0);
}
21645
static void
do_sha1p (void)
{
  /* SHA1P -- SHA1 hash update, parity (crypto 3-op, u=0 op=1).  */
  do_crypto_3op_1 (0, 1);
}
21651
static void
do_sha1m (void)
{
  /* SHA1M -- SHA1 hash update, majority (crypto 3-op, u=0 op=2).  */
  do_crypto_3op_1 (0, 2);
}
21657
static void
do_sha1su0 (void)
{
  /* SHA1SU0 -- SHA1 schedule update 0 (crypto 3-op, u=0 op=3).  */
  do_crypto_3op_1 (0, 3);
}
21663
static void
do_sha256h (void)
{
  /* SHA256H -- SHA256 hash update part 1 (crypto 3-op, u=1 op=0).  */
  do_crypto_3op_1 (1, 0);
}
21669
static void
do_sha256h2 (void)
{
  /* SHA256H2 -- SHA256 hash update part 2 (crypto 3-op, u=1 op=1).  */
  do_crypto_3op_1 (1, 1);
}
21675
static void
do_sha256su1 (void)
{
  /* SHA256SU1 -- SHA256 schedule update 1 (crypto 3-op, u=1 op=2).  */
  do_crypto_3op_1 (1, 2);
}
21681
static void
do_sha1h (void)
{
  /* SHA1H -- SHA1 fixed rotate; op -1 leaves bits [7:6] untouched.  */
  do_crypto_2op_1 (N_32, -1);
}
21687
static void
do_sha1su1 (void)
{
  /* SHA1SU1 -- SHA1 schedule update 1 (crypto 2-op, op 0).  */
  do_crypto_2op_1 (N_32, 0);
}
21693
static void
do_sha256su0 (void)
{
  /* SHA256SU0 -- SHA256 schedule update 0 (crypto 2-op, op 1).  */
  do_crypto_2op_1 (N_32, 1);
}
21699
/* Common encoder for the CRC32 family.  POLY selects the polynomial
   (0 = CRC-32, 1 = CRC-32C) and SZ the operand size field
   (0 = byte, 1 = halfword, 2 = word).  Field positions differ between
   the ARM and Thumb encodings, hence the thumb_mode shifts.  */
static void
do_crc32_1 (unsigned int poly, unsigned int sz)
{
  unsigned int Rd = inst.operands[0].reg;
  unsigned int Rn = inst.operands[1].reg;
  unsigned int Rm = inst.operands[2].reg;

  set_pred_insn_type (OUTSIDE_PRED_INSN);
  inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
  inst.instruction |= LOW4 (Rn) << 16;
  inst.instruction |= LOW4 (Rm);
  inst.instruction |= sz << (thumb_mode ? 4 : 21);
  inst.instruction |= poly << (thumb_mode ? 20 : 9);

  /* Using the PC is architecturally UNPREDICTABLE; warn but encode.  */
  if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
    as_warn (UNPRED_REG ("r15"));
}
21717
static void
do_crc32b (void)
{
  /* CRC32B -- CRC-32 polynomial, byte.  */
  do_crc32_1 (0, 0);
}
21723
static void
do_crc32h (void)
{
  /* CRC32H -- CRC-32 polynomial, halfword.  */
  do_crc32_1 (0, 1);
}
21729
static void
do_crc32w (void)
{
  /* CRC32W -- CRC-32 polynomial, word.  */
  do_crc32_1 (0, 2);
}
21735
static void
do_crc32cb (void)
{
  /* CRC32CB -- CRC-32C polynomial, byte.  */
  do_crc32_1 (1, 0);
}
21741
static void
do_crc32ch (void)
{
  /* CRC32CH -- CRC-32C polynomial, halfword.  */
  do_crc32_1 (1, 1);
}
21747
static void
do_crc32cw (void)
{
  /* CRC32CW -- CRC-32C polynomial, word.  */
  do_crc32_1 (1, 2);
}
21753
static void
do_vjcvt (void)
{
  /* VJCVT -- Javascript-style double to signed 32-bit conversion
     (Armv8.3-A), reusing the ordinary VFP double/single conversion
     encoding path.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
	      _(BAD_FPU));
  neon_check_type (2, NS_FD, N_S32, N_F64);
  do_vfp_sp_dp_cvt ();
  do_vfp_cond_or_thumb ();
}
21763
/* Encode VDOT (BFloat16 dot product), in both the indexed-scalar and
   plain vector register forms.  */

static void
do_vdot (void)
{
  enum neon_shape rs;
  constraint (!mark_feature_used (&fpu_neon_ext_armv8), _(BAD_FPU));
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  if (inst.operands[2].isscalar)
    {
      rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_BF16 | N_KEY);

      inst.instruction |= (1 << 25);
      /* GAS packs an indexed scalar as (register << 4) | index; split the
	 two halves back out.  */
      int index = inst.operands[2].reg & 0xf;
      constraint ((index != 1 && index != 0), _("index must be 0 or 1"));
      inst.operands[2].reg >>= 4;
      constraint (!(inst.operands[2].reg < 16),
		  _("indexed register must be less than 16"));
      neon_three_args (rs == NS_QQS);
      inst.instruction |= (index << 5);
    }
  else
    {
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_BF16 | N_KEY);
      neon_three_args (rs == NS_QQQ);
    }
}
21791
/* Encode VMMLA (BFloat16 matrix multiply-accumulate); Q registers
   only.  */

static void
do_vmmla (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_EQK, N_EQK, N_BF16 | N_KEY);

  constraint (!mark_feature_used (&fpu_neon_ext_armv8), _(BAD_FPU));
  set_pred_insn_type (OUTSIDE_PRED_INSN);

  neon_three_args (1);
}
21803
21804
21805/* Overall per-instruction processing.	*/
21806
/* We need to be able to fix up arbitrary expressions in some statements.
   This is so that we can handle symbols that are an arbitrary distance from
   the pc.  The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
   which returns part of an address in a form which will be valid for
   a data instruction.	We do this by pushing the expression into a symbol
   in the expr_section, and creating a fix for that.  */

/* Create a fixup for EXP at WHERE in FRAG, SIZE bytes wide.  PC_REL and
   RELOC are passed through to the generic fix machinery.  */
static void
fix_new_arm (fragS *	   frag,
	     int	   where,
	     short int	   size,
	     expressionS * exp,
	     int	   pc_rel,
	     int	   reloc)
{
  fixS *	   new_fix;

  switch (exp->X_op)
    {
    case O_constant:
      if (pc_rel)
	{
	  /* Create an absolute valued symbol, so we have something to
	     refer to in the object file.  Unfortunately for us, gas's
	     generic expression parsing will already have folded out
	     any use of .set foo/.type foo %function that may have
	     been used to set type information of the target location,
	     that's being specified symbolically.  We have to presume
	     the user knows what they are doing.  */
	  char name[16 + 8];
	  symbolS *symbol;

	  sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);

	  symbol = symbol_find_or_make (name);
	  S_SET_SEGMENT (symbol, absolute_section);
	  symbol_set_frag (symbol, &zero_address_frag);
	  S_SET_VALUE (symbol, exp->X_add_number);
	  /* Rewrite the expression to refer to the new symbol, then fall
	     through to the O_symbol case to emit the fix.  */
	  exp->X_op = O_symbol;
	  exp->X_add_symbol = symbol;
	  exp->X_add_number = 0;
	}
      /* FALLTHROUGH */
    case O_symbol:
    case O_add:
    case O_subtract:
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
			     (enum bfd_reloc_code_real) reloc);
      break;

    default:
      /* Anything more complex: wrap the expression in a symbol first.  */
      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
				  pc_rel, (enum bfd_reloc_code_real) reloc);
      break;
    }

  /* Mark whether the fix is to a THUMB instruction, or an ARM
     instruction.  */
  new_fix->tc_fix_data = thumb_mode;
}
21867
21868/* Create a frg for an instruction requiring relaxation.  */
21869static void
21870output_relax_insn (void)
21871{
21872  char * to;
21873  symbolS *sym;
21874  int offset;
21875
21876  /* The size of the instruction is unknown, so tie the debug info to the
21877     start of the instruction.  */
21878  dwarf2_emit_insn (0);
21879
21880  switch (inst.relocs[0].exp.X_op)
21881    {
21882    case O_symbol:
21883      sym = inst.relocs[0].exp.X_add_symbol;
21884      offset = inst.relocs[0].exp.X_add_number;
21885      break;
21886    case O_constant:
21887      sym = NULL;
21888      offset = inst.relocs[0].exp.X_add_number;
21889      break;
21890    default:
21891      sym = make_expr_symbol (&inst.relocs[0].exp);
21892      offset = 0;
21893      break;
21894  }
21895  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
21896		 inst.relax, sym, offset, NULL/*offset, opcode*/);
21897  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
21898}
21899
21900/* Write a 32-bit thumb instruction to buf.  */
21901static void
21902put_thumb32_insn (char * buf, unsigned long insn)
21903{
21904  md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
21905  md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
21906}
21907
/* Emit the instruction currently held in the global INST into the
   output frag, together with any fixups it requires.  STR is the
   original source line and is used only for diagnostics.  */
static void
output_inst (const char * str)
{
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  if (inst.relax)
    {
      /* Size not yet known; emit a relaxable frag instead.  */
      output_relax_insn ();
      return;
    }
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      /* 32-bit Thumb instruction: two halfwords, high halfword first.  */
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* Double-length ARM instruction: same word emitted twice.  */
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  /* Emit a fixup for each pending relocation on this instruction.  */
  int r;
  for (r = 0; r < ARM_IT_MAX_RELOCS; r++)
    {
      if (inst.relocs[r].type != BFD_RELOC_UNUSED)
	fix_new_arm (frag_now, to - frag_now->fr_literal,
		     inst.size, & inst.relocs[r].exp, inst.relocs[r].pc_rel,
		     inst.relocs[r].type);
    }

  dwarf2_emit_insn (inst.size);
}
21958
21959static char *
21960output_it_inst (int cond, int mask, char * to)
21961{
21962  unsigned long instruction = 0xbf00;
21963
21964  mask &= 0xf;
21965  instruction |= mask;
21966  instruction |= cond << 4;
21967
21968  if (to == NULL)
21969    {
21970      to = frag_more (2);
21971#ifdef OBJ_ELF
21972      dwarf2_emit_insn (2);
21973#endif
21974    }
21975
21976  md_number_to_chars (to, instruction, 2);
21977
21978  return to;
21979}
21980
/* Tag values used in struct asm_opcode's tag field.  These describe
   where (if anywhere) a conditional affix may appear in a mnemonic.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a scalar
			   conditional suffix, others place 0xF where the
			   condition field would be, others take a vector
			   conditional suffix.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs. */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
22015
22016/* Subroutine of md_assemble, responsible for looking up the primary
22017   opcode from the mnemonic the user wrote.  STR points to the
22018   beginning of the mnemonic.
22019
22020   This is not simply a hash table lookup, because of conditional
22021   variants.  Most instructions have conditional variants, which are
22022   expressed with a _conditional affix_ to the mnemonic.  If we were
22023   to encode each conditional variant as a literal string in the opcode
22024   table, it would have approximately 20,000 entries.
22025
22026   Most mnemonics take this affix as a suffix, and in unified syntax,
22027   'most' is upgraded to 'all'.  However, in the divided syntax, some
22028   instructions take the affix as an infix, notably the s-variants of
22029   the arithmetic instructions.  Of those instructions, all but six
22030   have the infix appear after the third character of the mnemonic.
22031
22032   Accordingly, the algorithm for looking up primary opcodes given
22033   an identifier is:
22034
22035   1. Look up the identifier in the opcode table.
22036      If we find a match, go to step U.
22037
22038   2. Look up the last two characters of the identifier in the
22039      conditions table.  If we find a match, look up the first N-2
22040      characters of the identifier in the opcode table.  If we
22041      find a match, go to step CE.
22042
22043   3. Look up the fourth and fifth characters of the identifier in
22044      the conditions table.  If we find a match, extract those
22045      characters from the identifier, and look up the remaining
22046      characters in the opcode table.  If we find a match, go
22047      to step CM.
22048
22049   4. Fail.
22050
22051   U. Examine the tag field of the opcode structure, in case this is
22052      one of the six instructions with its conditional infix in an
22053      unusual place.  If it is, the tag tells us where to find the
22054      infix; look it up in the conditions table and set inst.cond
22055      accordingly.  Otherwise, this is an unconditional instruction.
22056      Again set inst.cond accordingly.  Return the opcode structure.
22057
22058  CE. Examine the tag field to make sure this is an instruction that
22059      should receive a conditional suffix.  If it is not, fail.
22060      Otherwise, set inst.cond from the suffix we already looked up,
22061      and return the opcode structure.
22062
22063  CM. Examine the tag field to make sure this is an instruction that
22064      should receive a conditional infix after the third character.
22065      If it is not, fail.  Otherwise, undo the edits to the current
22066      line of input and proceed as for case CE.  */
22067
/* Look up the opcode for the mnemonic at *STR, handling conditional
   suffixes/infixes as described in the comment above.  On success,
   advance *STR past the mnemonic (and any width/type suffix), set
   inst.cond, and return the opcode entry; return NULL on failure.  */
static const struct asm_opcode *
opcode_lookup (char **str)
{
  char *end, *base;
  char *affix;
  const struct asm_opcode *opcode;
  const struct asm_cond *cond;
  char save[2];

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.' (in unified mode, or for Neon/VFP instructions), or end of string.  */
  for (base = end = *str; *end != '\0'; end++)
    if (*end == ' ' || *end == '.')
      break;

  if (end == base)
    return NULL;

  /* Handle a possible width suffix and/or Neon type suffix.  */
  if (end[0] == '.')
    {
      int offset = 2;

      /* The .w and .n suffixes are only valid if the unified syntax is in
	 use.  */
      if (unified_syntax && end[1] == 'w')
	inst.size_req = 4;
      else if (unified_syntax && end[1] == 'n')
	inst.size_req = 2;
      else
	offset = 0;

      inst.vectype.elems = 0;

      *str = end + offset;

      if (end[offset] == '.')
	{
	  /* See if we have a Neon type suffix (possible in either unified or
	     non-unified ARM syntax mode).  */
	  if (parse_neon_type (&inst.vectype, str) == FAIL)
	    return NULL;
	}
      else if (end[offset] != '\0' && end[offset] != ' ')
	return NULL;
    }
  else
    *str = end;

  /* Look for unaffixed or special-case affixed mnemonic.  */
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    end - base);
  if (opcode)
    {
      /* step U */
      if (opcode->tag < OT_odd_infix_0)
	{
	  inst.cond = COND_ALWAYS;
	  return opcode;
	}

      /* One of the rare mnemonics with an infix in an unusual place;
	 the tag encodes where the two-character condition lives.  */
      if (warn_on_deprecated && unified_syntax)
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
      affix = base + (opcode->tag - OT_odd_infix_0);
      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
      gas_assert (cond);

      inst.cond = cond->value;
      return opcode;
    }
 if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
   {
    /* A one-character vector predication suffix needs at least one
       character of mnemonic in front of it.
     */
    if (end - base < 2)
      return NULL;
     affix = end - 1;
     cond = (const struct asm_cond *) hash_find_n (arm_vcond_hsh, affix, 1);
     opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						      affix - base);
     /* If this opcode can not be vector predicated then don't accept it with a
	vector predication code.  */
     if (opcode && !opcode->mayBeVecPred)
       opcode = NULL;
   }
  if (!opcode || !cond)
    {
      /* A two-character condition suffix needs at least one character
	 of mnemonic in front of it.  */
      if (end - base < 3)
	return NULL;

      /* Look for suffixed mnemonic.  */
      affix = end - 2;
      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
      opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
							affix - base);
    }

  if (opcode && cond)
    {
      /* step CE */
      switch (opcode->tag)
	{
	case OT_cinfix3_legacy:
	  /* Ignore conditional suffixes matched on infix only mnemonics.  */
	  break;

	case OT_cinfix3:
	case OT_cinfix3_deprecated:
	case OT_odd_infix_unc:
	  if (!unified_syntax)
	    return NULL;
	  /* Fall through.  */

	case OT_csuffix:
	case OT_csuffixF:
	case OT_csuf_or_in3:
	  inst.cond = cond->value;
	  return opcode;

	case OT_unconditional:
	case OT_unconditionalF:
	  if (thumb_mode)
	    inst.cond = cond->value;
	  else
	    {
	      /* Delayed diagnostic.  */
	      inst.error = BAD_COND;
	      inst.cond = COND_ALWAYS;
	    }
	  return opcode;

	default:
	  return NULL;
	}
    }

  /* Cannot have a usual-position infix on a mnemonic of less than
     six characters (five would be a suffix).  */
  if (end - base < 6)
    return NULL;

  /* Look for infixed mnemonic in the usual position.  */
  affix = base + 3;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  if (!cond)
    return NULL;

  /* Temporarily splice the infix out of the mnemonic, look the result
     up, then restore the buffer exactly as it was.  */
  memcpy (save, affix, 2);
  memmove (affix, affix + 2, (end - affix) - 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    (end - base) - 2);
  memmove (affix + 2, affix, (end - affix) - 2);
  memcpy (affix, save, 2);

  if (opcode
      && (opcode->tag == OT_cinfix3
	  || opcode->tag == OT_cinfix3_deprecated
	  || opcode->tag == OT_csuf_or_in3
	  || opcode->tag == OT_cinfix3_legacy))
    {
      /* Step CM.  */
      if (warn_on_deprecated && unified_syntax
	  && (opcode->tag == OT_cinfix3
	      || opcode->tag == OT_cinfix3_deprecated))
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));

      inst.cond = cond->value;
      return opcode;
    }

  return NULL;
}
22242
/* This function generates an initial IT instruction, leaving its block
   virtually open for the new instructions. Eventually,
   the mask will be updated by now_pred_add_mask () each time
   a new instruction needs to be included in the IT block.
   Finally, the block is closed with close_automatic_it_block ().
   The block closure can be requested either from md_assemble (),
   a tencode (), or due to a label hook.  */

static void
new_automatic_it_block (int cond)
{
  now_pred.state = AUTOMATIC_PRED_BLOCK;
  /* 0x18 is the IT mask for a single-instruction block; it is refined
     by now_pred_add_mask () as instructions are added.  */
  now_pred.mask = 0x18;
  now_pred.cc = cond;
  now_pred.block_length = 1;
  mapping_state (MAP_THUMB);
  /* Remember where the IT instruction was emitted so the mask can be
     patched in place later.  */
  now_pred.insn = output_it_inst (cond, now_pred.mask, NULL);
  now_pred.warn_deprecated = FALSE;
  now_pred.insn_cond = TRUE;
}
22263
22264/* Close an automatic IT block.
22265   See comments in new_automatic_it_block ().  */
22266
22267static void
22268close_automatic_it_block (void)
22269{
22270  now_pred.mask = 0x10;
22271  now_pred.block_length = 0;
22272}
22273
/* Update the mask of the current automatically-generated IT
   instruction. See comments in new_automatic_it_block ().  */

/* COND is the condition of the instruction being appended to the open
   block; its low bit says whether the instruction matches the block
   condition (T) or its inverse (E).  The corresponding mask bit is set
   accordingly, the terminating 1-bit is moved down one position, and
   the IT instruction already emitted is re-written in place.  */
static void
now_pred_add_mask (int cond)
{
#define CLEAR_BIT(value, nbit)  ((value) & ~(1 << (nbit)))
#define SET_BIT_VALUE(value, bitvalue, nbit)  (CLEAR_BIT (value, nbit) \
					      | ((bitvalue) << (nbit)))
  const int resulting_bit = (cond & 1);

  now_pred.mask &= 0xf;
  /* Record T/E for the instruction at position block_length ...  */
  now_pred.mask = SET_BIT_VALUE (now_pred.mask,
				   resulting_bit,
				  (5 - now_pred.block_length));
  /* ... and place the new terminator bit just below it.  */
  now_pred.mask = SET_BIT_VALUE (now_pred.mask,
				   1,
				   ((5 - now_pred.block_length) - 1));
  /* Patch the emitted IT instruction with the updated mask.  */
  output_it_inst (now_pred.cc, now_pred.mask, now_pred.insn);

#undef CLEAR_BIT
#undef SET_BIT_VALUE
}
22297
/* The IT blocks handling machinery is accessed through these functions:
22299     it_fsm_pre_encode ()               from md_assemble ()
22300     set_pred_insn_type ()		optional, from the tencode functions
22301     set_pred_insn_type_last ()		ditto
22302     in_pred_block ()			ditto
22303     it_fsm_post_encode ()              from md_assemble ()
22304     force_automatic_it_block_close ()  from label handling functions
22305
22306   Rationale:
22307     1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
22308	initializing the IT insn type with a generic initial value depending
22309	on the inst.condition.
22310     2) During the tencode function, two things may happen:
22311	a) The tencode function overrides the IT insn type by
22312	   calling either set_pred_insn_type (type) or
22313	   set_pred_insn_type_last ().
22314	b) The tencode function queries the IT block state by
22315	   calling in_pred_block () (i.e. to determine narrow/not narrow mode).
22316
22317	Both set_pred_insn_type and in_pred_block run the internal FSM state
22318	handling function (handle_pred_state), because: a) setting the IT insn
22319	type may incur in an invalid state (exiting the function),
22320	and b) querying the state requires the FSM to be updated.
22321	Specifically we want to avoid creating an IT block for conditional
22322	branches, so it_fsm_pre_encode is actually a guess and we can't
22323	determine whether an IT block is required until the tencode () routine
	has decided what type of instruction this actually is.
22325	Because of this, if set_pred_insn_type and in_pred_block have to be
22326	used, set_pred_insn_type has to be called first.
22327
22328	set_pred_insn_type_last () is a wrapper of set_pred_insn_type (type),
22329	that determines the insn IT type depending on the inst.cond code.
22330	When a tencode () routine encodes an instruction that can be
22331	either outside an IT block, or, in the case of being inside, has to be
22332	the last one, set_pred_insn_type_last () will determine the proper
22333	IT instruction type based on the inst.cond code. Otherwise,
22334	set_pred_insn_type can be called for overriding that logic or
22335	for covering other cases.
22336
22337	Calling handle_pred_state () may not transition the IT block state to
22338	OUTSIDE_PRED_BLOCK immediately, since the (current) state could be
22339	still queried. Instead, if the FSM determines that the state should
22340	be transitioned to OUTSIDE_PRED_BLOCK, a flag is marked to be closed
22341	after the tencode () function: that's what it_fsm_post_encode () does.
22342
22343	Since in_pred_block () calls the state handling function to get an
22344	updated state, an error may occur (due to invalid insns combination).
22345	In that case, inst.error is set.
22346	Therefore, inst.error has to be checked after the execution of
22347	the tencode () routine.
22348
22349     3) Back in md_assemble(), it_fsm_post_encode () is called to commit
22350	any pending state change (if any) that didn't take place in
22351	handle_pred_state () as explained above.  */
22352
22353static void
22354it_fsm_pre_encode (void)
22355{
22356  if (inst.cond != COND_ALWAYS)
22357    inst.pred_insn_type =  INSIDE_IT_INSN;
22358  else
22359    inst.pred_insn_type = OUTSIDE_PRED_INSN;
22360
22361  now_pred.state_handled = 0;
22362}
22363
22364/* IT state FSM handling function.  */
22365/* MVE instructions and non-MVE instructions are handled differently because of
22366   the introduction of VPT blocks.
22367   Specifications say that any non-MVE instruction inside a VPT block is
22368   UNPREDICTABLE, with the exception of the BKPT instruction.  Whereas most MVE
22369   instructions are deemed to be UNPREDICTABLE if inside an IT block.  For the
22370   few exceptions we have MVE_UNPREDICABLE_INSN.
22371   The error messages provided depending on the different combinations possible
22372   are described in the cases below:
22373   For 'most' MVE instructions:
22374   1) In an IT block, with an IT code: syntax error
22375   2) In an IT block, with a VPT code: error: must be in a VPT block
22376   3) In an IT block, with no code: warning: UNPREDICTABLE
22377   4) In a VPT block, with an IT code: syntax error
22378   5) In a VPT block, with a VPT code: OK!
22379   6) In a VPT block, with no code: error: missing code
22380   7) Outside a pred block, with an IT code: error: syntax error
22381   8) Outside a pred block, with a VPT code: error: should be in a VPT block
22382   9) Outside a pred block, with no code: OK!
22383   For non-MVE instructions:
22384   10) In an IT block, with an IT code: OK!
22385   11) In an IT block, with a VPT code: syntax error
22386   12) In an IT block, with no code: error: missing code
22387   13) In a VPT block, with an IT code: error: should be in an IT block
22388   14) In a VPT block, with a VPT code: syntax error
22389   15) In a VPT block, with no code: UNPREDICTABLE
22390   16) Outside a pred block, with an IT code: error: should be in an IT block
22391   17) Outside a pred block, with a VPT code: syntax error
22392   18) Outside a pred block, with no code: OK!
22393 */
22394
22395
static int
handle_pred_state (void)
{
  /* Record that the FSM has run for this instruction so that
     it_fsm_post_encode () and in_pred_block () do not run it again.  */
  now_pred.state_handled = 1;
  now_pred.insn_cond = FALSE;

  switch (now_pred.state)
    {
    case OUTSIDE_PRED_BLOCK:
      switch (inst.pred_insn_type)
	{
	case MVE_UNPREDICABLE_INSN:
	case MVE_OUTSIDE_PRED_INSN:
	  if (inst.cond < COND_ALWAYS)
	    {
	      /* Case 7: Outside a pred block, with an IT code: error: syntax
		 error.  */
	      inst.error = BAD_SYNTAX;
	      return FAIL;
	    }
	  /* Case 9:  Outside a pred block, with no code: OK!  */
	  break;
	case OUTSIDE_PRED_INSN:
	  if (inst.cond > COND_ALWAYS)
	    {
	      /* Case 17:  Outside a pred block, with a VPT code: syntax error.
	       */
	      inst.error = BAD_SYNTAX;
	      return FAIL;
	    }
	  /* Case 18: Outside a pred block, with no code: OK!  */
	  break;

	case INSIDE_VPT_INSN:
	  /* Case 8: Outside a pred block, with a VPT code: error: should be in
	     a VPT block.  */
	  inst.error = BAD_OUT_VPT;
	  return FAIL;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	  if (inst.cond < COND_ALWAYS)
	    {
	      /* Case 16: Outside a pred block, with an IT code: error: should
		 be in an IT block.  */
	      if (thumb_mode == 0)
		{
		  if (unified_syntax
		      && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
		    as_tsktsk (_("Warning: conditional outside an IT block"\
				 " for Thumb."));
		}
	      else
		{
		  if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
		      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		    {
		      /* Automatically generate the IT instruction.  */
		      new_automatic_it_block (inst.cond);
		      if (inst.pred_insn_type == INSIDE_IT_LAST_INSN)
			close_automatic_it_block ();
		    }
		  else
		    {
		      inst.error = BAD_OUT_IT;
		      return FAIL;
		    }
		}
	      break;
	    }
	  else if (inst.cond > COND_ALWAYS)
	    {
	      /* Case 17: Outside a pred block, with a VPT code: syntax error.
	       */
	      inst.error = BAD_SYNTAX;
	      return FAIL;
	    }
	  else
	    gas_assert (0);
	  /* Fall through.  */
	case IF_INSIDE_IT_LAST_INSN:
	case NEUTRAL_IT_INSN:
	  break;

	case VPT_INSN:
	  if (inst.cond != COND_ALWAYS)
	    first_error (BAD_SYNTAX);
	  /* A VPT/VPST instruction opens a manual vector predication
	     block.  */
	  now_pred.state = MANUAL_PRED_BLOCK;
	  now_pred.block_length = 0;
	  now_pred.type = VECTOR_PRED;
	  now_pred.cc = 0;
	  break;
	case IT_INSN:
	  /* An IT instruction opens a manual scalar predication block.  */
	  now_pred.state = MANUAL_PRED_BLOCK;
	  now_pred.block_length = 0;
	  now_pred.type = SCALAR_PRED;
	  break;
	}
      break;

    case AUTOMATIC_PRED_BLOCK:
      /* Three things may happen now:
	 a) We should increment current it block size;
	 b) We should close current it block (closing insn or 4 insns);
	 c) We should close current it block and start a new one (due
	 to incompatible conditions or
	 4 insns-length block reached).  */

      switch (inst.pred_insn_type)
	{
	case INSIDE_VPT_INSN:
	case VPT_INSN:
	case MVE_UNPREDICABLE_INSN:
	case MVE_OUTSIDE_PRED_INSN:
	  gas_assert (0);
	  /* Fall through.  */
	case OUTSIDE_PRED_INSN:
	  /* The closure of the block shall happen immediately,
	     so any in_pred_block () call reports the block as closed.  */
	  force_automatic_it_block_close ();
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	case IF_INSIDE_IT_LAST_INSN:
	  now_pred.block_length++;

	  if (now_pred.block_length > 4
	      || !now_pred_compatible (inst.cond))
	    {
	      force_automatic_it_block_close ();
	      if (inst.pred_insn_type != IF_INSIDE_IT_LAST_INSN)
		new_automatic_it_block (inst.cond);
	    }
	  else
	    {
	      now_pred.insn_cond = TRUE;
	      now_pred_add_mask (inst.cond);
	    }

	  if (now_pred.state == AUTOMATIC_PRED_BLOCK
	      && (inst.pred_insn_type == INSIDE_IT_LAST_INSN
		  || inst.pred_insn_type == IF_INSIDE_IT_LAST_INSN))
	    close_automatic_it_block ();
	  break;

	case NEUTRAL_IT_INSN:
	  now_pred.block_length++;
	  now_pred.insn_cond = TRUE;

	  if (now_pred.block_length > 4)
	    force_automatic_it_block_close ();
	  else
	    now_pred_add_mask (now_pred.cc & 1);
	  break;

	case IT_INSN:
	  /* An explicit IT instruction takes over from the automatic
	     machinery.  */
	  close_automatic_it_block ();
	  now_pred.state = MANUAL_PRED_BLOCK;
	  break;
	}
      break;

    case MANUAL_PRED_BLOCK:
      {
	int cond, is_last;
	/* Compute the condition expected at this slot of the block and
	   advance the mask; IS_LAST is set when this is the block's final
	   slot.  */
	if (now_pred.type == SCALAR_PRED)
	  {
	    /* Check conditional suffixes.  */
	    cond = now_pred.cc ^ ((now_pred.mask >> 4) & 1) ^ 1;
	    now_pred.mask <<= 1;
	    now_pred.mask &= 0x1f;
	    is_last = (now_pred.mask == 0x10);
	  }
	else
	  {
	    now_pred.cc ^= (now_pred.mask >> 4);
	    cond = now_pred.cc + 0xf;
	    now_pred.mask <<= 1;
	    now_pred.mask &= 0x1f;
	    is_last = now_pred.mask == 0x10;
	  }
	now_pred.insn_cond = TRUE;

	switch (inst.pred_insn_type)
	  {
	  case OUTSIDE_PRED_INSN:
	    if (now_pred.type == SCALAR_PRED)
	      {
		if (inst.cond == COND_ALWAYS)
		  {
		    /* Case 12: In an IT block, with no code: error: missing
		       code.  */
		    inst.error = BAD_NOT_IT;
		    return FAIL;
		  }
		else if (inst.cond > COND_ALWAYS)
		  {
		    /* Case 11: In an IT block, with a VPT code: syntax error.
		     */
		    inst.error = BAD_SYNTAX;
		    return FAIL;
		  }
		else if (thumb_mode)
		  {
		    /* This is for some special cases where a non-MVE
		       instruction is not allowed in an IT block, such as cbz,
		       but are put into one with a condition code.
		       You could argue this should be a syntax error, but we
		       gave the 'not allowed in IT block' diagnostic in the
		       past so we will keep doing so.  */
		    inst.error = BAD_NOT_IT;
		    return FAIL;
		  }
		break;
	      }
	    else
	      {
		/* Case 15: In a VPT block, with no code: UNPREDICTABLE.  */
		as_tsktsk (MVE_NOT_VPT);
		return SUCCESS;
	      }
	  case MVE_OUTSIDE_PRED_INSN:
	    if (now_pred.type == SCALAR_PRED)
	      {
		if (inst.cond == COND_ALWAYS)
		  {
		    /* Case 3: In an IT block, with no code: warning:
		       UNPREDICTABLE.  */
		    as_tsktsk (MVE_NOT_IT);
		    return SUCCESS;
		  }
		else if (inst.cond < COND_ALWAYS)
		  {
		    /* Case 1: In an IT block, with an IT code: syntax error.
		     */
		    inst.error = BAD_SYNTAX;
		    return FAIL;
		  }
		else
		  gas_assert (0);
	      }
	    else
	      {
		if (inst.cond < COND_ALWAYS)
		  {
		    /* Case 4: In a VPT block, with an IT code: syntax error.
		     */
		    inst.error = BAD_SYNTAX;
		    return FAIL;
		  }
		else if (inst.cond == COND_ALWAYS)
		  {
		    /* Case 6: In a VPT block, with no code: error: missing
		       code.  */
		    inst.error = BAD_NOT_VPT;
		    return FAIL;
		  }
		else
		  {
		    gas_assert (0);
		  }
	      }
	    /* Fall through.  */
	  case MVE_UNPREDICABLE_INSN:
	    as_tsktsk (now_pred.type == SCALAR_PRED ? MVE_NOT_IT : MVE_NOT_VPT);
	    return SUCCESS;
	  case INSIDE_IT_INSN:
	    if (inst.cond > COND_ALWAYS)
	      {
		/* Case 11: In an IT block, with a VPT code: syntax error.  */
		/* Case 14: In a VPT block, with a VPT code: syntax error.  */
		inst.error = BAD_SYNTAX;
		return FAIL;
	      }
	    else if (now_pred.type == SCALAR_PRED)
	      {
		/* Case 10: In an IT block, with an IT code: OK!  */
		if (cond != inst.cond)
		  {
		    inst.error = now_pred.type == SCALAR_PRED ? BAD_IT_COND :
		      BAD_VPT_COND;
		    return FAIL;
		  }
	      }
	    else
	      {
		/* Case 13: In a VPT block, with an IT code: error: should be
		   in an IT block.  */
		inst.error = BAD_OUT_IT;
		return FAIL;
	      }
	    break;

	  case INSIDE_VPT_INSN:
	    if (now_pred.type == SCALAR_PRED)
	      {
		/* Case 2: In an IT block, with a VPT code: error: must be in a
		   VPT block.  */
		inst.error = BAD_OUT_VPT;
		return FAIL;
	      }
	    /* Case 5:  In a VPT block, with a VPT code: OK!  */
	    else if (cond != inst.cond)
	      {
		inst.error = BAD_VPT_COND;
		return FAIL;
	      }
	    break;
	  case INSIDE_IT_LAST_INSN:
	  case IF_INSIDE_IT_LAST_INSN:
	    if (now_pred.type == VECTOR_PRED || inst.cond > COND_ALWAYS)
	      {
		/* Case 4: In a VPT block, with an IT code: syntax error.  */
		/* Case 11: In an IT block, with a VPT code: syntax error.  */
		inst.error = BAD_SYNTAX;
		return FAIL;
	      }
	    else if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    if (!is_last)
	      {
		inst.error = BAD_BRANCH;
		return FAIL;
	      }
	    break;

	  case NEUTRAL_IT_INSN:
	    /* The BKPT instruction is unconditional even in a IT or VPT
	       block.  */
	    break;

	  case IT_INSN:
	    if (now_pred.type == SCALAR_PRED)
	      {
		inst.error = BAD_IT_IT;
		return FAIL;
	      }
	    /* fall through.  */
	  case VPT_INSN:
	    if (inst.cond == COND_ALWAYS)
	      {
		/* Executing a VPT/VPST instruction inside an IT block or a
		   VPT/VPST/IT instruction inside a VPT block is UNPREDICTABLE.
		 */
		if (now_pred.type == SCALAR_PRED)
		  as_tsktsk (MVE_NOT_IT);
		else
		  as_tsktsk (MVE_NOT_VPT);
		return SUCCESS;
	      }
	    else
	      {
		/* VPT/VPST do not accept condition codes.  */
		inst.error = BAD_SYNTAX;
		return FAIL;
	      }
	  }
	}
      break;
    }

  return SUCCESS;
}
22760
/* One entry describing a class of instructions deprecated in an IT
   block.  An instruction matches when (insn & MASK) == PATTERN.  */
struct depr_insn_mask
{
  /* Bit pattern identifying the deprecated class.  */
  unsigned long pattern;
  /* Bits of the instruction to compare against PATTERN.  */
  unsigned long mask;
  /* Human-readable description of the class, printed in the warning.  */
  const char* description;
};
22767
22768/* List of 16-bit instruction patterns deprecated in an IT block in
22769   ARMv8.  */
static const struct depr_insn_mask depr_it_insns[] = {
  { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
  { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
  { 0xa000, 0xb800, N_("ADR") },
  { 0x4800, 0xf800, N_("Literal loads") },
  { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
  { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
  /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
     field in asm_opcode. 'tvalue' is used at the stage this check happen.  */
  { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
  /* A zero mask terminates the table.  */
  { 0, 0, NULL }
};
22782
22783static void
22784it_fsm_post_encode (void)
22785{
22786  int is_last;
22787
22788  if (!now_pred.state_handled)
22789    handle_pred_state ();
22790
22791  if (now_pred.insn_cond
22792      && warn_on_restrict_it
22793      && !now_pred.warn_deprecated
22794      && warn_on_deprecated
22795      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)
22796      && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m))
22797    {
22798      if (inst.instruction >= 0x10000)
22799	{
22800	  as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
22801		     "performance deprecated in ARMv8-A and ARMv8-R"));
22802	  now_pred.warn_deprecated = TRUE;
22803	}
22804      else
22805	{
22806	  const struct depr_insn_mask *p = depr_it_insns;
22807
22808	  while (p->mask != 0)
22809	    {
22810	      if ((inst.instruction & p->mask) == p->pattern)
22811		{
22812		  as_tsktsk (_("IT blocks containing 16-bit Thumb "
22813			       "instructions of the following class are "
22814			       "performance deprecated in ARMv8-A and "
22815			       "ARMv8-R: %s"), p->description);
22816		  now_pred.warn_deprecated = TRUE;
22817		  break;
22818		}
22819
22820	      ++p;
22821	    }
22822	}
22823
22824      if (now_pred.block_length > 1)
22825	{
22826	  as_tsktsk (_("IT blocks containing more than one conditional "
22827		     "instruction are performance deprecated in ARMv8-A and "
22828		     "ARMv8-R"));
22829	  now_pred.warn_deprecated = TRUE;
22830	}
22831    }
22832
22833    is_last = (now_pred.mask == 0x10);
22834    if (is_last)
22835      {
22836	now_pred.state = OUTSIDE_PRED_BLOCK;
22837	now_pred.mask = 0;
22838      }
22839}
22840
22841static void
22842force_automatic_it_block_close (void)
22843{
22844  if (now_pred.state == AUTOMATIC_PRED_BLOCK)
22845    {
22846      close_automatic_it_block ();
22847      now_pred.state = OUTSIDE_PRED_BLOCK;
22848      now_pred.mask = 0;
22849    }
22850}
22851
22852static int
22853in_pred_block (void)
22854{
22855  if (!now_pred.state_handled)
22856    handle_pred_state ();
22857
22858  return now_pred.state != OUTSIDE_PRED_BLOCK;
22859}
22860
22861/* Whether OPCODE only has T32 encoding.  Since this function is only used by
22862   t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
22863   here, hence the "known" in the function name.  */
22864
22865static bfd_boolean
22866known_t32_only_insn (const struct asm_opcode *opcode)
22867{
22868  /* Original Thumb-1 wide instruction.  */
22869  if (opcode->tencode == do_t_blx
22870      || opcode->tencode == do_t_branch23
22871      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
22872      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier))
22873    return TRUE;
22874
22875  /* Wide-only instruction added to ARMv8-M Baseline.  */
22876  if (ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v8m_m_only)
22877      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_atomics)
22878      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v6t2_v8m)
22879      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_div))
22880    return TRUE;
22881
22882  return FALSE;
22883}
22884
22885/* Whether wide instruction variant can be used if available for a valid OPCODE
22886   in ARCH.  */
22887
22888static bfd_boolean
22889t32_insn_ok (arm_feature_set arch, const struct asm_opcode *opcode)
22890{
22891  if (known_t32_only_insn (opcode))
22892    return TRUE;
22893
22894  /* Instruction with narrow and wide encoding added to ARMv8-M.  Availability
22895     of variant T3 of B.W is checked in do_t_branch.  */
22896  if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
22897      && opcode->tencode == do_t_branch)
22898    return TRUE;
22899
22900  /* MOV accepts T1/T3 encodings under Baseline, T3 encoding is 32bit.  */
22901  if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
22902      && opcode->tencode == do_t_mov_cmp
22903      /* Make sure CMP instruction is not affected.  */
22904      && opcode->aencode == do_mov)
22905    return TRUE;
22906
22907  /* Wide instruction variants of all instructions with narrow *and* wide
22908     variants become available with ARMv6t2.  Other opcodes are either
22909     narrow-only or wide-only and are thus available if OPCODE is valid.  */
22910  if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v6t2))
22911    return TRUE;
22912
22913  /* OPCODE with narrow only instruction variant or wide variant not
22914     available.  */
22915  return FALSE;
22916}
22917
/* Main assembly entry point: assemble the single instruction (or alias
   definition) in STR into the current frag, emitting diagnostics via
   as_bad / as_tsktsk on failure.  */
void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Reset the per-instruction scratch state and its relocations.  */
  memset (&inst, '\0', sizeof (inst));
  int r;
  for (r = 0; r < ARM_IT_MAX_RELOCS; r++)
    inst.relocs[r].type = BFD_RELOC_UNUSED;

  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (! create_register_alias (str, p)
	  && ! create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_tsktsk (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;

  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
	  || (thumb_mode == 1
	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
	{
	  if (opcode->tencode == do_t_swi)
	    as_bad (_("SVC is not permitted on this architecture"));
	  else
	    as_bad (_("selected processor does not support `%s' in Thumb mode"), str);
	  return;
	}
      if (inst.cond != COND_ALWAYS && !unified_syntax
	  && opcode->tencode != do_t_branch)
	{
	  as_bad (_("Thumb does not support conditional execution"));
	  return;
	}

      /* Two things are addressed here:
	 1) Implicit require narrow instructions on Thumb-1.
	    This avoids relaxation accidentally introducing Thumb-2
	    instructions.
	 2) Reject wide instructions in non Thumb-2 cores.

	 Only instructions with narrow and wide variants need to be handled
	 but selecting all non wide-only instructions is easier.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2)
	  && !t32_insn_ok (variant, opcode))
	{
	  if (inst.size_req == 0)
	    inst.size_req = 2;
	  else if (inst.size_req == 4)
	    {
	      if (ARM_CPU_HAS_FEATURE (variant, arm_ext_v8m))
		as_bad (_("selected processor does not support 32bit wide "
			  "variant of instruction `%s'"), str);
	      else
		as_bad (_("selected processor does not support `%s' in "
			  "Thumb-2 mode"), str);
	      return;
	    }
	}

      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
	{
	  /* Prepare the pred_insn_type for those encodings that don't set
	     it.  */
	  it_fsm_pre_encode ();

	  opcode->tencode ();

	  it_fsm_post_encode ();
	}

      if (!(inst.error || inst.relax))
	{
	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
	  /* Encodings above 0xffff are 32-bit; the rest are 16-bit.  */
	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
	  if (inst.size_req && inst.size_req != inst.size)
	    {
	      as_bad (_("cannot honor width suffix -- `%s'"), str);
	      return;
	    }
	}

      /* Something has gone badly wrong if we try to relax a fixed size
	 instruction.  */
      gas_assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
			      *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
	 set those bits when Thumb-2 32-bit instructions are seen.  The impact
	 of relaxable instructions will be considered later after we finish all
	 relaxation.  */
      if (ARM_FEATURE_CORE_EQUAL (cpu_variant, arm_arch_any))
	variant = arm_arch_none;
      else
	variant = cpu_variant;
      if (inst.size == 4 && !t32_insn_ok (variant, opcode))
	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				arm_ext_v6t2);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_THUMB);
	}
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      bfd_boolean is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
	  && !(opcode->avariant &&
	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
	{
	  as_bad (_("selected processor does not support `%s' in ARM mode"), str);
	  return;
	}
      if (inst.size_req)
	{
	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
	  return;
	}

      inst.instruction = opcode->avalue;
      if (opcode->tag == OT_unconditionalF)
	inst.instruction |= 0xFU << 28;
      else
	inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
	{
	  it_fsm_pre_encode ();
	  opcode->aencode ();
	  it_fsm_post_encode ();
	}
      /* Arm mode bx is marked as both v4T and v5 because it's still required
	 on a hypothetical non-thumb v5 core.  */
      if (is_bx)
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				*opcode->avariant);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_ARM);
	}
    }
  else
    {
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
		"-- `%s'"), str);
      return;
    }
  output_inst (str);
}
23112
23113static void
23114check_pred_blocks_finished (void)
23115{
23116#ifdef OBJ_ELF
23117  asection *sect;
23118
23119  for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
23120    {
23121      segment_info_type *seginfo = seg_info (sect);
23122
23123      if (seginfo && seginfo->tc_segment_info_data.current_pred.state
23124	  == MANUAL_PRED_BLOCK)
23125	{
23126	  if (now_pred.type == SCALAR_PRED)
23127	    as_warn (_("section '%s' finished with an open IT block."),
23128		     sect->name);
23129	  else
23130	    as_warn (_("section '%s' finished with an open VPT/VPST block."),
23131		   sect->name);
23132	}
23133    }
23134#else
23135  if (now_pred.state == MANUAL_PRED_BLOCK)
23136    {
23137      if (now_pred.type == SCALAR_PRED)
23138       as_warn (_("file finished with an open IT block."));
23139      else
23140	as_warn (_("file finished with an open VPT/VPST block."));
23141    }
23142#endif
23143}
23144
23145/* Various frobbings of labels and their addresses.  */
23146
void
arm_start_line_hook (void)
{
  /* Forget the label seen on a previous line; md_assemble only
     re-anchors a label recorded for the current line.  */
  last_label_seen = NULL;
}
23152
void
arm_frob_label (symbolS * sym)
{
  /* Remember the label so md_assemble can re-anchor it to the frag of
     the next instruction.  */
  last_label_seen = sym;

  /* Record whether the label was defined in Thumb or ARM code.  */
  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  /* A label must not land in the middle of an automatically generated
     IT block.  */
  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:	.word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_section_flags (now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      /* The flag applies only to the next label seen.  */
      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
23211
23212bfd_boolean
23213arm_data_in_code (void)
23214{
23215  if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
23216    {
23217      *input_line_pointer = '/';
23218      input_line_pointer += 5;
23219      *input_line_pointer = 0;
23220      return TRUE;
23221    }
23222
23223  return FALSE;
23224}
23225
23226char *
23227arm_canonicalize_symbol_name (char * name)
23228{
23229  int len;
23230
23231  if (thumb_mode && (len = strlen (name)) > 5
23232      && streq (name + len - 5, "/data"))
23233    *(name + len - 5) = 0;
23234
23235  return name;
23236}
23237
23238/* Table of all register names defined by default.  The user can
23239   define additional names with .req.  Note that all register names
23240   should appear in both upper and lowercase variants.	Some registers
23241   also have mixed-case names.	*/
23242
/* Build one reg_entry: name S, internal number N, type REG_TYPE_T.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
/* Register named P<n> with number N (e.g. r0..r15).  */
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* As REGNUM, but the internal number is doubled (used for registers
   that map onto pairs of underlying registers).  */
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
/* Sixteen registers P0..P15.  */
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
/* The upper half P16..P31 of a 32-register set.  */
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
/* Sixteen registers P0..P15 with doubled internal numbers.  */
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
/* LR, SP and SPSR entries for one banked-register group, in both lower
   and upper case (number encoding: 768 | reg << 16 [| SPSR_BIT]).  */
#define SPLRBANK(base,bank,t) \
  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
23268
23269static const struct reg_entry reg_names[] =
23270{
23271  /* ARM integer registers.  */
23272  REGSET(r, RN), REGSET(R, RN),
23273
23274  /* ATPCS synonyms.  */
23275  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
23276  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
23277  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
23278
23279  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
23280  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
23281  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
23282
23283  /* Well-known aliases.  */
23284  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
23285  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
23286
23287  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
23288  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
23289
23290  /* Defining the new Zero register from ARMv8.1-M.  */
23291  REGDEF(zr,15,ZR),
23292  REGDEF(ZR,15,ZR),
23293
23294  /* Coprocessor numbers.  */
23295  REGSET(p, CP), REGSET(P, CP),
23296
23297  /* Coprocessor register numbers.  The "cr" variants are for backward
23298     compatibility.  */
23299  REGSET(c,  CN), REGSET(C, CN),
23300  REGSET(cr, CN), REGSET(CR, CN),
23301
23302  /* ARM banked registers.  */
23303  REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
23304  REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
23305  REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
23306  REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
23307  REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
23308  REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
23309  REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),
23310
23311  REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
23312  REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
23313  REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
23314  REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
23315  REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
23316  REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
23317  REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
23318  REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),
23319
23320  SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
23321  SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
23322  SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
23323  SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
23324  SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
23325  REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
23326  REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
23327  REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
23328  REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),
23329
23330  /* FPA registers.  */
23331  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
23332  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
23333
23334  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
23335  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
23336
23337  /* VFP SP registers.	*/
23338  REGSET(s,VFS),  REGSET(S,VFS),
23339  REGSETH(s,VFS), REGSETH(S,VFS),
23340
23341  /* VFP DP Registers.	*/
23342  REGSET(d,VFD),  REGSET(D,VFD),
23343  /* Extra Neon DP registers.  */
23344  REGSETH(d,VFD), REGSETH(D,VFD),
23345
23346  /* Neon QP registers.  */
23347  REGSET2(q,NQ),  REGSET2(Q,NQ),
23348
23349  /* VFP control registers.  */
23350  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
23351  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
23352  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
23353  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
23354  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
23355  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
23356  REGDEF(mvfr2,5,VFC), REGDEF(MVFR2,5,VFC),
23357  REGDEF(fpscr_nzcvqc,2,VFC), REGDEF(FPSCR_nzcvqc,2,VFC),
23358  REGDEF(vpr,12,VFC), REGDEF(VPR,12,VFC),
23359  REGDEF(fpcxt_ns,14,VFC), REGDEF(FPCXT_NS,14,VFC),
23360  REGDEF(fpcxt_s,15,VFC), REGDEF(FPCXT_S,15,VFC),
23361
23362  /* Maverick DSP coprocessor registers.  */
23363  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
23364  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),
23365
23366  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
23367  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
23368  REGDEF(dspsc,0,DSPSC),
23369
23370  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
23371  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
23372  REGDEF(DSPSC,0,DSPSC),
23373
23374  /* iWMMXt data registers - p0, c0-15.	 */
23375  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
23376
23377  /* iWMMXt control registers - p1, c0-3.  */
23378  REGDEF(wcid,	0,MMXWC),  REGDEF(wCID,	 0,MMXWC),  REGDEF(WCID,  0,MMXWC),
23379  REGDEF(wcon,	1,MMXWC),  REGDEF(wCon,	 1,MMXWC),  REGDEF(WCON,  1,MMXWC),
23380  REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
23381  REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),
23382
23383  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
23384  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
23385  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
23386  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
23387  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),
23388
23389  /* XScale accumulator registers.  */
23390  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
23391};
23392#undef REGDEF
23393#undef REGNUM
23394#undef REGSET
23395
/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
   within psr_required_here.

   Every permutation of the f/s/x/c field letters is listed explicitly
   so that the user may write the fields in any order; each entry's
   value is simply the OR of the per-field masks, so e.g. "fsx" and
   "xsf" map to the same mask.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},
  {"flg",  PSR_f},
  {"ctl",  PSR_c},

  /* Individual flags.	*/
  {"f",	   PSR_f},
  {"c",	   PSR_c},
  {"x",	   PSR_x},
  {"s",	   PSR_s},

  /* Combinations of flags.  */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
23474
23475/* Table of V7M psr names.  */
23476static const struct asm_psr v7m_psrs[] =
23477{
23478  {"apsr",	   0x0 }, {"APSR",	   0x0 },
23479  {"iapsr",	   0x1 }, {"IAPSR",	   0x1 },
23480  {"eapsr",	   0x2 }, {"EAPSR",	   0x2 },
23481  {"psr",	   0x3 }, {"PSR",	   0x3 },
23482  {"xpsr",	   0x3 }, {"XPSR",	   0x3 }, {"xPSR",	  3 },
23483  {"ipsr",	   0x5 }, {"IPSR",	   0x5 },
23484  {"epsr",	   0x6 }, {"EPSR",	   0x6 },
23485  {"iepsr",	   0x7 }, {"IEPSR",	   0x7 },
23486  {"msp",	   0x8 }, {"MSP",	   0x8 },
23487  {"psp",	   0x9 }, {"PSP",	   0x9 },
23488  {"msplim",	   0xa }, {"MSPLIM",	   0xa },
23489  {"psplim",	   0xb }, {"PSPLIM",	   0xb },
23490  {"primask",	   0x10}, {"PRIMASK",	   0x10},
23491  {"basepri",	   0x11}, {"BASEPRI",	   0x11},
23492  {"basepri_max",  0x12}, {"BASEPRI_MAX",  0x12},
23493  {"faultmask",	   0x13}, {"FAULTMASK",	   0x13},
23494  {"control",	   0x14}, {"CONTROL",	   0x14},
23495  {"msp_ns",	   0x88}, {"MSP_NS",	   0x88},
23496  {"psp_ns",	   0x89}, {"PSP_NS",	   0x89},
23497  {"msplim_ns",	   0x8a}, {"MSPLIM_NS",	   0x8a},
23498  {"psplim_ns",	   0x8b}, {"PSPLIM_NS",	   0x8b},
23499  {"primask_ns",   0x90}, {"PRIMASK_NS",   0x90},
23500  {"basepri_ns",   0x91}, {"BASEPRI_NS",   0x91},
23501  {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
23502  {"control_ns",   0x94}, {"CONTROL_NS",   0x94},
23503  {"sp_ns",	   0x98}, {"SP_NS",	   0x98 }
23504};
23505
/* Table of all shift-in-operand names.	 Both cases of each name are
   listed; "asl" is accepted as a synonym for "lsl" (same SHIFT_LSL
   encoding).  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX },
  { "uxtw", SHIFT_UXTW}, { "UXTW", SHIFT_UXTW}
};
23517
/* Table of all explicit relocation names.  Each name is listed in
   both lower and upper case, mapping to the same BFD relocation.  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
  { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
  { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},  { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},  { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},  { "TPOFF",   BFD_RELOC_ARM_TLS_LE32},
  { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
  { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
  { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
  { "tlscall", BFD_RELOC_ARM_TLS_CALL},
  { "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
  { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
  { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ},
  { "gotfuncdesc", BFD_RELOC_ARM_GOTFUNCDESC },
  { "GOTFUNCDESC", BFD_RELOC_ARM_GOTFUNCDESC },
  { "gotofffuncdesc", BFD_RELOC_ARM_GOTOFFFUNCDESC },
  { "GOTOFFFUNCDESC", BFD_RELOC_ARM_GOTOFFFUNCDESC },
  { "funcdesc", BFD_RELOC_ARM_FUNCDESC },
  { "FUNCDESC", BFD_RELOC_ARM_FUNCDESC },
  { "tlsgd_fdpic", BFD_RELOC_ARM_TLS_GD32_FDPIC },
  { "TLSGD_FDPIC", BFD_RELOC_ARM_TLS_GD32_FDPIC },
  { "tlsldm_fdpic", BFD_RELOC_ARM_TLS_LDM32_FDPIC },
  { "TLSLDM_FDPIC", BFD_RELOC_ARM_TLS_LDM32_FDPIC },
  { "gottpoff_fdpic", BFD_RELOC_ARM_TLS_IE32_FDPIC },
  /* Fix: the upper-case name was misspelled "GOTTPOFF_FDIC", so the
     documented upper-case spelling was never recognized.  */
  { "GOTTPOFF_FDPIC", BFD_RELOC_ARM_TLS_IE32_FDPIC },
};
#endif
23551
/* Table of all conditional affixes.  Values are the 4-bit ARM
   condition-code encodings; "cs"/"hs" and "cc"/"ul"/"lo" are synonym
   spellings of the same encodings (0x2 and 0x3).  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
/* Extra "t"/"e" affixes with values beyond the normal 4-bit condition
   space.  NOTE(review): these look like the then/else suffixes for
   vector (VPT-block) predication -- confirm against the consumers.  */
static const struct asm_cond vconds[] =
{
    {"t", 0xf},
    {"e", 0x10}
};
23576
/* Emit the lower-case (L) and upper-case (U) spellings of one barrier
   option.  CODE is the option's encoding and FEAT the architecture
   feature that must be present for it to be accepted.  */
#define UL_BARRIER(L,U,CODE,FEAT) \
  { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
  { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }

/* Barrier option names for DMB/DSB-style instructions.  The "ld"
   (load-only) variants require ARMv8; the rest only need the base
   barrier extension.  */
static struct asm_barrier_opt barrier_opt_names[] =
{
  UL_BARRIER ("sy",	"SY",	 0xf, ARM_EXT_BARRIER),
  UL_BARRIER ("st",	"ST",	 0xe, ARM_EXT_BARRIER),
  UL_BARRIER ("ld",	"LD",	 0xd, ARM_EXT_V8),
  UL_BARRIER ("ish",	"ISH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("sh",	"SH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("ishst",	"ISHST", 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("shst",	"SHST",	 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("ishld",	"ISHLD", 0x9, ARM_EXT_V8),
  UL_BARRIER ("un",	"UN",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("nsh",	"NSH",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("unst",	"UNST",	 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshst",	"NSHST", 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshld",	"NSHLD", 0x5, ARM_EXT_V8),
  UL_BARRIER ("osh",	"OSH",	 0x3, ARM_EXT_BARRIER),
  UL_BARRIER ("oshst",	"OSHST", 0x2, ARM_EXT_BARRIER),
  UL_BARRIER ("oshld",	"OSHLD", 0x1, ARM_EXT_V8)
};

#undef UL_BARRIER
23602
/* Table of ARM-format instructions.	*/

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   e.g. _3.  */
#define OPS_1(a)	   { a, }
#define OPS_2(a,b)	   { a,b, }
#define OPS_3(a,b,c)	   { a,b,c, }
#define OPS_4(a,b,c,d)	   { a,b,c,d, }
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
23627
/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.

   Each expansion is one asm_opcode initializer; positionally the
   fields are: mnemonic string, operand list, OT_* tag, ARM opcode,
   Thumb opcode, ARM architecture variant, Thumb architecture variant,
   ARM encoder function, Thumb encoder function, and a trailing flag
   (1 for the MVE-predicable variants defined further below, else 0).  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.
   Note: callers pass the mnemonic as a string literal ("add"); only a
   few macros below (C3, UE, UF, the Neon ones) stringize a bare token
   themselves with #mnem.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
      TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
      TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }
/* As TxC3 but tagged deprecated (OT_cinfix3_deprecated).  */
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
      TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
      TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
      TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
      TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en, 0 }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }

/* ARM-only variants of all the above.  */
#define CE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Note: unlike CE, this stringizes the mnemonic itself, so callers
   pass a bare token (e.g. C3(adrl, ...)).  */
#define C3(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Thumb-only variants of TCE and TUE.  */
#define ToC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
    do_##te, 0 }

#define ToU(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x0, 0x##top, 0, THUMB_VARIANT, \
    NULL, do_##te, 0 }

/* T_MNEM_xyz enumerator variants of ToC.  */
#define toC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, T_MNEM##top, 0, THUMB_VARIANT, NULL, \
    do_##te, 0 }

/* T_MNEM_xyz enumerator variants of ToU.  */
#define toU(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x0, T_MNEM##top, 0, THUMB_VARIANT, \
    NULL, do_##te, 0 }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  The
   Thumb opcode is the ARM one with an 0xe condition prefix (0xe##op).  */
#define cCE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }

/* mov instructions that are shared between coprocessor and MVE.  */
#define mcCE(mnem,  op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##ae, 0 }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case). */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }
23732
/* One entry for a mnemonic with the condition M2 infixed between the
   literal fragments M1 and M3.  M1/M3 are string literals and #m2
   stringizes the condition token, so the three concatenate into a
   single mnemonic string.  "sizeof (#m2) == 1" detects the empty
   condition (the string is just the NUL), i.e. the unconditional
   form; otherwise the tag records where the infix starts (the length
   of M1).  */
#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Expands to 19 table entries: the bare mnemonic plus one entry per
   condition infix (including the cs/hs and cc/ul/lo synonyms).  */
#define CM(m1, m2, op, nops, ops, ae)	\
  xCM_ (m1,   , m2, op, nops, ops, ae),	\
  xCM_ (m1, eq, m2, op, nops, ops, ae),	\
  xCM_ (m1, ne, m2, op, nops, ops, ae),	\
  xCM_ (m1, cs, m2, op, nops, ops, ae),	\
  xCM_ (m1, hs, m2, op, nops, ops, ae),	\
  xCM_ (m1, cc, m2, op, nops, ops, ae),	\
  xCM_ (m1, ul, m2, op, nops, ops, ae),	\
  xCM_ (m1, lo, m2, op, nops, ops, ae),	\
  xCM_ (m1, mi, m2, op, nops, ops, ae),	\
  xCM_ (m1, pl, m2, op, nops, ops, ae),	\
  xCM_ (m1, vs, m2, op, nops, ops, ae),	\
  xCM_ (m1, vc, m2, op, nops, ops, ae),	\
  xCM_ (m1, hi, m2, op, nops, ops, ae),	\
  xCM_ (m1, ls, m2, op, nops, ops, ae),	\
  xCM_ (m1, ge, m2, op, nops, ops, ae),	\
  xCM_ (m1, lt, m2, op, nops, ops, ae),	\
  xCM_ (m1, gt, m2, op, nops, ops, ae),	\
  xCM_ (m1, le, m2, op, nops, ops, ae),	\
  xCM_ (m1, al, m2, op, nops, ops, ae)

/* ARM-only, unconditional (condition field 0xE), bare-token mnemonic.  */
#define UE(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* As UE but with 0xF in the condition field (OT_unconditionalF).  */
#define UF(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL, 0 }
23764
/* Neon data-processing. ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 0 }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 0 }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag, mve_p)				\
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,		\
    THUMB_VARIANT, do_##enc, do_##enc, mve_p }

#define NCE(mnem, op, nops, ops, enc)					\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 0)

#define NCEF(mnem, op, nops, ops, enc)					\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 0)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag, mve_p)				\
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, mve_p }

#define nCE(mnem, op, nops, ops, enc)					\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 0)

#define nCEF(mnem, op, nops, ops, enc)					\
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 0)

/* MVE instruction with an overloaded (M_MNEM) opcode, conditional
   suffix tagged OT_csuffixF, and the predicable flag set.  */
#define mCEF(mnem, op, nops, ops, enc)				\
  { #mnem, OPS##nops ops, OT_csuffixF, M_MNEM##op, M_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }


/* nCEF but for MVE predicated instructions.  */
#define mnCEF(mnem, op, nops, ops, enc)					\
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 1)

/* nCE but for MVE predicated instructions.  */
#define mnCE(mnem, op, nops, ops, enc)					\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 1)

/* NUF but for potentially MVE predicated instructions.  */
#define MNUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }

/* nUF but for potentially MVE predicated instructions.  */
#define mnUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }

/* ToC but for potentially MVE predicated instructions.  */
#define mToC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
    do_##te, 1 }

/* NCE but for MVE predicated instructions.  */
#define MNCE(mnem, op, nops, ops, enc)					\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 1)

/* NCEF but for MVE predicated instructions.  */
#define MNCEF(mnem, op, nops, ops, enc)					\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 1)
/* Lets table entries use a bare "0" as an encoder name: do_##ae then
   expands to do_0, i.e. a null encoder pointer.  */
#define do_0 0
23838
23839static const struct asm_opcode insns[] =
23840{
23841#define ARM_VARIANT    & arm_ext_v1 /* Core ARM Instructions.  */
23842#define THUMB_VARIANT  & arm_ext_v4t
23843 tCE("and",	0000000, _and,     3, (RR, oRR, SH), arit, t_arit3c),
23844 tC3("ands",	0100000, _ands,	   3, (RR, oRR, SH), arit, t_arit3c),
23845 tCE("eor",	0200000, _eor,	   3, (RR, oRR, SH), arit, t_arit3c),
23846 tC3("eors",	0300000, _eors,	   3, (RR, oRR, SH), arit, t_arit3c),
23847 tCE("sub",	0400000, _sub,	   3, (RR, oRR, SH), arit, t_add_sub),
23848 tC3("subs",	0500000, _subs,	   3, (RR, oRR, SH), arit, t_add_sub),
23849 tCE("add",	0800000, _add,	   3, (RR, oRR, SHG), arit, t_add_sub),
23850 tC3("adds",	0900000, _adds,	   3, (RR, oRR, SHG), arit, t_add_sub),
23851 tCE("adc",	0a00000, _adc,	   3, (RR, oRR, SH), arit, t_arit3c),
23852 tC3("adcs",	0b00000, _adcs,	   3, (RR, oRR, SH), arit, t_arit3c),
23853 tCE("sbc",	0c00000, _sbc,	   3, (RR, oRR, SH), arit, t_arit3),
23854 tC3("sbcs",	0d00000, _sbcs,	   3, (RR, oRR, SH), arit, t_arit3),
23855 tCE("orr",	1800000, _orr,	   3, (RR, oRR, SH), arit, t_arit3c),
23856 tC3("orrs",	1900000, _orrs,	   3, (RR, oRR, SH), arit, t_arit3c),
23857 tCE("bic",	1c00000, _bic,	   3, (RR, oRR, SH), arit, t_arit3),
23858 tC3("bics",	1d00000, _bics,	   3, (RR, oRR, SH), arit, t_arit3),
23859
23860 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
23861    for setting PSR flag bits.  They are obsolete in V6 and do not
23862    have Thumb equivalents. */
23863 tCE("tst",	1100000, _tst,	   2, (RR, SH),      cmp,  t_mvn_tst),
23864 tC3w("tsts",	1100000, _tst,	   2, (RR, SH),      cmp,  t_mvn_tst),
23865  CL("tstp",	110f000,     	   2, (RR, SH),      cmp),
23866 tCE("cmp",	1500000, _cmp,	   2, (RR, SH),      cmp,  t_mov_cmp),
23867 tC3w("cmps",	1500000, _cmp,	   2, (RR, SH),      cmp,  t_mov_cmp),
23868  CL("cmpp",	150f000,     	   2, (RR, SH),      cmp),
23869 tCE("cmn",	1700000, _cmn,	   2, (RR, SH),      cmp,  t_mvn_tst),
23870 tC3w("cmns",	1700000, _cmn,	   2, (RR, SH),      cmp,  t_mvn_tst),
23871  CL("cmnp",	170f000,     	   2, (RR, SH),      cmp),
23872
23873 tCE("mov",	1a00000, _mov,	   2, (RR, SH),      mov,  t_mov_cmp),
23874 tC3("movs",	1b00000, _movs,	   2, (RR, SHG),     mov,  t_mov_cmp),
23875 tCE("mvn",	1e00000, _mvn,	   2, (RR, SH),      mov,  t_mvn_tst),
23876 tC3("mvns",	1f00000, _mvns,	   2, (RR, SH),      mov,  t_mvn_tst),
23877
23878 tCE("ldr",	4100000, _ldr,	   2, (RR, ADDRGLDR),ldst, t_ldst),
23879 tC3("ldrb",	4500000, _ldrb,	   2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
23880 tCE("str",	4000000, _str,	   _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
23881								OP_RRnpc),
23882					OP_ADDRGLDR),ldst, t_ldst),
23883 tC3("strb",	4400000, _strb,	   2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
23884
23885 tCE("stm",	8800000, _stmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
23886 tC3("stmia",	8800000, _stmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
23887 tC3("stmea",	8800000, _stmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
23888 tCE("ldm",	8900000, _ldmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
23889 tC3("ldmia",	8900000, _ldmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
23890 tC3("ldmfd",	8900000, _ldmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
23891
23892 tCE("b",	a000000, _b,	   1, (EXPr),	     branch, t_branch),
23893 TCE("bl",	b000000, f000f800, 1, (EXPr),	     bl, t_branch23),
23894
23895  /* Pseudo ops.  */
23896 tCE("adr",	28f0000, _adr,	   2, (RR, EXP),     adr,  t_adr),
23897  C3(adrl,	28f0000,           2, (RR, EXP),     adrl),
23898 tCE("nop",	1a00000, _nop,	   1, (oI255c),	     nop,  t_nop),
23899 tCE("udf",	7f000f0, _udf,     1, (oIffffb),     bkpt, t_udf),
23900
23901  /* Thumb-compatibility pseudo ops.  */
23902 tCE("lsl",	1a00000, _lsl,	   3, (RR, oRR, SH), shift, t_shift),
23903 tC3("lsls",	1b00000, _lsls,	   3, (RR, oRR, SH), shift, t_shift),
23904 tCE("lsr",	1a00020, _lsr,	   3, (RR, oRR, SH), shift, t_shift),
23905 tC3("lsrs",	1b00020, _lsrs,	   3, (RR, oRR, SH), shift, t_shift),
23906 tCE("asr",	1a00040, _asr,	   3, (RR, oRR, SH), shift, t_shift),
 /* NOTE(review): entry macros (tCE/tC3 = ARM encoding paired with a
    16-bit Thumb pseudo, TCE/TC3 = ARM + 32-bit Thumb encodings,
    CE/C3/CL = ARM-only) are defined earlier in this file; columns are
    mnemonic, ARM opcode bits, Thumb opcode bits or pseudo, operand
    count, operand list, and the do_* encoder function(s) — confirm
    against the macro definitions.  */
 tC3("asrs",      1b00040, _asrs,     3, (RR, oRR, SH), shift, t_shift),
 tCE("ror",	1a00060, _ror,	   3, (RR, oRR, SH), shift, t_shift),
 tC3("rors",	1b00060, _rors,	   3, (RR, oRR, SH), shift, t_shift),
 tCE("neg",	2600000, _neg,	   2, (RR, RR),      rd_rn, t_neg),
 tC3("negs",	2700000, _negs,	   2, (RR, RR),      rd_rn, t_neg),
 tCE("push",	92d0000, _push,     1, (REGLST),	     push_pop, t_push_pop),
 tCE("pop",	8bd0000, _pop,	   1, (REGLST),	     push_pop, t_push_pop),

 /* These may simplify to neg.  */
 TCE("rsb",	0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
 TC3("rsbs",	0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),

#undef THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_os

 /* "swi" is the pre-UAL name for "svc"; both rows share identical
    ARM (f000000) and Thumb (df00) encodings.  */
 TCE("swi",	f000000, df00,     1, (EXPi),        swi, t_swi),
 TCE("svc",	f000000, df00,     1, (EXPi),        swi, t_swi),

#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v6

 TCE("cpy",       1a00000, 4600,     2, (RR, RR),      rd_rm, t_cpy),

 /* V1 instructions with no Thumb analogue prior to V6T2.  */
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v6t2

 TCE("teq",	1300000, ea900f00, 2, (RR, SH),      cmp,  t_mvn_tst),
 TC3w("teqs",	1300000, ea900f00, 2, (RR, SH),      cmp,  t_mvn_tst),
  CL("teqp",	130f000,           2, (RR, SH),      cmp),

 TC3("ldrt",	4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
 TC3("ldrbt",	4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
 TC3("strt",	4200000, f8400e00, 2, (RR_npcsp, ADDR),   ldstt, t_ldstt),
 TC3("strbt",	4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),

 /* "stmfd"/"ldmea" are stack-discipline aliases: note they reuse the
    stmdb/ldmdb opcode bits exactly.  */
 TC3("stmdb",	9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
 TC3("stmfd",     9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),

 TC3("ldmdb",	9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
 TC3("ldmea",	9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),

 /* V1 instructions with no Thumb analogue at all.  */
  CE("rsc",	0e00000,	   3, (RR, oRR, SH), arit),
  C3(rscs,	0f00000,	   3, (RR, oRR, SH), arit),

  C3(stmib,	9800000,	   2, (RRw, REGLST), ldmstm),
  C3(stmfa,	9800000,	   2, (RRw, REGLST), ldmstm),
  C3(stmda,	8000000,	   2, (RRw, REGLST), ldmstm),
  C3(stmed,	8000000,	   2, (RRw, REGLST), ldmstm),
  C3(ldmib,	9900000,	   2, (RRw, REGLST), ldmstm),
  C3(ldmed,	9900000,	   2, (RRw, REGLST), ldmstm),
  C3(ldmda,	8100000,	   2, (RRw, REGLST), ldmstm),
  C3(ldmfa,	8100000,	   2, (RRw, REGLST), ldmstm),
23961
#undef  ARM_VARIANT
#define ARM_VARIANT    & arm_ext_v2	/* ARM 2 - multiplies.	*/
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v4t

 tCE("mul",	0000090, _mul,	   3, (RRnpc, RRnpc, oRR), mul, t_mul),
 tC3("muls",	0100090, _muls,	   3, (RRnpc, RRnpc, oRR), mul, t_mul),

#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v6t2

 TCE("mla",	0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
  C3(mlas,	0300090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),

  /* Generic coprocessor instructions.	*/
 TCE("cdp",	e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp,    cdp),
 TCE("ldc",	c100000, ec100000, 3, (RCP, RCN, ADDRGLDC),	        lstc,   lstc),
 TC3("ldcl",	c500000, ec500000, 3, (RCP, RCN, ADDRGLDC),	        lstc,   lstc),
 TCE("stc",	c000000, ec000000, 3, (RCP, RCN, ADDRGLDC),	        lstc,   lstc),
 TC3("stcl",	c400000, ec400000, 3, (RCP, RCN, ADDRGLDC),	        lstc,   lstc),
 TCE("mcr",	e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b),   co_reg, co_reg),
 /* mrc allows APSR_nzcv as the destination (APSR_RR), unlike mcr.  */
 TCE("mrc",	e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b),   co_reg, co_reg),

#undef  ARM_VARIANT
#define ARM_VARIANT  & arm_ext_v2s /* ARM 3 - swp instructions.  */

  CE("swp",	1000090,           3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
  C3(swpb,	1400090,           3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),

#undef  ARM_VARIANT
#define ARM_VARIANT    & arm_ext_v3	/* ARM 6 Status register instructions.	*/
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_msr

 TCE("mrs",	1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
 TCE("msr",	120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),

#undef  ARM_VARIANT
#define ARM_VARIANT    & arm_ext_v3m	 /* ARM 7M long multiplies.  */
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v6t2

 /* The CM rows register the flag-setting ("s") ARM-only forms.  */
 TCE("smull",	0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
  CM("smull","s",	0d00090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
 TCE("umull",	0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
  CM("umull","s",	0900090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
 TCE("smlal",	0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
  CM("smlal","s",	0f00090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
 TCE("umlal",	0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
  CM("umlal","s",	0b00090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),

#undef  ARM_VARIANT
#define ARM_VARIANT    & arm_ext_v4	/* ARM Architecture 4.	*/
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v4t

 tC3("ldrh",	01000b0, _ldrh,     2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
 tC3("strh",	00000b0, _strh,     2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
 tC3("ldrsh",	01000f0, _ldrsh,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
 tC3("ldrsb",	01000d0, _ldrsb,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
 /* "ldsh"/"ldsb" are old names for ldrsh/ldrsb — same encodings
    and handlers.  */
 tC3("ldsh",	01000f0, _ldrsh,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
 tC3("ldsb",	01000d0, _ldrsb,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),

#undef  ARM_VARIANT
#define ARM_VARIANT  & arm_ext_v4t_5

  /* ARM Architecture 4T.  */
  /* Note: bx (and blx) are required on V5, even if the processor does
     not support Thumb.	 */
 TCE("bx",	12fff10, 4700, 1, (RR),	bx, t_bx),

#undef  ARM_VARIANT
#define ARM_VARIANT    & arm_ext_v5 /*  ARM Architecture 5T.	 */
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v5t

  /* Note: blx has 2 variants; the .value coded here is for
     BLX(2).  Only this variant has conditional execution.  */
 TCE("blx",	12fff30, 4780, 1, (RR_EXr),			    blx,  t_blx),
 TUE("bkpt",	1200070, be00, 1, (oIffffb),			    bkpt, t_bkpt),

#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v6t2

 TCE("clz",	16f0f10, fab0f080, 2, (RRnpc, RRnpc),		        rd_rm,  t_clz),
 TUF("ldc2",	c100000, fc100000, 3, (RCP, RCN, ADDRGLDC),	        lstc,	lstc),
 TUF("ldc2l",	c500000, fc500000, 3, (RCP, RCN, ADDRGLDC),		        lstc,	lstc),
 TUF("stc2",	c000000, fc000000, 3, (RCP, RCN, ADDRGLDC),	        lstc,	lstc),
 TUF("stc2l",	c400000, fc400000, 3, (RCP, RCN, ADDRGLDC),		        lstc,	lstc),
 TUF("cdp2",	e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp,    cdp),
 TUF("mcr2",	e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b),   co_reg, co_reg),
 TUF("mrc2",	e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b),   co_reg, co_reg),
24054
#undef  ARM_VARIANT
#define ARM_VARIANT    & arm_ext_v5exp /*  ARM Architecture 5TExP.  */
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v5exp

 TCE("smlabb",	1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
 TCE("smlatb",	10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
 TCE("smlabt",	10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
 TCE("smlatt",	10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),

 TCE("smlawb",	1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
 TCE("smlawt",	12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),

 TCE("smlalbb",	1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
 TCE("smlaltb",	14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
 TCE("smlalbt",	14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
 TCE("smlaltt",	14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),

 TCE("smulbb",	1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
 TCE("smultb",	16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
 TCE("smulbt",	16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
 TCE("smultt",	16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),

 TCE("smulwb",	12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
 TCE("smulwt",	12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),

 TCE("qadd",	1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc),	    rd_rm_rn, t_simd2),
 TCE("qdadd",	1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc),	    rd_rm_rn, t_simd2),
 TCE("qsub",	1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc),	    rd_rm_rn, t_simd2),
 TCE("qdsub",	1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc),	    rd_rm_rn, t_simd2),

#undef  ARM_VARIANT
#define ARM_VARIANT    & arm_ext_v5e /*  ARM Architecture 5TE.  */
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v6t2

 TUF("pld",	450f000, f810f000, 1, (ADDR),		     pld,  t_pld),
 TC3("ldrd",	00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
     ldrd, t_ldstd),
 TC3("strd",	00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
				       ADDRGLDRS), ldrd, t_ldstd),

 TCE("mcrr",	c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
 TCE("mrrc",	c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),

#undef  ARM_VARIANT
#define ARM_VARIANT  & arm_ext_v5j /*  ARM Architecture 5TEJ.  */

 TCE("bxj",	12fff20, f3c08f00, 1, (RR),			  bxj, t_bxj),

#undef  ARM_VARIANT
#define ARM_VARIANT    & arm_ext_v6 /*  ARM V6.  */
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v6

 TUF("cpsie",     1080000, b660,     2, (CPSF, oI31b),              cpsi,   t_cpsi),
 TUF("cpsid",     10c0000, b670,     2, (CPSF, oI31b),              cpsi,   t_cpsi),
 tCE("rev",       6bf0f30, _rev,      2, (RRnpc, RRnpc),             rd_rm,  t_rev),
 tCE("rev16",     6bf0fb0, _rev16,    2, (RRnpc, RRnpc),             rd_rm,  t_rev),
 tCE("revsh",     6ff0fb0, _revsh,    2, (RRnpc, RRnpc),             rd_rm,  t_rev),
 tCE("sxth",      6bf0070, _sxth,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
 tCE("uxth",      6ff0070, _uxth,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
 tCE("sxtb",      6af0070, _sxtb,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
 tCE("uxtb",      6ef0070, _uxtb,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
 TUF("setend",    1010000, b650,     1, (ENDI),                     setend, t_setend),

#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v6t2_v8m

 TCE("ldrex",	1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR),	  ldrex, t_ldrex),
 TCE("strex",	1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
				      strex,  t_strex),
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v6t2

 TUF("mcrr2",	c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
 TUF("mrrc2",	c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),

 TCE("ssat",	6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat,   t_ssat),
 TCE("usat",	6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat,   t_usat),

/*  ARM V6 not included in V7M.  */
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v6_notm
 /* Several rfe/srs rows below are stack-discipline aliases sharing
    one encoding (e.g. rfeia/rfe/rfefd all use 8900a00).  */
 TUF("rfeia",	8900a00, e990c000, 1, (RRw),			   rfe, rfe),
 TUF("rfe",	8900a00, e990c000, 1, (RRw),			   rfe, rfe),
  UF(rfeib,	9900a00,           1, (RRw),			   rfe),
  UF(rfeda,	8100a00,           1, (RRw),			   rfe),
 TUF("rfedb",	9100a00, e810c000, 1, (RRw),			   rfe, rfe),
 TUF("rfefd",	8900a00, e990c000, 1, (RRw),			   rfe, rfe),
  UF(rfefa,	8100a00,           1, (RRw),			   rfe),
 TUF("rfeea",	9100a00, e810c000, 1, (RRw),			   rfe, rfe),
  UF(rfeed,	9900a00,           1, (RRw),			   rfe),
 TUF("srsia",	8c00500, e980c000, 2, (oRRw, I31w),		   srs,  srs),
 TUF("srs",	8c00500, e980c000, 2, (oRRw, I31w),		   srs,  srs),
 TUF("srsea",	8c00500, e980c000, 2, (oRRw, I31w),		   srs,  srs),
  UF(srsib,	9c00500,           2, (oRRw, I31w),		   srs),
  UF(srsfa,	9c00500,           2, (oRRw, I31w),		   srs),
  UF(srsda,	8400500,	   2, (oRRw, I31w),		   srs),
  UF(srsed,	8400500,	   2, (oRRw, I31w),		   srs),
 TUF("srsdb",	9400500, e800c000, 2, (oRRw, I31w),		   srs,  srs),
 TUF("srsfd",	9400500, e800c000, 2, (oRRw, I31w),		   srs,  srs),
 TUF("cps",	1020000, f3af8100, 1, (I31b),			  imm0, t_cps),
24158
/*  ARM V6 not included in V7M (eg. integer SIMD).  */
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v6_dsp
 TCE("pkhbt",	6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll),   pkhbt, t_pkhbt),
 TCE("pkhtb",	6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar),   pkhtb, t_pkhtb),
 TCE("qadd16",	6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 TCE("qadd8",	6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 TCE("qasx",	6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 /* Old name for QASX.  */
 TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 TCE("qsax",	6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 /* Old name for QSAX.  */
 TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 TCE("qsub16",	6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 TCE("qsub8",	6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 TCE("sadd16",	6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 TCE("sadd8",	6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 TCE("sasx",	6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 /* Old name for SASX.  */
 TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 TCE("shadd16",	6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 TCE("shadd8",	6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 TCE("shasx",   6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 /* Old name for SHASX.  */
 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 TCE("shsax",     6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 /* Old name for SHSAX.  */
 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 TCE("shsub16",	6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 TCE("shsub8",	6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 TCE("ssax",	6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 /* Old name for SSAX.  */
 TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 TCE("ssub16",	6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 TCE("ssub8",	6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 TCE("uadd16",	6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 TCE("uadd8",	6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 TCE("uasx",	6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 /* Old name for UASX.  */
 TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 TCE("uhadd16",	6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 TCE("uhadd8",	6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 TCE("uhasx",   6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 /* Old name for UHASX.  */
 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 TCE("uhsax",     6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 /* Old name for UHSAX.  */
 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 TCE("uhsub16",	6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 TCE("uhsub8",	6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 TCE("uqadd16",	6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 TCE("uqadd8",	6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 TCE("uqasx",   6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 /* Old name for UQASX.  */
 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 TCE("uqsax",     6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 /* Old name for UQSAX.  */
 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 TCE("uqsub16",	6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 TCE("uqsub8",	6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 TCE("usub16",	6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 TCE("usax",	6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 /* Old name for USAX.  */
 TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 TCE("usub8",	6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 TCE("sxtah",	6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
 TCE("sxtab16",	6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
 TCE("sxtab",	6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
 TCE("sxtb16",	68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR),	   sxth,  t_sxth),
 TCE("uxtah",	6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
 TCE("uxtab16",	6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
 TCE("uxtab",	6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
 TCE("uxtb16",	6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR),	   sxth,  t_sxth),
 TCE("sel",	6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
 TCE("smlad",	7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
 TCE("smladx",	7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
 TCE("smlald",	7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
 TCE("smlaldx",	7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
 TCE("smlsd",	7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
 TCE("smlsdx",	7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
 TCE("smlsld",	7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
 TCE("smlsldx",	7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
 TCE("smmla",	7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
 TCE("smmlar",	7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
 TCE("smmls",	75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
 TCE("smmlsr",	75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
 TCE("smmul",	750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
 TCE("smmulr",	750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
 TCE("smuad",	700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
 TCE("smuadx",	700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
 TCE("smusd",	700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
 TCE("smusdx",	700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
 TCE("ssat16",	6a00f30, f3200000, 3, (RRnpc, I16, RRnpc),	   ssat16, t_ssat16),
 TCE("umaal",	0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,  t_mlal),
 TCE("usad8",	780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc),	   smul,   t_simd),
 TCE("usada8",	7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla,   t_mla),
 TCE("usat16",	6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc),	   usat16, t_usat16),
24256
#undef  ARM_VARIANT
#define ARM_VARIANT   & arm_ext_v6k_v6t2
#undef  THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_v6k_v6t2

 tCE("yield",	320f001, _yield,    0, (), noargs, t_hint),
 tCE("wfe",	320f002, _wfe,      0, (), noargs, t_hint),
 tCE("wfi",	320f003, _wfi,      0, (), noargs, t_hint),
 tCE("sev",	320f004, _sev,      0, (), noargs, t_hint),

#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v6_notm
 TCE("ldrexd",	1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
				      ldrexd, t_ldrexd),
 TCE("strexd",	1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
				       RRnpcb), strexd, t_strexd),

#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v6t2_v8m
 TCE("ldrexb",	1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
     rd_rn,  rd_rn),
 TCE("ldrexh",	1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
     rd_rn,  rd_rn),
 TCE("strexb",	1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
     strex, t_strexbh),
 TCE("strexh",	1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
     strex, t_strexbh),
 TUF("clrex",	57ff01f, f3bf8f2f, 0, (),			      noargs, noargs),

#undef  ARM_VARIANT
#define ARM_VARIANT    & arm_ext_sec
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_sec

 TCE("smc",	1600070, f7f08000, 1, (EXPi), smc, t_smc),

#undef	ARM_VARIANT
#define	ARM_VARIANT    & arm_ext_virt
#undef	THUMB_VARIANT
#define	THUMB_VARIANT    & arm_ext_virt

 TCE("hvc",	1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
 TCE("eret",	160006e, f3de8f00, 0, (), noargs, noargs),

#undef	ARM_VARIANT
#define	ARM_VARIANT    & arm_ext_pan
#undef	THUMB_VARIANT
#define	THUMB_VARIANT  & arm_ext_pan

 TUF("setpan",	1100000, b610, 1, (I7), setpan, t_setpan),

#undef  ARM_VARIANT
#define ARM_VARIANT    & arm_ext_v6t2
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v6t2

 TCE("bfc",	7c0001f, f36f0000, 3, (RRnpc, I31, I32),	   bfc, t_bfc),
 TCE("bfi",	7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
 TCE("sbfx",	7a00050, f3400000, 4, (RR, RR, I31, I32),	   bfx, t_bfx),
 TCE("ubfx",	7e00050, f3c00000, 4, (RR, RR, I31, I32),	   bfx, t_bfx),

 TCE("mls",	0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
 TCE("rbit",	6ff0f30, fa90f0a0, 2, (RR, RR),			    rd_rm, t_rbit),

 TC3("ldrht",	03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
 TC3("ldrsht",	03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
 TC3("ldrsbt",	03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
 TC3("strht",	02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),

#undef  ARM_VARIANT
#define ARM_VARIANT    & arm_ext_v3
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v6t2

 TUE("csdb",	320f014, f3af8014, 0, (), noargs, t_csdb),
 TUF("ssbb",	57ff040, f3bf8f40, 0, (), noargs, t_csdb),
 TUF("pssbb",	57ff044, f3bf8f44, 0, (), noargs, t_csdb),

#undef  ARM_VARIANT
#define ARM_VARIANT    & arm_ext_v6t2
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v6t2_v8m
 TCE("movw",	3000000, f2400000, 2, (RRnpc, HALF),		    mov16, t_mov16),
 TCE("movt",	3400000, f2c00000, 2, (RRnpc, HALF),		    mov16, t_mov16),

 /* Thumb-only instructions.  */
#undef  ARM_VARIANT
#define ARM_VARIANT NULL
  TUE("cbnz",     0,           b900,     2, (RR, EXP), 0, t_cbz),
  TUE("cbz",      0,           b100,     2, (RR, EXP), 0, t_cbz),

 /* ARM does not really have an IT instruction, so always allow it.
    The opcode is copied from Thumb in order to allow warnings in
    -mimplicit-it=[never | arm] modes.  */
#undef  ARM_VARIANT
#define ARM_VARIANT  & arm_ext_v1
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v6t2

 /* All 15 then/else combinations; the condition-mask bits of the
    opcode encode the t/e pattern.  */
 TUE("it",        bf08,        bf08,     1, (COND),   it,    t_it),
 TUE("itt",       bf0c,        bf0c,     1, (COND),   it,    t_it),
 TUE("ite",       bf04,        bf04,     1, (COND),   it,    t_it),
 TUE("ittt",      bf0e,        bf0e,     1, (COND),   it,    t_it),
 TUE("itet",      bf06,        bf06,     1, (COND),   it,    t_it),
 TUE("itte",      bf0a,        bf0a,     1, (COND),   it,    t_it),
 TUE("itee",      bf02,        bf02,     1, (COND),   it,    t_it),
 TUE("itttt",     bf0f,        bf0f,     1, (COND),   it,    t_it),
 TUE("itett",     bf07,        bf07,     1, (COND),   it,    t_it),
 TUE("ittet",     bf0b,        bf0b,     1, (COND),   it,    t_it),
 TUE("iteet",     bf03,        bf03,     1, (COND),   it,    t_it),
 TUE("ittte",     bf0d,        bf0d,     1, (COND),   it,    t_it),
 TUE("itete",     bf05,        bf05,     1, (COND),   it,    t_it),
 TUE("ittee",     bf09,        bf09,     1, (COND),   it,    t_it),
 TUE("iteee",     bf01,        bf01,     1, (COND),   it,    t_it),
 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent.  */
 TC3("rrx",       01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
 TC3("rrxs",      01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),

 /* Thumb2 only instructions.  */
#undef  ARM_VARIANT
#define ARM_VARIANT  NULL

 TCE("addw",	0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
 TCE("subw",	0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
 TCE("orn",       0, ea600000, 3, (RR, oRR, SH),  0, t_orn),
 TCE("orns",      0, ea700000, 3, (RR, oRR, SH),  0, t_orn),
 TCE("tbb",       0, e8d0f000, 1, (TB), 0, t_tb),
 TCE("tbh",       0, e8d0f010, 1, (TB), 0, t_tb),

 /* Hardware division instructions.  */
#undef  ARM_VARIANT
#define ARM_VARIANT    & arm_ext_adiv
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_div

 TCE("sdiv",	710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
 TCE("udiv",	730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
24394
 /* ARM V6M/V7 instructions.  */
#undef  ARM_VARIANT
#define ARM_VARIANT    & arm_ext_barrier
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_barrier

 TUF("dmb",	57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
 TUF("dsb",	57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
 TUF("isb",	57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),

 /* ARM V7 instructions.  */
#undef  ARM_VARIANT
#define ARM_VARIANT    & arm_ext_v7
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v7

 TUF("pli",	450f000, f910f000, 1, (ADDR),	  pli,	    t_pld),
 TCE("dbg",	320f0f0, f3af80f0, 1, (I15),	  dbg,	    t_dbg),

#undef  ARM_VARIANT
#define ARM_VARIANT    & arm_ext_mp
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_mp

 TUF("pldw",	410f000, f830f000, 1, (ADDR),	pld,	t_pld),

 /* ARMv8 instructions.  */
#undef  ARM_VARIANT
#define ARM_VARIANT   & arm_ext_v8

/* Instructions shared between armv8-a and armv8-m.  */
#undef  THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_atomics

 TCE("lda",	1900c9f, e8d00faf, 2, (RRnpc, RRnpcb),	rd_rn,	rd_rn),
 TCE("ldab",	1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb),	rd_rn,  rd_rn),
 TCE("ldah",	1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb),	rd_rn,  rd_rn),
 TCE("stl",	180fc90, e8c00faf, 2, (RRnpc, RRnpcb),	rm_rn,  rd_rn),
 TCE("stlb",	1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb),	rm_rn,  rd_rn),
 TCE("stlh",	1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb),	rm_rn,  rd_rn),
 TCE("ldaex",	1900e9f, e8d00fef, 2, (RRnpc, RRnpcb),	rd_rn,	rd_rn),
 TCE("ldaexb",	1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb),	rd_rn,  rd_rn),
 TCE("ldaexh",	1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb),	rd_rn,  rd_rn),
 TCE("stlex",	1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
							stlex,  t_stlex),
 TCE("stlexb",	1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
							stlex, t_stlex),
 TCE("stlexh",	1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
							stlex, t_stlex),
#undef  THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_v8

 tCE("sevl",	320f005, _sevl,    0, (),		noargs,	t_hint),
 TCE("ldaexd",	1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
							ldrexd, t_ldrexd),
 TCE("stlexd",	1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
							strexd, t_strexd),

/* Defined in V8 but is in undefined encoding space for earlier
   architectures.  However earlier architectures are required to treat
   this instruction as a semihosting trap as well.  Hence while not
   explicitly defined as such, it is in fact correct to define the
   instruction for all architectures.  */
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v1
#undef  ARM_VARIANT
#define ARM_VARIANT  & arm_ext_v1
 TUE("hlt",	1000070, ba80,     1, (oIffffb),	bkpt,	t_hlt),

 /* ARMv8 T32 only.  */
#undef  ARM_VARIANT
#define ARM_VARIANT  NULL
 TUF("dcps1",	0,	 f78f8001, 0, (),	noargs, noargs),
 TUF("dcps2",	0,	 f78f8002, 0, (),	noargs, noargs),
 TUF("dcps3",	0,	 f78f8003, 0, (),	noargs, noargs),
24470
  /* FP for ARMv8.  */
#undef  ARM_VARIANT
#define ARM_VARIANT   & fpu_vfp_ext_armv8xd
#undef  THUMB_VARIANT
#define THUMB_VARIANT & fpu_vfp_ext_armv8xd

 /* NOTE(review): nUF/nCE/mnCE/mnUF appear to be the Neon/MVE-style
    entry macros (mnemonic given as an identifier, shared neon_*
    encoders) — confirm against the macro definitions earlier in the
    file.  */
  nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD),		vsel),
  nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD),		vsel),
  nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD),		vsel),
  nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD),		vsel),
  nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ),		vrintr),
  mnCE(vrintz, _vrintr, 2, (RNSDQMQ, oRNSDQMQ),		vrintz),
  mnCE(vrintx, _vrintr, 2, (RNSDQMQ, oRNSDQMQ),		vrintx),
  mnUF(vrinta, _vrinta, 2, (RNSDQMQ, oRNSDQMQ),		vrinta),
  mnUF(vrintn, _vrinta, 2, (RNSDQMQ, oRNSDQMQ),		vrintn),
  mnUF(vrintp, _vrinta, 2, (RNSDQMQ, oRNSDQMQ),		vrintp),
  mnUF(vrintm, _vrinta, 2, (RNSDQMQ, oRNSDQMQ),		vrintm),

  /* Crypto v1 extensions.  */
#undef  ARM_VARIANT
#define ARM_VARIANT & fpu_crypto_ext_armv8
#undef  THUMB_VARIANT
#define THUMB_VARIANT & fpu_crypto_ext_armv8

  nUF(aese, _aes, 2, (RNQ, RNQ), aese),
  nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
  nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
  nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
  nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
  nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
  nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
  nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
  nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
  nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
  nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
  nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
  nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
  nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),

#undef  ARM_VARIANT
#define ARM_VARIANT   & arm_ext_crc
#undef  THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_crc
  TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
  TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
  TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
  TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
  TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
  TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),

 /* ARMv8.2 RAS extension.  */
#undef  ARM_VARIANT
#define ARM_VARIANT   & arm_ext_ras
#undef  THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_ras
 TUE ("esb", 320f010, f3af8010, 0, (), noargs,  noargs),

#undef  ARM_VARIANT
#define ARM_VARIANT   & arm_ext_v8_3
#undef  THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_v8_3
 NCE (vjcvt, eb90bc0, 2, (RVS, RVD), vjcvt),

#undef  ARM_VARIANT
#define ARM_VARIANT   & fpu_neon_ext_dotprod
#undef  THUMB_VARIANT
#define THUMB_VARIANT & fpu_neon_ext_dotprod
 NUF (vsdot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_s),
 NUF (vudot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_u),
24545
 /* Legacy FPA coprocessor entries.  NOTE(review): cCE/cCL appear to be
    ARM-only conditional entry macros for the old FPA instruction set —
    confirm against the macro definitions earlier in the file.  */
 cCE("wfs",	e200110, 1, (RR),	     rd),
 cCE("rfs",	e300110, 1, (RR),	     rd),
 cCE("wfc",	e400110, 1, (RR),	     rd),
 cCE("rfc",	e500110, 1, (RR),	     rd),

 cCL("ldfs",	c100100, 2, (RF, ADDRGLDC),  rd_cpaddr),
 cCL("ldfd",	c108100, 2, (RF, ADDRGLDC),  rd_cpaddr),
 cCL("ldfe",	c500100, 2, (RF, ADDRGLDC),  rd_cpaddr),
 cCL("ldfp",	c508100, 2, (RF, ADDRGLDC),  rd_cpaddr),

 cCL("stfs",	c000100, 2, (RF, ADDRGLDC),  rd_cpaddr),
 cCL("stfd",	c008100, 2, (RF, ADDRGLDC),  rd_cpaddr),
 cCL("stfe",	c400100, 2, (RF, ADDRGLDC),  rd_cpaddr),
 cCL("stfp",	c408100, 2, (RF, ADDRGLDC),  rd_cpaddr),

 /* The s/d/e letter selects precision; the trailing p/m/z letter
    selects the rounding-mode suffix variants.  */
 cCL("mvfs",	e008100, 2, (RF, RF_IF),     rd_rm),
 cCL("mvfsp",	e008120, 2, (RF, RF_IF),     rd_rm),
 cCL("mvfsm",	e008140, 2, (RF, RF_IF),     rd_rm),
 cCL("mvfsz",	e008160, 2, (RF, RF_IF),     rd_rm),
 cCL("mvfd",	e008180, 2, (RF, RF_IF),     rd_rm),
 cCL("mvfdp",	e0081a0, 2, (RF, RF_IF),     rd_rm),
 cCL("mvfdm",	e0081c0, 2, (RF, RF_IF),     rd_rm),
 cCL("mvfdz",	e0081e0, 2, (RF, RF_IF),     rd_rm),
 cCL("mvfe",	e088100, 2, (RF, RF_IF),     rd_rm),
 cCL("mvfep",	e088120, 2, (RF, RF_IF),     rd_rm),
 cCL("mvfem",	e088140, 2, (RF, RF_IF),     rd_rm),
 cCL("mvfez",	e088160, 2, (RF, RF_IF),     rd_rm),

 cCL("mnfs",	e108100, 2, (RF, RF_IF),     rd_rm),
 cCL("mnfsp",	e108120, 2, (RF, RF_IF),     rd_rm),
 cCL("mnfsm",	e108140, 2, (RF, RF_IF),     rd_rm),
 cCL("mnfsz",	e108160, 2, (RF, RF_IF),     rd_rm),
 cCL("mnfd",	e108180, 2, (RF, RF_IF),     rd_rm),
 cCL("mnfdp",	e1081a0, 2, (RF, RF_IF),     rd_rm),
 cCL("mnfdm",	e1081c0, 2, (RF, RF_IF),     rd_rm),
 cCL("mnfdz",	e1081e0, 2, (RF, RF_IF),     rd_rm),
 cCL("mnfe",	e188100, 2, (RF, RF_IF),     rd_rm),
 cCL("mnfep",	e188120, 2, (RF, RF_IF),     rd_rm),
 cCL("mnfem",	e188140, 2, (RF, RF_IF),     rd_rm),
 cCL("mnfez",	e188160, 2, (RF, RF_IF),     rd_rm),

 cCL("abss",	e208100, 2, (RF, RF_IF),     rd_rm),
 cCL("abssp",	e208120, 2, (RF, RF_IF),     rd_rm),
 cCL("abssm",	e208140, 2, (RF, RF_IF),     rd_rm),
 cCL("abssz",	e208160, 2, (RF, RF_IF),     rd_rm),
 cCL("absd",	e208180, 2, (RF, RF_IF),     rd_rm),
 cCL("absdp",	e2081a0, 2, (RF, RF_IF),     rd_rm),
24593 cCL("absdm",	e2081c0, 2, (RF, RF_IF),     rd_rm),
24594 cCL("absdz",	e2081e0, 2, (RF, RF_IF),     rd_rm),
24595 cCL("abse",	e288100, 2, (RF, RF_IF),     rd_rm),
24596 cCL("absep",	e288120, 2, (RF, RF_IF),     rd_rm),
24597 cCL("absem",	e288140, 2, (RF, RF_IF),     rd_rm),
24598 cCL("absez",	e288160, 2, (RF, RF_IF),     rd_rm),
24599
24600 cCL("rnds",	e308100, 2, (RF, RF_IF),     rd_rm),
24601 cCL("rndsp",	e308120, 2, (RF, RF_IF),     rd_rm),
24602 cCL("rndsm",	e308140, 2, (RF, RF_IF),     rd_rm),
24603 cCL("rndsz",	e308160, 2, (RF, RF_IF),     rd_rm),
24604 cCL("rndd",	e308180, 2, (RF, RF_IF),     rd_rm),
24605 cCL("rnddp",	e3081a0, 2, (RF, RF_IF),     rd_rm),
24606 cCL("rnddm",	e3081c0, 2, (RF, RF_IF),     rd_rm),
24607 cCL("rnddz",	e3081e0, 2, (RF, RF_IF),     rd_rm),
24608 cCL("rnde",	e388100, 2, (RF, RF_IF),     rd_rm),
24609 cCL("rndep",	e388120, 2, (RF, RF_IF),     rd_rm),
24610 cCL("rndem",	e388140, 2, (RF, RF_IF),     rd_rm),
24611 cCL("rndez",	e388160, 2, (RF, RF_IF),     rd_rm),
24612
24613 cCL("sqts",	e408100, 2, (RF, RF_IF),     rd_rm),
24614 cCL("sqtsp",	e408120, 2, (RF, RF_IF),     rd_rm),
24615 cCL("sqtsm",	e408140, 2, (RF, RF_IF),     rd_rm),
24616 cCL("sqtsz",	e408160, 2, (RF, RF_IF),     rd_rm),
24617 cCL("sqtd",	e408180, 2, (RF, RF_IF),     rd_rm),
24618 cCL("sqtdp",	e4081a0, 2, (RF, RF_IF),     rd_rm),
24619 cCL("sqtdm",	e4081c0, 2, (RF, RF_IF),     rd_rm),
24620 cCL("sqtdz",	e4081e0, 2, (RF, RF_IF),     rd_rm),
24621 cCL("sqte",	e488100, 2, (RF, RF_IF),     rd_rm),
24622 cCL("sqtep",	e488120, 2, (RF, RF_IF),     rd_rm),
24623 cCL("sqtem",	e488140, 2, (RF, RF_IF),     rd_rm),
24624 cCL("sqtez",	e488160, 2, (RF, RF_IF),     rd_rm),
24625
24626 cCL("logs",	e508100, 2, (RF, RF_IF),     rd_rm),
24627 cCL("logsp",	e508120, 2, (RF, RF_IF),     rd_rm),
24628 cCL("logsm",	e508140, 2, (RF, RF_IF),     rd_rm),
24629 cCL("logsz",	e508160, 2, (RF, RF_IF),     rd_rm),
24630 cCL("logd",	e508180, 2, (RF, RF_IF),     rd_rm),
24631 cCL("logdp",	e5081a0, 2, (RF, RF_IF),     rd_rm),
24632 cCL("logdm",	e5081c0, 2, (RF, RF_IF),     rd_rm),
24633 cCL("logdz",	e5081e0, 2, (RF, RF_IF),     rd_rm),
24634 cCL("loge",	e588100, 2, (RF, RF_IF),     rd_rm),
24635 cCL("logep",	e588120, 2, (RF, RF_IF),     rd_rm),
24636 cCL("logem",	e588140, 2, (RF, RF_IF),     rd_rm),
24637 cCL("logez",	e588160, 2, (RF, RF_IF),     rd_rm),
24638
24639 cCL("lgns",	e608100, 2, (RF, RF_IF),     rd_rm),
24640 cCL("lgnsp",	e608120, 2, (RF, RF_IF),     rd_rm),
24641 cCL("lgnsm",	e608140, 2, (RF, RF_IF),     rd_rm),
24642 cCL("lgnsz",	e608160, 2, (RF, RF_IF),     rd_rm),
24643 cCL("lgnd",	e608180, 2, (RF, RF_IF),     rd_rm),
24644 cCL("lgndp",	e6081a0, 2, (RF, RF_IF),     rd_rm),
24645 cCL("lgndm",	e6081c0, 2, (RF, RF_IF),     rd_rm),
24646 cCL("lgndz",	e6081e0, 2, (RF, RF_IF),     rd_rm),
24647 cCL("lgne",	e688100, 2, (RF, RF_IF),     rd_rm),
24648 cCL("lgnep",	e688120, 2, (RF, RF_IF),     rd_rm),
24649 cCL("lgnem",	e688140, 2, (RF, RF_IF),     rd_rm),
24650 cCL("lgnez",	e688160, 2, (RF, RF_IF),     rd_rm),
24651
24652 cCL("exps",	e708100, 2, (RF, RF_IF),     rd_rm),
24653 cCL("expsp",	e708120, 2, (RF, RF_IF),     rd_rm),
24654 cCL("expsm",	e708140, 2, (RF, RF_IF),     rd_rm),
24655 cCL("expsz",	e708160, 2, (RF, RF_IF),     rd_rm),
24656 cCL("expd",	e708180, 2, (RF, RF_IF),     rd_rm),
24657 cCL("expdp",	e7081a0, 2, (RF, RF_IF),     rd_rm),
24658 cCL("expdm",	e7081c0, 2, (RF, RF_IF),     rd_rm),
24659 cCL("expdz",	e7081e0, 2, (RF, RF_IF),     rd_rm),
24660 cCL("expe",	e788100, 2, (RF, RF_IF),     rd_rm),
24661 cCL("expep",	e788120, 2, (RF, RF_IF),     rd_rm),
24662 cCL("expem",	e788140, 2, (RF, RF_IF),     rd_rm),
24663 cCL("expdz",	e788160, 2, (RF, RF_IF),     rd_rm),
24664
24665 cCL("sins",	e808100, 2, (RF, RF_IF),     rd_rm),
24666 cCL("sinsp",	e808120, 2, (RF, RF_IF),     rd_rm),
24667 cCL("sinsm",	e808140, 2, (RF, RF_IF),     rd_rm),
24668 cCL("sinsz",	e808160, 2, (RF, RF_IF),     rd_rm),
24669 cCL("sind",	e808180, 2, (RF, RF_IF),     rd_rm),
24670 cCL("sindp",	e8081a0, 2, (RF, RF_IF),     rd_rm),
24671 cCL("sindm",	e8081c0, 2, (RF, RF_IF),     rd_rm),
24672 cCL("sindz",	e8081e0, 2, (RF, RF_IF),     rd_rm),
24673 cCL("sine",	e888100, 2, (RF, RF_IF),     rd_rm),
24674 cCL("sinep",	e888120, 2, (RF, RF_IF),     rd_rm),
24675 cCL("sinem",	e888140, 2, (RF, RF_IF),     rd_rm),
24676 cCL("sinez",	e888160, 2, (RF, RF_IF),     rd_rm),
24677
24678 cCL("coss",	e908100, 2, (RF, RF_IF),     rd_rm),
24679 cCL("cossp",	e908120, 2, (RF, RF_IF),     rd_rm),
24680 cCL("cossm",	e908140, 2, (RF, RF_IF),     rd_rm),
24681 cCL("cossz",	e908160, 2, (RF, RF_IF),     rd_rm),
24682 cCL("cosd",	e908180, 2, (RF, RF_IF),     rd_rm),
24683 cCL("cosdp",	e9081a0, 2, (RF, RF_IF),     rd_rm),
24684 cCL("cosdm",	e9081c0, 2, (RF, RF_IF),     rd_rm),
24685 cCL("cosdz",	e9081e0, 2, (RF, RF_IF),     rd_rm),
24686 cCL("cose",	e988100, 2, (RF, RF_IF),     rd_rm),
24687 cCL("cosep",	e988120, 2, (RF, RF_IF),     rd_rm),
24688 cCL("cosem",	e988140, 2, (RF, RF_IF),     rd_rm),
24689 cCL("cosez",	e988160, 2, (RF, RF_IF),     rd_rm),
24690
24691 cCL("tans",	ea08100, 2, (RF, RF_IF),     rd_rm),
24692 cCL("tansp",	ea08120, 2, (RF, RF_IF),     rd_rm),
24693 cCL("tansm",	ea08140, 2, (RF, RF_IF),     rd_rm),
24694 cCL("tansz",	ea08160, 2, (RF, RF_IF),     rd_rm),
24695 cCL("tand",	ea08180, 2, (RF, RF_IF),     rd_rm),
24696 cCL("tandp",	ea081a0, 2, (RF, RF_IF),     rd_rm),
24697 cCL("tandm",	ea081c0, 2, (RF, RF_IF),     rd_rm),
24698 cCL("tandz",	ea081e0, 2, (RF, RF_IF),     rd_rm),
24699 cCL("tane",	ea88100, 2, (RF, RF_IF),     rd_rm),
24700 cCL("tanep",	ea88120, 2, (RF, RF_IF),     rd_rm),
24701 cCL("tanem",	ea88140, 2, (RF, RF_IF),     rd_rm),
24702 cCL("tanez",	ea88160, 2, (RF, RF_IF),     rd_rm),
24703
24704 cCL("asns",	eb08100, 2, (RF, RF_IF),     rd_rm),
24705 cCL("asnsp",	eb08120, 2, (RF, RF_IF),     rd_rm),
24706 cCL("asnsm",	eb08140, 2, (RF, RF_IF),     rd_rm),
24707 cCL("asnsz",	eb08160, 2, (RF, RF_IF),     rd_rm),
24708 cCL("asnd",	eb08180, 2, (RF, RF_IF),     rd_rm),
24709 cCL("asndp",	eb081a0, 2, (RF, RF_IF),     rd_rm),
24710 cCL("asndm",	eb081c0, 2, (RF, RF_IF),     rd_rm),
24711 cCL("asndz",	eb081e0, 2, (RF, RF_IF),     rd_rm),
24712 cCL("asne",	eb88100, 2, (RF, RF_IF),     rd_rm),
24713 cCL("asnep",	eb88120, 2, (RF, RF_IF),     rd_rm),
24714 cCL("asnem",	eb88140, 2, (RF, RF_IF),     rd_rm),
24715 cCL("asnez",	eb88160, 2, (RF, RF_IF),     rd_rm),
24716
24717 cCL("acss",	ec08100, 2, (RF, RF_IF),     rd_rm),
24718 cCL("acssp",	ec08120, 2, (RF, RF_IF),     rd_rm),
24719 cCL("acssm",	ec08140, 2, (RF, RF_IF),     rd_rm),
24720 cCL("acssz",	ec08160, 2, (RF, RF_IF),     rd_rm),
24721 cCL("acsd",	ec08180, 2, (RF, RF_IF),     rd_rm),
24722 cCL("acsdp",	ec081a0, 2, (RF, RF_IF),     rd_rm),
24723 cCL("acsdm",	ec081c0, 2, (RF, RF_IF),     rd_rm),
24724 cCL("acsdz",	ec081e0, 2, (RF, RF_IF),     rd_rm),
24725 cCL("acse",	ec88100, 2, (RF, RF_IF),     rd_rm),
24726 cCL("acsep",	ec88120, 2, (RF, RF_IF),     rd_rm),
24727 cCL("acsem",	ec88140, 2, (RF, RF_IF),     rd_rm),
24728 cCL("acsez",	ec88160, 2, (RF, RF_IF),     rd_rm),
24729
24730 cCL("atns",	ed08100, 2, (RF, RF_IF),     rd_rm),
24731 cCL("atnsp",	ed08120, 2, (RF, RF_IF),     rd_rm),
24732 cCL("atnsm",	ed08140, 2, (RF, RF_IF),     rd_rm),
24733 cCL("atnsz",	ed08160, 2, (RF, RF_IF),     rd_rm),
24734 cCL("atnd",	ed08180, 2, (RF, RF_IF),     rd_rm),
24735 cCL("atndp",	ed081a0, 2, (RF, RF_IF),     rd_rm),
24736 cCL("atndm",	ed081c0, 2, (RF, RF_IF),     rd_rm),
24737 cCL("atndz",	ed081e0, 2, (RF, RF_IF),     rd_rm),
24738 cCL("atne",	ed88100, 2, (RF, RF_IF),     rd_rm),
24739 cCL("atnep",	ed88120, 2, (RF, RF_IF),     rd_rm),
24740 cCL("atnem",	ed88140, 2, (RF, RF_IF),     rd_rm),
24741 cCL("atnez",	ed88160, 2, (RF, RF_IF),     rd_rm),
24742
24743 cCL("urds",	ee08100, 2, (RF, RF_IF),     rd_rm),
24744 cCL("urdsp",	ee08120, 2, (RF, RF_IF),     rd_rm),
24745 cCL("urdsm",	ee08140, 2, (RF, RF_IF),     rd_rm),
24746 cCL("urdsz",	ee08160, 2, (RF, RF_IF),     rd_rm),
24747 cCL("urdd",	ee08180, 2, (RF, RF_IF),     rd_rm),
24748 cCL("urddp",	ee081a0, 2, (RF, RF_IF),     rd_rm),
24749 cCL("urddm",	ee081c0, 2, (RF, RF_IF),     rd_rm),
24750 cCL("urddz",	ee081e0, 2, (RF, RF_IF),     rd_rm),
24751 cCL("urde",	ee88100, 2, (RF, RF_IF),     rd_rm),
24752 cCL("urdep",	ee88120, 2, (RF, RF_IF),     rd_rm),
24753 cCL("urdem",	ee88140, 2, (RF, RF_IF),     rd_rm),
24754 cCL("urdez",	ee88160, 2, (RF, RF_IF),     rd_rm),
24755
24756 cCL("nrms",	ef08100, 2, (RF, RF_IF),     rd_rm),
24757 cCL("nrmsp",	ef08120, 2, (RF, RF_IF),     rd_rm),
24758 cCL("nrmsm",	ef08140, 2, (RF, RF_IF),     rd_rm),
24759 cCL("nrmsz",	ef08160, 2, (RF, RF_IF),     rd_rm),
24760 cCL("nrmd",	ef08180, 2, (RF, RF_IF),     rd_rm),
24761 cCL("nrmdp",	ef081a0, 2, (RF, RF_IF),     rd_rm),
24762 cCL("nrmdm",	ef081c0, 2, (RF, RF_IF),     rd_rm),
24763 cCL("nrmdz",	ef081e0, 2, (RF, RF_IF),     rd_rm),
24764 cCL("nrme",	ef88100, 2, (RF, RF_IF),     rd_rm),
24765 cCL("nrmep",	ef88120, 2, (RF, RF_IF),     rd_rm),
24766 cCL("nrmem",	ef88140, 2, (RF, RF_IF),     rd_rm),
24767 cCL("nrmez",	ef88160, 2, (RF, RF_IF),     rd_rm),
24768
24769 cCL("adfs",	e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
24770 cCL("adfsp",	e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
24771 cCL("adfsm",	e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
24772 cCL("adfsz",	e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
24773 cCL("adfd",	e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
24774 cCL("adfdp",	e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
24775 cCL("adfdm",	e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
24776 cCL("adfdz",	e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
24777 cCL("adfe",	e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
24778 cCL("adfep",	e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
24779 cCL("adfem",	e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
24780 cCL("adfez",	e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
24781
24782 cCL("sufs",	e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
24783 cCL("sufsp",	e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
24784 cCL("sufsm",	e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
24785 cCL("sufsz",	e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
24786 cCL("sufd",	e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
24787 cCL("sufdp",	e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
24788 cCL("sufdm",	e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
24789 cCL("sufdz",	e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
24790 cCL("sufe",	e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
24791 cCL("sufep",	e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
24792 cCL("sufem",	e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
24793 cCL("sufez",	e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
24794
24795 cCL("rsfs",	e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
24796 cCL("rsfsp",	e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
24797 cCL("rsfsm",	e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
24798 cCL("rsfsz",	e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
24799 cCL("rsfd",	e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
24800 cCL("rsfdp",	e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
24801 cCL("rsfdm",	e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
24802 cCL("rsfdz",	e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
24803 cCL("rsfe",	e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
24804 cCL("rsfep",	e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
24805 cCL("rsfem",	e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
24806 cCL("rsfez",	e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
24807
24808 cCL("mufs",	e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
24809 cCL("mufsp",	e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
24810 cCL("mufsm",	e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
24811 cCL("mufsz",	e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
24812 cCL("mufd",	e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
24813 cCL("mufdp",	e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
24814 cCL("mufdm",	e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
24815 cCL("mufdz",	e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
24816 cCL("mufe",	e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
24817 cCL("mufep",	e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
24818 cCL("mufem",	e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
24819 cCL("mufez",	e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
24820
24821 cCL("dvfs",	e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
24822 cCL("dvfsp",	e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
24823 cCL("dvfsm",	e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
24824 cCL("dvfsz",	e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
24825 cCL("dvfd",	e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
24826 cCL("dvfdp",	e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
24827 cCL("dvfdm",	e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
24828 cCL("dvfdz",	e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
24829 cCL("dvfe",	e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
24830 cCL("dvfep",	e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
24831 cCL("dvfem",	e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
24832 cCL("dvfez",	e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
24833
24834 cCL("rdfs",	e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
24835 cCL("rdfsp",	e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
24836 cCL("rdfsm",	e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
24837 cCL("rdfsz",	e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
24838 cCL("rdfd",	e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
24839 cCL("rdfdp",	e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
24840 cCL("rdfdm",	e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
24841 cCL("rdfdz",	e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
24842 cCL("rdfe",	e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
24843 cCL("rdfep",	e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
24844 cCL("rdfem",	e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
24845 cCL("rdfez",	e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
24846
24847 cCL("pows",	e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
24848 cCL("powsp",	e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
24849 cCL("powsm",	e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
24850 cCL("powsz",	e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
24851 cCL("powd",	e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
24852 cCL("powdp",	e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
24853 cCL("powdm",	e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
24854 cCL("powdz",	e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
24855 cCL("powe",	e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
24856 cCL("powep",	e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
24857 cCL("powem",	e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
24858 cCL("powez",	e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
24859
24860 cCL("rpws",	e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
24861 cCL("rpwsp",	e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
24862 cCL("rpwsm",	e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
24863 cCL("rpwsz",	e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
24864 cCL("rpwd",	e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
24865 cCL("rpwdp",	e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
24866 cCL("rpwdm",	e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
24867 cCL("rpwdz",	e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
24868 cCL("rpwe",	e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
24869 cCL("rpwep",	e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
24870 cCL("rpwem",	e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
24871 cCL("rpwez",	e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
24872
24873 cCL("rmfs",	e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
24874 cCL("rmfsp",	e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
24875 cCL("rmfsm",	e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
24876 cCL("rmfsz",	e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
24877 cCL("rmfd",	e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
24878 cCL("rmfdp",	e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
24879 cCL("rmfdm",	e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
24880 cCL("rmfdz",	e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
24881 cCL("rmfe",	e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
24882 cCL("rmfep",	e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
24883 cCL("rmfem",	e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
24884 cCL("rmfez",	e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
24885
24886 cCL("fmls",	e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
24887 cCL("fmlsp",	e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
24888 cCL("fmlsm",	e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
24889 cCL("fmlsz",	e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
24890 cCL("fmld",	e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
24891 cCL("fmldp",	e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
24892 cCL("fmldm",	e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
24893 cCL("fmldz",	e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
24894 cCL("fmle",	e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
24895 cCL("fmlep",	e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
24896 cCL("fmlem",	e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
24897 cCL("fmlez",	e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
24898
24899 cCL("fdvs",	ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
24900 cCL("fdvsp",	ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
24901 cCL("fdvsm",	ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
24902 cCL("fdvsz",	ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
24903 cCL("fdvd",	ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
24904 cCL("fdvdp",	ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
24905 cCL("fdvdm",	ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
24906 cCL("fdvdz",	ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
24907 cCL("fdve",	ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
24908 cCL("fdvep",	ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
24909 cCL("fdvem",	ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
24910 cCL("fdvez",	ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
24911
24912 cCL("frds",	eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
24913 cCL("frdsp",	eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
24914 cCL("frdsm",	eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
24915 cCL("frdsz",	eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
24916 cCL("frdd",	eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
24917 cCL("frddp",	eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
24918 cCL("frddm",	eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
24919 cCL("frddz",	eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
24920 cCL("frde",	eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
24921 cCL("frdep",	eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
24922 cCL("frdem",	eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
24923 cCL("frdez",	eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
24924
24925 cCL("pols",	ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
24926 cCL("polsp",	ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
24927 cCL("polsm",	ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
24928 cCL("polsz",	ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
24929 cCL("pold",	ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
24930 cCL("poldp",	ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
24931 cCL("poldm",	ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
24932 cCL("poldz",	ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
24933 cCL("pole",	ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
24934 cCL("polep",	ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
24935 cCL("polem",	ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
24936 cCL("polez",	ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
24937
24938 cCE("cmf",	e90f110, 2, (RF, RF_IF),     fpa_cmp),
24939 C3E("cmfe",	ed0f110, 2, (RF, RF_IF),     fpa_cmp),
24940 cCE("cnf",	eb0f110, 2, (RF, RF_IF),     fpa_cmp),
24941 C3E("cnfe",	ef0f110, 2, (RF, RF_IF),     fpa_cmp),
24942
24943 cCL("flts",	e000110, 2, (RF, RR),	     rn_rd),
24944 cCL("fltsp",	e000130, 2, (RF, RR),	     rn_rd),
24945 cCL("fltsm",	e000150, 2, (RF, RR),	     rn_rd),
24946 cCL("fltsz",	e000170, 2, (RF, RR),	     rn_rd),
24947 cCL("fltd",	e000190, 2, (RF, RR),	     rn_rd),
24948 cCL("fltdp",	e0001b0, 2, (RF, RR),	     rn_rd),
24949 cCL("fltdm",	e0001d0, 2, (RF, RR),	     rn_rd),
24950 cCL("fltdz",	e0001f0, 2, (RF, RR),	     rn_rd),
24951 cCL("flte",	e080110, 2, (RF, RR),	     rn_rd),
24952 cCL("fltep",	e080130, 2, (RF, RR),	     rn_rd),
24953 cCL("fltem",	e080150, 2, (RF, RR),	     rn_rd),
24954 cCL("fltez",	e080170, 2, (RF, RR),	     rn_rd),
24955
24956  /* The implementation of the FIX instruction is broken on some
24957     assemblers, in that it accepts a precision specifier as well as a
24958     rounding specifier, despite the fact that this is meaningless.
24959     To be more compatible, we accept it as well, though of course it
24960     does not set any bits.  */
24961 cCE("fix",	e100110, 2, (RR, RF),	     rd_rm),
24962 cCL("fixp",	e100130, 2, (RR, RF),	     rd_rm),
24963 cCL("fixm",	e100150, 2, (RR, RF),	     rd_rm),
24964 cCL("fixz",	e100170, 2, (RR, RF),	     rd_rm),
24965 cCL("fixsp",	e100130, 2, (RR, RF),	     rd_rm),
24966 cCL("fixsm",	e100150, 2, (RR, RF),	     rd_rm),
24967 cCL("fixsz",	e100170, 2, (RR, RF),	     rd_rm),
24968 cCL("fixdp",	e100130, 2, (RR, RF),	     rd_rm),
24969 cCL("fixdm",	e100150, 2, (RR, RF),	     rd_rm),
24970 cCL("fixdz",	e100170, 2, (RR, RF),	     rd_rm),
24971 cCL("fixep",	e100130, 2, (RR, RF),	     rd_rm),
24972 cCL("fixem",	e100150, 2, (RR, RF),	     rd_rm),
24973 cCL("fixez",	e100170, 2, (RR, RF),	     rd_rm),
24974
24975  /* Instructions that were new with the real FPA, call them V2.  */
24976#undef  ARM_VARIANT
24977#define ARM_VARIANT  & fpu_fpa_ext_v2
24978
24979 cCE("lfm",	c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
24980 cCL("lfmfd",	c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
24981 cCL("lfmea",	d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
24982 cCE("sfm",	c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
24983 cCL("sfmfd",	d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
24984 cCL("sfmea",	c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
24985
24986#undef  ARM_VARIANT
24987#define ARM_VARIANT  & fpu_vfp_ext_v1xd  /* VFP V1xD (single precision).  */
24988#undef THUMB_VARIANT
24989#define THUMB_VARIANT  & arm_ext_v6t2
24990 mcCE(vmrs,	ef00a10, 2, (APSR_RR, RVC),   vmrs),
24991 mcCE(vmsr,	ee00a10, 2, (RVC, RR),        vmsr),
24992 mcCE(fldd,	d100b00, 2, (RVD, ADDRGLDC),  vfp_dp_ldst),
24993 mcCE(fstd,	d000b00, 2, (RVD, ADDRGLDC),  vfp_dp_ldst),
24994 mcCE(flds,	d100a00, 2, (RVS, ADDRGLDC),  vfp_sp_ldst),
24995 mcCE(fsts,	d000a00, 2, (RVS, ADDRGLDC),  vfp_sp_ldst),
24996
24997  /* Memory operations.	 */
24998 mcCE(fldmias,	c900a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
24999 mcCE(fldmdbs,	d300a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
25000 mcCE(fstmias,	c800a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
25001 mcCE(fstmdbs,	d200a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
25002#undef THUMB_VARIANT
25003
25004  /* Moves and type conversions.  */
25005 cCE("fmstat",	ef1fa10, 0, (),		      noargs),
25006 cCE("fsitos",	eb80ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
25007 cCE("fuitos",	eb80a40, 2, (RVS, RVS),	      vfp_sp_monadic),
25008 cCE("ftosis",	ebd0a40, 2, (RVS, RVS),	      vfp_sp_monadic),
25009 cCE("ftosizs",	ebd0ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
25010 cCE("ftouis",	ebc0a40, 2, (RVS, RVS),	      vfp_sp_monadic),
25011 cCE("ftouizs",	ebc0ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
25012 cCE("fmrx",	ef00a10, 2, (RR, RVC),	      rd_rn),
25013 cCE("fmxr",	ee00a10, 2, (RVC, RR),	      rn_rd),
25014
25015  /* Memory operations.	 */
25016 cCE("fldmfds",	c900a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
25017 cCE("fldmeas",	d300a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
25018 cCE("fldmiax",	c900b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
25019 cCE("fldmfdx",	c900b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
25020 cCE("fldmdbx",	d300b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
25021 cCE("fldmeax",	d300b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
25022 cCE("fstmeas",	c800a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
25023 cCE("fstmfds",	d200a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
25024 cCE("fstmiax",	c800b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
25025 cCE("fstmeax",	c800b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
25026 cCE("fstmdbx",	d200b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
25027 cCE("fstmfdx",	d200b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
25028
25029  /* Monadic operations.  */
25030 cCE("fabss",	eb00ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
25031 cCE("fnegs",	eb10a40, 2, (RVS, RVS),	      vfp_sp_monadic),
25032 cCE("fsqrts",	eb10ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
25033
25034  /* Dyadic operations.	 */
25035 cCE("fadds",	e300a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
25036 cCE("fsubs",	e300a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
25037 cCE("fmuls",	e200a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
25038 cCE("fdivs",	e800a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
25039 cCE("fmacs",	e000a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
25040 cCE("fmscs",	e100a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
25041 cCE("fnmuls",	e200a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
25042 cCE("fnmacs",	e000a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
25043 cCE("fnmscs",	e100a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
25044
25045  /* Comparisons.  */
25046 cCE("fcmps",	eb40a40, 2, (RVS, RVS),	      vfp_sp_monadic),
25047 cCE("fcmpzs",	eb50a40, 1, (RVS),	      vfp_sp_compare_z),
25048 cCE("fcmpes",	eb40ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
25049 cCE("fcmpezs",	eb50ac0, 1, (RVS),	      vfp_sp_compare_z),
25050
25051 /* Double precision load/store are still present on single precision
25052    implementations.  */
25053 cCE("fldmiad",	c900b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
25054 cCE("fldmfdd",	c900b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
25055 cCE("fldmdbd",	d300b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
25056 cCE("fldmead",	d300b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
25057 cCE("fstmiad",	c800b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
25058 cCE("fstmead",	c800b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
25059 cCE("fstmdbd",	d200b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
25060 cCE("fstmfdd",	d200b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
25061
25062#undef  ARM_VARIANT
25063#define ARM_VARIANT  & fpu_vfp_ext_v1 /* VFP V1 (Double precision).  */
25064
25065  /* Moves and type conversions.  */
25066 cCE("fcvtds",	eb70ac0, 2, (RVD, RVS),	      vfp_dp_sp_cvt),
25067 cCE("fcvtsd",	eb70bc0, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
25068 cCE("fmdhr",	e200b10, 2, (RVD, RR),	      vfp_dp_rn_rd),
25069 cCE("fmdlr",	e000b10, 2, (RVD, RR),	      vfp_dp_rn_rd),
25070 cCE("fmrdh",	e300b10, 2, (RR, RVD),	      vfp_dp_rd_rn),
25071 cCE("fmrdl",	e100b10, 2, (RR, RVD),	      vfp_dp_rd_rn),
25072 cCE("fsitod",	eb80bc0, 2, (RVD, RVS),	      vfp_dp_sp_cvt),
25073 cCE("fuitod",	eb80b40, 2, (RVD, RVS),	      vfp_dp_sp_cvt),
25074 cCE("ftosid",	ebd0b40, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
25075 cCE("ftosizd",	ebd0bc0, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
25076 cCE("ftouid",	ebc0b40, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
25077 cCE("ftouizd",	ebc0bc0, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
25078
25079  /* Monadic operations.  */
25080 cCE("fabsd",	eb00bc0, 2, (RVD, RVD),	      vfp_dp_rd_rm),
25081 cCE("fnegd",	eb10b40, 2, (RVD, RVD),	      vfp_dp_rd_rm),
25082 cCE("fsqrtd",	eb10bc0, 2, (RVD, RVD),	      vfp_dp_rd_rm),
25083
25084  /* Dyadic operations.	 */
25085 cCE("faddd",	e300b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
25086 cCE("fsubd",	e300b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
25087 cCE("fmuld",	e200b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
25088 cCE("fdivd",	e800b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
25089 cCE("fmacd",	e000b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
25090 cCE("fmscd",	e100b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
25091 cCE("fnmuld",	e200b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
25092 cCE("fnmacd",	e000b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
25093 cCE("fnmscd",	e100b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
25094
25095  /* Comparisons.  */
25096 cCE("fcmpd",	eb40b40, 2, (RVD, RVD),	      vfp_dp_rd_rm),
25097 cCE("fcmpzd",	eb50b40, 1, (RVD),	      vfp_dp_rd),
25098 cCE("fcmped",	eb40bc0, 2, (RVD, RVD),	      vfp_dp_rd_rm),
25099 cCE("fcmpezd",	eb50bc0, 1, (RVD),	      vfp_dp_rd),
25100
25101/* Instructions which may belong to either the Neon or VFP instruction sets.
25102   Individual encoder functions perform additional architecture checks.  */
25103#undef  ARM_VARIANT
25104#define ARM_VARIANT    & fpu_vfp_ext_v1xd
25105#undef  THUMB_VARIANT
25106#define THUMB_VARIANT  & arm_ext_v6t2
25107
25108 NCE(vldm,      c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
25109 NCE(vldmia,    c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
25110 NCE(vldmdb,    d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
25111 NCE(vstm,      c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
25112 NCE(vstmia,    c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
25113 NCE(vstmdb,    d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
25114
25115 NCE(vpop,      0,       1, (VRSDLST),          vfp_nsyn_pop),
25116 NCE(vpush,     0,       1, (VRSDLST),          vfp_nsyn_push),
25117
25118#undef  THUMB_VARIANT
25119#define THUMB_VARIANT  & fpu_vfp_ext_v1xd
25120
25121  /* These mnemonics are unique to VFP.  */
25122 NCE(vsqrt,     0,       2, (RVSD, RVSD),       vfp_nsyn_sqrt),
25123 NCE(vdiv,      0,       3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
25124 nCE(vnmul,     _vnmul,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
25125 nCE(vnmla,     _vnmla,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
25126 nCE(vnmls,     _vnmls,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
25127 NCE(vcvtz,     0,       2, (RVSD, RVSD),       vfp_nsyn_cvtz),
25128
25129  /* Mnemonics shared by Neon and VFP.  */
25130 nCEF(vmls,     _vmls,    3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
25131
25132 mnCEF(vcvt,     _vcvt,   3, (RNSDQMQ, RNSDQMQ, oI32z), neon_cvt),
25133 nCEF(vcvtr,    _vcvt,   2, (RNSDQ, RNSDQ), neon_cvtr),
25134 MNCEF(vcvtb,	eb20a40, 3, (RVSDMQ, RVSDMQ, oI32b), neon_cvtb),
25135 MNCEF(vcvtt,	eb20a40, 3, (RVSDMQ, RVSDMQ, oI32b), neon_cvtt),
25136
25137
25138  /* NOTE: All VMOV encoding is special-cased!  */
25139 NCE(vmovq,     0,       1, (VMOV), neon_mov),
25140
25141#undef  THUMB_VARIANT
25142/* Could be either VLDR/VSTR or VLDR/VSTR (system register) which are guarded
25143   by different feature bits.  Since we are setting the Thumb guard, we can
25144   require Thumb-1 which makes it a nop guard and set the right feature bit in
25145   do_vldr_vstr ().  */
25146#define THUMB_VARIANT  & arm_ext_v4t
25147 NCE(vldr,      d100b00, 2, (VLDR, ADDRGLDC), vldr_vstr),
25148 NCE(vstr,      d000b00, 2, (VLDR, ADDRGLDC), vldr_vstr),
25149
25150#undef  ARM_VARIANT
25151#define ARM_VARIANT    & arm_ext_fp16
25152#undef  THUMB_VARIANT
25153#define THUMB_VARIANT  & arm_ext_fp16
25154 /* New instructions added from v8.2, allowing the extraction and insertion of
25155    the upper 16 bits of a 32-bit vector register.  */
25156 NCE (vmovx,     eb00a40,       2, (RVS, RVS), neon_movhf),
25157 NCE (vins,      eb00ac0,       2, (RVS, RVS), neon_movhf),
25158
25159 /* New backported fma/fms instructions optional in v8.2.  */
25160 NUF (vfmsl, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmsl),
25161 NUF (vfmal, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmal),
25162
25163#undef  THUMB_VARIANT
25164#define THUMB_VARIANT  & fpu_neon_ext_v1
25165#undef  ARM_VARIANT
25166#define ARM_VARIANT    & fpu_neon_ext_v1
25167
25168  /* Data processing with three registers of the same length.  */
25169  /* integer ops, valid types S8 S16 S32 U8 U16 U32.  */
25170 NUF(vaba,      0000710, 3, (RNDQ, RNDQ,  RNDQ), neon_dyadic_i_su),
25171 NUF(vabaq,     0000710, 3, (RNQ,  RNQ,   RNQ),  neon_dyadic_i_su),
25172 NUF(vhaddq,    0000000, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i_su),
25173 NUF(vrhaddq,   0000100, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i_su),
25174 NUF(vhsubq,    0000200, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i_su),
25175  /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64.  */
25176 NUF(vqaddq,    0000010, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i64_su),
25177 NUF(vqsubq,    0000210, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i64_su),
25178 NUF(vrshlq,    0000500, 3, (RNQ,  oRNQ,  RNQ),  neon_rshl),
25179 NUF(vqrshlq,   0000510, 3, (RNQ,  oRNQ,  RNQ),  neon_rshl),
25180  /* If not immediate, fall back to neon_dyadic_i64_su.
25181     shl should accept I8 I16 I32 I64,
25182     qshl should accept S8 S16 S32 S64 U8 U16 U32 U64.  */
25183 nUF(vshlq,     _vshl,    3, (RNQ,  oRNQ,  RNDQ_I63b), neon_shl),
25184 nUF(vqshlq,    _vqshl,   3, (RNQ,  oRNQ,  RNDQ_I63b), neon_qshl),
25185  /* Logic ops, types optional & ignored.  */
25186 nUF(vandq,     _vand,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
25187 nUF(vbicq,     _vbic,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
25188 nUF(vorrq,     _vorr,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
25189 nUF(vornq,     _vorn,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
25190 nUF(veorq,     _veor,    3, (RNQ,  oRNQ,  RNQ),       neon_logic),
25191  /* Bitfield ops, untyped.  */
25192 NUF(vbsl,      1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
25193 NUF(vbslq,     1100110, 3, (RNQ,  RNQ,  RNQ),  neon_bitfield),
25194 NUF(vbit,      1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
25195 NUF(vbitq,     1200110, 3, (RNQ,  RNQ,  RNQ),  neon_bitfield),
25196 NUF(vbif,      1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
25197 NUF(vbifq,     1300110, 3, (RNQ,  RNQ,  RNQ),  neon_bitfield),
25198  /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32.  */
25199 nUF(vabdq,     _vabd,    3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_if_su),
25200 nUF(vmaxq,     _vmax,    3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_if_su),
25201 nUF(vminq,     _vmin,    3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_if_su),
25202  /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
25203     back to neon_dyadic_if_su.  */
25204 nUF(vcge,      _vcge,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
25205 nUF(vcgeq,     _vcge,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp),
25206 nUF(vcgt,      _vcgt,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
25207 nUF(vcgtq,     _vcgt,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp),
25208 nUF(vclt,      _vclt,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
25209 nUF(vcltq,     _vclt,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp_inv),
25210 nUF(vcle,      _vcle,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
25211 nUF(vcleq,     _vcle,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp_inv),
25212  /* Comparison. Types I8 I16 I32 F32.  */
25213 nUF(vceq,      _vceq,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
25214 nUF(vceqq,     _vceq,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_ceq),
25215  /* As above, D registers only.  */
25216 nUF(vpmax,     _vpmax,   3, (RND, oRND, RND), neon_dyadic_if_su_d),
25217 nUF(vpmin,     _vpmin,   3, (RND, oRND, RND), neon_dyadic_if_su_d),
25218  /* Int and float variants, signedness unimportant.  */
25219 nUF(vmlaq,     _vmla,    3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_mac_maybe_scalar),
25220 nUF(vmlsq,     _vmls,    3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_mac_maybe_scalar),
25221 nUF(vpadd,     _vpadd,   3, (RND,  oRND,  RND),       neon_dyadic_if_i_d),
25222  /* Add/sub take types I8 I16 I32 I64 F32.  */
25223 nUF(vaddq,     _vadd,    3, (RNQ,  oRNQ,  RNQ),  neon_addsub_if_i),
25224 nUF(vsubq,     _vsub,    3, (RNQ,  oRNQ,  RNQ),  neon_addsub_if_i),
25225  /* vtst takes sizes 8, 16, 32.  */
25226 NUF(vtst,      0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
25227 NUF(vtstq,     0000810, 3, (RNQ,  oRNQ,  RNQ),  neon_tst),
25228  /* VMUL takes I8 I16 I32 F32 P8.  */
25229 nUF(vmulq,     _vmul,     3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_mul),
25230  /* VQD{R}MULH takes S16 S32.  */
25231 nUF(vqdmulhq,  _vqdmulh,  3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_qdmulh),
25232 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_qdmulh),
25233 NUF(vacge,     0000e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
25234 NUF(vacgeq,    0000e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute),
25235 NUF(vacgt,     0200e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
25236 NUF(vacgtq,    0200e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute),
25237 NUF(vaclt,     0200e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
25238 NUF(vacltq,    0200e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute_inv),
25239 NUF(vacle,     0000e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
25240 NUF(vacleq,    0000e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute_inv),
25241 NUF(vrecps,    0000f10,  3, (RNDQ, oRNDQ, RNDQ), neon_step),
25242 NUF(vrecpsq,   0000f10,  3, (RNQ,  oRNQ,  RNQ),  neon_step),
25243 NUF(vrsqrts,   0200f10,  3, (RNDQ, oRNDQ, RNDQ), neon_step),
25244 NUF(vrsqrtsq,  0200f10,  3, (RNQ,  oRNQ,  RNQ),  neon_step),
25245 /* ARM v8.1 extension.  */
25246 nUF (vqrdmlahq, _vqrdmlah, 3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_qrdmlah),
25247 nUF (vqrdmlsh,  _vqrdmlsh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
25248 nUF (vqrdmlshq, _vqrdmlsh, 3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_qrdmlah),
25249
25250  /* Two address, int/float. Types S8 S16 S32 F32.  */
25251 NUF(vabsq,     1b10300, 2, (RNQ,  RNQ),      neon_abs_neg),
25252 NUF(vnegq,     1b10380, 2, (RNQ,  RNQ),      neon_abs_neg),
25253
25254  /* Data processing with two registers and a shift amount.  */
25255  /* Right shifts, and variants with rounding.
25256     Types accepted S8 S16 S32 S64 U8 U16 U32 U64.  */
25257 NUF(vshrq,     0800010, 3, (RNQ,  oRNQ,  I64z), neon_rshift_round_imm),
25258 NUF(vrshrq,    0800210, 3, (RNQ,  oRNQ,  I64z), neon_rshift_round_imm),
25259 NUF(vsra,      0800110, 3, (RNDQ, oRNDQ, I64),  neon_rshift_round_imm),
25260 NUF(vsraq,     0800110, 3, (RNQ,  oRNQ,  I64),  neon_rshift_round_imm),
25261 NUF(vrsra,     0800310, 3, (RNDQ, oRNDQ, I64),  neon_rshift_round_imm),
25262 NUF(vrsraq,    0800310, 3, (RNQ,  oRNQ,  I64),  neon_rshift_round_imm),
25263  /* Shift and insert. Sizes accepted 8 16 32 64.  */
25264 NUF(vsliq,     1800510, 3, (RNQ,  oRNQ,  I63), neon_sli),
25265 NUF(vsriq,     1800410, 3, (RNQ,  oRNQ,  I64), neon_sri),
25266  /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64.  */
25267 NUF(vqshluq,   1800610, 3, (RNQ,  oRNQ,  I63), neon_qshlu_imm),
25268  /* Right shift immediate, saturating & narrowing, with rounding variants.
25269     Types accepted S16 S32 S64 U16 U32 U64.  */
25270 NUF(vqshrn,    0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
25271 NUF(vqrshrn,   0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
25272  /* As above, unsigned. Types accepted S16 S32 S64.  */
25273 NUF(vqshrun,   0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
25274 NUF(vqrshrun,  0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
25275  /* Right shift narrowing. Types accepted I16 I32 I64.  */
25276 NUF(vshrn,     0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
25277 NUF(vrshrn,    0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
25278  /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant.  */
25279 nUF(vshll,     _vshll,   3, (RNQ, RND, I32),  neon_shll),
25280  /* CVT with optional immediate for fixed-point variant.  */
25281 nUF(vcvtq,     _vcvt,    3, (RNQ, RNQ, oI32b), neon_cvt),
25282
25283 nUF(vmvnq,     _vmvn,    2, (RNQ,  RNDQ_Ibig), neon_mvn),
25284
25285  /* Data processing, three registers of different lengths.  */
25286  /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32.  */
25287 NUF(vabal,     0800500, 3, (RNQ, RND, RND),  neon_abal),
25288  /* If not scalar, fall back to neon_dyadic_long.
25289     Vector types as above, scalar types S16 S32 U16 U32.  */
25290 nUF(vmlal,     _vmlal,   3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
25291 nUF(vmlsl,     _vmlsl,   3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
25292  /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32.  */
25293 NUF(vaddw,     0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
25294 NUF(vsubw,     0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
25295  /* Dyadic, narrowing insns. Types I16 I32 I64.  */
25296 NUF(vaddhn,    0800400, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
25297 NUF(vraddhn,   1800400, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
25298 NUF(vsubhn,    0800600, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
25299 NUF(vrsubhn,   1800600, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
25300  /* Saturating doubling multiplies. Types S16 S32.  */
25301 nUF(vqdmlal,   _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
25302 nUF(vqdmlsl,   _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
25303 nUF(vqdmull,   _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
25304  /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
25305     S16 S32 U16 U32.  */
25306 nUF(vmull,     _vmull,   3, (RNQ, RND, RND_RNSC), neon_vmull),
25307
25308  /* Extract. Size 8.  */
25309 NUF(vext,      0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
25310 NUF(vextq,     0b00000, 4, (RNQ,  oRNQ,  RNQ,  I15), neon_ext),
25311
25312  /* Two registers, miscellaneous.  */
25313  /* Reverse. Sizes 8 16 32 (must be < size in opcode).  */
25314 NUF(vrev64q,   1b00000, 2, (RNQ,  RNQ),      neon_rev),
25315 NUF(vrev32q,   1b00080, 2, (RNQ,  RNQ),      neon_rev),
25316 NUF(vrev16q,   1b00100, 2, (RNQ,  RNQ),      neon_rev),
25317  /* Vector replicate. Sizes 8 16 32.  */
25318 nCE(vdupq,     _vdup,    2, (RNQ,  RR_RNSC),  neon_dup),
25319  /* VMOVL. Types S8 S16 S32 U8 U16 U32.  */
25320 NUF(vmovl,     0800a10, 2, (RNQ, RND),       neon_movl),
25321  /* VMOVN. Types I16 I32 I64.  */
25322 nUF(vmovn,     _vmovn,   2, (RND, RNQ),       neon_movn),
25323  /* VQMOVN. Types S16 S32 S64 U16 U32 U64.  */
25324 nUF(vqmovn,    _vqmovn,  2, (RND, RNQ),       neon_qmovn),
25325  /* VQMOVUN. Types S16 S32 S64.  */
25326 nUF(vqmovun,   _vqmovun, 2, (RND, RNQ),       neon_qmovun),
25327  /* VZIP / VUZP. Sizes 8 16 32.  */
25328 NUF(vzip,      1b20180, 2, (RNDQ, RNDQ),     neon_zip_uzp),
25329 NUF(vzipq,     1b20180, 2, (RNQ,  RNQ),      neon_zip_uzp),
25330 NUF(vuzp,      1b20100, 2, (RNDQ, RNDQ),     neon_zip_uzp),
25331 NUF(vuzpq,     1b20100, 2, (RNQ,  RNQ),      neon_zip_uzp),
25332  /* VQABS / VQNEG. Types S8 S16 S32.  */
25333 NUF(vqabsq,    1b00700, 2, (RNQ,  RNQ),      neon_sat_abs_neg),
25334 NUF(vqnegq,    1b00780, 2, (RNQ,  RNQ),      neon_sat_abs_neg),
25335  /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32.  */
25336 NUF(vpadal,    1b00600, 2, (RNDQ, RNDQ),     neon_pair_long),
25337 NUF(vpadalq,   1b00600, 2, (RNQ,  RNQ),      neon_pair_long),
25338 NUF(vpaddl,    1b00200, 2, (RNDQ, RNDQ),     neon_pair_long),
25339 NUF(vpaddlq,   1b00200, 2, (RNQ,  RNQ),      neon_pair_long),
25340  /* Reciprocal estimates.  Types U32 F16 F32.  */
25341 NUF(vrecpe,    1b30400, 2, (RNDQ, RNDQ),     neon_recip_est),
25342 NUF(vrecpeq,   1b30400, 2, (RNQ,  RNQ),      neon_recip_est),
25343 NUF(vrsqrte,   1b30480, 2, (RNDQ, RNDQ),     neon_recip_est),
25344 NUF(vrsqrteq,  1b30480, 2, (RNQ,  RNQ),      neon_recip_est),
25345  /* VCLS. Types S8 S16 S32.  */
25346 NUF(vclsq,     1b00400, 2, (RNQ,  RNQ),      neon_cls),
25347  /* VCLZ. Types I8 I16 I32.  */
25348 NUF(vclzq,     1b00480, 2, (RNQ,  RNQ),      neon_clz),
25349  /* VCNT. Size 8.  */
25350 NUF(vcnt,      1b00500, 2, (RNDQ, RNDQ),     neon_cnt),
25351 NUF(vcntq,     1b00500, 2, (RNQ,  RNQ),      neon_cnt),
25352  /* Two address, untyped.  */
25353 NUF(vswp,      1b20000, 2, (RNDQ, RNDQ),     neon_swp),
25354 NUF(vswpq,     1b20000, 2, (RNQ,  RNQ),      neon_swp),
25355  /* VTRN. Sizes 8 16 32.  */
25356 nUF(vtrn,      _vtrn,    2, (RNDQ, RNDQ),     neon_trn),
25357 nUF(vtrnq,     _vtrn,    2, (RNQ,  RNQ),      neon_trn),
25358
25359  /* Table lookup. Size 8.  */
25360 NUF(vtbl,      1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
25361 NUF(vtbx,      1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
25362
25363#undef  THUMB_VARIANT
25364#define THUMB_VARIANT  & fpu_vfp_v3_or_neon_ext
25365#undef  ARM_VARIANT
25366#define ARM_VARIANT    & fpu_vfp_v3_or_neon_ext
25367
25368  /* Neon element/structure load/store.  */
25369 nUF(vld1,      _vld1,    2, (NSTRLST, ADDR),  neon_ldx_stx),
25370 nUF(vst1,      _vst1,    2, (NSTRLST, ADDR),  neon_ldx_stx),
25371 nUF(vld2,      _vld2,    2, (NSTRLST, ADDR),  neon_ldx_stx),
25372 nUF(vst2,      _vst2,    2, (NSTRLST, ADDR),  neon_ldx_stx),
25373 nUF(vld3,      _vld3,    2, (NSTRLST, ADDR),  neon_ldx_stx),
25374 nUF(vst3,      _vst3,    2, (NSTRLST, ADDR),  neon_ldx_stx),
25375 nUF(vld4,      _vld4,    2, (NSTRLST, ADDR),  neon_ldx_stx),
25376 nUF(vst4,      _vst4,    2, (NSTRLST, ADDR),  neon_ldx_stx),
25377
25378#undef  THUMB_VARIANT
25379#define THUMB_VARIANT & fpu_vfp_ext_v3xd
25380#undef  ARM_VARIANT
25381#define ARM_VARIANT   & fpu_vfp_ext_v3xd
25382 cCE("fconsts",   eb00a00, 2, (RVS, I255),      vfp_sp_const),
25383 cCE("fshtos",    eba0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
25384 cCE("fsltos",    eba0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
25385 cCE("fuhtos",    ebb0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
25386 cCE("fultos",    ebb0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
25387 cCE("ftoshs",    ebe0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
25388 cCE("ftosls",    ebe0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
25389 cCE("ftouhs",    ebf0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
25390 cCE("ftouls",    ebf0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
25391
25392#undef  THUMB_VARIANT
25393#define THUMB_VARIANT  & fpu_vfp_ext_v3
25394#undef  ARM_VARIANT
25395#define ARM_VARIANT    & fpu_vfp_ext_v3
25396
25397 cCE("fconstd",   eb00b00, 2, (RVD, I255),      vfp_dp_const),
25398 cCE("fshtod",    eba0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
25399 cCE("fsltod",    eba0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
25400 cCE("fuhtod",    ebb0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
25401 cCE("fultod",    ebb0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
25402 cCE("ftoshd",    ebe0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
25403 cCE("ftosld",    ebe0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
25404 cCE("ftouhd",    ebf0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
25405 cCE("ftould",    ebf0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
25406
25407#undef  ARM_VARIANT
25408#define ARM_VARIANT    & fpu_vfp_ext_fma
25409#undef  THUMB_VARIANT
25410#define THUMB_VARIANT  & fpu_vfp_ext_fma
25411 /* Mnemonics shared by Neon, VFP, MVE and BF16.  These are included in the
25412    VFP FMA variant; NEON and VFP FMA always includes the NEON
25413    FMA instructions.  */
25414 mnCEF(vfma,     _vfma,    3, (RNSDQMQ, oRNSDQMQ, RNSDQMQR), neon_fmac),
25415 TUF ("vfmat",    c300850,    fc300850,  3, (RNSDQMQ, oRNSDQMQ, RNSDQ_RNSC_MQ_RR), mve_vfma, mve_vfma),
25416 mnCEF(vfms,     _vfms,    3, (RNSDQMQ, oRNSDQMQ, RNSDQMQ),  neon_fmac),
25417
25418 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
25419    the v form should always be used.  */
25420 cCE("ffmas",	ea00a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
25421 cCE("ffnmas",	ea00a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
25422 cCE("ffmad",	ea00b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
25423 cCE("ffnmad",	ea00b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
25424 nCE(vfnma,     _vfnma,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
25425 nCE(vfnms,     _vfnms,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
25426
25427#undef THUMB_VARIANT
25428#undef  ARM_VARIANT
25429#define ARM_VARIANT  & arm_cext_xscale /* Intel XScale extensions.  */
25430
25431 cCE("mia",	e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
25432 cCE("miaph",	e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
25433 cCE("miabb",	e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
25434 cCE("miabt",	e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
25435 cCE("miatb",	e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
25436 cCE("miatt",	e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
25437 cCE("mar",	c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
25438 cCE("mra",	c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
25439
25440#undef  ARM_VARIANT
25441#define ARM_VARIANT  & arm_cext_iwmmxt /* Intel Wireless MMX technology.  */
25442
25443 cCE("tandcb",	e13f130, 1, (RR),		    iwmmxt_tandorc),
25444 cCE("tandch",	e53f130, 1, (RR),		    iwmmxt_tandorc),
25445 cCE("tandcw",	e93f130, 1, (RR),		    iwmmxt_tandorc),
25446 cCE("tbcstb",	e400010, 2, (RIWR, RR),		    rn_rd),
25447 cCE("tbcsth",	e400050, 2, (RIWR, RR),		    rn_rd),
25448 cCE("tbcstw",	e400090, 2, (RIWR, RR),		    rn_rd),
25449 cCE("textrcb",	e130170, 2, (RR, I7),		    iwmmxt_textrc),
25450 cCE("textrch",	e530170, 2, (RR, I7),		    iwmmxt_textrc),
25451 cCE("textrcw",	e930170, 2, (RR, I7),		    iwmmxt_textrc),
25452 cCE("textrmub",e100070, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
25453 cCE("textrmuh",e500070, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
25454 cCE("textrmuw",e900070, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
25455 cCE("textrmsb",e100078, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
25456 cCE("textrmsh",e500078, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
25457 cCE("textrmsw",e900078, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
25458 cCE("tinsrb",	e600010, 3, (RIWR, RR, I7),	    iwmmxt_tinsr),
25459 cCE("tinsrh",	e600050, 3, (RIWR, RR, I7),	    iwmmxt_tinsr),
25460 cCE("tinsrw",	e600090, 3, (RIWR, RR, I7),	    iwmmxt_tinsr),
25461 cCE("tmcr",	e000110, 2, (RIWC_RIWG, RR),	    rn_rd),
25462 cCE("tmcrr",	c400000, 3, (RIWR, RR, RR),	    rm_rd_rn),
25463 cCE("tmia",	e200010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
25464 cCE("tmiaph",	e280010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
25465 cCE("tmiabb",	e2c0010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
25466 cCE("tmiabt",	e2d0010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
25467 cCE("tmiatb",	e2e0010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
25468 cCE("tmiatt",	e2f0010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
25469 cCE("tmovmskb",e100030, 2, (RR, RIWR),		    rd_rn),
25470 cCE("tmovmskh",e500030, 2, (RR, RIWR),		    rd_rn),
25471 cCE("tmovmskw",e900030, 2, (RR, RIWR),		    rd_rn),
25472 cCE("tmrc",	e100110, 2, (RR, RIWC_RIWG),	    rd_rn),
25473 cCE("tmrrc",	c500000, 3, (RR, RR, RIWR),	    rd_rn_rm),
25474 cCE("torcb",	e13f150, 1, (RR),		    iwmmxt_tandorc),
25475 cCE("torch",	e53f150, 1, (RR),		    iwmmxt_tandorc),
25476 cCE("torcw",	e93f150, 1, (RR),		    iwmmxt_tandorc),
25477 cCE("waccb",	e0001c0, 2, (RIWR, RIWR),	    rd_rn),
25478 cCE("wacch",	e4001c0, 2, (RIWR, RIWR),	    rd_rn),
25479 cCE("waccw",	e8001c0, 2, (RIWR, RIWR),	    rd_rn),
25480 cCE("waddbss",	e300180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25481 cCE("waddb",	e000180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25482 cCE("waddbus",	e100180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25483 cCE("waddhss",	e700180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25484 cCE("waddh",	e400180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25485 cCE("waddhus",	e500180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25486 cCE("waddwss",	eb00180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25487 cCE("waddw",	e800180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25488 cCE("waddwus",	e900180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25489 cCE("waligni",	e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
25490 cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25491 cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25492 cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25493 cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25494 cCE("wand",	e200000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25495 cCE("wandn",	e300000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25496 cCE("wavg2b",	e800000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25497 cCE("wavg2br",	e900000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25498 cCE("wavg2h",	ec00000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25499 cCE("wavg2hr",	ed00000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25500 cCE("wcmpeqb",	e000060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25501 cCE("wcmpeqh",	e400060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25502 cCE("wcmpeqw",	e800060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25503 cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25504 cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25505 cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25506 cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25507 cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25508 cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25509 cCE("wldrb",	c100000, 2, (RIWR, ADDR),	    iwmmxt_wldstbh),
25510 cCE("wldrh",	c500000, 2, (RIWR, ADDR),	    iwmmxt_wldstbh),
25511 cCE("wldrw",	c100100, 2, (RIWR_RIWC, ADDR),	    iwmmxt_wldstw),
25512 cCE("wldrd",	c500100, 2, (RIWR, ADDR),	    iwmmxt_wldstd),
25513 cCE("wmacs",	e600100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25514 cCE("wmacsz",	e700100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25515 cCE("wmacu",	e400100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25516 cCE("wmacuz",	e500100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25517 cCE("wmadds",	ea00100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25518 cCE("wmaddu",	e800100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25519 cCE("wmaxsb",	e200160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25520 cCE("wmaxsh",	e600160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25521 cCE("wmaxsw",	ea00160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25522 cCE("wmaxub",	e000160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25523 cCE("wmaxuh",	e400160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25524 cCE("wmaxuw",	e800160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25525 cCE("wminsb",	e300160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25526 cCE("wminsh",	e700160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25527 cCE("wminsw",	eb00160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25528 cCE("wminub",	e100160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25529 cCE("wminuh",	e500160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25530 cCE("wminuw",	e900160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25531 cCE("wmov",	e000000, 2, (RIWR, RIWR),	    iwmmxt_wmov),
25532 cCE("wmulsm",	e300100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25533 cCE("wmulsl",	e200100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25534 cCE("wmulum",	e100100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25535 cCE("wmulul",	e000100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25536 cCE("wor",	e000000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25537 cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25538 cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25539 cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25540 cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25541 cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25542 cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25543 cCE("wrorh",	e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
25544 cCE("wrorhg",	e700148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
25545 cCE("wrorw",	eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
25546 cCE("wrorwg",	eb00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
25547 cCE("wrord",	ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
25548 cCE("wrordg",	ef00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
25549 cCE("wsadb",	e000120, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25550 cCE("wsadbz",	e100120, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25551 cCE("wsadh",	e400120, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25552 cCE("wsadhz",	e500120, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25553 cCE("wshufh",	e0001e0, 3, (RIWR, RIWR, I255),	    iwmmxt_wshufh),
25554 cCE("wsllh",	e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
25555 cCE("wsllhg",	e500148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
25556 cCE("wsllw",	e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
25557 cCE("wsllwg",	e900148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
25558 cCE("wslld",	ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
25559 cCE("wslldg",	ed00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
25560 cCE("wsrah",	e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
25561 cCE("wsrahg",	e400148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
25562 cCE("wsraw",	e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
25563 cCE("wsrawg",	e800148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
25564 cCE("wsrad",	ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
25565 cCE("wsradg",	ec00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
25566 cCE("wsrlh",	e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
25567 cCE("wsrlhg",	e600148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
25568 cCE("wsrlw",	ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
25569 cCE("wsrlwg",	ea00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
25570 cCE("wsrld",	ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
25571 cCE("wsrldg",	ee00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
25572 cCE("wstrb",	c000000, 2, (RIWR, ADDR),	    iwmmxt_wldstbh),
25573 cCE("wstrh",	c400000, 2, (RIWR, ADDR),	    iwmmxt_wldstbh),
25574 cCE("wstrw",	c000100, 2, (RIWR_RIWC, ADDR),	    iwmmxt_wldstw),
25575 cCE("wstrd",	c400100, 2, (RIWR, ADDR),	    iwmmxt_wldstd),
25576 cCE("wsubbss",	e3001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25577 cCE("wsubb",	e0001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25578 cCE("wsubbus",	e1001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25579 cCE("wsubhss",	e7001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25580 cCE("wsubh",	e4001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25581 cCE("wsubhus",	e5001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25582 cCE("wsubwss",	eb001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25583 cCE("wsubw",	e8001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25584 cCE("wsubwus",	e9001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25585 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR),	    rd_rn),
25586 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR),	    rd_rn),
25587 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR),	    rd_rn),
25588 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR),	    rd_rn),
25589 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR),	    rd_rn),
25590 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR),	    rd_rn),
25591 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25592 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25593 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25594 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR),	    rd_rn),
25595 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR),	    rd_rn),
25596 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR),	    rd_rn),
25597 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR),	    rd_rn),
25598 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR),	    rd_rn),
25599 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR),	    rd_rn),
25600 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25601 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25602 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25603 cCE("wxor",	e100000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25604 cCE("wzero",	e300000, 1, (RIWR),		    iwmmxt_wzero),
25605
25606#undef  ARM_VARIANT
25607#define ARM_VARIANT  & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2.  */
25608
25609 cCE("torvscb",   e12f190, 1, (RR),		    iwmmxt_tandorc),
25610 cCE("torvsch",   e52f190, 1, (RR),		    iwmmxt_tandorc),
25611 cCE("torvscw",   e92f190, 1, (RR),		    iwmmxt_tandorc),
25612 cCE("wabsb",     e2001c0, 2, (RIWR, RIWR),           rd_rn),
25613 cCE("wabsh",     e6001c0, 2, (RIWR, RIWR),           rd_rn),
25614 cCE("wabsw",     ea001c0, 2, (RIWR, RIWR),           rd_rn),
25615 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25616 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25617 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25618 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25619 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25620 cCE("waddhc",    e600180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25621 cCE("waddwc",    ea00180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25622 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25623 cCE("wavg4",	e400000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25624 cCE("wavg4r",    e500000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25625 cCE("wmaddsn",   ee00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25626 cCE("wmaddsx",   eb00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25627 cCE("wmaddun",   ec00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25628 cCE("wmaddux",   e900100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25629 cCE("wmerge",    e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
25630 cCE("wmiabb",    e0000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25631 cCE("wmiabt",    e1000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25632 cCE("wmiatb",    e2000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25633 cCE("wmiatt",    e3000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25634 cCE("wmiabbn",   e4000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25635 cCE("wmiabtn",   e5000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25636 cCE("wmiatbn",   e6000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25637 cCE("wmiattn",   e7000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25638 cCE("wmiawbb",   e800120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25639 cCE("wmiawbt",   e900120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25640 cCE("wmiawtb",   ea00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25641 cCE("wmiawtt",   eb00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25642 cCE("wmiawbbn",  ec00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25643 cCE("wmiawbtn",  ed00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25644 cCE("wmiawtbn",  ee00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25645 cCE("wmiawttn",  ef00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25646 cCE("wmulsmr",   ef00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25647 cCE("wmulumr",   ed00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25648 cCE("wmulwumr",  ec000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25649 cCE("wmulwsmr",  ee000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25650 cCE("wmulwum",   ed000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25651 cCE("wmulwsm",   ef000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25652 cCE("wmulwl",    eb000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25653 cCE("wqmiabb",   e8000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25654 cCE("wqmiabt",   e9000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25655 cCE("wqmiatb",   ea000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25656 cCE("wqmiatt",   eb000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25657 cCE("wqmiabbn",  ec000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25658 cCE("wqmiabtn",  ed000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25659 cCE("wqmiatbn",  ee000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25660 cCE("wqmiattn",  ef000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25661 cCE("wqmulm",    e100080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25662 cCE("wqmulmr",   e300080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25663 cCE("wqmulwm",   ec000e0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25664 cCE("wqmulwmr",  ee000e0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25665 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
25666
25667#undef  ARM_VARIANT
25668#define ARM_VARIANT  & arm_cext_maverick /* Cirrus Maverick instructions.  */
25669
25670 cCE("cfldrs",	c100400, 2, (RMF, ADDRGLDC),	      rd_cpaddr),
25671 cCE("cfldrd",	c500400, 2, (RMD, ADDRGLDC),	      rd_cpaddr),
25672 cCE("cfldr32",	c100500, 2, (RMFX, ADDRGLDC),	      rd_cpaddr),
25673 cCE("cfldr64",	c500500, 2, (RMDX, ADDRGLDC),	      rd_cpaddr),
25674 cCE("cfstrs",	c000400, 2, (RMF, ADDRGLDC),	      rd_cpaddr),
25675 cCE("cfstrd",	c400400, 2, (RMD, ADDRGLDC),	      rd_cpaddr),
25676 cCE("cfstr32",	c000500, 2, (RMFX, ADDRGLDC),	      rd_cpaddr),
25677 cCE("cfstr64",	c400500, 2, (RMDX, ADDRGLDC),	      rd_cpaddr),
25678 cCE("cfmvsr",	e000450, 2, (RMF, RR),		      rn_rd),
25679 cCE("cfmvrs",	e100450, 2, (RR, RMF),		      rd_rn),
25680 cCE("cfmvdlr",	e000410, 2, (RMD, RR),		      rn_rd),
25681 cCE("cfmvrdl",	e100410, 2, (RR, RMD),		      rd_rn),
25682 cCE("cfmvdhr",	e000430, 2, (RMD, RR),		      rn_rd),
25683 cCE("cfmvrdh",	e100430, 2, (RR, RMD),		      rd_rn),
25684 cCE("cfmv64lr",e000510, 2, (RMDX, RR),		      rn_rd),
25685 cCE("cfmvr64l",e100510, 2, (RR, RMDX),		      rd_rn),
25686 cCE("cfmv64hr",e000530, 2, (RMDX, RR),		      rn_rd),
25687 cCE("cfmvr64h",e100530, 2, (RR, RMDX),		      rd_rn),
25688 cCE("cfmval32",e200440, 2, (RMAX, RMFX),	      rd_rn),
25689 cCE("cfmv32al",e100440, 2, (RMFX, RMAX),	      rd_rn),
25690 cCE("cfmvam32",e200460, 2, (RMAX, RMFX),	      rd_rn),
25691 cCE("cfmv32am",e100460, 2, (RMFX, RMAX),	      rd_rn),
25692 cCE("cfmvah32",e200480, 2, (RMAX, RMFX),	      rd_rn),
25693 cCE("cfmv32ah",e100480, 2, (RMFX, RMAX),	      rd_rn),
25694 cCE("cfmva32",	e2004a0, 2, (RMAX, RMFX),	      rd_rn),
25695 cCE("cfmv32a",	e1004a0, 2, (RMFX, RMAX),	      rd_rn),
25696 cCE("cfmva64",	e2004c0, 2, (RMAX, RMDX),	      rd_rn),
25697 cCE("cfmv64a",	e1004c0, 2, (RMDX, RMAX),	      rd_rn),
25698 cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX),	      mav_dspsc),
25699 cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS),	      rd),
25700 cCE("cfcpys",	e000400, 2, (RMF, RMF),		      rd_rn),
25701 cCE("cfcpyd",	e000420, 2, (RMD, RMD),		      rd_rn),
25702 cCE("cfcvtsd",	e000460, 2, (RMD, RMF),		      rd_rn),
25703 cCE("cfcvtds",	e000440, 2, (RMF, RMD),		      rd_rn),
25704 cCE("cfcvt32s",e000480, 2, (RMF, RMFX),	      rd_rn),
25705 cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX),	      rd_rn),
25706 cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX),	      rd_rn),
25707 cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX),	      rd_rn),
25708 cCE("cfcvts32",e100580, 2, (RMFX, RMF),	      rd_rn),
25709 cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD),	      rd_rn),
25710 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF),	      rd_rn),
25711 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD),	      rd_rn),
25712 cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR),	      mav_triple),
25713 cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR),	      mav_triple),
25714 cCE("cfsh32",	e000500, 3, (RMFX, RMFX, I63s),	      mav_shift),
25715 cCE("cfsh64",	e200500, 3, (RMDX, RMDX, I63s),	      mav_shift),
25716 cCE("cfcmps",	e100490, 3, (RR, RMF, RMF),	      rd_rn_rm),
25717 cCE("cfcmpd",	e1004b0, 3, (RR, RMD, RMD),	      rd_rn_rm),
25718 cCE("cfcmp32",	e100590, 3, (RR, RMFX, RMFX),	      rd_rn_rm),
25719 cCE("cfcmp64",	e1005b0, 3, (RR, RMDX, RMDX),	      rd_rn_rm),
25720 cCE("cfabss",	e300400, 2, (RMF, RMF),		      rd_rn),
25721 cCE("cfabsd",	e300420, 2, (RMD, RMD),		      rd_rn),
25722 cCE("cfnegs",	e300440, 2, (RMF, RMF),		      rd_rn),
25723 cCE("cfnegd",	e300460, 2, (RMD, RMD),		      rd_rn),
25724 cCE("cfadds",	e300480, 3, (RMF, RMF, RMF),	      rd_rn_rm),
25725 cCE("cfaddd",	e3004a0, 3, (RMD, RMD, RMD),	      rd_rn_rm),
25726 cCE("cfsubs",	e3004c0, 3, (RMF, RMF, RMF),	      rd_rn_rm),
25727 cCE("cfsubd",	e3004e0, 3, (RMD, RMD, RMD),	      rd_rn_rm),
25728 cCE("cfmuls",	e100400, 3, (RMF, RMF, RMF),	      rd_rn_rm),
25729 cCE("cfmuld",	e100420, 3, (RMD, RMD, RMD),	      rd_rn_rm),
25730 cCE("cfabs32",	e300500, 2, (RMFX, RMFX),	      rd_rn),
25731 cCE("cfabs64",	e300520, 2, (RMDX, RMDX),	      rd_rn),
25732 cCE("cfneg32",	e300540, 2, (RMFX, RMFX),	      rd_rn),
25733 cCE("cfneg64",	e300560, 2, (RMDX, RMDX),	      rd_rn),
25734 cCE("cfadd32",	e300580, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
25735 cCE("cfadd64",	e3005a0, 3, (RMDX, RMDX, RMDX),	      rd_rn_rm),
25736 cCE("cfsub32",	e3005c0, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
25737 cCE("cfsub64",	e3005e0, 3, (RMDX, RMDX, RMDX),	      rd_rn_rm),
25738 cCE("cfmul32",	e100500, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
25739 cCE("cfmul64",	e100520, 3, (RMDX, RMDX, RMDX),	      rd_rn_rm),
25740 cCE("cfmac32",	e100540, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
25741 cCE("cfmsc32",	e100560, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
25742 cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
25743 cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
25744 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
25745 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
25746
25747 /* ARMv8.5-A instructions.  */
25748#undef  ARM_VARIANT
25749#define ARM_VARIANT   & arm_ext_sb
25750#undef  THUMB_VARIANT
25751#define THUMB_VARIANT & arm_ext_sb
25752 TUF("sb", 57ff070, f3bf8f70, 0, (), noargs, noargs),
25753
25754#undef  ARM_VARIANT
25755#define ARM_VARIANT   & arm_ext_predres
25756#undef  THUMB_VARIANT
25757#define THUMB_VARIANT & arm_ext_predres
25758 CE("cfprctx", e070f93, 1, (RRnpc), rd),
25759 CE("dvprctx", e070fb3, 1, (RRnpc), rd),
25760 CE("cpprctx", e070ff3, 1, (RRnpc), rd),
25761
25762 /* ARMv8-M instructions.  */
25763#undef  ARM_VARIANT
25764#define ARM_VARIANT NULL
25765#undef  THUMB_VARIANT
25766#define THUMB_VARIANT & arm_ext_v8m
25767 ToU("sg",    e97fe97f,	0, (),		   noargs),
25768 ToC("blxns", 4784,	1, (RRnpc),	   t_blx),
25769 ToC("bxns",  4704,	1, (RRnpc),	   t_bx),
25770 ToC("tt",    e840f000,	2, (RRnpc, RRnpc), tt),
25771 ToC("ttt",   e840f040,	2, (RRnpc, RRnpc), tt),
25772 ToC("tta",   e840f080,	2, (RRnpc, RRnpc), tt),
25773 ToC("ttat",  e840f0c0,	2, (RRnpc, RRnpc), tt),
25774
25775 /* FP for ARMv8-M Mainline.  Enabled for ARMv8-M Mainline because the
25776    instructions behave as nop if no VFP is present.  */
25777#undef  THUMB_VARIANT
25778#define THUMB_VARIANT & arm_ext_v8m_main
25779 ToC("vlldm", ec300a00, 1, (RRnpc), rn),
25780 ToC("vlstm", ec200a00, 1, (RRnpc), rn),
25781
25782 /* Armv8.1-M Mainline instructions.  */
25783#undef  THUMB_VARIANT
25784#define THUMB_VARIANT & arm_ext_v8_1m_main
25785 toU("cinc",  _cinc,  3, (RRnpcsp, RR_ZR, COND),	t_cond),
25786 toU("cinv",  _cinv,  3, (RRnpcsp, RR_ZR, COND),	t_cond),
25787 toU("cneg",  _cneg,  3, (RRnpcsp, RR_ZR, COND),	t_cond),
25788 toU("csel",  _csel,  4, (RRnpcsp, RR_ZR, RR_ZR, COND),	t_cond),
25789 toU("csetm", _csetm, 2, (RRnpcsp, COND),		t_cond),
25790 toU("cset",  _cset,  2, (RRnpcsp, COND),		t_cond),
25791 toU("csinc", _csinc, 4, (RRnpcsp, RR_ZR, RR_ZR, COND),	t_cond),
25792 toU("csinv", _csinv, 4, (RRnpcsp, RR_ZR, RR_ZR, COND),	t_cond),
25793 toU("csneg", _csneg, 4, (RRnpcsp, RR_ZR, RR_ZR, COND),	t_cond),
25794
25795 toC("bf",     _bf,	2, (EXPs, EXPs),	     t_branch_future),
25796 toU("bfcsel", _bfcsel,	4, (EXPs, EXPs, EXPs, COND), t_branch_future),
25797 toC("bfx",    _bfx,	2, (EXPs, RRnpcsp),	     t_branch_future),
25798 toC("bfl",    _bfl,	2, (EXPs, EXPs),	     t_branch_future),
25799 toC("bflx",   _bflx,	2, (EXPs, RRnpcsp),	     t_branch_future),
25800
25801 toU("dls", _dls, 2, (LR, RRnpcsp),	 t_loloop),
25802 toU("wls", _wls, 3, (LR, RRnpcsp, EXP), t_loloop),
25803 toU("le",  _le,  2, (oLR, EXP),	 t_loloop),
25804
25805 ToC("clrm",	e89f0000, 1, (CLRMLST),  t_clrm),
25806 ToC("vscclrm",	ec9f0a00, 1, (VRSDVLST), t_vscclrm),
25807
25808#undef  THUMB_VARIANT
25809#define THUMB_VARIANT & mve_ext
25810 ToC("lsll",	ea50010d, 3, (RRe, RRo, RRnpcsp_I32), mve_scalar_shift),
25811 ToC("lsrl",	ea50011f, 3, (RRe, RRo, I32),	      mve_scalar_shift),
25812 ToC("asrl",	ea50012d, 3, (RRe, RRo, RRnpcsp_I32), mve_scalar_shift),
25813 ToC("uqrshll",	ea51010d, 4, (RRe, RRo, I48_I64, RRnpcsp), mve_scalar_shift1),
25814 ToC("sqrshrl",	ea51012d, 4, (RRe, RRo, I48_I64, RRnpcsp), mve_scalar_shift1),
25815 ToC("uqshll",	ea51010f, 3, (RRe, RRo, I32),	      mve_scalar_shift),
25816 ToC("urshrl",	ea51011f, 3, (RRe, RRo, I32),	      mve_scalar_shift),
25817 ToC("srshrl",	ea51012f, 3, (RRe, RRo, I32),	      mve_scalar_shift),
25818 ToC("sqshll",	ea51013f, 3, (RRe, RRo, I32),	      mve_scalar_shift),
25819 ToC("uqrshl",	ea500f0d, 2, (RRnpcsp, RRnpcsp),      mve_scalar_shift),
25820 ToC("sqrshr",	ea500f2d, 2, (RRnpcsp, RRnpcsp),      mve_scalar_shift),
25821 ToC("uqshl",	ea500f0f, 2, (RRnpcsp, I32),	      mve_scalar_shift),
25822 ToC("urshr",	ea500f1f, 2, (RRnpcsp, I32),	      mve_scalar_shift),
25823 ToC("srshr",	ea500f2f, 2, (RRnpcsp, I32),	      mve_scalar_shift),
25824 ToC("sqshl",	ea500f3f, 2, (RRnpcsp, I32),	      mve_scalar_shift),
25825
25826 ToC("vpt",	ee410f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25827 ToC("vptt",	ee018f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25828 ToC("vpte",	ee418f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25829 ToC("vpttt",	ee014f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25830 ToC("vptte",	ee01cf00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25831 ToC("vptet",	ee41cf00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25832 ToC("vptee",	ee414f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25833 ToC("vptttt",	ee012f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25834 ToC("vpttte",	ee016f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25835 ToC("vpttet",	ee01ef00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25836 ToC("vpttee",	ee01af00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25837 ToC("vptett",	ee41af00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25838 ToC("vptete",	ee41ef00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25839 ToC("vpteet",	ee416f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25840 ToC("vpteee",	ee412f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25841
25842 ToC("vpst",	fe710f4d, 0, (), mve_vpt),
25843 ToC("vpstt",	fe318f4d, 0, (), mve_vpt),
25844 ToC("vpste",	fe718f4d, 0, (), mve_vpt),
25845 ToC("vpsttt",	fe314f4d, 0, (), mve_vpt),
25846 ToC("vpstte",	fe31cf4d, 0, (), mve_vpt),
25847 ToC("vpstet",	fe71cf4d, 0, (), mve_vpt),
25848 ToC("vpstee",	fe714f4d, 0, (), mve_vpt),
25849 ToC("vpstttt",	fe312f4d, 0, (), mve_vpt),
25850 ToC("vpsttte", fe316f4d, 0, (), mve_vpt),
25851 ToC("vpsttet",	fe31ef4d, 0, (), mve_vpt),
25852 ToC("vpsttee",	fe31af4d, 0, (), mve_vpt),
25853 ToC("vpstett",	fe71af4d, 0, (), mve_vpt),
25854 ToC("vpstete",	fe71ef4d, 0, (), mve_vpt),
25855 ToC("vpsteet",	fe716f4d, 0, (), mve_vpt),
25856 ToC("vpsteee",	fe712f4d, 0, (), mve_vpt),
25857
25858 /* MVE and MVE FP only.  */
25859 mToC("vhcadd",	ee000f00,   4, (RMQ, RMQ, RMQ, EXPi),		  mve_vhcadd),
25860 mCEF(vctp,	_vctp,      1, (RRnpc),				  mve_vctp),
25861 mCEF(vadc,	_vadc,      3, (RMQ, RMQ, RMQ),			  mve_vadc),
25862 mCEF(vadci,	_vadci,     3, (RMQ, RMQ, RMQ),			  mve_vadc),
25863 mToC("vsbc",	fe300f00,   3, (RMQ, RMQ, RMQ),			  mve_vsbc),
25864 mToC("vsbci",	fe301f00,   3, (RMQ, RMQ, RMQ),			  mve_vsbc),
25865 mCEF(vmullb,	_vmullb,    3, (RMQ, RMQ, RMQ),			  mve_vmull),
25866 mCEF(vabav,	_vabav,	    3, (RRnpcsp, RMQ, RMQ),		  mve_vabav),
25867 mCEF(vmladav,	  _vmladav,	3, (RRe, RMQ, RMQ),		mve_vmladav),
25868 mCEF(vmladava,	  _vmladava,	3, (RRe, RMQ, RMQ),		mve_vmladav),
25869 mCEF(vmladavx,	  _vmladavx,	3, (RRe, RMQ, RMQ),		mve_vmladav),
25870 mCEF(vmladavax,  _vmladavax,	3, (RRe, RMQ, RMQ),		mve_vmladav),
25871 mCEF(vmlav,	  _vmladav,	3, (RRe, RMQ, RMQ),		mve_vmladav),
25872 mCEF(vmlava,	  _vmladava,	3, (RRe, RMQ, RMQ),		mve_vmladav),
25873 mCEF(vmlsdav,	  _vmlsdav,	3, (RRe, RMQ, RMQ),		mve_vmladav),
25874 mCEF(vmlsdava,	  _vmlsdava,	3, (RRe, RMQ, RMQ),		mve_vmladav),
25875 mCEF(vmlsdavx,	  _vmlsdavx,	3, (RRe, RMQ, RMQ),		mve_vmladav),
25876 mCEF(vmlsdavax,  _vmlsdavax,	3, (RRe, RMQ, RMQ),		mve_vmladav),
25877
25878 mCEF(vst20,	_vst20,	    2, (MSTRLST2, ADDRMVE),		mve_vst_vld),
25879 mCEF(vst21,	_vst21,	    2, (MSTRLST2, ADDRMVE),		mve_vst_vld),
25880 mCEF(vst40,	_vst40,	    2, (MSTRLST4, ADDRMVE),		mve_vst_vld),
25881 mCEF(vst41,	_vst41,	    2, (MSTRLST4, ADDRMVE),		mve_vst_vld),
25882 mCEF(vst42,	_vst42,	    2, (MSTRLST4, ADDRMVE),		mve_vst_vld),
25883 mCEF(vst43,	_vst43,	    2, (MSTRLST4, ADDRMVE),		mve_vst_vld),
25884 mCEF(vld20,	_vld20,	    2, (MSTRLST2, ADDRMVE),		mve_vst_vld),
25885 mCEF(vld21,	_vld21,	    2, (MSTRLST2, ADDRMVE),		mve_vst_vld),
25886 mCEF(vld40,	_vld40,	    2, (MSTRLST4, ADDRMVE),		mve_vst_vld),
25887 mCEF(vld41,	_vld41,	    2, (MSTRLST4, ADDRMVE),		mve_vst_vld),
25888 mCEF(vld42,	_vld42,	    2, (MSTRLST4, ADDRMVE),		mve_vst_vld),
25889 mCEF(vld43,	_vld43,	    2, (MSTRLST4, ADDRMVE),		mve_vst_vld),
25890 mCEF(vstrb,	_vstrb,	    2, (RMQ, ADDRMVE),			mve_vstr_vldr),
25891 mCEF(vstrh,	_vstrh,	    2, (RMQ, ADDRMVE),			mve_vstr_vldr),
25892 mCEF(vstrw,	_vstrw,	    2, (RMQ, ADDRMVE),			mve_vstr_vldr),
25893 mCEF(vstrd,	_vstrd,	    2, (RMQ, ADDRMVE),			mve_vstr_vldr),
25894 mCEF(vldrb,	_vldrb,	    2, (RMQ, ADDRMVE),			mve_vstr_vldr),
25895 mCEF(vldrh,	_vldrh,	    2, (RMQ, ADDRMVE),			mve_vstr_vldr),
25896 mCEF(vldrw,	_vldrw,	    2, (RMQ, ADDRMVE),			mve_vstr_vldr),
25897 mCEF(vldrd,	_vldrd,	    2, (RMQ, ADDRMVE),			mve_vstr_vldr),
25898
25899 mCEF(vmovnt,	_vmovnt,    2, (RMQ, RMQ),			  mve_movn),
25900 mCEF(vmovnb,	_vmovnb,    2, (RMQ, RMQ),			  mve_movn),
25901 mCEF(vbrsr,	_vbrsr,     3, (RMQ, RMQ, RR),			  mve_vbrsr),
25902 mCEF(vaddlv,	_vaddlv,    3, (RRe, RRo, RMQ),			  mve_vaddlv),
25903 mCEF(vaddlva,	_vaddlva,   3, (RRe, RRo, RMQ),			  mve_vaddlv),
25904 mCEF(vaddv,	_vaddv,	    2, (RRe, RMQ),			  mve_vaddv),
25905 mCEF(vaddva,	_vaddva,    2, (RRe, RMQ),			  mve_vaddv),
25906 mCEF(vddup,	_vddup,	    3, (RMQ, RRe, EXPi),		  mve_viddup),
25907 mCEF(vdwdup,	_vdwdup,    4, (RMQ, RRe, RR, EXPi),		  mve_viddup),
25908 mCEF(vidup,	_vidup,	    3, (RMQ, RRe, EXPi),		  mve_viddup),
25909 mCEF(viwdup,	_viwdup,    4, (RMQ, RRe, RR, EXPi),		  mve_viddup),
25910 mToC("vmaxa",	ee330e81,   2, (RMQ, RMQ),			  mve_vmaxa_vmina),
25911 mToC("vmina",	ee331e81,   2, (RMQ, RMQ),			  mve_vmaxa_vmina),
25912 mCEF(vmaxv,	_vmaxv,	  2, (RR, RMQ),				  mve_vmaxv),
25913 mCEF(vmaxav,	_vmaxav,  2, (RR, RMQ),				  mve_vmaxv),
25914 mCEF(vminv,	_vminv,	  2, (RR, RMQ),				  mve_vmaxv),
25915 mCEF(vminav,	_vminav,  2, (RR, RMQ),				  mve_vmaxv),
25916
25917 mCEF(vmlaldav,	  _vmlaldav,	4, (RRe, RRo, RMQ, RMQ),	mve_vmlaldav),
25918 mCEF(vmlaldava,  _vmlaldava,	4, (RRe, RRo, RMQ, RMQ),	mve_vmlaldav),
25919 mCEF(vmlaldavx,  _vmlaldavx,	4, (RRe, RRo, RMQ, RMQ),	mve_vmlaldav),
25920 mCEF(vmlaldavax, _vmlaldavax,	4, (RRe, RRo, RMQ, RMQ),	mve_vmlaldav),
25921 mCEF(vmlalv,	  _vmlaldav,	4, (RRe, RRo, RMQ, RMQ),	mve_vmlaldav),
25922 mCEF(vmlalva,	  _vmlaldava,	4, (RRe, RRo, RMQ, RMQ),	mve_vmlaldav),
25923 mCEF(vmlsldav,	  _vmlsldav,	4, (RRe, RRo, RMQ, RMQ),	mve_vmlaldav),
25924 mCEF(vmlsldava,  _vmlsldava,	4, (RRe, RRo, RMQ, RMQ),	mve_vmlaldav),
25925 mCEF(vmlsldavx,  _vmlsldavx,	4, (RRe, RRo, RMQ, RMQ),	mve_vmlaldav),
25926 mCEF(vmlsldavax, _vmlsldavax,	4, (RRe, RRo, RMQ, RMQ),	mve_vmlaldav),
25927 mToC("vrmlaldavh", ee800f00,	   4, (RRe, RR, RMQ, RMQ),  mve_vrmlaldavh),
25928 mToC("vrmlaldavha",ee800f20,	   4, (RRe, RR, RMQ, RMQ),  mve_vrmlaldavh),
25929 mCEF(vrmlaldavhx,  _vrmlaldavhx,  4, (RRe, RR, RMQ, RMQ),  mve_vrmlaldavh),
25930 mCEF(vrmlaldavhax, _vrmlaldavhax, 4, (RRe, RR, RMQ, RMQ),  mve_vrmlaldavh),
25931 mToC("vrmlalvh",   ee800f00,	   4, (RRe, RR, RMQ, RMQ),  mve_vrmlaldavh),
25932 mToC("vrmlalvha",  ee800f20,	   4, (RRe, RR, RMQ, RMQ),  mve_vrmlaldavh),
25933 mCEF(vrmlsldavh,   _vrmlsldavh,   4, (RRe, RR, RMQ, RMQ),  mve_vrmlaldavh),
25934 mCEF(vrmlsldavha,  _vrmlsldavha,  4, (RRe, RR, RMQ, RMQ),  mve_vrmlaldavh),
25935 mCEF(vrmlsldavhx,  _vrmlsldavhx,  4, (RRe, RR, RMQ, RMQ),  mve_vrmlaldavh),
25936 mCEF(vrmlsldavhax, _vrmlsldavhax, 4, (RRe, RR, RMQ, RMQ),  mve_vrmlaldavh),
25937
25938 mToC("vmlas",	  ee011e40,	3, (RMQ, RMQ, RR),		mve_vmlas),
25939 mToC("vmulh",	  ee010e01,	3, (RMQ, RMQ, RMQ),		mve_vmulh),
25940 mToC("vrmulh",	  ee011e01,	3, (RMQ, RMQ, RMQ),		mve_vmulh),
25941 mToC("vpnot",	  fe310f4d,	0, (),				mve_vpnot),
25942 mToC("vpsel",	  fe310f01,	3, (RMQ, RMQ, RMQ),		mve_vpsel),
25943
25944 mToC("vqdmladh",  ee000e00,	3, (RMQ, RMQ, RMQ),		mve_vqdmladh),
25945 mToC("vqdmladhx", ee001e00,	3, (RMQ, RMQ, RMQ),		mve_vqdmladh),
25946 mToC("vqrdmladh", ee000e01,	3, (RMQ, RMQ, RMQ),		mve_vqdmladh),
25947 mToC("vqrdmladhx",ee001e01,	3, (RMQ, RMQ, RMQ),		mve_vqdmladh),
25948 mToC("vqdmlsdh",  fe000e00,	3, (RMQ, RMQ, RMQ),		mve_vqdmladh),
25949 mToC("vqdmlsdhx", fe001e00,	3, (RMQ, RMQ, RMQ),		mve_vqdmladh),
25950 mToC("vqrdmlsdh", fe000e01,	3, (RMQ, RMQ, RMQ),		mve_vqdmladh),
25951 mToC("vqrdmlsdhx",fe001e01,	3, (RMQ, RMQ, RMQ),		mve_vqdmladh),
25952 mToC("vqdmlah",   ee000e60,	3, (RMQ, RMQ, RR),		mve_vqdmlah),
25953 mToC("vqdmlash",  ee001e60,	3, (RMQ, RMQ, RR),		mve_vqdmlah),
25954 mToC("vqrdmlash", ee001e40,	3, (RMQ, RMQ, RR),		mve_vqdmlah),
25955 mToC("vqdmullt",  ee301f00,	3, (RMQ, RMQ, RMQRR),		mve_vqdmull),
25956 mToC("vqdmullb",  ee300f00,	3, (RMQ, RMQ, RMQRR),		mve_vqdmull),
25957 mCEF(vqmovnt,	  _vqmovnt,	2, (RMQ, RMQ),			mve_vqmovn),
25958 mCEF(vqmovnb,	  _vqmovnb,	2, (RMQ, RMQ),			mve_vqmovn),
25959 mCEF(vqmovunt,	  _vqmovunt,	2, (RMQ, RMQ),			mve_vqmovn),
25960 mCEF(vqmovunb,	  _vqmovunb,	2, (RMQ, RMQ),			mve_vqmovn),
25961
25962 mCEF(vshrnt,	  _vshrnt,	3, (RMQ, RMQ, I32z),	mve_vshrn),
25963 mCEF(vshrnb,	  _vshrnb,	3, (RMQ, RMQ, I32z),	mve_vshrn),
25964 mCEF(vrshrnt,	  _vrshrnt,	3, (RMQ, RMQ, I32z),	mve_vshrn),
25965 mCEF(vrshrnb,	  _vrshrnb,	3, (RMQ, RMQ, I32z),	mve_vshrn),
25966 mCEF(vqshrnt,	  _vqrshrnt,	3, (RMQ, RMQ, I32z),	mve_vshrn),
25967 mCEF(vqshrnb,	  _vqrshrnb,	3, (RMQ, RMQ, I32z),	mve_vshrn),
25968 mCEF(vqshrunt,	  _vqrshrunt,	3, (RMQ, RMQ, I32z),	mve_vshrn),
25969 mCEF(vqshrunb,	  _vqrshrunb,	3, (RMQ, RMQ, I32z),	mve_vshrn),
25970 mCEF(vqrshrnt,	  _vqrshrnt,	3, (RMQ, RMQ, I32z),	mve_vshrn),
25971 mCEF(vqrshrnb,	  _vqrshrnb,	3, (RMQ, RMQ, I32z),	mve_vshrn),
25972 mCEF(vqrshrunt,  _vqrshrunt,	3, (RMQ, RMQ, I32z),	mve_vshrn),
25973 mCEF(vqrshrunb,  _vqrshrunb,	3, (RMQ, RMQ, I32z),	mve_vshrn),
25974
25975 mToC("vshlc",	    eea00fc0,	   3, (RMQ, RR, I32z),	    mve_vshlc),
25976 mToC("vshllt",	    ee201e00,	   3, (RMQ, RMQ, I32),	    mve_vshll),
25977 mToC("vshllb",	    ee200e00,	   3, (RMQ, RMQ, I32),	    mve_vshll),
25978
25979 toU("dlstp",	_dlstp, 2, (LR, RR),      t_loloop),
25980 toU("wlstp",	_wlstp, 3, (LR, RR, EXP), t_loloop),
25981 toU("letp",	_letp,  2, (LR, EXP),	  t_loloop),
25982 toU("lctp",	_lctp,  0, (),		  t_loloop),
25983
25984#undef THUMB_VARIANT
25985#define THUMB_VARIANT & mve_fp_ext
25986 mToC("vcmul", ee300e00,   4, (RMQ, RMQ, RMQ, EXPi),		  mve_vcmul),
25987 mToC("vfmas", ee311e40,   3, (RMQ, RMQ, RR),			  mve_vfmas),
25988 mToC("vmaxnma", ee3f0e81, 2, (RMQ, RMQ),			  mve_vmaxnma_vminnma),
25989 mToC("vminnma", ee3f1e81, 2, (RMQ, RMQ),			  mve_vmaxnma_vminnma),
25990 mToC("vmaxnmv", eeee0f00, 2, (RR, RMQ),			  mve_vmaxnmv),
25991 mToC("vmaxnmav",eeec0f00, 2, (RR, RMQ),			  mve_vmaxnmv),
25992 mToC("vminnmv", eeee0f80, 2, (RR, RMQ),			  mve_vmaxnmv),
25993 mToC("vminnmav",eeec0f80, 2, (RR, RMQ),			  mve_vmaxnmv),
25994
25995#undef  ARM_VARIANT
25996#define ARM_VARIANT  & fpu_vfp_ext_v1
25997#undef  THUMB_VARIANT
25998#define THUMB_VARIANT  & arm_ext_v6t2
25999 mnCEF(vmla,     _vmla,    3, (RNSDQMQ, oRNSDQMQ, RNSDQ_RNSC_MQ_RR), neon_mac_maybe_scalar),
26000 mnCEF(vmul,     _vmul,    3, (RNSDQMQ, oRNSDQMQ, RNSDQ_RNSC_MQ_RR), neon_mul),
26001
26002 mcCE(fcpyd,	eb00b40, 2, (RVD, RVD),	      vfp_dp_rd_rm),
26003
26004#undef  ARM_VARIANT
26005#define ARM_VARIANT  & fpu_vfp_ext_v1xd
26006
26007 MNCE(vmov,   0,	1, (VMOV),	      neon_mov),
26008 mcCE(fmrs,	e100a10, 2, (RR, RVS),	      vfp_reg_from_sp),
26009 mcCE(fmsr,	e000a10, 2, (RVS, RR),	      vfp_sp_from_reg),
26010 mcCE(fcpys,	eb00a40, 2, (RVS, RVS),	      vfp_sp_monadic),
26011
26012 mCEF(vmullt, _vmullt,	3, (RNSDQMQ, oRNSDQMQ, RNSDQ_RNSC_MQ),	mve_vmull),
26013 mnCEF(vadd,  _vadd,	3, (RNSDQMQ, oRNSDQMQ, RNSDQMQR),	neon_addsub_if_i),
26014 mnCEF(vsub,  _vsub,	3, (RNSDQMQ, oRNSDQMQ, RNSDQMQR),	neon_addsub_if_i),
26015
26016 MNCEF(vabs,  1b10300,	2, (RNSDQMQ, RNSDQMQ),	neon_abs_neg),
26017 MNCEF(vneg,  1b10380,	2, (RNSDQMQ, RNSDQMQ),	neon_abs_neg),
26018
26019 mCEF(vmovlt, _vmovlt,	1, (VMOV),		mve_movl),
26020 mCEF(vmovlb, _vmovlb,	1, (VMOV),		mve_movl),
26021
26022 mnCE(vcmp,      _vcmp,    3, (RVSD_COND, RSVDMQ_FI0, oRMQRZ),    vfp_nsyn_cmp),
26023 mnCE(vcmpe,     _vcmpe,   3, (RVSD_COND, RSVDMQ_FI0, oRMQRZ),    vfp_nsyn_cmp),
26024
26025#undef  ARM_VARIANT
26026#define ARM_VARIANT  & fpu_vfp_ext_v2
26027
26028 mcCE(fmsrr,	c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
26029 mcCE(fmrrs,	c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
26030 mcCE(fmdrr,	c400b10, 3, (RVD, RR, RR),    vfp_dp_rm_rd_rn),
26031 mcCE(fmrrd,	c500b10, 3, (RR, RR, RVD),    vfp_dp_rd_rn_rm),
26032
26033#undef  ARM_VARIANT
26034#define ARM_VARIANT    & fpu_vfp_ext_armv8xd
26035 mnUF(vcvta,  _vcvta,  2, (RNSDQMQ, oRNSDQMQ),		neon_cvta),
26036 mnUF(vcvtp,  _vcvta,  2, (RNSDQMQ, oRNSDQMQ),		neon_cvtp),
26037 mnUF(vcvtn,  _vcvta,  3, (RNSDQMQ, oRNSDQMQ, oI32z),	neon_cvtn),
26038 mnUF(vcvtm,  _vcvta,  2, (RNSDQMQ, oRNSDQMQ),		neon_cvtm),
26039 mnUF(vmaxnm, _vmaxnm, 3, (RNSDQMQ, oRNSDQMQ, RNSDQMQ),	vmaxnm),
26040 mnUF(vminnm, _vminnm, 3, (RNSDQMQ, oRNSDQMQ, RNSDQMQ),	vmaxnm),
26041
26042#undef	ARM_VARIANT
26043#define ARM_VARIANT & fpu_neon_ext_v1
26044 mnUF(vabd,      _vabd,		  3, (RNDQMQ, oRNDQMQ, RNDQMQ), neon_dyadic_if_su),
26045 mnUF(vabdl,     _vabdl,	  3, (RNQMQ, RNDMQ, RNDMQ),   neon_dyadic_long),
26046 mnUF(vaddl,     _vaddl,	  3, (RNQMQ, RNDMQ, RNDMQR),  neon_dyadic_long),
26047 mnUF(vsubl,     _vsubl,	  3, (RNQMQ, RNDMQ, RNDMQR),  neon_dyadic_long),
26048 mnUF(vand,      _vand,		  3, (RNDQMQ, oRNDQMQ, RNDQMQ_Ibig), neon_logic),
26049 mnUF(vbic,      _vbic,		  3, (RNDQMQ, oRNDQMQ, RNDQMQ_Ibig), neon_logic),
26050 mnUF(vorr,      _vorr,		  3, (RNDQMQ, oRNDQMQ, RNDQMQ_Ibig), neon_logic),
26051 mnUF(vorn,      _vorn,		  3, (RNDQMQ, oRNDQMQ, RNDQMQ_Ibig), neon_logic),
26052 mnUF(veor,      _veor,		  3, (RNDQMQ, oRNDQMQ, RNDQMQ),      neon_logic),
26053 MNUF(vcls,      1b00400,	  2, (RNDQMQ, RNDQMQ),		     neon_cls),
26054 MNUF(vclz,      1b00480,	  2, (RNDQMQ, RNDQMQ),		     neon_clz),
26055 mnCE(vdup,      _vdup,		  2, (RNDQMQ, RR_RNSC),		     neon_dup),
26056 MNUF(vhadd,     00000000,	  3, (RNDQMQ, oRNDQMQ, RNDQMQR),  neon_dyadic_i_su),
26057 MNUF(vrhadd,    00000100,	  3, (RNDQMQ, oRNDQMQ, RNDQMQ),	  neon_dyadic_i_su),
26058 MNUF(vhsub,     00000200,	  3, (RNDQMQ, oRNDQMQ, RNDQMQR),  neon_dyadic_i_su),
26059 mnUF(vmin,      _vmin,    3, (RNDQMQ, oRNDQMQ, RNDQMQ), neon_dyadic_if_su),
26060 mnUF(vmax,      _vmax,    3, (RNDQMQ, oRNDQMQ, RNDQMQ), neon_dyadic_if_su),
26061 MNUF(vqadd,     0000010,  3, (RNDQMQ, oRNDQMQ, RNDQMQR), neon_dyadic_i64_su),
26062 MNUF(vqsub,     0000210,  3, (RNDQMQ, oRNDQMQ, RNDQMQR), neon_dyadic_i64_su),
26063 mnUF(vmvn,      _vmvn,    2, (RNDQMQ, RNDQMQ_Ibig), neon_mvn),
26064 MNUF(vqabs,     1b00700,  2, (RNDQMQ, RNDQMQ),     neon_sat_abs_neg),
26065 MNUF(vqneg,     1b00780,  2, (RNDQMQ, RNDQMQ),     neon_sat_abs_neg),
26066 mnUF(vqrdmlah,  _vqrdmlah,3, (RNDQMQ, oRNDQMQ, RNDQ_RNSC_RR), neon_qrdmlah),
26067 mnUF(vqdmulh,   _vqdmulh, 3, (RNDQMQ, oRNDQMQ, RNDQMQ_RNSC_RR), neon_qdmulh),
26068 mnUF(vqrdmulh,  _vqrdmulh,3, (RNDQMQ, oRNDQMQ, RNDQMQ_RNSC_RR), neon_qdmulh),
26069 MNUF(vqrshl,    0000510,  3, (RNDQMQ, oRNDQMQ, RNDQMQR), neon_rshl),
26070 MNUF(vrshl,     0000500,  3, (RNDQMQ, oRNDQMQ, RNDQMQR), neon_rshl),
26071 MNUF(vshr,      0800010,  3, (RNDQMQ, oRNDQMQ, I64z), neon_rshift_round_imm),
26072 MNUF(vrshr,     0800210,  3, (RNDQMQ, oRNDQMQ, I64z), neon_rshift_round_imm),
26073 MNUF(vsli,      1800510,  3, (RNDQMQ, oRNDQMQ, I63),  neon_sli),
26074 MNUF(vsri,      1800410,  3, (RNDQMQ, oRNDQMQ, I64z), neon_sri),
26075 MNUF(vrev64,    1b00000,  2, (RNDQMQ, RNDQMQ),     neon_rev),
26076 MNUF(vrev32,    1b00080,  2, (RNDQMQ, RNDQMQ),     neon_rev),
26077 MNUF(vrev16,    1b00100,  2, (RNDQMQ, RNDQMQ),     neon_rev),
26078 mnUF(vshl,	 _vshl,    3, (RNDQMQ, oRNDQMQ, RNDQMQ_I63b_RR), neon_shl),
26079 mnUF(vqshl,     _vqshl,   3, (RNDQMQ, oRNDQMQ, RNDQMQ_I63b_RR), neon_qshl),
26080 MNUF(vqshlu,    1800610,  3, (RNDQMQ, oRNDQMQ, I63),		 neon_qshlu_imm),
26081
26082#undef	ARM_VARIANT
26083#define ARM_VARIANT & arm_ext_v8_3
26084#undef	THUMB_VARIANT
26085#define	THUMB_VARIANT & arm_ext_v6t2_v8m
26086 MNUF (vcadd, 0, 4, (RNDQMQ, RNDQMQ, RNDQMQ, EXPi), vcadd),
26087 MNUF (vcmla, 0, 4, (RNDQMQ, RNDQMQ, RNDQMQ_RNSC, EXPi), vcmla),
26088
26089#undef	ARM_VARIANT
26090#define ARM_VARIANT &arm_ext_bf16
26091#undef	THUMB_VARIANT
26092#define	THUMB_VARIANT &arm_ext_bf16
26093 TUF ("vdot", c000d00, fc000d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), vdot, vdot),
26094 TUF ("vmmla", c000c40, fc000c40, 3, (RNQ, RNQ, RNQ), vmmla, vmmla),
26095 TUF ("vfmab", c300810, fc300810, 3, (RNDQ, RNDQ, RNDQ_RNSC), bfloat_vfma, bfloat_vfma),
26096
26097#undef	ARM_VARIANT
26098#define ARM_VARIANT &arm_ext_i8mm
26099#undef	THUMB_VARIANT
26100#define	THUMB_VARIANT &arm_ext_i8mm
26101 TUF ("vsmmla", c200c40, fc200c40, 3, (RNQ, RNQ, RNQ), vsmmla, vsmmla),
26102 TUF ("vummla", c200c50, fc200c50, 3, (RNQ, RNQ, RNQ), vummla, vummla),
26103 TUF ("vusmmla", ca00c40, fca00c40, 3, (RNQ, RNQ, RNQ), vsmmla, vsmmla),
26104 TUF ("vusdot", c800d00, fc800d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), vusdot, vusdot),
26105 TUF ("vsudot", c800d10, fc800d10, 3, (RNDQ, RNDQ, RNSC), vsudot, vsudot),
26106};
26107#undef ARM_VARIANT
26108#undef THUMB_VARIANT
26109#undef TCE
26110#undef TUE
26111#undef TUF
26112#undef TCC
26113#undef cCE
26114#undef cCL
26115#undef C3E
26116#undef C3
26117#undef CE
26118#undef CM
26119#undef CL
26120#undef UE
26121#undef UF
26122#undef UT
26123#undef NUF
26124#undef nUF
26125#undef NCE
26126#undef nCE
26127#undef OPS0
26128#undef OPS1
26129#undef OPS2
26130#undef OPS3
26131#undef OPS4
26132#undef OPS5
26133#undef OPS6
26134#undef do_0
26135#undef ToC
26136#undef toC
26137#undef ToU
26138#undef toU
26139
26140/* MD interface: bits in the object file.  */
26141
26142/* Turn an integer of n bytes (in val) into a stream of bytes appropriate
26143   for use in the a.out file, and stores them in the array pointed to by buf.
26144   This knows about the endian-ness of the target machine and does
26145   THE RIGHT THING, whatever it is.  Possible values for n are 1 (byte)
   2 (short) and 4 (long).  Floating numbers are put out as a series of
26147   LITTLENUMS (shorts, here at least).	*/
26148
26149void
26150md_number_to_chars (char * buf, valueT val, int n)
26151{
26152  if (target_big_endian)
26153    number_to_chars_bigendian (buf, val, n);
26154  else
26155    number_to_chars_littleendian (buf, val, n);
26156}
26157
26158static valueT
26159md_chars_to_number (char * buf, int n)
26160{
26161  valueT result = 0;
26162  unsigned char * where = (unsigned char *) buf;
26163
26164  if (target_big_endian)
26165    {
26166      while (n--)
26167	{
26168	  result <<= 8;
26169	  result |= (*where++ & 255);
26170	}
26171    }
26172  else
26173    {
26174      while (n--)
26175	{
26176	  result <<= 8;
26177	  result |= (where[n] & 255);
26178	}
26179    }
26180
26181  return result;
26182}
26183
26184/* MD interface: Sections.  */
26185
26186/* Calculate the maximum variable size (i.e., excluding fr_fix)
26187   that an rs_machine_dependent frag may reach.  */
26188
26189unsigned int
26190arm_frag_max_var (fragS *fragp)
26191{
26192  /* We only use rs_machine_dependent for variable-size Thumb instructions,
26193     which are either THUMB_SIZE (2) or INSN_SIZE (4).
26194
26195     Note that we generate relaxable instructions even for cases that don't
26196     really need it, like an immediate that's a trivial constant.  So we're
26197     overestimating the instruction size for some of those cases.  Rather
26198     than putting more intelligence here, it would probably be better to
26199     avoid generating a relaxation frag in the first place when it can be
26200     determined up front that a short instruction will suffice.  */
26201
26202  gas_assert (fragp->fr_type == rs_machine_dependent);
26203  return INSN_SIZE;
26204}
26205
26206/* Estimate the size of a frag before relaxing.  Assume everything fits in
26207   2 bytes.  */
26208
26209int
26210md_estimate_size_before_relax (fragS * fragp,
26211			       segT    segtype ATTRIBUTE_UNUSED)
26212{
26213  fragp->fr_var = 2;
26214  return 2;
26215}
26216
26217/* Convert a machine dependent frag.  */
26218
/* Finalize a relaxed rs_machine_dependent frag.  The frag contains a
   16-bit Thumb instruction; if relaxation settled on the 32-bit
   Thumb-2 encoding (fr_var == 4), rewrite the instruction in place,
   carrying the register fields over from the narrow form.  In either
   case, emit the fix-up that will later fill in the immediate or
   branch offset.  */
void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;		/* The original 16-bit opcode.  */
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  /* The instruction to rewrite sits at the end of the fixed part of
     the frag.  */
  buf = fragp->fr_literal + fragp->fr_fix;

  old_op = bfd_get_16(abfd, buf);
  /* Build the expression the fix-up will resolve: symbol + offset, or
     a bare constant when no symbol is involved.  */
  if (fragp->fr_symbol)
    {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
    }
  else
    {
      exp.X_op = O_constant;
    }
  exp.X_add_number = fragp->fr_offset;
  /* fr_subtype holds the T_MNEM_* code recorded at assembly time.  */
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Top nibble 4 or 9 marks the PC- / SP-relative narrow
	     forms, whose transfer register is in bits 8-10; the other
	     forms keep Rt in bits 0-2 and Rn in bits 3-5.  Move those
	     fields into their 32-bit positions.  */
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  /* NOTE(review): presumably selects the immediate-offset
	     addressing sub-encoding of the 32-bit load/store — confirm
	     against the T32 encoding tables.  */
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Only the PC-relative ldr variant needs a PC-relative reloc.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Carry the destination register (bits 8-11 of the narrow
	     form) into the wide encoding.  */
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* The narrow form implicitly adds 4 (Align(PC,4)); fold that
	     into the addend.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* mov/movs put the register in Rd (bit 8); cmp/cmn use Rn
	     (bit 16), hence the extra shift of 8.  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    case T_MNEM_b:
      /* Unconditional branch: only the reloc (and hence the reachable
	 range) differs between the two sizes.  */
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    case T_MNEM_bcond:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  /* Move the condition field (bits 8-11) to bits 22-25 of the
	     wide encoding.  */
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  /* Carry the destination register over.  */
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Rd from bits 4-7, Rn from bits 0-3 of the narrow form.  */
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* Bit 20 (the S bit here) distinguishes the flag-setting
	     variants, which take the ADD_IMM reloc.  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      /* fr_subtype is set by this file's relaxation code, so any other
	 value is an internal error.  */
      abort ();
    }
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      (enum bfd_reloc_code_real) reloc_type);
  /* Report diagnostics against the original source position.  */
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  fragp->fr_fix += fragp->fr_var;

  /* Set whether we use thumb-2 ISA based on final relaxation results.  */
  if (thumb_mode && fragp->fr_var == 4 && no_cpu_selected ()
      && !ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2))
    ARM_MERGE_FEATURE_SETS (arm_arch_used, thumb_arch_used, arm_ext_v6t2);
}
26390
26391/* Return the size of a relaxable immediate operand instruction.
26392   SHIFT and SIZE specify the form of the allowable immediate.  */
26393static int
26394relax_immediate (fragS *fragp, int size, int shift)
26395{
26396  offsetT offset;
26397  offsetT mask;
26398  offsetT low;
26399
26400  /* ??? Should be able to do better than this.  */
26401  if (fragp->fr_symbol)
26402    return 4;
26403
26404  low = (1 << shift) - 1;
26405  mask = (1 << (shift + size)) - (1 << shift);
26406  offset = fragp->fr_offset;
26407  /* Force misaligned offsets to 32-bit variant.  */
26408  if (offset & low)
26409    return 4;
26410  if (offset & ~mask)
26411    return 4;
26412  return 2;
26413}
26414
/* Get the address of a symbol during relaxation.  */
static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  know (S_GET_SEGMENT (sym) != absolute_section
	|| sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if have
	 been expanding the earlier code, the symbol may be
	 defined in what appears to be an earlier frag.  FIXME:
	 This doesn't handle the fr_subtype field, which specifies
	 a maximum number of bytes to skip when doing an
	 alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
	{
	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
	    {
	      /* Round the stretch towards zero down to the alignment
		 boundary this frag enforces (fr_offset is the log2
		 alignment).  */
	      if (stretch < 0)
		stretch = - ((- stretch)
			     & ~ ((1 << (int) f->fr_offset) - 1));
	      else
		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
	      /* Alignment padding has absorbed the whole stretch; the
		 symbol will not move.  */
	      if (stretch == 0)
		break;
	    }
	}
      /* F is non-NULL when the symbol's frag was found at or after this
	 one; only then does the (possibly reduced) stretch apply.  */
      if (f != NULL)
	addr += stretch;
    }

  return addr;
}
26464
26465/* Return the size of a relaxable adr pseudo-instruction or PC-relative
26466   load.  */
26467static int
26468relax_adr (fragS *fragp, asection *sec, long stretch)
26469{
26470  addressT addr;
26471  offsetT val;
26472
26473  /* Assume worst case for symbols not known to be in the same section.  */
26474  if (fragp->fr_symbol == NULL
26475      || !S_IS_DEFINED (fragp->fr_symbol)
26476      || sec != S_GET_SEGMENT (fragp->fr_symbol)
26477      || S_IS_WEAK (fragp->fr_symbol))
26478    return 4;
26479
26480  val = relaxed_symbol_addr (fragp, stretch);
26481  addr = fragp->fr_address + fragp->fr_fix;
26482  addr = (addr + 4) & ~3;
26483  /* Force misaligned targets to 32-bit variant.  */
26484  if (val & 3)
26485    return 4;
26486  val -= addr;
26487  if (val < 0 || val > 1020)
26488    return 4;
26489  return 2;
26490}
26491
26492/* Return the size of a relaxable add/sub immediate instruction.  */
26493static int
26494relax_addsub (fragS *fragp, asection *sec)
26495{
26496  char *buf;
26497  int op;
26498
26499  buf = fragp->fr_literal + fragp->fr_fix;
26500  op = bfd_get_16(sec->owner, buf);
26501  if ((op & 0xf) == ((op >> 4) & 0xf))
26502    return relax_immediate (fragp, 8, 0);
26503  else
26504    return relax_immediate (fragp, 3, 0);
26505}
26506
26507/* Return TRUE iff the definition of symbol S could be pre-empted
26508   (overridden) at link or load time.  */
26509static bfd_boolean
26510symbol_preemptible (symbolS *s)
26511{
26512  /* Weak symbols can always be pre-empted.  */
26513  if (S_IS_WEAK (s))
26514    return TRUE;
26515
26516  /* Non-global symbols cannot be pre-empted. */
26517  if (! S_IS_EXTERNAL (s))
26518    return FALSE;
26519
26520#ifdef OBJ_ELF
26521  /* In ELF, a global symbol can be marked protected, or private.  In that
26522     case it can't be pre-empted (other definitions in the same link unit
26523     would violate the ODR).  */
26524  if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
26525    return FALSE;
26526#endif
26527
26528  /* Other global symbols might be pre-empted.  */
26529  return TRUE;
26530}
26531
26532/* Return the size of a relaxable branch instruction.  BITS is the
26533   size of the offset field in the narrow instruction.  */
26534
26535static int
26536relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
26537{
26538  addressT addr;
26539  offsetT val;
26540  offsetT limit;
26541
26542  /* Assume worst case for symbols not known to be in the same section.  */
26543  if (!S_IS_DEFINED (fragp->fr_symbol)
26544      || sec != S_GET_SEGMENT (fragp->fr_symbol)
26545      || S_IS_WEAK (fragp->fr_symbol))
26546    return 4;
26547
26548#ifdef OBJ_ELF
26549  /* A branch to a function in ARM state will require interworking.  */
26550  if (S_IS_DEFINED (fragp->fr_symbol)
26551      && ARM_IS_FUNC (fragp->fr_symbol))
26552      return 4;
26553#endif
26554
26555  if (symbol_preemptible (fragp->fr_symbol))
26556    return 4;
26557
26558  val = relaxed_symbol_addr (fragp, stretch);
26559  addr = fragp->fr_address + fragp->fr_fix + 4;
26560  val -= addr;
26561
26562  /* Offset is a signed value *2 */
26563  limit = 1 << bits;
26564  if (val >= limit || val < -limit)
26565    return 4;
26566  return 2;
26567}
26568
26569
/* Relax a machine dependent frag.  This returns the amount by which
   the current size of the frag should change.  */

int
arm_relax_frag (asection *sec, fragS *fragp, long stretch)
{
  int oldsize;
  int newsize;

  oldsize = fragp->fr_var;
  /* fr_subtype holds the relaxable Thumb mnemonic; each helper returns
     the new instruction size: 2 (narrow) or 4 (wide).  */
  switch (fragp->fr_subtype)
    {
    case T_MNEM_ldr_pc2:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_ldr:
    case T_MNEM_str:
      newsize = relax_immediate (fragp, 5, 2);
      break;
    case T_MNEM_ldrh:
    case T_MNEM_strh:
      newsize = relax_immediate (fragp, 5, 1);
      break;
    case T_MNEM_ldrb:
    case T_MNEM_strb:
      newsize = relax_immediate (fragp, 5, 0);
      break;
    case T_MNEM_adr:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      newsize = relax_immediate (fragp, 8, 0);
      break;
    case T_MNEM_b:
      newsize = relax_branch (fragp, sec, 11, stretch);
      break;
    case T_MNEM_bcond:
      newsize = relax_branch (fragp, sec, 8, stretch);
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      newsize = relax_immediate (fragp, 7, 2);
      break;
    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      newsize = relax_addsub (fragp, sec);
      break;
    default:
      abort ();
    }

  fragp->fr_var = newsize;
  /* Freeze wide instructions that are at or before the same location as
     in the previous pass.  This avoids infinite loops.
     Don't freeze them unconditionally because targets may be artificially
     misaligned by the expansion of preceding frags.  */
  if (stretch <= 0 && newsize > 2)
    {
      md_convert_frag (sec->owner, sec, fragp);
      frag_wane (fragp);
    }

  return newsize - oldsize;
}
26648
/* Round up a section size to the appropriate boundary.	 */

valueT
md_section_align (segT	 segment ATTRIBUTE_UNUSED,
		  valueT size)
{
  /* No extra padding is applied here; SIZE is returned unchanged.  */
  return size;
}
26657
/* This is called from HANDLE_ALIGN in write.c.	 Fill in the contents
   of an rs_align_code fragment.  */

void
arm_handle_align (fragS * fragP)
{
  /* NOP byte patterns, indexed by [variant][target_big_endian].  */
  static unsigned char const arm_noop[2][2][4] =
    {
      {  /* ARMv1 */
	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
      },
      {  /* ARMv6k */
	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
      },
    };
  static unsigned char const thumb_noop[2][2][2] =
    {
      {  /* Thumb-1 */
	{0xc0, 0x46},  /* LE */
	{0x46, 0xc0},  /* BE */
      },
      {  /* Thumb-2 */
	{0x00, 0xbf},  /* LE */
	{0xbf, 0x00}   /* BE */
      }
    };
  static unsigned char const wide_thumb_noop[2][4] =
    {  /* Wide Thumb-2 */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
    };

  unsigned bytes, fix, noop_size;
  char * p;
  const unsigned char * noop;
  const unsigned char *narrow_noop = NULL;
#ifdef OBJ_ELF
  enum mstate state;
#endif

  if (fragP->fr_type != rs_align_code)
    return;

  /* Number of padding bytes this frag must supply.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
    {
      if (ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
			       ? selected_cpu : arm_arch_none, arm_ext_v6t2))
	{
	  /* Thumb-2: pad with wide NOPs, using at most one narrow NOP
	     to fix up any 2-byte residue.  */
	  narrow_noop = thumb_noop[1][target_big_endian];
	  noop = wide_thumb_noop[target_big_endian];
	}
      else
	noop = thumb_noop[0][target_big_endian];
      noop_size = 2;
#ifdef OBJ_ELF
      state = MAP_THUMB;
#endif
    }
  else
    {
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
					   ? selected_cpu : arm_arch_none,
					   arm_ext_v6k) != 0]
		     [target_big_endian];
      noop_size = 4;
#ifdef OBJ_ELF
      state = MAP_ARM;
#endif
    }

  fragP->fr_var = noop_size;

  if (bytes & (noop_size - 1))
    {
      /* Zero-fill up to the instruction-size boundary first, marking
	 the zeros as data in ELF mapping symbols.  */
      fix = bytes & (noop_size - 1);
#ifdef OBJ_ELF
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  if (narrow_noop)
    {
      if (bytes & noop_size)
	{
	  /* Insert a narrow noop.  */
	  memcpy (p, narrow_noop, noop_size);
	  p += noop_size;
	  bytes -= noop_size;
	  fix += noop_size;
	}

      /* Use wide noops for the remainder */
      noop_size = 4;
    }

  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
}
26777
26778/* Called from md_do_align.  Used to create an alignment
26779   frag in a code section.  */
26780
26781void
26782arm_frag_align_code (int n, int max)
26783{
26784  char * p;
26785
26786  /* We assume that there will never be a requirement
26787     to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes.  */
26788  if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
26789    {
26790      char err_msg[128];
26791
26792      sprintf (err_msg,
26793	_("alignments greater than %d bytes not supported in .text sections."),
26794	MAX_MEM_FOR_RS_ALIGN_CODE + 1);
26795      as_fatal ("%s", err_msg);
26796    }
26797
26798  p = frag_var (rs_align_code,
26799		MAX_MEM_FOR_RS_ALIGN_CODE,
26800		1,
26801		(relax_substateT) max,
26802		(symbolS *) NULL,
26803		(offsetT) n,
26804		(char *) NULL);
26805  *p = 0;
26806}
26807
26808/* Perform target specific initialisation of a frag.
26809   Note - despite the name this initialisation is not done when the frag
26810   is created, but only when its type is assigned.  A frag can be created
26811   and used a long time before its type is set, so beware of assuming that
26812   this initialisation is performed first.  */
26813
26814#ifndef OBJ_ELF
void
arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
{
  /* Record whether this frag is in an ARM or a THUMB area.  MODE_RECORDED
     marks the field as valid (it is asserted on in arm_handle_align).  */
  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
}
26821
26822#else /* OBJ_ELF is defined.  */
void
arm_init_frag (fragS * fragP, int max_chars)
{
  bfd_boolean frag_thumb_mode;

  /* If the current ARM vs THUMB mode has not already
     been recorded into this frag then do so now.  */
  if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
    fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
    return;

  /* Strip the MODE_RECORDED marker, leaving just the recorded mode.  */
  frag_thumb_mode = fragP->tc_frag_data.thumb_mode ^ MODE_RECORDED;

  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  switch (fragP->fr_type)
    {
    case rs_align:
    case rs_align_test:
    case rs_fill:
      /* Alignment/fill padding is emitted as data.  */
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (frag_thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
      break;
    default:
      break;
    }
}
26856
/* When we change sections we need to issue a new mapping symbol.  */

void
arm_elf_change_section (void)
{
  /* Link an unlinked unwind index table section to the .text section.	*/
  /* Only fills in a missing link; an existing one is left untouched.  */
  if (elf_section_type (now_seg) == SHT_ARM_EXIDX
      && elf_linked_to_section (now_seg) == NULL)
    elf_linked_to_section (now_seg) = text_section;
}
26867
26868int
26869arm_elf_section_type (const char * str, size_t len)
26870{
26871  if (len == 5 && strncmp (str, "exidx", 5) == 0)
26872    return SHT_ARM_EXIDX;
26873
26874  return -1;
26875}
26876
26877/* Code to deal with unwinding tables.	*/
26878
26879static void add_unwind_adjustsp (offsetT);
26880
26881/* Generate any deferred unwind frame offset.  */
26882
26883static void
26884flush_pending_unwind (void)
26885{
26886  offsetT offset;
26887
26888  offset = unwind.pending_offset;
26889  unwind.pending_offset = 0;
26890  if (offset != 0)
26891    add_unwind_adjustsp (offset);
26892}
26893
26894/* Add an opcode to this list for this function.  Two-byte opcodes should
26895   be passed as op[0] << 8 | op[1].  The list of opcodes is built in reverse
26896   order.  */
26897
26898static void
26899add_unwind_opcode (valueT op, int length)
26900{
26901  /* Add any deferred stack adjustment.	 */
26902  if (unwind.pending_offset)
26903    flush_pending_unwind ();
26904
26905  unwind.sp_restored = 0;
26906
26907  if (unwind.opcode_count + length > unwind.opcode_alloc)
26908    {
26909      unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
26910      if (unwind.opcodes)
26911	unwind.opcodes = XRESIZEVEC (unsigned char, unwind.opcodes,
26912				     unwind.opcode_alloc);
26913      else
26914	unwind.opcodes = XNEWVEC (unsigned char, unwind.opcode_alloc);
26915    }
26916  while (length > 0)
26917    {
26918      length--;
26919      unwind.opcodes[unwind.opcode_count] = op & 0xff;
26920      op >>= 8;
26921      unwind.opcode_count++;
26922    }
26923}
26924
/* Add unwind opcodes to adjust the stack pointer.  */

static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
      if (o == 0)
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.	*/
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  if (o)
	    bytes[n] |= 0x80;
	  n++;
	}
      /* Add the insn.	*/
      /* The uleb128 bytes go in most-significant-first because the
	 opcode list is reversed when finally emitted.  */
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode.	*/
      /* Encodes (offset - 4) / 4 in the low six bits.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      /* Negative adjustment: emit maximal 0x7f opcodes until the
	 remainder fits a single 0x40-form opcode.  */
      offset = -offset;
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
26986
26987/* Finish the list of unwind opcodes for this function.	 */
26988
26989static void
26990finish_unwind_opcodes (void)
26991{
26992  valueT op;
26993
26994  if (unwind.fp_used)
26995    {
26996      /* Adjust sp as necessary.  */
26997      unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
26998      flush_pending_unwind ();
26999
27000      /* After restoring sp from the frame pointer.  */
27001      op = 0x90 | unwind.fp_reg;
27002      add_unwind_opcode (op, 1);
27003    }
27004  else
27005    flush_pending_unwind ();
27006}
27007
27008
/* Start an exception table entry.  If idx is nonzero this is an index table
   entry.  */

static void
start_unwind_section (const segT text_seg, int idx)
{
  const char * text_name;
  const char * prefix;
  const char * prefix_once;
  const char * group_name;
  char * sec_name;
  int type;
  int flags;
  int linkonce;

  /* Index table entries get SHT_ARM_EXIDX sections; unwind data gets
     plain SHT_PROGBITS sections.  */
  if (idx)
    {
      prefix = ELF_STRING_ARM_unwind;
      prefix_once = ELF_STRING_ARM_unwind_once;
      type = SHT_ARM_EXIDX;
    }
  else
    {
      prefix = ELF_STRING_ARM_unwind_info;
      prefix_once = ELF_STRING_ARM_unwind_info_once;
      type = SHT_PROGBITS;
    }

  /* The unwind section name is derived from the name of the code
     section it describes; plain ".text" maps to the bare prefix.  */
  text_name = segment_name (text_seg);
  if (streq (text_name, ".text"))
    text_name = "";

  if (strncmp (text_name, ".gnu.linkonce.t.",
	       strlen (".gnu.linkonce.t.")) == 0)
    {
      prefix = prefix_once;
      text_name += strlen (".gnu.linkonce.t.");
    }

  sec_name = concat (prefix, text_name, (char *) NULL);

  flags = SHF_ALLOC;
  linkonce = 0;
  group_name = 0;

  /* Handle COMDAT group.  */
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
    {
      group_name = elf_group_name (text_seg);
      if (group_name == NULL)
	{
	  as_bad (_("Group section `%s' has no group signature"),
		  segment_name (text_seg));
	  ignore_rest_of_line ();
	  return;
	}
      flags |= SHF_GROUP;
      linkonce = 1;
    }

  obj_elf_change_section (sec_name, type, 0, flags, 0, group_name,
			  linkonce, 0);

  /* Set the section link for index tables.  */
  if (idx)
    elf_linked_to_section (now_seg) = text_seg;
}
27076
27077
/* Start an unwind table entry.	 HAVE_DATA is nonzero if we have additional
   personality routine data.  Returns zero, or the index table value for
   an inline entry.  */

static valueT
create_unwind_entry (int have_data)
{
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.	*/
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  finish_unwind_opcodes ();

  /* Remember the current text section.	 */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      if (unwind.personality_index == -2)
	{
	  if (have_data)
	    as_bad (_("handlerdata in cantunwind frame"));
	  return 1; /* EXIDX_CANTUNWIND.  */
	}

      /* Use a default personality routine if none is specified.  */
      if (unwind.personality_index == -1)
	{
	  if (unwind.opcode_count > 3)
	    unwind.personality_index = 1;
	  else
	    unwind.personality_index = 0;
	}

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
	{
	  if (unwind.opcode_count > 3)
	    as_bad (_("too many unwind opcodes for personality routine 0"));

	  if (!have_data)
	    {
	      /* All the data is inline in the index table.  */
	      data = 0x80;
	      n = 3;
	      while (unwind.opcode_count > 0)
		{
		  unwind.opcode_count--;
		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
		  n--;
		}

	      /* Pad with "finish" opcodes.  */
	      while (n--)
		data = (data << 8) | 0xb0;

	      return data;
	    }
	  size = 0;
	}
      else
	/* We get two opcodes "free" in the first word.	 */
	size = unwind.opcode_count - 2;
    }
  else
    {
      /* PR 16765: Missing or misplaced unwind directives can trigger this.  */
      if (unwind.personality_index != -1)
	{
	  as_bad (_("attempt to recreate an unwind entry"));
	  return 1;
	}

      /* An extra byte is required for the opcode count.	*/
      size = unwind.opcode_count + 1;
    }

  /* Round SIZE up from bytes to 32-bit words.  */
  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  /* Table entries must be word aligned.  */
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.	*/
  ptr = frag_more ((size << 2) + 4);
  /* PR 13449: Zero the table entries in case some of them are not used.  */
  memset (ptr, 0, (size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
	       BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.	*/
      data = size > 0 ? size - 1 : 0;
      n = 3;
      break;

    /* ABI defined personality routines.  */
    case 0:
      /* Three opcodes bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
    {
      if (n == 0)
	{
	  md_number_to_chars (ptr, data, 4);
	  ptr += 4;
	  n = 4;
	  data = 0;
	}
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.	*/
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
	data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.   */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  return 0;
}
27246
27247
/* Initialize the DWARF-2 unwind information for this procedure.  */

void
tc_arm_frame_initial_instructions (void)
{
  /* On function entry the CFA is SP with zero offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
27255#endif /* OBJ_ELF */
27256
27257/* Convert REGNAME to a DWARF-2 register number.  */
27258
27259int
27260tc_arm_regname_to_dw2regnum (char *regname)
27261{
27262  int reg = arm_reg_parse (&regname, REG_TYPE_RN);
27263  if (reg != FAIL)
27264    return reg;
27265
27266  /* PR 16694: Allow VFP registers as well.  */
27267  reg = arm_reg_parse (&regname, REG_TYPE_VFS);
27268  if (reg != FAIL)
27269    return 64 + reg;
27270
27271  reg = arm_reg_parse (&regname, REG_TYPE_VFD);
27272  if (reg != FAIL)
27273    return reg + 256;
27274
27275  return FAIL;
27276}
27277
27278#ifdef TE_PE
void
tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
{
  /* Emit a SIZE-byte section-relative (secrel) reference to SYMBOL.  */
  expressionS exp;

  exp.X_op = O_secrel;
  exp.X_add_symbol = symbol;
  exp.X_add_number = 0;
  emit_expr (&exp, size);
}
27289#endif
27290
27291/* MD interface: Symbol and relocation handling.  */
27292
/* Return the address within the segment that a PC-relative fixup is
   relative to.  For ARM, PC-relative fixups applied to instructions
   are generally relative to the location of the fixup plus 8 bytes.
   Thumb branches are offset by 4, and Thumb loads relative to PC
   require special handling.  */

long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;


  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH5:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_THUMB_PCREL_BFCSEL:
    case BFD_RELOC_ARM_THUMB_BF17:
    case BFD_RELOC_ARM_THUMB_BF19:
    case BFD_RELOC_ARM_THUMB_BF13:
    case BFD_RELOC_ARM_THUMB_LOOP12:
      return base + 4;

      /* NOTE(review): the branch/call cases below restore the un-zeroed
	 base when the target is a local function in the other insn set
	 on v5t - presumably because the insn will be rewritten (e.g. BL
	 <-> BLX) rather than relocated; confirm against md_apply_fix.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
       return base + 4;

      /* BLX is like branches above, but forces the low two bits of PC to
	 zero.  */
    case BFD_RELOC_THUMB_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return (base + 4) & ~3;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_CALL:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif


      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
27426
/* When TRUE, arm_tc_equal_in_insn warns about assignments that create a
   symbol matching an ARM mnemonic (the "[-mwarn-syms]" diagnostic below);
   presumably cleared by a -mno-warn-syms command line option handled
   elsewhere in this file -- TODO confirm.  */
static bfd_boolean flag_warn_syms = TRUE;
27428
27429bfd_boolean
27430arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED, char * name)
27431{
27432  /* PR 18347 - Warn if the user attempts to create a symbol with the same
27433     name as an ARM instruction.  Whilst strictly speaking it is allowed, it
27434     does mean that the resulting code might be very confusing to the reader.
27435     Also this warning can be triggered if the user omits an operand before
27436     an immediate address, eg:
27437
27438       LDR =foo
27439
27440     GAS treats this as an assignment of the value of the symbol foo to a
27441     symbol LDR, and so (without this code) it will not issue any kind of
27442     warning or error message.
27443
27444     Note - ARM instructions are case-insensitive but the strings in the hash
27445     table are all stored in lower case, so we must first ensure that name is
27446     lower case too.  */
27447  if (flag_warn_syms && arm_ops_hsh)
27448    {
27449      char * nbuf = strdup (name);
27450      char * p;
27451
27452      for (p = nbuf; *p; p++)
27453	*p = TOLOWER (*p);
27454      if (hash_find (arm_ops_hsh, nbuf) != NULL)
27455	{
27456	  static struct hash_control * already_warned = NULL;
27457
27458	  if (already_warned == NULL)
27459	    already_warned = hash_new ();
27460	  /* Only warn about the symbol once.  To keep the code
27461	     simple we let hash_insert do the lookup for us.  */
27462	  if (hash_insert (already_warned, nbuf, NULL) == NULL)
27463	    as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name);
27464	}
27465      else
27466	free (nbuf);
27467    }
27468
27469  return FALSE;
27470}
27471
27472/* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
27473   Otherwise we have no need to default values of symbols.  */
27474
27475symbolS *
27476md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
27477{
27478#ifdef OBJ_ELF
27479  if (name[0] == '_' && name[1] == 'G'
27480      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
27481    {
27482      if (!GOT_symbol)
27483	{
27484	  if (symbol_find (name))
27485	    as_bad (_("GOT already in the symbol table"));
27486
27487	  GOT_symbol = symbol_new (name, undefined_section,
27488				   (valueT) 0, & zero_address_frag);
27489	}
27490
27491      return GOT_symbol;
27492    }
27493#endif
27494
27495  return NULL;
27496}
27497
27498/* Subroutine of md_apply_fix.	 Check to see if an immediate can be
27499   computed as two separate immediate values, added together.  We
27500   already know that this value cannot be computed by just one ARM
27501   instruction.	 */
27502
27503static unsigned int
27504validate_immediate_twopart (unsigned int   val,
27505			    unsigned int * highpart)
27506{
27507  unsigned int a;
27508  unsigned int i;
27509
27510  for (i = 0; i < 32; i += 2)
27511    if (((a = rotate_left (val, i)) & 0xff) != 0)
27512      {
27513	if (a & 0xff00)
27514	  {
27515	    if (a & ~ 0xffff)
27516	      continue;
27517	    * highpart = (a  >> 8) | ((i + 24) << 7);
27518	  }
27519	else if (a & 0xff0000)
27520	  {
27521	    if (a & 0xff000000)
27522	      continue;
27523	    * highpart = (a >> 16) | ((i + 16) << 7);
27524	  }
27525	else
27526	  {
27527	    gas_assert (a & 0xff000000);
27528	    * highpart = (a >> 24) | ((i + 8) << 7);
27529	  }
27530
27531	return (a & 0xff) | (i << 7);
27532      }
27533
27534  return FAIL;
27535}
27536
27537static int
27538validate_offset_imm (unsigned int val, int hwse)
27539{
27540  if ((hwse && val > 255) || val > 4095)
27541    return FAIL;
27542  return val;
27543}
27544
27545/* Subroutine of md_apply_fix.	 Do those data_ops which can take a
27546   negative immediate constant by altering the instruction.  A bit of
27547   a hack really.
27548	MOV <-> MVN
27549	AND <-> BIC
27550	ADC <-> SBC
27551	by inverting the second operand, and
27552	ADD <-> SUB
27553	CMP <-> CMN
27554	by negating the second operand.	 */
27555
27556static int
27557negate_data_op (unsigned long * instruction,
27558		unsigned long	value)
27559{
27560  int op, new_inst;
27561  unsigned long negated, inverted;
27562
27563  negated = encode_arm_immediate (-value);
27564  inverted = encode_arm_immediate (~value);
27565
27566  op = (*instruction >> DATA_OP_SHIFT) & 0xf;
27567  switch (op)
27568    {
27569      /* First negates.	 */
27570    case OPCODE_SUB:		 /* ADD <-> SUB	 */
27571      new_inst = OPCODE_ADD;
27572      value = negated;
27573      break;
27574
27575    case OPCODE_ADD:
27576      new_inst = OPCODE_SUB;
27577      value = negated;
27578      break;
27579
27580    case OPCODE_CMP:		 /* CMP <-> CMN	 */
27581      new_inst = OPCODE_CMN;
27582      value = negated;
27583      break;
27584
27585    case OPCODE_CMN:
27586      new_inst = OPCODE_CMP;
27587      value = negated;
27588      break;
27589
27590      /* Now Inverted ops.  */
27591    case OPCODE_MOV:		 /* MOV <-> MVN	 */
27592      new_inst = OPCODE_MVN;
27593      value = inverted;
27594      break;
27595
27596    case OPCODE_MVN:
27597      new_inst = OPCODE_MOV;
27598      value = inverted;
27599      break;
27600
27601    case OPCODE_AND:		 /* AND <-> BIC	 */
27602      new_inst = OPCODE_BIC;
27603      value = inverted;
27604      break;
27605
27606    case OPCODE_BIC:
27607      new_inst = OPCODE_AND;
27608      value = inverted;
27609      break;
27610
27611    case OPCODE_ADC:		  /* ADC <-> SBC  */
27612      new_inst = OPCODE_SBC;
27613      value = inverted;
27614      break;
27615
27616    case OPCODE_SBC:
27617      new_inst = OPCODE_ADC;
27618      value = inverted;
27619      break;
27620
27621      /* We cannot do anything.	 */
27622    default:
27623      return FAIL;
27624    }
27625
27626  if (value == (unsigned) FAIL)
27627    return FAIL;
27628
27629  *instruction &= OPCODE_MASK;
27630  *instruction |= new_inst << DATA_OP_SHIFT;
27631  return value;
27632}
27633
27634/* Like negate_data_op, but for Thumb-2.   */
27635
27636static unsigned int
27637thumb32_negate_data_op (offsetT *instruction, unsigned int value)
27638{
27639  int op, new_inst;
27640  int rd;
27641  unsigned int negated, inverted;
27642
27643  negated = encode_thumb32_immediate (-value);
27644  inverted = encode_thumb32_immediate (~value);
27645
27646  rd = (*instruction >> 8) & 0xf;
27647  op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
27648  switch (op)
27649    {
27650      /* ADD <-> SUB.  Includes CMP <-> CMN.  */
27651    case T2_OPCODE_SUB:
27652      new_inst = T2_OPCODE_ADD;
27653      value = negated;
27654      break;
27655
27656    case T2_OPCODE_ADD:
27657      new_inst = T2_OPCODE_SUB;
27658      value = negated;
27659      break;
27660
27661      /* ORR <-> ORN.  Includes MOV <-> MVN.  */
27662    case T2_OPCODE_ORR:
27663      new_inst = T2_OPCODE_ORN;
27664      value = inverted;
27665      break;
27666
27667    case T2_OPCODE_ORN:
27668      new_inst = T2_OPCODE_ORR;
27669      value = inverted;
27670      break;
27671
27672      /* AND <-> BIC.  TST has no inverted equivalent.  */
27673    case T2_OPCODE_AND:
27674      new_inst = T2_OPCODE_BIC;
27675      if (rd == 15)
27676	value = FAIL;
27677      else
27678	value = inverted;
27679      break;
27680
27681    case T2_OPCODE_BIC:
27682      new_inst = T2_OPCODE_AND;
27683      value = inverted;
27684      break;
27685
27686      /* ADC <-> SBC  */
27687    case T2_OPCODE_ADC:
27688      new_inst = T2_OPCODE_SBC;
27689      value = inverted;
27690      break;
27691
27692    case T2_OPCODE_SBC:
27693      new_inst = T2_OPCODE_ADC;
27694      value = inverted;
27695      break;
27696
27697      /* We cannot do anything.	 */
27698    default:
27699      return FAIL;
27700    }
27701
27702  if (value == (unsigned int)FAIL)
27703    return FAIL;
27704
27705  *instruction &= T2_OPCODE_MASK;
27706  *instruction |= new_inst << T2_DATA_OP_SHIFT;
27707  return value;
27708}
27709
27710/* Read a 32-bit thumb instruction from buf.  */
27711
27712static unsigned long
27713get_thumb32_insn (char * buf)
27714{
27715  unsigned long insn;
27716  insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
27717  insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
27718
27719  return insn;
27720}
27721
27722/* We usually want to set the low bit on the address of thumb function
27723   symbols.  In particular .word foo - . should have the low bit set.
27724   Generic code tries to fold the difference of two symbols to
27725   a constant.  Prevent this and force a relocation when the first symbols
27726   is a thumb function.  */
27727
27728bfd_boolean
27729arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
27730{
27731  if (op == O_subtract
27732      && l->X_op == O_symbol
27733      && r->X_op == O_symbol
27734      && THUMB_IS_FUNC (l->X_add_symbol))
27735    {
27736      l->X_op = O_subtract;
27737      l->X_op_symbol = r->X_add_symbol;
27738      l->X_add_number -= r->X_add_number;
27739      return TRUE;
27740    }
27741
27742  /* Process as normal.  */
27743  return FALSE;
27744}
27745
27746/* Encode Thumb2 unconditional branches and calls. The encoding
27747   for the 2 are identical for the immediate values.  */
27748
27749static void
27750encode_thumb2_b_bl_offset (char * buf, offsetT value)
27751{
27752#define T2I1I2MASK  ((1 << 13) | (1 << 11))
27753  offsetT newval;
27754  offsetT newval2;
27755  addressT S, I1, I2, lo, hi;
27756
27757  S = (value >> 24) & 0x01;
27758  I1 = (value >> 23) & 0x01;
27759  I2 = (value >> 22) & 0x01;
27760  hi = (value >> 12) & 0x3ff;
27761  lo = (value >> 1) & 0x7ff;
27762  newval   = md_chars_to_number (buf, THUMB_SIZE);
27763  newval2  = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
27764  newval  |= (S << 10) | hi;
27765  newval2 &=  ~T2I1I2MASK;
27766  newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
27767  md_number_to_chars (buf, newval, THUMB_SIZE);
27768  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
27769}
27770
27771void
27772md_apply_fix (fixS *	fixP,
27773	       valueT * valP,
27774	       segT	seg)
27775{
27776  offsetT	 value = * valP;
27777  offsetT	 newval;
27778  unsigned int	 newimm;
27779  unsigned long	 temp;
27780  int		 sign;
27781  char *	 buf = fixP->fx_where + fixP->fx_frag->fr_literal;
27782
27783  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
27784
27785  /* Note whether this will delete the relocation.  */
27786
27787  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
27788    fixP->fx_done = 1;
27789
27790  /* On a 64-bit host, silently truncate 'value' to 32 bits for
27791     consistency with the behaviour on 32-bit hosts.  Remember value
27792     for emit_reloc.  */
27793  value &= 0xffffffff;
27794  value ^= 0x80000000;
27795  value -= 0x80000000;
27796
27797  *valP = value;
27798  fixP->fx_addnumber = value;
27799
27800  /* Same treatment for fixP->fx_offset.  */
27801  fixP->fx_offset &= 0xffffffff;
27802  fixP->fx_offset ^= 0x80000000;
27803  fixP->fx_offset -= 0x80000000;
27804
27805  switch (fixP->fx_r_type)
27806    {
27807    case BFD_RELOC_NONE:
27808      /* This will need to go in the object file.  */
27809      fixP->fx_done = 0;
27810      break;
27811
27812    case BFD_RELOC_ARM_IMMEDIATE:
27813      /* We claim that this fixup has been processed here,
27814	 even if in fact we generate an error because we do
27815	 not have a reloc for it, so tc_gen_reloc will reject it.  */
27816      fixP->fx_done = 1;
27817
27818      if (fixP->fx_addsy)
27819	{
27820	  const char *msg = 0;
27821
27822	  if (! S_IS_DEFINED (fixP->fx_addsy))
27823	    msg = _("undefined symbol %s used as an immediate value");
27824	  else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
27825	    msg = _("symbol %s is in a different section");
27826	  else if (S_IS_WEAK (fixP->fx_addsy))
27827	    msg = _("symbol %s is weak and may be overridden later");
27828
27829	  if (msg)
27830	    {
27831	      as_bad_where (fixP->fx_file, fixP->fx_line,
27832			    msg, S_GET_NAME (fixP->fx_addsy));
27833	      break;
27834	    }
27835	}
27836
27837      temp = md_chars_to_number (buf, INSN_SIZE);
27838
27839      /* If the offset is negative, we should use encoding A2 for ADR.  */
27840      if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
27841	newimm = negate_data_op (&temp, value);
27842      else
27843	{
27844	  newimm = encode_arm_immediate (value);
27845
27846	  /* If the instruction will fail, see if we can fix things up by
27847	     changing the opcode.  */
27848	  if (newimm == (unsigned int) FAIL)
27849	    newimm = negate_data_op (&temp, value);
27850	  /* MOV accepts both ARM modified immediate (A1 encoding) and
27851	     UINT16 (A2 encoding) when possible, MOVW only accepts UINT16.
27852	     When disassembling, MOV is preferred when there is no encoding
27853	     overlap.  */
27854	  if (newimm == (unsigned int) FAIL
27855	      && ((temp >> DATA_OP_SHIFT) & 0xf) == OPCODE_MOV
27856	      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
27857	      && !((temp >> SBIT_SHIFT) & 0x1)
27858	      && value >= 0 && value <= 0xffff)
27859	    {
27860	      /* Clear bits[23:20] to change encoding from A1 to A2.  */
27861	      temp &= 0xff0fffff;
27862	      /* Encoding high 4bits imm.  Code below will encode the remaining
27863		 low 12bits.  */
27864	      temp |= (value & 0x0000f000) << 4;
27865	      newimm = value & 0x00000fff;
27866	    }
27867	}
27868
27869      if (newimm == (unsigned int) FAIL)
27870	{
27871	  as_bad_where (fixP->fx_file, fixP->fx_line,
27872			_("invalid constant (%lx) after fixup"),
27873			(unsigned long) value);
27874	  break;
27875	}
27876
27877      newimm |= (temp & 0xfffff000);
27878      md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
27879      break;
27880
27881    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
27882      {
27883	unsigned int highpart = 0;
27884	unsigned int newinsn  = 0xe1a00000; /* nop.  */
27885
27886	if (fixP->fx_addsy)
27887	  {
27888	    const char *msg = 0;
27889
27890	    if (! S_IS_DEFINED (fixP->fx_addsy))
27891	      msg = _("undefined symbol %s used as an immediate value");
27892	    else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
27893	      msg = _("symbol %s is in a different section");
27894	    else if (S_IS_WEAK (fixP->fx_addsy))
27895	      msg = _("symbol %s is weak and may be overridden later");
27896
27897	    if (msg)
27898	      {
27899		as_bad_where (fixP->fx_file, fixP->fx_line,
27900			      msg, S_GET_NAME (fixP->fx_addsy));
27901		break;
27902	      }
27903	  }
27904
27905	newimm = encode_arm_immediate (value);
27906	temp = md_chars_to_number (buf, INSN_SIZE);
27907
27908	/* If the instruction will fail, see if we can fix things up by
27909	   changing the opcode.	 */
27910	if (newimm == (unsigned int) FAIL
27911	    && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
27912	  {
27913	    /* No ?  OK - try using two ADD instructions to generate
27914	       the value.  */
27915	    newimm = validate_immediate_twopart (value, & highpart);
27916
27917	    /* Yes - then make sure that the second instruction is
27918	       also an add.  */
27919	    if (newimm != (unsigned int) FAIL)
27920	      newinsn = temp;
27921	    /* Still No ?  Try using a negated value.  */
27922	    else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
27923	      temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
27924	    /* Otherwise - give up.  */
27925	    else
27926	      {
27927		as_bad_where (fixP->fx_file, fixP->fx_line,
27928			      _("unable to compute ADRL instructions for PC offset of 0x%lx"),
27929			      (long) value);
27930		break;
27931	      }
27932
27933	    /* Replace the first operand in the 2nd instruction (which
27934	       is the PC) with the destination register.  We have
27935	       already added in the PC in the first instruction and we
27936	       do not want to do it again.  */
27937	    newinsn &= ~ 0xf0000;
27938	    newinsn |= ((newinsn & 0x0f000) << 4);
27939	  }
27940
27941	newimm |= (temp & 0xfffff000);
27942	md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
27943
27944	highpart |= (newinsn & 0xfffff000);
27945	md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
27946      }
27947      break;
27948
27949    case BFD_RELOC_ARM_OFFSET_IMM:
27950      if (!fixP->fx_done && seg->use_rela_p)
27951	value = 0;
27952      /* Fall through.  */
27953
27954    case BFD_RELOC_ARM_LITERAL:
27955      sign = value > 0;
27956
27957      if (value < 0)
27958	value = - value;
27959
27960      if (validate_offset_imm (value, 0) == FAIL)
27961	{
27962	  if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
27963	    as_bad_where (fixP->fx_file, fixP->fx_line,
27964			  _("invalid literal constant: pool needs to be closer"));
27965	  else
27966	    as_bad_where (fixP->fx_file, fixP->fx_line,
27967			  _("bad immediate value for offset (%ld)"),
27968			  (long) value);
27969	  break;
27970	}
27971
27972      newval = md_chars_to_number (buf, INSN_SIZE);
27973      if (value == 0)
27974	newval &= 0xfffff000;
27975      else
27976	{
27977	  newval &= 0xff7ff000;
27978	  newval |= value | (sign ? INDEX_UP : 0);
27979	}
27980      md_number_to_chars (buf, newval, INSN_SIZE);
27981      break;
27982
27983    case BFD_RELOC_ARM_OFFSET_IMM8:
27984    case BFD_RELOC_ARM_HWLITERAL:
27985      sign = value > 0;
27986
27987      if (value < 0)
27988	value = - value;
27989
27990      if (validate_offset_imm (value, 1) == FAIL)
27991	{
27992	  if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
27993	    as_bad_where (fixP->fx_file, fixP->fx_line,
27994			  _("invalid literal constant: pool needs to be closer"));
27995	  else
27996	    as_bad_where (fixP->fx_file, fixP->fx_line,
27997			  _("bad immediate value for 8-bit offset (%ld)"),
27998			  (long) value);
27999	  break;
28000	}
28001
28002      newval = md_chars_to_number (buf, INSN_SIZE);
28003      if (value == 0)
28004	newval &= 0xfffff0f0;
28005      else
28006	{
28007	  newval &= 0xff7ff0f0;
28008	  newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
28009	}
28010      md_number_to_chars (buf, newval, INSN_SIZE);
28011      break;
28012
28013    case BFD_RELOC_ARM_T32_OFFSET_U8:
28014      if (value < 0 || value > 1020 || value % 4 != 0)
28015	as_bad_where (fixP->fx_file, fixP->fx_line,
28016		      _("bad immediate value for offset (%ld)"), (long) value);
28017      value /= 4;
28018
28019      newval = md_chars_to_number (buf+2, THUMB_SIZE);
28020      newval |= value;
28021      md_number_to_chars (buf+2, newval, THUMB_SIZE);
28022      break;
28023
28024    case BFD_RELOC_ARM_T32_OFFSET_IMM:
28025      /* This is a complicated relocation used for all varieties of Thumb32
28026	 load/store instruction with immediate offset:
28027
28028	 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
28029						   *4, optional writeback(W)
28030						   (doubleword load/store)
28031
28032	 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
28033	 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
28034	 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
28035	 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
28036	 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
28037
28038	 Uppercase letters indicate bits that are already encoded at
28039	 this point.  Lowercase letters are our problem.  For the
28040	 second block of instructions, the secondary opcode nybble
28041	 (bits 8..11) is present, and bit 23 is zero, even if this is
28042	 a PC-relative operation.  */
28043      newval = md_chars_to_number (buf, THUMB_SIZE);
28044      newval <<= 16;
28045      newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
28046
28047      if ((newval & 0xf0000000) == 0xe0000000)
28048	{
28049	  /* Doubleword load/store: 8-bit offset, scaled by 4.  */
28050	  if (value >= 0)
28051	    newval |= (1 << 23);
28052	  else
28053	    value = -value;
28054	  if (value % 4 != 0)
28055	    {
28056	      as_bad_where (fixP->fx_file, fixP->fx_line,
28057			    _("offset not a multiple of 4"));
28058	      break;
28059	    }
28060	  value /= 4;
28061	  if (value > 0xff)
28062	    {
28063	      as_bad_where (fixP->fx_file, fixP->fx_line,
28064			    _("offset out of range"));
28065	      break;
28066	    }
28067	  newval &= ~0xff;
28068	}
28069      else if ((newval & 0x000f0000) == 0x000f0000)
28070	{
28071	  /* PC-relative, 12-bit offset.  */
28072	  if (value >= 0)
28073	    newval |= (1 << 23);
28074	  else
28075	    value = -value;
28076	  if (value > 0xfff)
28077	    {
28078	      as_bad_where (fixP->fx_file, fixP->fx_line,
28079			    _("offset out of range"));
28080	      break;
28081	    }
28082	  newval &= ~0xfff;
28083	}
28084      else if ((newval & 0x00000100) == 0x00000100)
28085	{
28086	  /* Writeback: 8-bit, +/- offset.  */
28087	  if (value >= 0)
28088	    newval |= (1 << 9);
28089	  else
28090	    value = -value;
28091	  if (value > 0xff)
28092	    {
28093	      as_bad_where (fixP->fx_file, fixP->fx_line,
28094			    _("offset out of range"));
28095	      break;
28096	    }
28097	  newval &= ~0xff;
28098	}
28099      else if ((newval & 0x00000f00) == 0x00000e00)
28100	{
28101	  /* T-instruction: positive 8-bit offset.  */
28102	  if (value < 0 || value > 0xff)
28103	    {
28104	      as_bad_where (fixP->fx_file, fixP->fx_line,
28105			    _("offset out of range"));
28106	      break;
28107	    }
28108	  newval &= ~0xff;
28109	  newval |= value;
28110	}
28111      else
28112	{
28113	  /* Positive 12-bit or negative 8-bit offset.  */
28114	  int limit;
28115	  if (value >= 0)
28116	    {
28117	      newval |= (1 << 23);
28118	      limit = 0xfff;
28119	    }
28120	  else
28121	    {
28122	      value = -value;
28123	      limit = 0xff;
28124	    }
28125	  if (value > limit)
28126	    {
28127	      as_bad_where (fixP->fx_file, fixP->fx_line,
28128			    _("offset out of range"));
28129	      break;
28130	    }
28131	  newval &= ~limit;
28132	}
28133
28134      newval |= value;
28135      md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
28136      md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
28137      break;
28138
28139    case BFD_RELOC_ARM_SHIFT_IMM:
28140      newval = md_chars_to_number (buf, INSN_SIZE);
28141      if (((unsigned long) value) > 32
28142	  || (value == 32
28143	      && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
28144	{
28145	  as_bad_where (fixP->fx_file, fixP->fx_line,
28146			_("shift expression is too large"));
28147	  break;
28148	}
28149
28150      if (value == 0)
28151	/* Shifts of zero must be done as lsl.	*/
28152	newval &= ~0x60;
28153      else if (value == 32)
28154	value = 0;
28155      newval &= 0xfffff07f;
28156      newval |= (value & 0x1f) << 7;
28157      md_number_to_chars (buf, newval, INSN_SIZE);
28158      break;
28159
28160    case BFD_RELOC_ARM_T32_IMMEDIATE:
28161    case BFD_RELOC_ARM_T32_ADD_IMM:
28162    case BFD_RELOC_ARM_T32_IMM12:
28163    case BFD_RELOC_ARM_T32_ADD_PC12:
28164      /* We claim that this fixup has been processed here,
28165	 even if in fact we generate an error because we do
28166	 not have a reloc for it, so tc_gen_reloc will reject it.  */
28167      fixP->fx_done = 1;
28168
28169      if (fixP->fx_addsy
28170	  && ! S_IS_DEFINED (fixP->fx_addsy))
28171	{
28172	  as_bad_where (fixP->fx_file, fixP->fx_line,
28173			_("undefined symbol %s used as an immediate value"),
28174			S_GET_NAME (fixP->fx_addsy));
28175	  break;
28176	}
28177
28178      newval = md_chars_to_number (buf, THUMB_SIZE);
28179      newval <<= 16;
28180      newval |= md_chars_to_number (buf+2, THUMB_SIZE);
28181
28182      newimm = FAIL;
28183      if ((fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
28184	   /* ARMv8-M Baseline MOV will reach here, but it doesn't support
28185	      Thumb2 modified immediate encoding (T2).  */
28186	   && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
28187	  || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
28188	{
28189	  newimm = encode_thumb32_immediate (value);
28190	  if (newimm == (unsigned int) FAIL)
28191	    newimm = thumb32_negate_data_op (&newval, value);
28192	}
28193      if (newimm == (unsigned int) FAIL)
28194	{
28195	  if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE)
28196	    {
28197	      /* Turn add/sum into addw/subw.  */
28198	      if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
28199		newval = (newval & 0xfeffffff) | 0x02000000;
28200	      /* No flat 12-bit imm encoding for addsw/subsw.  */
28201	      if ((newval & 0x00100000) == 0)
28202		{
28203		  /* 12 bit immediate for addw/subw.  */
28204		  if (value < 0)
28205		    {
28206		      value = -value;
28207		      newval ^= 0x00a00000;
28208		    }
28209		  if (value > 0xfff)
28210		    newimm = (unsigned int) FAIL;
28211		  else
28212		    newimm = value;
28213		}
28214	    }
28215	  else
28216	    {
28217	      /* MOV accepts both Thumb2 modified immediate (T2 encoding) and
28218		 UINT16 (T3 encoding), MOVW only accepts UINT16.  When
28219		 disassembling, MOV is preferred when there is no encoding
28220		 overlap.  */
28221	      if (((newval >> T2_DATA_OP_SHIFT) & 0xf) == T2_OPCODE_ORR
28222		  /* NOTE: MOV uses the ORR opcode in Thumb 2 mode
28223		     but with the Rn field [19:16] set to 1111.  */
28224		  && (((newval >> 16) & 0xf) == 0xf)
28225		  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m)
28226		  && !((newval >> T2_SBIT_SHIFT) & 0x1)
28227		  && value >= 0 && value <= 0xffff)
28228		{
28229		  /* Toggle bit[25] to change encoding from T2 to T3.  */
28230		  newval ^= 1 << 25;
28231		  /* Clear bits[19:16].  */
28232		  newval &= 0xfff0ffff;
28233		  /* Encoding high 4bits imm.  Code below will encode the
28234		     remaining low 12bits.  */
28235		  newval |= (value & 0x0000f000) << 4;
28236		  newimm = value & 0x00000fff;
28237		}
28238	    }
28239	}
28240
28241      if (newimm == (unsigned int)FAIL)
28242	{
28243	  as_bad_where (fixP->fx_file, fixP->fx_line,
28244			_("invalid constant (%lx) after fixup"),
28245			(unsigned long) value);
28246	  break;
28247	}
28248
28249      newval |= (newimm & 0x800) << 15;
28250      newval |= (newimm & 0x700) << 4;
28251      newval |= (newimm & 0x0ff);
28252
28253      md_number_to_chars (buf,   (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
28254      md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
28255      break;
28256
28257    case BFD_RELOC_ARM_SMC:
28258      if (((unsigned long) value) > 0xf)
28259	as_bad_where (fixP->fx_file, fixP->fx_line,
28260		      _("invalid smc expression"));
28261
28262      newval = md_chars_to_number (buf, INSN_SIZE);
28263      newval |= (value & 0xf);
28264      md_number_to_chars (buf, newval, INSN_SIZE);
28265      break;
28266
28267    case BFD_RELOC_ARM_HVC:
28268      if (((unsigned long) value) > 0xffff)
28269	as_bad_where (fixP->fx_file, fixP->fx_line,
28270		      _("invalid hvc expression"));
28271      newval = md_chars_to_number (buf, INSN_SIZE);
28272      newval |= (value & 0xf) | ((value & 0xfff0) << 4);
28273      md_number_to_chars (buf, newval, INSN_SIZE);
28274      break;
28275
28276    case BFD_RELOC_ARM_SWI:
28277      if (fixP->tc_fix_data != 0)
28278	{
28279	  if (((unsigned long) value) > 0xff)
28280	    as_bad_where (fixP->fx_file, fixP->fx_line,
28281			  _("invalid swi expression"));
28282	  newval = md_chars_to_number (buf, THUMB_SIZE);
28283	  newval |= value;
28284	  md_number_to_chars (buf, newval, THUMB_SIZE);
28285	}
28286      else
28287	{
28288	  if (((unsigned long) value) > 0x00ffffff)
28289	    as_bad_where (fixP->fx_file, fixP->fx_line,
28290			  _("invalid swi expression"));
28291	  newval = md_chars_to_number (buf, INSN_SIZE);
28292	  newval |= value;
28293	  md_number_to_chars (buf, newval, INSN_SIZE);
28294	}
28295      break;
28296
28297    case BFD_RELOC_ARM_MULTI:
28298      if (((unsigned long) value) > 0xffff)
28299	as_bad_where (fixP->fx_file, fixP->fx_line,
28300		      _("invalid expression in load/store multiple"));
28301      newval = value | md_chars_to_number (buf, INSN_SIZE);
28302      md_number_to_chars (buf, newval, INSN_SIZE);
28303      break;
28304
28305#ifdef OBJ_ELF
28306    case BFD_RELOC_ARM_PCREL_CALL:
28307
28308      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
28309	  && fixP->fx_addsy
28310	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
28311	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
28312	  && THUMB_IS_FUNC (fixP->fx_addsy))
28313	/* Flip the bl to blx. This is a simple flip
28314	   bit here because we generate PCREL_CALL for
28315	   unconditional bls.  */
28316	{
28317	  newval = md_chars_to_number (buf, INSN_SIZE);
28318	  newval = newval | 0x10000000;
28319	  md_number_to_chars (buf, newval, INSN_SIZE);
28320	  temp = 1;
28321	  fixP->fx_done = 1;
28322	}
28323      else
28324	temp = 3;
28325      goto arm_branch_common;
28326
28327    case BFD_RELOC_ARM_PCREL_JUMP:
28328      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
28329	  && fixP->fx_addsy
28330	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
28331	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
28332	  && THUMB_IS_FUNC (fixP->fx_addsy))
28333	{
28334	  /* This would map to a bl<cond>, b<cond>,
28335	     b<always> to a Thumb function. We
28336	     need to force a relocation for this particular
28337	     case.  */
28338	  newval = md_chars_to_number (buf, INSN_SIZE);
28339	  fixP->fx_done = 0;
28340	}
28341      /* Fall through.  */
28342
28343    case BFD_RELOC_ARM_PLT32:
28344#endif
28345    case BFD_RELOC_ARM_PCREL_BRANCH:
28346      temp = 3;
28347      goto arm_branch_common;
28348
28349    case BFD_RELOC_ARM_PCREL_BLX:
28350
28351      temp = 1;
28352      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
28353	  && fixP->fx_addsy
28354	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
28355	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
28356	  && ARM_IS_FUNC (fixP->fx_addsy))
28357	{
28358	  /* Flip the blx to a bl and warn.  */
28359	  const char *name = S_GET_NAME (fixP->fx_addsy);
28360	  newval = 0xeb000000;
28361	  as_warn_where (fixP->fx_file, fixP->fx_line,
28362			 _("blx to '%s' an ARM ISA state function changed to bl"),
28363			  name);
28364	  md_number_to_chars (buf, newval, INSN_SIZE);
28365	  temp = 3;
28366	  fixP->fx_done = 1;
28367	}
28368
28369#ifdef OBJ_ELF
28370       if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
28371	 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
28372#endif
28373
28374    arm_branch_common:
28375      /* We are going to store value (shifted right by two) in the
28376	 instruction, in a 24 bit, signed field.  Bits 26 through 32 either
28377	 all clear or all set and bit 0 must be clear.  For B/BL bit 1 must
28378	 also be clear.  */
28379      if (value & temp)
28380	as_bad_where (fixP->fx_file, fixP->fx_line,
28381		      _("misaligned branch destination"));
28382      if ((value & (offsetT)0xfe000000) != (offsetT)0
28383	  && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
28384	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
28385
28386      if (fixP->fx_done || !seg->use_rela_p)
28387	{
28388	  newval = md_chars_to_number (buf, INSN_SIZE);
28389	  newval |= (value >> 2) & 0x00ffffff;
28390	  /* Set the H bit on BLX instructions.  */
28391	  if (temp == 1)
28392	    {
28393	      if (value & 2)
28394		newval |= 0x01000000;
28395	      else
28396		newval &= ~0x01000000;
28397	    }
28398	  md_number_to_chars (buf, newval, INSN_SIZE);
28399	}
28400      break;
28401
28402    case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
28403      /* CBZ can only branch forward.  */
28404
28405      /* Attempts to use CBZ to branch to the next instruction
28406	 (which, strictly speaking, are prohibited) will be turned into
28407	 no-ops.
28408
28409	 FIXME: It may be better to remove the instruction completely and
28410	 perform relaxation.  */
28411      if (value == -2)
28412	{
28413	  newval = md_chars_to_number (buf, THUMB_SIZE);
28414	  newval = 0xbf00; /* NOP encoding T1 */
28415	  md_number_to_chars (buf, newval, THUMB_SIZE);
28416	}
28417      else
28418	{
28419	  if (value & ~0x7e)
28420	    as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
28421
28422	  if (fixP->fx_done || !seg->use_rela_p)
28423	    {
28424	      newval = md_chars_to_number (buf, THUMB_SIZE);
28425	      newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
28426	      md_number_to_chars (buf, newval, THUMB_SIZE);
28427	    }
28428	}
28429      break;
28430
28431    case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch.	*/
28432      if (out_of_range_p (value, 8))
28433	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
28434
28435      if (fixP->fx_done || !seg->use_rela_p)
28436	{
28437	  newval = md_chars_to_number (buf, THUMB_SIZE);
28438	  newval |= (value & 0x1ff) >> 1;
28439	  md_number_to_chars (buf, newval, THUMB_SIZE);
28440	}
28441      break;
28442
28443    case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch.  */
28444      if (out_of_range_p (value, 11))
28445	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
28446
28447      if (fixP->fx_done || !seg->use_rela_p)
28448	{
28449	  newval = md_chars_to_number (buf, THUMB_SIZE);
28450	  newval |= (value & 0xfff) >> 1;
28451	  md_number_to_chars (buf, newval, THUMB_SIZE);
28452	}
28453      break;
28454
28455    /* This relocation is misnamed, it should be BRANCH21.  */
28456    case BFD_RELOC_THUMB_PCREL_BRANCH20:
28457      if (fixP->fx_addsy
28458	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
28459	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
28460	  && ARM_IS_FUNC (fixP->fx_addsy)
28461	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
28462	{
28463	  /* Force a relocation for a branch 20 bits wide.  */
28464	  fixP->fx_done = 0;
28465	}
28466      if (out_of_range_p (value, 20))
28467	as_bad_where (fixP->fx_file, fixP->fx_line,
28468		      _("conditional branch out of range"));
28469
28470      if (fixP->fx_done || !seg->use_rela_p)
28471	{
28472	  offsetT newval2;
28473	  addressT S, J1, J2, lo, hi;
28474
28475	  S  = (value & 0x00100000) >> 20;
28476	  J2 = (value & 0x00080000) >> 19;
28477	  J1 = (value & 0x00040000) >> 18;
28478	  hi = (value & 0x0003f000) >> 12;
28479	  lo = (value & 0x00000ffe) >> 1;
28480
28481	  newval   = md_chars_to_number (buf, THUMB_SIZE);
28482	  newval2  = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
28483	  newval  |= (S << 10) | hi;
28484	  newval2 |= (J1 << 13) | (J2 << 11) | lo;
28485	  md_number_to_chars (buf, newval, THUMB_SIZE);
28486	  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
28487	}
28488      break;
28489
28490    case BFD_RELOC_THUMB_PCREL_BLX:
28491      /* If there is a blx from a thumb state function to
28492	 another thumb function flip this to a bl and warn
28493	 about it.  */
28494
28495      if (fixP->fx_addsy
28496	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
28497	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
28498	  && THUMB_IS_FUNC (fixP->fx_addsy))
28499	{
28500	  const char *name = S_GET_NAME (fixP->fx_addsy);
28501	  as_warn_where (fixP->fx_file, fixP->fx_line,
28502			 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
28503			 name);
28504	  newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
28505	  newval = newval | 0x1000;
28506	  md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
28507	  fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
28508	  fixP->fx_done = 1;
28509	}
28510
28511
28512      goto thumb_bl_common;
28513
28514    case BFD_RELOC_THUMB_PCREL_BRANCH23:
28515      /* A bl from Thumb state ISA to an internal ARM state function
28516	 is converted to a blx.  */
28517      if (fixP->fx_addsy
28518	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
28519	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
28520	  && ARM_IS_FUNC (fixP->fx_addsy)
28521	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
28522	{
28523	  newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
28524	  newval = newval & ~0x1000;
28525	  md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
28526	  fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
28527	  fixP->fx_done = 1;
28528	}
28529
28530    thumb_bl_common:
28531
28532      if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
28533	/* For a BLX instruction, make sure that the relocation is rounded up
28534	   to a word boundary.  This follows the semantics of the instruction
28535	   which specifies that bit 1 of the target address will come from bit
28536	   1 of the base address.  */
28537	value = (value + 3) & ~ 3;
28538
28539#ifdef OBJ_ELF
28540       if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
28541	   && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
28542	 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
28543#endif
28544
28545      if (out_of_range_p (value, 22))
28546	{
28547	  if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)))
28548	    as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
28549	  else if (out_of_range_p (value, 24))
28550	    as_bad_where (fixP->fx_file, fixP->fx_line,
28551			  _("Thumb2 branch out of range"));
28552	}
28553
28554      if (fixP->fx_done || !seg->use_rela_p)
28555	encode_thumb2_b_bl_offset (buf, value);
28556
28557      break;
28558
28559    case BFD_RELOC_THUMB_PCREL_BRANCH25:
28560      if (out_of_range_p (value, 24))
28561	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
28562
28563      if (fixP->fx_done || !seg->use_rela_p)
28564	  encode_thumb2_b_bl_offset (buf, value);
28565
28566      break;
28567
28568    case BFD_RELOC_8:
28569      if (fixP->fx_done || !seg->use_rela_p)
28570	*buf = value;
28571      break;
28572
28573    case BFD_RELOC_16:
28574      if (fixP->fx_done || !seg->use_rela_p)
28575	md_number_to_chars (buf, value, 2);
28576      break;
28577
28578#ifdef OBJ_ELF
28579    case BFD_RELOC_ARM_TLS_CALL:
28580    case BFD_RELOC_ARM_THM_TLS_CALL:
28581    case BFD_RELOC_ARM_TLS_DESCSEQ:
28582    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
28583    case BFD_RELOC_ARM_TLS_GOTDESC:
28584    case BFD_RELOC_ARM_TLS_GD32:
28585    case BFD_RELOC_ARM_TLS_LE32:
28586    case BFD_RELOC_ARM_TLS_IE32:
28587    case BFD_RELOC_ARM_TLS_LDM32:
28588    case BFD_RELOC_ARM_TLS_LDO32:
28589      S_SET_THREAD_LOCAL (fixP->fx_addsy);
28590      break;
28591
28592      /* Same handling as above, but with the arm_fdpic guard.  */
28593    case BFD_RELOC_ARM_TLS_GD32_FDPIC:
28594    case BFD_RELOC_ARM_TLS_IE32_FDPIC:
28595    case BFD_RELOC_ARM_TLS_LDM32_FDPIC:
28596      if (arm_fdpic)
28597	{
28598	  S_SET_THREAD_LOCAL (fixP->fx_addsy);
28599	}
28600      else
28601	{
28602	  as_bad_where (fixP->fx_file, fixP->fx_line,
28603			_("Relocation supported only in FDPIC mode"));
28604	}
28605      break;
28606
28607    case BFD_RELOC_ARM_GOT32:
28608    case BFD_RELOC_ARM_GOTOFF:
28609      break;
28610
28611    case BFD_RELOC_ARM_GOT_PREL:
28612      if (fixP->fx_done || !seg->use_rela_p)
28613	md_number_to_chars (buf, value, 4);
28614      break;
28615
28616    case BFD_RELOC_ARM_TARGET2:
28617      /* TARGET2 is not partial-inplace, so we need to write the
28618	 addend here for REL targets, because it won't be written out
28619	 during reloc processing later.  */
28620      if (fixP->fx_done || !seg->use_rela_p)
28621	md_number_to_chars (buf, fixP->fx_offset, 4);
28622      break;
28623
28624      /* Relocations for FDPIC.  */
28625    case BFD_RELOC_ARM_GOTFUNCDESC:
28626    case BFD_RELOC_ARM_GOTOFFFUNCDESC:
28627    case BFD_RELOC_ARM_FUNCDESC:
28628      if (arm_fdpic)
28629	{
28630	  if (fixP->fx_done || !seg->use_rela_p)
28631	    md_number_to_chars (buf, 0, 4);
28632	}
28633      else
28634	{
28635	  as_bad_where (fixP->fx_file, fixP->fx_line,
28636			_("Relocation supported only in FDPIC mode"));
28637      }
28638      break;
28639#endif
28640
28641    case BFD_RELOC_RVA:
28642    case BFD_RELOC_32:
28643    case BFD_RELOC_ARM_TARGET1:
28644    case BFD_RELOC_ARM_ROSEGREL32:
28645    case BFD_RELOC_ARM_SBREL32:
28646    case BFD_RELOC_32_PCREL:
28647#ifdef TE_PE
28648    case BFD_RELOC_32_SECREL:
28649#endif
28650      if (fixP->fx_done || !seg->use_rela_p)
28651#ifdef TE_WINCE
28652	/* For WinCE we only do this for pcrel fixups.  */
28653	if (fixP->fx_done || fixP->fx_pcrel)
28654#endif
28655	  md_number_to_chars (buf, value, 4);
28656      break;
28657
28658#ifdef OBJ_ELF
28659    case BFD_RELOC_ARM_PREL31:
28660      if (fixP->fx_done || !seg->use_rela_p)
28661	{
28662	  newval = md_chars_to_number (buf, 4) & 0x80000000;
28663	  if ((value ^ (value >> 1)) & 0x40000000)
28664	    {
28665	      as_bad_where (fixP->fx_file, fixP->fx_line,
28666			    _("rel31 relocation overflow"));
28667	    }
28668	  newval |= value & 0x7fffffff;
28669	  md_number_to_chars (buf, newval, 4);
28670	}
28671      break;
28672#endif
28673
28674    case BFD_RELOC_ARM_CP_OFF_IMM:
28675    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
28676    case BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM:
28677      if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM)
28678	newval = md_chars_to_number (buf, INSN_SIZE);
28679      else
28680	newval = get_thumb32_insn (buf);
28681      if ((newval & 0x0f200f00) == 0x0d000900)
28682	{
28683	  /* This is a fp16 vstr/vldr.  The immediate offset in the mnemonic
28684	     has permitted values that are multiples of 2, in the range 0
28685	     to 510.  */
28686	  if (value < -510 || value > 510 || (value & 1))
28687	    as_bad_where (fixP->fx_file, fixP->fx_line,
28688			  _("co-processor offset out of range"));
28689	}
28690      else if ((newval & 0xfe001f80) == 0xec000f80)
28691	{
28692	  if (value < -511 || value > 512 || (value & 3))
28693	    as_bad_where (fixP->fx_file, fixP->fx_line,
28694			  _("co-processor offset out of range"));
28695	}
28696      else if (value < -1023 || value > 1023 || (value & 3))
28697	as_bad_where (fixP->fx_file, fixP->fx_line,
28698		      _("co-processor offset out of range"));
28699    cp_off_common:
28700      sign = value > 0;
28701      if (value < 0)
28702	value = -value;
28703      if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
28704	  || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
28705	newval = md_chars_to_number (buf, INSN_SIZE);
28706      else
28707	newval = get_thumb32_insn (buf);
28708      if (value == 0)
28709	{
28710	  if (fixP->fx_r_type == BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM)
28711	    newval &= 0xffffff80;
28712	  else
28713	    newval &= 0xffffff00;
28714	}
28715      else
28716	{
28717	  if (fixP->fx_r_type == BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM)
28718	    newval &= 0xff7fff80;
28719	  else
28720	    newval &= 0xff7fff00;
28721	  if ((newval & 0x0f200f00) == 0x0d000900)
28722	    {
28723	      /* This is a fp16 vstr/vldr.
28724
28725		 It requires the immediate offset in the instruction is shifted
28726		 left by 1 to be a half-word offset.
28727
28728		 Here, left shift by 1 first, and later right shift by 2
28729		 should get the right offset.  */
28730	      value <<= 1;
28731	    }
28732	  newval |= (value >> 2) | (sign ? INDEX_UP : 0);
28733	}
28734      if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
28735	  || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
28736	md_number_to_chars (buf, newval, INSN_SIZE);
28737      else
28738	put_thumb32_insn (buf, newval);
28739      break;
28740
28741    case BFD_RELOC_ARM_CP_OFF_IMM_S2:
28742    case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
28743      if (value < -255 || value > 255)
28744	as_bad_where (fixP->fx_file, fixP->fx_line,
28745		      _("co-processor offset out of range"));
28746      value *= 4;
28747      goto cp_off_common;
28748
28749    case BFD_RELOC_ARM_THUMB_OFFSET:
28750      newval = md_chars_to_number (buf, THUMB_SIZE);
28751      /* Exactly what ranges, and where the offset is inserted depends
28752	 on the type of instruction, we can establish this from the
28753	 top 4 bits.  */
28754      switch (newval >> 12)
28755	{
28756	case 4: /* PC load.  */
28757	  /* Thumb PC loads are somewhat odd, bit 1 of the PC is
28758	     forced to zero for these loads; md_pcrel_from has already
28759	     compensated for this.  */
28760	  if (value & 3)
28761	    as_bad_where (fixP->fx_file, fixP->fx_line,
28762			  _("invalid offset, target not word aligned (0x%08lX)"),
28763			  (((unsigned long) fixP->fx_frag->fr_address
28764			    + (unsigned long) fixP->fx_where) & ~3)
28765			  + (unsigned long) value);
28766
28767	  if (value & ~0x3fc)
28768	    as_bad_where (fixP->fx_file, fixP->fx_line,
28769			  _("invalid offset, value too big (0x%08lX)"),
28770			  (long) value);
28771
28772	  newval |= value >> 2;
28773	  break;
28774
28775	case 9: /* SP load/store.  */
28776	  if (value & ~0x3fc)
28777	    as_bad_where (fixP->fx_file, fixP->fx_line,
28778			  _("invalid offset, value too big (0x%08lX)"),
28779			  (long) value);
28780	  newval |= value >> 2;
28781	  break;
28782
28783	case 6: /* Word load/store.  */
28784	  if (value & ~0x7c)
28785	    as_bad_where (fixP->fx_file, fixP->fx_line,
28786			  _("invalid offset, value too big (0x%08lX)"),
28787			  (long) value);
28788	  newval |= value << 4; /* 6 - 2.  */
28789	  break;
28790
28791	case 7: /* Byte load/store.  */
28792	  if (value & ~0x1f)
28793	    as_bad_where (fixP->fx_file, fixP->fx_line,
28794			  _("invalid offset, value too big (0x%08lX)"),
28795			  (long) value);
28796	  newval |= value << 6;
28797	  break;
28798
28799	case 8: /* Halfword load/store.	 */
28800	  if (value & ~0x3e)
28801	    as_bad_where (fixP->fx_file, fixP->fx_line,
28802			  _("invalid offset, value too big (0x%08lX)"),
28803			  (long) value);
28804	  newval |= value << 5; /* 6 - 1.  */
28805	  break;
28806
28807	default:
28808	  as_bad_where (fixP->fx_file, fixP->fx_line,
28809			"Unable to process relocation for thumb opcode: %lx",
28810			(unsigned long) newval);
28811	  break;
28812	}
28813      md_number_to_chars (buf, newval, THUMB_SIZE);
28814      break;
28815
28816    case BFD_RELOC_ARM_THUMB_ADD:
28817      /* This is a complicated relocation, since we use it for all of
28818	 the following immediate relocations:
28819
28820	    3bit ADD/SUB
28821	    8bit ADD/SUB
28822	    9bit ADD/SUB SP word-aligned
28823	   10bit ADD PC/SP word-aligned
28824
28825	 The type of instruction being processed is encoded in the
28826	 instruction field:
28827
28828	   0x8000  SUB
28829	   0x00F0  Rd
28830	   0x000F  Rs
28831      */
28832      newval = md_chars_to_number (buf, THUMB_SIZE);
28833      {
28834	int rd = (newval >> 4) & 0xf;
28835	int rs = newval & 0xf;
28836	int subtract = !!(newval & 0x8000);
28837
28838	/* Check for HI regs, only very restricted cases allowed:
28839	   Adjusting SP, and using PC or SP to get an address.	*/
28840	if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
28841	    || (rs > 7 && rs != REG_SP && rs != REG_PC))
28842	  as_bad_where (fixP->fx_file, fixP->fx_line,
28843			_("invalid Hi register with immediate"));
28844
28845	/* If value is negative, choose the opposite instruction.  */
28846	if (value < 0)
28847	  {
28848	    value = -value;
28849	    subtract = !subtract;
28850	    if (value < 0)
28851	      as_bad_where (fixP->fx_file, fixP->fx_line,
28852			    _("immediate value out of range"));
28853	  }
28854
28855	if (rd == REG_SP)
28856	  {
28857 	    if (value & ~0x1fc)
28858	      as_bad_where (fixP->fx_file, fixP->fx_line,
28859			    _("invalid immediate for stack address calculation"));
28860	    newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
28861	    newval |= value >> 2;
28862	  }
28863	else if (rs == REG_PC || rs == REG_SP)
28864	  {
28865	    /* PR gas/18541.  If the addition is for a defined symbol
28866	       within range of an ADR instruction then accept it.  */
28867	    if (subtract
28868		&& value == 4
28869		&& fixP->fx_addsy != NULL)
28870	      {
28871		subtract = 0;
28872
28873		if (! S_IS_DEFINED (fixP->fx_addsy)
28874		    || S_GET_SEGMENT (fixP->fx_addsy) != seg
28875		    || S_IS_WEAK (fixP->fx_addsy))
28876		  {
28877		    as_bad_where (fixP->fx_file, fixP->fx_line,
28878				  _("address calculation needs a strongly defined nearby symbol"));
28879		  }
28880		else
28881		  {
28882		    offsetT v = fixP->fx_where + fixP->fx_frag->fr_address;
28883
28884		    /* Round up to the next 4-byte boundary.  */
28885		    if (v & 3)
28886		      v = (v + 3) & ~ 3;
28887		    else
28888		      v += 4;
28889		    v = S_GET_VALUE (fixP->fx_addsy) - v;
28890
28891		    if (v & ~0x3fc)
28892		      {
28893			as_bad_where (fixP->fx_file, fixP->fx_line,
28894				      _("symbol too far away"));
28895		      }
28896		    else
28897		      {
28898			fixP->fx_done = 1;
28899			value = v;
28900		      }
28901		  }
28902	      }
28903
28904	    if (subtract || value & ~0x3fc)
28905	      as_bad_where (fixP->fx_file, fixP->fx_line,
28906			    _("invalid immediate for address calculation (value = 0x%08lX)"),
28907			    (unsigned long) (subtract ? - value : value));
28908	    newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
28909	    newval |= rd << 8;
28910	    newval |= value >> 2;
28911	  }
28912	else if (rs == rd)
28913	  {
28914	    if (value & ~0xff)
28915	      as_bad_where (fixP->fx_file, fixP->fx_line,
28916			    _("immediate value out of range"));
28917	    newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
28918	    newval |= (rd << 8) | value;
28919	  }
28920	else
28921	  {
28922	    if (value & ~0x7)
28923	      as_bad_where (fixP->fx_file, fixP->fx_line,
28924			    _("immediate value out of range"));
28925	    newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
28926	    newval |= rd | (rs << 3) | (value << 6);
28927	  }
28928      }
28929      md_number_to_chars (buf, newval, THUMB_SIZE);
28930      break;
28931
28932    case BFD_RELOC_ARM_THUMB_IMM:
28933      newval = md_chars_to_number (buf, THUMB_SIZE);
28934      if (value < 0 || value > 255)
28935	as_bad_where (fixP->fx_file, fixP->fx_line,
28936		      _("invalid immediate: %ld is out of range"),
28937		      (long) value);
28938      newval |= value;
28939      md_number_to_chars (buf, newval, THUMB_SIZE);
28940      break;
28941
28942    case BFD_RELOC_ARM_THUMB_SHIFT:
28943      /* 5bit shift value (0..32).  LSL cannot take 32.	 */
28944      newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
28945      temp = newval & 0xf800;
28946      if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
28947	as_bad_where (fixP->fx_file, fixP->fx_line,
28948		      _("invalid shift value: %ld"), (long) value);
28949      /* Shifts of zero must be encoded as LSL.	 */
28950      if (value == 0)
28951	newval = (newval & 0x003f) | T_OPCODE_LSL_I;
28952      /* Shifts of 32 are encoded as zero.  */
28953      else if (value == 32)
28954	value = 0;
28955      newval |= value << 6;
28956      md_number_to_chars (buf, newval, THUMB_SIZE);
28957      break;
28958
28959    case BFD_RELOC_VTABLE_INHERIT:
28960    case BFD_RELOC_VTABLE_ENTRY:
28961      fixP->fx_done = 0;
28962      return;
28963
28964    case BFD_RELOC_ARM_MOVW:
28965    case BFD_RELOC_ARM_MOVT:
28966    case BFD_RELOC_ARM_THUMB_MOVW:
28967    case BFD_RELOC_ARM_THUMB_MOVT:
28968      if (fixP->fx_done || !seg->use_rela_p)
28969	{
28970	  /* REL format relocations are limited to a 16-bit addend.  */
28971	  if (!fixP->fx_done)
28972	    {
28973	      if (value < -0x8000 || value > 0x7fff)
28974		  as_bad_where (fixP->fx_file, fixP->fx_line,
28975				_("offset out of range"));
28976	    }
28977	  else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
28978		   || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
28979	    {
28980	      value >>= 16;
28981	    }
28982
28983	  if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
28984	      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
28985	    {
28986	      newval = get_thumb32_insn (buf);
28987	      newval &= 0xfbf08f00;
28988	      newval |= (value & 0xf000) << 4;
28989	      newval |= (value & 0x0800) << 15;
28990	      newval |= (value & 0x0700) << 4;
28991	      newval |= (value & 0x00ff);
28992	      put_thumb32_insn (buf, newval);
28993	    }
28994	  else
28995	    {
28996	      newval = md_chars_to_number (buf, 4);
28997	      newval &= 0xfff0f000;
28998	      newval |= value & 0x0fff;
28999	      newval |= (value & 0xf000) << 4;
29000	      md_number_to_chars (buf, newval, 4);
29001	    }
29002	}
29003      return;
29004
29005   case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
29006   case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
29007   case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
29008   case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
29009      gas_assert (!fixP->fx_done);
29010      {
29011	bfd_vma insn;
29012	bfd_boolean is_mov;
29013	bfd_vma encoded_addend = value;
29014
29015	/* Check that addend can be encoded in instruction.  */
29016	if (!seg->use_rela_p && (value < 0 || value > 255))
29017	  as_bad_where (fixP->fx_file, fixP->fx_line,
29018			_("the offset 0x%08lX is not representable"),
29019			(unsigned long) encoded_addend);
29020
29021	/* Extract the instruction.  */
29022	insn = md_chars_to_number (buf, THUMB_SIZE);
29023	is_mov = (insn & 0xf800) == 0x2000;
29024
29025	/* Encode insn.  */
29026	if (is_mov)
29027	  {
29028	    if (!seg->use_rela_p)
29029	      insn |= encoded_addend;
29030	  }
29031	else
29032	  {
29033	    int rd, rs;
29034
29035	    /* Extract the instruction.  */
29036	     /* Encoding is the following
29037		0x8000  SUB
29038		0x00F0  Rd
29039		0x000F  Rs
29040	     */
29041	     /* The following conditions must be true :
29042		- ADD
29043		- Rd == Rs
29044		- Rd <= 7
29045	     */
29046	    rd = (insn >> 4) & 0xf;
29047	    rs = insn & 0xf;
29048	    if ((insn & 0x8000) || (rd != rs) || rd > 7)
29049	      as_bad_where (fixP->fx_file, fixP->fx_line,
29050			_("Unable to process relocation for thumb opcode: %lx"),
29051			(unsigned long) insn);
29052
29053	    /* Encode as ADD immediate8 thumb 1 code.  */
29054	    insn = 0x3000 | (rd << 8);
29055
29056	    /* Place the encoded addend into the first 8 bits of the
29057	       instruction.  */
29058	    if (!seg->use_rela_p)
29059	      insn |= encoded_addend;
29060	  }
29061
29062	/* Update the instruction.  */
29063	md_number_to_chars (buf, insn, THUMB_SIZE);
29064      }
29065      break;
29066
29067   case BFD_RELOC_ARM_ALU_PC_G0_NC:
29068   case BFD_RELOC_ARM_ALU_PC_G0:
29069   case BFD_RELOC_ARM_ALU_PC_G1_NC:
29070   case BFD_RELOC_ARM_ALU_PC_G1:
29071   case BFD_RELOC_ARM_ALU_PC_G2:
29072   case BFD_RELOC_ARM_ALU_SB_G0_NC:
29073   case BFD_RELOC_ARM_ALU_SB_G0:
29074   case BFD_RELOC_ARM_ALU_SB_G1_NC:
29075   case BFD_RELOC_ARM_ALU_SB_G1:
29076   case BFD_RELOC_ARM_ALU_SB_G2:
29077     gas_assert (!fixP->fx_done);
29078     if (!seg->use_rela_p)
29079       {
29080	 bfd_vma insn;
29081	 bfd_vma encoded_addend;
29082	 bfd_vma addend_abs = llabs (value);
29083
29084	 /* Check that the absolute value of the addend can be
29085	    expressed as an 8-bit constant plus a rotation.  */
29086	 encoded_addend = encode_arm_immediate (addend_abs);
29087	 if (encoded_addend == (unsigned int) FAIL)
29088	   as_bad_where (fixP->fx_file, fixP->fx_line,
29089			 _("the offset 0x%08lX is not representable"),
29090			 (unsigned long) addend_abs);
29091
29092	 /* Extract the instruction.  */
29093	 insn = md_chars_to_number (buf, INSN_SIZE);
29094
29095	 /* If the addend is positive, use an ADD instruction.
29096	    Otherwise use a SUB.  Take care not to destroy the S bit.  */
29097	 insn &= 0xff1fffff;
29098	 if (value < 0)
29099	   insn |= 1 << 22;
29100	 else
29101	   insn |= 1 << 23;
29102
29103	 /* Place the encoded addend into the first 12 bits of the
29104	    instruction.  */
29105	 insn &= 0xfffff000;
29106	 insn |= encoded_addend;
29107
29108	 /* Update the instruction.  */
29109	 md_number_to_chars (buf, insn, INSN_SIZE);
29110       }
29111     break;
29112
29113    case BFD_RELOC_ARM_LDR_PC_G0:
29114    case BFD_RELOC_ARM_LDR_PC_G1:
29115    case BFD_RELOC_ARM_LDR_PC_G2:
29116    case BFD_RELOC_ARM_LDR_SB_G0:
29117    case BFD_RELOC_ARM_LDR_SB_G1:
29118    case BFD_RELOC_ARM_LDR_SB_G2:
29119      gas_assert (!fixP->fx_done);
29120      if (!seg->use_rela_p)
29121	{
29122	  bfd_vma insn;
29123	  bfd_vma addend_abs = llabs (value);
29124
29125	  /* Check that the absolute value of the addend can be
29126	     encoded in 12 bits.  */
29127	  if (addend_abs >= 0x1000)
29128	    as_bad_where (fixP->fx_file, fixP->fx_line,
29129			  _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
29130			  (unsigned long) addend_abs);
29131
29132	  /* Extract the instruction.  */
29133	  insn = md_chars_to_number (buf, INSN_SIZE);
29134
29135	  /* If the addend is negative, clear bit 23 of the instruction.
29136	     Otherwise set it.  */
29137	  if (value < 0)
29138	    insn &= ~(1 << 23);
29139	  else
29140	    insn |= 1 << 23;
29141
29142	  /* Place the absolute value of the addend into the first 12 bits
29143	     of the instruction.  */
29144	  insn &= 0xfffff000;
29145	  insn |= addend_abs;
29146
29147	  /* Update the instruction.  */
29148	  md_number_to_chars (buf, insn, INSN_SIZE);
29149	}
29150      break;
29151
29152    case BFD_RELOC_ARM_LDRS_PC_G0:
29153    case BFD_RELOC_ARM_LDRS_PC_G1:
29154    case BFD_RELOC_ARM_LDRS_PC_G2:
29155    case BFD_RELOC_ARM_LDRS_SB_G0:
29156    case BFD_RELOC_ARM_LDRS_SB_G1:
29157    case BFD_RELOC_ARM_LDRS_SB_G2:
29158      gas_assert (!fixP->fx_done);
29159      if (!seg->use_rela_p)
29160	{
29161	  bfd_vma insn;
29162	  bfd_vma addend_abs = llabs (value);
29163
29164	  /* Check that the absolute value of the addend can be
29165	     encoded in 8 bits.  */
29166	  if (addend_abs >= 0x100)
29167	    as_bad_where (fixP->fx_file, fixP->fx_line,
29168			  _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
29169			  (unsigned long) addend_abs);
29170
29171	  /* Extract the instruction.  */
29172	  insn = md_chars_to_number (buf, INSN_SIZE);
29173
29174	  /* If the addend is negative, clear bit 23 of the instruction.
29175	     Otherwise set it.  */
29176	  if (value < 0)
29177	    insn &= ~(1 << 23);
29178	  else
29179	    insn |= 1 << 23;
29180
29181	  /* Place the first four bits of the absolute value of the addend
29182	     into the first 4 bits of the instruction, and the remaining
29183	     four into bits 8 .. 11.  */
29184	  insn &= 0xfffff0f0;
29185	  insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
29186
29187	  /* Update the instruction.  */
29188	  md_number_to_chars (buf, insn, INSN_SIZE);
29189	}
29190      break;
29191
29192    case BFD_RELOC_ARM_LDC_PC_G0:
29193    case BFD_RELOC_ARM_LDC_PC_G1:
29194    case BFD_RELOC_ARM_LDC_PC_G2:
29195    case BFD_RELOC_ARM_LDC_SB_G0:
29196    case BFD_RELOC_ARM_LDC_SB_G1:
29197    case BFD_RELOC_ARM_LDC_SB_G2:
29198      gas_assert (!fixP->fx_done);
29199      if (!seg->use_rela_p)
29200	{
29201	  bfd_vma insn;
29202	  bfd_vma addend_abs = llabs (value);
29203
29204	  /* Check that the absolute value of the addend is a multiple of
29205	     four and, when divided by four, fits in 8 bits.  */
29206	  if (addend_abs & 0x3)
29207	    as_bad_where (fixP->fx_file, fixP->fx_line,
29208			  _("bad offset 0x%08lX (must be word-aligned)"),
29209			  (unsigned long) addend_abs);
29210
29211	  if ((addend_abs >> 2) > 0xff)
29212	    as_bad_where (fixP->fx_file, fixP->fx_line,
29213			  _("bad offset 0x%08lX (must be an 8-bit number of words)"),
29214			  (unsigned long) addend_abs);
29215
29216	  /* Extract the instruction.  */
29217	  insn = md_chars_to_number (buf, INSN_SIZE);
29218
29219	  /* If the addend is negative, clear bit 23 of the instruction.
29220	     Otherwise set it.  */
29221	  if (value < 0)
29222	    insn &= ~(1 << 23);
29223	  else
29224	    insn |= 1 << 23;
29225
29226	  /* Place the addend (divided by four) into the first eight
29227	     bits of the instruction.  */
29228	  insn &= 0xfffffff0;
29229	  insn |= addend_abs >> 2;
29230
29231	  /* Update the instruction.  */
29232	  md_number_to_chars (buf, insn, INSN_SIZE);
29233	}
29234      break;
29235
29236    case BFD_RELOC_THUMB_PCREL_BRANCH5:
29237      if (fixP->fx_addsy
29238	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29239	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
29240	  && ARM_IS_FUNC (fixP->fx_addsy)
29241	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29242	{
29243	  /* Force a relocation for a branch 5 bits wide.  */
29244	  fixP->fx_done = 0;
29245	}
29246      if (v8_1_branch_value_check (value, 5, FALSE) == FAIL)
29247	as_bad_where (fixP->fx_file, fixP->fx_line,
29248		      BAD_BRANCH_OFF);
29249
29250      if (fixP->fx_done || !seg->use_rela_p)
29251	{
29252	  addressT boff = value >> 1;
29253
29254	  newval  = md_chars_to_number (buf, THUMB_SIZE);
29255	  newval |= (boff << 7);
29256	  md_number_to_chars (buf, newval, THUMB_SIZE);
29257	}
29258      break;
29259
29260    case BFD_RELOC_THUMB_PCREL_BFCSEL:
29261      if (fixP->fx_addsy
29262	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29263	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
29264	  && ARM_IS_FUNC (fixP->fx_addsy)
29265	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29266	{
29267	  fixP->fx_done = 0;
29268	}
29269      if ((value & ~0x7f) && ((value & ~0x3f) != ~0x3f))
29270	as_bad_where (fixP->fx_file, fixP->fx_line,
29271		      _("branch out of range"));
29272
29273      if (fixP->fx_done || !seg->use_rela_p)
29274	{
29275	  newval  = md_chars_to_number (buf, THUMB_SIZE);
29276
29277	  addressT boff = ((newval & 0x0780) >> 7) << 1;
29278	  addressT diff = value - boff;
29279
29280	  if (diff == 4)
29281	    {
29282	      newval |= 1 << 1; /* T bit.  */
29283	    }
29284	  else if (diff != 2)
29285	    {
29286	      as_bad_where (fixP->fx_file, fixP->fx_line,
29287			    _("out of range label-relative fixup value"));
29288	    }
29289	  md_number_to_chars (buf, newval, THUMB_SIZE);
29290	}
29291      break;
29292
29293    case BFD_RELOC_ARM_THUMB_BF17:
29294      if (fixP->fx_addsy
29295	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29296	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
29297	  && ARM_IS_FUNC (fixP->fx_addsy)
29298	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29299	{
29300	  /* Force a relocation for a branch 17 bits wide.  */
29301	  fixP->fx_done = 0;
29302	}
29303
29304      if (v8_1_branch_value_check (value, 17, TRUE) == FAIL)
29305	as_bad_where (fixP->fx_file, fixP->fx_line,
29306		      BAD_BRANCH_OFF);
29307
29308      if (fixP->fx_done || !seg->use_rela_p)
29309	{
29310	  offsetT newval2;
29311	  addressT immA, immB, immC;
29312
29313	  immA = (value & 0x0001f000) >> 12;
29314	  immB = (value & 0x00000ffc) >> 2;
29315	  immC = (value & 0x00000002) >> 1;
29316
29317	  newval   = md_chars_to_number (buf, THUMB_SIZE);
29318	  newval2  = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
29319	  newval  |= immA;
29320	  newval2 |= (immC << 11) | (immB << 1);
29321	  md_number_to_chars (buf, newval, THUMB_SIZE);
29322	  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
29323	}
29324      break;
29325
29326    case BFD_RELOC_ARM_THUMB_BF19:
29327      if (fixP->fx_addsy
29328	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29329	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
29330	  && ARM_IS_FUNC (fixP->fx_addsy)
29331	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29332	{
29333	  /* Force a relocation for a branch 19 bits wide.  */
29334	  fixP->fx_done = 0;
29335	}
29336
29337      if (v8_1_branch_value_check (value, 19, TRUE) == FAIL)
29338	as_bad_where (fixP->fx_file, fixP->fx_line,
29339		      BAD_BRANCH_OFF);
29340
29341      if (fixP->fx_done || !seg->use_rela_p)
29342	{
29343	  offsetT newval2;
29344	  addressT immA, immB, immC;
29345
29346	  immA = (value & 0x0007f000) >> 12;
29347	  immB = (value & 0x00000ffc) >> 2;
29348	  immC = (value & 0x00000002) >> 1;
29349
29350	  newval   = md_chars_to_number (buf, THUMB_SIZE);
29351	  newval2  = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
29352	  newval  |= immA;
29353	  newval2 |= (immC << 11) | (immB << 1);
29354	  md_number_to_chars (buf, newval, THUMB_SIZE);
29355	  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
29356	}
29357      break;
29358
29359    case BFD_RELOC_ARM_THUMB_BF13:
29360      if (fixP->fx_addsy
29361	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29362	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
29363	  && ARM_IS_FUNC (fixP->fx_addsy)
29364	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29365	{
29366	  /* Force a relocation for a branch 13 bits wide.  */
29367	  fixP->fx_done = 0;
29368	}
29369
29370      if (v8_1_branch_value_check (value, 13, TRUE) == FAIL)
29371	as_bad_where (fixP->fx_file, fixP->fx_line,
29372		      BAD_BRANCH_OFF);
29373
29374      if (fixP->fx_done || !seg->use_rela_p)
29375	{
29376	  offsetT newval2;
29377	  addressT immA, immB, immC;
29378
29379	  immA = (value & 0x00001000) >> 12;
29380	  immB = (value & 0x00000ffc) >> 2;
29381	  immC = (value & 0x00000002) >> 1;
29382
29383	  newval   = md_chars_to_number (buf, THUMB_SIZE);
29384	  newval2  = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
29385	  newval  |= immA;
29386	  newval2 |= (immC << 11) | (immB << 1);
29387	  md_number_to_chars (buf, newval, THUMB_SIZE);
29388	  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
29389	}
29390      break;
29391
29392    case BFD_RELOC_ARM_THUMB_LOOP12:
29393      if (fixP->fx_addsy
29394	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29395	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
29396	  && ARM_IS_FUNC (fixP->fx_addsy)
29397	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29398	{
29399	  /* Force a relocation for a branch 12 bits wide.  */
29400	  fixP->fx_done = 0;
29401	}
29402
29403      bfd_vma insn = get_thumb32_insn (buf);
29404      /* le lr, <label>, le <label> or letp lr, <label> */
29405      if (((insn & 0xffffffff) == 0xf00fc001)
29406	  || ((insn & 0xffffffff) == 0xf02fc001)
29407	  || ((insn & 0xffffffff) == 0xf01fc001))
29408	value = -value;
29409
29410      if (v8_1_branch_value_check (value, 12, FALSE) == FAIL)
29411	as_bad_where (fixP->fx_file, fixP->fx_line,
29412		      BAD_BRANCH_OFF);
29413      if (fixP->fx_done || !seg->use_rela_p)
29414	{
29415	  addressT imml, immh;
29416
29417	  immh = (value & 0x00000ffc) >> 2;
29418	  imml = (value & 0x00000002) >> 1;
29419
29420	  newval  = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
29421	  newval |= (imml << 11) | (immh << 1);
29422	  md_number_to_chars (buf + THUMB_SIZE, newval, THUMB_SIZE);
29423	}
29424      break;
29425
29426    case BFD_RELOC_ARM_V4BX:
29427      /* This will need to go in the object file.  */
29428      fixP->fx_done = 0;
29429      break;
29430
29431    case BFD_RELOC_UNUSED:
29432    default:
29433      as_bad_where (fixP->fx_file, fixP->fx_line,
29434		    _("bad relocation fixup type (%d)"), fixP->fx_r_type);
29435    }
29436}
29437
29438/* Translate internal representation of relocation info to BFD target
29439   format.  */
29440
arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;

  reloc = XNEW (arelent);

  reloc->sym_ptr_ptr = XNEW (asymbol *);
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  /* For PC-relative fixups the addend to emit depends on whether this
     section uses RELA (explicit addend) or REL (addend stored in the
     instruction field) relocations.  */
  if (fixp->fx_pcrel)
    {
      if (section->use_rela_p)
	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
	fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  /* Translate the internal fixup type into a BFD relocation code.  The
     fall-through chains below convert plain data relocations into their
     PC-relative counterparts when the fixup is PC-relative.  */
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_8_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_16_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_32_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    /* These relocation types pass straight through unchanged.  */
    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_THUMB_PCREL_BLX:
#ifdef OBJ_ELF
      /* EABI v4 and later expresses Thumb BLX as an ordinary BRANCH23;
	 the linker decides whether a mode change is needed.  */
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	code = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
#endif
	code = BFD_RELOC_THUMB_PCREL_BLX;
      break;

    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then a literal has
	 been referenced across a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("literal referenced across section boundary"));
      return NULL;

#ifdef OBJ_ELF
    case BFD_RELOC_ARM_TLS_CALL:
    case BFD_RELOC_ARM_THM_TLS_CALL:
    case BFD_RELOC_ARM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_GOT_PREL:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
    case BFD_RELOC_ARM_V4BX:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
    case BFD_RELOC_ARM_GOTFUNCDESC:
    case BFD_RELOC_ARM_GOTOFFFUNCDESC:
    case BFD_RELOC_ARM_FUNCDESC:
    case BFD_RELOC_ARM_THUMB_BF17:
    case BFD_RELOC_ARM_THUMB_BF19:
    case BFD_RELOC_ARM_THUMB_BF13:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_TLS_GOTDESC:
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_GD32_FDPIC:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_IE32_FDPIC:
    case BFD_RELOC_ARM_TLS_LDM32:
    case BFD_RELOC_ARM_TLS_LDM32_FDPIC:
      /* BFD will include the symbol's address in the addend.
	 But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;
#endif

    /* The remaining cases are internal relocation types that should
       have been fully resolved by md_apply_fix; reaching them here
       means the target was not defined where the instruction needed
       it, so report an error rather than emit a bogus reloc.  */
    case BFD_RELOC_ARM_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_THUMB_PCREL_BRANCH5:
    case BFD_RELOC_THUMB_PCREL_BFCSEL:
    case BFD_RELOC_ARM_THUMB_LOOP12:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("%s used for a symbol not defined in the same file"),
		    bfd_get_reloc_code_name (fixp->fx_r_type));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      if (section->use_rela_p)
	{
	  code = fixp->fx_r_type;
	  break;
	}

      if (fixp->fx_addsy != NULL
	  && !S_IS_DEFINED (fixp->fx_addsy)
	  && S_IS_LOCAL (fixp->fx_addsy))
	{
	  as_bad_where (fixp->fx_file, fixp->fx_line,
			_("undefined local label `%s'"),
			S_GET_NAME (fixp->fx_addsy));
	  return NULL;
	}

      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;

    default:
      {
	const char * type;

	/* Give a readable name for the internal type in the error.  */
	switch (fixp->fx_r_type)
	  {
	  case BFD_RELOC_NONE:		   type = "NONE";	  break;
	  case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
	  case BFD_RELOC_ARM_SHIFT_IMM:	   type = "SHIFT_IMM";	  break;
	  case BFD_RELOC_ARM_SMC:	   type = "SMC";	  break;
	  case BFD_RELOC_ARM_SWI:	   type = "SWI";	  break;
	  case BFD_RELOC_ARM_MULTI:	   type = "MULTI";	  break;
	  case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";	  break;
	  case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_ADD:	   type = "THUMB_ADD";	  break;
	  case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
	  case BFD_RELOC_ARM_THUMB_IMM:	   type = "THUMB_IMM";	  break;
	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
	  default:			   type = _("<unknown>"); break;
	  }
	as_bad_where (fixp->fx_file, fixp->fx_line,
		      _("cannot represent %s relocation in this object file format"),
		      type);
	return NULL;
      }
    }

#ifdef OBJ_ELF
  /* A 32-bit reference to the GOT symbol itself becomes a GOTPC
     relocation, with the addend set to the reloc's own address.  */
  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_ARM_GOTPC;
      reloc->addend = fixp->fx_offset = reloc->address;
    }
#endif

  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);

  if (reloc->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent %s relocation in this object file format"),
		    bfd_get_reloc_code_name (code));
      return NULL;
    }

  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
     vtable entry to be used in the relocation's section offset.  */
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    reloc->address = fixp->fx_offset;

  return reloc;
}
29724
29725/* This fix_new is called by cons via TC_CONS_FIX_NEW.	*/
29726
29727void
29728cons_fix_new_arm (fragS *	frag,
29729		  int		where,
29730		  int		size,
29731		  expressionS * exp,
29732		  bfd_reloc_code_real_type reloc)
29733{
29734  int pcrel = 0;
29735
29736  /* Pick a reloc.
29737     FIXME: @@ Should look at CPU word size.  */
29738  switch (size)
29739    {
29740    case 1:
29741      reloc = BFD_RELOC_8;
29742      break;
29743    case 2:
29744      reloc = BFD_RELOC_16;
29745      break;
29746    case 4:
29747    default:
29748      reloc = BFD_RELOC_32;
29749      break;
29750    case 8:
29751      reloc = BFD_RELOC_64;
29752      break;
29753    }
29754
29755#ifdef TE_PE
29756  if (exp->X_op == O_secrel)
29757  {
29758    exp->X_op = O_symbol;
29759    reloc = BFD_RELOC_32_SECREL;
29760  }
29761#endif
29762
29763  fix_new_exp (frag, where, size, exp, pcrel, reloc);
29764}
29765
29766#if defined (OBJ_COFF)
29767void
29768arm_validate_fix (fixS * fixP)
29769{
29770  /* If the destination of the branch is a defined symbol which does not have
29771     the THUMB_FUNC attribute, then we must be calling a function which has
29772     the (interfacearm) attribute.  We look for the Thumb entry point to that
29773     function and change the branch to refer to that function instead.	*/
29774  if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
29775      && fixP->fx_addsy != NULL
29776      && S_IS_DEFINED (fixP->fx_addsy)
29777      && ! THUMB_IS_FUNC (fixP->fx_addsy))
29778    {
29779      fixP->fx_addsy = find_real_start (fixP->fx_addsy);
29780    }
29781}
29782#endif
29783
29784
29785int
29786arm_force_relocation (struct fix * fixp)
29787{
29788#if defined (OBJ_COFF) && defined (TE_PE)
29789  if (fixp->fx_r_type == BFD_RELOC_RVA)
29790    return 1;
29791#endif
29792
29793  /* In case we have a call or a branch to a function in ARM ISA mode from
29794     a thumb function or vice-versa force the relocation. These relocations
29795     are cleared off for some cores that might have blx and simple transformations
29796     are possible.  */
29797
29798#ifdef OBJ_ELF
29799  switch (fixp->fx_r_type)
29800    {
29801    case BFD_RELOC_ARM_PCREL_JUMP:
29802    case BFD_RELOC_ARM_PCREL_CALL:
29803    case BFD_RELOC_THUMB_PCREL_BLX:
29804      if (THUMB_IS_FUNC (fixp->fx_addsy))
29805	return 1;
29806      break;
29807
29808    case BFD_RELOC_ARM_PCREL_BLX:
29809    case BFD_RELOC_THUMB_PCREL_BRANCH25:
29810    case BFD_RELOC_THUMB_PCREL_BRANCH20:
29811    case BFD_RELOC_THUMB_PCREL_BRANCH23:
29812      if (ARM_IS_FUNC (fixp->fx_addsy))
29813	return 1;
29814      break;
29815
29816    default:
29817      break;
29818    }
29819#endif
29820
29821  /* Resolve these relocations even if the symbol is extern or weak.
29822     Technically this is probably wrong due to symbol preemption.
29823     In practice these relocations do not have enough range to be useful
29824     at dynamic link time, and some code (e.g. in the Linux kernel)
29825     expects these references to be resolved.  */
29826  if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
29827      || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
29828      || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
29829      || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
29830      || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
29831      || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
29832      || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
29833      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
29834      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
29835      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
29836      || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
29837      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
29838      || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
29839      || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
29840    return 0;
29841
29842  /* Always leave these relocations for the linker.  */
29843  if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
29844       && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
29845      || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
29846    return 1;
29847
29848  /* Always generate relocations against function symbols.  */
29849  if (fixp->fx_r_type == BFD_RELOC_32
29850      && fixp->fx_addsy
29851      && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
29852    return 1;
29853
29854  return generic_force_reloc (fixp);
29855}
29856
29857#if defined (OBJ_ELF) || defined (OBJ_COFF)
29858/* Relocations against function names must be left unadjusted,
29859   so that the linker can use this information to generate interworking
29860   stubs.  The MIPS version of this function
29861   also prevents relocations that are mips-16 specific, but I do not
29862   know why it does this.
29863
29864   FIXME:
29865   There is one other problem that ought to be addressed here, but
29866   which currently is not:  Taking the address of a label (rather
29867   than a function) and then later jumping to that address.  Such
29868   addresses also ought to have their bottom bit set (assuming that
29869   they reside in Thumb code), but at the moment they will not.	 */
29870
29871bfd_boolean
29872arm_fix_adjustable (fixS * fixP)
29873{
29874  if (fixP->fx_addsy == NULL)
29875    return 1;
29876
29877  /* Preserve relocations against symbols with function type.  */
29878  if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
29879    return FALSE;
29880
29881  if (THUMB_IS_FUNC (fixP->fx_addsy)
29882      && fixP->fx_subsy == NULL)
29883    return FALSE;
29884
29885  /* We need the symbol name for the VTABLE entries.  */
29886  if (	 fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
29887      || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
29888    return FALSE;
29889
29890  /* Don't allow symbols to be discarded on GOT related relocs.	 */
29891  if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
29892      || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
29893      || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
29894      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
29895      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32_FDPIC
29896      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
29897      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
29898      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32_FDPIC
29899      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
29900      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32_FDPIC
29901      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
29902      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
29903      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
29904      || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
29905      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
29906      || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
29907      || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
29908    return FALSE;
29909
29910  /* Similarly for group relocations.  */
29911  if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
29912       && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
29913      || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
29914    return FALSE;
29915
29916  /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols.  */
29917  if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
29918      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
29919      || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
29920      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
29921      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
29922      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
29923      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
29924      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
29925    return FALSE;
29926
29927  /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
29928     offsets, so keep these symbols.  */
29929  if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
29930      && fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
29931    return FALSE;
29932
29933  return TRUE;
29934}
29935#endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
29936
29937#ifdef OBJ_ELF
29938const char *
29939elf32_arm_target_format (void)
29940{
29941#ifdef TE_SYMBIAN
29942  return (target_big_endian
29943	  ? "elf32-bigarm-symbian"
29944	  : "elf32-littlearm-symbian");
29945#elif defined (TE_VXWORKS)
29946  return (target_big_endian
29947	  ? "elf32-bigarm-vxworks"
29948	  : "elf32-littlearm-vxworks");
29949#elif defined (TE_NACL)
29950  return (target_big_endian
29951	  ? "elf32-bigarm-nacl"
29952	  : "elf32-littlearm-nacl");
29953#else
29954  if (arm_fdpic)
29955    {
29956      if (target_big_endian)
29957	return "elf32-bigarm-fdpic";
29958      else
29959	return "elf32-littlearm-fdpic";
29960    }
29961  else
29962    {
29963      if (target_big_endian)
29964	return "elf32-bigarm";
29965      else
29966	return "elf32-littlearm";
29967    }
29968#endif
29969}
29970
void
armelf_frob_symbol (symbolS * symp,
		    int *     puntp)
{
  /* ARM needs no target-specific symbol frobbing beyond the generic
     ELF processing; delegate directly.  */
  elf_frob_symbol (symp, puntp);
}
29977#endif
29978
29979/* MD interface: Finalization.	*/
29980
29981void
29982arm_cleanup (void)
29983{
29984  literal_pool * pool;
29985
29986  /* Ensure that all the predication blocks are properly closed.  */
29987  check_pred_blocks_finished ();
29988
29989  for (pool = list_of_pools; pool; pool = pool->next)
29990    {
29991      /* Put it at the end of the relevant section.  */
29992      subseg_set (pool->section, pool->sub_section);
29993#ifdef OBJ_ELF
29994      arm_elf_change_section ();
29995#endif
29996      s_ltorg (0);
29997    }
29998}
29999
30000#ifdef OBJ_ELF
30001/* Remove any excess mapping symbols generated for alignment frags in
30002   SEC.  We may have created a mapping symbol before a zero byte
30003   alignment; remove it if there's a mapping symbol after the
30004   alignment.  */
static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to scan if the section has no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  /* Walk every frag, looking at the last mapping symbol recorded for
     each one and deciding whether it is redundant.  */
  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* The symbol sits exactly at the start of NEXT; scan forward over
	 any empty frags to see whether a later mapping symbol makes it
	 redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
30065#endif
30066
30067/* Adjust the symbol table.  This marks Thumb symbols as distinct from
30068   ARM ones.  */
30069
void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  /* Map each Thumb symbol's COFF storage class onto its Thumb-specific
     equivalent so the linker can tell Thumb code from ARM code.  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      /* NOTE(review): 0xFF presumably flags interworking capability to
	 the COFF linker — confirm against the COFF ARM backend.  */
      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  /* Tag Thumb symbols in the ELF symbol table, skipping the special
     mapping symbols ($a/$t/$d and friends).  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
		BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		ARM_SET_SYM_BRANCH_TYPE (elf_sym->internal_elf_sym.st_target_internal,
					 ST_BRANCH_TO_THUMB);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
30148
30149/* MD interface: Initialization.  */
30150
30151static void
30152set_constant_flonums (void)
30153{
30154  int i;
30155
30156  for (i = 0; i < NUM_FLOAT_VALS; i++)
30157    if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
30158      abort ();
30159}
30160
30161/* Auto-select Thumb mode if it's the only available instruction set for the
30162   given architecture.  */
30163
30164static void
30165autoselect_thumb_from_cpu_variant (void)
30166{
30167  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
30168    opcode_select (16);
30169}
30170
30171void
30172md_begin (void)
30173{
30174  unsigned mach;
30175  unsigned int i;
30176
30177  if (	 (arm_ops_hsh = hash_new ()) == NULL
30178      || (arm_cond_hsh = hash_new ()) == NULL
30179      || (arm_vcond_hsh = hash_new ()) == NULL
30180      || (arm_shift_hsh = hash_new ()) == NULL
30181      || (arm_psr_hsh = hash_new ()) == NULL
30182      || (arm_v7m_psr_hsh = hash_new ()) == NULL
30183      || (arm_reg_hsh = hash_new ()) == NULL
30184      || (arm_reloc_hsh = hash_new ()) == NULL
30185      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
30186    as_fatal (_("virtual memory exhausted"));
30187
30188  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
30189    hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
30190  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
30191    hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
30192  for (i = 0; i < sizeof (vconds) / sizeof (struct asm_cond); i++)
30193    hash_insert (arm_vcond_hsh, vconds[i].template_name, (void *) (vconds + i));
30194  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
30195    hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
30196  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
30197    hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
30198  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
30199    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
30200		 (void *) (v7m_psrs + i));
30201  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
30202    hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
30203  for (i = 0;
30204       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
30205       i++)
30206    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
30207		 (void *) (barrier_opt_names + i));
30208#ifdef OBJ_ELF
30209  for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
30210    {
30211      struct reloc_entry * entry = reloc_names + i;
30212
30213      if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
30214	/* This makes encode_branch() use the EABI versions of this relocation.  */
30215	entry->reloc = BFD_RELOC_UNUSED;
30216
30217      hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
30218    }
30219#endif
30220
30221  set_constant_flonums ();
30222
30223  /* Set the cpu variant based on the command-line options.  We prefer
30224     -mcpu= over -march= if both are set (as for GCC); and we prefer
30225     -mfpu= over any other way of setting the floating point unit.
30226     Use of legacy options with new options are faulted.  */
30227  if (legacy_cpu)
30228    {
30229      if (mcpu_cpu_opt || march_cpu_opt)
30230	as_bad (_("use of old and new-style options to set CPU type"));
30231
30232      selected_arch = *legacy_cpu;
30233    }
30234  else if (mcpu_cpu_opt)
30235    {
30236      selected_arch = *mcpu_cpu_opt;
30237      selected_ext = *mcpu_ext_opt;
30238    }
30239  else if (march_cpu_opt)
30240    {
30241      selected_arch = *march_cpu_opt;
30242      selected_ext = *march_ext_opt;
30243    }
30244  ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
30245
30246  if (legacy_fpu)
30247    {
30248      if (mfpu_opt)
30249	as_bad (_("use of old and new-style options to set FPU type"));
30250
30251      selected_fpu = *legacy_fpu;
30252    }
30253  else if (mfpu_opt)
30254    selected_fpu = *mfpu_opt;
30255  else
30256    {
30257#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
30258	|| defined (TE_NetBSD) || defined (TE_VXWORKS))
30259      /* Some environments specify a default FPU.  If they don't, infer it
30260	 from the processor.  */
30261      if (mcpu_fpu_opt)
30262	selected_fpu = *mcpu_fpu_opt;
30263      else if (march_fpu_opt)
30264	selected_fpu = *march_fpu_opt;
30265#else
30266      selected_fpu = fpu_default;
30267#endif
30268    }
30269
30270  if (ARM_FEATURE_ZERO (selected_fpu))
30271    {
30272      if (!no_cpu_selected ())
30273	selected_fpu = fpu_default;
30274      else
30275	selected_fpu = fpu_arch_fpa;
30276    }
30277
30278#ifdef CPU_DEFAULT
30279  if (ARM_FEATURE_ZERO (selected_arch))
30280    {
30281      selected_arch = cpu_default;
30282      selected_cpu = selected_arch;
30283    }
30284  ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
30285#else
  /*  Autodetection of feature mode: allow all features in cpu_variant but leave
      selected_cpu unset.  It will be set in aeabi_set_public_attributes ()
      after all instructions have been processed and we can decide what CPU
      should be selected.  */
30290  if (ARM_FEATURE_ZERO (selected_arch))
30291    ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
30292  else
30293    ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
30294#endif
30295
30296  autoselect_thumb_from_cpu_variant ();
30297
30298  arm_arch_used = thumb_arch_used = arm_arch_none;
30299
30300#if defined OBJ_COFF || defined OBJ_ELF
30301  {
30302    unsigned int flags = 0;
30303
30304#if defined OBJ_ELF
30305    flags = meabi_flags;
30306
30307    switch (meabi_flags)
30308      {
30309      case EF_ARM_EABI_UNKNOWN:
30310#endif
30311	/* Set the flags in the private structure.  */
30312	if (uses_apcs_26)      flags |= F_APCS26;
30313	if (support_interwork) flags |= F_INTERWORK;
30314	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
30315	if (pic_code)	       flags |= F_PIC;
30316	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
30317	  flags |= F_SOFT_FLOAT;
30318
30319	switch (mfloat_abi_opt)
30320	  {
30321	  case ARM_FLOAT_ABI_SOFT:
30322	  case ARM_FLOAT_ABI_SOFTFP:
30323	    flags |= F_SOFT_FLOAT;
30324	    break;
30325
30326	  case ARM_FLOAT_ABI_HARD:
30327	    if (flags & F_SOFT_FLOAT)
30328	      as_bad (_("hard-float conflicts with specified fpu"));
30329	    break;
30330	  }
30331
30332	/* Using pure-endian doubles (even if soft-float).	*/
30333	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
30334	  flags |= F_VFP_FLOAT;
30335
30336#if defined OBJ_ELF
30337	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
30338	    flags |= EF_ARM_MAVERICK_FLOAT;
30339	break;
30340
30341      case EF_ARM_EABI_VER4:
30342      case EF_ARM_EABI_VER5:
30343	/* No additional flags to set.	*/
30344	break;
30345
30346      default:
30347	abort ();
30348      }
30349#endif
30350    bfd_set_private_flags (stdoutput, flags);
30351
    /* We have run out of flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.	*/
30355    if (atpcs)
30356      {
30357	asection * sec;
30358
30359	sec = bfd_make_section (stdoutput, ".arm.atpcs");
30360
30361	if (sec != NULL)
30362	  {
30363	    bfd_set_section_flags (sec, SEC_READONLY | SEC_DEBUGGING);
30364	    bfd_set_section_size (sec, 0);
30365	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
30366	  }
30367      }
30368  }
30369#endif
30370
30371  /* Record the CPU type as well.  */
30372  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
30373    mach = bfd_mach_arm_iWMMXt2;
30374  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
30375    mach = bfd_mach_arm_iWMMXt;
30376  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
30377    mach = bfd_mach_arm_XScale;
30378  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
30379    mach = bfd_mach_arm_ep9312;
30380  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
30381    mach = bfd_mach_arm_5TE;
30382  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
30383    {
30384      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
30385	mach = bfd_mach_arm_5T;
30386      else
30387	mach = bfd_mach_arm_5;
30388    }
30389  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
30390    {
30391      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
30392	mach = bfd_mach_arm_4T;
30393      else
30394	mach = bfd_mach_arm_4;
30395    }
30396  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
30397    mach = bfd_mach_arm_3M;
30398  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
30399    mach = bfd_mach_arm_3;
30400  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
30401    mach = bfd_mach_arm_2a;
30402  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
30403    mach = bfd_mach_arm_2;
30404  else
30405    mach = bfd_mach_arm_unknown;
30406
30407  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
30408}
30409
30410/* Command line processing.  */
30411
30412/* md_parse_option
30413      Invocation line includes a switch not recognized by the base assembler.
30414      See if it's a processor-specific option.
30415
30416      This routine is somewhat complicated by the need for backwards
30417      compatibility (since older releases of gcc can't be changed).
30418      The new options try to make the interface as compatible as
30419      possible with GCC.
30420
30421      New options (supported) are:
30422
30423	      -mcpu=<cpu name>		 Assemble for selected processor
30424	      -march=<architecture name> Assemble for selected architecture
30425	      -mfpu=<fpu architecture>	 Assemble for selected FPU.
30426	      -EB/-mbig-endian		 Big-endian
30427	      -EL/-mlittle-endian	 Little-endian
30428	      -k			 Generate PIC code
30429	      -mthumb			 Start in Thumb mode
30430	      -mthumb-interwork		 Code supports ARM/Thumb interworking
30431
30432	      -m[no-]warn-deprecated     Warn about deprecated features
30433	      -m[no-]warn-syms		 Warn when symbols match instructions
30434
30435      For now we will also provide support for:
30436
30437	      -mapcs-32			 32-bit Program counter
30438	      -mapcs-26			 26-bit Program counter
	      -mapcs-float		 Floats passed in FP registers
30440	      -mapcs-reentrant		 Reentrant code
30441	      -matpcs
      (sometimes these will probably be replaced with -mapcs=<list of options>
30443      and -matpcs=<list of options>)
30444
      The remaining options are only supported for backwards compatibility.
30446      Cpu variants, the arm part is optional:
30447	      -m[arm]1		      Currently not supported.
30448	      -m[arm]2, -m[arm]250    Arm 2 and Arm 250 processor
30449	      -m[arm]3		      Arm 3 processor
30450	      -m[arm]6[xx],	      Arm 6 processors
30451	      -m[arm]7[xx][t][[d]m]   Arm 7 processors
30452	      -m[arm]8[10]	      Arm 8 processors
30453	      -m[arm]9[20][tdmi]      Arm 9 processors
30454	      -mstrongarm[110[0]]     StrongARM processors
30455	      -mxscale		      XScale processors
30456	      -m[arm]v[2345[t[e]]]    Arm architectures
30457	      -mall		      All (except the ARM1)
30458      FP variants:
30459	      -mfpa10, -mfpa11	      FPA10 and 11 co-processor instructions
30460	      -mfpe-old		      (No float load/store multiples)
30461	      -mvfpxd		      VFP Single precision
30462	      -mvfp		      All VFP
30463	      -mno-fpu		      Disable all floating point instructions
30464
30465      The following CPU names are recognized:
30466	      arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
30467	      arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
30468	      arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
30469	      arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
30470	      arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
30471	      arm10t arm10e, arm1020t, arm1020e, arm10200e,
30472	      strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
30473
30474      */
30475
/* Single-letter options: -m<arg> (machine-specific, takes an argument)
   and -k (generate PIC code).  */
const char * md_shortopts = "m:k";
30477
#ifdef ARM_BI_ENDIAN
/* Bi-endian targets accept both -EB and -EL.  */
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
/* Single-endian targets only define the option matching their byte order.  */
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
/* --fix-v4bx: adjust ARMv4 BX handling (see md_parse_option).  */
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
/* --fdpic: ELF-only option (see the md_longopts table below).  */
#define OPTION_FDPIC (OPTION_MD_BASE + 3)
30490
/* Long options.  The -EB/-EL entries are only present when the
   corresponding OPTION_EB/OPTION_EL macro is defined for this target.  */
struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
#ifdef OBJ_ELF
  {"fdpic", no_argument, NULL, OPTION_FDPIC},
#endif
  {NULL, no_argument, NULL, 0}
};
30505
/* Size in bytes of md_longopts, required by the generic option parser.  */
size_t md_longopts_size = sizeof (md_longopts);
30507
/* Describes a simple on/off command-line option: matching OPTION sets
   *VAR to VALUE.  */
struct arm_option_table
{
  const char *  option;		/* Option name to match.  */
  const char *  help;		/* Help information.  */
  int *         var;		/* Variable to change.	*/
  int	        value;		/* What to change it to.  */
  const char *  deprecated;	/* If non-null, print this message.  */
};
30516
30517struct arm_option_table arm_opts[] =
30518{
30519  {"k",	     N_("generate PIC code"),	   &pic_code,	 1, NULL},
30520  {"mthumb", N_("assemble Thumb code"),	   &thumb_mode,	 1, NULL},
30521  {"mthumb-interwork", N_("support ARM/Thumb interworking"),
30522   &support_interwork, 1, NULL},
30523  {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
30524  {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
30525  {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
30526   1, NULL},
30527  {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
30528  {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
30529  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
30530  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
30531   NULL},
30532
30533  /* These are recognized by the assembler, but have no affect on code.	 */
30534  {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
30535  {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
30536
30537  {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
30538  {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
30539   &warn_on_deprecated, 0, NULL},
30540
30541  {"mwarn-restrict-it", N_("warn about performance deprecated IT instructions"
30542   " in ARMv8-A and ARMv8-R"), &warn_on_restrict_it, 1, NULL},
30543  {"mno-warn-restrict-it", NULL, &warn_on_restrict_it, 0, NULL},
30544
30545  {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms), TRUE, NULL},
30546  {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms), FALSE, NULL},
30547  {NULL, NULL, NULL, 0, NULL}
30548};
30549
/* Describes a deprecated legacy option.  Unlike arm_option_table, the
   target variable is a pointer to an arm_feature_set, so one option can
   select an entire CPU/architecture/FPU feature bitmask.  */
struct arm_legacy_option_table
{
  const char *              option;		/* Option name to match.  */
  const arm_feature_set	**  var;		/* Variable to change.	*/
  const arm_feature_set	    value;		/* What to change it to.  */
  const char *              deprecated;		/* If non-null, print this message.  */
};
30557
/* Legacy -m<cpu>, -m<arch> and -m<fpu> spellings.  Every entry carries a
   DEPRECATED message suggesting the modern -mcpu=/-march=/-mfpu=
   replacement.  Terminated by an all-NULL/ARM_ARCH_NONE entry.  */
const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall",	 &legacy_cpu, ARM_ANY,	       N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.  */
  {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.	 */
  {"mfpe-old",   &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10",     &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11",     &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu",    &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
30670
/* Describes one CPU accepted by -mcpu=.  Entries are built with the
   ARM_CPU_OPT macro below, which precomputes NAME_LEN.  */
struct arm_cpu_option_table
{
  const char *           name;		/* CPU name to match.  */
  size_t                 name_len;	/* strlen (name), precomputed.  */
  const arm_feature_set	 value;		/* Core feature set for this CPU.  */
  const arm_feature_set	 ext;		/* Extension features implied by the CPU.  */
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...	*/
  const arm_feature_set	 default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *           canonical_name;
};
30684
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
/* Build an arm_cpu_option_table entry; NAME_LEN is derived at compile
   time from the string literal N so it can never get out of sync.  */
#define ARM_CPU_OPT(N, CN, V, E, DF) { N, sizeof (N) - 1, V, E, DF, CN }
30688
30689static const struct arm_cpu_option_table arm_cpus[] =
30690{
30691  ARM_CPU_OPT ("all",		  NULL,		       ARM_ANY,
30692	       ARM_ARCH_NONE,
30693	       FPU_ARCH_FPA),
30694  ARM_CPU_OPT ("arm1",		  NULL,		       ARM_ARCH_V1,
30695	       ARM_ARCH_NONE,
30696	       FPU_ARCH_FPA),
30697  ARM_CPU_OPT ("arm2",		  NULL,		       ARM_ARCH_V2,
30698	       ARM_ARCH_NONE,
30699	       FPU_ARCH_FPA),
30700  ARM_CPU_OPT ("arm250",	  NULL,		       ARM_ARCH_V2S,
30701	       ARM_ARCH_NONE,
30702	       FPU_ARCH_FPA),
30703  ARM_CPU_OPT ("arm3",		  NULL,		       ARM_ARCH_V2S,
30704	       ARM_ARCH_NONE,
30705	       FPU_ARCH_FPA),
30706  ARM_CPU_OPT ("arm6",		  NULL,		       ARM_ARCH_V3,
30707	       ARM_ARCH_NONE,
30708	       FPU_ARCH_FPA),
30709  ARM_CPU_OPT ("arm60",		  NULL,		       ARM_ARCH_V3,
30710	       ARM_ARCH_NONE,
30711	       FPU_ARCH_FPA),
30712  ARM_CPU_OPT ("arm600",	  NULL,		       ARM_ARCH_V3,
30713	       ARM_ARCH_NONE,
30714	       FPU_ARCH_FPA),
30715  ARM_CPU_OPT ("arm610",	  NULL,		       ARM_ARCH_V3,
30716	       ARM_ARCH_NONE,
30717	       FPU_ARCH_FPA),
30718  ARM_CPU_OPT ("arm620",	  NULL,		       ARM_ARCH_V3,
30719	       ARM_ARCH_NONE,
30720	       FPU_ARCH_FPA),
30721  ARM_CPU_OPT ("arm7",		  NULL,		       ARM_ARCH_V3,
30722	       ARM_ARCH_NONE,
30723	       FPU_ARCH_FPA),
30724  ARM_CPU_OPT ("arm7m",		  NULL,		       ARM_ARCH_V3M,
30725	       ARM_ARCH_NONE,
30726	       FPU_ARCH_FPA),
30727  ARM_CPU_OPT ("arm7d",		  NULL,		       ARM_ARCH_V3,
30728	       ARM_ARCH_NONE,
30729	       FPU_ARCH_FPA),
30730  ARM_CPU_OPT ("arm7dm",	  NULL,		       ARM_ARCH_V3M,
30731	       ARM_ARCH_NONE,
30732	       FPU_ARCH_FPA),
30733  ARM_CPU_OPT ("arm7di",	  NULL,		       ARM_ARCH_V3,
30734	       ARM_ARCH_NONE,
30735	       FPU_ARCH_FPA),
30736  ARM_CPU_OPT ("arm7dmi",	  NULL,		       ARM_ARCH_V3M,
30737	       ARM_ARCH_NONE,
30738	       FPU_ARCH_FPA),
30739  ARM_CPU_OPT ("arm70",		  NULL,		       ARM_ARCH_V3,
30740	       ARM_ARCH_NONE,
30741	       FPU_ARCH_FPA),
30742  ARM_CPU_OPT ("arm700",	  NULL,		       ARM_ARCH_V3,
30743	       ARM_ARCH_NONE,
30744	       FPU_ARCH_FPA),
30745  ARM_CPU_OPT ("arm700i",	  NULL,		       ARM_ARCH_V3,
30746	       ARM_ARCH_NONE,
30747	       FPU_ARCH_FPA),
30748  ARM_CPU_OPT ("arm710",	  NULL,		       ARM_ARCH_V3,
30749	       ARM_ARCH_NONE,
30750	       FPU_ARCH_FPA),
30751  ARM_CPU_OPT ("arm710t",	  NULL,		       ARM_ARCH_V4T,
30752	       ARM_ARCH_NONE,
30753	       FPU_ARCH_FPA),
30754  ARM_CPU_OPT ("arm720",	  NULL,		       ARM_ARCH_V3,
30755	       ARM_ARCH_NONE,
30756	       FPU_ARCH_FPA),
30757  ARM_CPU_OPT ("arm720t",	  NULL,		       ARM_ARCH_V4T,
30758	       ARM_ARCH_NONE,
30759	       FPU_ARCH_FPA),
30760  ARM_CPU_OPT ("arm740t",	  NULL,		       ARM_ARCH_V4T,
30761	       ARM_ARCH_NONE,
30762	       FPU_ARCH_FPA),
30763  ARM_CPU_OPT ("arm710c",	  NULL,		       ARM_ARCH_V3,
30764	       ARM_ARCH_NONE,
30765	       FPU_ARCH_FPA),
30766  ARM_CPU_OPT ("arm7100",	  NULL,		       ARM_ARCH_V3,
30767	       ARM_ARCH_NONE,
30768	       FPU_ARCH_FPA),
30769  ARM_CPU_OPT ("arm7500",	  NULL,		       ARM_ARCH_V3,
30770	       ARM_ARCH_NONE,
30771	       FPU_ARCH_FPA),
30772  ARM_CPU_OPT ("arm7500fe",	  NULL,		       ARM_ARCH_V3,
30773	       ARM_ARCH_NONE,
30774	       FPU_ARCH_FPA),
30775  ARM_CPU_OPT ("arm7t",		  NULL,		       ARM_ARCH_V4T,
30776	       ARM_ARCH_NONE,
30777	       FPU_ARCH_FPA),
30778  ARM_CPU_OPT ("arm7tdmi",	  NULL,		       ARM_ARCH_V4T,
30779	       ARM_ARCH_NONE,
30780	       FPU_ARCH_FPA),
30781  ARM_CPU_OPT ("arm7tdmi-s",	  NULL,		       ARM_ARCH_V4T,
30782	       ARM_ARCH_NONE,
30783	       FPU_ARCH_FPA),
30784  ARM_CPU_OPT ("arm8",		  NULL,		       ARM_ARCH_V4,
30785	       ARM_ARCH_NONE,
30786	       FPU_ARCH_FPA),
30787  ARM_CPU_OPT ("arm810",	  NULL,		       ARM_ARCH_V4,
30788	       ARM_ARCH_NONE,
30789	       FPU_ARCH_FPA),
30790  ARM_CPU_OPT ("strongarm",	  NULL,		       ARM_ARCH_V4,
30791	       ARM_ARCH_NONE,
30792	       FPU_ARCH_FPA),
30793  ARM_CPU_OPT ("strongarm1",	  NULL,		       ARM_ARCH_V4,
30794	       ARM_ARCH_NONE,
30795	       FPU_ARCH_FPA),
30796  ARM_CPU_OPT ("strongarm110",	  NULL,		       ARM_ARCH_V4,
30797	       ARM_ARCH_NONE,
30798	       FPU_ARCH_FPA),
30799  ARM_CPU_OPT ("strongarm1100",	  NULL,		       ARM_ARCH_V4,
30800	       ARM_ARCH_NONE,
30801	       FPU_ARCH_FPA),
30802  ARM_CPU_OPT ("strongarm1110",	  NULL,		       ARM_ARCH_V4,
30803	       ARM_ARCH_NONE,
30804	       FPU_ARCH_FPA),
30805  ARM_CPU_OPT ("arm9",		  NULL,		       ARM_ARCH_V4T,
30806	       ARM_ARCH_NONE,
30807	       FPU_ARCH_FPA),
30808  ARM_CPU_OPT ("arm920",	  "ARM920T",	       ARM_ARCH_V4T,
30809	       ARM_ARCH_NONE,
30810	       FPU_ARCH_FPA),
30811  ARM_CPU_OPT ("arm920t",	  NULL,		       ARM_ARCH_V4T,
30812	       ARM_ARCH_NONE,
30813	       FPU_ARCH_FPA),
30814  ARM_CPU_OPT ("arm922t",	  NULL,		       ARM_ARCH_V4T,
30815	       ARM_ARCH_NONE,
30816	       FPU_ARCH_FPA),
30817  ARM_CPU_OPT ("arm940t",	  NULL,		       ARM_ARCH_V4T,
30818	       ARM_ARCH_NONE,
30819	       FPU_ARCH_FPA),
30820  ARM_CPU_OPT ("arm9tdmi",	  NULL,		       ARM_ARCH_V4T,
30821	       ARM_ARCH_NONE,
30822	       FPU_ARCH_FPA),
30823  ARM_CPU_OPT ("fa526",		  NULL,		       ARM_ARCH_V4,
30824	       ARM_ARCH_NONE,
30825	       FPU_ARCH_FPA),
30826  ARM_CPU_OPT ("fa626",		  NULL,		       ARM_ARCH_V4,
30827	       ARM_ARCH_NONE,
30828	       FPU_ARCH_FPA),
30829
30830  /* For V5 or later processors we default to using VFP; but the user
30831     should really set the FPU type explicitly.	 */
30832  ARM_CPU_OPT ("arm9e-r0",	  NULL,		       ARM_ARCH_V5TExP,
30833	       ARM_ARCH_NONE,
30834	       FPU_ARCH_VFP_V2),
30835  ARM_CPU_OPT ("arm9e",		  NULL,		       ARM_ARCH_V5TE,
30836	       ARM_ARCH_NONE,
30837	       FPU_ARCH_VFP_V2),
30838  ARM_CPU_OPT ("arm926ej",	  "ARM926EJ-S",	       ARM_ARCH_V5TEJ,
30839	       ARM_ARCH_NONE,
30840	       FPU_ARCH_VFP_V2),
30841  ARM_CPU_OPT ("arm926ejs",	  "ARM926EJ-S",	       ARM_ARCH_V5TEJ,
30842	       ARM_ARCH_NONE,
30843	       FPU_ARCH_VFP_V2),
30844  ARM_CPU_OPT ("arm926ej-s",	  NULL,		       ARM_ARCH_V5TEJ,
30845	       ARM_ARCH_NONE,
30846	       FPU_ARCH_VFP_V2),
30847  ARM_CPU_OPT ("arm946e-r0",	  NULL,		       ARM_ARCH_V5TExP,
30848	       ARM_ARCH_NONE,
30849	       FPU_ARCH_VFP_V2),
30850  ARM_CPU_OPT ("arm946e",	  "ARM946E-S",	       ARM_ARCH_V5TE,
30851	       ARM_ARCH_NONE,
30852	       FPU_ARCH_VFP_V2),
30853  ARM_CPU_OPT ("arm946e-s",	  NULL,		       ARM_ARCH_V5TE,
30854	       ARM_ARCH_NONE,
30855	       FPU_ARCH_VFP_V2),
30856  ARM_CPU_OPT ("arm966e-r0",	  NULL,		       ARM_ARCH_V5TExP,
30857	       ARM_ARCH_NONE,
30858	       FPU_ARCH_VFP_V2),
30859  ARM_CPU_OPT ("arm966e",	  "ARM966E-S",	       ARM_ARCH_V5TE,
30860	       ARM_ARCH_NONE,
30861	       FPU_ARCH_VFP_V2),
30862  ARM_CPU_OPT ("arm966e-s",	  NULL,		       ARM_ARCH_V5TE,
30863	       ARM_ARCH_NONE,
30864	       FPU_ARCH_VFP_V2),
30865  ARM_CPU_OPT ("arm968e-s",	  NULL,		       ARM_ARCH_V5TE,
30866	       ARM_ARCH_NONE,
30867	       FPU_ARCH_VFP_V2),
30868  ARM_CPU_OPT ("arm10t",	  NULL,		       ARM_ARCH_V5T,
30869	       ARM_ARCH_NONE,
30870	       FPU_ARCH_VFP_V1),
30871  ARM_CPU_OPT ("arm10tdmi",	  NULL,		       ARM_ARCH_V5T,
30872	       ARM_ARCH_NONE,
30873	       FPU_ARCH_VFP_V1),
30874  ARM_CPU_OPT ("arm10e",	  NULL,		       ARM_ARCH_V5TE,
30875	       ARM_ARCH_NONE,
30876	       FPU_ARCH_VFP_V2),
30877  ARM_CPU_OPT ("arm1020",	  "ARM1020E",	       ARM_ARCH_V5TE,
30878	       ARM_ARCH_NONE,
30879	       FPU_ARCH_VFP_V2),
30880  ARM_CPU_OPT ("arm1020t",	  NULL,		       ARM_ARCH_V5T,
30881	       ARM_ARCH_NONE,
30882	       FPU_ARCH_VFP_V1),
30883  ARM_CPU_OPT ("arm1020e",	  NULL,		       ARM_ARCH_V5TE,
30884	       ARM_ARCH_NONE,
30885	       FPU_ARCH_VFP_V2),
30886  ARM_CPU_OPT ("arm1022e",	  NULL,		       ARM_ARCH_V5TE,
30887	       ARM_ARCH_NONE,
30888	       FPU_ARCH_VFP_V2),
30889  ARM_CPU_OPT ("arm1026ejs",	  "ARM1026EJ-S",       ARM_ARCH_V5TEJ,
30890	       ARM_ARCH_NONE,
30891	       FPU_ARCH_VFP_V2),
30892  ARM_CPU_OPT ("arm1026ej-s",	  NULL,		       ARM_ARCH_V5TEJ,
30893	       ARM_ARCH_NONE,
30894	       FPU_ARCH_VFP_V2),
30895  ARM_CPU_OPT ("fa606te",	  NULL,		       ARM_ARCH_V5TE,
30896	       ARM_ARCH_NONE,
30897	       FPU_ARCH_VFP_V2),
30898  ARM_CPU_OPT ("fa616te",	  NULL,		       ARM_ARCH_V5TE,
30899	       ARM_ARCH_NONE,
30900	       FPU_ARCH_VFP_V2),
30901  ARM_CPU_OPT ("fa626te",	  NULL,		       ARM_ARCH_V5TE,
30902	       ARM_ARCH_NONE,
30903	       FPU_ARCH_VFP_V2),
30904  ARM_CPU_OPT ("fmp626",	  NULL,		       ARM_ARCH_V5TE,
30905	       ARM_ARCH_NONE,
30906	       FPU_ARCH_VFP_V2),
30907  ARM_CPU_OPT ("fa726te",	  NULL,		       ARM_ARCH_V5TE,
30908	       ARM_ARCH_NONE,
30909	       FPU_ARCH_VFP_V2),
30910  ARM_CPU_OPT ("arm1136js",	  "ARM1136J-S",	       ARM_ARCH_V6,
30911	       ARM_ARCH_NONE,
30912	       FPU_NONE),
30913  ARM_CPU_OPT ("arm1136j-s",	  NULL,		       ARM_ARCH_V6,
30914	       ARM_ARCH_NONE,
30915	       FPU_NONE),
30916  ARM_CPU_OPT ("arm1136jfs",	  "ARM1136JF-S",       ARM_ARCH_V6,
30917	       ARM_ARCH_NONE,
30918	       FPU_ARCH_VFP_V2),
30919  ARM_CPU_OPT ("arm1136jf-s",	  NULL,		       ARM_ARCH_V6,
30920	       ARM_ARCH_NONE,
30921	       FPU_ARCH_VFP_V2),
30922  ARM_CPU_OPT ("mpcore",	  "MPCore",	       ARM_ARCH_V6K,
30923	       ARM_ARCH_NONE,
30924	       FPU_ARCH_VFP_V2),
30925  ARM_CPU_OPT ("mpcorenovfp",	  "MPCore",	       ARM_ARCH_V6K,
30926	       ARM_ARCH_NONE,
30927	       FPU_NONE),
30928  ARM_CPU_OPT ("arm1156t2-s",	  NULL,		       ARM_ARCH_V6T2,
30929	       ARM_ARCH_NONE,
30930	       FPU_NONE),
30931  ARM_CPU_OPT ("arm1156t2f-s",	  NULL,		       ARM_ARCH_V6T2,
30932	       ARM_ARCH_NONE,
30933	       FPU_ARCH_VFP_V2),
30934  ARM_CPU_OPT ("arm1176jz-s",	  NULL,		       ARM_ARCH_V6KZ,
30935	       ARM_ARCH_NONE,
30936	       FPU_NONE),
30937  ARM_CPU_OPT ("arm1176jzf-s",	  NULL,		       ARM_ARCH_V6KZ,
30938	       ARM_ARCH_NONE,
30939	       FPU_ARCH_VFP_V2),
30940  ARM_CPU_OPT ("cortex-a5",	  "Cortex-A5",	       ARM_ARCH_V7A,
30941	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
30942	       FPU_NONE),
30943  ARM_CPU_OPT ("cortex-a7",	  "Cortex-A7",	       ARM_ARCH_V7VE,
30944	       ARM_ARCH_NONE,
30945	       FPU_ARCH_NEON_VFP_V4),
30946  ARM_CPU_OPT ("cortex-a8",	  "Cortex-A8",	       ARM_ARCH_V7A,
30947	       ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
30948	       ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
30949  ARM_CPU_OPT ("cortex-a9",	  "Cortex-A9",	       ARM_ARCH_V7A,
30950	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
30951	       ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
30952  ARM_CPU_OPT ("cortex-a12",	  "Cortex-A12",	       ARM_ARCH_V7VE,
30953	       ARM_ARCH_NONE,
30954	       FPU_ARCH_NEON_VFP_V4),
30955  ARM_CPU_OPT ("cortex-a15",	  "Cortex-A15",	       ARM_ARCH_V7VE,
30956	       ARM_ARCH_NONE,
30957	       FPU_ARCH_NEON_VFP_V4),
30958  ARM_CPU_OPT ("cortex-a17",	  "Cortex-A17",	       ARM_ARCH_V7VE,
30959	       ARM_ARCH_NONE,
30960	       FPU_ARCH_NEON_VFP_V4),
30961  ARM_CPU_OPT ("cortex-a32",	  "Cortex-A32",	       ARM_ARCH_V8A,
30962	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
30963	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
30964  ARM_CPU_OPT ("cortex-a35",	  "Cortex-A35",	       ARM_ARCH_V8A,
30965	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
30966	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
30967  ARM_CPU_OPT ("cortex-a53",	  "Cortex-A53",	       ARM_ARCH_V8A,
30968	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
30969	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
30970  ARM_CPU_OPT ("cortex-a55",    "Cortex-A55",	       ARM_ARCH_V8_2A,
30971	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
30972	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
30973  ARM_CPU_OPT ("cortex-a57",	  "Cortex-A57",	       ARM_ARCH_V8A,
30974	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
30975	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
30976  ARM_CPU_OPT ("cortex-a72",	  "Cortex-A72",	       ARM_ARCH_V8A,
30977	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
30978	      FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
30979  ARM_CPU_OPT ("cortex-a73",	  "Cortex-A73",	       ARM_ARCH_V8A,
30980	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
30981	      FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
30982  ARM_CPU_OPT ("cortex-a75",    "Cortex-A75",	       ARM_ARCH_V8_2A,
30983	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
30984	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
30985  ARM_CPU_OPT ("cortex-a76",    "Cortex-A76",	       ARM_ARCH_V8_2A,
30986	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
30987	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
30988  ARM_CPU_OPT ("cortex-a76ae",    "Cortex-A76AE",      ARM_ARCH_V8_2A,
30989	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
30990	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
30991  ARM_CPU_OPT ("cortex-a77",    "Cortex-A77",	       ARM_ARCH_V8_2A,
30992	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
30993	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
30994  ARM_CPU_OPT ("ares",    "Ares",	       ARM_ARCH_V8_2A,
30995	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
30996	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
30997  ARM_CPU_OPT ("cortex-r4",	  "Cortex-R4",	       ARM_ARCH_V7R,
30998	       ARM_ARCH_NONE,
30999	       FPU_NONE),
31000  ARM_CPU_OPT ("cortex-r4f",	  "Cortex-R4F",	       ARM_ARCH_V7R,
31001	       ARM_ARCH_NONE,
31002	       FPU_ARCH_VFP_V3D16),
31003  ARM_CPU_OPT ("cortex-r5",	  "Cortex-R5",	       ARM_ARCH_V7R,
31004	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
31005	       FPU_NONE),
31006  ARM_CPU_OPT ("cortex-r7",	  "Cortex-R7",	       ARM_ARCH_V7R,
31007	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
31008	       FPU_ARCH_VFP_V3D16),
31009  ARM_CPU_OPT ("cortex-r8",	  "Cortex-R8",	       ARM_ARCH_V7R,
31010	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
31011	       FPU_ARCH_VFP_V3D16),
31012  ARM_CPU_OPT ("cortex-r52",	  "Cortex-R52",	       ARM_ARCH_V8R,
31013	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
31014	      FPU_ARCH_NEON_VFP_ARMV8),
31015  ARM_CPU_OPT ("cortex-m35p",	  "Cortex-M35P",       ARM_ARCH_V8M_MAIN,
31016	       ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
31017	       FPU_NONE),
31018  ARM_CPU_OPT ("cortex-m33",	  "Cortex-M33",	       ARM_ARCH_V8M_MAIN,
31019	       ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
31020	       FPU_NONE),
31021  ARM_CPU_OPT ("cortex-m23",	  "Cortex-M23",	       ARM_ARCH_V8M_BASE,
31022	       ARM_ARCH_NONE,
31023	       FPU_NONE),
31024  ARM_CPU_OPT ("cortex-m7",	  "Cortex-M7",	       ARM_ARCH_V7EM,
31025	       ARM_ARCH_NONE,
31026	       FPU_NONE),
31027  ARM_CPU_OPT ("cortex-m4",	  "Cortex-M4",	       ARM_ARCH_V7EM,
31028	       ARM_ARCH_NONE,
31029	       FPU_NONE),
31030  ARM_CPU_OPT ("cortex-m3",	  "Cortex-M3",	       ARM_ARCH_V7M,
31031	       ARM_ARCH_NONE,
31032	       FPU_NONE),
31033  ARM_CPU_OPT ("cortex-m1",	  "Cortex-M1",	       ARM_ARCH_V6SM,
31034	       ARM_ARCH_NONE,
31035	       FPU_NONE),
31036  ARM_CPU_OPT ("cortex-m0",	  "Cortex-M0",	       ARM_ARCH_V6SM,
31037	       ARM_ARCH_NONE,
31038	       FPU_NONE),
31039  ARM_CPU_OPT ("cortex-m0plus",	  "Cortex-M0+",	       ARM_ARCH_V6SM,
31040	       ARM_ARCH_NONE,
31041	       FPU_NONE),
31042  ARM_CPU_OPT ("exynos-m1",	  "Samsung Exynos M1", ARM_ARCH_V8A,
31043	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
31044	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
31045  ARM_CPU_OPT ("neoverse-n1",    "Neoverse N1",	       ARM_ARCH_V8_2A,
31046	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
31047	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
31048  /* ??? XSCALE is really an architecture.  */
31049  ARM_CPU_OPT ("xscale",	  NULL,		       ARM_ARCH_XSCALE,
31050	       ARM_ARCH_NONE,
31051	       FPU_ARCH_VFP_V2),
31052
31053  /* ??? iwmmxt is not a processor.  */
31054  ARM_CPU_OPT ("iwmmxt",	  NULL,		       ARM_ARCH_IWMMXT,
31055	       ARM_ARCH_NONE,
31056	       FPU_ARCH_VFP_V2),
31057  ARM_CPU_OPT ("iwmmxt2",	  NULL,		       ARM_ARCH_IWMMXT2,
31058	       ARM_ARCH_NONE,
31059	       FPU_ARCH_VFP_V2),
31060  ARM_CPU_OPT ("i80200",	  NULL,		       ARM_ARCH_XSCALE,
31061	       ARM_ARCH_NONE,
31062	       FPU_ARCH_VFP_V2),
31063
31064  /* Maverick.  */
31065  ARM_CPU_OPT ("ep9312",	  "ARM920T",
31066	       ARM_FEATURE_LOW (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
31067	       ARM_ARCH_NONE, FPU_ARCH_MAVERICK),
31068
31069  /* Marvell processors.  */
31070  ARM_CPU_OPT ("marvell-pj4",	  NULL,		       ARM_ARCH_V7A,
31071	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
31072	       FPU_ARCH_VFP_V3D16),
31073  ARM_CPU_OPT ("marvell-whitney", NULL,		       ARM_ARCH_V7A,
31074	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
31075	       FPU_ARCH_NEON_VFP_V4),
31076
31077  /* APM X-Gene family.  */
31078  ARM_CPU_OPT ("xgene1",	  "APM X-Gene 1",      ARM_ARCH_V8A,
31079	       ARM_ARCH_NONE,
31080	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
31081  ARM_CPU_OPT ("xgene2",	  "APM X-Gene 2",      ARM_ARCH_V8A,
31082	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
31083	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
31084
31085  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
31086};
31087#undef ARM_CPU_OPT
31088
/* An entry in an architecture-specific extension table: maps an
   extension name onto the feature bits enabled by +NAME and the bits
   disabled by +noNAME.  */
struct arm_ext_table
{
  /* Extension name as written after '+', e.g. "fp" or "simd".  */
  const char *		  name;
  /* strlen (NAME), precomputed so table scans avoid repeated strlen.  */
  size_t		  name_len;
  /* Feature bits to OR in for +NAME.  ARM_ARCH_NONE if the entry only
     supports the +noNAME form.  */
  const arm_feature_set	  merge;
  /* Feature bits to clear for +noNAME.  ARM_ARCH_NONE if the entry only
     supports the +NAME form.  */
  const arm_feature_set	  clear;
};
31096
/* An entry in the -march= option table: an architecture name, its
   feature set, the FPU selected by default, and (optionally) the table
   of per-architecture extensions accepted after a '+'.  */
struct arm_arch_option_table
{
  /* Architecture name, e.g. "armv7-a".  */
  const char *			name;
  /* strlen (NAME), precomputed for table scans.  */
  size_t			name_len;
  /* Feature bits implied by this architecture.  */
  const arm_feature_set		value;
  /* FPU architecture selected when none is given explicitly.  */
  const arm_feature_set		default_fpu;
  /* Architecture-specific extension table, or NULL if this
     architecture takes no +ext suffixes.  */
  const struct arm_ext_table *	ext_table;
};
31105
/* Used to add support for +E and +noE extension.  */
#define ARM_EXT(E, M, C) { E, sizeof (E) - 1, M, C }
/* Used to add support for a +E extension.  */
#define ARM_ADD(E, M) { E, sizeof (E) - 1, M, ARM_ARCH_NONE }
/* Used to add support for a +noE extension.  */
#define ARM_REMOVE(E, C) { E, sizeof (E) - 1, ARM_ARCH_NONE, C }

/* Every FP-related feature bit: all VFP/Neon coprocessor bits except the
   FPU endianness selector, plus the FP16 core instruction bits.  Used as
   the "clear" mask when an extension turns floating point off.  */
#define ALL_FP ARM_FEATURE (0, ARM_EXT2_FP16_INST | ARM_EXT2_FP16_FML, \
			    ~0 & ~FPU_ENDIAN_PURE)
31115
/* Extension table shared by the ARMv5TE..ARMv6 entries in arm_archs
   below: only the FP unit can be toggled.  */
static const struct arm_ext_table armv5te_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V2, ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31121
/* Extension table for the plain "armv7" entry in arm_archs below.  */
static const struct arm_ext_table armv7_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31127
/* Extension table for "armv7ve": FP/SIMD variants plus the legacy FPU
   names accepted as aliases.  */
static const struct arm_ext_table armv7ve_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V4D16, ALL_FP),
  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16),
  ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3),
  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
  ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16),
  ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16),  /* Alias for +fp.  */
  ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4),

  ARM_EXT ("simd", FPU_ARCH_NEON_VFP_V4,
	   ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_NEON_EXT_FMA)),

  /* Aliases for +simd.  */
  ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4),

  ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
  ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
  ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16),

  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31150
/* Extension table for "armv7-a"/"armv7a": FP/SIMD variants plus the MP
   and Security core extensions.  */
static const struct arm_ext_table armv7a_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16), /* Alias for +fp.  */
  ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3),
  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
  ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16),
  ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16),
  ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4),

  ARM_EXT ("simd", FPU_ARCH_VFP_V3_PLUS_NEON_V1,
	   ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_NEON_EXT_FMA)),

  /* Aliases for +simd.  */
  ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
  ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1),

  ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16),
  ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4),

  ARM_ADD ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP)),
  ARM_ADD ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31175
/* Extension table for "armv7-r"/"armv7r": single/double precision FP
   variants plus the hardware divide extension.  */
static const struct arm_ext_table armv7r_ext_table[] =
{
  ARM_ADD ("fp.sp", FPU_ARCH_VFP_V3xD),
  ARM_ADD ("vfpv3xd", FPU_ARCH_VFP_V3xD), /* Alias for +fp.sp.  */
  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16), /* Alias for +fp.  */
  ARM_ADD ("vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16),
  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
  ARM_EXT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
	   ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31188
/* Extension table for "armv7e-m".  */
static const struct arm_ext_table armv7em_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V4_SP_D16, ALL_FP),
  /* Alias for +fp, used to be known as fpv4-sp-d16.  */
  ARM_ADD ("vfpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16),
  ARM_ADD ("fpv5", FPU_ARCH_VFP_V5_SP_D16),
  ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16),
  ARM_ADD ("fpv5-d16", FPU_ARCH_VFP_V5D16),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31199
/* Extension table for "armv8-a".  */
static const struct arm_ext_table armv8a_ext_table[] =
{
  ARM_ADD ("crc", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC)),
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31214
31215
/* Extension table for "armv8.1-a".  */
static const struct arm_ext_table armv81a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31229
/* Extension table shared by the "armv8.2-a" and "armv8.3-a" entries in
   arm_archs below.  */
static const struct arm_ext_table armv82a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_2_FP16),
  ARM_ADD ("fp16fml", FPU_ARCH_NEON_VFP_ARMV8_2_FP16FML),
  ARM_ADD ("bf16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16)),
  ARM_ADD ("i8mm", ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM)),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
  ARM_ADD ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31248
/* Extension table for "armv8.4-a".  */
static const struct arm_ext_table armv84a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML),
  ARM_ADD ("bf16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16)),
  ARM_ADD ("i8mm", ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM)),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31265
/* Extension table for "armv8.5-a".  Unlike earlier v8 tables, +sb and
   +predres are absent here: those features are part of the base
   architecture from Armv8.5 onwards.  */
static const struct arm_ext_table armv85a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML),
  ARM_ADD ("bf16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16)),
  ARM_ADD ("i8mm", ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM)),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31280
/* Extension table for "armv8.6-a".  */
static const struct arm_ext_table armv86a_ext_table[] =
{
  ARM_ADD ("i8mm", ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31286
/* Extension table for "armv8-m.main".  */
static const struct arm_ext_table armv8m_main_ext_table[] =
{
  ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_AEXT_V8M_MAIN_DSP),
		  ARM_FEATURE_CORE_LOW (ARM_AEXT_V8M_MAIN_DSP)),
  ARM_EXT ("fp", FPU_ARCH_VFP_V5_SP_D16, ALL_FP),
  ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31295
31296
/* Extension table for "armv8.1-m.main", including the MVE (Helium)
   extensions.  Note +mve.fp also pulls in the DSP and FP16 bits.  */
static const struct arm_ext_table armv8_1m_main_ext_table[] =
{
  ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_AEXT_V8M_MAIN_DSP),
		  ARM_FEATURE_CORE_LOW (ARM_AEXT_V8M_MAIN_DSP)),
  ARM_EXT ("fp",
	   ARM_FEATURE (0, ARM_EXT2_FP16_INST,
			FPU_VFP_V5_SP_D16 | FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA),
	   ALL_FP),
  ARM_ADD ("fp.dp",
	   ARM_FEATURE (0, ARM_EXT2_FP16_INST,
			FPU_VFP_V5D16 | FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA)),
  ARM_EXT ("mve", ARM_FEATURE (ARM_AEXT_V8M_MAIN_DSP, ARM_EXT2_MVE, 0),
	   ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE | ARM_EXT2_MVE_FP)),
  ARM_ADD ("mve.fp",
	   ARM_FEATURE (ARM_AEXT_V8M_MAIN_DSP,
			ARM_EXT2_FP16_INST | ARM_EXT2_MVE | ARM_EXT2_MVE_FP,
			FPU_VFP_V5_SP_D16 | FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31316
/* Extension table for "armv8-r".  */
static const struct arm_ext_table armv8r_ext_table[] =
{
  ARM_ADD ("crc", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC)),
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("fp.sp", FPU_ARCH_VFP_V5_SP_D16),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31327
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
/* ARM_ARCH_OPT defines an architecture with no +ext suffix support;
   ARM_ARCH_OPT2 additionally attaches the named *_ext_table.  */
#define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF, NULL }
#define ARM_ARCH_OPT2(N, V, DF, ext) \
  { N, sizeof (N) - 1, V, DF, ext##_ext_table }

static const struct arm_arch_option_table arm_archs[] =
{
  ARM_ARCH_OPT ("all",		  ARM_ANY,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv1",	  ARM_ARCH_V1,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2",	  ARM_ARCH_V2,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2a",	  ARM_ARCH_V2S,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2s",	  ARM_ARCH_V2S,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3",	  ARM_ARCH_V3,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3m",	  ARM_ARCH_V3M,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4",	  ARM_ARCH_V4,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4xm",	  ARM_ARCH_V4xM,	FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4t",	  ARM_ARCH_V4T,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4txm",	  ARM_ARCH_V4TxM,	FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv5",	  ARM_ARCH_V5,		FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5t",	  ARM_ARCH_V5T,		FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5txm",	  ARM_ARCH_V5TxM,	FPU_ARCH_VFP),
  ARM_ARCH_OPT2 ("armv5te",	  ARM_ARCH_V5TE,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv5texp",	  ARM_ARCH_V5TExP,	FPU_ARCH_VFP, armv5te),
  ARM_ARCH_OPT2 ("armv5tej",	  ARM_ARCH_V5TEJ,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6",	  ARM_ARCH_V6,		FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6j",	  ARM_ARCH_V6,		FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6k",	  ARM_ARCH_V6K,		FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6z",	  ARM_ARCH_V6Z,		FPU_ARCH_VFP,	armv5te),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT2 ("armv6kz",	  ARM_ARCH_V6KZ,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6zk",	  ARM_ARCH_V6KZ,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6t2",	  ARM_ARCH_V6T2,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6kt2",	  ARM_ARCH_V6KT2,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6zt2",	  ARM_ARCH_V6ZT2,	FPU_ARCH_VFP,	armv5te),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT2 ("armv6kzt2",	  ARM_ARCH_V6KZT2,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6zkt2",	  ARM_ARCH_V6KZT2,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT ("armv6-m",	  ARM_ARCH_V6M,		FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6s-m",	  ARM_ARCH_V6SM,	FPU_ARCH_VFP),
  ARM_ARCH_OPT2 ("armv7",	  ARM_ARCH_V7,		FPU_ARCH_VFP, armv7),
  /* The official spelling of the ARMv7 profile variants is the dashed form.
     Accept the non-dashed form for compatibility with old toolchains.  */
  ARM_ARCH_OPT2 ("armv7a",	  ARM_ARCH_V7A,		FPU_ARCH_VFP, armv7a),
  ARM_ARCH_OPT2 ("armv7ve",	  ARM_ARCH_V7VE,	FPU_ARCH_VFP, armv7ve),
  ARM_ARCH_OPT2 ("armv7r",	  ARM_ARCH_V7R,		FPU_ARCH_VFP, armv7r),
  ARM_ARCH_OPT ("armv7m",	  ARM_ARCH_V7M,		FPU_ARCH_VFP),
  ARM_ARCH_OPT2 ("armv7-a",	  ARM_ARCH_V7A,		FPU_ARCH_VFP, armv7a),
  ARM_ARCH_OPT2 ("armv7-r",	  ARM_ARCH_V7R,		FPU_ARCH_VFP, armv7r),
  ARM_ARCH_OPT ("armv7-m",	  ARM_ARCH_V7M,		FPU_ARCH_VFP),
  ARM_ARCH_OPT2 ("armv7e-m",	  ARM_ARCH_V7EM,	FPU_ARCH_VFP, armv7em),
  ARM_ARCH_OPT ("armv8-m.base",	  ARM_ARCH_V8M_BASE,	FPU_ARCH_VFP),
  ARM_ARCH_OPT2 ("armv8-m.main",  ARM_ARCH_V8M_MAIN,	FPU_ARCH_VFP,
		 armv8m_main),
  ARM_ARCH_OPT2 ("armv8.1-m.main", ARM_ARCH_V8_1M_MAIN,	FPU_ARCH_VFP,
		 armv8_1m_main),
  ARM_ARCH_OPT2 ("armv8-a",	  ARM_ARCH_V8A,		FPU_ARCH_VFP, armv8a),
  ARM_ARCH_OPT2 ("armv8.1-a",	  ARM_ARCH_V8_1A,	FPU_ARCH_VFP, armv81a),
  ARM_ARCH_OPT2 ("armv8.2-a",	  ARM_ARCH_V8_2A,	FPU_ARCH_VFP, armv82a),
  ARM_ARCH_OPT2 ("armv8.3-a",	  ARM_ARCH_V8_3A,	FPU_ARCH_VFP, armv82a),
  ARM_ARCH_OPT2 ("armv8-r",	  ARM_ARCH_V8R,		FPU_ARCH_VFP, armv8r),
  ARM_ARCH_OPT2 ("armv8.4-a",	  ARM_ARCH_V8_4A,	FPU_ARCH_VFP, armv84a),
  ARM_ARCH_OPT2 ("armv8.5-a",	  ARM_ARCH_V8_5A,	FPU_ARCH_VFP, armv85a),
  ARM_ARCH_OPT2 ("armv8.6-a",	  ARM_ARCH_V8_6A,	FPU_ARCH_VFP, armv86a),
  ARM_ARCH_OPT ("xscale",	  ARM_ARCH_XSCALE,	FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt",	  ARM_ARCH_IWMMXT,	FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt2",	  ARM_ARCH_IWMMXT2,	FPU_ARCH_VFP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
};
#undef ARM_ARCH_OPT
31400
/* ISA extensions in the co-processor and main instruction set space.  */

struct arm_option_extension_value_table
{
  /* Extension name as written after '+'.  */
  const char *           name;
  /* strlen (NAME), precomputed for table scans.  */
  size_t                 name_len;
  /* Feature bits enabled by +NAME.  */
  const arm_feature_set  merge_value;
  /* Feature bits disabled by +noNAME.  */
  const arm_feature_set  clear_value;
  /* List of architectures for which an extension is available.  ARM_ARCH_NONE
     indicates that an extension is available for all architectures while
     ARM_ANY marks an empty entry.  */
  const arm_feature_set  allowed_archs[2];
};
31414
/* The following table must be in alphabetical order with a NULL last entry.  */

/* ARM_EXT_OPT takes a single allowed-architecture set; ARM_EXT_OPT2
   takes two alternatives, either of which permits the extension.  */
#define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
#define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }

/* DEPRECATED: Refrain from using this table to add any new extensions, instead
   use the context sensitive approach using arm_ext_table's.  */
static const struct arm_option_extension_value_table arm_extensions[] =
{
  ARM_EXT_OPT ("crc",	 ARM_FEATURE_CORE_HIGH(ARM_EXT2_CRC),
			 ARM_FEATURE_CORE_HIGH(ARM_EXT2_CRC),
			 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
			 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8),
				   ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8,
			  ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD),
			  ARM_ARCH_V8_2A),
  ARM_EXT_OPT ("dsp",	ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
			ARM_FEATURE_CORE (ARM_EXT_V7M, ARM_EXT2_V8M)),
  ARM_EXT_OPT ("fp",     FPU_ARCH_VFP_ARMV8, ARM_FEATURE_COPROC (FPU_VFP_ARMV8),
				   ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("fp16",  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
			ARM_ARCH_V8_2A),
  ARM_EXT_OPT ("fp16fml",  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
						  | ARM_EXT2_FP16_FML),
			   ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
						  | ARM_EXT2_FP16_FML),
			   ARM_ARCH_V8_2A),
  ARM_EXT_OPT2 ("idiv",	ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
  /* Duplicate entry for the purpose of allowing ARMv7 to match in presence of
     Thumb divide instruction.  Due to this having the same name as the
     previous entry, this will be ignored when doing command-line parsing and
     only considered by build attribute selection code.  */
  ARM_EXT_OPT ("idiv",	ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7)),
  ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT),
			ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT), ARM_ARCH_NONE),
  ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2),
			ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2), ARM_ARCH_NONE),
  ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK),
			ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK), ARM_ARCH_NONE),
  ARM_EXT_OPT2 ("mp",	ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
  ARM_EXT_OPT ("os",	ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
			ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
				   ARM_FEATURE_CORE_LOW (ARM_EXT_V6M)),
  ARM_EXT_OPT ("pan",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN),
			ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_PAN, 0),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
  ARM_EXT_OPT ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES),
			ARM_ARCH_V8A),
  ARM_EXT_OPT ("ras",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS),
			ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_RAS, 0),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
  ARM_EXT_OPT ("rdma",  FPU_ARCH_NEON_VFP_ARMV8_1,
			ARM_FEATURE_COPROC (FPU_NEON_ARMV8 | FPU_NEON_EXT_RDMA),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
  ARM_EXT_OPT ("sb",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB),
			ARM_ARCH_V8A),
  ARM_EXT_OPT2 ("sec",	ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
			ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
  ARM_EXT_OPT ("simd",  FPU_ARCH_NEON_VFP_ARMV8,
			ARM_FEATURE_COPROC (FPU_NEON_ARMV8),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("virt",	ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT | ARM_EXT_ADIV
				     | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT),
				   ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
  ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE),
			ARM_FEATURE_COPROC (ARM_CEXT_XSCALE), ARM_ARCH_NONE),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, { ARM_ARCH_NONE, ARM_ARCH_NONE } }
};
#undef ARM_EXT_OPT
31501
/* ISA floating-point and Advanced SIMD extensions.  */
struct arm_option_fpu_value_table
{
  /* FPU name accepted by -mfpu=.  */
  const char *           name;
  /* Feature bits selected by that FPU.  */
  const arm_feature_set  value;
};
31508
/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  Terminated by a NULL-name entry.  */
static const struct arm_option_fpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",		FPU_ARCH_VFP_V3}, /* Undocumented, use vfpv3.  */
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"vfpv2",		FPU_ARCH_VFP_V2},
  {"vfpv3",		FPU_ARCH_VFP_V3},
  {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
  {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
  {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
  {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2}, /* Undocumented, use arm1136jf-s.  */
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",		FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-vfpv3",	FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16",		FPU_ARCH_NEON_FP16},
  {"vfpv4",		FPU_ARCH_VFP_V4},
  {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
  {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
  {"fpv5-d16",		FPU_ARCH_VFP_V5D16},
  {"fpv5-sp-d16",	FPU_ARCH_VFP_V5_SP_D16},
  {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
  {"fp-armv8",		FPU_ARCH_VFP_ARMV8},
  {"neon-fp-armv8",	FPU_ARCH_NEON_VFP_ARMV8},
  {"crypto-neon-fp-armv8",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
  {"neon-fp-armv8.1",	FPU_ARCH_NEON_VFP_ARMV8_1},
  {"crypto-neon-fp-armv8.1",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1},
  {NULL,		ARM_ARCH_NONE}
};
31559
/* Generic name -> integer value mapping used for simple option tables
   (float ABI, EABI version).  */
struct arm_option_value_table
{
  const char *name;
  long value;
};
31565
/* Float ABI names accepted by -mfloat-abi=.  Terminated by a NULL
   entry.  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  {NULL,	0}
};
31573
#ifdef OBJ_ELF
/* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
/* EABI version names accepted by -meabi=, mapped onto ELF header
   flag values.  Terminated by a NULL entry.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  {NULL,	0}
};
#endif
31584
/* An entry describing a long-form command-line option handled by a
   dedicated parsing function.  */
struct arm_long_option_table
{
  const char * option;			/* Substring to match.	*/
  const char * help;			/* Help information.  */
  int (* func) (const char * subopt);	/* Function to decode sub-option.  */
  const char * deprecated;		/* If non-null, print this message.  */
};
31592
31593static bfd_boolean
31594arm_parse_extension (const char *str, const arm_feature_set *opt_set,
31595		     arm_feature_set *ext_set,
31596		     const struct arm_ext_table *ext_table)
31597{
31598  /* We insist on extensions being specified in alphabetical order, and with
31599     extensions being added before being removed.  We achieve this by having
31600     the global ARM_EXTENSIONS table in alphabetical order, and using the
31601     ADDING_VALUE variable to indicate whether we are adding an extension (1)
31602     or removing it (0) and only allowing it to change in the order
31603     -1 -> 1 -> 0.  */
31604  const struct arm_option_extension_value_table * opt = NULL;
31605  const arm_feature_set arm_any = ARM_ANY;
31606  int adding_value = -1;
31607
31608  while (str != NULL && *str != 0)
31609    {
31610      const char *ext;
31611      size_t len;
31612
31613      if (*str != '+')
31614	{
31615	  as_bad (_("invalid architectural extension"));
31616	  return FALSE;
31617	}
31618
31619      str++;
31620      ext = strchr (str, '+');
31621
31622      if (ext != NULL)
31623	len = ext - str;
31624      else
31625	len = strlen (str);
31626
31627      if (len >= 2 && strncmp (str, "no", 2) == 0)
31628	{
31629	  if (adding_value != 0)
31630	    {
31631	      adding_value = 0;
31632	      opt = arm_extensions;
31633	    }
31634
31635	  len -= 2;
31636	  str += 2;
31637	}
31638      else if (len > 0)
31639	{
31640	  if (adding_value == -1)
31641	    {
31642	      adding_value = 1;
31643	      opt = arm_extensions;
31644	    }
31645	  else if (adding_value != 1)
31646	    {
31647	      as_bad (_("must specify extensions to add before specifying "
31648			"those to remove"));
31649	      return FALSE;
31650	    }
31651	}
31652
31653      if (len == 0)
31654	{
31655	  as_bad (_("missing architectural extension"));
31656	  return FALSE;
31657	}
31658
31659      gas_assert (adding_value != -1);
31660      gas_assert (opt != NULL);
31661
31662      if (ext_table != NULL)
31663	{
31664	  const struct arm_ext_table * ext_opt = ext_table;
31665	  bfd_boolean found = FALSE;
31666	  for (; ext_opt->name != NULL; ext_opt++)
31667	    if (ext_opt->name_len == len
31668		&& strncmp (ext_opt->name, str, len) == 0)
31669	      {
31670		if (adding_value)
31671		  {
31672		    if (ARM_FEATURE_ZERO (ext_opt->merge))
31673			/* TODO: Option not supported.  When we remove the
31674			   legacy table this case should error out.  */
31675			continue;
31676
31677		    ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, ext_opt->merge);
31678		  }
31679		else
31680		  {
31681		    if (ARM_FEATURE_ZERO (ext_opt->clear))
31682			/* TODO: Option not supported.  When we remove the
31683			   legacy table this case should error out.  */
31684			continue;
31685		    ARM_CLEAR_FEATURE (*ext_set, *ext_set, ext_opt->clear);
31686		  }
31687		found = TRUE;
31688		break;
31689	      }
31690	  if (found)
31691	    {
31692	      str = ext;
31693	      continue;
31694	    }
31695	}
31696
31697      /* Scan over the options table trying to find an exact match. */
31698      for (; opt->name != NULL; opt++)
31699	if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
31700	  {
31701	    int i, nb_allowed_archs =
31702	      sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
31703	    /* Check we can apply the extension to this architecture.  */
31704	    for (i = 0; i < nb_allowed_archs; i++)
31705	      {
31706		/* Empty entry.  */
31707		if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any))
31708		  continue;
31709		if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *opt_set))
31710		  break;
31711	      }
31712	    if (i == nb_allowed_archs)
31713	      {
31714		as_bad (_("extension does not apply to the base architecture"));
31715		return FALSE;
31716	      }
31717
31718	    /* Add or remove the extension.  */
31719	    if (adding_value)
31720	      ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->merge_value);
31721	    else
31722	      ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->clear_value);
31723
31724	    /* Allowing Thumb division instructions for ARMv7 in autodetection
31725	       rely on this break so that duplicate extensions (extensions
31726	       with the same name as a previous extension in the list) are not
31727	       considered for command-line parsing.  */
31728	    break;
31729	  }
31730
31731      if (opt->name == NULL)
31732	{
31733	  /* Did we fail to find an extension because it wasn't specified in
31734	     alphabetical order, or because it does not exist?  */
31735
31736	  for (opt = arm_extensions; opt->name != NULL; opt++)
31737	    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
31738	      break;
31739
31740	  if (opt->name == NULL)
31741	    as_bad (_("unknown architectural extension `%s'"), str);
31742	  else
31743	    as_bad (_("architectural extensions must be specified in "
31744		      "alphabetical order"));
31745
31746	  return FALSE;
31747	}
31748      else
31749	{
31750	  /* We should skip the extension we've just matched the next time
31751	     round.  */
31752	  opt++;
31753	}
31754
31755      str = ext;
31756    };
31757
31758  return TRUE;
31759}
31760
31761static bfd_boolean
31762arm_parse_fp16_opt (const char *str)
31763{
31764  if (strcasecmp (str, "ieee") == 0)
31765    fp16_format = ARM_FP16_FORMAT_IEEE;
31766  else if (strcasecmp (str, "alternative") == 0)
31767    fp16_format = ARM_FP16_FORMAT_ALTERNATIVE;
31768  else
31769    {
31770      as_bad (_("unrecognised float16 format \"%s\""), str);
31771      return FALSE;
31772    }
31773
31774  return TRUE;
31775}
31776
31777static bfd_boolean
31778arm_parse_cpu (const char *str)
31779{
31780  const struct arm_cpu_option_table *opt;
31781  const char *ext = strchr (str, '+');
31782  size_t len;
31783
31784  if (ext != NULL)
31785    len = ext - str;
31786  else
31787    len = strlen (str);
31788
31789  if (len == 0)
31790    {
31791      as_bad (_("missing cpu name `%s'"), str);
31792      return FALSE;
31793    }
31794
31795  for (opt = arm_cpus; opt->name != NULL; opt++)
31796    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
31797      {
31798	mcpu_cpu_opt = &opt->value;
31799	if (mcpu_ext_opt == NULL)
31800	  mcpu_ext_opt = XNEW (arm_feature_set);
31801	*mcpu_ext_opt = opt->ext;
31802	mcpu_fpu_opt = &opt->default_fpu;
31803	if (opt->canonical_name)
31804	  {
31805	    gas_assert (sizeof selected_cpu_name > strlen (opt->canonical_name));
31806	    strcpy (selected_cpu_name, opt->canonical_name);
31807	  }
31808	else
31809	  {
31810	    size_t i;
31811
31812	    if (len >= sizeof selected_cpu_name)
31813	      len = (sizeof selected_cpu_name) - 1;
31814
31815	    for (i = 0; i < len; i++)
31816	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
31817	    selected_cpu_name[i] = 0;
31818	  }
31819
31820	if (ext != NULL)
31821	  return arm_parse_extension (ext, mcpu_cpu_opt, mcpu_ext_opt, NULL);
31822
31823	return TRUE;
31824      }
31825
31826  as_bad (_("unknown cpu `%s'"), str);
31827  return FALSE;
31828}
31829
31830static bfd_boolean
31831arm_parse_arch (const char *str)
31832{
31833  const struct arm_arch_option_table *opt;
31834  const char *ext = strchr (str, '+');
31835  size_t len;
31836
31837  if (ext != NULL)
31838    len = ext - str;
31839  else
31840    len = strlen (str);
31841
31842  if (len == 0)
31843    {
31844      as_bad (_("missing architecture name `%s'"), str);
31845      return FALSE;
31846    }
31847
31848  for (opt = arm_archs; opt->name != NULL; opt++)
31849    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
31850      {
31851	march_cpu_opt = &opt->value;
31852	if (march_ext_opt == NULL)
31853	  march_ext_opt = XNEW (arm_feature_set);
31854	*march_ext_opt = arm_arch_none;
31855	march_fpu_opt = &opt->default_fpu;
31856	selected_ctx_ext_table = opt->ext_table;
31857	strcpy (selected_cpu_name, opt->name);
31858
31859	if (ext != NULL)
31860	  return arm_parse_extension (ext, march_cpu_opt, march_ext_opt,
31861				      opt->ext_table);
31862
31863	return TRUE;
31864      }
31865
31866  as_bad (_("unknown architecture `%s'\n"), str);
31867  return FALSE;
31868}
31869
31870static bfd_boolean
31871arm_parse_fpu (const char * str)
31872{
31873  const struct arm_option_fpu_value_table * opt;
31874
31875  for (opt = arm_fpus; opt->name != NULL; opt++)
31876    if (streq (opt->name, str))
31877      {
31878	mfpu_opt = &opt->value;
31879	return TRUE;
31880      }
31881
31882  as_bad (_("unknown floating point format `%s'\n"), str);
31883  return FALSE;
31884}
31885
31886static bfd_boolean
31887arm_parse_float_abi (const char * str)
31888{
31889  const struct arm_option_value_table * opt;
31890
31891  for (opt = arm_float_abis; opt->name != NULL; opt++)
31892    if (streq (opt->name, str))
31893      {
31894	mfloat_abi_opt = opt->value;
31895	return TRUE;
31896      }
31897
31898  as_bad (_("unknown floating point abi `%s'\n"), str);
31899  return FALSE;
31900}
31901
31902#ifdef OBJ_ELF
31903static bfd_boolean
31904arm_parse_eabi (const char * str)
31905{
31906  const struct arm_option_value_table *opt;
31907
31908  for (opt = arm_eabis; opt->name != NULL; opt++)
31909    if (streq (opt->name, str))
31910      {
31911	meabi_flags = opt->value;
31912	return TRUE;
31913      }
31914  as_bad (_("unknown EABI `%s'\n"), str);
31915  return FALSE;
31916}
31917#endif
31918
31919static bfd_boolean
31920arm_parse_it_mode (const char * str)
31921{
31922  bfd_boolean ret = TRUE;
31923
31924  if (streq ("arm", str))
31925    implicit_it_mode = IMPLICIT_IT_MODE_ARM;
31926  else if (streq ("thumb", str))
31927    implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
31928  else if (streq ("always", str))
31929    implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
31930  else if (streq ("never", str))
31931    implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
31932  else
31933    {
31934      as_bad (_("unknown implicit IT mode `%s', should be "\
31935		"arm, thumb, always, or never."), str);
31936      ret = FALSE;
31937    }
31938
31939  return ret;
31940}
31941
31942static bfd_boolean
31943arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED)
31944{
31945  codecomposer_syntax = TRUE;
31946  arm_comment_chars[0] = ';';
31947  arm_line_separator_chars[0] = 0;
31948  return TRUE;
31949}
31950
/* Table of the "long" command-line options (those taking an inline
   argument, e.g. -mcpu=<name>, plus simple flags such as -mccs).
   Each entry pairs the option prefix with a help string and the
   callback that parses the text following the prefix.  Scanned by
   md_parse_option and printed by md_show_usage; terminated by a
   NULL entry.  */
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
   arm_parse_it_mode, NULL},
  {"mccs", N_("\t\t\t  TI CodeComposer Studio syntax compatibility mode"),
   arm_ccs_mode, NULL},
  {"mfp16-format=",
   N_("[ieee|alternative]\n\
                          set the encoding for half precision floating point "
			  "numbers to IEEE\n\
                          or Arm alternative format."),
   arm_parse_fp16_opt, NULL },
  {NULL, NULL, 0, NULL}
};
31977
/* Handle command-line option C with argument ARG (possibly NULL).
   Tries, in order: the hard-wired cases below, the short-option table
   (arm_opts), the legacy-option table (arm_legacy_opts), and finally
   the long-option table (arm_long_opts).  Returns 1 if the option was
   recognised and consumed, 0 otherwise.  */

int
md_parse_option (int c, const char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = TRUE;
      break;

#ifdef OBJ_ELF
    case OPTION_FDPIC:
      arm_fdpic = TRUE;
      break;
#endif /* OBJ_ELF */

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.	*/
      return 0;

    default:
      /* Short options: C must match the first character and ARG (if
	 any) the remainder of the option string.  */
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Legacy options: like short options, but the stored value is a
	 feature set whose address is recorded.  */
      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  ARG lacks the leading
		 option character that lopt->option includes, hence the
		 "- 1" when computing the offset of the argument text.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
32074
32075void
32076md_show_usage (FILE * fp)
32077{
32078  struct arm_option_table *opt;
32079  struct arm_long_option_table *lopt;
32080
32081  fprintf (fp, _(" ARM-specific assembler options:\n"));
32082
32083  for (opt = arm_opts; opt->option != NULL; opt++)
32084    if (opt->help != NULL)
32085      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));
32086
32087  for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
32088    if (lopt->help != NULL)
32089      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));
32090
32091#ifdef OPTION_EB
32092  fprintf (fp, _("\
32093  -EB                     assemble code for a big-endian cpu\n"));
32094#endif
32095
32096#ifdef OPTION_EL
32097  fprintf (fp, _("\
32098  -EL                     assemble code for a little-endian cpu\n"));
32099#endif
32100
32101  fprintf (fp, _("\
32102  --fix-v4bx              Allow BX in ARMv4 code\n"));
32103
32104#ifdef OBJ_ELF
32105  fprintf (fp, _("\
32106  --fdpic                 generate an FDPIC object file\n"));
32107#endif /* OBJ_ELF */
32108}
32109
32110#ifdef OBJ_ELF
32111
/* Pairing of an EABI Tag_CPU_arch value with the feature set of the
   architecture it corresponds to.  Used by the cpu_arch_ver table
   below to map feature sets onto build attribute values.  */
typedef struct
{
  int val;			/* TAG_CPU_ARCH_* value; -1 terminates the table.  */
  arm_feature_set flags;	/* Feature set of that architecture.  */
} cpu_arch_ver_table;
32117
/* Mapping from CPU features to EABI CPU arch values.  Table must be sorted
   chronologically for architectures, with an exception for ARMv6-M and
   ARMv6S-M due to legacy reasons.  No new architecture should have a
   special case.  This allows for build attribute selection results to be
   stable when new architectures are added.

   NOTE: get_aeabi_cpu_arch_from_fset walks this table in order and
   selects the first suitable entry, so the chronological ordering is
   load-bearing, not cosmetic.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V1},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V2},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V2S},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V3},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V3M},
    {TAG_CPU_ARCH_V4,	      ARM_ARCH_V4xM},
    {TAG_CPU_ARCH_V4,	      ARM_ARCH_V4},
    {TAG_CPU_ARCH_V4T,	      ARM_ARCH_V4TxM},
    {TAG_CPU_ARCH_V4T,	      ARM_ARCH_V4T},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5xM},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5TxM},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5T},
    {TAG_CPU_ARCH_V5TE,	      ARM_ARCH_V5TExP},
    {TAG_CPU_ARCH_V5TE,	      ARM_ARCH_V5TE},
    {TAG_CPU_ARCH_V5TEJ,      ARM_ARCH_V5TEJ},
    {TAG_CPU_ARCH_V6,	      ARM_ARCH_V6},
    {TAG_CPU_ARCH_V6KZ,	      ARM_ARCH_V6Z},
    {TAG_CPU_ARCH_V6KZ,	      ARM_ARCH_V6KZ},
    {TAG_CPU_ARCH_V6K,	      ARM_ARCH_V6K},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6T2},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6KT2},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6ZT2},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6KZT2},

    /* When assembling a file with only ARMv6-M or ARMv6S-M instruction, GNU as
       always selected build attributes to match those of ARMv6-M
       (resp. ARMv6S-M).  However, due to these architectures being a strict
       subset of ARMv7-M in terms of instructions available, ARMv7-M attributes
       would be selected when fully respecting chronology of architectures.
       It is thus necessary to make a special case of ARMv6-M and ARMv6S-M and
       move them before ARMv7 architectures.  */
    {TAG_CPU_ARCH_V6_M,	      ARM_ARCH_V6M},
    {TAG_CPU_ARCH_V6S_M,      ARM_ARCH_V6SM},

    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7A},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7R},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7M},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7VE},
    {TAG_CPU_ARCH_V7E_M,      ARM_ARCH_V7EM},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_1A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_2A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_3A},
    {TAG_CPU_ARCH_V8M_BASE,   ARM_ARCH_V8M_BASE},
    {TAG_CPU_ARCH_V8M_MAIN,   ARM_ARCH_V8M_MAIN},
    {TAG_CPU_ARCH_V8R,	      ARM_ARCH_V8R},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_4A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_5A},
    {TAG_CPU_ARCH_V8_1M_MAIN, ARM_ARCH_V8_1M_MAIN},
    {TAG_CPU_ARCH_V8,	    ARM_ARCH_V8_6A},
    /* Terminator.  */
    {-1,		    ARM_ARCH_NONE}
};
32179
32180/* Set an attribute if it has not already been set by the user.  */
32181
32182static void
32183aeabi_set_attribute_int (int tag, int value)
32184{
32185  if (tag < 1
32186      || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
32187      || !attributes_set_explicitly[tag])
32188    bfd_elf_add_proc_attr_int (stdoutput, tag, value);
32189}
32190
32191static void
32192aeabi_set_attribute_string (int tag, const char *value)
32193{
32194  if (tag < 1
32195      || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
32196      || !attributes_set_explicitly[tag])
32197    bfd_elf_add_proc_attr_string (stdoutput, tag, value);
32198}
32199
32200/* Return whether features in the *NEEDED feature set are available via
32201   extensions for the architecture whose feature set is *ARCH_FSET.  */
32202
32203static bfd_boolean
32204have_ext_for_needed_feat_p (const arm_feature_set *arch_fset,
32205			    const arm_feature_set *needed)
32206{
32207  int i, nb_allowed_archs;
32208  arm_feature_set ext_fset;
32209  const struct arm_option_extension_value_table *opt;
32210
32211  ext_fset = arm_arch_none;
32212  for (opt = arm_extensions; opt->name != NULL; opt++)
32213    {
32214      /* Extension does not provide any feature we need.  */
32215      if (!ARM_CPU_HAS_FEATURE (*needed, opt->merge_value))
32216	continue;
32217
32218      nb_allowed_archs =
32219	sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
32220      for (i = 0; i < nb_allowed_archs; i++)
32221	{
32222	  /* Empty entry.  */
32223	  if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_arch_any))
32224	    break;
32225
32226	  /* Extension is available, add it.  */
32227	  if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *arch_fset))
32228	    ARM_MERGE_FEATURE_SETS (ext_fset, ext_fset, opt->merge_value);
32229	}
32230    }
32231
32232  /* Can we enable all features in *needed?  */
32233  return ARM_FSET_CPU_SUBSET (*needed, ext_fset);
32234}
32235
/* Select value for Tag_CPU_arch and Tag_CPU_arch_profile build attributes for
   a given architecture feature set *ARCH_EXT_FSET including extension feature
   set *EXT_FSET.  Selection logic used depend on EXACT_MATCH:
   - if true, check for an exact match of the architecture modulo extensions;
   - otherwise, select build attribute value of the first superset
     architecture released so that results remains stable when new architectures
     are added.
   For -march/-mcpu=all the build attribute value of the most featureful
   architecture is returned.  Tag_CPU_arch_profile result is returned in
   PROFILE.  Returns -1 (PROFILE unset) when no architecture in
   cpu_arch_ver covers the requested features.  */

static int
get_aeabi_cpu_arch_from_fset (const arm_feature_set *arch_ext_fset,
			      const arm_feature_set *ext_fset,
			      char *profile, int exact_match)
{
  arm_feature_set arch_fset;
  const cpu_arch_ver_table *p_ver, *p_ver_ret = NULL;

  /* Select most featureful architecture with all its extensions if building
     for -march=all as the feature sets used to set build attributes.  */
  if (ARM_FEATURE_EQUAL (*arch_ext_fset, arm_arch_any))
    {
      /* Force revisiting of decision for each new architecture.  */
      gas_assert (MAX_TAG_CPU_ARCH <= TAG_CPU_ARCH_V8_1M_MAIN);
      *profile = 'A';
      return TAG_CPU_ARCH_V8;
    }

  /* Architecture feature set without the requested extensions.  */
  ARM_CLEAR_FEATURE (arch_fset, *arch_ext_fset, *ext_fset);

  /* Walk the (chronologically ordered) architecture table.  */
  for (p_ver = cpu_arch_ver; p_ver->val != -1; p_ver++)
    {
      arm_feature_set known_arch_fset;

      /* Compare CPU features only; FPU features are handled separately.  */
      ARM_CLEAR_FEATURE (known_arch_fset, p_ver->flags, fpu_any);
      if (exact_match)
	{
	  /* Base architecture match user-specified architecture and
	     extensions, eg. ARMv6S-M matching -march=armv6-m+os.  */
	  if (ARM_FEATURE_EQUAL (*arch_ext_fset, known_arch_fset))
	    {
	      p_ver_ret = p_ver;
	      goto found;
	    }
	  /* Base architecture match user-specified architecture only
	     (eg. ARMv6-M in the same case as above).  Record it in case we
	     find a match with above condition.  */
	  else if (p_ver_ret == NULL
		   && ARM_FEATURE_EQUAL (arch_fset, known_arch_fset))
	    p_ver_ret = p_ver;
	}
      else
	{

	  /* Architecture has all features wanted.  */
	  if (ARM_FSET_CPU_SUBSET (arch_fset, known_arch_fset))
	    {
	      arm_feature_set added_fset;

	      /* Compute features added by this architecture over the one
		 recorded in p_ver_ret.  */
	      if (p_ver_ret != NULL)
		ARM_CLEAR_FEATURE (added_fset, known_arch_fset,
				   p_ver_ret->flags);
	      /* First architecture that match incl. with extensions, or the
		 only difference in features over the recorded match is
		 features that were optional and are now mandatory.  */
	      if (p_ver_ret == NULL
		  || ARM_FSET_CPU_SUBSET (added_fset, arch_fset))
		{
		  p_ver_ret = p_ver;
		  goto found;
		}
	    }
	  else if (p_ver_ret == NULL)
	    {
	      arm_feature_set needed_ext_fset;

	      ARM_CLEAR_FEATURE (needed_ext_fset, arch_fset, known_arch_fset);

	      /* Architecture has all features needed when using some
		 extensions.  Record it and continue searching in case there
		 exist an architecture providing all needed features without
		 the need for extensions (eg. ARMv6S-M Vs ARMv6-M with
		 OS extension).  */
	      if (have_ext_for_needed_feat_p (&known_arch_fset,
					      &needed_ext_fset))
		p_ver_ret = p_ver;
	    }
	}
    }

  /* No architecture, even with extensions, covers the features used.  */
  if (p_ver_ret == NULL)
    return -1;

found:
  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7a)
      || ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8)
      || (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_atomics)
	  && !ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8m_m_only)))
    *profile = 'A';
  else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7r))
    *profile = 'R';
  else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_m))
    *profile = 'M';
  else
    *profile = '\0';
  return p_ver_ret->val;
}
32347
/* Set the public EABI object attributes.

   Derives the attribute values either from the instructions actually
   used (autodetection) or from the user-selected CPU/architecture, and
   records them via aeabi_set_attribute_int/string.  Called from
   arm_md_end and again from arm_md_post_relax, so it must be
   idempotent with respect to the selected_* globals it updates.  */

static void
aeabi_set_public_attributes (void)
{
  char profile = '\0';
  int arch = -1;
  int virt_sec = 0;
  int fp16_optional = 0;
  int skip_exact_match = 0;
  arm_feature_set flags, flags_arch, flags_ext;

  /* Autodetection mode, choose the architecture based the instructions
     actually used.  */
  if (no_cpu_selected ())
    {
      ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);

      if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
	ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);

      if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
	ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);

      /* Code run during relaxation relies on selected_cpu being set.  */
      ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
      flags_ext = arm_arch_none;
      ARM_CLEAR_FEATURE (selected_arch, flags_arch, flags_ext);
      selected_ext = flags_ext;
      selected_cpu = flags;
    }
  /* Otherwise, choose the architecture based on the capabilities of the
     requested cpu.  */
  else
    {
      ARM_MERGE_FEATURE_SETS (flags_arch, selected_arch, selected_ext);
      ARM_CLEAR_FEATURE (flags_arch, flags_arch, fpu_any);
      flags_ext = selected_ext;
      flags = selected_cpu;
    }
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_fpu);

  /* Allow the user to override the reported architecture.  */
  if (!ARM_FEATURE_ZERO (selected_object_arch))
    {
      ARM_CLEAR_FEATURE (flags_arch, selected_object_arch, fpu_any);
      flags_ext = arm_arch_none;
    }
  else
    skip_exact_match = ARM_FEATURE_EQUAL (selected_cpu, arm_arch_any);

  /* When this function is run again after relaxation has happened there is no
     way to determine whether an architecture or CPU was specified by the user:
     - selected_cpu is set above for relaxation to work;
     - march_cpu_opt is not set if only -mcpu or .cpu is used;
     - mcpu_cpu_opt is set to arm_arch_any for autodetection.
     Therefore, if not in -march=all case we first try an exact match and fall
     back to autodetection.  */
  if (!skip_exact_match)
    arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 1);
  if (arch == -1)
    arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 0);
  if (arch == -1)
    as_bad (_("no architecture contains all the instructions used\n"));

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *q;

      q = selected_cpu_name;
      /* "armv..." names are reported upper-cased without the prefix.  */
      if (strncmp (q, "armv", 4) == 0)
	{
	  int i;

	  q += 4;
	  for (i = 0; q[i]; i++)
	    q[i] = TOUPPER (q[i]);
	}
      aeabi_set_attribute_string (Tag_CPU_name, q);
    }

  /* Tag_CPU_arch.  */
  aeabi_set_attribute_int (Tag_CPU_arch, arch);

  /* Tag_CPU_arch_profile.  */
  if (profile != '\0')
    aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);

  /* Tag_DSP_extension.  */
  if (ARM_CPU_HAS_FEATURE (selected_ext, arm_ext_dsp))
    aeabi_set_attribute_int (Tag_DSP_extension, 1);

  ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
      || ARM_FEATURE_ZERO (flags_arch))
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);

  /* Tag_THUMB_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
      || ARM_FEATURE_ZERO (flags_arch))
    {
      int thumb_isa_use;

      if (!ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
	  && ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m_m_only))
	thumb_isa_use = 3;
      else if (ARM_CPU_HAS_FEATURE (flags, arm_arch_t2))
	thumb_isa_use = 2;
      else
	thumb_isa_use = 1;
      aeabi_set_attribute_int (Tag_THUMB_ISA_use, thumb_isa_use);
    }

  /* Tag_VFP_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8xd))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 7 : 8);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 5 : 6);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
    {
      fp16_optional = 1;
      aeabi_set_attribute_int (Tag_VFP_arch, 3);
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
    {
      aeabi_set_attribute_int (Tag_VFP_arch, 4);
      fp16_optional = 1;
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 1);

  /* Tag_ABI_HardFP_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
      && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
    aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);

  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);

  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v8_1))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 4);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
    {
      if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
	}
      else
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
	  fp16_optional = 1;
	}
    }

  /* Tag_MVE_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, mve_fp_ext))
    aeabi_set_attribute_int (Tag_MVE_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, mve_ext))
    aeabi_set_attribute_int (Tag_MVE_arch, 1);

  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);

  /* Tag_DIV_use.

     We set Tag_DIV_use to two when integer divide instructions have been used
     in ARM state, or when Thumb integer divide instructions have been used,
     but we have no architecture profile set, nor have we any ARM instructions.

     For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
     by the base architecture.

     For new architectures we will have to check these tests.  */
  gas_assert (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m))
    aeabi_set_attribute_int (Tag_DIV_use, 0);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
	   || (profile == '\0'
	       && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
	       && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
    aeabi_set_attribute_int (Tag_DIV_use, 2);

  /* Tag_MP_extension_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
    aeabi_set_attribute_int (Tag_MPextension_use, 1);

  /* Tag Virtualization_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
    virt_sec |= 1;
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
    virt_sec |= 2;
  if (virt_sec != 0)
    aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);

  /* Tag_ABI_FP_16bit_format.  */
  if (fp16_format != ARM_FP16_FORMAT_DEFAULT)
    aeabi_set_attribute_int (Tag_ABI_FP_16bit_format, fp16_format);
}
32561
32562/* Post relaxation hook.  Recompute ARM attributes now that relaxation is
32563   finished and free extension feature bits which will not be used anymore.  */
32564
32565void
32566arm_md_post_relax (void)
32567{
32568  aeabi_set_public_attributes ();
32569  XDELETE (mcpu_ext_opt);
32570  mcpu_ext_opt = NULL;
32571  XDELETE (march_ext_opt);
32572  march_ext_opt = NULL;
32573}
32574
32575/* Add the default contents for the .ARM.attributes section.  */
32576
32577void
32578arm_md_end (void)
32579{
32580  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
32581    return;
32582
32583  aeabi_set_public_attributes ();
32584}
32585#endif /* OBJ_ELF */
32586
32587/* Parse a .cpu directive.  */
32588
32589static void
32590s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
32591{
32592  const struct arm_cpu_option_table *opt;
32593  char *name;
32594  char saved_char;
32595
32596  name = input_line_pointer;
32597  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
32598    input_line_pointer++;
32599  saved_char = *input_line_pointer;
32600  *input_line_pointer = 0;
32601
32602  /* Skip the first "all" entry.  */
32603  for (opt = arm_cpus + 1; opt->name != NULL; opt++)
32604    if (streq (opt->name, name))
32605      {
32606	selected_arch = opt->value;
32607	selected_ext = opt->ext;
32608	ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
32609	if (opt->canonical_name)
32610	  strcpy (selected_cpu_name, opt->canonical_name);
32611	else
32612	  {
32613	    int i;
32614	    for (i = 0; opt->name[i]; i++)
32615	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
32616
32617	    selected_cpu_name[i] = 0;
32618	  }
32619	ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
32620
32621	*input_line_pointer = saved_char;
32622	demand_empty_rest_of_line ();
32623	return;
32624      }
32625  as_bad (_("unknown cpu `%s'"), name);
32626  *input_line_pointer = saved_char;
32627  ignore_rest_of_line ();
32628}
32629
32630/* Parse a .arch directive.  */
32631
32632static void
32633s_arm_arch (int ignored ATTRIBUTE_UNUSED)
32634{
32635  const struct arm_arch_option_table *opt;
32636  char saved_char;
32637  char *name;
32638
32639  name = input_line_pointer;
32640  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
32641    input_line_pointer++;
32642  saved_char = *input_line_pointer;
32643  *input_line_pointer = 0;
32644
32645  /* Skip the first "all" entry.  */
32646  for (opt = arm_archs + 1; opt->name != NULL; opt++)
32647    if (streq (opt->name, name))
32648      {
32649	selected_arch = opt->value;
32650	selected_ctx_ext_table = opt->ext_table;
32651	selected_ext = arm_arch_none;
32652	selected_cpu = selected_arch;
32653	strcpy (selected_cpu_name, opt->name);
32654	ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
32655	*input_line_pointer = saved_char;
32656	demand_empty_rest_of_line ();
32657	return;
32658      }
32659
32660  as_bad (_("unknown architecture `%s'\n"), name);
32661  *input_line_pointer = saved_char;
32662  ignore_rest_of_line ();
32663}
32664
32665/* Parse a .object_arch directive.  */
32666
32667static void
32668s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
32669{
32670  const struct arm_arch_option_table *opt;
32671  char saved_char;
32672  char *name;
32673
32674  name = input_line_pointer;
32675  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
32676    input_line_pointer++;
32677  saved_char = *input_line_pointer;
32678  *input_line_pointer = 0;
32679
32680  /* Skip the first "all" entry.  */
32681  for (opt = arm_archs + 1; opt->name != NULL; opt++)
32682    if (streq (opt->name, name))
32683      {
32684	selected_object_arch = opt->value;
32685	*input_line_pointer = saved_char;
32686	demand_empty_rest_of_line ();
32687	return;
32688      }
32689
32690  as_bad (_("unknown architecture `%s'\n"), name);
32691  *input_line_pointer = saved_char;
32692  ignore_rest_of_line ();
32693}
32694
32695/* Parse a .arch_extension directive.  */
32696
32697static void
32698s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
32699{
32700  const struct arm_option_extension_value_table *opt;
32701  char saved_char;
32702  char *name;
32703  int adding_value = 1;
32704
32705  name = input_line_pointer;
32706  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
32707    input_line_pointer++;
32708  saved_char = *input_line_pointer;
32709  *input_line_pointer = 0;
32710
32711  if (strlen (name) >= 2
32712      && strncmp (name, "no", 2) == 0)
32713    {
32714      adding_value = 0;
32715      name += 2;
32716    }
32717
32718  /* Check the context specific extension table */
32719  if (selected_ctx_ext_table)
32720    {
32721      const struct arm_ext_table * ext_opt;
32722      for (ext_opt = selected_ctx_ext_table; ext_opt->name != NULL; ext_opt++)
32723        {
32724          if (streq (ext_opt->name, name))
32725	    {
32726	      if (adding_value)
32727		{
32728		  if (ARM_FEATURE_ZERO (ext_opt->merge))
32729		    /* TODO: Option not supported.  When we remove the
32730		    legacy table this case should error out.  */
32731		    continue;
32732		  ARM_MERGE_FEATURE_SETS (selected_ext, selected_ext,
32733					  ext_opt->merge);
32734		}
32735	      else
32736		ARM_CLEAR_FEATURE (selected_ext, selected_ext, ext_opt->clear);
32737
32738	      ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
32739	      ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
32740	      *input_line_pointer = saved_char;
32741	      demand_empty_rest_of_line ();
32742	      return;
32743	    }
32744	}
32745    }
32746
32747  for (opt = arm_extensions; opt->name != NULL; opt++)
32748    if (streq (opt->name, name))
32749      {
32750	int i, nb_allowed_archs =
32751	  sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[i]);
32752	for (i = 0; i < nb_allowed_archs; i++)
32753	  {
32754	    /* Empty entry.  */
32755	    if (ARM_CPU_IS_ANY (opt->allowed_archs[i]))
32756	      continue;
32757	    if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], selected_arch))
32758	      break;
32759	  }
32760
32761	if (i == nb_allowed_archs)
32762	  {
32763	    as_bad (_("architectural extension `%s' is not allowed for the "
32764		      "current base architecture"), name);
32765	    break;
32766	  }
32767
32768	if (adding_value)
32769	  ARM_MERGE_FEATURE_SETS (selected_ext, selected_ext,
32770				  opt->merge_value);
32771	else
32772	  ARM_CLEAR_FEATURE (selected_ext, selected_ext, opt->clear_value);
32773
32774	ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
32775	ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
32776	*input_line_pointer = saved_char;
32777	demand_empty_rest_of_line ();
32778	/* Allowing Thumb division instructions for ARMv7 in autodetection rely
32779	   on this return so that duplicate extensions (extensions with the
32780	   same name as a previous extension in the list) are not considered
32781	   for command-line parsing.  */
32782	return;
32783      }
32784
32785  if (opt->name == NULL)
32786    as_bad (_("unknown architecture extension `%s'\n"), name);
32787
32788  *input_line_pointer = saved_char;
32789  ignore_rest_of_line ();
32790}
32791
32792/* Parse a .fpu directive.  */
32793
32794static void
32795s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
32796{
32797  const struct arm_option_fpu_value_table *opt;
32798  char saved_char;
32799  char *name;
32800
32801  name = input_line_pointer;
32802  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
32803    input_line_pointer++;
32804  saved_char = *input_line_pointer;
32805  *input_line_pointer = 0;
32806
32807  for (opt = arm_fpus; opt->name != NULL; opt++)
32808    if (streq (opt->name, name))
32809      {
32810	selected_fpu = opt->value;
32811	ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, fpu_any);
32812#ifndef CPU_DEFAULT
32813	if (no_cpu_selected ())
32814	  ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
32815	else
32816#endif
32817	  ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
32818	*input_line_pointer = saved_char;
32819	demand_empty_rest_of_line ();
32820	return;
32821      }
32822
32823  as_bad (_("unknown floating point format `%s'\n"), name);
32824  *input_line_pointer = saved_char;
32825  ignore_rest_of_line ();
32826}
32827
32828/* Copy symbol information.  */
32829
void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  /* Propagate the ARM-specific flag word (as accessed via ARM_GET_FLAG)
     from SRC to DEST; no other symbol state is touched here.  */
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
32835
32836#ifdef OBJ_ELF
32837/* Given a symbolic attribute NAME, return the proper integer value.
32838   Returns -1 if the attribute is not known.  */
32839
32840int
32841arm_convert_symbolic_attribute (const char *name)
32842{
32843  static const struct
32844  {
32845    const char * name;
32846    const int    tag;
32847  }
32848  attribute_table[] =
32849    {
32850      /* When you modify this table you should
32851	 also modify the list in doc/c-arm.texi.  */
32852#define T(tag) {#tag, tag}
32853      T (Tag_CPU_raw_name),
32854      T (Tag_CPU_name),
32855      T (Tag_CPU_arch),
32856      T (Tag_CPU_arch_profile),
32857      T (Tag_ARM_ISA_use),
32858      T (Tag_THUMB_ISA_use),
32859      T (Tag_FP_arch),
32860      T (Tag_VFP_arch),
32861      T (Tag_WMMX_arch),
32862      T (Tag_Advanced_SIMD_arch),
32863      T (Tag_PCS_config),
32864      T (Tag_ABI_PCS_R9_use),
32865      T (Tag_ABI_PCS_RW_data),
32866      T (Tag_ABI_PCS_RO_data),
32867      T (Tag_ABI_PCS_GOT_use),
32868      T (Tag_ABI_PCS_wchar_t),
32869      T (Tag_ABI_FP_rounding),
32870      T (Tag_ABI_FP_denormal),
32871      T (Tag_ABI_FP_exceptions),
32872      T (Tag_ABI_FP_user_exceptions),
32873      T (Tag_ABI_FP_number_model),
32874      T (Tag_ABI_align_needed),
32875      T (Tag_ABI_align8_needed),
32876      T (Tag_ABI_align_preserved),
32877      T (Tag_ABI_align8_preserved),
32878      T (Tag_ABI_enum_size),
32879      T (Tag_ABI_HardFP_use),
32880      T (Tag_ABI_VFP_args),
32881      T (Tag_ABI_WMMX_args),
32882      T (Tag_ABI_optimization_goals),
32883      T (Tag_ABI_FP_optimization_goals),
32884      T (Tag_compatibility),
32885      T (Tag_CPU_unaligned_access),
32886      T (Tag_FP_HP_extension),
32887      T (Tag_VFP_HP_extension),
32888      T (Tag_ABI_FP_16bit_format),
32889      T (Tag_MPextension_use),
32890      T (Tag_DIV_use),
32891      T (Tag_nodefaults),
32892      T (Tag_also_compatible_with),
32893      T (Tag_conformance),
32894      T (Tag_T2EE_use),
32895      T (Tag_Virtualization_use),
32896      T (Tag_DSP_extension),
32897      T (Tag_MVE_arch),
32898      /* We deliberately do not include Tag_MPextension_use_legacy.  */
32899#undef T
32900    };
32901  unsigned int i;
32902
32903  if (name == NULL)
32904    return -1;
32905
32906  for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
32907    if (streq (name, attribute_table[i].name))
32908      return attribute_table[i].tag;
32909
32910  return -1;
32911}
32912
32913/* Apply sym value for relocations only in the case that they are for
32914   local symbols in the same segment as the fixup and you have the
32915   respective architectural feature for blx and simple switches.  */
32916
32917int
32918arm_apply_sym_value (struct fix * fixP, segT this_seg)
32919{
32920  if (fixP->fx_addsy
32921      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
32922      /* PR 17444: If the local symbol is in a different section then a reloc
32923	 will always be generated for it, so applying the symbol value now
32924	 will result in a double offset being stored in the relocation.  */
32925      && (S_GET_SEGMENT (fixP->fx_addsy) == this_seg)
32926      && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
32927    {
32928      switch (fixP->fx_r_type)
32929	{
32930	case BFD_RELOC_ARM_PCREL_BLX:
32931	case BFD_RELOC_THUMB_PCREL_BRANCH23:
32932	  if (ARM_IS_FUNC (fixP->fx_addsy))
32933	    return 1;
32934	  break;
32935
32936	case BFD_RELOC_ARM_PCREL_CALL:
32937	case BFD_RELOC_THUMB_PCREL_BLX:
32938	  if (THUMB_IS_FUNC (fixP->fx_addsy))
32939	    return 1;
32940	  break;
32941
32942	default:
32943	  break;
32944	}
32945
32946    }
32947  return 0;
32948}
32949#endif /* OBJ_ELF */
32950